Commit 402a26f0 authored by Linus Torvalds

Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] block/elevator.c: remove unused exports
  [PATCH] splice: fix smaller sized splice reads
  [PATCH] Don't inherit ->splice_pipe across forks
  [patch] cleanup: use blk_queue_stopped
  [PATCH] Document online io scheduler switching
parents bf7cf6ee 4f73247f
As of the Linux 2.6.10 kernel, it is possible to change the IO scheduler
for a given block device on the fly (making it possible, for instance, to
keep CFQ as the system default but have a specific device use the
anticipatory or noop scheduler, which can improve that device's throughput).

To set a specific scheduler, simply do this:

	echo SCHEDNAME > /sys/block/DEV/queue/scheduler

where SCHEDNAME is the name of a defined IO scheduler and DEV is the
device name (hda, hdb, sda, or whatever you happen to have).

The list of defined schedulers can be found by doing
a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
will be displayed, with the currently selected scheduler in brackets:
# cat /sys/block/hda/queue/scheduler
noop anticipatory deadline [cfq]
# echo anticipatory > /sys/block/hda/queue/scheduler
# cat /sys/block/hda/queue/scheduler
noop [anticipatory] deadline cfq
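
For illustration, the same sysfs interface can be driven from a program. A minimal userspace C sketch follows; the device ("hda") and scheduler ("deadline") are only example values, and writing the file requires root:

	/* Sketch only: read the defined schedulers for one device, then
	 * select a different one by writing its name back. */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/block/hda/queue/scheduler";
		char buf[256];
		FILE *f;

		/* the active scheduler is shown in brackets */
		f = fopen(path, "r");
		if (!f || !fgets(buf, sizeof(buf), f)) {
			perror(path);
			return 1;
		}
		fclose(f);
		printf("available: %s", buf);

		/* switch this device to the "deadline" scheduler (needs root) */
		f = fopen(path, "w");
		if (!f || fputs("deadline\n", f) == EOF) {
			perror(path);
			return 1;
		}
		fclose(f);
		return 0;
	}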
@@ -895,10 +895,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
EXPORT_SYMBOL(elv_dispatch_sort);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_requeue_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_dequeue_request);
EXPORT_SYMBOL(elv_queue_empty);
EXPORT_SYMBOL(elv_completed_request);
EXPORT_SYMBOL(elevator_exit);
EXPORT_SYMBOL(elevator_init);
@@ -1554,7 +1554,7 @@ void blk_plug_device(request_queue_t *q)
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
	if (blk_queue_stopped(q))
		return;

	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(blk_remove_plug);
 */
void __generic_unplug_device(request_queue_t *q)
{
	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!blk_remove_plug(q))
......
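
For reference, the blk_queue_stopped() used by the cleanup above is a small predicate wrapper around the same flag test that the open-coded sites performed. A sketch of how such a helper is typically defined (the actual definition lives in include/linux/blkdev.h and may differ in detail):

	#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)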
@@ -275,6 +275,15 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
	error = 0;
	bytes = 0;
	for (i = 0; i < nr_pages; i++, index++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min(len, PAGE_CACHE_SIZE - loff);
find_page:
		/*
		 * lookup the page for this index
@@ -366,11 +375,13 @@ readpage:
				 * force quit after adding this page
				 */
				nr_pages = i;
				this_len = min(this_len, loff);
			}
		}
fill_it:
		pages[i] = page;
		bytes += PAGE_CACHE_SIZE - loff;
		bytes += this_len;
		len -= this_len;
		loff = 0;
	}
......
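
The core of the splice read fix above is the per-page length clamp: each loop iteration may consume at most min(len, PAGE_CACHE_SIZE - loff) bytes, so a splice smaller than a page no longer accounts a full page. A tiny stand-alone sketch of that arithmetic (PAGE_CACHE_SIZE hard-coded to 4096 for illustration; not kernel code):

	#include <stdio.h>

	#define PAGE_CACHE_SIZE 4096u	/* illustrative page size */

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int len = 1000;	/* bytes requested by the splice */
		unsigned int loff = 100;	/* offset into the first page */

		/* max we'll use from this page: what is left of the request,
		 * capped by what is left of the page after loff */
		unsigned int this_len = min_u(len, PAGE_CACHE_SIZE - loff);

		printf("this_len = %u\n", this_len);	/* prints 1000, not 3996 */
		return 0;
	}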
@@ -180,6 +180,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
	atomic_set(&tsk->usage,2);
	atomic_set(&tsk->fs_excl, 0);
	tsk->btrace_seq = 0;
	tsk->splice_pipe = NULL;
	return tsk;
}
......
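
The splice_pipe initialization above is needed because dup_task_struct() starts the child from a copy of the parent's task_struct, so a per-task cached pointer such as the splice pipe cache would otherwise be shared by parent and child. A minimal stand-alone sketch of that pattern (struct and field names are made up for illustration, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct task_like {
		int id;
		void *cached_pipe;	/* per-task cache, must not be inherited */
	};

	static struct task_like *dup_task_like(const struct task_like *orig, int new_id)
	{
		struct task_like *child = malloc(sizeof(*child));

		if (!child)
			return NULL;
		*child = *orig;			/* struct copy inherits cached_pipe too */
		child->id = new_id;
		child->cached_pipe = NULL;	/* the fix: reset the cache in the child */
		return child;
	}

	int main(void)
	{
		struct task_like parent = { .id = 1, .cached_pipe = &parent };
		struct task_like *child = dup_task_like(&parent, 2);

		if (child) {
			printf("child cache: %p\n", child->cached_pipe);	/* (nil) */
			free(child);
		}
		return 0;
	}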