Commit cd9bb7e7 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/data/git/linux-2.6-block:
  [PATCH] elevator: elv_list_lock does not need irq disabling
  [BLOCK] Don't pin lots of memory in mempools
  cfq-iosched: speedup cic rb lookup
  ll_rw_blk: add io_context private pointer
  cfq-iosched: get rid of cfqq hash
  cfq-iosched: tighten queue request overlap condition
  cfq-iosched: improve sync vs async workloads
  cfq-iosched: never allow an async queue idling
  cfq-iosched: get rid of ->dispatch_slice
  cfq-iosched: don't pass unused preemption variable around
  cfq-iosched: get rid of ->cur_rr and ->cfq_list
  cfq-iosched: slice offset should take ioprio into account
  [PATCH] cfq-iosched: style cleanups and comments
  cfq-iosched: sort IDLE queues into the rbtree
  cfq-iosched: sort RT queues into the rbtree
  [PATCH] cfq-iosched: speed up rbtree handling
  cfq-iosched: rework the whole round-robin list concept
  cfq-iosched: minor updates
  cfq-iosched: development update
  cfq-iosched: improve preemption for cooperating tasks
parents 24a77daf 07e44708
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
-#include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
@@ -26,19 +25,17 @@ static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 125;
 
 /*
  * grace period before allowing idle class to get disk access
  */
 #define CFQ_IDLE_GRACE		(HZ / 10)
-#define CFQ_SLICE_SCALE		(5)
 
-#define CFQ_KEY_ASYNC		(0)
+/*
+ * below this threshold, we consider thinktime immediate
+ */
+#define CFQ_MIN_TT		(2)
 
-/*
- * for the hash of cfqq inside the cfqd
- */
-#define CFQ_QHASH_SHIFT		6
-#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
-#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)
-
-#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
+#define CFQ_SLICE_SCALE		(5)
 
 #define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
 #define RQ_CFQQ(rq)		((rq)->elevator_private2)
@@ -56,16 +53,20 @@ static struct completion *ioc_gone;
 #define ASYNC			(0)
 #define SYNC			(1)
 
-#define cfq_cfqq_dispatched(cfqq)	\
-	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
-
-#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)
-
-#define cfq_cfqq_sync(cfqq)		\
-	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
-
 #define sample_valid(samples)	((samples) > 80)
 
+/*
+ * Most of our rbtree usage is for sorting with min extraction, so
+ * if we cache the leftmost node we don't have to walk down the tree
+ * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
+ * move this into the elevator for the rq sorting as well.
+ */
+struct cfq_rb_root {
+	struct rb_root rb;
+	struct rb_node *left;
+};
+#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
+
 /*
  * Per block device queue structure
  */
@@ -75,18 +76,11 @@ struct cfq_data {
 	/*
 	 * rr list of queues with requests and the count of them
 	 */
-	struct list_head rr_list[CFQ_PRIO_LISTS];
-	struct list_head busy_rr;
-	struct list_head cur_rr;
-	struct list_head idle_rr;
+	struct cfq_rb_root service_tree;
 	unsigned int busy_queues;
 
-	/*
-	 * cfqq lookup hash
-	 */
-	struct hlist_head *cfq_hash;
-
 	int rq_in_driver;
+	int sync_flight;
 	int hw_tag;
 
 	/*
@@ -97,12 +91,10 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
-	int cur_prio, cur_end_prio;
-	unsigned int dispatch_slice;
 
 	struct timer_list idle_class_timer;
 
-	sector_t last_sector;
+	sector_t last_position;
 	unsigned long last_end_request;
 
 	/*
@@ -117,6 +109,9 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 
 	struct list_head cic_list;
+
+	sector_t new_seek_mean;
+	u64 new_seek_total;
 };
 
 /*
@@ -127,12 +122,10 @@ struct cfq_queue {
 	atomic_t ref;
 	/* parent cfq_data */
 	struct cfq_data *cfqd;
-	/* cfqq lookup hash */
-	struct hlist_node cfq_hash;
-	/* hash key */
-	unsigned int key;
-	/* member of the rr/busy/cur/idle cfqd list */
-	struct list_head cfq_list;
+	/* service_tree member */
+	struct rb_node rb_node;
+	/* service_tree key */
+	unsigned long rb_key;
 	/* sorted list of pending requests */
 	struct rb_root sort_list;
 	/* if fifo isn't expired, next request to serve */
@@ -147,11 +140,10 @@ struct cfq_queue {
 	struct list_head fifo;
 
 	unsigned long slice_end;
-	unsigned long service_last;
 	long slice_resid;
 
-	/* number of requests that are on the dispatch list */
-	int on_dispatch[2];
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
 
 	/* io prio of this group */
 	unsigned short ioprio, org_ioprio;
@@ -159,6 +151,8 @@ struct cfq_queue {
 	/* various state flags, see below */
 	unsigned int flags;
+
+	sector_t last_request_pos;
 };
@@ -172,6 +166,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
+	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 };
@@ -198,11 +193,38 @@ CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
+CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
 
-static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+				       struct task_struct *, gfp_t);
+static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
+						struct io_context *);
+
+static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
+					    int is_sync)
+{
+	return cic->cfqq[!!is_sync];
+}
+
+static inline void cic_set_cfqq(struct cfq_io_context *cic,
+				struct cfq_queue *cfqq, int is_sync)
+{
+	cic->cfqq[!!is_sync] = cfqq;
+}
+
+/*
+ * We regard a request as SYNC, if it's either a read or has the SYNC bit
+ * set (in which case it could also be direct WRITE).
+ */
+static inline int cfq_bio_sync(struct bio *bio)
+{
+	if (bio_data_dir(bio) == READ || bio_sync(bio))
+		return 1;
+
+	return 0;
+}
@@ -221,44 +243,31 @@ static int cfq_queue_empty(request_queue_t *q)
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
-{
-	/*
-	 * Use the per-process queue, for read requests and syncronous writes
-	 */
-	if (!(rw & REQ_RW) || is_sync)
-		return task->pid;
-
-	return CFQ_KEY_ASYNC;
-}
-
 /*
  * Scale schedule slice based on io priority. Use the sync time slice only
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int
-cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+				 unsigned short prio)
 {
-	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
+	const int base_slice = cfqd->cfq_slice[sync];
 
-	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+	WARN_ON(prio >= IOPRIO_BE_NR);
+
+	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
+}
 
-	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 }
 
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
-	cfqq->slice_end += cfqq->slice_resid;
-
-	/*
-	 * Don't carry over residual for more than one slice, we only want
-	 * to slightly correct the fairness. Carrying over forever would
-	 * easily introduce oscillations.
-	 */
-	cfqq->slice_resid = 0;
 }
@@ -307,7 +316,7 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	s1 = rq1->sector;
 	s2 = rq2->sector;
 
-	last = cfqd->last_sector;
+	last = cfqd->last_position;
 
 	/*
 	 * by definition, 1KiB is 2 sectors
@@ -371,6 +380,26 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	}
 }
 
+/*
+ * The below is leftmost cache rbtree addon
+ */
+static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
+{
+	if (!root->left)
+		root->left = rb_first(&root->rb);
+
+	return root->left;
+}
+
+static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
+{
+	if (root->left == n)
+		root->left = NULL;
+
+	rb_erase(n, &root->rb);
+	RB_CLEAR_NODE(n);
+}
+
@@ -398,78 +427,96 @@ cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	return cfq_choose_req(cfqd, next, prev);
 }
 
-static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
+static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
+				      struct cfq_queue *cfqq)
 {
-	struct cfq_data *cfqd = cfqq->cfqd;
-	struct list_head *list, *n;
-	struct cfq_queue *__cfqq;
-
 	/*
-	 * Resorting requires the cfqq to be on the RR list already.
+	 * just an approximation, should be ok.
 	 */
-	if (!cfq_cfqq_on_rr(cfqq))
-		return;
+	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
+		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
+}
 
-	list_del(&cfqq->cfq_list);
+/*
+ * The cfqd->service_tree holds all pending cfq_queue's that have
+ * requests waiting to be processed. It is sorted in the order that
+ * we will service the queues.
+ */
+static void cfq_service_tree_add(struct cfq_data *cfqd,
+				 struct cfq_queue *cfqq, int add_front)
+{
+	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
+	struct rb_node *parent = NULL;
+	unsigned long rb_key;
+	int left;
+
+	if (!add_front) {
+		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
+		rb_key += cfqq->slice_resid;
+		cfqq->slice_resid = 0;
+	} else
+		rb_key = 0;
 
-	if (cfq_class_rt(cfqq))
-		list = &cfqd->cur_rr;
-	else if (cfq_class_idle(cfqq))
-		list = &cfqd->idle_rr;
-	else {
+	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
-		 * if cfqq has requests in flight, don't allow it to be
-		 * found in cfq_set_active_queue before it has finished them.
-		 * this is done to increase fairness between a process that
-		 * has lots of io pending vs one that only generates one
-		 * sporadically or synchronously
+		 * same position, nothing more to do
 		 */
-		if (cfq_cfqq_dispatched(cfqq))
-			list = &cfqd->busy_rr;
-		else
-			list = &cfqd->rr_list[cfqq->ioprio];
+		if (rb_key == cfqq->rb_key)
+			return;
+
+		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 	}
 
-	if (preempted || cfq_cfqq_queue_new(cfqq)) {
-		/*
-		 * If this queue was preempted or is new (never been serviced),
-		 * let it be added first for fairness but beind other new
-		 * queues.
-		 */
-		n = list;
-		while (n->next != list) {
-			__cfqq = list_entry_cfqq(n->next);
-			if (!cfq_cfqq_queue_new(__cfqq))
-				break;
+	left = 1;
+	while (*p) {
+		struct cfq_queue *__cfqq;
+		struct rb_node **n;
 
-			n = n->next;
-		}
+		parent = *p;
+		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
 
-		list_add_tail(&cfqq->cfq_list, n);
-	} else if (!cfq_cfqq_class_sync(cfqq)) {
 		/*
-		 * async queue always goes to the end. this wont be overly
-		 * unfair to writes, as the sort of the sync queue wont be
-		 * allowed to pass the async queue again.
+		 * sort RT queues first, we always want to give
+		 * preference to them. IDLE queues goes to the back.
+		 * after that, sort on the next service time.
 		 */
-		list_add_tail(&cfqq->cfq_list, list);
-	} else {
-		/*
-		 * sort by last service, but don't cross a new or async
-		 * queue. we don't cross a new queue because it hasn't been
-		 * service before, and we don't cross an async queue because
-		 * it gets added to the end on expire.
-		 */
-		n = list;
-		while ((n = n->prev) != list) {
-			struct cfq_queue *__cfqq = list_entry_cfqq(n);
+		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
+			n = &(*p)->rb_left;
+		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
+			n = &(*p)->rb_right;
+		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
+			n = &(*p)->rb_left;
+		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
+			n = &(*p)->rb_right;
+		else if (rb_key < __cfqq->rb_key)
+			n = &(*p)->rb_left;
+		else
+			n = &(*p)->rb_right;
 
-			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
-				break;
-			if (time_before(__cfqq->service_last, cfqq->service_last))
-				break;
-		}
+		if (n == &(*p)->rb_right)
+			left = 0;
 
-		list_add(&cfqq->cfq_list, n);
+		p = n;
 	}
+
+	if (left)
+		cfqd->service_tree.left = &cfqq->rb_node;
+
+	cfqq->rb_key = rb_key;
+	rb_link_node(&cfqq->rb_node, parent, p);
+	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
+}
+
+/*
+ * Update cfqq's position in the service tree.
+ */
+static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	/*
+	 * Resorting requires the cfqq to be on the RR list already.
+	 */
+	if (cfq_cfqq_on_rr(cfqq))
+		cfq_service_tree_add(cfqd, cfqq, 0);
 }
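The descent in cfq_service_tree_add() encodes a three-level sort: RT class first, IDLE class last, and within a class the smaller rb_key (earlier expected service time) first. That ordering can be expressed as a plain comparator, sketched below with invented names (service_cmp(), struct q); the kernel open-codes it inside the rbtree walk instead.

#include <stdio.h>

/* Hypothetical flattening of the ioprio classes the kernel uses. */
enum { CLASS_RT, CLASS_BE, CLASS_IDLE };

struct q {
        const char *name;
        int class;
        unsigned long rb_key;   /* expected service time, in jiffies */
};

/* Mirrors the ordering in cfq_service_tree_add(): RT before BE before
 * IDLE, ties broken by smaller rb_key. Returns <0 if a sorts first. */
static int service_cmp(const struct q *a, const struct q *b)
{
        int a_rt = a->class == CLASS_RT, b_rt = b->class == CLASS_RT;
        int a_idle = a->class == CLASS_IDLE, b_idle = b->class == CLASS_IDLE;

        if (a_rt != b_rt)
                return b_rt - a_rt;        /* RT wins */
        if (a_idle != b_idle)
                return a_idle - b_idle;    /* IDLE loses */
        return (a->rb_key < b->rb_key) ? -1 : (a->rb_key > b->rb_key);
}

int main(void)
{
        struct q be = { "be", CLASS_BE, 100 };
        struct q rt = { "rt", CLASS_RT, 500 };
        struct q idle = { "idle", CLASS_IDLE, 1 };

        /* rt sorts first despite its larger key; idle sorts last. */
        printf("rt vs be:   %d\n", service_cmp(&rt, &be));
        printf("be vs idle: %d\n", service_cmp(&be, &idle));
        return 0;
}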
@@ -483,15 +530,21 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
 
-	cfq_resort_rr_list(cfqq, 0);
+	cfq_resort_rr_list(cfqd, cfqq);
 }
 
+/*
+ * Called when the cfqq no longer has requests pending, remove it from
+ * the service tree.
+ */
 static inline void
 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
 	cfq_clear_cfqq_on_rr(cfqq);
-	list_del_init(&cfqq->cfq_list);
+
+	if (!RB_EMPTY_NODE(&cfqq->rb_node))
+		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
@@ -552,10 +605,14 @@ static struct request *
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
+	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
 
-	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
+	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	if (!cic)
+		return NULL;
+
+	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	if (cfqq) {
 		sector_t sector = bio->bi_sector + bio_sectors(bio);
 
@@ -579,6 +636,8 @@ static void cfq_activate_request(request_queue_t *q, struct request *rq)
 	 */
 	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
 		cfqd->hw_tag = 1;
+
+	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
 }
 
 static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
@@ -605,8 +664,7 @@ static void cfq_remove_request(struct request *rq)
 	}
 }
 
-static int
-cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct request *__rq;
@@ -648,23 +706,24 @@ static int cfq_allow_merge(request_queue_t *q, struct request *rq,
 			   struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	const int rw = bio_data_dir(bio);
+	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
-	pid_t key;
 
 	/*
 	 * Disallow merge of a sync bio into an async request.
 	 */
-	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
+	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
 		return 0;
 
 	/*
 	 * Lookup the cfqq that this bio will be queued with. Allow
 	 * merge only if rq is queued there.
 	 */
-	key = cfq_queue_pid(current, rw, bio_sync(bio));
-	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
+	if (!cic)
+		return 0;
 
+	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
 	if (cfqq == RQ_CFQQ(rq))
 		return 1;
@@ -684,6 +743,7 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
+		cfq_clear_cfqq_queue_new(cfqq);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -694,23 +754,21 @@ __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-		    int preempted, int timed_out)
+		    int timed_out)
 {
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
 	cfq_clear_cfqq_must_dispatch(cfqq);
 	cfq_clear_cfqq_wait_request(cfqq);
-	cfq_clear_cfqq_queue_new(cfqq);
 
 	/*
-	 * store what was left of this slice, if the queue idled out
-	 * or was preempted
+	 * store what was left of this slice, if the queue idled/timed out
 	 */
 	if (timed_out && !cfq_cfqq_slice_new(cfqq))
 		cfqq->slice_resid = cfqq->slice_end - jiffies;
 
-	cfq_resort_rr_list(cfqq, preempted);
+	cfq_resort_rr_list(cfqd, cfqq);
 
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
@@ -719,163 +777,152 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
 	}
-
-	cfqd->dispatch_slice = 0;
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
-				     int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
 	if (cfqq)
-		__cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
+		__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 /*
- * 0
- * 0,1
- * 0,1,2
- * 0,1,2,3
- * 0,1,2,3,4
- * 0,1,2,3,4,5
- * 0,1,2,3,4,5,6
- * 0,1,2,3,4,5,6,7
+ * Get next queue for service. Unless we have a queue preemption,
+ * we'll simply select the first cfqq in the service tree.
  */
-static int cfq_get_next_prio_level(struct cfq_data *cfqd)
+static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
-	int prio, wrap;
+	struct cfq_queue *cfqq;
+	struct rb_node *n;
 
-	prio = -1;
-	wrap = 0;
-	do {
-		int p;
+	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
+		return NULL;
 
-		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
-			if (!list_empty(&cfqd->rr_list[p])) {
-				prio = p;
-				break;
-			}
-		}
+	n = cfq_rb_first(&cfqd->service_tree);
+	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
-		if (prio != -1)
-			break;
-		cfqd->cur_prio = 0;
-		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
-			cfqd->cur_end_prio = 0;
-			if (wrap)
-				break;
-			wrap = 1;
-		}
-	} while (1);
+	if (cfq_class_idle(cfqq)) {
+		unsigned long end;
 
-	if (unlikely(prio == -1))
-		return -1;
+		/*
+		 * if we have idle queues and no rt or be queues had
+		 * pending requests, either allow immediate service if
+		 * the grace period has passed or arm the idle grace
+		 * timer
+		 */
+		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+		if (time_before(jiffies, end)) {
+			mod_timer(&cfqd->idle_class_timer, end);
+			cfqq = NULL;
+		}
+	}
 
-	BUG_ON(prio >= CFQ_PRIO_LISTS);
+	return cfqq;
+}
 
-	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
+/*
+ * Get and set a new active queue for service.
+ */
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+{
+	struct cfq_queue *cfqq;
 
-	cfqd->cur_prio = prio + 1;
-	if (cfqd->cur_prio > cfqd->cur_end_prio) {
-		cfqd->cur_end_prio = cfqd->cur_prio;
-		cfqd->cur_prio = 0;
-	}
-	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
-		cfqd->cur_prio = 0;
-		cfqd->cur_end_prio = 0;
-	}
+	cfqq = cfq_get_next_queue(cfqd);
+	__cfq_set_active_queue(cfqd, cfqq);
+	return cfqq;
+}
 
-	return prio;
+static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
+					  struct request *rq)
+{
+	if (rq->sector >= cfqd->last_position)
+		return rq->sector - cfqd->last_position;
+	else
+		return cfqd->last_position - rq->sector;
 }
 
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
 {
-	struct cfq_queue *cfqq = NULL;
+	struct cfq_io_context *cic = cfqd->active_cic;
 
-	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
-		/*
-		 * if current list is non-empty, grab first entry. if it is
-		 * empty, get next prio level and grab first entry then if any
-		 * are spliced
-		 */
-		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
-	} else if (!list_empty(&cfqd->busy_rr)) {
-		/*
-		 * If no new queues are available, check if the busy list has
-		 * some before falling back to idle io.
-		 */
-		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
-	} else if (!list_empty(&cfqd->idle_rr)) {
-		/*
-		 * if we have idle queues and no rt or be queues had pending
-		 * requests, either allow immediate service if the grace period
-		 * has passed or arm the idle grace timer
-		 */
-		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+	if (!sample_valid(cic->seek_samples))
+		return 0;
 
-		if (time_after_eq(jiffies, end))
-			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
-		else
-			mod_timer(&cfqd->idle_class_timer, end);
-	}
+	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
+}
 
-	__cfq_set_active_queue(cfqd, cfqq);
-	return cfqq;
+static int cfq_close_cooperator(struct cfq_data *cfq_data,
+				struct cfq_queue *cfqq)
+{
+	/*
+	 * We should notice if some of the queues are cooperating, eg
+	 * working closely on the same area of the disk. In that case,
+	 * we can group them together and don't waste time idling.
+	 */
+	return 0;
 }
-#define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
 
-static int cfq_arm_slice_timer(struct cfq_data *cfqd)
+static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 	struct cfq_io_context *cic;
 	unsigned long sl;
 
 	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
+	WARN_ON(cfq_cfqq_slice_new(cfqq));
 
 	/*
 	 * idle is disabled, either manually or by past process history
 	 */
-	if (!cfqd->cfq_slice_idle)
-		return 0;
-	if (!cfq_cfqq_idle_window(cfqq))
-		return 0;
+	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
+		return;
+
 	/*
 	 * task has exited, don't wait
 	 */
 	cic = cfqd->active_cic;
 	if (!cic || !cic->ioc->task)
-		return 0;
+		return;
+
+	/*
+	 * See if this prio level has a good candidate
+	 */
+	if (cfq_close_cooperator(cfqd, cfqq) &&
+	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
+		return;
 
 	cfq_mark_cfqq_must_dispatch(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);
 
-	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
-
 	/*
 	 * we don't want to idle for seeks, but we do want to allow
 	 * fair distribution of slice time for a process doing back-to-back
 	 * seeks. so allow a little bit of time for him to submit a new rq
 	 */
+	sl = cfqd->cfq_slice_idle;
 	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
-		sl = min(sl, msecs_to_jiffies(2));
+		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	return 1;
 }
+/*
+ * Move request from internal lists to the request queue dispatch list.
+ */
 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
 	cfq_remove_request(rq);
-	cfqq->on_dispatch[rq_is_sync(rq)]++;
+	cfqq->dispatched++;
 	elv_dispatch_sort(q, rq);
 
-	rq = list_entry(q->queue_head.prev, struct request, queuelist);
-	cfqd->last_sector = rq->sector + rq->nr_sectors;
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->sync_flight++;
 }
@@ -895,13 +942,13 @@ static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 	if (list_empty(&cfqq->fifo))
 		return NULL;
 
-	fifo = cfq_cfqq_class_sync(cfqq);
+	fifo = cfq_cfqq_sync(cfqq);
 	rq = rq_entry_fifo(cfqq->fifo.next);
 
-	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
-		return rq;
+	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+		return NULL;
 
-	return NULL;
+	return rq;
 }
@@ -915,7 +962,8 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
- * get next queue for service
+ * Select a queue for service. If we have a current active queue,
+ * check whether to continue servicing it, or retrieve and set a new one.
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
@@ -926,33 +974,41 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto new_queue;
 
 	/*
-	 * slice has expired
+	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
+	if (cfq_slice_used(cfqq))
 		goto expire;
 
 	/*
-	 * if queue has requests, dispatch one. if not, check if
-	 * enough slice is left to wait for one
+	 * The active queue has requests and isn't expired, allow it to
+	 * dispatch.
 	 */
 	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 		goto keep_queue;
-	else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
+
+	/*
+	 * No requests pending. If the active queue still has requests in
+	 * flight or is idling for a new request, allow either of these
+	 * conditions to happen (or time out) before selecting a new queue.
+	 */
+	if (timer_pending(&cfqd->idle_slice_timer) ||
+	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
 		cfqq = NULL;
 		goto keep_queue;
-	} else if (cfq_cfqq_class_sync(cfqq)) {
-		if (cfq_arm_slice_timer(cfqd))
-			return NULL;
 	}
 
 expire:
-	cfq_slice_expired(cfqd, 0, 0);
+	cfq_slice_expired(cfqd, 0);
 new_queue:
 	cfqq = cfq_set_active_queue(cfqd);
 keep_queue:
 	return cfqq;
 }
+/*
+ * Dispatch some requests from cfqq, moving them to the request queue
+ * dispatch list.
+ */
 static int
 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			int max_dispatch)
@@ -975,7 +1031,6 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 */
 		cfq_dispatch_insert(cfqd->queue, rq);
 
-		cfqd->dispatch_slice++;
 		dispatched++;
 
 		if (!cfqd->active_cic) {
@@ -993,57 +1048,54 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 * queue always expire after 1 dispatch round.
 	 */
 	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
 	    cfq_class_idle(cfqq))) {
 		cfqq->slice_end = jiffies + 1;
-		cfq_slice_expired(cfqd, 0, 0);
+		cfq_slice_expired(cfqd, 0);
 	}
 
 	return dispatched;
 }
-static int
-cfq_forced_dispatch_cfqqs(struct list_head *list)
+static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
-	struct cfq_queue *cfqq, *next;
-	int dispatched;
+	int dispatched = 0;
 
-	dispatched = 0;
-	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
-		while (cfqq->next_rq) {
-			cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
-			dispatched++;
-		}
-		BUG_ON(!list_empty(&cfqq->fifo));
+	while (cfqq->next_rq) {
+		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
+		dispatched++;
 	}
 
+	BUG_ON(!list_empty(&cfqq->fifo));
 	return dispatched;
 }
 
-static int
-cfq_forced_dispatch(struct cfq_data *cfqd)
+/*
+ * Drain our current requests. Used for barriers and when switching
+ * io schedulers on-the-fly.
+ */
+static int cfq_forced_dispatch(struct cfq_data *cfqd)
 {
-	int i, dispatched = 0;
+	int dispatched = 0;
+	struct rb_node *n;
 
-	for (i = 0; i < CFQ_PRIO_LISTS; i++)
-		dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
+	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
+		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
-	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
-	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
-	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
+		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+	}
 
-	cfq_slice_expired(cfqd, 0, 0);
+	cfq_slice_expired(cfqd, 0);
 
 	BUG_ON(cfqd->busy_queues);
 
 	return dispatched;
 }
-static int
-cfq_dispatch_requests(request_queue_t *q, int force)
+static int cfq_dispatch_requests(request_queue_t *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_queue *cfqq, *prev_cfqq;
+	struct cfq_queue *cfqq;
 	int dispatched;
 
 	if (!cfqd->busy_queues)
@@ -1053,36 +1105,28 @@ cfq_dispatch_requests(request_queue_t *q, int force)
 		return cfq_forced_dispatch(cfqd);
 
 	dispatched = 0;
-	prev_cfqq = NULL;
 	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
 		int max_dispatch;
 
-		if (cfqd->busy_queues > 1) {
-			/*
-			 * Don't repeat dispatch from the previous queue.
-			 */
-			if (prev_cfqq == cfqq)
-				break;
+		max_dispatch = cfqd->cfq_quantum;
+		if (cfq_class_idle(cfqq))
+			max_dispatch = 1;
 
-			/*
-			 * So we have dispatched before in this round, if the
-			 * next queue has idling enabled (must be sync), don't
-			 * allow it service until the previous have continued.
-			 */
-			if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq))
+		if (cfqq->dispatched >= max_dispatch) {
+			if (cfqd->busy_queues > 1)
+				break;
+			if (cfqq->dispatched >= 4 * max_dispatch)
 				break;
 		}
 
+		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+			break;
+
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
 
-		max_dispatch = cfqd->cfq_quantum;
-		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
-
 		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
-		prev_cfqq = cfqq;
 	}
 
 	return dispatched;
@@ -1108,48 +1152,21 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 
 	if (unlikely(cfqd->active_queue == cfqq)) {
-		__cfq_slice_expired(cfqd, cfqq, 0, 0);
+		__cfq_slice_expired(cfqd, cfqq, 0);
 		cfq_schedule_dispatch(cfqd);
 	}
 
-	/*
-	 * it's on the empty list and still hashed
-	 */
-	list_del(&cfqq->cfq_list);
-	hlist_del(&cfqq->cfq_hash);
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
-static struct cfq_queue *
-__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
-		    const int hashval)
-{
-	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
-	struct hlist_node *entry;
-	struct cfq_queue *__cfqq;
-
-	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
-		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
-
-		if (__cfqq->key == key && (__p == prio || !prio))
-			return __cfqq;
-	}
-
-	return NULL;
-}
-
-static struct cfq_queue *
-cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
-{
-	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
-}
-
 static void cfq_free_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
 	struct rb_node *n;
 	int freed = 0;
 
+	ioc->ioc_data = NULL;
+
 	while ((n = rb_first(&ioc->cic_root)) != NULL) {
 		__cic = rb_entry(n, struct cfq_io_context, rb_node);
 		rb_erase(&__cic->rb_node, &ioc->cic_root);
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{ {
if (unlikely(cfqq == cfqd->active_queue)) { if (unlikely(cfqq == cfqd->active_queue)) {
__cfq_slice_expired(cfqd, cfqq, 0, 0); __cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd); cfq_schedule_dispatch(cfqd);
} }
@@ -1191,10 +1208,6 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	}
 }
 
-/*
- * Called with interrupts disabled
- */
 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
@@ -1208,15 +1221,20 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 	}
 }
 
+/*
+ * The process that ioc belongs to has exited, we need to clean up
+ * and put the internal structures we have that belongs to that process.
+ */
 static void cfq_exit_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
 	struct rb_node *n;
 
+	ioc->ioc_data = NULL;
+
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
 	n = rb_first(&ioc->cic_root);
 	while (n != NULL) {
 		__cic = rb_entry(n, struct cfq_io_context, rb_node);
 
@@ -1284,8 +1302,6 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
 	 */
 	cfqq->org_ioprio = cfqq->ioprio;
 	cfqq->org_ioprio_class = cfqq->ioprio_class;
-
-	cfq_resort_rr_list(cfqq, 0);
 	cfq_clear_cfqq_prio_changed(cfqq);
 }
 
@@ -1303,7 +1319,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 	cfqq = cic->cfqq[ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
+		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
 					 GFP_ATOMIC);
 		if (new_cfqq) {
 			cic->cfqq[ASYNC] = new_cfqq;
@@ -1335,16 +1351,16 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
-	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
-	unsigned short ioprio;
+	struct cfq_io_context *cic;
 
 retry:
-	ioprio = tsk->ioprio;
-	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
+	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	/* cic always exists here */
+	cfqq = cic_to_cfqq(cic, is_sync);
 
 	if (!cfqq) {
 		if (new_cfqq) {
@@ -1369,20 +1385,20 @@ retry:
 
 		memset(cfqq, 0, sizeof(*cfqq));
 
-		INIT_HLIST_NODE(&cfqq->cfq_hash);
-		INIT_LIST_HEAD(&cfqq->cfq_list);
+		RB_CLEAR_NODE(&cfqq->rb_node);
 		INIT_LIST_HEAD(&cfqq->fifo);
 
-		cfqq->key = key;
-		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
 		cfqq->cfqd = cfqd;
 
-		if (key != CFQ_KEY_ASYNC)
+		if (is_sync) {
 			cfq_mark_cfqq_idle_window(cfqq);
+			cfq_mark_cfqq_sync(cfqq);
+		}
 
 		cfq_mark_cfqq_prio_changed(cfqq);
 		cfq_mark_cfqq_queue_new(cfqq);
 		cfq_init_prio_data(cfqq);
 	}
@@ -1395,10 +1411,17 @@ out:
 	return cfqq;
 }
 
+/*
+ * We drop cfq io contexts lazily, so we may find a dead one.
+ */
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	WARN_ON(!list_empty(&cic->queue_list));
 
+	if (ioc->ioc_data == cic)
+		ioc->ioc_data = NULL;
+
 	rb_erase(&cic->rb_node, &ioc->cic_root);
 	kmem_cache_free(cfq_ioc_pool, cic);
 	elv_ioc_count_dec(ioc_count);
@@ -1411,6 +1434,16 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 	struct cfq_io_context *cic;
 	void *k, *key = cfqd;
 
+	if (unlikely(!ioc))
+		return NULL;
+
+	/*
+	 * we maintain a last-hit cache, to avoid browsing over the tree
+	 */
+	cic = ioc->ioc_data;
+	if (cic && cic->key == cfqd)
+		return cic;
+
 restart:
 	n = ioc->cic_root.rb_node;
 	while (n) {
@@ -1426,8 +1459,10 @@ restart:
 			n = n->rb_left;
 		else if (key > k)
 			n = n->rb_right;
-		else
+		else {
+			ioc->ioc_data = cic;
 			return cic;
+		}
 	}
 
 	return NULL;
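The "speedup cic rb lookup" patch is visible here: ioc->ioc_data is a one-entry cache checked before walking the rbtree, and the hunks above clear it in cfq_drop_dead_cic(), cfq_free_io_context() and cfq_exit_io_context() so a stale pointer is never followed. A toy version of the same pattern, with a linear table standing in for the rbtree (struct table and lookup() are invented names):

#include <stdio.h>

/* Toy lookup table standing in for ioc->cic_root; the point is the
 * one-entry "last hit" cache in front of it, like ioc->ioc_data. */
struct entry {
        const void *key;
        int value;
};

struct table {
        struct entry slots[8];
        int n;
        struct entry *last_hit;         /* one-entry cache */
};

static struct entry *lookup(struct table *t, const void *key)
{
        /* Fast path: repeated lookups for the same key skip the scan,
         * just as cfq_cic_rb_lookup() checks ioc->ioc_data first. */
        if (t->last_hit && t->last_hit->key == key)
                return t->last_hit;

        for (int i = 0; i < t->n; i++) {
                if (t->slots[i].key == key) {
                        t->last_hit = &t->slots[i];
                        return t->last_hit;
                }
        }
        return NULL;
}

int main(void)
{
        static int qa, qb;              /* stand-ins for cfqd keys */
        struct table t = { { { &qa, 1 }, { &qb, 2 } }, 2, NULL };

        printf("first:  %d\n", lookup(&t, &qb)->value);   /* scans */
        printf("second: %d\n", lookup(&t, &qb)->value);   /* cached */
        return 0;
}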
@@ -1524,7 +1559,8 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
 }
 
 static void
-cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+		       struct request *rq)
 {
 	sector_t sdist;
 	u64 total;
@@ -1534,6 +1570,11 @@ cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
 	else
 		sdist = cic->last_request_pos - rq->sector;
 
+	if (!cic->seek_samples) {
+		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
+		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
+	}
+
 	/*
 	 * Don't allow the seek distance to get too large from the
 	 * odd fragment, pagein, etc
@@ -1558,7 +1599,12 @@ static void
 cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		       struct cfq_io_context *cic)
 {
-	int enable_idle = cfq_cfqq_idle_window(cfqq);
+	int enable_idle;
+
+	if (!cfq_cfqq_sync(cfqq))
+		return;
 
+	enable_idle = cfq_cfqq_idle_window(cfqq);
 	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
 	    (cfqd->hw_tag && CIC_SEEKY(cic)))
@@ -1584,24 +1630,28 @@ static int
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		   struct request *rq)
 {
-	struct cfq_queue *cfqq = cfqd->active_queue;
+	struct cfq_queue *cfqq;
 
-	if (cfq_class_idle(new_cfqq))
+	cfqq = cfqd->active_queue;
+	if (!cfqq)
 		return 0;
 
-	if (!cfqq)
+	if (cfq_slice_used(cfqq))
+		return 1;
+
+	if (cfq_class_idle(new_cfqq))
 		return 0;
 
 	if (cfq_class_idle(cfqq))
 		return 1;
 
-	if (!cfq_cfqq_wait_request(new_cfqq))
-		return 0;
-
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
 		return 1;
 
 	/*
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
@@ -1609,6 +1659,16 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (rq_is_meta(rq) && !cfqq->meta_pending)
 		return 1;
 
+	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
+		return 0;
+
+	/*
+	 * if this request is as-good as one we would expect from the
+	 * current cfqq, let it preempt
+	 */
+	if (cfq_rq_close(cfqd, rq))
+		return 1;
+
 	return 0;
 }
 
@@ -1618,14 +1678,15 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	cfq_slice_expired(cfqd, 1, 1);
+	cfq_slice_expired(cfqd, 1);
 
 	/*
 	 * Put the new queue at the front of the of the current list,
 	 * so we know that it will be selected next.
 	 */
 	BUG_ON(!cfq_cfqq_on_rr(cfqq));
-	list_move(&cfqq->cfq_list, &cfqd->cur_rr);
+
+	cfq_service_tree_add(cfqd, cfqq, 1);
 
 	cfqq->slice_end = 0;
 	cfq_mark_cfqq_slice_new(cfqq);
@@ -1644,28 +1705,12 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (rq_is_meta(rq))
 		cfqq->meta_pending++;
 
-	/*
-	 * we never wait for an async request and we don't allow preemption
-	 * of an async request. so just return early
-	 */
-	if (!rq_is_sync(rq)) {
-		/*
-		 * sync process issued an async request, if it's waiting
-		 * then expire it and kick rq handling.
-		 */
-		if (cic == cfqd->active_cic &&
-		    del_timer(&cfqd->idle_slice_timer)) {
-			cfq_slice_expired(cfqd, 0, 0);
-			blk_start_queueing(cfqd->queue);
-		}
-		return;
-	}
-
 	cfq_update_io_thinktime(cfqd, cic);
-	cfq_update_io_seektime(cic, rq);
+	cfq_update_io_seektime(cfqd, cic, rq);
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_request_pos = rq->sector + rq->nr_sectors;
+	cfqq->last_request_pos = cic->last_request_pos;
 
 	if (cfqq == cfqd->active_queue) {
 		/*
@@ -1714,16 +1759,16 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 	now = jiffies;
 
 	WARN_ON(!cfqd->rq_in_driver);
-	WARN_ON(!cfqq->on_dispatch[sync]);
+	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
-	cfqq->on_dispatch[sync]--;
-	cfqq->service_last = now;
+	cfqq->dispatched--;
+
+	if (cfq_cfqq_sync(cfqq))
+		cfqd->sync_flight--;
 
 	if (!cfq_class_idle(cfqq))
 		cfqd->last_end_request = now;
 
-	cfq_resort_rr_list(cfqq, 0);
-
 	if (sync)
 		RQ_CIC(rq)->last_end_request = now;
 
@@ -1737,12 +1782,13 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
 			cfq_clear_cfqq_slice_new(cfqq);
 		}
 		if (cfq_slice_used(cfqq))
-			cfq_slice_expired(cfqd, 0, 1);
-		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
-			if (!cfq_arm_slice_timer(cfqd))
-				cfq_schedule_dispatch(cfqd);
-		}
+			cfq_slice_expired(cfqd, 1);
+		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+			cfq_arm_slice_timer(cfqd);
 	}
+
+	if (!cfqd->rq_in_driver)
+		cfq_schedule_dispatch(cfqd);
 }
@@ -1751,9 +1797,6 @@ static void cfq_completed_request(request_queue_t *q, struct request *rq)
  */
 static void cfq_prio_boost(struct cfq_queue *cfqq)
 {
-	const int ioprio_class = cfqq->ioprio_class;
-	const int ioprio = cfqq->ioprio;
-
 	if (has_fs_excl()) {
 		/*
 		 * boost idle prio on transactions that would lock out other
@@ -1772,12 +1815,6 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
 		if (cfqq->ioprio != cfqq->org_ioprio)
 			cfqq->ioprio = cfqq->org_ioprio;
 	}
-
-	/*
-	 * refile between round-robin lists if we moved the priority class
-	 */
-	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio))
-		cfq_resort_rr_list(cfqq, 0);
 }
 
 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
@@ -1795,10 +1832,8 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
+	struct cfq_io_context *cic;
 	struct cfq_queue *cfqq;
-	unsigned int key;
-
-	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1806,7 +1841,11 @@ static int cfq_may_queue(request_queue_t *q, int rw)
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
+	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
+	if (!cic)
+		return ELV_MQUEUE_MAY;
+
+	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1850,7 +1889,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
 	const int is_sync = rq_is_sync(rq);
-	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -1863,14 +1901,15 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
 	if (!cic)
 		goto queue_fail;
 
-	if (!cic->cfqq[is_sync]) {
-		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
+	cfqq = cic_to_cfqq(cic, is_sync);
+	if (!cfqq) {
+		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
+
 		if (!cfqq)
 			goto queue_fail;
 
-		cic->cfqq[is_sync] = cfqq;
-	} else
-		cfqq = cic->cfqq[is_sync];
+		cic_set_cfqq(cic, cfqq, is_sync);
+	}
 
 	cfqq->allocated[rw]++;
 	cfq_clear_cfqq_must_alloc(cfqq);
@@ -1940,7 +1979,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 		}
 	}
 expire:
-	cfq_slice_expired(cfqd, 0, timed_out);
+	cfq_slice_expired(cfqd, timed_out);
out_kick:
 	cfq_schedule_dispatch(cfqd);
out_cont:
@@ -1986,7 +2025,7 @@ static void cfq_exit_queue(elevator_t *e)
 	spin_lock_irq(q->queue_lock);
 
 	if (cfqd->active_queue)
-		__cfq_slice_expired(cfqd, cfqd->active_queue, 0, 0);
+		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
 	while (!list_empty(&cfqd->cic_list)) {
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
@@ -2000,14 +2039,12 @@ static void cfq_exit_queue(elevator_t *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
-	kfree(cfqd->cfq_hash);
 	kfree(cfqd);
 }
 
 static void *cfq_init_queue(request_queue_t *q)
 {
 	struct cfq_data *cfqd;
-	int i;
 
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd)
@@ -2015,21 +2052,9 @@ static void *cfq_init_queue(request_queue_t *q)
 
 	memset(cfqd, 0, sizeof(*cfqd));
 
-	for (i = 0; i < CFQ_PRIO_LISTS; i++)
-		INIT_LIST_HEAD(&cfqd->rr_list[i]);
-
-	INIT_LIST_HEAD(&cfqd->busy_rr);
-	INIT_LIST_HEAD(&cfqd->cur_rr);
-	INIT_LIST_HEAD(&cfqd->idle_rr);
+	cfqd->service_tree = CFQ_RB_ROOT;
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
-	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
-	if (!cfqd->cfq_hash)
-		goto out_free;
-
-	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
-		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
-
 	cfqd->queue = q;
 
 	init_timer(&cfqd->idle_slice_timer);
@@ -2053,9 +2078,6 @@ static void *cfq_init_queue(request_queue_t *q)
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 
 	return cfqd;
-out_free:
-	kfree(cfqd);
-	return NULL;
 }
 
@@ -2087,7 +2109,6 @@ fail:
 /*
  * sysfs parts below -->
  */
-
 static ssize_t
 cfq_var_show(unsigned int var, char *page)
 {
......
...@@ -134,13 +134,13 @@ static struct elevator_type *elevator_get(const char *name) ...@@ -134,13 +134,13 @@ static struct elevator_type *elevator_get(const char *name)
{ {
struct elevator_type *e; struct elevator_type *e;
spin_lock_irq(&elv_list_lock); spin_lock(&elv_list_lock);
e = elevator_find(name); e = elevator_find(name);
if (e && !try_module_get(e->elevator_owner)) if (e && !try_module_get(e->elevator_owner))
e = NULL; e = NULL;
spin_unlock_irq(&elv_list_lock); spin_unlock(&elv_list_lock);
return e; return e;
} }
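Per the merge summary, elv_list_lock does not need irq disabling: it is only taken from process context (module register/unregister, elevator lookup, the sysfs show path), so the _irq variants needlessly disabled interrupts across the critical section. The same substitution repeats in elv_register, elv_unregister, and elv_iosched_show below. The rule of thumb, as a sketch:

/* plain locking is enough when every holder runs in process context */
spin_lock(&elv_list_lock);
/* ... walk or modify elv_list ... */
spin_unlock(&elv_list_lock);

/* the _irq variant is only needed when an interrupt handler can take
 * the same lock, where being interrupted while holding it would
 * self-deadlock */
spin_lock_irq(&some_irq_shared_lock);	/* hypothetical lock */
spin_unlock_irq(&some_irq_shared_lock);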
...@@ -965,10 +965,11 @@ void elv_unregister_queue(struct request_queue *q) ...@@ -965,10 +965,11 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e) int elv_register(struct elevator_type *e)
{ {
char *def = ""; char *def = "";
spin_lock_irq(&elv_list_lock);
spin_lock(&elv_list_lock);
BUG_ON(elevator_find(e->elevator_name)); BUG_ON(elevator_find(e->elevator_name));
list_add_tail(&e->list, &elv_list); list_add_tail(&e->list, &elv_list);
spin_unlock_irq(&elv_list_lock); spin_unlock(&elv_list_lock);
if (!strcmp(e->elevator_name, chosen_elevator) || if (!strcmp(e->elevator_name, chosen_elevator) ||
(!*chosen_elevator && (!*chosen_elevator &&
...@@ -998,9 +999,9 @@ void elv_unregister(struct elevator_type *e) ...@@ -998,9 +999,9 @@ void elv_unregister(struct elevator_type *e)
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
spin_lock_irq(&elv_list_lock); spin_lock(&elv_list_lock);
list_del_init(&e->list); list_del_init(&e->list);
spin_unlock_irq(&elv_list_lock); spin_unlock(&elv_list_lock);
} }
EXPORT_SYMBOL_GPL(elv_unregister); EXPORT_SYMBOL_GPL(elv_unregister);
...@@ -1118,7 +1119,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name) ...@@ -1118,7 +1119,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
struct list_head *entry; struct list_head *entry;
int len = 0; int len = 0;
spin_lock_irq(&elv_list_lock); spin_lock(&elv_list_lock);
list_for_each(entry, &elv_list) { list_for_each(entry, &elv_list) {
struct elevator_type *__e; struct elevator_type *__e;
...@@ -1128,7 +1129,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name) ...@@ -1128,7 +1129,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
else else
len += sprintf(name+len, "%s ", __e->elevator_name); len += sprintf(name+len, "%s ", __e->elevator_name);
} }
spin_unlock_irq(&elv_list_lock); spin_unlock(&elv_list_lock);
len += sprintf(len+name, "\n"); len += sprintf(len+name, "\n");
return len; return len;
......
...@@ -3741,6 +3741,7 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node) ...@@ -3741,6 +3741,7 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
ret->nr_batch_requests = 0; /* because this is 0 */ ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL; ret->aic = NULL;
ret->cic_root.rb_node = NULL; ret->cic_root.rb_node = NULL;
ret->ioc_data = NULL;
/* make sure set_task_ioprio() sees the settings above */ /* make sure set_task_ioprio() sees the settings above */
smp_wmb(); smp_wmb();
tsk->io_context = ret; tsk->io_context = ret;
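The new ioc_data field gives cfq a per-io_context slot to stash the last cfq_io_context it resolved, which is what the "speedup cic rb lookup" patch in this merge is about: the common case is repeated lookups by the same task against the same device, so a one-entry cache short-circuits the rb-tree walk. A sketch of the fast path (the slow-path helper name is invented for illustration; the cache check is reconstructed, not quoted):

static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct cfq_io_context *cic;

	if (unlikely(!ioc))
		return NULL;

	/* one-entry cache: check the last hit before walking the tree */
	cic = ioc->ioc_data;
	if (cic && cic->key == cfqd)
		return cic;

	cic = cfq_cic_rb_walk(cfqd, ioc);	/* hypothetical slow path over ioc->cic_root */
	if (cic)
		ioc->ioc_data = cic;		/* refresh the cache */

	return cic;
}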
......
...@@ -867,7 +867,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ...@@ -867,7 +867,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad4; goto bad4;
} }
cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4); cc->bs = bioset_create(MIN_IOS, MIN_IOS);
if (!cc->bs) { if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset"; ti->error = "Cannot allocate crypt bioset";
goto bad_bs; goto bad_bs;
......
...@@ -60,7 +60,7 @@ static int resize_pool(unsigned int new_ios) ...@@ -60,7 +60,7 @@ static int resize_pool(unsigned int new_ios)
if (!_io_pool) if (!_io_pool)
return -ENOMEM; return -ENOMEM;
_bios = bioset_create(16, 16, 4); _bios = bioset_create(16, 16);
if (!_bios) { if (!_bios) {
mempool_destroy(_io_pool); mempool_destroy(_io_pool);
_io_pool = NULL; _io_pool = NULL;
......
...@@ -1012,7 +1012,7 @@ static struct mapped_device *alloc_dev(int minor) ...@@ -1012,7 +1012,7 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->tio_pool) if (!md->tio_pool)
goto bad3; goto bad3;
md->bs = bioset_create(16, 16, 4); md->bs = bioset_create(16, 16);
if (!md->bs) if (!md->bs)
goto bad_no_bioset; goto bad_no_bioset;
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE 32 #define SG_MEMPOOL_SIZE 2
struct scsi_host_sg_pool { struct scsi_host_sg_pool {
size_t size; size_t size;
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <linux/blktrace_api.h> #include <linux/blktrace_api.h>
#include <scsi/sg.h> /* for struct sg_iovec */ #include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 256 #define BIO_POOL_SIZE 2
static struct kmem_cache *bio_slab __read_mostly; static struct kmem_cache *bio_slab __read_mostly;
...@@ -38,7 +38,7 @@ static struct kmem_cache *bio_slab __read_mostly; ...@@ -38,7 +38,7 @@ static struct kmem_cache *bio_slab __read_mostly;
* a small number of entries is fine, not going to be performance critical. * a small number of entries is fine, not going to be performance critical.
* basically we just need to survive * basically we just need to survive
*/ */
#define BIO_SPLIT_ENTRIES 8 #define BIO_SPLIT_ENTRIES 2
mempool_t *bio_split_pool __read_mostly; mempool_t *bio_split_pool __read_mostly;
struct biovec_slab { struct biovec_slab {
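SG_MEMPOOL_SIZE, BIO_POOL_SIZE, and BIO_SPLIT_ENTRIES all drop to 2 for the same reason: a mempool is a last-resort guarantee of forward progress, not a performance cache. mempool_alloc() tries the regular allocator first and touches the reserved elements only when that fails, then sleeps until an element comes back via mempool_free(). In outline (not the real implementation):

/* mempool_alloc(pool, gfp), sketched:
 *   1. try the underlying allocator (e.g. the slab) -- this
 *      succeeds in the common case
 *   2. on failure, hand out one of the pool's reserved elements
 *   3. reserve empty? sleep until mempool_free() returns one
 *
 * the reserve size bounds how many emergency allocations can be
 * outstanding at once, so 2 is enough to keep I/O moving under
 * complete memory exhaustion -- which is the pools' only job. */

This is the "[BLOCK] Don't pin lots of memory in mempools" change from the merge summary.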
...@@ -1120,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) ...@@ -1120,7 +1120,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
* create memory pools for biovec's in a bio_set. * create memory pools for biovec's in a bio_set.
* use the global biovec slabs created for general use. * use the global biovec slabs created for general use.
*/ */
static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{ {
int i; int i;
...@@ -1128,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) ...@@ -1128,9 +1128,6 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale)
struct biovec_slab *bp = bvec_slabs + i; struct biovec_slab *bp = bvec_slabs + i;
mempool_t **bvp = bs->bvec_pools + i; mempool_t **bvp = bs->bvec_pools + i;
if (pool_entries > 1 && i >= scale)
pool_entries >>= 1;
*bvp = mempool_create_slab_pool(pool_entries, bp->slab); *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
if (!*bvp) if (!*bvp)
return -ENOMEM; return -ENOMEM;
...@@ -1161,7 +1158,7 @@ void bioset_free(struct bio_set *bs) ...@@ -1161,7 +1158,7 @@ void bioset_free(struct bio_set *bs)
kfree(bs); kfree(bs);
} }
struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{ {
struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL); struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
...@@ -1172,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) ...@@ -1172,7 +1169,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale)
if (!bs->bio_pool) if (!bs->bio_pool)
goto bad; goto bad;
if (!biovec_create_pools(bs, bvec_pool_size, scale)) if (!biovec_create_pools(bs, bvec_pool_size))
return bs; return bs;
bad: bad:
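With the scale parameter gone, bioset_create() takes just the two reserve sizes, and biovec_create_pools() gives every biovec slab the same pool_entries instead of progressively halving it. A hedged usage sketch for a caller on the new two-argument form (the surrounding driver context is invented):

/* reserve 16 bios and 16 biovecs for emergency allocation */
struct bio_set *bs = bioset_create(16, 16);

if (!bs)
	return -ENOMEM;

/* ... allocate via bio_alloc_bioset(GFP_NOIO, nr_vecs, bs) ... */

bioset_free(bs);

This mirrors the dm-crypt, dm-io, and dm call-site updates earlier in the diff.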
...@@ -1196,38 +1193,12 @@ static void __init biovec_init_slabs(void) ...@@ -1196,38 +1193,12 @@ static void __init biovec_init_slabs(void)
static int __init init_bio(void) static int __init init_bio(void)
{ {
int megabytes, bvec_pool_entries;
int scale = BIOVEC_NR_POOLS;
bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0, bio_slab = kmem_cache_create("bio", sizeof(struct bio), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
biovec_init_slabs(); biovec_init_slabs();
megabytes = nr_free_pages() >> (20 - PAGE_SHIFT); fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
/*
* find out where to start scaling
*/
if (megabytes <= 16)
scale = 0;
else if (megabytes <= 32)
scale = 1;
else if (megabytes <= 64)
scale = 2;
else if (megabytes <= 96)
scale = 3;
else if (megabytes <= 128)
scale = 4;
/*
* Limit number of entries reserved -- mempools are only used when
* the system is completely unable to allocate memory, so we only
* need enough to make progress.
*/
bvec_pool_entries = 1 + scale;
fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
if (!fs_bio_set) if (!fs_bio_set)
panic("bio: can't allocate bios\n"); panic("bio: can't allocate bios\n");
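The deleted block sized the reserves off free memory at boot. Worked through once, assuming 4 KiB pages (PAGE_SHIFT == 12, so the shift is 20 - 12 == 8):

/* 24576 free pages >> 8  ==  96 "megabytes"
 * 96 MiB falls in the <= 96 bucket    ->  scale = 3
 * bvec_pool_entries = 1 + scale       ->  4
 * and biovec_create_pools() then halved the count again for
 * every biovec slab index >= scale */

All of that machinery pinned memory proportional to machine size for pools that exist only to survive allocation failure; the replacement reserves a flat BIO_POOL_SIZE (2) bios and 2 biovecs.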
......
...@@ -276,7 +276,7 @@ extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, ...@@ -276,7 +276,7 @@ extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
extern mempool_t *bio_split_pool; extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio); extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(int, int, int); extern struct bio_set *bioset_create(int, int);
extern void bioset_free(struct bio_set *); extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int); extern struct bio *bio_alloc(gfp_t, int);
......
...@@ -116,6 +116,7 @@ struct io_context { ...@@ -116,6 +116,7 @@ struct io_context {
struct as_io_context *aic; struct as_io_context *aic;
struct rb_root cic_root; struct rb_root cic_root;
void *ioc_data;
}; };
void put_io_context(struct io_context *ioc); void put_io_context(struct io_context *ioc);
......