Commit 8922e16c authored by Tejun Heo, committed by Jens Axboe

[PATCH] 01/05 Implement generic dispatch queue

Implements a generic dispatch queue that can replace the dispatch
queues currently implemented by each iosched.  This reduces code
duplication, makes it easier to enforce semantics on the dispatch
queue, and simplifies the individual ioscheds.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 2824bc93
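
To see how the pieces below fit together: after this patch an iosched no
longer hands requests to the driver itself; it queues them internally and,
when asked, moves them onto the generic dispatch queue (q->queue_head),
from which the block layer feeds the driver.  A minimal sketch of a
converted single-FIFO iosched is shown here; the sfq_* names and
struct sfq_data are invented for illustration and are not part of this patch:

/* Hypothetical "sfq" iosched, illustration only -- not part of this patch */
struct sfq_data {
	struct list_head queue;		/* requests not yet dispatched */
};

/* elevator_add_req_fn: queue the request inside the iosched */
static void sfq_add_request(request_queue_t *q, struct request *rq)
{
	struct sfq_data *sd = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &sd->queue);
}

/*
 * elevator_dispatch_fn: move one request onto the generic dispatch
 * queue.  Returns nonzero while more requests could be dispatched,
 * so a force-dispatch caller can loop until the iosched is drained.
 */
static int sfq_dispatch(request_queue_t *q, int force)
{
	struct sfq_data *sd = q->elevator->elevator_data;
	struct request *rq;

	if (list_empty(&sd->queue))
		return 0;

	rq = list_entry_rq(sd->queue.next);
	list_del_init(&rq->queuelist);
	elv_dispatch_insert(q, rq, 0);	/* plain tail append, no sorting */
	return 1;
}

/* elevator_queue_empty_fn: does the iosched still hold requests? */
static int sfq_queue_empty(request_queue_t *q)
{
	struct sfq_data *sd = q->elevator->elevator_data;

	return list_empty(&sd->queue);
}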
@@ -40,6 +40,11 @@
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
+static inline sector_t rq_last_sector(struct request *rq)
+{
+	return rq->sector + rq->nr_sectors;
+}
+
 /*
  * can we safely merge with this request?
  */
@@ -143,6 +148,9 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->elevator = eq;
+	q->last_sector = 0;
+	q->boundary_rq = NULL;
+	q->max_back_kb = 0;
 
 	if (eq->ops->elevator_init_fn)
 		ret = eq->ops->elevator_init_fn(q, eq);
@@ -225,6 +233,48 @@ void elevator_exit(elevator_t *e)
 	kfree(e);
 }
 
+/*
+ * Insert rq into dispatch queue of q.  Queue lock must be held on
+ * entry.  If sort != 0, rq is sort-inserted; otherwise, rq will be
+ * appended to the dispatch queue.  To be used by specific elevators.
+ */
+void elv_dispatch_insert(request_queue_t *q, struct request *rq, int sort)
+{
+	sector_t boundary;
+	unsigned max_back;
+	struct list_head *entry;
+
+	if (!sort) {
+		/* Specific elevator is performing sort.  Step away. */
+		q->last_sector = rq_last_sector(rq);
+		q->boundary_rq = rq;
+		list_add_tail(&rq->queuelist, &q->queue_head);
+		return;
+	}
+
+	boundary = q->last_sector;
+	max_back = q->max_back_kb * 2;
+	boundary = boundary > max_back ? boundary - max_back : 0;
+
+	list_for_each_prev(entry, &q->queue_head) {
+		struct request *pos = list_entry_rq(entry);
+
+		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+			break;
+		if (rq->sector >= boundary) {
+			if (pos->sector < boundary)
+				continue;
+		} else {
+			if (pos->sector >= boundary)
+				break;
+		}
+		if (rq->sector >= pos->sector)
+			break;
+	}
+
+	list_add(&rq->queuelist, entry);
+}
+
 int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	elevator_t *e = q->elevator;
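
To make the sort-insert boundary above concrete (numbers invented for
illustration): with q->last_sector = 1000 and q->max_back_kb = 16,
max_back = 16 * 2 = 32 sectors (one KB is two 512-byte sectors), so
boundary = 1000 - 32 = 968.  The backwards scan then keeps the dispatch
queue as two ascending runs: requests at or above sector 968, followed by
"wrapped" requests below it.  A request at sector 980 is sorted into the
first run; one at sector 950 joins the wrapped run at the tail; barriers
and already-started requests are never passed.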
@@ -255,13 +305,7 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 		e->ops->elevator_merge_req_fn(q, rq, next);
 }
 
-/*
- * For careful internal use by the block layer. Essentially the same as
- * a requeue in that it tells the io scheduler that this request is not
- * active in the driver or hardware anymore, but we don't want the request
- * added back to the scheduler. Function is not exported.
- */
-void elv_deactivate_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(request_queue_t *q, struct request *rq)
 {
 	elevator_t *e = q->elevator;
@@ -269,18 +313,13 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq)
 	 * it already went through dequeue, we need to decrement the
 	 * in_flight count again
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight--;
+		if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn)
+			e->ops->elevator_deactivate_req_fn(q, rq);
+	}
 
-	rq->flags &= ~REQ_STARTED;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
-void elv_requeue_request(request_queue_t *q, struct request *rq)
-{
-	elv_deactivate_request(q, rq);
+	rq->flags &= ~REQ_STARTED;
 
 	/*
 	 * if this is the flush, requeue the original instead and drop the flush
@@ -290,40 +329,81 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 		rq = rq->end_io_data;
 	}
 
-	/*
-	 * the request is prepped and may have some resources allocated.
-	 * allowing unprepped requests to pass this one may cause resource
-	 * deadlock. turn on softbarrier.
-	 */
-	rq->flags |= REQ_SOFTBARRIER;
-
-	/*
-	 * if iosched has an explicit requeue hook, then use that. otherwise
-	 * just put the request at the front of the queue
-	 */
-	if (q->elevator->ops->elevator_requeue_req_fn)
-		q->elevator->ops->elevator_requeue_req_fn(q, rq);
-	else
-		__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
 }
 
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
-	/*
-	 * barriers implicitly indicate back insertion
-	 */
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
-	    where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update last_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->last_sector = rq_last_sector(rq);
+			q->boundary_rq = rq;
+		}
+	}
 
 	if (plug)
 		blk_plug_device(q);
 
 	rq->q = q;
 
-	if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
-		q->elevator->ops->elevator_add_req_fn(q, rq, where);
+	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) {
+		/*
+		 * if drain is set, store the request "locally". when the drain
+		 * is finished, the requests will be handed ordered to the io
+		 * scheduler
+		 */
+		list_add_tail(&rq->queuelist, &q->drain_list);
+		return;
+	}
+
+	switch (where) {
+	case ELEVATOR_INSERT_FRONT:
+		rq->flags |= REQ_SOFTBARRIER;
+
+		list_add(&rq->queuelist, &q->queue_head);
+		break;
+
+	case ELEVATOR_INSERT_BACK:
+		rq->flags |= REQ_SOFTBARRIER;
+
+		while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+			;
+		list_add_tail(&rq->queuelist, &q->queue_head);
+		/*
+		 * We kick the queue here for the following reasons.
+		 * - The elevator might have returned NULL previously
+		 *   to delay requests and returned them now.  As the
+		 *   queue wasn't empty before this request, ll_rw_blk
+		 *   won't run the queue on return, resulting in hang.
+		 * - Usually, back inserted requests won't be merged
+		 *   with anything.  There's no point in delaying queue
+		 *   processing.
+		 */
+		blk_remove_plug(q);
+		q->request_fn(q);
+		break;
+
+	case ELEVATOR_INSERT_SORT:
+		BUG_ON(!blk_fs_request(rq));
+		rq->flags |= REQ_SORTED;
+		q->elevator->ops->elevator_add_req_fn(q, rq);
+		break;
+
+	default:
+		printk(KERN_ERR "%s: bad insertion point %d\n",
+		       __FUNCTION__, where);
+		BUG();
+	}
 
 	if (blk_queue_plugged(q)) {
 		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
@@ -332,13 +412,6 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		if (nrq >= q->unplug_thresh)
 			__generic_unplug_device(q);
 	}
-	} else
-		/*
-		 * if drain is set, store the request "locally". when the drain
-		 * is finished, the requests will be handed ordered to the io
-		 * scheduler
-		 */
-		list_add_tail(&rq->queuelist, &q->drain_list);
 }
 
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
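
A subtlety in the ELEVATOR_INSERT_BACK case above: elevator_dispatch_fn
is called with force = 1 in a loop, which, under the contract this patch
establishes, makes the iosched keep feeding the dispatch queue until it
has nothing left, including requests it would otherwise hold back for
batching or anticipation.  The back-inserted request therefore really does
end up behind everything the scheduler currently holds, and the immediate
q->request_fn() kick avoids the hang described in the comment.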
@@ -353,13 +426,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where,
 static inline struct request *__elv_next_request(request_queue_t *q)
 {
-	struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
+	struct request *rq;
+
+	if (unlikely(list_empty(&q->queue_head) &&
+		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
+		return NULL;
+
+	rq = list_entry_rq(q->queue_head.next);
 
 	/*
 	 * if this is a barrier write and the device has to issue a
 	 * flush sequence to support it, check how far we are
 	 */
-	if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
+	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
 		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
 
 		if (q->ordered == QUEUE_ORDERED_FLUSH &&
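
The rewritten __elv_next_request shows the new pull model: the block layer
looks only at q->queue_head and invokes elevator_dispatch_fn with force = 0
only when that list is empty.  The contract implied by this call site is
that dispatch_fn returns nonzero iff it moved at least one request onto the
dispatch queue; returning zero lets the iosched delay dispatch, provided
the queue is kicked again later.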
@@ -376,16 +455,34 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		/*
-		 * just mark as started even if we don't start it, a request
-		 * that has been delayed should not be passed by new incoming
-		 * requests
-		 */
-		rq->flags |= REQ_STARTED;
+		if (!(rq->flags & REQ_STARTED)) {
+			elevator_t *e = q->elevator;
+
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing).  Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq) &&
+			    e->ops->elevator_activate_req_fn)
+				e->ops->elevator_activate_req_fn(q, rq);
+
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->flags |= REQ_STARTED;
+		}
 
 		if (rq == q->last_merge)
 			q->last_merge = NULL;
 
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->last_sector = rq_last_sector(rq);
+			q->boundary_rq = NULL;
+		}
+
 		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
@@ -396,9 +493,9 @@ struct request *elv_next_request(request_queue_t *q)
 			/*
 			 * the request may have been (partially) prepped.
 			 * we need to keep this request in the front to
-			 * avoid resource deadlock. turn on softbarrier.
+			 * avoid resource deadlock.  REQ_STARTED will
+			 * prevent other fs requests from passing this one.
 			 */
-			rq->flags |= REQ_SOFTBARRIER;
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
@@ -421,16 +518,16 @@ struct request *elv_next_request(request_queue_t *q)
 	return rq;
 }
 
-void elv_remove_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(request_queue_t *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	BUG_ON(list_empty(&rq->queuelist));
+
+	list_del_init(&rq->queuelist);
 
 	/*
 	 * the time frame between a request being removed from the lists
 	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side. note that we only account requests that the
-	 * driver has seen (REQ_STARTED set), to avoid false accounting
-	 * for request-request merges
+	 * the driver side.
 	 */
 	if (blk_account_rq(rq))
 		q->in_flight++;
@@ -444,19 +541,19 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
 	 */
 	if (rq == q->last_merge)
 		q->last_merge = NULL;
-
-	if (e->ops->elevator_remove_req_fn)
-		e->ops->elevator_remove_req_fn(q, rq);
 }
 
 int elv_queue_empty(request_queue_t *q)
 {
 	elevator_t *e = q->elevator;
 
+	if (!list_empty(&q->queue_head))
+		return 0;
+
 	if (e->ops->elevator_queue_empty_fn)
 		return e->ops->elevator_queue_empty_fn(q);
 
-	return list_empty(&q->queue_head);
+	return 1;
 }
 
 struct request *elv_latter_request(request_queue_t *q, struct request *rq)
@@ -528,11 +625,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	/*
 	 * request is released from the driver, io must be done
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight--;
-
-	if (e->ops->elevator_completed_req_fn)
-		e->ops->elevator_completed_req_fn(q, rq);
+		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+			e->ops->elevator_completed_req_fn(q, rq);
+	}
 }
 
 int elv_register_queue(struct request_queue *q)
@@ -705,11 +802,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	return len;
 }
 
+EXPORT_SYMBOL(elv_dispatch_insert);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
-EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
 EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
...
@@ -353,6 +353,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_PREFLUSH;
 
 	if (!flush_rq->errors)
@@ -369,6 +371,8 @@ static void blk_post_flush_end_io(struct request *flush_rq)
 	struct request *rq = flush_rq->end_io_data;
 	request_queue_t *q = rq->q;
 
+	elv_completed_request(q, flush_rq);
+
 	rq->flags |= REQ_BAR_POSTFLUSH;
 
 	q->end_flush_fn(q, flush_rq);
@@ -408,8 +412,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
 	if (!list_empty(&rq->queuelist))
 		blkdev_dequeue_request(rq);
 
-	elv_deactivate_request(q, rq);
-
 	flush_rq->end_io_data = rq;
 	flush_rq->end_io = blk_pre_flush_end_io;
@@ -1040,6 +1042,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 static char *rq_flags[] = {
 	"REQ_RW",
 	"REQ_FAILFAST",
+	"REQ_SORTED",
 	"REQ_SOFTBARRIER",
 	"REQ_HARDBARRIER",
 	"REQ_CMD",
@@ -2456,6 +2459,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (unlikely(--req->ref_count))
 		return;
 
+	elv_completed_request(q, req);
+
 	req->rq_status = RQ_INACTIVE;
 	req->rl = NULL;
@@ -2466,8 +2471,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	if (rl) {
 		int rw = rq_data_dir(req);
 
-		elv_completed_request(q, req);
-
 		BUG_ON(!list_empty(&req->queuelist));
 
 		blk_free_request(q, req);
@@ -2477,14 +2480,14 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 void blk_put_request(struct request *req)
 {
-	/*
-	 * if req->rl isn't set, this request didnt originate from the
-	 * block layer, so it's safe to just disregard it
-	 */
-	if (req->rl) {
-		unsigned long flags;
-		request_queue_t *q = req->q;
+	unsigned long flags;
+	request_queue_t *q = req->q;
 
+	/*
+	 * Gee, IDE calls in w/ NULL q.  Fix IDE and remove the
+	 * following if (q) test.
+	 */
+	if (q) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		__blk_put_request(q, req);
 		spin_unlock_irqrestore(q->queue_lock, flags);
...
@@ -203,6 +203,7 @@ struct request {
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
 	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_CMD,		/* is a regular fs rw request */
@@ -235,6 +236,7 @@ enum rq_flag_bits {
 #define REQ_RW		(1 << __REQ_RW)
 #define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
 #define REQ_CMD		(1 << __REQ_CMD)
@@ -332,6 +334,13 @@ struct request_queue
 	prepare_flush_fn	*prepare_flush_fn;
 	end_flush_fn		*end_flush_fn;
 
+	/*
+	 * Dispatch queue sorting
+	 */
+	sector_t		last_sector;
+	struct request		*boundary_rq;
+	unsigned int		max_back_kb;
+
 	/*
 	 * Auto-unplugging state
 	 */
@@ -454,6 +463,7 @@ enum {
 #define blk_pm_request(rq)	\
 	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
 
+#define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
 #define blk_barrier_preflush(rq)	((rq)->flags & REQ_BAR_PREFLUSH)
 #define blk_barrier_postflush(rq)	((rq)->flags & REQ_BAR_POSTFLUSH)
@@ -611,12 +621,7 @@ extern void end_request(struct request *req, int uptodate);
 static inline void blkdev_dequeue_request(struct request *req)
 {
-	BUG_ON(list_empty(&req->queuelist));
-
-	list_del_init(&req->queuelist);
-
-	if (req->rl)
-		elv_remove_request(req->q, req);
+	elv_dequeue_request(req->q, req);
 }
 
 /*
...
@@ -8,18 +8,17 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *);
 
-typedef struct request *(elevator_next_req_fn) (request_queue_t *);
+typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *);
 typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int);
 typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
 typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
 
 typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
@@ -31,10 +30,9 @@ struct elevator_ops
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
 
-	elevator_next_req_fn *elevator_next_req_fn;
+	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;
-	elevator_remove_req_fn *elevator_remove_req_fn;
-	elevator_requeue_req_fn *elevator_requeue_req_fn;
+	elevator_activate_req_fn *elevator_activate_req_fn;
 	elevator_deactivate_req_fn *elevator_deactivate_req_fn;
 
 	elevator_queue_empty_fn *elevator_queue_empty_fn;
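
For completeness, a converted iosched would wire the reshaped ops up
roughly as follows (continuing the hypothetical sfq sketch from the top;
elevator_type and its fields are the existing registration machinery,
not something this patch adds):

static struct elevator_type elevator_sfq = {
	.ops = {
		.elevator_dispatch_fn		= sfq_dispatch,
		.elevator_add_req_fn		= sfq_add_request,
		.elevator_queue_empty_fn	= sfq_queue_empty,
	},
	.elevator_name	= "sfq",
	.elevator_owner	= THIS_MODULE,
};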
@@ -81,15 +79,15 @@ struct elevator_queue
 /*
  * block elevator interface
  */
+extern void elv_dispatch_insert(request_queue_t *, struct request *, int);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);
 extern void elv_merged_request(request_queue_t *, struct request *);
-extern void elv_remove_request(request_queue_t *, struct request *);
+extern void elv_dequeue_request(request_queue_t *, struct request *);
 extern void elv_requeue_request(request_queue_t *, struct request *);
-extern void elv_deactivate_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(request_queue_t *, struct request *);
...