Commit 9e2585a8 authored by Jens Axboe, committed by Jens Axboe

[PATCH] as-iosched: remove arq->is_sync member

We can track this in struct request.
Signed-off-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
parent d4f2f462
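
The change is mechanical: instead of computing a sync flag once in as_add_request() and caching it in each as_rq, every site now derives it on demand from the request's cmd_flags. A minimal before/after sketch (the struct is abridged to the relevant fields, not the full as_rq definition):

	/* Before: the flag was stored per as_rq when the request was queued. */
	struct as_rq {
		struct request *request;
		unsigned int is_sync;	/* 1 for READs and SYNC writes */
	};

	/* After: derived on demand from the request itself. */
	#define rq_is_sync(rq)	\
		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)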
@@ -151,7 +151,6 @@ struct as_rq {
 	struct io_context *io_context;	/* The submitting task */
-	unsigned int is_sync;
 	enum arq_state state;
 };
@@ -241,7 +240,7 @@ static void as_put_io_context(struct as_rq *arq)
 	aic = arq->io_context->aic;
-	if (arq->is_sync == REQ_SYNC && aic) {
+	if (rq_is_sync(arq->request) && aic) {
 		spin_lock(&aic->lock);
 		set_bit(AS_TASK_IORUNNING, &aic->state);
 		aic->last_end_request = jiffies;
@@ -254,14 +253,13 @@ static void as_put_io_context(struct as_rq *arq)
 /*
  * rb tree support functions
  */
-#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
+#define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])

 static void as_add_arq_rb(struct as_data *ad, struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
 	struct request *alias;

-	while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) {
+	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
 		as_move_to_dispatch(ad, RQ_DATA(alias));
 		as_antic_stop(ad);
 	}
@@ -269,7 +267,7 @@ static void as_add_arq_rb(struct as_data *ad, struct request *rq)
 static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
 {
-	elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq);
+	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
 }

 /*
@@ -300,13 +298,13 @@ as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
 	if (arq2 == NULL)
 		return arq1;

-	data_dir = arq1->is_sync;
+	data_dir = rq_is_sync(arq1->request);

 	last = ad->last_sector[data_dir];
 	s1 = arq1->request->sector;
 	s2 = arq2->request->sector;

-	BUG_ON(data_dir != arq2->is_sync);
+	BUG_ON(data_dir != rq_is_sync(arq2->request));

 	/*
 	 * Strict one way elevator _except_ in the case where we allow
@@ -377,7 +375,7 @@ static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
 	if (rbnext)
 		next = RQ_DATA(rb_entry_rq(rbnext));
 	else {
-		const int data_dir = arq->is_sync;
+		const int data_dir = rq_is_sync(last);

 		rbnext = rb_first(&ad->sort_list[data_dir]);
 		if (rbnext && rbnext != &last->rb_node)
@@ -538,8 +536,7 @@ static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
 static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
 				struct request *rq)
 {
-	struct as_rq *arq = RQ_DATA(rq);
-	int data_dir = arq->is_sync;
+	int data_dir = rq_is_sync(rq);
 	unsigned long thinktime = 0;
 	sector_t seek_dist;
@@ -674,7 +671,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 		return 1;
 	}

-	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) {
+	if (arq && rq_is_sync(arq->request) && as_close_req(ad, aic, arq)) {
 		/*
 		 * Found a close request that is not one of ours.
 		 *
@@ -758,7 +755,7 @@ static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
  */
 static void as_update_arq(struct as_data *ad, struct as_rq *arq)
 {
-	const int data_dir = arq->is_sync;
+	const int data_dir = rq_is_sync(arq->request);

 	/* keep the next_arq cache up to date */
 	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
@@ -835,7 +832,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	 * actually serviced. This should help devices with big TCQ windows
 	 * and writeback caches
 	 */
-	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
+	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
 		update_write_batch(ad);
 		ad->current_batch_expires = jiffies +
 				ad->batch_expire[REQ_SYNC];
@@ -868,7 +865,7 @@ out:
 static void as_remove_queued_request(request_queue_t *q, struct request *rq)
 {
 	struct as_rq *arq = RQ_DATA(rq);
-	const int data_dir = arq->is_sync;
+	const int data_dir = rq_is_sync(rq);
 	struct as_data *ad = q->elevator->elevator_data;

 	WARN_ON(arq->state != AS_RQ_QUEUED);
@@ -941,7 +938,7 @@ static inline int as_batch_expired(struct as_data *ad)
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 {
 	struct request *rq = arq->request;
-	const int data_dir = arq->is_sync;
+	const int data_dir = rq_is_sync(rq);

 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
@@ -1158,12 +1155,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;

-	if (rq_data_dir(arq->request) == READ
-			|| (arq->request->cmd_flags & REQ_RW_SYNC))
-		arq->is_sync = 1;
-	else
-		arq->is_sync = 0;
-	data_dir = arq->is_sync;
+	data_dir = rq_is_sync(rq);

 	arq->io_context = as_get_io_context();
...
@@ -531,6 +531,11 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)

+/*
+ * We regard a request as sync, if it's a READ or a SYNC write.
+ */
+#define rq_is_sync(rq)	(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+
 static inline int blk_queue_full(struct request_queue *q, int rw)
 {
 	if (rw == READ)
...
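
Note that rq_is_sync() always evaluates to 0 or 1, since C's || operator yields exactly 0 or 1; that is what makes it safe as the index into the scheduler's two-element sort_list[] in the RQ_RB_ROOT() macro above. A hypothetical helper spelling the same lookup out as a function (the helper name is illustrative; the patch itself only defines the macro):

	/* Equivalent to RQ_RB_ROOT(ad, rq): sort_list[0] holds the async
	 * requests, sort_list[1] the sync ones. */
	static inline struct rb_root *as_rq_sort_root(struct as_data *ad,
						      struct request *rq)
	{
		return &ad->sort_list[rq_is_sync(rq)];
	}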