Commit e4313dd4 authored by Jens Axboe's avatar Jens Axboe Committed by Jens Axboe

[PATCH] as-iosched: use new io context counting mechanism

It's ok if the read path is a lot more costly, as long as inc/dec is
really cheap. The inc/dec will happen for each created/freed io context,
while the reading only happens when a disk queue exits.
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 4a893e83
@@ -149,7 +149,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static atomic_t ioc_count = ATOMIC_INIT(0);
+static DEFINE_PER_CPU(unsigned long, ioc_count);
 static struct completion *ioc_gone;
 
 static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
@@ -163,7 +163,8 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+	elv_ioc_count_dec(ioc_count);
+	if (ioc_gone && !elv_ioc_count_read(ioc_count))
 		complete(ioc_gone);
 }
@@ -199,7 +200,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		atomic_inc(&ioc_count);
+		elv_ioc_count_inc(ioc_count);
 	}
 
 	return ret;
@@ -1484,7 +1485,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (atomic_read(&ioc_count))
+	if (elv_ioc_count_read(ioc_count))
 		wait_for_completion(ioc_gone);
 
 	synchronize_rcu();
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment