Commit b5deef90 authored by Jens Axboe, committed by Jens Axboe

[PATCH] Make sure all block/io scheduler setups are node aware

Some were kmalloc_node(), some were still kmalloc(). Change them all to
kmalloc_node().
Signed-off-by: Jens Axboe <axboe@suse.de>
parent a3b05e8f
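
The change below is mechanical throughout: every allocation of per-queue scheduler state switches from a node-agnostic allocator (kmalloc(), kmem_cache_alloc()) to its _node variant, passing the request queue's home node (q->node, the NUMA node the driver requested via blk_init_queue_node() and friends) so the scheduler's hot data lands on memory local to the device. A minimal sketch of the pattern, using a hypothetical my_sched_data/my_init_queue pair rather than any scheduler in this patch:

	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/blkdev.h>

	/* Hypothetical per-queue scheduler data, for illustration only. */
	struct my_sched_data {
		struct list_head queue;
	};

	static void *my_init_queue(request_queue_t *q)
	{
		struct my_sched_data *nd;

		/* Before this patch: kmalloc(sizeof(*nd), GFP_KERNEL) could
		 * place this structure on any NUMA node, regardless of where
		 * the device and its request queue live. */

		/* After: allocate on the queue's home node, keeping the
		 * scheduler's per-queue state local to the node doing the I/O. */
		nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
		if (!nd)
			return NULL;

		INIT_LIST_HEAD(&nd->queue);
		return nd;
	}
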
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -210,9 +210,9 @@ static struct as_io_context *alloc_as_io_context(void)
  * If the current task has no AS IO context then create one and initialise it.
  * Then take a ref on the task's io context and return it.
  */
-static struct io_context *as_get_io_context(void)
+static struct io_context *as_get_io_context(int node)
 {
-	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
 	if (ioc && !ioc->aic) {
 		ioc->aic = alloc_as_io_context();
 		if (!ioc->aic) {
@@ -1148,7 +1148,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	data_dir = rq_is_sync(rq);
 
-	rq->elevator_private = as_get_io_context();
+	rq->elevator_private = as_get_io_context(q->node);
 	if (RQ_IOC(rq)) {
 		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
@@ -1292,7 +1292,7 @@ static int as_may_queue(request_queue_t *q, int rw)
 	struct io_context *ioc;
 	if (ad->antic_status == ANTIC_WAIT_REQ ||
 	    ad->antic_status == ANTIC_WAIT_NEXT) {
-		ioc = as_get_io_context();
+		ioc = as_get_io_context(q->node);
 		if (ad->io_context == ioc)
 			ret = ELV_MQUEUE_MUST;
 		put_io_context(ioc);
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1148,8 +1148,9 @@ static void cfq_exit_io_context(struct io_context *ioc)
 static struct cfq_io_context *
 cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
-	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
+	struct cfq_io_context *cic;
 
+	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
 	if (cic) {
 		memset(cic, 0, sizeof(*cic));
 		cic->last_end_request = jiffies;
@@ -1277,11 +1278,11 @@ retry:
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
-			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask|__GFP_NOFAIL);
+			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
-			cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
			if (!cfqq)
				goto out;
		}
@@ -1407,7 +1408,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
-	ioc = get_io_context(gfp_mask);
+	ioc = get_io_context(gfp_mask, cfqd->queue->node);
 	if (!ioc)
 		return NULL;
@@ -1955,7 +1956,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	struct cfq_data *cfqd;
 	int i;
 
-	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd)
 		return NULL;
@@ -1970,7 +1971,7 @@ static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->empty_list);
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
-	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
 	if (!cfqd->cfq_hash)
 		goto out_free;
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -161,12 +161,12 @@ __setup("elevator=", elevator_setup);
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(struct elevator_type *e)
+static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
 {
 	elevator_t *eq;
 	int i;
 
-	eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
 		goto err;
@@ -178,7 +178,8 @@ static elevator_t *elevator_alloc(struct elevator_type *e)
 	eq->kobj.ktype = &elv_ktype;
 	mutex_init(&eq->sysfs_lock);
 
-	eq->hash = kmalloc(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, GFP_KERNEL);
+	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+					GFP_KERNEL, q->node);
 	if (!eq->hash)
 		goto err;
@@ -224,7 +225,7 @@ int elevator_init(request_queue_t *q, char *name)
 		e = elevator_get("noop");
 	}
 
-	eq = elevator_alloc(e);
+	eq = elevator_alloc(q, e);
 	if (!eq)
 		return -ENOMEM;
@@ -987,7 +988,7 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	/*
 	 * Allocate new elevator
 	 */
-	e = elevator_alloc(new_e);
+	e = elevator_alloc(q, new_e);
 	if (!e)
 		return 0;
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,6 +39,7 @@ static void blk_unplug_timeout(unsigned long data);
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
 /*
  * For the allocated request tables
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 		if (rl->count[rw]+1 >= q->nr_requests) {
-			ioc = current_io_context(GFP_ATOMIC);
+			ioc = current_io_context(GFP_ATOMIC, q->node);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
@@ -2234,7 +2235,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
		 * up to a big batch of them for a small period time.
		 * See ioc_batching, ioc_set_batching
		 */
-		ioc = current_io_context(GFP_NOIO);
+		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);
 
		spin_lock_irq(q->queue_lock);
@@ -3641,7 +3642,7 @@ void exit_io_context(void)
  * but since the current task itself holds a reference, the context can be
  * used in general code, so long as it stays within `current` context.
  */
-struct io_context *current_io_context(gfp_t gfp_flags)
+static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 {
 	struct task_struct *tsk = current;
 	struct io_context *ret;
@@ -3650,7 +3651,7 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 	if (likely(ret))
 		return ret;
 
-	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
 	if (ret) {
 		atomic_set(&ret->refcount, 1);
 		ret->task = current;
@@ -3674,10 +3675,10 @@ EXPORT_SYMBOL(current_io_context);
 *
 * This is always called in the context of the task which submitted the I/O.
 */
-struct io_context *get_io_context(gfp_t gfp_flags)
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
 	struct io_context *ret;
 
-	ret = current_io_context(gfp_flags);
+	ret = current_io_context(gfp_flags, node);
 	if (likely(ret))
 		atomic_inc(&ret->refcount);
 	return ret;
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -69,7 +69,7 @@ static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct noop_data *nd;
 
-	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
+	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
 	if (!nd)
 		return NULL;
 	INIT_LIST_HEAD(&nd->queue);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -104,8 +104,7 @@ struct io_context {
 void put_io_context(struct io_context *ioc);
 void exit_io_context(void);
-struct io_context *current_io_context(gfp_t gfp_flags);
-struct io_context *get_io_context(gfp_t gfp_flags);
+struct io_context *get_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
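
The one externally visible interface change is the extra node argument to get_io_context(); current_io_context() becomes static to ll_rw_blk.c. A sketch of the resulting call-site change, with ioc and q as in the surrounding functions of the diff:

	/* Old: a freshly allocated io_context could land on any node. */
	ioc = get_io_context(GFP_ATOMIC);

	/* New: hint the allocator with the queue's home node. */
	ioc = get_io_context(GFP_ATOMIC, q->node);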