Commit a90779bf authored by Linus Torvalds

Merge branch 'block-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/block

* 'block-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/block:
  [PATCH] fix rmmod problems with elevator attributes, clean them up
  [PATCH] elevator_t lifetime rules and sysfs fixes
  [PATCH] noise removal: cfq-iosched.c
  [PATCH] don't bother with refcounting for cfq_data
  [PATCH] fix sysfs interaction and lifetime rules handling for queues
  [PATCH] regularize blk_cleanup_queue() use
  [PATCH] fix cfq_get_queue()/ioprio_set(2) races
  [PATCH] deal with rmmod/put_io_context() races
  [PATCH] stop elv_unregister() from rogering other iosched's data, fix locking
  [PATCH] stop cfq from pinning queue down
  [PATCH] make cfq_exit_queue() prune the cfq_io_context for that queue
  [PATCH] fix the exclusion for ioprio_set()
  [PATCH] keep sync and async cfq_queue separate
  [PATCH] switch to use of ->key to get cfq_data by cfq_io_context
  [PATCH] stop leaking cfq_data in cfq_set_request()
  [PATCH] fix cfq hash lookups
  [PATCH] fix locking in queue_requests_store()
  [PATCH] fix double-free in blk_init_queue_node()
  [PATCH] don't do exit_io_context() until we know we won't be doing any IO
parents 7705a879 e572ec7e
@@ -182,6 +182,9 @@ struct as_rq {
static kmem_cache_t *arq_pool;
static atomic_t ioc_count = ATOMIC_INIT(0);
static struct completion *ioc_gone;
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad);
static void free_as_io_context(struct as_io_context *aic)
{
kfree(aic);
if (atomic_dec_and_test(&ioc_count) && ioc_gone)
complete(ioc_gone);
}
static void as_trim(struct io_context *ioc)
{
if (ioc->aic)
free_as_io_context(ioc->aic);
ioc->aic = NULL;
}
/* Called when the task exits */
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void)
ret->seek_total = 0;
ret->seek_samples = 0;
ret->seek_mean = 0;
atomic_inc(&ioc_count);
}
return ret;
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
/*
* sysfs parts below
*/
struct as_fs_entry {
struct attribute attr;
ssize_t (*show)(struct as_data *, char *);
ssize_t (*store)(struct as_data *, const char *, size_t);
};
static ssize_t
as_var_show(unsigned int var, char *page)
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count)
return count;
}
static ssize_t as_est_show(struct as_data *ad, char *page)
static ssize_t est_time_show(elevator_t *e, char *page)
{
struct as_data *ad = e->elevator_data;
int pos = 0;
pos += sprintf(page+pos, "%lu %% exit probability\n",
@@ -1734,21 +1743,23 @@ static ssize_t as_est_show(struct as_data *ad, char *page)
}
#define SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(struct as_data *ad, char *page) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
struct as_data *ad = e->elevator_data; \
return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
}
SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
int ret = as_var_store(__PTR, (page), count); \
struct as_data *ad = e->elevator_data; \
int ret = as_var_store(__PTR, (page), count); \
if (*(__PTR) < (MIN)) \
*(__PTR) = (MIN); \
else if (*(__PTR) > (MAX)) \
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
*(__PTR) = msecs_to_jiffies(*(__PTR)); \
return ret; \
}
STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batchexpire_store,
STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batch_expire_store,
&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batchexpire_store,
STORE_FUNCTION(as_write_batch_expire_store,
&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION
static struct as_fs_entry as_est_entry = {
.attr = {.name = "est_time", .mode = S_IRUGO },
.show = as_est_show,
};
static struct as_fs_entry as_readexpire_entry = {
.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
.show = as_readexpire_show,
.store = as_readexpire_store,
};
static struct as_fs_entry as_writeexpire_entry = {
.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
.show = as_writeexpire_show,
.store = as_writeexpire_store,
};
static struct as_fs_entry as_anticexpire_entry = {
.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
.show = as_anticexpire_show,
.store = as_anticexpire_store,
};
static struct as_fs_entry as_read_batchexpire_entry = {
.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
.show = as_read_batchexpire_show,
.store = as_read_batchexpire_store,
};
static struct as_fs_entry as_write_batchexpire_entry = {
.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
.show = as_write_batchexpire_show,
.store = as_write_batchexpire_store,
};
static struct attribute *default_attrs[] = {
&as_est_entry.attr,
&as_readexpire_entry.attr,
&as_writeexpire_entry.attr,
&as_anticexpire_entry.attr,
&as_read_batchexpire_entry.attr,
&as_write_batchexpire_entry.attr,
NULL,
};
#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
static ssize_t
as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
struct as_fs_entry *entry = to_as(attr);
if (!entry->show)
return -EIO;
return entry->show(e->elevator_data, page);
}
static ssize_t
as_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
struct as_fs_entry *entry = to_as(attr);
if (!entry->store)
return -EIO;
return entry->store(e->elevator_data, page, length);
}
static struct sysfs_ops as_sysfs_ops = {
.show = as_attr_show,
.store = as_attr_store,
};
static struct kobj_type as_ktype = {
.sysfs_ops = &as_sysfs_ops,
.default_attrs = default_attrs,
#define AS_ATTR(name) \
__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
static struct elv_fs_entry as_attrs[] = {
__ATTR_RO(est_time),
AS_ATTR(read_expire),
AS_ATTR(write_expire),
AS_ATTR(antic_expire),
AS_ATTR(read_batch_expire),
AS_ATTR(write_batch_expire),
__ATTR_NULL
};
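
For reference, the hand-rolled as_fs_entry initializers deleted above collapse into __ATTR() from <linux/sysfs.h>, which packs a sysfs name/mode pair together with the show/store handlers. Roughly what AS_ATTR(read_expire) expands to (a sketch for illustration, not part of the patch):

static struct elv_fs_entry as_read_expire_entry = {
	.attr	= { .name = "read_expire", .mode = S_IRUGO | S_IWUSR },
	.show	= as_read_expire_show,
	.store	= as_read_expire_store,
};
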
static struct elevator_type iosched_as = {
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = {
.elevator_may_queue_fn = as_may_queue,
.elevator_init_fn = as_init_queue,
.elevator_exit_fn = as_exit_queue,
.trim = as_trim,
},
.elevator_ktype = &as_ktype,
.elevator_attrs = as_attrs,
.elevator_name = "anticipatory",
.elevator_owner = THIS_MODULE,
};
@@ -1893,7 +1841,13 @@ static int __init as_init(void)
static void __exit as_exit(void)
{
DECLARE_COMPLETION(all_gone);
elv_unregister(&iosched_as);
ioc_gone = &all_gone;
barrier();
if (atomic_read(&ioc_count))
complete(ioc_gone);
synchronize_rcu();
kmem_cache_destroy(arq_pool);
}
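
The exit path above pairs with free_as_io_context(): every live as_io_context holds a reference in ioc_count, and whichever CPU frees the last one signals ioc_gone, so module unload cannot race put_io_context(). A minimal sketch of the general counter-plus-completion unload idiom (names here are hypothetical, not from the patch):

#include <linux/slab.h>
#include <linux/completion.h>
#include <asm/atomic.h>

static atomic_t obj_count = ATOMIC_INIT(0);
static struct completion *obj_gone;	/* non-NULL only during unload */

struct obj { long payload; };

static struct obj *obj_alloc(void)
{
	struct obj *o = kmalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		atomic_inc(&obj_count);
	return o;
}

static void obj_free(struct obj *o)
{
	kfree(o);
	/* the last freer wakes the unloader, if one is waiting */
	if (atomic_dec_and_test(&obj_count) && obj_gone)
		complete(obj_gone);
}

static void obj_shutdown(void)
{
	DECLARE_COMPLETION(all_gone);

	obj_gone = &all_gone;
	barrier();	/* publish obj_gone before sampling the count */
	if (atomic_read(&obj_count))
		wait_for_completion(&all_gone);
}
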
......
This diff is collapsed.
@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
/*
* sysfs parts below
*/
struct deadline_fs_entry {
struct attribute attr;
ssize_t (*show)(struct deadline_data *, char *);
ssize_t (*store)(struct deadline_data *, const char *, size_t);
};
static ssize_t
deadline_var_show(int var, char *page)
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count)
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(struct deadline_data *dd, char *page) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
int __data = __VAR; \
struct deadline_data *dd = e->elevator_data; \
int __data = __VAR; \
if (__CONV) \
__data = jiffies_to_msecs(__data); \
return deadline_var_show(__data, (page)); \
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
struct deadline_data *dd = e->elevator_data; \
int __data; \
int ret = deadline_var_store(&__data, (page), count); \
if (__data < (MIN)) \
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)
*(__PTR) = __data; \
return ret; \
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
static struct deadline_fs_entry deadline_readexpire_entry = {
.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
.show = deadline_readexpire_show,
.store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
.show = deadline_writeexpire_show,
.store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
.show = deadline_writesstarved_show,
.store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
.show = deadline_frontmerges_show,
.store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
.show = deadline_fifobatch_show,
.store = deadline_fifobatch_store,
};
static struct attribute *default_attrs[] = {
&deadline_readexpire_entry.attr,
&deadline_writeexpire_entry.attr,
&deadline_writesstarved_entry.attr,
&deadline_frontmerges_entry.attr,
&deadline_fifobatch_entry.attr,
NULL,
};
#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
struct deadline_fs_entry *entry = to_deadline(attr);
if (!entry->show)
return -EIO;
return entry->show(e->elevator_data, page);
}
static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
struct deadline_fs_entry *entry = to_deadline(attr);
if (!entry->store)
return -EIO;
return entry->store(e->elevator_data, page, length);
}
static struct sysfs_ops deadline_sysfs_ops = {
.show = deadline_attr_show,
.store = deadline_attr_store,
};
static struct kobj_type deadline_ktype = {
.sysfs_ops = &deadline_sysfs_ops,
.default_attrs = default_attrs,
#define DD_ATTR(name) \
__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
deadline_##name##_store)
static struct elv_fs_entry deadline_attrs[] = {
DD_ATTR(read_expire),
DD_ATTR(write_expire),
DD_ATTR(writes_starved),
DD_ATTR(front_merges),
DD_ATTR(fifo_batch),
__ATTR_NULL
};
static struct elevator_type iosched_deadline = {
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = {
.elevator_exit_fn = deadline_exit_queue,
},
.elevator_ktype = &deadline_ktype,
.elevator_attrs = deadline_attrs,
.elevator_name = "deadline",
.elevator_owner = THIS_MODULE,
};
......
@@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
static int elevator_attach(request_queue_t *q, struct elevator_type *e,
struct elevator_queue *eq)
static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
{
int ret = 0;
memset(eq, 0, sizeof(*eq));
eq->ops = &e->ops;
eq->elevator_type = e;
q->elevator = eq;
if (eq->ops->elevator_init_fn)
@@ -154,6 +149,32 @@ static int __init elevator_setup(char *str)
__setup("elevator=", elevator_setup);
static struct kobj_type elv_ktype;
static elevator_t *elevator_alloc(struct elevator_type *e)
{
elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
if (eq) {
memset(eq, 0, sizeof(*eq));
eq->ops = &e->ops;
eq->elevator_type = e;
kobject_init(&eq->kobj);
snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
eq->kobj.ktype = &elv_ktype;
mutex_init(&eq->sysfs_lock);
} else {
elevator_put(e);
}
return eq;
}
static void elevator_release(struct kobject *kobj)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
elevator_put(e->elevator_type);
kfree(e);
}
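
elevator_alloc()/elevator_release() above establish the usual embedded-kobject lifetime rule: the containing object is freed only from the kobject's ->release(), so every error path (and elevator_exit() below) can simply kobject_put(). The same pattern in isolation, using the 2.6.16-era kobject API as the patch does (hypothetical names):

#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/string.h>

struct gadget {
	struct kobject kobj;	/* embedded: the kobject owns the lifetime */
	int value;
};

static void gadget_release(struct kobject *kobj)
{
	/* runs exactly once, on the final kobject_put() */
	kfree(container_of(kobj, struct gadget, kobj));
}

static struct kobj_type gadget_ktype = {
	.release = gadget_release,
};

static struct gadget *gadget_alloc(void)
{
	struct gadget *g = kmalloc(sizeof(*g), GFP_KERNEL);

	if (g) {
		memset(g, 0, sizeof(*g));
		kobject_init(&g->kobj);
		snprintf(g->kobj.name, KOBJ_NAME_LEN, "%s", "gadget");
		g->kobj.ktype = &gadget_ktype;
	}
	return g;
}
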
int elevator_init(request_queue_t *q, char *name)
{
struct elevator_type *e = NULL;
@@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name)
e = elevator_get("noop");
}
eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
if (!eq) {
elevator_put(e);
eq = elevator_alloc(e);
if (!eq)
return -ENOMEM;
}
ret = elevator_attach(q, e, eq);
if (ret) {
kfree(eq);
elevator_put(e);
}
ret = elevator_attach(q, eq);
if (ret)
kobject_put(&eq->kobj);
return ret;
}
void elevator_exit(elevator_t *e)
{
mutex_lock(&e->sysfs_lock);
if (e->ops->elevator_exit_fn)
e->ops->elevator_exit_fn(e);
e->ops = NULL;
mutex_unlock(&e->sysfs_lock);
elevator_put(e->elevator_type);
e->elevator_type = NULL;
kfree(e);
kobject_put(&e->kobj);
}
/*
@@ -627,26 +645,79 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
}
}
int elv_register_queue(struct request_queue *q)
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
elevator_t *e = q->elevator;
elevator_t *e = container_of(kobj, elevator_t, kobj);
struct elv_fs_entry *entry = to_elv(attr);
ssize_t error;
e->kobj.parent = kobject_get(&q->kobj);
if (!e->kobj.parent)
return -EBUSY;
if (!entry->show)
return -EIO;
snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
e->kobj.ktype = e->elevator_type->elevator_ktype;
mutex_lock(&e->sysfs_lock);
error = e->ops ? entry->show(e, page) : -ENOENT;
mutex_unlock(&e->sysfs_lock);
return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
elevator_t *e = container_of(kobj, elevator_t, kobj);
struct elv_fs_entry *entry = to_elv(attr);
ssize_t error;
if (!entry->store)
return -EIO;
mutex_lock(&e->sysfs_lock);
error = e->ops ? entry->store(e, page, length) : -ENOENT;
mutex_unlock(&e->sysfs_lock);
return error;
}
static struct sysfs_ops elv_sysfs_ops = {
.show = elv_attr_show,
.store = elv_attr_store,
};
static struct kobj_type elv_ktype = {
.sysfs_ops = &elv_sysfs_ops,
.release = elevator_release,
};
return kobject_register(&e->kobj);
int elv_register_queue(struct request_queue *q)
{
elevator_t *e = q->elevator;
int error;
e->kobj.parent = &q->kobj;
error = kobject_add(&e->kobj);
if (!error) {
struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
if (attr) {
while (attr->attr.name) {
if (sysfs_create_file(&e->kobj, &attr->attr))
break;
attr++;
}
}
kobject_uevent(&e->kobj, KOBJ_ADD);
}
return error;
}
void elv_unregister_queue(struct request_queue *q)
{
if (q) {
elevator_t *e = q->elevator;
kobject_unregister(&e->kobj);
kobject_put(&q->kobj);
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
}
}
@@ -675,21 +746,15 @@ void elv_unregister(struct elevator_type *e)
/*
* Iterate every thread in the process to remove the io contexts.
*/
read_lock(&tasklist_lock);
do_each_thread(g, p) {
struct io_context *ioc = p->io_context;
if (ioc && ioc->cic) {
ioc->cic->exit(ioc->cic);
ioc->cic->dtor(ioc->cic);
ioc->cic = NULL;
}
if (ioc && ioc->aic) {
ioc->aic->exit(ioc->aic);
ioc->aic->dtor(ioc->aic);
ioc->aic = NULL;
}
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
if (e->ops.trim) {
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
e->ops.trim(p->io_context);
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
spin_lock_irq(&elv_list_lock);
list_del_init(&e->list);
@@ -703,16 +768,16 @@ EXPORT_SYMBOL_GPL(elv_unregister);
* need for the new one. this way we have a chance of going back to the old
* one, if the new one fails init for some reason.
*/
static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
/*
* Allocate new elevator
*/
e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
e = elevator_alloc(new_e);
if (!e)
goto error;
return 0;
/*
* Turn on BYPASS and drain all requests w/ elevator private data
@@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
/*
* attach and start new elevator
*/
if (elevator_attach(q, new_e, e))
if (elevator_attach(q, e))
goto fail;
if (elv_register_queue(q))
@@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
*/
elevator_exit(old_elevator);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
return;
return 1;
fail_register:
/*
@@ -767,10 +832,9 @@ fail:
q->elevator = old_elevator;
elv_register_queue(q);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
kfree(e);
error:
elevator_put(new_e);
printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
if (e)
kobject_put(&e->kobj);
return 0;
}
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
@@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
return count;
}
elevator_switch(q, e);
if (!elevator_switch(q, e))
printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name);
return count;
}
......
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue);
* Hopefully the low level driver will have finished any
* outstanding requests first...
**/
void blk_cleanup_queue(request_queue_t * q)
static void blk_release_queue(struct kobject *kobj)
{
request_queue_t *q = container_of(kobj, struct request_queue, kobj);
struct request_list *rl = &q->rq;
if (!atomic_dec_and_test(&q->refcnt))
return;
if (q->elevator)
elevator_exit(q->elevator);
blk_sync_queue(q);
if (rl->rq_pool)
@@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q)
kmem_cache_free(requestq_cachep, q);
}
void blk_put_queue(request_queue_t *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
void blk_cleanup_queue(request_queue_t * q)
{
mutex_lock(&q->sysfs_lock);
set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
mutex_unlock(&q->sysfs_lock);
if (q->elevator)
elevator_exit(q->elevator);
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
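
Note the new split: blk_put_queue() only drops a kobject reference, while blk_cleanup_queue() marks the queue dead, tears down the elevator, and drops the creator's reference; the memory itself is released in blk_release_queue() once the last holder lets go. Driver unload paths therefore switch from blk_put_queue() to blk_cleanup_queue(), as the hunks further down show. A sketch of the resulting teardown order in a hypothetical driver:

static void mydrv_remove(struct gendisk *disk)
{
	del_gendisk(disk);		/* stop new I/O arriving via the disk */
	blk_cleanup_queue(disk->queue);	/* mark dead, exit elevator, drop ref */
	put_disk(disk);			/* queue memory persists until last ref */
}
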
static int blk_init_free_list(request_queue_t *q)
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_alloc_queue);
static struct kobj_type queue_ktype;
request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
request_queue_t *q;
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
memset(q, 0, sizeof(*q));
init_timer(&q->unplug_timer);
atomic_set(&q->refcnt, 1);
snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
q->kobj.ktype = &queue_ktype;
kobject_init(&q->kobj);
q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
q->backing_dev_info.unplug_io_data = q;
mutex_init(&q->sysfs_lock);
return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
return NULL;
q->node = node_id;
if (blk_init_free_list(q))
goto out_init;
if (blk_init_free_list(q)) {
kmem_cache_free(requestq_cachep, q);
return NULL;
}
/*
* if caller didn't supply a lock, they get per-queue locking with
@@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
return q;
}
blk_cleanup_queue(q);
out_init:
kmem_cache_free(requestq_cachep, q);
blk_put_queue(q);
return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
int blk_get_queue(request_queue_t *q)
{
if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
atomic_inc(&q->refcnt);
kobject_get(&q->kobj);
return 0;
}
@@ -3477,10 +3497,12 @@ void put_io_context(struct io_context *ioc)
BUG_ON(atomic_read(&ioc->refcount) == 0);
if (atomic_dec_and_test(&ioc->refcount)) {
rcu_read_lock();
if (ioc->aic && ioc->aic->dtor)
ioc->aic->dtor(ioc->aic);
if (ioc->cic && ioc->cic->dtor)
ioc->cic->dtor(ioc->cic);
rcu_read_unlock();
kmem_cache_free(iocontext_cachep, ioc);
}
@@ -3614,10 +3636,13 @@ static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
struct request_list *rl = &q->rq;
unsigned long nr;
int ret = queue_var_store(&nr, page, count);
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
int ret = queue_var_store(&q->nr_requests, page, count);
if (q->nr_requests < BLKDEV_MIN_RQ)
q->nr_requests = BLKDEV_MIN_RQ;
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
blk_clear_queue_full(q, WRITE);
wake_up(&rl->wait[WRITE]);
}
spin_unlock_irq(q->queue_lock);
return ret;
}
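
The rewrite above follows the parse-outside, commit-inside-the-lock pattern: the old code stored into q->nr_requests before taking queue_lock, so a concurrent request allocator could observe a half-applied limit with stale congestion thresholds. Schematically (an illustrative condensation of the hunk, not the patch text):

unsigned long nr;
int ret = queue_var_store(&nr, page, count);	/* parse with no lock held */

if (nr < BLKDEV_MIN_RQ)
	nr = BLKDEV_MIN_RQ;

spin_lock_irq(q->queue_lock);
q->nr_requests = nr;			/* commit under the queue lock */
blk_queue_congestion_threshold(q);
/* ... recompute congestion/full state, wake sleepers ... */
spin_unlock_irq(q->queue_lock);
return ret;
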
@@ -3758,13 +3784,19 @@ static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct queue_sysfs_entry *entry = to_queue(attr);
struct request_queue *q;
request_queue_t *q = container_of(kobj, struct request_queue, kobj);
ssize_t res;
q = container_of(kobj, struct request_queue, kobj);
if (!entry->show)
return -EIO;
return entry->show(q, page);
mutex_lock(&q->sysfs_lock);
if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
res = entry->show(q, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t
@@ -3772,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct queue_sysfs_entry *entry = to_queue(attr);
struct request_queue *q;
request_queue_t *q = container_of(kobj, struct request_queue, kobj);
ssize_t res;
q = container_of(kobj, struct request_queue, kobj);
if (!entry->store)
return -EIO;
return entry->store(q, page, length);
mutex_lock(&q->sysfs_lock);
if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
res = entry->store(q, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
static struct sysfs_ops queue_sysfs_ops = {
@@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = {
static struct kobj_type queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
.default_attrs = default_attrs,
.release = blk_release_queue,
};
int blk_register_queue(struct gendisk *disk)
@@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk)
return -ENXIO;
q->kobj.parent = kobject_get(&disk->kobj);
if (!q->kobj.parent)
return -EBUSY;
snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
q->kobj.ktype = &queue_ktype;
ret = kobject_register(&q->kobj);
ret = kobject_add(&q->kobj);
if (ret < 0)
return ret;
kobject_uevent(&q->kobj, KOBJ_ADD);
ret = elv_register_queue(q);
if (ret) {
kobject_unregister(&q->kobj);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
return ret;
}
@@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk)
if (q && q->request_fn) {
elv_unregister_queue(q);
kobject_unregister(&q->kobj);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
kobject_put(&disk->kobj);
}
}
@@ -1307,7 +1307,7 @@ static int __init loop_init(void)
out_mem4:
while (i--)
blk_put_queue(loop_dev[i].lo_queue);
blk_cleanup_queue(loop_dev[i].lo_queue);
devfs_remove("loop");
i = max_loop;
out_mem3:
@@ -1328,7 +1328,7 @@ static void loop_exit(void)
for (i = 0; i < max_loop; i++) {
del_gendisk(disks[i]);
blk_put_queue(loop_dev[i].lo_queue);
blk_cleanup_queue(loop_dev[i].lo_queue);
put_disk(disks[i]);
}
devfs_remove("loop");
......
@@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
return 0;
out_new_dev:
blk_put_queue(disk->queue);
blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
@@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
del_gendisk(pd->disk);
blk_put_queue(pd->disk->queue);
blk_cleanup_queue(pd->disk->queue);
put_disk(pd->disk);
pkt_devs[idx] = NULL;
......
@@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev)
pci_free_consistent(card->dev, PAGE_SIZE*2,
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
blk_put_queue(card->queue);
blk_cleanup_queue(card->queue);
}
static const struct pci_device_id mm_pci_ids[] = { {
......
@@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
bad3:
mempool_destroy(md->io_pool);
bad2:
blk_put_queue(md->queue);
blk_cleanup_queue(md->queue);
free_minor(minor);
bad1:
kfree(md);
@@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md)
del_gendisk(md->disk);
free_minor(minor);
put_disk(md->disk);
blk_put_queue(md->queue);
blk_cleanup_queue(md->queue);
kfree(md);
}
......
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev)
return;
if (!mddev->raid_disks && list_empty(&mddev->disks)) {
list_del(&mddev->all_mddevs);
blk_put_queue(mddev->queue);
/* that blocks */
blk_cleanup_queue(mddev->queue);
/* that also blocks */
kobject_unregister(&mddev->kobj);
/* result blows... */
}
spin_unlock(&all_mddevs_lock);
}
......
@@ -273,7 +273,7 @@ removeseg:
list_del(&dev_info->lh);
del_gendisk(dev_info->gd);
blk_put_queue(dev_info->dcssblk_queue);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(dev);
@@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
unregister_dev:
PRINT_ERR("device_create_file() failed!\n");
list_del(&dev_info->lh);
blk_put_queue(dev_info->dcssblk_queue);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(&dev_info->dev);
@@ -505,7 +505,7 @@ list_del:
unload_seg:
segment_unload(local_buf);
dealloc_gendisk:
blk_put_queue(dev_info->dcssblk_queue);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
free_dev_info:
@@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
list_del(&dev_info->lh);
del_gendisk(dev_info->gd);
blk_put_queue(dev_info->dcssblk_queue);
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
device_unregister(&dev_info->dev);
......
@@ -58,7 +58,7 @@ struct cfq_io_context {
* circular list of cfq_io_contexts belonging to a process io context
*/
struct list_head list;
struct cfq_queue *cfqq;
struct cfq_queue *cfqq[2];
void *key;
struct io_context *ioc;
@@ -69,6 +69,8 @@ struct cfq_io_context {
unsigned long ttime_samples;
unsigned long ttime_mean;
struct list_head queue_list;
void (*dtor)(struct cfq_io_context *);
void (*exit)(struct cfq_io_context *);
};
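
Widening cfqq to a two-element array lets one cfq_io_context track its synchronous and asynchronous cfq_queue independently, matching the "keep sync and async cfq_queue separate" patch in this series. Hypothetical accessors illustrating the intended indexing (not part of the patch):

static inline struct cfq_queue *
cic_cfqq(struct cfq_io_context *cic, int is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void
cic_set_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}
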
@@ -404,8 +406,6 @@ struct request_queue
struct blk_queue_tag *queue_tags;
atomic_t refcnt;
unsigned int nr_sorted;
unsigned int in_flight;
@@ -424,6 +424,8 @@ struct request_queue
struct request pre_flush_rq, bar_rq, post_flush_rq;
struct request *orig_bar_rq;
unsigned int bi_size;
struct mutex sysfs_lock;
};
#define RQ_INACTIVE (-1)
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void);
int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(gfp_t);
request_queue_t *blk_alloc_queue_node(gfp_t, int);
#define blk_put_queue(q) blk_cleanup_queue((q))
extern void blk_put_queue(request_queue_t *);
/*
* tag stuff
......
@@ -48,10 +48,17 @@ struct elevator_ops
elevator_init_fn *elevator_init_fn;
elevator_exit_fn *elevator_exit_fn;
void (*trim)(struct io_context *);
};
#define ELV_NAME_MAX (16)
struct elv_fs_entry {
struct attribute attr;
ssize_t (*show)(elevator_t *, char *);
ssize_t (*store)(elevator_t *, const char *, size_t);
};
/*
* identifies an elevator type, such as AS or deadline
*/
@@ -60,7 +67,7 @@ struct elevator_type
struct list_head list;
struct elevator_ops ops;
struct elevator_type *elevator_type;
struct kobj_type *elevator_ktype;
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
struct module *elevator_owner;
};
@@ -74,6 +81,7 @@ struct elevator_queue
void *elevator_data;
struct kobject kobj;
struct elevator_type *elevator_type;
struct mutex sysfs_lock;
};
/*
......
@@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code)
panic("Attempted to kill the idle task!");
if (unlikely(tsk->pid == 1))
panic("Attempted to kill init!");
if (tsk->io_context)
exit_io_context();
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
current->ptrace_message = code;
@@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code)
if (unlikely(tsk->flags & PF_EXITING)) {
printk(KERN_ALERT
"Fixing recursive fault but reboot is needed!\n");
if (tsk->io_context)
exit_io_context();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
@@ -881,6 +881,9 @@ fastcall NORET_TYPE void do_exit(long code)
*/
mutex_debug_check_no_locks_held(tsk);
if (tsk->io_context)
exit_io_context();
/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
BUG_ON(tsk->flags & PF_DEAD);
......