Commit c1637532 authored by Martin Schwidefsky

[S390] more workqueue fixes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent e45ccc05
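Every hunk below follows the same conversion: with the reworked workqueue API the handler no longer receives an opaque void *data cookie but the struct work_struct itself, so INIT_WORK() loses its third argument and each handler recovers its context with container_of(). A minimal sketch of that pattern, not taken from this commit; the my_ctx/my_handler names are made up for illustration:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_ctx {
        int value;                      /* context formerly passed via the third INIT_WORK() argument */
        struct work_struct work;        /* embedded so container_of() can find my_ctx */
};

/* New-style handler: gets the work_struct, not a void pointer. */
static void my_handler(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        printk(KERN_INFO "my_ctx value=%d\n", ctx->value);
}

static void my_schedule(struct my_ctx *ctx)
{
        /* Old API: INIT_WORK(&ctx->work, my_handler, ctx); */
        INIT_WORK(&ctx->work, my_handler);
        schedule_work(&ctx->work);
}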
@@ -179,6 +179,7 @@ struct tape_char_data {
 /* Block Frontend Data */
 struct tape_blk_data
 {
+        struct tape_device *    device;
         /* Block device request queue. */
         request_queue_t *       request_queue;
         spinlock_t              request_queue_lock;
@@ -240,7 +241,7 @@ struct tape_device {
 #endif
         /* Function to start or stop the next request later. */
-        struct work_struct      tape_dnr;
+        struct delayed_work     tape_dnr;
 };

 /* Externals from tape_core.c */
...
@@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device)
         return rc;
 }

+struct tape_34xx_work {
+        struct tape_device      *device;
+        enum tape_op            op;
+        struct work_struct      work;
+};
+
 /*
  * These functions are currently used only to schedule a medium_sense for
  * later execution. This is because we get an interrupt whenever a medium
@@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device)
  * interrupt handler.
  */
 static void
-tape_34xx_work_handler(void *data)
+tape_34xx_work_handler(struct work_struct *work)
 {
-        struct {
-                struct tape_device      *device;
-                enum tape_op            op;
-                struct work_struct      work;
-        } *p = data;
+        struct tape_34xx_work *p =
+                container_of(work, struct tape_34xx_work, work);

         switch(p->op) {
         case TO_MSEN:
@@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data)
 static int
 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
 {
-        struct {
-                struct tape_device      *device;
-                enum tape_op            op;
-                struct work_struct      work;
-        } *p;
+        struct tape_34xx_work *p;

         if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
                 return -ENOMEM;

         memset(p, 0, sizeof(*p));
-        INIT_WORK(&p->work, tape_34xx_work_handler, p);
+        INIT_WORK(&p->work, tape_34xx_work_handler);

         p->device = tape_get_device_reference(device);
         p->op = op;
...
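The tape_34xx hunks above (and the tape_3590 hunks that follow) apply the pattern to one-shot work items that also carry a tape_op: the formerly anonymous wrapper struct is given a name so container_of() has a type to refer to, and one item is allocated per request. A hedged sketch of that allocate/schedule/free flow; the scheduling call and the kfree() in the handler sit outside the visible hunks, and all names here are illustrative only:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/kernel.h>

struct one_shot_work {
        int op;                         /* extra parameter delivered to the handler */
        struct work_struct work;
};

static void one_shot_handler(struct work_struct *work)
{
        struct one_shot_work *p = container_of(work, struct one_shot_work, work);

        /* ... act on p->op ... */
        kfree(p);                       /* the work item owns itself; free when done */
}

static int schedule_one_shot(int op)
{
        struct one_shot_work *p;

        p = kzalloc(sizeof(*p), GFP_ATOMIC);    /* callers may be in atomic context */
        if (!p)
                return -ENOMEM;
        INIT_WORK(&p->work, one_shot_handler);
        p->op = op;
        schedule_work(&p->work);
        return 0;
}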
@@ -236,9 +236,10 @@ struct work_handler_data {
 };

 static void
-tape_3590_work_handler(void *data)
+tape_3590_work_handler(struct work_struct *work)
 {
-        struct work_handler_data *p = data;
+        struct work_handler_data *p =
+                container_of(work, struct work_handler_data, work);

         switch (p->op) {
         case TO_MSEN:
@@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
         if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
                 return -ENOMEM;

-        INIT_WORK(&p->work, tape_3590_work_handler, p);
+        INIT_WORK(&p->work, tape_3590_work_handler);
         p->device = tape_get_device_reference(device);
         p->op = op;
...
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/buffer_head.h>
+#include <linux/kernel.h>

 #include <asm/debug.h>
@@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
  * queue.
  */
 static void
-tapeblock_requeue(void *data) {
+tapeblock_requeue(struct work_struct *work) {
+        struct tape_blk_data *  blkdat;
         struct tape_device *    device;
         request_queue_t *       queue;
         int                     nr_queued;
@@ -151,7 +153,8 @@ tapeblock_requeue(void *data) {
         struct list_head *      l;
         int                     rc;

-        device = (struct tape_device *) data;
+        blkdat = container_of(work, struct tape_blk_data, requeue_task);
+        device = blkdat->device;
         if (!device)
                 return;
@@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device)
         int                     rc;

         blkdat = &device->blk_data;
+        blkdat->device = device;
         spin_lock_init(&blkdat->request_queue_lock);
         atomic_set(&blkdat->requeue_scheduled, 0);
@@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device)
         add_disk(disk);

-        INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
-                  tape_get_device_reference(device));
+        tape_get_device_reference(device);
+        INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

         return 0;
@@ -271,7 +275,7 @@ void
 tapeblock_cleanup_device(struct tape_device *device)
 {
         flush_scheduled_work();
-        device->blk_data.requeue_task.data = tape_put_device(device);
+        tape_put_device(device);

         if (!device->blk_data.disk) {
                 PRINT_ERR("(%s): No gendisk to clean up!\n",
...
@@ -28,7 +28,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "

 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);

 /*
  * One list to contain all tape devices of all disciplines, so
@@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
                 return 0;
         case -EBUSY:
                 request->status = TAPE_REQUEST_CANCEL;
-                schedule_work(&device->tape_dnr);
+                schedule_delayed_work(&device->tape_dnr, 0);
                 return 0;
         case -ENODEV:
                 DBF_EXCEPTION(2, "device gone, retry\n");
@@ -470,7 +470,7 @@ tape_alloc_device(void)
         *device->modeset_byte = 0;
         device->first_minor = -1;
         atomic_set(&device->ref_count, 1);
-        INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+        INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
         return device;
 }
@@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
         } else if (rc == -EBUSY) {
                 /* The common I/O subsystem is currently busy. Retry later. */
                 request->status = TAPE_REQUEST_QUEUED;
-                schedule_work(&device->tape_dnr);
+                schedule_delayed_work(&device->tape_dnr, 0);
                 rc = 0;
         } else {
                 /* Start failed. Remove request and indicate failure. */
@@ -790,11 +790,11 @@ __tape_start_next_request(struct tape_device *device)
 }

 static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
 {
-        struct tape_device *    device;
+        struct tape_device *device =
+                container_of(work, struct tape_device, tape_dnr.work);

-        device = (struct tape_device *) data;
         DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
         spin_lock_irq(get_ccwdev_lock(device->cdev));
         __tape_start_next_request(device);
...
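In tape_core.c the retry trigger tape_dnr becomes a struct delayed_work: it is set up with INIT_DELAYED_WORK(), queued with schedule_delayed_work(..., 0) (a delay of 0 behaves like the old immediate schedule_work()), and the handler reaches the device through the nested member, tape_dnr.work. A small sketch of that shape, with hypothetical names that do not appear in the driver:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_dev {
        int busy;                       /* stand-in for real device state */
        struct delayed_work dnr;        /* "do next request" retry work */
};

static void my_dnr_handler(struct work_struct *work)
{
        /* The handler receives the work_struct embedded inside the
         * delayed_work, so container_of() goes through dnr.work. */
        struct my_dev *dev = container_of(work, struct my_dev, dnr.work);

        dev->busy = 0;                  /* ... restart the queued request ... */
}

static void my_dev_init(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->dnr, my_dnr_handler);
}

static void my_dev_retry_later(struct my_dev *dev)
{
        schedule_delayed_work(&dev->dnr, 0);    /* 0 jiffies: run as soon as possible */
}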
@@ -73,6 +73,8 @@ struct senseid {
 }  __attribute__ ((packed,aligned(4)));

 struct ccw_device_private {
+        struct ccw_device *cdev;
+        struct subchannel *sch;
         int state;              /* device state */
         atomic_t onoff;
         unsigned long registered;
...
@@ -585,12 +585,13 @@ static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
 }

 static void
-ccw_device_add_changed(void *data)
+ccw_device_add_changed(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;

-        cdev = data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         if (device_add(&cdev->dev)) {
                 put_device(&cdev->dev);
                 return;
@@ -605,13 +606,15 @@ ccw_device_add_changed(void *data)
 extern int css_get_ssd_info(struct subchannel *sch);

 void
-ccw_device_do_unreg_rereg(void *data)
+ccw_device_do_unreg_rereg(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;
         struct subchannel *sch;
         int need_rename;

-        cdev = data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         sch = to_subchannel(cdev->dev.parent);
         if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
                 /*
@@ -659,7 +662,7 @@ ccw_device_do_unreg_rereg(void *data)
         snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
                   sch->schid.ssid, sch->schib.pmcw.dev);
         PREPARE_WORK(&cdev->private->kick_work,
-                     ccw_device_add_changed, cdev);
+                     ccw_device_add_changed);
         queue_work(ccw_device_work, &cdev->private->kick_work);
 }

@@ -677,14 +680,16 @@ ccw_device_release(struct device *dev)
  * Register recognized device.
  */
 static void
-io_subchannel_register(void *data)
+io_subchannel_register(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;
         struct subchannel *sch;
         int ret;
         unsigned long flags;

-        cdev = data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         sch = to_subchannel(cdev->dev.parent);

         /*
@@ -734,11 +739,14 @@ out:
 }

 void
-ccw_device_call_sch_unregister(void *data)
+ccw_device_call_sch_unregister(struct work_struct *work)
 {
-        struct ccw_device *cdev = data;
+        struct ccw_device_private *priv;
+        struct ccw_device *cdev;
         struct subchannel *sch;

+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         sch = to_subchannel(cdev->dev.parent);
         css_sch_device_unregister(sch);
         /* Reset intparm to zeroes. */
@@ -768,7 +776,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
                         break;
                 sch = to_subchannel(cdev->dev.parent);
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_call_sch_unregister, cdev);
+                             ccw_device_call_sch_unregister);
                 queue_work(slow_path_wq, &cdev->private->kick_work);
                 if (atomic_dec_and_test(&ccw_device_init_count))
                         wake_up(&ccw_device_init_wq);
@@ -783,7 +791,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
                 if (!get_device(&cdev->dev))
                         break;
                 PREPARE_WORK(&cdev->private->kick_work,
-                             io_subchannel_register, cdev);
+                             io_subchannel_register);
                 queue_work(slow_path_wq, &cdev->private->kick_work);
                 break;
         }
@@ -865,6 +873,7 @@ io_subchannel_probe (struct subchannel *sch)
                 kfree(cdev);
                 return -ENOMEM;
         }
+        cdev->private->cdev = cdev;
         atomic_set(&cdev->private->onoff, 0);
         cdev->dev.parent = &sch->dev;
         cdev->dev.release = ccw_device_release;
@@ -890,12 +899,13 @@ io_subchannel_probe (struct subchannel *sch)
         return rc;
 }

-static void
-ccw_device_unregister(void *data)
+static void ccw_device_unregister(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;

-        cdev = (struct ccw_device *)data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         if (test_and_clear_bit(1, &cdev->private->registered))
                 device_unregister(&cdev->dev);
         put_device(&cdev->dev);
@@ -921,7 +931,7 @@ io_subchannel_remove (struct subchannel *sch)
          */
         if (get_device(&cdev->dev)) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_unregister, cdev);
+                             ccw_device_unregister);
                 queue_work(ccw_device_work, &cdev->private->kick_work);
         }
         return 0;
@@ -1048,6 +1058,7 @@ ccw_device_probe_console(void)
         memset(&console_cdev, 0, sizeof(struct ccw_device));
         memset(&console_private, 0, sizeof(struct ccw_device_private));
         console_cdev.private = &console_private;
+        console_private.cdev = &console_cdev;
         ret = ccw_device_console_enable(&console_cdev, sch);
         if (ret) {
                 cio_release_console();
...
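The cio changes above all hinge on the new cdev field in ccw_device_private: since a kick_work handler now only sees the embedded work_struct, it can reach the ccw_device only through a backpointer stored in the enclosing private structure, which is why the field is filled in at probe time and explicitly for the console device. A hedged sketch of that backpointer idiom, with invented names rather than the cio types:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_device;

struct my_private {
        struct my_device *dev;          /* backpointer, set when the device is created */
        struct work_struct kick_work;
};

struct my_device {
        struct my_private *priv;
};

static void my_kick_handler(struct work_struct *work)
{
        struct my_private *priv = container_of(work, struct my_private, kick_work);
        struct my_device *dev = priv->dev;      /* only reachable via the backpointer */

        /* ... unregister/re-register dev, notify its driver, etc. ... */
        (void)dev;
}

static void my_device_setup(struct my_device *dev, struct my_private *priv)
{
        dev->priv = priv;
        priv->dev = dev;                /* forget this and the handler dereferences garbage */
        INIT_WORK(&priv->kick_work, my_kick_handler);
}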
@@ -78,8 +78,8 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
 int ccw_device_cancel_halt_clear(struct ccw_device *);

-void ccw_device_do_unreg_rereg(void *);
-void ccw_device_call_sch_unregister(void *);
+void ccw_device_do_unreg_rereg(struct work_struct *);
+void ccw_device_call_sch_unregister(struct work_struct *);

 int ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
...
@@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
             cdev->id.dev_model != cdev->private->senseid.dev_model ||
             cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_do_unreg_rereg, cdev);
+                             ccw_device_do_unreg_rereg);
                 queue_work(ccw_device_work, &cdev->private->kick_work);
                 return 0;
         }
@@ -329,19 +329,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 }

 static void
-ccw_device_oper_notify(void *data)
+ccw_device_oper_notify(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;
         struct subchannel *sch;
         int ret;

-        cdev = data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         sch = to_subchannel(cdev->dev.parent);
         ret = (sch->driver && sch->driver->notify) ?
                 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
         if (!ret)
                 /* Driver doesn't want device back. */
-                ccw_device_do_unreg_rereg(cdev);
+                ccw_device_do_unreg_rereg(work);
         else {
                 /* Reenable channel measurements, if needed. */
                 cmf_reenable(cdev);
@@ -377,8 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
         if (cdev->private->flags.donotify) {
                 cdev->private->flags.donotify = 0;
-                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-                             cdev);
+                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
         }
         wake_up(&cdev->private->wait_q);
@@ -528,13 +529,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 static void
-ccw_device_nopath_notify(void *data)
+ccw_device_nopath_notify(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;
         struct subchannel *sch;
         int ret;

-        cdev = data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         sch = to_subchannel(cdev->dev.parent);
         /* Extra sanity. */
         if (sch->lpm)
@@ -547,8 +550,7 @@ ccw_device_nopath_notify(void *data)
                         cio_disable_subchannel(sch);
                 if (get_device(&cdev->dev)) {
                         PREPARE_WORK(&cdev->private->kick_work,
-                                     ccw_device_call_sch_unregister,
-                                     cdev);
+                                     ccw_device_call_sch_unregister);
                         queue_work(ccw_device_work,
                                    &cdev->private->kick_work);
                 } else
@@ -607,7 +609,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
                 /* Reset oper notify indication after verify error. */
                 cdev->private->flags.donotify = 0;
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify, cdev);
+                             ccw_device_nopath_notify);
                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
                 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                 break;
@@ -738,7 +740,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
         sch = to_subchannel(cdev->dev.parent);
         if (get_device(&cdev->dev)) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_call_sch_unregister, cdev);
+                             ccw_device_call_sch_unregister);
                 queue_work(ccw_device_work, &cdev->private->kick_work);
         }
         wake_up(&cdev->private->wait_q);
@@ -769,7 +771,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
         }
         if (get_device(&cdev->dev)) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_call_sch_unregister, cdev);
+                             ccw_device_call_sch_unregister);
                 queue_work(ccw_device_work, &cdev->private->kick_work);
         }
         wake_up(&cdev->private->wait_q);
@@ -874,7 +876,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
                 sch = to_subchannel(cdev->dev.parent);
                 if (!sch->lpm) {
                         PREPARE_WORK(&cdev->private->kick_work,
-                                     ccw_device_nopath_notify, cdev);
+                                     ccw_device_nopath_notify);
                         queue_work(ccw_device_notify_work,
                                    &cdev->private->kick_work);
                 } else
@@ -969,7 +971,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
                       ERR_PTR(-EIO));
         if (!sch->lpm) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify, cdev);
+                             ccw_device_nopath_notify);
                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
         } else if (cdev->private->flags.doverify)
                 /* Start delayed path verification. */
@@ -992,7 +994,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
         sch = to_subchannel(cdev->dev.parent);
         if (!sch->lpm) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify, cdev);
+                             ccw_device_nopath_notify);
                 queue_work(ccw_device_notify_work,
                            &cdev->private->kick_work);
         } else
@@ -1021,7 +1023,7 @@ void device_kill_io(struct subchannel *sch)
         if (ret == -ENODEV) {
                 if (!sch->lpm) {
                         PREPARE_WORK(&cdev->private->kick_work,
-                                     ccw_device_nopath_notify, cdev);
+                                     ccw_device_nopath_notify);
                         queue_work(ccw_device_notify_work,
                                    &cdev->private->kick_work);
                 } else
@@ -1033,7 +1035,7 @@ void device_kill_io(struct subchannel *sch)
                       ERR_PTR(-EIO));
         if (!sch->lpm) {
                 PREPARE_WORK(&cdev->private->kick_work,
-                             ccw_device_nopath_notify, cdev);
+                             ccw_device_nopath_notify);
                 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
         } else
                 /* Start delayed path verification. */
...
@@ -2045,11 +2045,13 @@ omit_handler_call:
 }

 static void
-qdio_call_shutdown(void *data)
+qdio_call_shutdown(struct work_struct *work)
 {
+        struct ccw_device_private *priv;
         struct ccw_device *cdev;

-        cdev = (struct ccw_device *)data;
+        priv = container_of(work, struct ccw_device_private, kick_work);
+        cdev = priv->cdev;
         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
         put_device(&cdev->dev);
 }
@@ -2091,7 +2093,7 @@ qdio_timeout_handler(struct ccw_device *cdev)
         if (get_device(&cdev->dev)) {
                 /* Can't call shutdown from interrupt context. */
                 PREPARE_WORK(&cdev->private->kick_work,
-                             qdio_call_shutdown, (void *)cdev);
+                             qdio_call_shutdown);
                 queue_work(ccw_device_work, &cdev->private->kick_work);
         }
         break;
...