Commit 152587de, committed by James Bottomley

[PATCH] fix NMI lockup with CFQ scheduler

The problem is that the queue lock currently lives in the SCSI device
structure, so when that structure is freed on device release, we go boom
if the request queue tries to take the lock again.

The fix here is to move the lock from the scsi_device to the queue.
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 56fece20
@@ -1715,6 +1715,15 @@ request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 	if (blk_init_free_list(q))
 		goto out_init;
 
+	/*
+	 * if caller didn't supply a lock, they get per-queue locking with
+	 * our embedded lock
+	 */
+	if (!lock) {
+		spin_lock_init(&q->__queue_lock);
+		lock = &q->__queue_lock;
+	}
+
 	q->request_fn		= rfn;
 	q->back_merge_fn	= ll_back_merge_fn;
 	q->front_merge_fn	= ll_front_merge_fn;
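With this change, a block driver that has no lock of its own can pass NULL and rely on the queue's embedded lock. A minimal sketch of that usage against the 2.6-era block API shown above; the function names example_request_fn and example_alloc_queue are hypothetical, for illustration only:

#include <linux/blkdev.h>

/* Hypothetical request handler, named here only for illustration. */
static void example_request_fn(request_queue_t *q)
{
	/* dequeue and process requests; q->queue_lock is held on entry */
}

static request_queue_t *example_alloc_queue(void)
{
	/*
	 * Passing NULL asks blk_init_queue() to initialise the queue's
	 * embedded __queue_lock and point q->queue_lock at it, so the
	 * lock can never be freed before the queue that uses it.
	 */
	return blk_init_queue(example_request_fn, NULL);
}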
@@ -360,9 +360,9 @@ void scsi_device_unbusy(struct scsi_device *sdev)
 		     shost->host_failed))
 		scsi_eh_wakeup(shost);
 	spin_unlock(shost->host_lock);
-	spin_lock(&sdev->sdev_lock);
+	spin_lock(sdev->request_queue->queue_lock);
 	sdev->device_busy--;
-	spin_unlock_irqrestore(&sdev->sdev_lock, flags);
+	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 }
 
 /*
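For context only (not part of the patch): the spin_lock()/spin_unlock_irqrestore() pairing above is intentional, because scsi_device_unbusy() disables interrupts earlier when it takes the host lock. A rough sketch of the surrounding pattern, with a hypothetical function name and assuming the 2.6-era struct Scsi_Host fields (host_lock pointer, host_busy counter):

static void example_unbusy(struct Scsi_Host *shost, struct scsi_device *sdev)
{
	unsigned long flags;

	/* interrupts go off here and stay off across both critical sections */
	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	spin_unlock(shost->host_lock);

	/* plain spin_lock() suffices: IRQs are already disabled */
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	/* the final unlock restores the interrupt state saved above */
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}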
@@ -1425,7 +1425,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	struct Scsi_Host *shost = sdev->host;
 	struct request_queue *q;
 
-	q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
+	q = blk_init_queue(scsi_request_fn, NULL);
 	if (!q)
 		return NULL;
 
@@ -249,7 +249,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	 */
 	sdev->borken = 1;
 
-	spin_lock_init(&sdev->sdev_lock);
 	sdev->request_queue = scsi_alloc_queue(sdev);
 	if (!sdev->request_queue) {
 		/* release fn is set up in scsi_sysfs_device_initialise, so
@@ -355,8 +355,11 @@ struct request_queue
 	unsigned long		queue_flags;
 
 	/*
-	 * protects queue structures from reentrancy
+	 * protects queue structures from reentrancy. ->__queue_lock should
+	 * _never_ be used directly, it is queue private. always use
+	 * ->queue_lock.
 	 */
+	spinlock_t		__queue_lock;
 	spinlock_t		*queue_lock;
 
 	/*
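As the new comment states, code outside the block layer should always go through the queue_lock pointer, which points either at the embedded __queue_lock or at a lock the driver handed to blk_init_queue(). A small sketch of a consumer taking it, with a hypothetical helper name:

/* Hypothetical helper: serialise against the queue's request function. */
static void example_with_queue_lock(struct request_queue *q)
{
	unsigned long flags;

	/* always dereference q->queue_lock; never touch q->__queue_lock */
	spin_lock_irqsave(q->queue_lock, flags);
	/* ... inspect or modify queue state here ... */
	spin_unlock_irqrestore(q->queue_lock, flags);
}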
@@ -44,7 +44,6 @@ struct scsi_device {
 	struct list_head    same_target_siblings; /* just the devices sharing same target id */
 
 	volatile unsigned short device_busy;	/* commands actually active on low-level */
-	spinlock_t sdev_lock;           /* also the request queue_lock */
 	spinlock_t list_lock;
 	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
 	struct list_head starved_entry;