Commit e91442b6 authored by James Bottomley

[SCSI] SCSI core: fix leakage of scsi_cmnd's

From: 	Alan Stern <stern@rowland.harvard.edu>

This patch (as559b) adds a new routine, scsi_unprep_request, which
gets called every place a request is requeued.  (That includes
scsi_queue_insert as well as scsi_requeue_command.)  It also changes
scsi_kill_requests to make it call __scsi_done with result equal to
DID_NO_CONNECT << 16.  (I'm not sure if it's necessary to call
scsi_init_cmd_errh here; maybe you can check on that.)  Finally, the
patch changes the return value from scsi_end_request, to avoid
returning a stale pointer in the case where the request was requeued.
Fortunately the return value is used in only one place, and the change
actually simplified it.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>

Rejections fixed up and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
parent 286f3e13
...@@ -97,6 +97,30 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head) ...@@ -97,6 +97,30 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
} }
static void scsi_run_queue(struct request_queue *q); static void scsi_run_queue(struct request_queue *q);
static void scsi_release_buffers(struct scsi_cmnd *cmd);
/*
* Function: scsi_unprep_request()
*
* Purpose: Remove all preparation done for a request, including its
* associated scsi_cmnd, so that it can be requeued.
*
* Arguments: req - request to unprepare
*
* Lock status: Assumed that no locks are held upon entry.
*
* Returns: Nothing.
*/
static void scsi_unprep_request(struct request *req)
{
/* The prep phase stashed the command in req->special. */
struct scsi_cmnd *cmd = req->special;
/* Clear REQ_DONTPREP so the request is prepared afresh when requeued. */
req->flags &= ~REQ_DONTPREP;
/*
 * Special (scsi_request-originated) requests keep their original
 * sreq pointer in req->special; ordinary requests get NULL.
 * NOTE(review): cmd->sc_request must be read here, before the
 * scsi_put_command() below — that call presumably releases cmd,
 * after which the pointer would be stale.
 */
req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
scsi_release_buffers(cmd);
scsi_put_command(cmd);
}
/* /*
* Function: scsi_queue_insert() * Function: scsi_queue_insert()
...@@ -116,12 +140,14 @@ static void scsi_run_queue(struct request_queue *q); ...@@ -116,12 +140,14 @@ static void scsi_run_queue(struct request_queue *q);
* commands. * commands.
* Notes: This could be called either from an interrupt context or a * Notes: This could be called either from an interrupt context or a
* normal process context. * normal process context.
* Notes: Upon return, cmd is a stale pointer.
*/ */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{ {
struct Scsi_Host *host = cmd->device->host; struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device; struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue; struct request_queue *q = device->request_queue;
struct request *req = cmd->request;
unsigned long flags; unsigned long flags;
SCSI_LOG_MLQUEUE(1, SCSI_LOG_MLQUEUE(1,
...@@ -162,8 +188,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) ...@@ -162,8 +188,9 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
* function. The SCSI request function detects the blocked condition * function. The SCSI request function detects the blocked condition
* and plugs the queue appropriately. * and plugs the queue appropriately.
*/ */
scsi_unprep_request(req);
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request); blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q); scsi_run_queue(q);
...@@ -552,15 +579,16 @@ static void scsi_run_queue(struct request_queue *q) ...@@ -552,15 +579,16 @@ static void scsi_run_queue(struct request_queue *q)
* I/O errors in the middle of the request, in which case * I/O errors in the middle of the request, in which case
* we need to request the blocks that come after the bad * we need to request the blocks that come after the bad
* sector. * sector.
* Notes: Upon return, cmd is a stale pointer.
*/ */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{ {
struct request *req = cmd->request;
unsigned long flags; unsigned long flags;
cmd->request->flags &= ~REQ_DONTPREP; scsi_unprep_request(req);
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request); blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q); scsi_run_queue(q);
...@@ -595,13 +623,14 @@ void scsi_run_host_queues(struct Scsi_Host *shost) ...@@ -595,13 +623,14 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
* *
* Lock status: Assumed that lock is not held upon entry. * Lock status: Assumed that lock is not held upon entry.
* *
* Returns: cmd if requeue done or required, NULL otherwise * Returns: cmd if requeue required, NULL otherwise.
* *
* Notes: This is called for block device requests in order to * Notes: This is called for block device requests in order to
* mark some number of sectors as complete. * mark some number of sectors as complete.
* *
* We are guaranteeing that the request queue will be goosed * We are guaranteeing that the request queue will be goosed
* at some point during this call. * at some point during this call.
* Notes: If cmd was requeued, upon return it will be a stale pointer.
*/ */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
int bytes, int requeue) int bytes, int requeue)
...@@ -624,14 +653,15 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, ...@@ -624,14 +653,15 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
if (!uptodate && blk_noretry_request(req)) if (!uptodate && blk_noretry_request(req))
end_that_request_chunk(req, 0, leftover); end_that_request_chunk(req, 0, leftover);
else { else {
if (requeue) if (requeue) {
/* /*
* Bleah. Leftovers again. Stick the * Bleah. Leftovers again. Stick the
* leftovers in the front of the * leftovers in the front of the
* queue, and goose the queue again. * queue, and goose the queue again.
*/ */
scsi_requeue_command(q, cmd); scsi_requeue_command(q, cmd);
cmd = NULL;
}
return cmd; return cmd;
} }
} }
...@@ -857,16 +887,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, ...@@ -857,16 +887,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
* requeueing right here - we will requeue down below * requeueing right here - we will requeue down below
* when we handle the bad sectors. * when we handle the bad sectors.
*/ */
cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
/* /*
* If the command completed without error, then either finish off the * If the command completed without error, then either
* rest of the command, or start a new one. * finish off the rest of the command, or start a new one.
*/ */
if (result == 0 || cmd == NULL ) { if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
return; return;
} }
}
/* /*
* Now, if we were good little boys and girls, Santa left us a request * Now, if we were good little boys and girls, Santa left us a request
* sense buffer. We can extract information from this, so we * sense buffer. We can extract information from this, so we
...@@ -880,7 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, ...@@ -880,7 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
* and quietly refuse further access. * and quietly refuse further access.
*/ */
cmd->device->changed = 1; cmd->device->changed = 1;
cmd = scsi_end_request(cmd, 0, scsi_end_request(cmd, 0,
this_count, 1); this_count, 1);
return; return;
} else { } else {
...@@ -914,7 +942,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, ...@@ -914,7 +942,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
scsi_requeue_command(q, cmd); scsi_requeue_command(q, cmd);
result = 0; result = 0;
} else { } else {
cmd = scsi_end_request(cmd, 0, this_count, 1); scsi_end_request(cmd, 0, this_count, 1);
return; return;
} }
break; break;
...@@ -931,7 +959,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, ...@@ -931,7 +959,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
dev_printk(KERN_INFO, dev_printk(KERN_INFO,
&cmd->device->sdev_gendev, &cmd->device->sdev_gendev,
"Device not ready.\n"); "Device not ready.\n");
cmd = scsi_end_request(cmd, 0, this_count, 1); scsi_end_request(cmd, 0, this_count, 1);
return; return;
case VOLUME_OVERFLOW: case VOLUME_OVERFLOW:
if (!(req->flags & REQ_QUIET)) { if (!(req->flags & REQ_QUIET)) {
...@@ -941,7 +969,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, ...@@ -941,7 +969,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
__scsi_print_command(cmd->data_cmnd); __scsi_print_command(cmd->data_cmnd);
scsi_print_sense("", cmd); scsi_print_sense("", cmd);
} }
cmd = scsi_end_request(cmd, 0, block_bytes, 1); scsi_end_request(cmd, 0, block_bytes, 1);
return; return;
default: default:
break; break;
...@@ -972,7 +1000,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, ...@@ -972,7 +1000,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
block_bytes = req->hard_cur_sectors << 9; block_bytes = req->hard_cur_sectors << 9;
if (!block_bytes) if (!block_bytes)
block_bytes = req->data_len; block_bytes = req->data_len;
cmd = scsi_end_request(cmd, 0, block_bytes, 1); scsi_end_request(cmd, 0, block_bytes, 1);
} }
} }
EXPORT_SYMBOL(scsi_io_completion); EXPORT_SYMBOL(scsi_io_completion);
...@@ -1336,19 +1364,24 @@ static inline int scsi_host_queue_ready(struct request_queue *q, ...@@ -1336,19 +1364,24 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
} }
/* /*
* Kill requests for a dead device * Kill a request for a dead device
*/ */
static void scsi_kill_requests(request_queue_t *q) static void scsi_kill_request(struct request *req, request_queue_t *q)
{ {
struct request *req; struct scsi_cmnd *cmd = req->special;
while ((req = elv_next_request(q)) != NULL) { spin_unlock(q->queue_lock);
blkdev_dequeue_request(req); if (unlikely(cmd == NULL)) {
req->flags |= REQ_QUIET; printk(KERN_CRIT "impossible request in %s.\n",
while (end_that_request_first(req, 0, req->nr_sectors)) __FUNCTION__);
; BUG();
end_that_request_last(req);
} }
scsi_init_cmd_errh(cmd);
cmd->result = DID_NO_CONNECT << 16;
atomic_inc(&cmd->device->iorequest_cnt);
__scsi_done(cmd);
spin_lock(q->queue_lock);
} }
/* /*
...@@ -1371,7 +1404,8 @@ static void scsi_request_fn(struct request_queue *q) ...@@ -1371,7 +1404,8 @@ static void scsi_request_fn(struct request_queue *q)
if (!sdev) { if (!sdev) {
printk("scsi: killing requests for dead queue\n"); printk("scsi: killing requests for dead queue\n");
scsi_kill_requests(q); while ((req = elv_next_request(q)) != NULL)
scsi_kill_request(req, q);
return; return;
} }
...@@ -1399,10 +1433,7 @@ static void scsi_request_fn(struct request_queue *q) ...@@ -1399,10 +1433,7 @@ static void scsi_request_fn(struct request_queue *q)
printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n", printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
sdev->host->host_no, sdev->id, sdev->lun); sdev->host->host_no, sdev->id, sdev->lun);
blkdev_dequeue_request(req); blkdev_dequeue_request(req);
req->flags |= REQ_QUIET; scsi_kill_request(req, q);
while (end_that_request_first(req, 0, req->nr_sectors))
;
end_that_request_last(req);
continue; continue;
} }
...@@ -1415,6 +1446,14 @@ static void scsi_request_fn(struct request_queue *q) ...@@ -1415,6 +1446,14 @@ static void scsi_request_fn(struct request_queue *q)
sdev->device_busy++; sdev->device_busy++;
spin_unlock(q->queue_lock); spin_unlock(q->queue_lock);
cmd = req->special;
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
"linux-scsi@vger.kernel.org",
__FUNCTION__);
BUG();
}
spin_lock(shost->host_lock); spin_lock(shost->host_lock);
if (!scsi_host_queue_ready(q, shost, sdev)) if (!scsi_host_queue_ready(q, shost, sdev))
...@@ -1433,15 +1472,6 @@ static void scsi_request_fn(struct request_queue *q) ...@@ -1433,15 +1472,6 @@ static void scsi_request_fn(struct request_queue *q)
*/ */
spin_unlock_irq(shost->host_lock); spin_unlock_irq(shost->host_lock);
cmd = req->special;
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n"
"please mail a stack trace to "
"linux-scsi@vger.kernel.org",
__FUNCTION__);
BUG();
}
/* /*
* Finally, initialize any error handling parameters, and set up * Finally, initialize any error handling parameters, and set up
* the timers for timeouts. * the timers for timeouts.
...@@ -1477,6 +1507,7 @@ static void scsi_request_fn(struct request_queue *q) ...@@ -1477,6 +1507,7 @@ static void scsi_request_fn(struct request_queue *q)
* cases (host limits or settings) should run the queue at some * cases (host limits or settings) should run the queue at some
* later time. * later time.
*/ */
scsi_unprep_request(req);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req); blk_requeue_request(q, req);
sdev->device_busy--; sdev->device_busy--;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment