ide: ide_hwgroup_t.rq doesn't need an ide_lock held

While at it:
- no need to check for hwgroup presence in ide_dump_opcode()
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent 44e31231
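
For context, here is a minimal user-space sketch of the before/after locking pattern this patch applies. It is an illustration only, with hypothetical names (end_cmd_old, end_cmd_new, blk_end_request_locked) and a pthread spinlock standing in for ide_lock; it is not the kernel code itself. The point it models: hwgroup->rq is only ever touched by the hwgroup owner, so reading or clearing it needs no ide_lock, and the lock is kept only around the block-layer completion call that still requires it.

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct request {
	int errors;
};

struct hwgroup {
	struct request *rq;	/* current request; only the owner touches this */
};

static pthread_spinlock_t ide_lock;	/* stand-in for the global ide_lock */

/* stand-in for __blk_end_request(); assumed to still need ide_lock held */
static void blk_end_request_locked(struct request *rq, int error)
{
	(void)rq;
	(void)error;
}

/* Before: the rq pointer is read and cleared under ide_lock. */
static void end_cmd_old(struct hwgroup *hwgroup, int err)
{
	struct request *rq;

	pthread_spin_lock(&ide_lock);
	rq = hwgroup->rq;
	hwgroup->rq = NULL;
	rq->errors = err;
	blk_end_request_locked(rq, rq->errors ? -EIO : 0);
	pthread_spin_unlock(&ide_lock);
}

/* After: hwgroup->rq is handled locklessly; ide_lock covers only the block layer. */
static void end_cmd_new(struct hwgroup *hwgroup, int err)
{
	struct request *rq = hwgroup->rq;

	hwgroup->rq = NULL;
	rq->errors = err;

	pthread_spin_lock(&ide_lock);
	blk_end_request_locked(rq, rq->errors ? -EIO : 0);
	pthread_spin_unlock(&ide_lock);
}

int main(void)
{
	struct request rq1 = { 0 }, rq2 = { 0 };
	struct hwgroup hwg;

	pthread_spin_init(&ide_lock, PTHREAD_PROCESS_PRIVATE);

	hwg.rq = &rq1;
	end_cmd_old(&hwg, 0);

	hwg.rq = &rq2;
	end_cmd_new(&hwg, 0);

	pthread_spin_destroy(&ide_lock);
	return 0;
}

The same shrink-the-critical-section shape shows up in each hunk below: the hwgroup->rq access moves outside the ide_lock region, and the lock is retained only where the block layer still expects it.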
@@ -317,7 +317,8 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
 static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 {
         ide_hwif_t *hwif = drive->hwif;
-        struct request *rq = hwif->hwgroup->rq;
+        ide_hwgroup_t *hwgroup = hwif->hwgroup;
+        struct request *rq = hwgroup->rq;
         int stat, err, sense_key;
 
         /* check for errors */
@@ -508,9 +509,10 @@ end_request:
                 spin_lock_irqsave(&ide_lock, flags);
                 blkdev_dequeue_request(rq);
-                HWGROUP(drive)->rq = NULL;
                 spin_unlock_irqrestore(&ide_lock, flags);
 
+                hwgroup->rq = NULL;
+
                 cdrom_queue_request_sense(drive, rq->sense, rq);
         } else
                 cdrom_end_request(drive, 0);
@@ -950,7 +952,8 @@ static int cdrom_newpc_intr_dummy_cb(struct request *rq)
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
-        struct request *rq = HWGROUP(drive)->rq;
+        ide_hwgroup_t *hwgroup = hwif->hwgroup;
+        struct request *rq = hwgroup->rq;
         xfer_func_t *xferfunc;
         ide_expiry_t *expiry = NULL;
         int dma_error = 0, dma, stat, thislen, uptodate = 0;
@@ -1157,8 +1160,9 @@ end_request:
                 spin_lock_irqsave(&ide_lock, flags);
                 if (__blk_end_request(rq, 0, dlen))
                         BUG();
-                HWGROUP(drive)->rq = NULL;
                 spin_unlock_irqrestore(&ide_lock, flags);
+
+                hwgroup->rq = NULL;
         } else {
                 if (!uptodate)
                         rq->cmd_flags |= REQ_FAILED;
@@ -107,17 +107,10 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
         unsigned int nr_bytes = nr_sectors << 9;
-        struct request *rq;
+        struct request *rq = drive->hwif->hwgroup->rq;
         unsigned long flags;
         int ret = 1;
 
-        /*
-         * room for locking improvements here, the calls below don't
-         * need the queue lock held at all
-         */
-        spin_lock_irqsave(&ide_lock, flags);
-        rq = HWGROUP(drive)->rq;
-
         if (!nr_bytes) {
                 if (blk_pc_request(rq))
                         nr_bytes = rq->data_len;
@@ -125,9 +118,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
                         nr_bytes = rq->hard_cur_sectors << 9;
         }
 
+        spin_lock_irqsave(&ide_lock, flags);
         ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
         spin_unlock_irqrestore(&ide_lock, flags);
 
         return ret;
 }
 EXPORT_SYMBOL(ide_end_request);
@@ -245,8 +239,9 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
         unsigned long flags;
         int ret;
 
-        spin_lock_irqsave(&ide_lock, flags);
         BUG_ON(!blk_rq_started(rq));
+
+        spin_lock_irqsave(&ide_lock, flags);
         ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
         spin_unlock_irqrestore(&ide_lock, flags);
@@ -278,7 +273,11 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
                 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
                 blk_start_queue(drive->queue);
         }
-        HWGROUP(drive)->rq = NULL;
+        spin_unlock_irqrestore(&ide_lock, flags);
+
+        drive->hwif->hwgroup->rq = NULL;
+
+        spin_lock_irqsave(&ide_lock, flags);
         if (__blk_end_request(rq, 0, 0))
                 BUG();
         spin_unlock_irqrestore(&ide_lock, flags);
@@ -300,12 +299,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
+        ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+        struct request *rq = hwgroup->rq;
         unsigned long flags;
-        struct request *rq;
-
-        spin_lock_irqsave(&ide_lock, flags);
-        rq = HWGROUP(drive)->rq;
-        spin_unlock_irqrestore(&ide_lock, flags);
 
         if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                 ide_task_t *task = (ide_task_t *)rq->special;
@@ -333,15 +329,16 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
                 return;
         }
 
-        spin_lock_irqsave(&ide_lock, flags);
-        HWGROUP(drive)->rq = NULL;
+        hwgroup->rq = NULL;
+
         rq->errors = err;
+
+        spin_lock_irqsave(&ide_lock, flags);
         if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
                                        blk_rq_bytes(rq))))
                 BUG();
         spin_unlock_irqrestore(&ide_lock, flags);
 }
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
@@ -1489,11 +1486,12 @@ out:
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
+        ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
         unsigned long flags;
-        ide_hwgroup_t *hwgroup = HWGROUP(drive);
 
-        spin_lock_irqsave(&ide_lock, flags);
         hwgroup->rq = NULL;
+
+        spin_lock_irqsave(&ide_lock, flags);
         __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
         blk_start_queueing(drive->queue);
         spin_unlock_irqrestore(&ide_lock, flags);
@@ -277,14 +277,9 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
 static void ide_dump_opcode(ide_drive_t *drive)
 {
-        struct request *rq;
+        struct request *rq = drive->hwif->hwgroup->rq;
         ide_task_t *task = NULL;
 
-        spin_lock(&ide_lock);
-        rq = NULL;
-        if (HWGROUP(drive))
-                rq = HWGROUP(drive)->rq;
-        spin_unlock(&ide_lock);
-
         if (!rq)
                 return;