Commit 97c566ce authored by Borislav Petkov, committed by Bartlomiej Zolnierkiewicz

ide-tape: remove pipeline-specific code from idetape_add_chrdev_write_request

Refrain from adding more write requests to the pipeline and queue them
directly on the device's request queue instead.

[bart: re-do for minimal behavior changes]
Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
parent f64eee7b
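
Net effect on idetape_add_chrdev_write_request(): after this patch the function is just the retry loop below. This is a sketch stitched together from the two hunks that follow; the few context lines elided between the hunks (the unlock/else handling around idetape_plug_pipeline()) are filled in from the surrounding ide-tape code of this era and should be read as an approximation, not a verbatim copy of the tree.

/* Queue up a character device originated write request. */
static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
        idetape_tape_t *tape = drive->driver_data;
        unsigned long flags;

        debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

        /* Attempt to allocate a new stage. Beware possible race conditions. */
        while (1) {
                spin_lock_irqsave(&tape->lock, flags);
                if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
                        /* A pipeline request is in flight; wait for it. */
                        idetape_wait_for_request(drive, tape->active_data_rq);
                        spin_unlock_irqrestore(&tape->lock, flags);
                } else {
                        /* Elided between the hunks: drop the lock and kick
                         * the pipeline (approximated, see note above). */
                        spin_unlock_irqrestore(&tape->lock, flags);
                        idetape_plug_pipeline(drive);
                        if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
                                     &tape->flags))
                                continue;
                        /* Nothing pipelined: issue the write synchronously. */
                        return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
                                        blocks, tape->merge_stage->bh);
                }
        }
}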
@@ -2202,28 +2202,16 @@ static void idetape_wait_first_stage(ide_drive_t *drive)
         spin_unlock_irqrestore(&tape->lock, flags);
 }
 
-/*
- * Try to add a character device originated write request to our pipeline. In
- * case we don't succeed, we revert to non-pipelined operation mode for this
- * request. In order to accomplish that, we
- *
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced and try again
- * each time.
- * 3. If we still can't allocate a stage, fallback to non-pipelined operation
- * mode for this request.
- */
+/* Queue up a character device originated write request. */
 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
 {
         idetape_tape_t *tape = drive->driver_data;
-        idetape_stage_t *new_stage;
         unsigned long flags;
-        struct request *rq;
 
         debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
 
         /* Attempt to allocate a new stage. Beware possible race conditions. */
-        while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
+        while (1) {
                 spin_lock_irqsave(&tape->lock, flags);
                 if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
                         idetape_wait_for_request(drive, tape->active_data_rq);
@@ -2234,49 +2222,10 @@ static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
                         if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
                                      &tape->flags))
                                 continue;
-                        /*
-                         * The machine is short on memory. Fallback to non-
-                         * pipelined operation mode for this request.
-                         */
                         return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
                                         blocks, tape->merge_stage->bh);
                 }
         }
-        rq = &new_stage->rq;
-        idetape_init_rq(rq, REQ_IDETAPE_WRITE);
-        /* Doesn't actually matter - We always assume sequential access */
-        rq->sector = tape->first_frame;
-        rq->current_nr_sectors = blocks;
-        rq->nr_sectors = blocks;
-
-        idetape_switch_buffers(tape, new_stage);
-        idetape_add_stage_tail(drive, new_stage);
-        tape->pipeline_head++;
-        idetape_calculate_speeds(drive);
-
-        /*
-         * Estimate whether the tape has stopped writing by checking if our
-         * write pipeline is currently empty. If we are not writing anymore,
-         * wait for the pipeline to be almost completely full (90%) before
-         * starting to service requests, so that we will be able to keep up with
-         * the higher speeds of the tape.
-         */
-        if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
-                if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
-                    tape->nr_stages >= tape->max_stages -
-                    tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
-                    tape->blk_size) {
-                        tape->measure_insert_time = 1;
-                        tape->insert_time = jiffies;
-                        tape->insert_size = 0;
-                        tape->insert_speed = 0;
-                        idetape_plug_pipeline(drive);
-                }
-        }
-        if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
-                /* Return a deferred error */
-                return -EIO;
-        return blocks;
 }
 
 /*
...
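
With the stage allocation gone, every character device write that cannot piggyback on an active pipeline ends up in idetape_queue_rw_tail(), i.e. it is built into a request and pushed straight onto the drive's request queue. Below is a minimal sketch of that direct path, assuming the 2.6.25-era ide_do_drive_cmd(drive, rq, ide_wait) interface; the helper name and body are illustrative, mirroring the request setup removed above, not a copy of the function in this tree.

/*
 * Hypothetical helper: submit one chrdev write synchronously, bypassing
 * the pipeline. Mirrors the rq setup that this patch removes; the exact
 * idetape_queue_rw_tail() in this tree may differ in detail.
 */
static int queue_write_directly(ide_drive_t *drive, int blocks,
                                struct idetape_bh *bh)
{
        idetape_tape_t *tape = drive->driver_data;
        struct request rq;

        idetape_init_rq(&rq, REQ_IDETAPE_WRITE);
        rq.special = (void *)bh;        /* data buffers for the PIO handler */
        /* Doesn't actually matter - we always assume sequential access. */
        rq.sector = tape->first_frame;
        rq.current_nr_sectors = blocks;
        rq.nr_sectors = blocks;

        /* Insert at the tail of the device's request queue and wait. */
        (void)ide_do_drive_cmd(drive, &rq, ide_wait);

        return blocks;
}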