Commit 46c271be authored by Peter Osterlund, committed by Linus Torvalds

[PATCH] Improve CD/DVD packet driver write performance

This patch improves write performance for the CD/DVD packet writing driver.
The logic for switching between reading and writing has been changed so
that streaming writes are no longer interrupted by read requests.
Signed-off-by: Peter Osterlund <petero2@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dfb388bf
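
In outline, the new heuristic in the diff below is: while in write mode, check whether the bio at the head of the write queue starts exactly where the previous write ended (bi_sector == last_write); if it does, no seek is needed and the streaming write continues uninterrupted, and only when a seek is unavoidable are pending reads serviced first. The following standalone C sketch models that decision under simplified assumptions; struct iosched_model, switch_to_read and the sector numbers are illustrative names, not the driver's code, and only the last_write comparison mirrors the patch.

    /*
     * Standalone model of the seek-aware switching rule. Only the
     * comparison against last_write mirrors the kernel code; everything
     * else is a simplified stand-in.
     */
    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct iosched_model {
            bool writing;           /* scheduler is currently in write mode */
            bool reads_queued;      /* at least one read bio is pending */
            bool writes_queued;     /* at least one write bio is pending */
            sector_t next_write;    /* start sector of the head write bio */
            sector_t last_write;    /* sector where the previous write ended */
    };

    /* Decide whether to leave write mode and service pending reads. */
    static bool switch_to_read(const struct iosched_model *m)
    {
            if (!m->writing || !m->reads_queued)
                    return false;
            /* The next write continues where the last one ended: no seek
             * is needed, so the streaming write is not interrupted. */
            if (m->writes_queued && m->next_write == m->last_write)
                    return false;
            /* A seek is unavoidable anyway, so service the reads first. */
            return true;
    }

    int main(void)
    {
            struct iosched_model m = {
                    .writing = true, .reads_queued = true, .writes_queued = true,
                    .next_write = 1000, .last_write = 1000, /* sequential */
            };
            printf("streaming write, read pending: switch=%d\n", switch_to_read(&m));
            m.next_write = 5000;    /* the next write would require a seek */
            printf("seeking write, read pending:   switch=%d\n", switch_to_read(&m));
            return 0;
    }

With a sequential write queued, the first call prints switch=0; once the next write sits past a gap, the second prints switch=1.
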
@@ -467,14 +467,12 @@ static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsign
  * Queue a bio for processing by the low-level CD device. Must be called
  * from process context.
  */
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
 {
         spin_lock(&pd->iosched.lock);
         if (bio_data_dir(bio) == READ) {
                 pkt_add_list_last(bio, &pd->iosched.read_queue,
                                   &pd->iosched.read_queue_tail);
-                if (high_prio_read)
-                        pd->iosched.high_prio_read = 1;
         } else {
                 pkt_add_list_last(bio, &pd->iosched.write_queue,
                                   &pd->iosched.write_queue_tail);
@@ -490,15 +488,16 @@ static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_p
  * requirements for CDRW drives:
  * - A cache flush command must be inserted before a read request if the
  *   previous request was a write.
- * - Switching between reading and writing is slow, so don't it more often
+ * - Switching between reading and writing is slow, so don't do it more often
  *   than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ *   writes will never be interrupted by a read, but if the drive has to seek
+ *   before the next write, switch to reading instead if there are any pending
+ *   read requests.
  * - Set the read speed according to current usage pattern. When only reading
  *   from the device, it's best to use the highest possible read speed, but
  *   when switching often between reading and writing, it's better to have the
  *   same read and write speeds.
- * - Reads originating from user space should have higher priority than reads
- *   originating from pkt_gather_data, because some process is usually waiting
- *   on reads of the first kind.
  */
 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 {
@@ -512,21 +511,24 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
         for (;;) {
                 struct bio *bio;
-                int reads_queued, writes_queued, high_prio_read;
+                int reads_queued, writes_queued;
 
                 spin_lock(&pd->iosched.lock);
                 reads_queued = (pd->iosched.read_queue != NULL);
                 writes_queued = (pd->iosched.write_queue != NULL);
-                if (!reads_queued)
-                        pd->iosched.high_prio_read = 0;
-                high_prio_read = pd->iosched.high_prio_read;
                 spin_unlock(&pd->iosched.lock);
 
                 if (!reads_queued && !writes_queued)
                         break;
 
                 if (pd->iosched.writing) {
-                        if (high_prio_read || (!writes_queued && reads_queued)) {
+                        int need_write_seek = 1;
+                        spin_lock(&pd->iosched.lock);
+                        bio = pd->iosched.write_queue;
+                        spin_unlock(&pd->iosched.lock);
+                        if (bio && (bio->bi_sector == pd->iosched.last_write))
+                                need_write_seek = 0;
+                        if (need_write_seek && reads_queued) {
                                 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
                                         VPRINTK("pktcdvd: write, waiting\n");
                                         break;
@@ -559,8 +561,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
                 if (bio_data_dir(bio) == READ)
                         pd->iosched.successive_reads += bio->bi_size >> 10;
-                else
+                else {
                         pd->iosched.successive_reads = 0;
+                        pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
+                }
 
                 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
                         if (pd->read_speed == pd->write_speed) {
                                 pd->read_speed = MAX_SPEED;
@@ -765,7 +769,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
                 atomic_inc(&pkt->io_wait);
                 bio->bi_rw = READ;
-                pkt_queue_bio(pd, bio, 0);
+                pkt_queue_bio(pd, bio);
                 frames_read++;
         }
@@ -1062,7 +1066,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
         atomic_set(&pkt->io_wait, 1);
         pkt->w_bio->bi_rw = WRITE;
-        pkt_queue_bio(pd, pkt->w_bio, 0);
+        pkt_queue_bio(pd, pkt->w_bio);
 }
 
 static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
@@ -2120,7 +2124,7 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
                 cloned_bio->bi_private = psd;
                 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
                 pd->stats.secs_r += bio->bi_size >> 9;
-                pkt_queue_bio(pd, cloned_bio, 1);
+                pkt_queue_bio(pd, cloned_bio);
                 return 0;
         }
...
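
The read-speed rule from the comment block earlier in this diff corresponds to the HI_SPEED_SWITCH hunk: each read adds its size in kilobytes (bi_size >> 10) to successive_reads, any write resets the counter, and once the counter passes the threshold while the read and write speeds are still locked together, the read speed is raised to MAX_SPEED. A minimal sketch of that accounting follows; the two constant values are assumptions, since neither macro's definition appears in this diff, and the call that actually programs the drive (pkt_set_speed) is omitted.

    #include <stdbool.h>

    /* Assumed values: HI_SPEED_SWITCH and MAX_SPEED are defined elsewhere
     * in the driver; their definitions are not part of this diff. */
    #define HI_SPEED_SWITCH 512     /* assumed: KB of uninterrupted reads */
    #define MAX_SPEED       0xffff  /* assumed: "as fast as the drive can" */

    struct speed_model {
            unsigned int successive_reads;  /* KB read since the last write */
            unsigned int read_speed, write_speed;
    };

    static void account_bio(struct speed_model *s, bool is_read,
                            unsigned int bytes)
    {
            if (is_read)
                    s->successive_reads += bytes >> 10;     /* bi_size >> 10 */
            else
                    s->successive_reads = 0;        /* a write resets the run */

            /* A long run of reads means the device is only being read from,
             * so the higher read speed is worth the switching cost. */
            if (s->successive_reads >= HI_SPEED_SWITCH &&
                s->read_speed == s->write_speed)
                    s->read_speed = MAX_SPEED;
    }
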
@@ -159,7 +159,7 @@ struct packet_iosched
         struct bio              *read_queue_tail;
         struct bio              *write_queue;
         struct bio              *write_queue_tail;
-        int                     high_prio_read; /* An important read request has been queued */
+        sector_t                last_write;     /* The sector where the last write ended */
         int                     successive_reads;
 };
...
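
The header hunk above supplies the state behind the new heuristic: last_write replaces the old high_prio_read flag in struct packet_iosched and records, after each dispatched write, the first sector past that write (bi_sector + bio_sectors(bio)), so the next scheduler pass can tell whether the head of the write queue continues the stream. Continuing the iosched_model sketch from the top of this page, the bookkeeping would look roughly like this; account_dispatch is a hypothetical helper, and in the real patch the update lives inline in pkt_iosched_process_queue:

    /* Hypothetical helper extending the iosched_model sketch above. */
    static void account_dispatch(struct iosched_model *m, bool is_write,
                                 sector_t start, unsigned int sectors)
    {
            /* Mirrors: pd->iosched.last_write = bi_sector + bio_sectors(bio) */
            if (is_write)
                    m->last_write = start + sectors;
            /* Reads do not modify last_write: on the next pass the
             * streaming test still compares against the point where
             * writing actually stopped. */
    }
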