Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] Improve CD/DVD packet driver write performance

This patch improves write performance for the CD/DVD packet writing driver.
The logic for switching between reading and writing has been changed so
that streaming writes are no longer interrupted by read requests.

Signed-off-by: Peter Osterlund <petero2@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Peter Osterlund and committed by Linus Torvalds.
46c271be dfb388bf

+21 -17
+20 -16
drivers/block/pktcdvd.c
··· 467 467 * Queue a bio for processing by the low-level CD device. Must be called 468 468 * from process context. 469 469 */ 470 - static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read) 470 + static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) 471 471 { 472 472 spin_lock(&pd->iosched.lock); 473 473 if (bio_data_dir(bio) == READ) { 474 474 pkt_add_list_last(bio, &pd->iosched.read_queue, 475 475 &pd->iosched.read_queue_tail); 476 - if (high_prio_read) 477 - pd->iosched.high_prio_read = 1; 478 476 } else { 479 477 pkt_add_list_last(bio, &pd->iosched.write_queue, 480 478 &pd->iosched.write_queue_tail); ··· 488 490 * requirements for CDRW drives: 489 491 * - A cache flush command must be inserted before a read request if the 490 492 * previous request was a write. 491 - * - Switching between reading and writing is slow, so don't it more often 493 + * - Switching between reading and writing is slow, so don't do it more often 492 494 * than necessary. 495 + * - Optimize for throughput at the expense of latency. This means that streaming 496 + * writes will never be interrupted by a read, but if the drive has to seek 497 + * before the next write, switch to reading instead if there are any pending 498 + * read requests. 493 499 * - Set the read speed according to current usage pattern. When only reading 494 500 * from the device, it's best to use the highest possible read speed, but 495 501 * when switching often between reading and writing, it's better to have the 496 502 * same read and write speeds. 497 - * - Reads originating from user space should have higher priority than reads 498 - * originating from pkt_gather_data, because some process is usually waiting 499 - * on reads of the first kind. 
500 503 */ 501 504 static void pkt_iosched_process_queue(struct pktcdvd_device *pd) 502 505 { ··· 511 512 512 513 for (;;) { 513 514 struct bio *bio; 514 - int reads_queued, writes_queued, high_prio_read; 515 + int reads_queued, writes_queued; 515 516 516 517 spin_lock(&pd->iosched.lock); 517 518 reads_queued = (pd->iosched.read_queue != NULL); 518 519 writes_queued = (pd->iosched.write_queue != NULL); 519 - if (!reads_queued) 520 - pd->iosched.high_prio_read = 0; 521 - high_prio_read = pd->iosched.high_prio_read; 522 520 spin_unlock(&pd->iosched.lock); 523 521 524 522 if (!reads_queued && !writes_queued) 525 523 break; 526 524 527 525 if (pd->iosched.writing) { 528 - if (high_prio_read || (!writes_queued && reads_queued)) { 526 + int need_write_seek = 1; 527 + spin_lock(&pd->iosched.lock); 528 + bio = pd->iosched.write_queue; 529 + spin_unlock(&pd->iosched.lock); 530 + if (bio && (bio->bi_sector == pd->iosched.last_write)) 531 + need_write_seek = 0; 532 + if (need_write_seek && reads_queued) { 529 533 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 530 534 VPRINTK("pktcdvd: write, waiting\n"); 531 535 break; ··· 561 559 562 560 if (bio_data_dir(bio) == READ) 563 561 pd->iosched.successive_reads += bio->bi_size >> 10; 564 - else 562 + else { 565 563 pd->iosched.successive_reads = 0; 564 + pd->iosched.last_write = bio->bi_sector + bio_sectors(bio); 565 + } 566 566 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { 567 567 if (pd->read_speed == pd->write_speed) { 568 568 pd->read_speed = MAX_SPEED; ··· 769 765 770 766 atomic_inc(&pkt->io_wait); 771 767 bio->bi_rw = READ; 772 - pkt_queue_bio(pd, bio, 0); 768 + pkt_queue_bio(pd, bio); 773 769 frames_read++; 774 770 } 775 771 ··· 1066 1062 1067 1063 atomic_set(&pkt->io_wait, 1); 1068 1064 pkt->w_bio->bi_rw = WRITE; 1069 - pkt_queue_bio(pd, pkt->w_bio, 0); 1065 + pkt_queue_bio(pd, pkt->w_bio); 1070 1066 } 1071 1067 1072 1068 static void pkt_finish_packet(struct packet_data *pkt, int uptodate) ··· 2124 2120 
cloned_bio->bi_private = psd; 2125 2121 cloned_bio->bi_end_io = pkt_end_io_read_cloned; 2126 2122 pd->stats.secs_r += bio->bi_size >> 9; 2127 - pkt_queue_bio(pd, cloned_bio, 1); 2123 + pkt_queue_bio(pd, cloned_bio); 2128 2124 return 0; 2129 2125 } 2130 2126
+1 -1
include/linux/pktcdvd.h
··· 159 159 struct bio *read_queue_tail; 160 160 struct bio *write_queue; 161 161 struct bio *write_queue_tail; 162 - int high_prio_read; /* An important read request has been queued */ 162 + sector_t last_write; /* The sector where the last write ended */ 163 163 int successive_reads; 164 164 }; 165 165