Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md: raid1,10: Handle REQ_WRITE_SAME flag in write bios

Set the mddev queue's max_write_same_sectors to its chunk_sectors value (before
disk_stack_limits merges the underlying disk limits). With that in place,
be sure to handle writes coming down from the block layer that have the
REQ_WRITE_SAME flag set. That flag needs to be copied into any newly cloned
write bio.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: "Martin K. Petersen" <martin.petersen@oracle.com>
Signed-off-by: NeilBrown <neilb@suse.de>

Authored by Joe Lawrence and committed by NeilBrown
c8dc9c65 bbfa57c0

+13 -3
+6 -1
drivers/md/raid1.c
··· 1000 1000 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); 1001 1001 const unsigned long do_discard = (bio->bi_rw 1002 1002 & (REQ_DISCARD | REQ_SECURE)); 1003 + const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); 1003 1004 struct md_rdev *blocked_rdev; 1004 1005 struct blk_plug_cb *cb; 1005 1006 struct raid1_plug_cb *plug = NULL; ··· 1302 1301 conf->mirrors[i].rdev->data_offset); 1303 1302 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1304 1303 mbio->bi_end_io = raid1_end_write_request; 1305 - mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard; 1304 + mbio->bi_rw = 1305 + WRITE | do_flush_fua | do_sync | do_discard | do_same; 1306 1306 mbio->bi_private = r1_bio; 1307 1307 1308 1308 atomic_inc(&r1_bio->remaining); ··· 2820 2818 if (IS_ERR(conf)) 2821 2819 return PTR_ERR(conf); 2822 2820 2821 + if (mddev->queue) 2822 + blk_queue_max_write_same_sectors(mddev->queue, 2823 + mddev->chunk_sectors); 2823 2824 rdev_for_each(rdev, mddev) { 2824 2825 if (!mddev->gendisk) 2825 2826 continue;
+7 -2
drivers/md/raid10.c
··· 1105 1105 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1106 1106 const unsigned long do_discard = (bio->bi_rw 1107 1107 & (REQ_DISCARD | REQ_SECURE)); 1108 + const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME); 1108 1109 unsigned long flags; 1109 1110 struct md_rdev *blocked_rdev; 1110 1111 struct blk_plug_cb *cb; ··· 1461 1460 rdev)); 1462 1461 mbio->bi_bdev = rdev->bdev; 1463 1462 mbio->bi_end_io = raid10_end_write_request; 1464 - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; 1463 + mbio->bi_rw = 1464 + WRITE | do_sync | do_fua | do_discard | do_same; 1465 1465 mbio->bi_private = r10_bio; 1466 1466 1467 1467 atomic_inc(&r10_bio->remaining); ··· 1504 1502 r10_bio, rdev)); 1505 1503 mbio->bi_bdev = rdev->bdev; 1506 1504 mbio->bi_end_io = raid10_end_write_request; 1507 - mbio->bi_rw = WRITE | do_sync | do_fua | do_discard; 1505 + mbio->bi_rw = 1506 + WRITE | do_sync | do_fua | do_discard | do_same; 1508 1507 mbio->bi_private = r10_bio; 1509 1508 1510 1509 atomic_inc(&r10_bio->remaining); ··· 3572 3569 if (mddev->queue) { 3573 3570 blk_queue_max_discard_sectors(mddev->queue, 3574 3571 mddev->chunk_sectors); 3572 + blk_queue_max_write_same_sectors(mddev->queue, 3573 + mddev->chunk_sectors); 3575 3574 blk_queue_io_min(mddev->queue, chunk_size); 3576 3575 if (conf->geo.raid_disks % conf->geo.near_copies) 3577 3576 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);