Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

md/raid1: convert to use bio_submit_split_bioset()

Unify bio split code, and prepare to fix ordering of split IO.

Note that bio_submit_split_bioset() can fail the original bio directly on a
split error; set R1BIO_Returned in that case to notify raid_end_bio_io()
that the original bio has already been returned.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Yu Kuai and committed by
Jens Axboe
a6fcc160 5b38ee5a

+14 -28
+11 -27
drivers/md/raid1.c
··· 1317 1317 struct raid1_info *mirror; 1318 1318 struct bio *read_bio; 1319 1319 int max_sectors; 1320 - int rdisk, error; 1320 + int rdisk; 1321 1321 bool r1bio_existed = !!r1_bio; 1322 1322 1323 1323 /* ··· 1377 1377 } 1378 1378 1379 1379 if (max_sectors < bio_sectors(bio)) { 1380 - struct bio *split = bio_split(bio, max_sectors, 1381 - gfp, &conf->bio_split); 1382 - 1383 - if (IS_ERR(split)) { 1384 - error = PTR_ERR(split); 1380 + bio = bio_submit_split_bioset(bio, max_sectors, 1381 + &conf->bio_split); 1382 + if (!bio) { 1383 + set_bit(R1BIO_Returned, &r1_bio->state); 1385 1384 goto err_handle; 1386 1385 } 1387 1386 1388 - bio_chain(split, bio); 1389 - trace_block_split(split, bio->bi_iter.bi_sector); 1390 - submit_bio_noacct(bio); 1391 - bio = split; 1392 1387 r1_bio->master_bio = bio; 1393 1388 r1_bio->sectors = max_sectors; 1394 1389 } ··· 1411 1416 1412 1417 err_handle: 1413 1418 atomic_dec(&mirror->rdev->nr_pending); 1414 - bio->bi_status = errno_to_blk_status(error); 1415 - set_bit(R1BIO_Uptodate, &r1_bio->state); 1416 1419 raid_end_bio_io(r1_bio); 1417 1420 } 1418 1421 ··· 1477 1484 { 1478 1485 struct r1conf *conf = mddev->private; 1479 1486 struct r1bio *r1_bio; 1480 - int i, disks, k, error; 1487 + int i, disks, k; 1481 1488 unsigned long flags; 1482 1489 int first_clone; 1483 1490 int max_sectors; ··· 1581 1588 * complexity of supporting that is not worth 1582 1589 * the benefit. 
1583 1590 */ 1584 - if (bio->bi_opf & REQ_ATOMIC) { 1585 - error = -EIO; 1591 + if (bio->bi_opf & REQ_ATOMIC) 1586 1592 goto err_handle; 1587 - } 1588 1593 1589 1594 good_sectors = first_bad - r1_bio->sector; 1590 1595 if (good_sectors < max_sectors) ··· 1602 1611 max_sectors = min_t(int, max_sectors, 1603 1612 BIO_MAX_VECS * (PAGE_SIZE >> 9)); 1604 1613 if (max_sectors < bio_sectors(bio)) { 1605 - struct bio *split = bio_split(bio, max_sectors, 1606 - GFP_NOIO, &conf->bio_split); 1607 - 1608 - if (IS_ERR(split)) { 1609 - error = PTR_ERR(split); 1614 + bio = bio_submit_split_bioset(bio, max_sectors, 1615 + &conf->bio_split); 1616 + if (!bio) { 1617 + set_bit(R1BIO_Returned, &r1_bio->state); 1610 1618 goto err_handle; 1611 1619 } 1612 1620 1613 - bio_chain(split, bio); 1614 - trace_block_split(split, bio->bi_iter.bi_sector); 1615 - submit_bio_noacct(bio); 1616 - bio = split; 1617 1621 r1_bio->master_bio = bio; 1618 1622 r1_bio->sectors = max_sectors; 1619 1623 } ··· 1684 1698 } 1685 1699 } 1686 1700 1687 - bio->bi_status = errno_to_blk_status(error); 1688 - set_bit(R1BIO_Uptodate, &r1_bio->state); 1689 1701 raid_end_bio_io(r1_bio); 1690 1702 } 1691 1703
+3 -1
drivers/md/raid1.h
··· 178 178 * any write was successful. Otherwise we call when 179 179 * any write-behind write succeeds, otherwise we call 180 180 * with failure when last write completes (and all failed). 181 - * Record that bi_end_io was called with this flag... 181 + * 182 + * And for bio_split errors, record that bi_end_io was called 183 + * with this flag... 182 184 */ 183 185 R1BIO_Returned, 184 186 /* If a write for this request means we can clear some