Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: enhance alloc_multiple_bios() to be more versatile

alloc_multiple_bios() has the useful ability to try allocating bios
with GFP_NOWAIT but will fall back to using GFP_NOIO. The callers
service both empty flush bios and abnormal bios (e.g. discard).

alloc_multiple_bios() enhancements offered in this commit:
- don't require table_devices_lock if num_bios = 1
- allow caller to pass GFP_NOWAIT to do the usual GFP_NOWAIT allocation
with GFP_NOIO fallback
- allow caller to pass GFP_NOIO to _only_ allocate using GFP_NOIO

Flush bios with data may be issued to DM with REQ_NOWAIT, as such it
makes sense to attempt servicing them with GFP_NOWAIT allocations.

But abnormal IO should never be issued using REQ_NOWAIT (if that
changes in the future that's fine, but no sense supporting it now).

While at it, rename __send_changing_extent_only() to
__send_abnormal_io().

[Thanks to both Ming and Mikulas for help with translating known
possible IO scenarios to requirements.]

Suggested-by: Ming Lei <ming.lei@redhat.com>
Suggested-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>

+34 -34
+34 -34
drivers/md/dm.c
··· 1478 1478 1479 1479 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, 1480 1480 struct dm_target *ti, unsigned int num_bios, 1481 - unsigned *len) 1481 + unsigned *len, gfp_t gfp_flag) 1482 1482 { 1483 1483 struct bio *bio; 1484 - int try; 1484 + int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1; 1485 1485 1486 - for (try = 0; try < 2; try++) { 1486 + for (; try < 2; try++) { 1487 1487 int bio_nr; 1488 1488 1489 - if (try) 1489 + if (try && num_bios > 1) 1490 1490 mutex_lock(&ci->io->md->table_devices_lock); 1491 1491 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { 1492 1492 bio = alloc_tio(ci, ti, bio_nr, len, ··· 1496 1496 1497 1497 bio_list_add(blist, bio); 1498 1498 } 1499 - if (try) 1499 + if (try && num_bios > 1) 1500 1500 mutex_unlock(&ci->io->md->table_devices_lock); 1501 1501 if (bio_nr == num_bios) 1502 1502 return; ··· 1507 1507 } 1508 1508 1509 1509 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1510 - unsigned int num_bios, unsigned int *len) 1510 + unsigned int num_bios, unsigned int *len, 1511 + gfp_t gfp_flag) 1511 1512 { 1512 1513 struct bio_list blist = BIO_EMPTY_LIST; 1513 1514 struct bio *clone; 1514 1515 unsigned int ret = 0; 1515 1516 1516 - switch (num_bios) { 1517 - case 0: 1518 - break; 1519 - case 1: 1520 - if (len) 1521 - setup_split_accounting(ci, *len); 1522 - clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); 1523 - __map_bio(clone); 1524 - ret = 1; 1525 - break; 1526 - default: 1527 - if (len) 1528 - setup_split_accounting(ci, *len); 1529 - /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 1530 - alloc_multiple_bios(&blist, ci, ti, num_bios, len); 1531 - while ((clone = bio_list_pop(&blist))) { 1517 + if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */ 1518 + return 0; 1519 + 1520 + /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ 1521 + if (len) 1522 + setup_split_accounting(ci, *len); 1523 + 1524 + /* 1525 + * 
Using alloc_multiple_bios(), even if num_bios is 1, to consistently 1526 + * support allocating using GFP_NOWAIT with GFP_NOIO fallback. 1527 + */ 1528 + alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag); 1529 + while ((clone = bio_list_pop(&blist))) { 1530 + if (num_bios > 1) 1532 1531 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); 1533 - __map_bio(clone); 1534 - ret += 1; 1535 - } 1536 - break; 1532 + __map_bio(clone); 1533 + ret += 1; 1537 1534 } 1538 1535 1539 1536 return ret; ··· 1557 1560 unsigned int bios; 1558 1561 struct dm_target *ti = dm_table_get_target(t, i); 1559 1562 1563 + if (unlikely(ti->num_flush_bios == 0)) 1564 + continue; 1565 + 1560 1566 atomic_add(ti->num_flush_bios, &ci->io->io_count); 1561 - bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1567 + bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios, 1568 + NULL, GFP_NOWAIT); 1562 1569 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count); 1563 1570 } 1564 1571 ··· 1575 1574 bio_uninit(ci->bio); 1576 1575 } 1577 1576 1578 - static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, 1579 - unsigned int num_bios, 1580 - unsigned int max_granularity, 1581 - unsigned int max_sectors) 1577 + static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti, 1578 + unsigned int num_bios, unsigned int max_granularity, 1579 + unsigned int max_sectors) 1582 1580 { 1583 1581 unsigned int len, bios; 1584 1582 ··· 1585 1585 __max_io_len(ti, ci->sector, max_granularity, max_sectors)); 1586 1586 1587 1587 atomic_add(num_bios, &ci->io->io_count); 1588 - bios = __send_duplicate_bios(ci, ti, num_bios, &len); 1588 + bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO); 1589 1589 /* 1590 1590 * alloc_io() takes one extra reference for submission, so the 1591 1591 * reference won't reach 0 without the following (+1) subtraction ··· 1654 1654 if (unlikely(!num_bios)) 1655 1655 return BLK_STS_NOTSUPP; 1656 1656 1657 - 
__send_changing_extent_only(ci, ti, num_bios, 1658 - max_granularity, max_sectors); 1657 + __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors); 1658 + 1659 1659 return BLK_STS_OK; 1660 1660 } 1661 1661