block: Require subsystems to explicitly allocate bio_set integrity mempool

MD and DM create a new bio_set for every metadevice. Each bio_set has an
integrity mempool attached regardless of whether the metadevice is
capable of passing integrity metadata. This is a waste of memory.

Instead we defer the allocation decision to MD and DM since we know at
metadevice creation time whether integrity passthrough is needed or not.
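
In practical terms, a bio_set owner that knows it needs integrity passthrough now opts in explicitly after creating the set. A minimal sketch of the new calling convention (the want_integrity flag and pool_size value here are illustrative placeholders, not names from the patch):

    struct bio_set *bs;

    /* bioset_create() no longer attaches an integrity mempool by itself. */
    bs = bioset_create(pool_size, 0);
    if (!bs)
        return -ENOMEM;

    /* Opt in only when the metadevice can pass integrity metadata. */
    if (want_integrity && bioset_integrity_create(bs, pool_size)) {
        bioset_free(bs);
        return -ENOMEM;
    }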

Automatic integrity mempool allocation can then be removed from
bioset_create() and we make an explicit integrity allocation for the
fs_bio_set.
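
The fs/bio.c hunk below shows the explicit fs_bio_set allocation; bioset_integrity_create() also gains an early return when the pool already exists, so calling it repeatedly on the same bio_set (as the MD paths may via md_integrity_register()) is harmless. A minimal sketch mirroring that init-time call:

    fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
    if (!fs_bio_set)
        panic("bio: can't allocate bios\n");

    /* Explicit opt-in for the fs bio_set; a second call would simply
     * return 0 because the integrity pool is already present. */
    if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
        panic("bio: can't create integrity pool\n");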

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

authored by Martin K. Petersen and committed by Jens Axboe a91a2785 82f04ab4

+41 -22
+5 -2
drivers/md/dm-table.c
···
     struct dm_target *targets;

     unsigned discards_supported:1;
+    unsigned integrity_supported:1;

     /*
      * Indicates the rw permissions for the new logical
···
         return -EINVAL;
     }

-    t->mempools = dm_alloc_md_mempools(type);
+    t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
     if (!t->mempools)
         return -ENOMEM;
···
     struct dm_dev_internal *dd;

     list_for_each_entry(dd, devices, list)
-        if (bdev_get_integrity(dd->dm_dev.bdev))
+        if (bdev_get_integrity(dd->dm_dev.bdev)) {
+            t->integrity_supported = 1;
             return blk_integrity_register(dm_disk(md), NULL);
+        }

     return 0;
 }
+9 -3
drivers/md/dm.c
···
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);

-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
 {
     struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
+    unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;

     if (!pools)
         return NULL;
···
     if (!pools->tio_pool)
         goto free_io_pool_and_out;

-    pools->bs = (type == DM_TYPE_BIO_BASED) ?
-        bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
+    pools->bs = bioset_create(pool_size, 0);
     if (!pools->bs)
         goto free_tio_pool_and_out;

+    if (integrity && bioset_integrity_create(pools->bs, pool_size))
+        goto free_bioset_and_out;
+
     return pools;

+free_bioset_and_out:
+    bioset_free(pools->bs);
+
 free_tio_pool_and_out:
     mempool_destroy(pools->tio_pool);
+1 -1
drivers/md/dm.h
···
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(unsigned type);
+struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity);
 void dm_free_md_mempools(struct dm_md_mempools *pools);

 #endif
+1 -2
drivers/md/linear.c
···
     blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
     mddev->queue->backing_dev_info.congested_fn = linear_congested;
     mddev->queue->backing_dev_info.congested_data = mddev;
-    md_integrity_register(mddev);
-    return 0;
+    return md_integrity_register(mddev);
 }

 static void free_conf(struct rcu_head *head)
+6 -2
drivers/md/md.c
···
             mdname(mddev));
         return -EINVAL;
     }
-    printk(KERN_NOTICE "md: data integrity on %s enabled\n",
-           mdname(mddev));
+    printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+    if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
+        printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+               mdname(mddev));
+        return -EINVAL;
+    }
     return 0;
 }
 EXPORT_SYMBOL(md_integrity_register);
+5 -2
drivers/md/multipath.c
···
             p->rdev = rdev;
             goto abort;
         }
-        md_integrity_register(mddev);
+        err = md_integrity_register(mddev);
     }
 abort:

···

     mddev->queue->backing_dev_info.congested_fn = multipath_congested;
     mddev->queue->backing_dev_info.congested_data = mddev;
-    md_integrity_register(mddev);
+
+    if (md_integrity_register(mddev))
+        goto out_free_conf;
+
     return 0;

 out_free_conf:
+1 -2
drivers/md/raid0.c
···

     blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
     dump_zones(mddev);
-    md_integrity_register(mddev);
-    return 0;
+    return md_integrity_register(mddev);
 }

 static int raid0_stop(mddev_t *mddev)
+2 -3
drivers/md/raid1.c
···
             p->rdev = rdev;
             goto abort;
         }
-        md_integrity_register(mddev);
+        err = md_integrity_register(mddev);
     }
 abort:

···

     mddev->queue->backing_dev_info.congested_fn = raid1_congested;
     mddev->queue->backing_dev_info.congested_data = mddev;
-    md_integrity_register(mddev);
-    return 0;
+    return md_integrity_register(mddev);
 }

 static int stop(mddev_t *mddev)
+5 -2
drivers/md/raid10.c
···
             p->rdev = rdev;
             goto abort;
         }
-        md_integrity_register(mddev);
+        err = md_integrity_register(mddev);
     }
 abort:

···

     if (conf->near_copies < conf->raid_disks)
         blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
-    md_integrity_register(mddev);
+
+    if (md_integrity_register(mddev))
+        goto out_free_conf;
+
     return 0;

 out_free_conf:
+3
fs/bio-integrity.c
···
 {
     unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES);

+    if (bs->bio_integrity_pool)
+        return 0;
+
     bs->bio_integrity_pool =
         mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab);

+3 -3
fs/bio.c
···
     if (!bs->bio_pool)
         goto bad;

-    if (bioset_integrity_create(bs, pool_size))
-        goto bad;
-
     if (!biovec_create_pools(bs, pool_size))
         return bs;
···
     fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
     if (!fs_bio_set)
         panic("bio: can't allocate bios\n");
+
+    if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
+        panic("bio: can't create integrity pool\n");

     bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
                          sizeof(struct bio_pair));