Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dm-3.20-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper changes from Mike Snitzer:

- The most significant change this cycle is request-based DM now
supports stacking on top of blk-mq devices. This blk-mq support
changes the model request-based DM uses for cloning a request to
relying on calling blk_get_request() directly from the underlying
blk-mq device.

An early consumer of this code is Intel's emerging NVMe hardware;
thanks to Keith Busch for working on, and pushing for, these changes.

- A few other small fixes and cleanups across other DM targets.

* tag 'dm-3.20-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm: inherit QUEUE_FLAG_SG_GAPS flags from underlying queues
dm snapshot: remove unnecessary NULL checks before vfree() calls
dm mpath: simplify failure path of dm_multipath_init()
dm thin metadata: remove unused dm_pool_get_data_block_size()
dm ioctl: fix stale comment above dm_get_inactive_table()
dm crypt: update url in CONFIG_DM_CRYPT help text
dm bufio: fix time comparison to use time_after_eq()
dm: use time_in_range() and time_after()
dm raid: fix a couple integer overflows
dm table: train hybrid target type detection to select blk-mq if appropriate
dm: allocate requests in target when stacking on blk-mq devices
dm: prepare for allocating blk-mq clone requests in target
dm: submit stacked requests in irq enabled context
dm: split request structure out from dm_rq_target_io structure
dm: remove exports for request-based interfaces without external callers

+415 -197
+2 -3
drivers/md/Kconfig
··· 231 231 transparently encrypts the data on it. You'll need to activate 232 232 the ciphers you're going to use in the cryptoapi configuration. 233 233 234 - Information on how to use dm-crypt can be found on 235 - 236 - <http://www.saout.de/misc/dm-crypt/> 234 + For further information on dm-crypt and userspace tools see: 235 + <http://code.google.com/p/cryptsetup/wiki/DMCrypt> 237 236 238 237 To compile this code as a module, choose M here: the module will 239 238 be called dm-crypt.
+2 -1
drivers/md/dm-bufio.c
··· 11 11 #include <linux/device-mapper.h> 12 12 #include <linux/dm-io.h> 13 13 #include <linux/slab.h> 14 + #include <linux/jiffies.h> 14 15 #include <linux/vmalloc.h> 15 16 #include <linux/shrinker.h> 16 17 #include <linux/module.h> ··· 1740 1739 1741 1740 static bool older_than(struct dm_buffer *b, unsigned long age_hz) 1742 1741 { 1743 - return (jiffies - b->last_accessed) >= age_hz; 1742 + return time_after_eq(jiffies, b->last_accessed + age_hz); 1744 1743 } 1745 1744 1746 1745 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
+3 -2
drivers/md/dm-cache-target.c
··· 11 11 12 12 #include <linux/dm-io.h> 13 13 #include <linux/dm-kcopyd.h> 14 + #include <linux/jiffies.h> 14 15 #include <linux/init.h> 15 16 #include <linux/mempool.h> 16 17 #include <linux/module.h> ··· 1563 1562 1564 1563 static int need_commit_due_to_time(struct cache *cache) 1565 1564 { 1566 - return jiffies < cache->last_commit_jiffies || 1567 - jiffies > cache->last_commit_jiffies + COMMIT_PERIOD; 1565 + return !time_in_range(jiffies, cache->last_commit_jiffies, 1566 + cache->last_commit_jiffies + COMMIT_PERIOD); 1568 1567 } 1569 1568 1570 1569 static int commit_if_needed(struct cache *cache)
+2 -2
drivers/md/dm-ioctl.c
··· 639 639 640 640 /* 641 641 * On successful return, the caller must not attempt to acquire 642 - * _hash_lock without first calling dm_table_put, because dm_table_destroy 643 - * waits for this dm_table_put and could be called under this lock. 642 + * _hash_lock without first calling dm_put_live_table, because dm_table_destroy 643 + * waits for this dm_put_live_table and could be called under this lock. 644 644 */ 645 645 static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx) 646 646 {
+3 -2
drivers/md/dm-log-userspace-base.c
··· 6 6 7 7 #include <linux/bio.h> 8 8 #include <linux/slab.h> 9 + #include <linux/jiffies.h> 9 10 #include <linux/dm-dirty-log.h> 10 11 #include <linux/device-mapper.h> 11 12 #include <linux/dm-log-userspace.h> ··· 830 829 int r; 831 830 uint64_t region64 = region; 832 831 struct log_c *lc = log->context; 833 - static unsigned long long limit; 832 + static unsigned long limit; 834 833 struct { 835 834 int64_t is_recovering; 836 835 uint64_t in_sync_hint; ··· 846 845 */ 847 846 if (region < lc->in_sync_hint) 848 847 return 0; 849 - else if (jiffies < limit) 848 + else if (time_after(limit, jiffies)) 850 849 return 1; 851 850 852 851 limit = jiffies + (HZ / 4);
+66 -21
drivers/md/dm-mpath.c
··· 11 11 #include "dm-path-selector.h" 12 12 #include "dm-uevent.h" 13 13 14 + #include <linux/blkdev.h> 14 15 #include <linux/ctype.h> 15 16 #include <linux/init.h> 16 17 #include <linux/mempool.h> ··· 379 378 /* 380 379 * Map cloned requests 381 380 */ 382 - static int multipath_map(struct dm_target *ti, struct request *clone, 383 - union map_info *map_context) 381 + static int __multipath_map(struct dm_target *ti, struct request *clone, 382 + union map_info *map_context, 383 + struct request *rq, struct request **__clone) 384 384 { 385 385 struct multipath *m = (struct multipath *) ti->private; 386 386 int r = DM_MAPIO_REQUEUE; 387 - size_t nr_bytes = blk_rq_bytes(clone); 388 - unsigned long flags; 387 + size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq); 389 388 struct pgpath *pgpath; 390 389 struct block_device *bdev; 391 390 struct dm_mpath_io *mpio; 392 391 393 - spin_lock_irqsave(&m->lock, flags); 392 + spin_lock_irq(&m->lock); 394 393 395 394 /* Do we need to select a new pgpath? 
*/ 396 395 if (!m->current_pgpath || ··· 412 411 /* ENOMEM, requeue */ 413 412 goto out_unlock; 414 413 415 - bdev = pgpath->path.dev->bdev; 416 - clone->q = bdev_get_queue(bdev); 417 - clone->rq_disk = bdev->bd_disk; 418 - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; 419 414 mpio = map_context->ptr; 420 415 mpio->pgpath = pgpath; 421 416 mpio->nr_bytes = nr_bytes; 417 + 418 + bdev = pgpath->path.dev->bdev; 419 + 420 + spin_unlock_irq(&m->lock); 421 + 422 + if (clone) { 423 + /* Old request-based interface: allocated clone is passed in */ 424 + clone->q = bdev_get_queue(bdev); 425 + clone->rq_disk = bdev->bd_disk; 426 + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; 427 + } else { 428 + /* blk-mq request-based interface */ 429 + *__clone = blk_get_request(bdev_get_queue(bdev), 430 + rq_data_dir(rq), GFP_KERNEL); 431 + if (IS_ERR(*__clone)) 432 + /* ENOMEM, requeue */ 433 + return r; 434 + (*__clone)->bio = (*__clone)->biotail = NULL; 435 + (*__clone)->rq_disk = bdev->bd_disk; 436 + (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; 437 + } 438 + 422 439 if (pgpath->pg->ps.type->start_io) 423 440 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, 424 441 &pgpath->path, 425 442 nr_bytes); 426 - r = DM_MAPIO_REMAPPED; 443 + return DM_MAPIO_REMAPPED; 427 444 428 445 out_unlock: 429 - spin_unlock_irqrestore(&m->lock, flags); 446 + spin_unlock_irq(&m->lock); 430 447 431 448 return r; 449 + } 450 + 451 + static int multipath_map(struct dm_target *ti, struct request *clone, 452 + union map_info *map_context) 453 + { 454 + return __multipath_map(ti, clone, map_context, NULL, NULL); 455 + } 456 + 457 + static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, 458 + union map_info *map_context, 459 + struct request **clone) 460 + { 461 + return __multipath_map(ti, NULL, map_context, rq, clone); 462 + } 463 + 464 + static void multipath_release_clone(struct request *clone) 465 + { 466 + blk_put_request(clone); 432 467 } 433 468 434 469 /* ··· 1703 1666 
*---------------------------------------------------------------*/ 1704 1667 static struct target_type multipath_target = { 1705 1668 .name = "multipath", 1706 - .version = {1, 7, 0}, 1669 + .version = {1, 8, 0}, 1707 1670 .module = THIS_MODULE, 1708 1671 .ctr = multipath_ctr, 1709 1672 .dtr = multipath_dtr, 1710 1673 .map_rq = multipath_map, 1674 + .clone_and_map_rq = multipath_clone_and_map, 1675 + .release_clone_rq = multipath_release_clone, 1711 1676 .rq_end_io = multipath_end_io, 1712 1677 .presuspend = multipath_presuspend, 1713 1678 .postsuspend = multipath_postsuspend, ··· 1733 1694 r = dm_register_target(&multipath_target); 1734 1695 if (r < 0) { 1735 1696 DMERR("register failed %d", r); 1736 - kmem_cache_destroy(_mpio_cache); 1737 - return -EINVAL; 1697 + r = -EINVAL; 1698 + goto bad_register_target; 1738 1699 } 1739 1700 1740 1701 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); 1741 1702 if (!kmultipathd) { 1742 1703 DMERR("failed to create workqueue kmpathd"); 1743 - dm_unregister_target(&multipath_target); 1744 - kmem_cache_destroy(_mpio_cache); 1745 - return -ENOMEM; 1704 + r = -ENOMEM; 1705 + goto bad_alloc_kmultipathd; 1746 1706 } 1747 1707 1748 1708 /* ··· 1754 1716 WQ_MEM_RECLAIM); 1755 1717 if (!kmpath_handlerd) { 1756 1718 DMERR("failed to create workqueue kmpath_handlerd"); 1757 - destroy_workqueue(kmultipathd); 1758 - dm_unregister_target(&multipath_target); 1759 - kmem_cache_destroy(_mpio_cache); 1760 - return -ENOMEM; 1719 + r = -ENOMEM; 1720 + goto bad_alloc_kmpath_handlerd; 1761 1721 } 1762 1722 1763 1723 DMINFO("version %u.%u.%u loaded", 1764 1724 multipath_target.version[0], multipath_target.version[1], 1765 1725 multipath_target.version[2]); 1726 + 1727 + return 0; 1728 + 1729 + bad_alloc_kmpath_handlerd: 1730 + destroy_workqueue(kmultipathd); 1731 + bad_alloc_kmultipathd: 1732 + dm_unregister_target(&multipath_target); 1733 + bad_register_target: 1734 + kmem_cache_destroy(_mpio_cache); 1766 1735 1767 1736 return r; 1768 
1737 }
+7 -9
drivers/md/dm-raid.c
··· 1237 1237 argv++; 1238 1238 1239 1239 /* Skip over RAID params for now and find out # of devices */ 1240 - if (num_raid_params + 1 > argc) { 1240 + if (num_raid_params >= argc) { 1241 1241 ti->error = "Arguments do not agree with counts given"; 1242 1242 return -EINVAL; 1243 1243 } ··· 1245 1245 if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) || 1246 1246 (num_raid_devs >= INT_MAX)) { 1247 1247 ti->error = "Cannot understand number of raid devices"; 1248 + return -EINVAL; 1249 + } 1250 + 1251 + argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */ 1252 + if (argc != (num_raid_devs * 2)) { 1253 + ti->error = "Supplied RAID devices does not match the count given"; 1248 1254 return -EINVAL; 1249 1255 } 1250 1256 ··· 1262 1256 if (ret) 1263 1257 goto bad; 1264 1258 1265 - ret = -EINVAL; 1266 - 1267 - argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */ 1268 1259 argv += num_raid_params + 1; 1269 - 1270 - if (argc != (num_raid_devs * 2)) { 1271 - ti->error = "Supplied RAID devices does not match the count given"; 1272 - goto bad; 1273 - } 1274 1260 1275 1261 ret = dev_parms(rs, argv); 1276 1262 if (ret)
+4 -10
drivers/md/dm-snap-persistent.c
··· 200 200 201 201 static void free_area(struct pstore *ps) 202 202 { 203 - if (ps->area) 204 - vfree(ps->area); 203 + vfree(ps->area); 205 204 ps->area = NULL; 206 - 207 - if (ps->zero_area) 208 - vfree(ps->zero_area); 205 + vfree(ps->zero_area); 209 206 ps->zero_area = NULL; 210 - 211 - if (ps->header_area) 212 - vfree(ps->header_area); 207 + vfree(ps->header_area); 213 208 ps->header_area = NULL; 214 209 } 215 210 ··· 600 605 free_area(ps); 601 606 602 607 /* Allocated in persistent_read_metadata */ 603 - if (ps->callbacks) 604 - vfree(ps->callbacks); 608 + vfree(ps->callbacks); 605 609 606 610 kfree(ps); 607 611 }
+57 -15
drivers/md/dm-table.c
··· 827 827 { 828 828 unsigned i; 829 829 unsigned bio_based = 0, request_based = 0, hybrid = 0; 830 + bool use_blk_mq = false; 830 831 struct dm_target *tgt; 831 832 struct dm_dev_internal *dd; 832 833 struct list_head *devices; 833 - unsigned live_md_type; 834 + unsigned live_md_type = dm_get_md_type(t->md); 834 835 835 836 for (i = 0; i < t->num_targets; i++) { 836 837 tgt = t->targets + i; ··· 855 854 * Determine the type from the live device. 856 855 * Default to bio-based if device is new. 857 856 */ 858 - live_md_type = dm_get_md_type(t->md); 859 - if (live_md_type == DM_TYPE_REQUEST_BASED) 857 + if (live_md_type == DM_TYPE_REQUEST_BASED || 858 + live_md_type == DM_TYPE_MQ_REQUEST_BASED) 860 859 request_based = 1; 861 860 else 862 861 bio_based = 1; ··· 870 869 871 870 BUG_ON(!request_based); /* No targets in this table */ 872 871 873 - /* Non-request-stackable devices can't be used for request-based dm */ 874 - devices = dm_table_get_devices(t); 875 - list_for_each_entry(dd, devices, list) { 876 - if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) { 877 - DMWARN("table load rejected: including" 878 - " non-request-stackable devices"); 879 - return -EINVAL; 880 - } 881 - } 882 - 883 872 /* 884 873 * Request-based dm supports only tables that have a single target now. 
885 874 * To support multiple targets, request splitting support is needed, ··· 881 890 return -EINVAL; 882 891 } 883 892 884 - t->type = DM_TYPE_REQUEST_BASED; 893 + /* Non-request-stackable devices can't be used for request-based dm */ 894 + devices = dm_table_get_devices(t); 895 + list_for_each_entry(dd, devices, list) { 896 + struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); 897 + 898 + if (!blk_queue_stackable(q)) { 899 + DMERR("table load rejected: including" 900 + " non-request-stackable devices"); 901 + return -EINVAL; 902 + } 903 + 904 + if (q->mq_ops) 905 + use_blk_mq = true; 906 + } 907 + 908 + if (use_blk_mq) { 909 + /* verify _all_ devices in the table are blk-mq devices */ 910 + list_for_each_entry(dd, devices, list) 911 + if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) { 912 + DMERR("table load rejected: not all devices" 913 + " are blk-mq request-stackable"); 914 + return -EINVAL; 915 + } 916 + t->type = DM_TYPE_MQ_REQUEST_BASED; 917 + 918 + } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { 919 + /* inherit live MD type */ 920 + t->type = live_md_type; 921 + 922 + } else 923 + t->type = DM_TYPE_REQUEST_BASED; 885 924 886 925 return 0; 887 926 } ··· 928 907 929 908 bool dm_table_request_based(struct dm_table *t) 930 909 { 931 - return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; 910 + unsigned table_type = dm_table_get_type(t); 911 + 912 + return (table_type == DM_TYPE_REQUEST_BASED || 913 + table_type == DM_TYPE_MQ_REQUEST_BASED); 914 + } 915 + 916 + bool dm_table_mq_request_based(struct dm_table *t) 917 + { 918 + return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED; 932 919 } 933 920 934 921 static int dm_table_alloc_md_mempools(struct dm_table *t) ··· 1389 1360 return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); 1390 1361 } 1391 1362 1363 + static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev, 1364 + sector_t start, sector_t len, void *data) 1365 + { 1366 + struct 
request_queue *q = bdev_get_queue(dev->bdev); 1367 + 1368 + return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags); 1369 + } 1370 + 1392 1371 static bool dm_table_all_devices_attribute(struct dm_table *t, 1393 1372 iterate_devices_callout_fn func) 1394 1373 { ··· 1516 1479 queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); 1517 1480 else 1518 1481 queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); 1482 + 1483 + if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps)) 1484 + queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q); 1485 + else 1486 + queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q); 1519 1487 1520 1488 dm_table_set_integrity(t); 1521 1489
+14 -1
drivers/md/dm-target.c
··· 137 137 return -EIO; 138 138 } 139 139 140 + static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, 141 + union map_info *map_context, 142 + struct request **clone) 143 + { 144 + return -EIO; 145 + } 146 + 147 + static void io_err_release_clone_rq(struct request *clone) 148 + { 149 + } 150 + 140 151 static struct target_type error_target = { 141 152 .name = "error", 142 - .version = {1, 2, 0}, 153 + .version = {1, 3, 0}, 143 154 .ctr = io_err_ctr, 144 155 .dtr = io_err_dtr, 145 156 .map = io_err_map, 146 157 .map_rq = io_err_map_rq, 158 + .clone_and_map_rq = io_err_clone_and_map_rq, 159 + .release_clone_rq = io_err_release_clone_rq, 147 160 }; 148 161 149 162 int __init dm_target_init(void)
-9
drivers/md/dm-thin-metadata.c
··· 1635 1635 return r; 1636 1636 } 1637 1637 1638 - int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result) 1639 - { 1640 - down_read(&pmd->root_lock); 1641 - *result = pmd->data_block_size; 1642 - up_read(&pmd->root_lock); 1643 - 1644 - return 0; 1645 - } 1646 - 1647 1638 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result) 1648 1639 { 1649 1640 int r = -EINVAL;
-2
drivers/md/dm-thin-metadata.h
··· 182 182 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, 183 183 dm_block_t *result); 184 184 185 - int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result); 186 - 187 185 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); 188 186 189 187 int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+3 -2
drivers/md/dm-thin.c
··· 11 11 #include <linux/device-mapper.h> 12 12 #include <linux/dm-io.h> 13 13 #include <linux/dm-kcopyd.h> 14 + #include <linux/jiffies.h> 14 15 #include <linux/log2.h> 15 16 #include <linux/list.h> 16 17 #include <linux/rculist.h> ··· 1701 1700 */ 1702 1701 static int need_commit_due_to_time(struct pool *pool) 1703 1702 { 1704 - return jiffies < pool->last_commit_jiffies || 1705 - jiffies > pool->last_commit_jiffies + COMMIT_PERIOD; 1703 + return !time_in_range(jiffies, pool->last_commit_jiffies, 1704 + pool->last_commit_jiffies + COMMIT_PERIOD); 1706 1705 } 1707 1706 1708 1707 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
+236 -109
drivers/md/dm.c
··· 20 20 #include <linux/hdreg.h> 21 21 #include <linux/delay.h> 22 22 #include <linux/wait.h> 23 + #include <linux/kthread.h> 23 24 24 25 #include <trace/events/block.h> 25 26 ··· 79 78 struct dm_rq_target_io { 80 79 struct mapped_device *md; 81 80 struct dm_target *ti; 82 - struct request *orig, clone; 81 + struct request *orig, *clone; 82 + struct kthread_work work; 83 83 int error; 84 84 union map_info info; 85 85 }; ··· 181 179 * io objects are allocated from here. 182 180 */ 183 181 mempool_t *io_pool; 182 + mempool_t *rq_pool; 184 183 185 184 struct bio_set *bs; 186 185 ··· 213 210 unsigned internal_suspend_count; 214 211 215 212 struct dm_stats stats; 213 + 214 + struct kthread_worker kworker; 215 + struct task_struct *kworker_task; 216 216 }; 217 217 218 218 /* ··· 223 217 */ 224 218 struct dm_md_mempools { 225 219 mempool_t *io_pool; 220 + mempool_t *rq_pool; 226 221 struct bio_set *bs; 227 222 }; 228 223 ··· 238 231 #define RESERVED_MAX_IOS 1024 239 232 static struct kmem_cache *_io_cache; 240 233 static struct kmem_cache *_rq_tio_cache; 234 + static struct kmem_cache *_rq_cache; 241 235 242 236 /* 243 237 * Bio-based DM's mempools' reserved IOs set by the user. 
··· 296 288 if (!_rq_tio_cache) 297 289 goto out_free_io_cache; 298 290 291 + _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request), 292 + __alignof__(struct request), 0, NULL); 293 + if (!_rq_cache) 294 + goto out_free_rq_tio_cache; 295 + 299 296 r = dm_uevent_init(); 300 297 if (r) 301 - goto out_free_rq_tio_cache; 298 + goto out_free_rq_cache; 302 299 303 300 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); 304 301 if (!deferred_remove_workqueue) { ··· 325 312 destroy_workqueue(deferred_remove_workqueue); 326 313 out_uevent_exit: 327 314 dm_uevent_exit(); 315 + out_free_rq_cache: 316 + kmem_cache_destroy(_rq_cache); 328 317 out_free_rq_tio_cache: 329 318 kmem_cache_destroy(_rq_tio_cache); 330 319 out_free_io_cache: ··· 340 325 flush_scheduled_work(); 341 326 destroy_workqueue(deferred_remove_workqueue); 342 327 328 + kmem_cache_destroy(_rq_cache); 343 329 kmem_cache_destroy(_rq_tio_cache); 344 330 kmem_cache_destroy(_io_cache); 345 331 unregister_blkdev(_major, _name); ··· 591 575 static void free_rq_tio(struct dm_rq_target_io *tio) 592 576 { 593 577 mempool_free(tio, tio->md->io_pool); 578 + } 579 + 580 + static struct request *alloc_clone_request(struct mapped_device *md, 581 + gfp_t gfp_mask) 582 + { 583 + return mempool_alloc(md->rq_pool, gfp_mask); 584 + } 585 + 586 + static void free_clone_request(struct mapped_device *md, struct request *rq) 587 + { 588 + mempool_free(rq, md->rq_pool); 594 589 } 595 590 596 591 static int md_in_flight(struct mapped_device *md) ··· 1019 992 * the md may be freed in dm_put() at the end of this function. 1020 993 * Or do dm_get() before calling this function and dm_put() later. 
1021 994 */ 1022 - static void rq_completed(struct mapped_device *md, int rw, int run_queue) 995 + static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1023 996 { 1024 997 atomic_dec(&md->pending[rw]); 1025 998 ··· 1047 1020 struct dm_rq_target_io *tio = clone->end_io_data; 1048 1021 1049 1022 blk_rq_unprep_clone(clone); 1023 + if (clone->q && clone->q->mq_ops) 1024 + tio->ti->type->release_clone_rq(clone); 1025 + else 1026 + free_clone_request(tio->md, clone); 1050 1027 free_rq_tio(tio); 1051 1028 } 1052 1029 1053 1030 /* 1054 1031 * Complete the clone and the original request. 1055 - * Must be called without queue lock. 1032 + * Must be called without clone's queue lock held, 1033 + * see end_clone_request() for more details. 1056 1034 */ 1057 1035 static void dm_end_request(struct request *clone, int error) 1058 1036 { ··· 1086 1054 1087 1055 static void dm_unprep_request(struct request *rq) 1088 1056 { 1089 - struct request *clone = rq->special; 1057 + struct dm_rq_target_io *tio = rq->special; 1058 + struct request *clone = tio->clone; 1090 1059 1091 1060 rq->special = NULL; 1092 1061 rq->cmd_flags &= ~REQ_DONTPREP; 1093 1062 1094 - free_rq_clone(clone); 1063 + if (clone) 1064 + free_rq_clone(clone); 1095 1065 } 1096 1066 1097 1067 /* 1098 1068 * Requeue the original request of a clone. 
1099 1069 */ 1100 - void dm_requeue_unmapped_request(struct request *clone) 1070 + static void dm_requeue_unmapped_original_request(struct mapped_device *md, 1071 + struct request *rq) 1101 1072 { 1102 - int rw = rq_data_dir(clone); 1103 - struct dm_rq_target_io *tio = clone->end_io_data; 1104 - struct mapped_device *md = tio->md; 1105 - struct request *rq = tio->orig; 1073 + int rw = rq_data_dir(rq); 1106 1074 struct request_queue *q = rq->q; 1107 1075 unsigned long flags; 1108 1076 ··· 1112 1080 blk_requeue_request(q, rq); 1113 1081 spin_unlock_irqrestore(q->queue_lock, flags); 1114 1082 1115 - rq_completed(md, rw, 0); 1083 + rq_completed(md, rw, false); 1116 1084 } 1117 - EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request); 1085 + 1086 + static void dm_requeue_unmapped_request(struct request *clone) 1087 + { 1088 + struct dm_rq_target_io *tio = clone->end_io_data; 1089 + 1090 + dm_requeue_unmapped_original_request(tio->md, tio->orig); 1091 + } 1118 1092 1119 1093 static void __stop_queue(struct request_queue *q) 1120 1094 { ··· 1189 1151 static void dm_softirq_done(struct request *rq) 1190 1152 { 1191 1153 bool mapped = true; 1192 - struct request *clone = rq->completion_data; 1193 - struct dm_rq_target_io *tio = clone->end_io_data; 1154 + struct dm_rq_target_io *tio = rq->special; 1155 + struct request *clone = tio->clone; 1156 + 1157 + if (!clone) { 1158 + blk_end_request_all(rq, tio->error); 1159 + rq_completed(tio->md, rq_data_dir(rq), false); 1160 + free_rq_tio(tio); 1161 + return; 1162 + } 1194 1163 1195 1164 if (rq->cmd_flags & REQ_FAILED) 1196 1165 mapped = false; ··· 1209 1164 * Complete the clone and the original request with the error status 1210 1165 * through softirq context. 
1211 1166 */ 1212 - static void dm_complete_request(struct request *clone, int error) 1167 + static void dm_complete_request(struct request *rq, int error) 1213 1168 { 1214 - struct dm_rq_target_io *tio = clone->end_io_data; 1215 - struct request *rq = tio->orig; 1169 + struct dm_rq_target_io *tio = rq->special; 1216 1170 1217 1171 tio->error = error; 1218 - rq->completion_data = clone; 1219 1172 blk_complete_request(rq); 1220 1173 } 1221 1174 ··· 1221 1178 * Complete the not-mapped clone and the original request with the error status 1222 1179 * through softirq context. 1223 1180 * Target's rq_end_io() function isn't called. 1224 - * This may be used when the target's map_rq() function fails. 1181 + * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. 1225 1182 */ 1226 - void dm_kill_unmapped_request(struct request *clone, int error) 1183 + static void dm_kill_unmapped_request(struct request *rq, int error) 1227 1184 { 1228 - struct dm_rq_target_io *tio = clone->end_io_data; 1229 - struct request *rq = tio->orig; 1230 - 1231 1185 rq->cmd_flags |= REQ_FAILED; 1232 - dm_complete_request(clone, error); 1186 + dm_complete_request(rq, error); 1233 1187 } 1234 - EXPORT_SYMBOL_GPL(dm_kill_unmapped_request); 1235 1188 1236 1189 /* 1237 - * Called with the queue lock held 1190 + * Called with the clone's queue lock held 1238 1191 */ 1239 1192 static void end_clone_request(struct request *clone, int error) 1240 1193 { 1241 - /* 1242 - * For just cleaning up the information of the queue in which 1243 - * the clone was dispatched. 1244 - * The clone is *NOT* freed actually here because it is alloced from 1245 - * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags. 1246 - */ 1247 - __blk_put_request(clone->q, clone); 1194 + struct dm_rq_target_io *tio = clone->end_io_data; 1195 + 1196 + if (!clone->q->mq_ops) { 1197 + /* 1198 + * For just cleaning up the information of the queue in which 1199 + * the clone was dispatched. 
1200 + * The clone is *NOT* freed actually here because it is alloced 1201 + * from dm own mempool (REQ_ALLOCED isn't set). 1202 + */ 1203 + __blk_put_request(clone->q, clone); 1204 + } 1248 1205 1249 1206 /* 1250 1207 * Actual request completion is done in a softirq context which doesn't 1251 - * hold the queue lock. Otherwise, deadlock could occur because: 1208 + * hold the clone's queue lock. Otherwise, deadlock could occur because: 1252 1209 * - another request may be submitted by the upper level driver 1253 1210 * of the stacking during the completion 1254 1211 * - the submission which requires queue lock may be done 1255 - * against this queue 1212 + * against this clone's queue 1256 1213 */ 1257 - dm_complete_request(clone, error); 1214 + dm_complete_request(tio->orig, error); 1258 1215 } 1259 1216 1260 1217 /* ··· 1732 1689 _dm_request(q, bio); 1733 1690 } 1734 1691 1735 - void dm_dispatch_request(struct request *rq) 1692 + static void dm_dispatch_clone_request(struct request *clone, struct request *rq) 1736 1693 { 1737 1694 int r; 1738 1695 1739 - if (blk_queue_io_stat(rq->q)) 1740 - rq->cmd_flags |= REQ_IO_STAT; 1696 + if (blk_queue_io_stat(clone->q)) 1697 + clone->cmd_flags |= REQ_IO_STAT; 1741 1698 1742 - rq->start_time = jiffies; 1743 - r = blk_insert_cloned_request(rq->q, rq); 1699 + clone->start_time = jiffies; 1700 + r = blk_insert_cloned_request(clone->q, clone); 1744 1701 if (r) 1702 + /* must complete clone in terms of original request */ 1745 1703 dm_complete_request(rq, r); 1746 1704 } 1747 - EXPORT_SYMBOL_GPL(dm_dispatch_request); 1748 1705 1749 1706 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, 1750 1707 void *data) ··· 1761 1718 } 1762 1719 1763 1720 static int setup_clone(struct request *clone, struct request *rq, 1764 - struct dm_rq_target_io *tio) 1721 + struct dm_rq_target_io *tio, gfp_t gfp_mask) 1765 1722 { 1766 1723 int r; 1767 1724 1768 - blk_rq_init(NULL, clone); 1769 - r = blk_rq_prep_clone(clone, rq, 
tio->md->bs, GFP_ATOMIC, 1725 + r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, 1770 1726 dm_rq_bio_constructor, tio); 1771 1727 if (r) 1772 1728 return r; ··· 1776 1734 clone->end_io = end_clone_request; 1777 1735 clone->end_io_data = tio; 1778 1736 1737 + tio->clone = clone; 1738 + 1779 1739 return 0; 1780 1740 } 1781 1741 1782 1742 static struct request *clone_rq(struct request *rq, struct mapped_device *md, 1783 - gfp_t gfp_mask) 1743 + struct dm_rq_target_io *tio, gfp_t gfp_mask) 1784 1744 { 1785 - struct request *clone; 1745 + struct request *clone = alloc_clone_request(md, gfp_mask); 1746 + 1747 + if (!clone) 1748 + return NULL; 1749 + 1750 + blk_rq_init(NULL, clone); 1751 + if (setup_clone(clone, rq, tio, gfp_mask)) { 1752 + /* -ENOMEM */ 1753 + free_clone_request(md, clone); 1754 + return NULL; 1755 + } 1756 + 1757 + return clone; 1758 + } 1759 + 1760 + static void map_tio_request(struct kthread_work *work); 1761 + 1762 + static struct dm_rq_target_io *prep_tio(struct request *rq, 1763 + struct mapped_device *md, gfp_t gfp_mask) 1764 + { 1786 1765 struct dm_rq_target_io *tio; 1766 + int srcu_idx; 1767 + struct dm_table *table; 1787 1768 1788 1769 tio = alloc_rq_tio(md, gfp_mask); 1789 1770 if (!tio) ··· 1814 1749 1815 1750 tio->md = md; 1816 1751 tio->ti = NULL; 1752 + tio->clone = NULL; 1817 1753 tio->orig = rq; 1818 1754 tio->error = 0; 1819 1755 memset(&tio->info, 0, sizeof(tio->info)); 1756 + init_kthread_work(&tio->work, map_tio_request); 1820 1757 1821 - clone = &tio->clone; 1822 - if (setup_clone(clone, rq, tio)) { 1823 - /* -ENOMEM */ 1824 - free_rq_tio(tio); 1825 - return NULL; 1758 + table = dm_get_live_table(md, &srcu_idx); 1759 + if (!dm_table_mq_request_based(table)) { 1760 + if (!clone_rq(rq, md, tio, gfp_mask)) { 1761 + dm_put_live_table(md, srcu_idx); 1762 + free_rq_tio(tio); 1763 + return NULL; 1764 + } 1826 1765 } 1766 + dm_put_live_table(md, srcu_idx); 1827 1767 1828 - return clone; 1768 + return tio; 1829 1769 } 1830 1770 1831 
1771 /* ··· 1839 1769 static int dm_prep_fn(struct request_queue *q, struct request *rq) 1840 1770 { 1841 1771 struct mapped_device *md = q->queuedata; 1842 - struct request *clone; 1772 + struct dm_rq_target_io *tio; 1843 1773 1844 1774 if (unlikely(rq->special)) { 1845 1775 DMWARN("Already has something in rq->special."); 1846 1776 return BLKPREP_KILL; 1847 1777 } 1848 1778 1849 - clone = clone_rq(rq, md, GFP_ATOMIC); 1850 - if (!clone) 1779 + tio = prep_tio(rq, md, GFP_ATOMIC); 1780 + if (!tio) 1851 1781 return BLKPREP_DEFER; 1852 1782 1853 - rq->special = clone; 1783 + rq->special = tio; 1854 1784 rq->cmd_flags |= REQ_DONTPREP; 1855 1785 1856 1786 return BLKPREP_OK; ··· 1858 1788 1859 1789 /* 1860 1790 * Returns: 1861 - * 0 : the request has been processed (not requeued) 1862 - * !0 : the request has been requeued 1791 + * 0 : the request has been processed 1792 + * DM_MAPIO_REQUEUE : the original request needs to be requeued 1793 + * < 0 : the request was completed due to failure 1863 1794 */ 1864 - static int map_request(struct dm_target *ti, struct request *clone, 1795 + static int map_request(struct dm_target *ti, struct request *rq, 1865 1796 struct mapped_device *md) 1866 1797 { 1867 - int r, requeued = 0; 1868 - struct dm_rq_target_io *tio = clone->end_io_data; 1798 + int r; 1799 + struct dm_rq_target_io *tio = rq->special; 1800 + struct request *clone = NULL; 1869 1801 1870 - tio->ti = ti; 1871 - r = ti->type->map_rq(ti, clone, &tio->info); 1802 + if (tio->clone) { 1803 + clone = tio->clone; 1804 + r = ti->type->map_rq(ti, clone, &tio->info); 1805 + } else { 1806 + r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); 1807 + if (r < 0) { 1808 + /* The target wants to complete the I/O */ 1809 + dm_kill_unmapped_request(rq, r); 1810 + return r; 1811 + } 1812 + if (IS_ERR(clone)) 1813 + return DM_MAPIO_REQUEUE; 1814 + if (setup_clone(clone, rq, tio, GFP_KERNEL)) { 1815 + /* -ENOMEM */ 1816 + ti->type->release_clone_rq(clone); 1817 + return 
DM_MAPIO_REQUEUE; 1818 + } 1819 + } 1820 + 1872 1821 switch (r) { 1873 1822 case DM_MAPIO_SUBMITTED: 1874 1823 /* The target has taken the I/O to submit by itself later */ ··· 1895 1806 case DM_MAPIO_REMAPPED: 1896 1807 /* The target has remapped the I/O so dispatch it */ 1897 1808 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)), 1898 - blk_rq_pos(tio->orig)); 1899 - dm_dispatch_request(clone); 1809 + blk_rq_pos(rq)); 1810 + dm_dispatch_clone_request(clone, rq); 1900 1811 break; 1901 1812 case DM_MAPIO_REQUEUE: 1902 1813 /* The target wants to requeue the I/O */ 1903 1814 dm_requeue_unmapped_request(clone); 1904 - requeued = 1; 1905 1815 break; 1906 1816 default: 1907 1817 if (r > 0) { ··· 1909 1821 } 1910 1822 1911 1823 /* The target wants to complete the I/O */ 1912 - dm_kill_unmapped_request(clone, r); 1913 - break; 1824 + dm_kill_unmapped_request(rq, r); 1825 + return r; 1914 1826 } 1915 1827 1916 - return requeued; 1828 + return 0; 1917 1829 } 1918 1830 1919 - static struct request *dm_start_request(struct mapped_device *md, struct request *orig) 1831 + static void map_tio_request(struct kthread_work *work) 1920 1832 { 1921 - struct request *clone; 1833 + struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work); 1834 + struct request *rq = tio->orig; 1835 + struct mapped_device *md = tio->md; 1922 1836 1837 + if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE) 1838 + dm_requeue_unmapped_original_request(md, rq); 1839 + } 1840 + 1841 + static void dm_start_request(struct mapped_device *md, struct request *orig) 1842 + { 1923 1843 blk_start_request(orig); 1924 - clone = orig->special; 1925 - atomic_inc(&md->pending[rq_data_dir(clone)]); 1844 + atomic_inc(&md->pending[rq_data_dir(orig)]); 1926 1845 1927 1846 /* 1928 1847 * Hold the md reference here for the in-flight I/O. ··· 1939 1844 * See the comment in rq_completed() too. 
1940 1845 */ 1941 1846 dm_get(md); 1942 - 1943 - return clone; 1944 1847 } 1945 1848 1946 1849 /* ··· 1951 1858 int srcu_idx; 1952 1859 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 1953 1860 struct dm_target *ti; 1954 - struct request *rq, *clone; 1861 + struct request *rq; 1862 + struct dm_rq_target_io *tio; 1955 1863 sector_t pos; 1956 1864 1957 1865 /* ··· 1974 1880 ti = dm_table_find_target(map, pos); 1975 1881 if (!dm_target_is_valid(ti)) { 1976 1882 /* 1977 - * Must perform setup, that dm_done() requires, 1883 + * Must perform setup, that rq_completed() requires, 1978 1884 * before calling dm_kill_unmapped_request 1979 1885 */ 1980 1886 DMERR_LIMIT("request attempted access beyond the end of device"); 1981 - clone = dm_start_request(md, rq); 1982 - dm_kill_unmapped_request(clone, -EIO); 1887 + dm_start_request(md, rq); 1888 + dm_kill_unmapped_request(rq, -EIO); 1983 1889 continue; 1984 1890 } 1985 1891 1986 1892 if (ti->type->busy && ti->type->busy(ti)) 1987 1893 goto delay_and_out; 1988 1894 1989 - clone = dm_start_request(md, rq); 1895 + dm_start_request(md, rq); 1990 1896 1991 - spin_unlock(q->queue_lock); 1992 - if (map_request(ti, clone, md)) 1993 - goto requeued; 1994 - 1897 + tio = rq->special; 1898 + /* Establish tio->ti before queuing work (map_tio_request) */ 1899 + tio->ti = ti; 1900 + queue_kthread_work(&md->kworker, &tio->work); 1995 1901 BUG_ON(!irqs_disabled()); 1996 - spin_lock(q->queue_lock); 1997 1902 } 1998 1903 1999 1904 goto out; 2000 - 2001 - requeued: 2002 - BUG_ON(!irqs_disabled()); 2003 - spin_lock(q->queue_lock); 2004 1905 2005 1906 delay_and_out: 2006 1907 blk_delay_queue(q, HZ / 10); ··· 2182 2093 INIT_WORK(&md->work, dm_wq_work); 2183 2094 init_waitqueue_head(&md->eventq); 2184 2095 init_completion(&md->kobj_holder.completion); 2096 + md->kworker_task = NULL; 2185 2097 2186 2098 md->disk->major = _major; 2187 2099 md->disk->first_minor = minor; ··· 2243 2153 unlock_fs(md); 2244 2154 bdput(md->bdev); 2245 2155 
destroy_workqueue(md->wq); 2156 + 2157 + if (md->kworker_task) 2158 + kthread_stop(md->kworker_task); 2246 2159 if (md->io_pool) 2247 2160 mempool_destroy(md->io_pool); 2161 + if (md->rq_pool) 2162 + mempool_destroy(md->rq_pool); 2248 2163 if (md->bs) 2249 2164 bioset_free(md->bs); 2250 2165 blk_integrity_unregister(md->disk); ··· 2283 2188 bioset_free(md->bs); 2284 2189 md->bs = p->bs; 2285 2190 p->bs = NULL; 2286 - } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) { 2287 - /* 2288 - * There's no need to reload with request-based dm 2289 - * because the size of front_pad doesn't change. 2290 - * Note for future: If you are to reload bioset, 2291 - * prep-ed requests in the queue may refer 2292 - * to bio from the old bioset, so you must walk 2293 - * through the queue to unprep. 2294 - */ 2295 2191 } 2192 + /* 2193 + * There's no need to reload with request-based dm 2194 + * because the size of front_pad doesn't change. 2195 + * Note for future: If you are to reload bioset, 2196 + * prep-ed requests in the queue may refer 2197 + * to bio from the old bioset, so you must walk 2198 + * through the queue to unprep. 
2199 + */ 2296 2200 goto out; 2297 2201 } 2298 2202 2299 - BUG_ON(!p || md->io_pool || md->bs); 2203 + BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); 2300 2204 2301 2205 md->io_pool = p->io_pool; 2302 2206 p->io_pool = NULL; 2207 + md->rq_pool = p->rq_pool; 2208 + p->rq_pool = NULL; 2303 2209 md->bs = p->bs; 2304 2210 p->bs = NULL; 2305 2211 ··· 2503 2407 return md->type; 2504 2408 } 2505 2409 2410 + static bool dm_md_type_request_based(struct mapped_device *md) 2411 + { 2412 + unsigned table_type = dm_get_md_type(md); 2413 + 2414 + return (table_type == DM_TYPE_REQUEST_BASED || 2415 + table_type == DM_TYPE_MQ_REQUEST_BASED); 2416 + } 2417 + 2506 2418 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2507 2419 { 2508 2420 return md->immutable_target_type; ··· 2548 2444 blk_queue_prep_rq(md->queue, dm_prep_fn); 2549 2445 blk_queue_lld_busy(md->queue, dm_lld_busy); 2550 2446 2447 + /* Also initialize the request-based DM worker thread */ 2448 + init_kthread_worker(&md->kworker); 2449 + md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2450 + "kdmwork-%s", dm_device_name(md)); 2451 + 2551 2452 elv_register_queue(md->queue); 2552 2453 2553 2454 return 1; ··· 2563 2454 */ 2564 2455 int dm_setup_md_queue(struct mapped_device *md) 2565 2456 { 2566 - if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && 2567 - !dm_init_request_based_queue(md)) { 2457 + if (dm_md_type_request_based(md) && !dm_init_request_based_queue(md)) { 2568 2458 DMWARN("Cannot initialize queue for request-based mapped device"); 2569 2459 return -EINVAL; 2570 2460 } ··· 2641 2533 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2642 2534 set_bit(DMF_FREEING, &md->flags); 2643 2535 spin_unlock(&_minor_lock); 2536 + 2537 + if (dm_request_based(md)) 2538 + flush_kthread_worker(&md->kworker); 2644 2539 2645 2540 if (!dm_suspended_md(md)) { 2646 2541 dm_table_presuspend_targets(map); ··· 2888 2777 * Stop md->queue before flushing md->wq in 
case request-based 2889 2778 * dm defers requests to md->wq from md->queue. 2890 2779 */ 2891 - if (dm_request_based(md)) 2780 + if (dm_request_based(md)) { 2892 2781 stop_queue(md->queue); 2782 + flush_kthread_worker(&md->kworker); 2783 + } 2893 2784 2894 2785 flush_workqueue(md->wq); 2895 2786 ··· 3237 3124 { 3238 3125 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); 3239 3126 struct kmem_cache *cachep; 3240 - unsigned int pool_size; 3127 + unsigned int pool_size = 0; 3241 3128 unsigned int front_pad; 3242 3129 3243 3130 if (!pools) 3244 3131 return NULL; 3245 3132 3246 - if (type == DM_TYPE_BIO_BASED) { 3133 + switch (type) { 3134 + case DM_TYPE_BIO_BASED: 3247 3135 cachep = _io_cache; 3248 3136 pool_size = dm_get_reserved_bio_based_ios(); 3249 3137 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); 3250 - } else if (type == DM_TYPE_REQUEST_BASED) { 3251 - cachep = _rq_tio_cache; 3138 + break; 3139 + case DM_TYPE_REQUEST_BASED: 3252 3140 pool_size = dm_get_reserved_rq_based_ios(); 3141 + pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); 3142 + if (!pools->rq_pool) 3143 + goto out; 3144 + /* fall through to setup remaining rq-based pools */ 3145 + case DM_TYPE_MQ_REQUEST_BASED: 3146 + cachep = _rq_tio_cache; 3147 + if (!pool_size) 3148 + pool_size = dm_get_reserved_rq_based_ios(); 3253 3149 front_pad = offsetof(struct dm_rq_clone_bio_info, clone); 3254 3150 /* per_bio_data_size is not used. See __bind_mempools(). */ 3255 3151 WARN_ON(per_bio_data_size != 0); 3256 - } else 3152 + break; 3153 + default: 3257 3154 goto out; 3155 + } 3258 3156 3259 3157 pools->io_pool = mempool_create_slab_pool(pool_size, cachep); 3260 3158 if (!pools->io_pool) ··· 3293 3169 3294 3170 if (pools->io_pool) 3295 3171 mempool_destroy(pools->io_pool); 3172 + 3173 + if (pools->rq_pool) 3174 + mempool_destroy(pools->rq_pool); 3296 3175 3297 3176 if (pools->bs) 3298 3177 bioset_free(pools->bs);
+7 -4
drivers/md/dm.h
··· 34 34 /* 35 35 * Type of table and mapped_device's mempool 36 36 */ 37 - #define DM_TYPE_NONE 0 38 - #define DM_TYPE_BIO_BASED 1 39 - #define DM_TYPE_REQUEST_BASED 2 37 + #define DM_TYPE_NONE 0 38 + #define DM_TYPE_BIO_BASED 1 39 + #define DM_TYPE_REQUEST_BASED 2 40 + #define DM_TYPE_MQ_REQUEST_BASED 3 40 41 41 42 /* 42 43 * List of devices that a metadevice uses and should open/close. ··· 74 73 unsigned dm_table_get_type(struct dm_table *t); 75 74 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); 76 75 bool dm_table_request_based(struct dm_table *t); 76 + bool dm_table_mq_request_based(struct dm_table *t); 77 77 void dm_table_free_md_mempools(struct dm_table *t); 78 78 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); 79 79 ··· 101 99 /* 102 100 * To check whether the target type is request-based or not (bio-based). 103 101 */ 104 - #define dm_target_request_based(t) ((t)->type->map_rq != NULL) 102 + #define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \ 103 + ((t)->type->clone_and_map_rq != NULL)) 105 104 106 105 /* 107 106 * To check whether the target type is a hybrid (capable of being
+7 -3
include/linux/device-mapper.h
··· 48 48 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); 49 49 typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, 50 50 union map_info *map_context); 51 + typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, 52 + struct request *rq, 53 + union map_info *map_context, 54 + struct request **clone); 55 + typedef void (*dm_release_clone_request_fn) (struct request *clone); 51 56 52 57 /* 53 58 * Returns: ··· 148 143 dm_dtr_fn dtr; 149 144 dm_map_fn map; 150 145 dm_map_request_fn map_rq; 146 + dm_clone_and_map_request_fn clone_and_map_rq; 147 + dm_release_clone_request_fn release_clone_rq; 151 148 dm_endio_fn end_io; 152 149 dm_request_endio_fn rq_end_io; 153 150 dm_presuspend_fn presuspend; ··· 607 600 /*----------------------------------------------------------------- 608 601 * Helper for block layer and dm core operations 609 602 *---------------------------------------------------------------*/ 610 - void dm_dispatch_request(struct request *rq); 611 - void dm_requeue_unmapped_request(struct request *rq); 612 - void dm_kill_unmapped_request(struct request *rq, int error); 613 603 int dm_underlying_device_busy(struct request_queue *q); 614 604 615 605 #endif /* _LINUX_DEVICE_MAPPER_H */
+2 -2
include/uapi/linux/dm-ioctl.h
··· 267 267 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 268 268 269 269 #define DM_VERSION_MAJOR 4 270 - #define DM_VERSION_MINOR 29 270 + #define DM_VERSION_MINOR 30 271 271 #define DM_VERSION_PATCHLEVEL 0 272 - #define DM_VERSION_EXTRA "-ioctl (2014-10-28)" 272 + #define DM_VERSION_EXTRA "-ioctl (2014-12-22)" 273 273 274 274 /* Status bits */ 275 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */