Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: add full blk-mq support to request-based DM

Commit e5863d9ad ("dm: allocate requests in target when stacking on
blk-mq devices") served as the first step toward fully utilizing blk-mq
in request-based DM -- it enabled stacking an old-style (request_fn)
request_queue ontop of the underlying blk-mq device(s). That first step
didn't improve performance of DM multipath on top of fast blk-mq devices
(e.g. NVMe) because the top-level old-style request_queue was severely
limited by the queue_lock.

The second step offered here enables stacking a blk-mq request_queue
on top of the underlying blk-mq device(s). This unlocks significant
performance gains on fast blk-mq devices. Keith Busch tested on his NVMe
testbed and offered this really positive news:

"Just providing a performance update. All my fio tests are getting
roughly equal performance whether accessed through the raw block
device or the multipath device mapper (~470k IOPS). I could only push
~20% of the raw iops through dm before this conversion, so this latest
tree is looking really solid from a performance standpoint."

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Tested-by: Keith Busch <keith.busch@intel.com>

+261 -73
+1 -1
drivers/md/dm-mpath.c
··· 1703 1703 *---------------------------------------------------------------*/ 1704 1704 static struct target_type multipath_target = { 1705 1705 .name = "multipath", 1706 - .version = {1, 8, 0}, 1706 + .version = {1, 9, 0}, 1707 1707 .module = THIS_MODULE, 1708 1708 .ctr = multipath_ctr, 1709 1709 .dtr = multipath_dtr,
+8 -3
drivers/md/dm-table.c
··· 18 18 #include <linux/mutex.h> 19 19 #include <linux/delay.h> 20 20 #include <linux/atomic.h> 21 + #include <linux/blk-mq.h> 21 22 22 23 #define DM_MSG_PREFIX "table" 23 24 ··· 1696 1695 md = dm_table_get_md(t); 1697 1696 queue = dm_get_md_queue(md); 1698 1697 if (queue) { 1699 - spin_lock_irqsave(queue->queue_lock, flags); 1700 - blk_run_queue_async(queue); 1701 - spin_unlock_irqrestore(queue->queue_lock, flags); 1698 + if (queue->mq_ops) 1699 + blk_mq_run_hw_queues(queue, true); 1700 + else { 1701 + spin_lock_irqsave(queue->queue_lock, flags); 1702 + blk_run_queue_async(queue); 1703 + spin_unlock_irqrestore(queue->queue_lock, flags); 1704 + } 1702 1705 } 1703 1706 } 1704 1707 EXPORT_SYMBOL(dm_table_run_md_queue_async);
+250 -67
drivers/md/dm.c
··· 23 23 #include <linux/kthread.h> 24 24 #include <linux/ktime.h> 25 25 #include <linux/elevator.h> /* for rq_end_sector() */ 26 + #include <linux/blk-mq.h> 26 27 27 28 #include <trace/events/block.h> 28 29 ··· 225 224 int last_rq_rw; 226 225 sector_t last_rq_pos; 227 226 ktime_t last_rq_start_time; 227 + 228 + /* for blk-mq request-based DM support */ 229 + struct blk_mq_tag_set tag_set; 228 230 }; 229 231 230 232 /* ··· 1029 1025 blk_update_request(tio->orig, 0, nr_bytes); 1030 1026 } 1031 1027 1028 + static struct dm_rq_target_io *tio_from_request(struct request *rq) 1029 + { 1030 + return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); 1031 + } 1032 + 1032 1033 /* 1033 1034 * Don't touch any member of the md after calling this function because 1034 1035 * the md may be freed in dm_put() at the end of this function. ··· 1057 1048 * queue lock again. 1058 1049 */ 1059 1050 if (run_queue) { 1060 - if (!nr_requests_pending || 1061 - (nr_requests_pending >= md->queue->nr_congestion_on)) 1051 + if (md->queue->mq_ops) 1052 + blk_mq_run_hw_queues(md->queue, true); 1053 + else if (!nr_requests_pending || 1054 + (nr_requests_pending >= md->queue->nr_congestion_on)) 1062 1055 blk_run_queue_async(md->queue); 1063 1056 } 1064 1057 ··· 1073 1062 static void free_rq_clone(struct request *clone) 1074 1063 { 1075 1064 struct dm_rq_target_io *tio = clone->end_io_data; 1065 + struct mapped_device *md = tio->md; 1076 1066 1077 1067 blk_rq_unprep_clone(clone); 1068 + 1078 1069 if (clone->q && clone->q->mq_ops) 1079 1070 tio->ti->type->release_clone_rq(clone); 1080 1071 else 1081 - free_clone_request(tio->md, clone); 1082 - free_rq_tio(tio); 1072 + free_clone_request(md, clone); 1073 + 1074 + if (!md->queue->mq_ops) 1075 + free_rq_tio(tio); 1083 1076 } 1084 1077 1085 1078 /* ··· 1112 1097 } 1113 1098 1114 1099 free_rq_clone(clone); 1115 - blk_end_request_all(rq, error); 1100 + if (!rq->q->mq_ops) 1101 + blk_end_request_all(rq, error); 1102 + else 1103 + 
blk_mq_end_request(rq, error); 1116 1104 rq_completed(md, rw, true); 1117 1105 } 1118 1106 1119 1107 static void dm_unprep_request(struct request *rq) 1120 1108 { 1121 - struct dm_rq_target_io *tio = rq->special; 1109 + struct dm_rq_target_io *tio = tio_from_request(rq); 1122 1110 struct request *clone = tio->clone; 1123 1111 1124 - rq->special = NULL; 1125 - rq->cmd_flags &= ~REQ_DONTPREP; 1112 + if (!rq->q->mq_ops) { 1113 + rq->special = NULL; 1114 + rq->cmd_flags &= ~REQ_DONTPREP; 1115 + } 1126 1116 1127 1117 if (clone) 1128 1118 free_rq_clone(clone); ··· 1136 1116 /* 1137 1117 * Requeue the original request of a clone. 1138 1118 */ 1139 - static void dm_requeue_unmapped_original_request(struct mapped_device *md, 1140 - struct request *rq) 1119 + static void old_requeue_request(struct request *rq) 1141 1120 { 1142 - int rw = rq_data_dir(rq); 1143 1121 struct request_queue *q = rq->q; 1144 1122 unsigned long flags; 1145 - 1146 - dm_unprep_request(rq); 1147 1123 1148 1124 spin_lock_irqsave(q->queue_lock, flags); 1149 1125 blk_requeue_request(q, rq); 1150 1126 spin_unlock_irqrestore(q->queue_lock, flags); 1127 + } 1128 + 1129 + static void dm_requeue_unmapped_original_request(struct mapped_device *md, 1130 + struct request *rq) 1131 + { 1132 + int rw = rq_data_dir(rq); 1133 + 1134 + dm_unprep_request(rq); 1135 + 1136 + if (!rq->q->mq_ops) 1137 + old_requeue_request(rq); 1138 + else { 1139 + blk_mq_requeue_request(rq); 1140 + blk_mq_kick_requeue_list(rq->q); 1141 + } 1151 1142 1152 1143 rq_completed(md, rw, false); 1153 1144 } ··· 1170 1139 dm_requeue_unmapped_original_request(tio->md, tio->orig); 1171 1140 } 1172 1141 1173 - static void __stop_queue(struct request_queue *q) 1142 + static void old_stop_queue(struct request_queue *q) 1174 1143 { 1144 + unsigned long flags; 1145 + 1146 + if (blk_queue_stopped(q)) 1147 + return; 1148 + 1149 + spin_lock_irqsave(q->queue_lock, flags); 1175 1150 blk_stop_queue(q); 1151 + spin_unlock_irqrestore(q->queue_lock, flags); 1176 
1152 } 1177 1153 1178 1154 static void stop_queue(struct request_queue *q) 1179 1155 { 1156 + if (!q->mq_ops) 1157 + old_stop_queue(q); 1158 + else 1159 + blk_mq_stop_hw_queues(q); 1160 + } 1161 + 1162 + static void old_start_queue(struct request_queue *q) 1163 + { 1180 1164 unsigned long flags; 1181 1165 1182 1166 spin_lock_irqsave(q->queue_lock, flags); 1183 - __stop_queue(q); 1184 - spin_unlock_irqrestore(q->queue_lock, flags); 1185 - } 1186 - 1187 - static void __start_queue(struct request_queue *q) 1188 - { 1189 1167 if (blk_queue_stopped(q)) 1190 1168 blk_start_queue(q); 1169 + spin_unlock_irqrestore(q->queue_lock, flags); 1191 1170 } 1192 1171 1193 1172 static void start_queue(struct request_queue *q) 1194 1173 { 1195 - unsigned long flags; 1196 - 1197 - spin_lock_irqsave(q->queue_lock, flags); 1198 - __start_queue(q); 1199 - spin_unlock_irqrestore(q->queue_lock, flags); 1174 + if (!q->mq_ops) 1175 + old_start_queue(q); 1176 + else 1177 + blk_mq_start_stopped_hw_queues(q, true); 1200 1178 } 1201 1179 1202 1180 static void dm_done(struct request *clone, int error, bool mapped) ··· 1246 1206 static void dm_softirq_done(struct request *rq) 1247 1207 { 1248 1208 bool mapped = true; 1249 - struct dm_rq_target_io *tio = rq->special; 1209 + struct dm_rq_target_io *tio = tio_from_request(rq); 1250 1210 struct request *clone = tio->clone; 1211 + int rw; 1251 1212 1252 1213 if (!clone) { 1253 - blk_end_request_all(rq, tio->error); 1254 - rq_completed(tio->md, rq_data_dir(rq), false); 1255 - free_rq_tio(tio); 1214 + rw = rq_data_dir(rq); 1215 + if (!rq->q->mq_ops) { 1216 + blk_end_request_all(rq, tio->error); 1217 + rq_completed(tio->md, rw, false); 1218 + free_rq_tio(tio); 1219 + } else { 1220 + blk_mq_end_request(rq, tio->error); 1221 + rq_completed(tio->md, rw, false); 1222 + } 1256 1223 return; 1257 1224 } 1258 1225 ··· 1275 1228 */ 1276 1229 static void dm_complete_request(struct request *rq, int error) 1277 1230 { 1278 - struct dm_rq_target_io *tio = rq->special; 
1231 + struct dm_rq_target_io *tio = tio_from_request(rq); 1279 1232 1280 1233 tio->error = error; 1281 1234 blk_complete_request(rq); ··· 1294 1247 } 1295 1248 1296 1249 /* 1297 - * Called with the clone's queue lock held 1250 + * Called with the clone's queue lock held (for non-blk-mq) 1298 1251 */ 1299 1252 static void end_clone_request(struct request *clone, int error) 1300 1253 { ··· 1855 1808 1856 1809 static void map_tio_request(struct kthread_work *work); 1857 1810 1811 + static void init_tio(struct dm_rq_target_io *tio, struct request *rq, 1812 + struct mapped_device *md) 1813 + { 1814 + tio->md = md; 1815 + tio->ti = NULL; 1816 + tio->clone = NULL; 1817 + tio->orig = rq; 1818 + tio->error = 0; 1819 + memset(&tio->info, 0, sizeof(tio->info)); 1820 + init_kthread_work(&tio->work, map_tio_request); 1821 + } 1822 + 1858 1823 static struct dm_rq_target_io *prep_tio(struct request *rq, 1859 1824 struct mapped_device *md, gfp_t gfp_mask) 1860 1825 { ··· 1878 1819 if (!tio) 1879 1820 return NULL; 1880 1821 1881 - tio->md = md; 1882 - tio->ti = NULL; 1883 - tio->clone = NULL; 1884 - tio->orig = rq; 1885 - tio->error = 0; 1886 - memset(&tio->info, 0, sizeof(tio->info)); 1887 - init_kthread_work(&tio->work, map_tio_request); 1822 + init_tio(tio, rq, md); 1888 1823 1889 1824 table = dm_get_live_table(md, &srcu_idx); 1890 1825 if (!dm_table_mq_request_based(table)) { ··· 1922 1869 * DM_MAPIO_REQUEUE : the original request needs to be requeued 1923 1870 * < 0 : the request was completed due to failure 1924 1871 */ 1925 - static int map_request(struct dm_target *ti, struct request *rq, 1872 + static int map_request(struct dm_rq_target_io *tio, struct request *rq, 1926 1873 struct mapped_device *md) 1927 1874 { 1928 1875 int r; 1929 - struct dm_rq_target_io *tio = rq->special; 1876 + struct dm_target *ti = tio->ti; 1930 1877 struct request *clone = NULL; 1931 1878 1932 1879 if (tio->clone) { ··· 1941 1888 } 1942 1889 if (IS_ERR(clone)) 1943 1890 return DM_MAPIO_REQUEUE; 
1944 - if (setup_clone(clone, rq, tio, GFP_KERNEL)) { 1891 + if (setup_clone(clone, rq, tio, GFP_NOIO)) { 1945 1892 /* -ENOMEM */ 1946 1893 ti->type->release_clone_rq(clone); 1947 1894 return DM_MAPIO_REQUEUE; ··· 1982 1929 struct request *rq = tio->orig; 1983 1930 struct mapped_device *md = tio->md; 1984 1931 1985 - if (map_request(tio->ti, rq, md) == DM_MAPIO_REQUEUE) 1932 + if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) 1986 1933 dm_requeue_unmapped_original_request(md, rq); 1987 1934 } 1988 1935 1989 1936 static void dm_start_request(struct mapped_device *md, struct request *orig) 1990 1937 { 1991 - blk_start_request(orig); 1938 + if (!orig->q->mq_ops) 1939 + blk_start_request(orig); 1940 + else 1941 + blk_mq_start_request(orig); 1992 1942 atomic_inc(&md->pending[rq_data_dir(orig)]); 1993 1943 1994 1944 if (md->seq_rq_merge_deadline_usecs) { ··· 2101 2045 2102 2046 dm_start_request(md, rq); 2103 2047 2104 - tio = rq->special; 2048 + tio = tio_from_request(rq); 2105 2049 /* Establish tio->ti before queuing work (map_tio_request) */ 2106 2050 tio->ti = ti; 2107 2051 queue_kthread_work(&md->kworker, &tio->work); ··· 2198 2142 { 2199 2143 /* 2200 2144 * Request-based dm devices cannot be stacked on top of bio-based dm 2201 - * devices. The type of this dm device has not been decided yet. 2145 + * devices. The type of this dm device may not have been decided yet. 2202 2146 * The type is decided at the first table loading time. 2203 2147 * To prevent problematic device stacking, clear the queue flag 2204 2148 * for request stacking support until then. ··· 2206 2150 * This queue is new, so no concurrency on the queue_flags. 
2207 2151 */ 2208 2152 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 2153 + } 2209 2154 2155 + static void dm_init_old_md_queue(struct mapped_device *md) 2156 + { 2157 + dm_init_md_queue(md); 2158 + 2159 + /* 2160 + * Initialize aspects of queue that aren't relevant for blk-mq 2161 + */ 2210 2162 md->queue->queuedata = md; 2211 2163 md->queue->backing_dev_info.congested_fn = dm_any_congested; 2212 2164 md->queue->backing_dev_info.congested_data = md; ··· 2337 2273 static void free_dev(struct mapped_device *md) 2338 2274 { 2339 2275 int minor = MINOR(disk_devt(md->disk)); 2276 + bool using_blk_mq = !!md->queue->mq_ops; 2340 2277 2341 2278 unlock_fs(md); 2342 2279 destroy_workqueue(md->wq); ··· 2363 2298 del_gendisk(md->disk); 2364 2299 put_disk(md->disk); 2365 2300 blk_cleanup_queue(md->queue); 2301 + if (using_blk_mq) 2302 + blk_mq_free_tag_set(&md->tag_set); 2366 2303 bdput(md->bdev); 2367 2304 free_minor(minor); 2368 2305 ··· 2524 2457 * This must be done before setting the queue restrictions, 2525 2458 * because request-based dm may be run just after the setting. 
2526 2459 */ 2527 - if (dm_table_request_based(t) && !blk_queue_stopped(q)) 2460 + if (dm_table_request_based(t)) 2528 2461 stop_queue(q); 2529 2462 2530 2463 __bind_mempools(md, t); ··· 2606 2539 return md->type; 2607 2540 } 2608 2541 2609 - static bool dm_md_type_request_based(struct mapped_device *md) 2610 - { 2611 - unsigned table_type = dm_get_md_type(md); 2612 - 2613 - return (table_type == DM_TYPE_REQUEST_BASED || 2614 - table_type == DM_TYPE_MQ_REQUEST_BASED); 2615 - } 2616 - 2617 2542 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) 2618 2543 { 2619 2544 return md->immutable_target_type; ··· 2622 2563 } 2623 2564 EXPORT_SYMBOL_GPL(dm_get_queue_limits); 2624 2565 2566 + static void init_rq_based_worker_thread(struct mapped_device *md) 2567 + { 2568 + /* Initialize the request-based DM worker thread */ 2569 + init_kthread_worker(&md->kworker); 2570 + md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2571 + "kdmwork-%s", dm_device_name(md)); 2572 + } 2573 + 2625 2574 /* 2626 2575 * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
2627 2576 */ ··· 2638 2571 struct request_queue *q = NULL; 2639 2572 2640 2573 if (md->queue->elevator) 2641 - return 1; 2574 + return 0; 2642 2575 2643 2576 /* Fully initialize the queue */ 2644 2577 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 2645 2578 if (!q) 2646 - return 0; 2579 + return -EINVAL; 2647 2580 2648 2581 /* disable dm_request_fn's merge heuristic by default */ 2649 2582 md->seq_rq_merge_deadline_usecs = 0; 2650 2583 2651 2584 md->queue = q; 2652 - dm_init_md_queue(md); 2585 + dm_init_old_md_queue(md); 2653 2586 blk_queue_softirq_done(md->queue, dm_softirq_done); 2654 2587 blk_queue_prep_rq(md->queue, dm_prep_fn); 2655 2588 2656 - /* Also initialize the request-based DM worker thread */ 2657 - init_kthread_worker(&md->kworker); 2658 - md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker, 2659 - "kdmwork-%s", dm_device_name(md)); 2589 + init_rq_based_worker_thread(md); 2660 2590 2661 2591 elv_register_queue(md->queue); 2662 2592 2663 - return 1; 2593 + return 0; 2594 + } 2595 + 2596 + static int dm_mq_init_request(void *data, struct request *rq, 2597 + unsigned int hctx_idx, unsigned int request_idx, 2598 + unsigned int numa_node) 2599 + { 2600 + struct mapped_device *md = data; 2601 + struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2602 + 2603 + /* 2604 + * Must initialize md member of tio, otherwise it won't 2605 + * be available in dm_mq_queue_rq. 
2606 + */ 2607 + tio->md = md; 2608 + 2609 + return 0; 2610 + } 2611 + 2612 + static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 2613 + const struct blk_mq_queue_data *bd) 2614 + { 2615 + struct request *rq = bd->rq; 2616 + struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq); 2617 + struct mapped_device *md = tio->md; 2618 + int srcu_idx; 2619 + struct dm_table *map = dm_get_live_table(md, &srcu_idx); 2620 + struct dm_target *ti; 2621 + sector_t pos; 2622 + 2623 + /* always use block 0 to find the target for flushes for now */ 2624 + pos = 0; 2625 + if (!(rq->cmd_flags & REQ_FLUSH)) 2626 + pos = blk_rq_pos(rq); 2627 + 2628 + ti = dm_table_find_target(map, pos); 2629 + if (!dm_target_is_valid(ti)) { 2630 + dm_put_live_table(md, srcu_idx); 2631 + DMERR_LIMIT("request attempted access beyond the end of device"); 2632 + /* 2633 + * Must perform setup, that rq_completed() requires, 2634 + * before returning BLK_MQ_RQ_QUEUE_ERROR 2635 + */ 2636 + dm_start_request(md, rq); 2637 + return BLK_MQ_RQ_QUEUE_ERROR; 2638 + } 2639 + dm_put_live_table(md, srcu_idx); 2640 + 2641 + if (ti->type->busy && ti->type->busy(ti)) 2642 + return BLK_MQ_RQ_QUEUE_BUSY; 2643 + 2644 + dm_start_request(md, rq); 2645 + 2646 + /* Init tio using md established in .init_request */ 2647 + init_tio(tio, rq, md); 2648 + 2649 + /* Establish tio->ti before queuing work (map_tio_request) */ 2650 + tio->ti = ti; 2651 + queue_kthread_work(&md->kworker, &tio->work); 2652 + 2653 + return BLK_MQ_RQ_QUEUE_OK; 2654 + } 2655 + 2656 + static struct blk_mq_ops dm_mq_ops = { 2657 + .queue_rq = dm_mq_queue_rq, 2658 + .map_queue = blk_mq_map_queue, 2659 + .complete = dm_softirq_done, 2660 + .init_request = dm_mq_init_request, 2661 + }; 2662 + 2663 + static int dm_init_request_based_blk_mq_queue(struct mapped_device *md) 2664 + { 2665 + struct request_queue *q; 2666 + int err; 2667 + 2668 + memset(&md->tag_set, 0, sizeof(md->tag_set)); 2669 + md->tag_set.ops = &dm_mq_ops; 2670 + md->tag_set.queue_depth = 
BLKDEV_MAX_RQ; 2671 + md->tag_set.numa_node = NUMA_NO_NODE; 2672 + md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; 2673 + md->tag_set.nr_hw_queues = 1; 2674 + md->tag_set.cmd_size = sizeof(struct dm_rq_target_io); 2675 + md->tag_set.driver_data = md; 2676 + 2677 + err = blk_mq_alloc_tag_set(&md->tag_set); 2678 + if (err) 2679 + return err; 2680 + 2681 + q = blk_mq_init_allocated_queue(&md->tag_set, md->queue); 2682 + if (IS_ERR(q)) { 2683 + err = PTR_ERR(q); 2684 + goto out_tag_set; 2685 + } 2686 + md->queue = q; 2687 + dm_init_md_queue(md); 2688 + 2689 + /* backfill 'mq' sysfs registration normally done in blk_register_queue */ 2690 + blk_mq_register_disk(md->disk); 2691 + 2692 + init_rq_based_worker_thread(md); 2693 + 2694 + return 0; 2695 + 2696 + out_tag_set: 2697 + blk_mq_free_tag_set(&md->tag_set); 2698 + return err; 2664 2699 } 2665 2700 2666 2701 /* ··· 2770 2601 */ 2771 2602 int dm_setup_md_queue(struct mapped_device *md) 2772 2603 { 2773 - if (dm_md_type_request_based(md)) { 2774 - if (!dm_init_request_based_queue(md)) { 2604 + int r; 2605 + unsigned md_type = dm_get_md_type(md); 2606 + 2607 + switch (md_type) { 2608 + case DM_TYPE_REQUEST_BASED: 2609 + r = dm_init_request_based_queue(md); 2610 + if (r) { 2775 2611 DMWARN("Cannot initialize queue for request-based mapped device"); 2776 - return -EINVAL; 2612 + return r; 2777 2613 } 2778 - } else { 2779 - /* bio-based specific initialization */ 2614 + break; 2615 + case DM_TYPE_MQ_REQUEST_BASED: 2616 + r = dm_init_request_based_blk_mq_queue(md); 2617 + if (r) { 2618 + DMWARN("Cannot initialize queue for request-based blk-mq mapped device"); 2619 + return r; 2620 + } 2621 + break; 2622 + case DM_TYPE_BIO_BASED: 2623 + dm_init_old_md_queue(md); 2780 2624 blk_queue_make_request(md->queue, dm_make_request); 2781 2625 blk_queue_merge_bvec(md->queue, dm_merge_bvec); 2626 + break; 2782 2627 } 2783 2628 2784 2629 return 0;
+2 -2
include/uapi/linux/dm-ioctl.h
··· 267 267 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 268 268 269 269 #define DM_VERSION_MAJOR 4 270 - #define DM_VERSION_MINOR 30 270 + #define DM_VERSION_MINOR 31 271 271 #define DM_VERSION_PATCHLEVEL 0 272 - #define DM_VERSION_EXTRA "-ioctl (2014-12-22)" 272 + #define DM_VERSION_EXTRA "-ioctl (2015-3-12)" 273 273 274 274 /* Status bits */ 275 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */