dm: support barriers on simple devices

Implement barrier support for single-device DM devices

This patch implements barrier support in DM for the common case of a
dm-linear target that just remaps a single underlying device. In that
case the barrier can safely be passed through, because there is no
possibility of reordering between devices.
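
In outline, the result is equivalent to the following check (a condensed
sketch of the logic spread across the hunks below; the helper name
table_can_pass_barriers() is illustrative only, the patch itself computes
this incrementally, clearing barriers_supported as targets are added and
again when the table is completed):

  static int table_can_pass_barriers(struct dm_table *t)
  {
  	unsigned int i;

  	/* Every target in the table must advertise barrier support ... */
  	for (i = 0; i < t->num_targets; i++) {
  		struct dm_target *tgt = t->targets + i;

  		if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
  			return 0;
  	}

  	/* ... and the table must map exactly one underlying device,
  	 * so requests cannot be reordered between devices. */
  	return list_is_singular(&t->devices);
  }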

NB. Any DM device might cease to support barriers if it gets
reconfigured, so code must continue to allow for a possible
-EOPNOTSUPP on every barrier bio it submits. - agk
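
For example, a caller flushing a DM device through blkdev_issue_flush(),
which can likewise return -EOPNOTSUPP when the device cannot honour the
request, still needs a fallback path after this patch. A minimal sketch,
with the function name flush_or_fall_back() purely illustrative and not
part of this patch:

  #include <linux/blkdev.h>

  /*
   * Illustrative only: barrier/flush users must keep handling
   * -EOPNOTSUPP, because a table reload may remove barrier support
   * from a DM device at any time.
   */
  static int flush_or_fall_back(struct block_device *bdev)
  {
  	int err = blkdev_issue_flush(bdev, NULL);

  	if (err == -EOPNOTSUPP) {
  		/* The device (or the DM table currently loaded on it)
  		 * cannot honour the request; degrade gracefully rather
  		 * than treating this as a hard I/O error. */
  		err = 0;
  	}
  	return err;
  }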

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

5 files changed, 33 insertions(+), 10 deletions(-)

drivers/md/dm-linear.c (+1)
···
 	.status = linear_status,
 	.ioctl = linear_ioctl,
 	.merge = linear_merge,
+	.features = DM_TARGET_SUPPORTS_BARRIERS,
 };
 
 int __init dm_linear_init(void)
drivers/md/dm-table.c (+19)
···
 	sector_t *highs;
 	struct dm_target *targets;
 
+	unsigned barriers_supported:1;
+
 	/*
 	 * Indicates the rw permissions for the new logical
 	 * device. This should be a combination of FMODE_READ
···
 	INIT_LIST_HEAD(&t->devices);
 	atomic_set(&t->holders, 1);
+	t->barriers_supported = 1;
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
···
 	/* FIXME: the plan is to combine high here and then have
 	 * the merge fn apply the target level restrictions. */
 	combine_restrictions_low(&t->limits, &tgt->limits);
+
+	if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
+		t->barriers_supported = 0;
+
 	return 0;
 
 bad:
···
 	unsigned int leaf_nodes;
 
 	check_for_valid_limits(&t->limits);
+
+	/*
+	 * We only support barriers if there is exactly one underlying device.
+	 */
+	if (!list_is_singular(&t->devices))
+		t->barriers_supported = 0;
 
 	/* how many indexes will the btree have ? */
 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
···
 
 	return t->md;
 }
+
+int dm_table_barrier_ok(struct dm_table *t)
+{
+	return t->barriers_supported;
+}
+EXPORT_SYMBOL(dm_table_barrier_ok);
 
 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
drivers/md/dm.c (+5 -10)
···
 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
 		return -EIO;
-
+	if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
+		dm_table_put(ci.map);
+		bio_endio(bio, -EOPNOTSUPP);
+		return 0;
+	}
 	ci.md = md;
 	ci.bio = bio;
 	ci.io = alloc_io(md);
···
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
 	int cpu;
-
-	/*
-	 * There is no use in forwarding any barrier request since we can't
-	 * guarantee it is (or can be) handled by the targets correctly.
-	 */
-	if (unlikely(bio_barrier(bio))) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
 
 	down_read(&md->io_lock);
drivers/md/dm.h (+1)
···
  * To check the return value from dm_table_find_target().
  */
 #define dm_target_is_valid(t) ((t)->table)
+int dm_table_barrier_ok(struct dm_table *t);
 
 /*-----------------------------------------------------------------
  * A registry of target types.
include/linux/device-mapper.h (+7)
···
 /*
  * Information about a target type
  */
+
+/*
+ * Target features
+ */
+#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001
+
 struct target_type {
+	uint64_t features;
 	const char *name;
 	struct module *module;
 	unsigned version[3];