dm: support barriers on simple devices

Implement barrier support for DM devices that map a single underlying device

This patch implements barrier support in DM for the common case of a dm-linear
target remapping a single underlying device. In that case we can safely
pass the barrier through, because there can be no reordering between
devices.
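
Concretely, the opt-in is per target type: a target advertises the new DM_TARGET_SUPPORTS_BARRIERS flag in its target_type, and the table code below then reports barrier support only when every target in the table sets the flag and exactly one underlying device is involved. As a rough sketch of what a target written against this interface could look like (not part of this patch; the my_* names and "my-simple" are hypothetical placeholders):

#include <linux/module.h>
#include <linux/device-mapper.h>

/*
 * Hedged sketch, not from this patch: a minimal single-device target
 * opting in to barrier pass-through.  The my_* callbacks are hypothetical
 * no-ops, present only to keep the fragment self-contained.
 */
static int my_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	return 0;
}

static void my_dtr(struct dm_target *ti)
{
}

static int my_map(struct dm_target *ti, struct bio *bio,
		  union map_info *map_context)
{
	/* A real target would point bio->bi_bdev at its one device here. */
	return DM_MAPIO_REMAPPED;
}

static struct target_type my_simple_target = {
	.name    = "my-simple",
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = my_ctr,
	.dtr     = my_dtr,
	.map     = my_map,
	/*
	 * Opt in: every bio, including a barrier, is forwarded unchanged
	 * to a single device, so DM may pass barriers through.
	 */
	.features = DM_TARGET_SUPPORTS_BARRIERS,
};

dm-linear's one-line change below is exactly this: it already forwards every bio unmodified to its single underlying device, so it only needs to set the flag in its existing target_type.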

NB. Any DM device might cease to support barriers if it gets
reconfigured, so code must continue to allow for a possible
-EOPNOTSUPP on every barrier bio submitted. - agk
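
As a rough illustration of that rule (nothing here is added by the patch; struct my_barrier_ctx and my_barrier_end_io are hypothetical names), a submitter's completion handler could watch for the error and remember to fall back:

#include <linux/errno.h>
#include <linux/bio.h>
#include <linux/completion.h>

/*
 * Hedged sketch, not part of this patch: detect that a barrier bio was
 * rejected with -EOPNOTSUPP so the caller can stop using barriers.
 */
struct my_barrier_ctx {
	struct completion done;
	int error;
	int barriers_unsupported;	/* set once barriers must be avoided */
};

static void my_barrier_end_io(struct bio *bio, int error)
{
	struct my_barrier_ctx *ctx = bio->bi_private;

	if (error == -EOPNOTSUPP && bio_barrier(bio)) {
		/*
		 * The DM table may have been reloaded with a target (or a
		 * multi-device layout) that cannot order requests.  Record
		 * that; the caller should reissue a fresh bio without the
		 * barrier flag and enforce ordering some other way.
		 */
		ctx->barriers_unsupported = 1;
	}

	ctx->error = error;
	bio_put(bio);
	complete(&ctx->done);
}

The caller would set bio->bi_end_io to my_barrier_end_io and bi_private to the context before submitting with the BIO_RW_BARRIER bit set, then retry without the barrier if barriers_unsupported gets set. ext3's journalling code behaves much like this: on the first -EOPNOTSUPP from a barrier write it warns, disables barriers for that journal, and reissues the write as an ordinary one.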

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>

Authored by Andi Kleen; committed by Alasdair G Kergon
ab4c1424 7d76345d

5 files changed, 33 insertions(+), 10 deletions(-)
drivers/md/dm-linear.c (+1)

 	.status = linear_status,
 	.ioctl = linear_ioctl,
 	.merge = linear_merge,
+	.features = DM_TARGET_SUPPORTS_BARRIERS,
 };

 int __init dm_linear_init(void)
drivers/md/dm-table.c (+19)

 	sector_t *highs;
 	struct dm_target *targets;

+	unsigned barriers_supported:1;
+
 	/*
 	 * Indicates the rw permissions for the new logical
 	 * device. This should be a combination of FMODE_READ
···
 	INIT_LIST_HEAD(&t->devices);
 	atomic_set(&t->holders, 1);
+	t->barriers_supported = 1;

 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
···
 	/* FIXME: the plan is to combine high here and then have
 	 * the merge fn apply the target level restrictions. */
 	combine_restrictions_low(&t->limits, &tgt->limits);
+
+	if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS))
+		t->barriers_supported = 0;
+
 	return 0;

 bad:
···
 	unsigned int leaf_nodes;

 	check_for_valid_limits(&t->limits);
+
+	/*
+	 * We only support barriers if there is exactly one underlying device.
+	 */
+	if (!list_is_singular(&t->devices))
+		t->barriers_supported = 0;

 	/* how many indexes will the btree have ? */
 	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
···
 	return t->md;
 }
+
+int dm_table_barrier_ok(struct dm_table *t)
+{
+	return t->barriers_supported;
+}
+EXPORT_SYMBOL(dm_table_barrier_ok);

 EXPORT_SYMBOL(dm_vcalloc);
 EXPORT_SYMBOL(dm_get_device);
drivers/md/dm.c (+5, -10)

 	ci.map = dm_get_table(md);
 	if (unlikely(!ci.map))
 		return -EIO;
-
+	if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
+		dm_table_put(ci.map);
+		bio_endio(bio, -EOPNOTSUPP);
+		return 0;
+	}
 	ci.md = md;
 	ci.bio = bio;
 	ci.io = alloc_io(md);
···
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
 	int cpu;
-
-	/*
-	 * There is no use in forwarding any barrier request since we can't
-	 * guarantee it is (or can be) handled by the targets correctly.
-	 */
-	if (unlikely(bio_barrier(bio))) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}

 	down_read(&md->io_lock);
drivers/md/dm.h (+1)

  * To check the return value from dm_table_find_target().
  */
 #define dm_target_is_valid(t) ((t)->table)
+int dm_table_barrier_ok(struct dm_table *t);

 /*-----------------------------------------------------------------
  * A registry of target types.
include/linux/device-mapper.h (+7)

 /*
  * Information about a target type
  */
+
+/*
+ * Target features
+ */
+#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001
+
 struct target_type {
+	uint64_t features;
 	const char *name;
 	struct module *module;
 	unsigned version[3];