Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm: (33 commits)
dm mpath: support discard
dm stripe: support discards
dm: split discard requests on target boundaries
dm stripe: optimize sector division
dm stripe: move sector translation to a function
dm: error return error for discards
dm delay: support discard
dm: zero silently drop discards
dm: use dm_target_offset macro
dm: factor out max_io_len_target_boundary
dm: use common __issue_target_request for flush and discard support
dm: linear support discard
dm crypt: simplify crypt_ctr
dm crypt: simplify crypt_config destruction logic
dm: allow autoloading of dm mod
dm: rename map_info flush_request to target_request_nr
dm ioctl: refactor dm_table_complete
dm snapshot: implement merge
dm: do not initialise full request queue when bio based
dm ioctl: make bio or request based device type immutable
...

+825 -389
+1
Documentation/devices.txt
···
445   233 = /dev/kmview           View-OS A process with a view
446   234 = /dev/btrfs-control    Btrfs control device
447   235 = /dev/autofs           Autofs control device
448   240-254                     Reserved for local use
449   255                         Reserved for MISC_DYNAMIC_MINOR
450
···
445   233 = /dev/kmview           View-OS A process with a view
446   234 = /dev/btrfs-control    Btrfs control device
447   235 = /dev/autofs           Autofs control device
448 + 236 = /dev/mapper/control   Device-Mapper control device
449   240-254                     Reserved for local use
450   255                         Reserved for MISC_DYNAMIC_MINOR
451
+187 -159
drivers/md/dm-crypt.c
··· 107 struct workqueue_struct *io_queue; 108 struct workqueue_struct *crypt_queue; 109 110 - /* 111 - * crypto related data 112 - */ 113 struct crypt_iv_operations *iv_gen_ops; 114 - char *iv_mode; 115 union { 116 struct iv_essiv_private essiv; 117 struct iv_benbi_private benbi; ··· 134 unsigned int dmreq_start; 135 struct ablkcipher_request *req; 136 137 - char cipher[CRYPTO_MAX_ALG_NAME]; 138 - char chainmode[CRYPTO_MAX_ALG_NAME]; 139 struct crypto_ablkcipher *tfm; 140 unsigned long flags; 141 unsigned int key_size; ··· 996 return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); 997 } 998 999 - /* 1000 - * Construct an encryption mapping: 1001 - * <cipher> <key> <iv_offset> <dev_path> <start> 1002 - */ 1003 - static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1004 { 1005 - struct crypt_config *cc; 1006 - struct crypto_ablkcipher *tfm; 1007 - char *tmp; 1008 - char *cipher; 1009 - char *chainmode; 1010 - char *ivmode; 1011 - char *ivopts; 1012 - unsigned int key_size; 1013 - unsigned long long tmpll; 1014 1015 - if (argc != 5) { 1016 - ti->error = "Not enough arguments"; 1017 return -EINVAL; 1018 } 1019 1020 - tmp = argv[0]; 1021 cipher = strsep(&tmp, "-"); 1022 chainmode = strsep(&tmp, "-"); 1023 ivopts = strsep(&tmp, "-"); 1024 ivmode = strsep(&ivopts, ":"); 1025 1026 if (tmp) 1027 - DMWARN("Unexpected additional cipher options"); 1028 1029 - key_size = strlen(argv[1]) >> 1; 1030 - 1031 - cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); 1032 - if (cc == NULL) { 1033 - ti->error = 1034 - "Cannot allocate transparent encryption context"; 1035 - return -ENOMEM; 1036 - } 1037 - 1038 - /* Compatibility mode for old dm-crypt cipher strings */ 1039 - if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) { 1040 chainmode = "cbc"; 1041 ivmode = "plain"; 1042 } 1043 1044 if (strcmp(chainmode, "ecb") && !ivmode) { 1045 - ti->error = "This chaining mode requires an IV mechanism"; 1046 - goto bad_cipher; 1047 } 1048 1049 - if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", 1050 - chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) { 1051 - ti->error = "Chain mode + cipher name is too long"; 1052 - goto bad_cipher; 1053 } 1054 1055 - tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0); 1056 - if (IS_ERR(tfm)) { 1057 ti->error = "Error allocating crypto tfm"; 1058 - goto bad_cipher; 1059 } 1060 1061 - strcpy(cc->cipher, cipher); 1062 - strcpy(cc->chainmode, chainmode); 1063 - cc->tfm = tfm; 1064 - 1065 - if (crypt_set_key(cc, argv[1]) < 0) { 1066 ti->error = "Error decoding and setting key"; 1067 - goto bad_ivmode; 1068 } 1069 1070 - /* 1071 - * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi". 
1072 - * See comments at iv code 1073 - */ 1074 1075 if (ivmode == NULL) 1076 cc->iv_gen_ops = NULL; 1077 else if (strcmp(ivmode, "plain") == 0) ··· 1138 else if (strcmp(ivmode, "null") == 0) 1139 cc->iv_gen_ops = &crypt_iv_null_ops; 1140 else { 1141 ti->error = "Invalid IV mode"; 1142 - goto bad_ivmode; 1143 } 1144 1145 - if (cc->iv_gen_ops && cc->iv_gen_ops->ctr && 1146 - cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) 1147 - goto bad_ivmode; 1148 - 1149 - if (cc->iv_gen_ops && cc->iv_gen_ops->init && 1150 - cc->iv_gen_ops->init(cc) < 0) { 1151 - ti->error = "Error initialising IV"; 1152 - goto bad_slab_pool; 1153 - } 1154 - 1155 - cc->iv_size = crypto_ablkcipher_ivsize(tfm); 1156 - if (cc->iv_size) 1157 - /* at least a 64 bit sector number should fit in our buffer */ 1158 - cc->iv_size = max(cc->iv_size, 1159 - (unsigned int)(sizeof(u64) / sizeof(u8))); 1160 - else { 1161 - if (cc->iv_gen_ops) { 1162 - DMWARN("Selected cipher does not support IVs"); 1163 - if (cc->iv_gen_ops->dtr) 1164 - cc->iv_gen_ops->dtr(cc); 1165 - cc->iv_gen_ops = NULL; 1166 } 1167 } 1168 1169 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); 1170 if (!cc->io_pool) { 1171 ti->error = "Cannot allocate crypt io mempool"; 1172 - goto bad_slab_pool; 1173 } 1174 1175 cc->dmreq_start = sizeof(struct ablkcipher_request); 1176 - cc->dmreq_start += crypto_ablkcipher_reqsize(tfm); 1177 cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); 1178 - cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) & 1179 ~(crypto_tfm_ctx_alignment() - 1); 1180 1181 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1182 sizeof(struct dm_crypt_request) + cc->iv_size); 1183 if (!cc->req_pool) { 1184 ti->error = "Cannot allocate crypt request mempool"; 1185 - goto bad_req_pool; 1186 } 1187 cc->req = NULL; 1188 1189 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1190 if (!cc->page_pool) { 1191 ti->error = "Cannot allocate page mempool"; 1192 - goto bad_page_pool; 1193 } 1194 1195 cc->bs = bioset_create(MIN_IOS, 0); 1196 if (!cc->bs) { 1197 ti->error = "Cannot allocate crypt bioset"; 1198 - goto bad_bs; 1199 } 1200 1201 if (sscanf(argv[2], "%llu", &tmpll) != 1) { 1202 ti->error = "Invalid iv_offset sector"; 1203 - goto bad_device; 1204 } 1205 cc->iv_offset = tmpll; 1206 1207 if (sscanf(argv[4], "%llu", &tmpll) != 1) { 1208 ti->error = "Invalid device sector"; 1209 - goto bad_device; 1210 } 1211 cc->start = tmpll; 1212 1213 - if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { 1214 - ti->error = "Device lookup failed"; 1215 - goto bad_device; 1216 - } 1217 - 1218 - if (ivmode && cc->iv_gen_ops) { 1219 - if (ivopts) 1220 - *(ivopts - 1) = ':'; 1221 - cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL); 1222 - if (!cc->iv_mode) { 1223 - ti->error = "Error kmallocing iv_mode string"; 1224 - goto bad_ivmode_string; 1225 - } 1226 - strcpy(cc->iv_mode, ivmode); 1227 - } else 1228 - cc->iv_mode = NULL; 1229 - 1230 cc->io_queue = create_singlethread_workqueue("kcryptd_io"); 1231 if (!cc->io_queue) { 1232 ti->error = "Couldn't create kcryptd io queue"; 1233 - goto bad_io_queue; 1234 } 1235 1236 cc->crypt_queue = create_singlethread_workqueue("kcryptd"); 1237 if (!cc->crypt_queue) { 1238 ti->error = "Couldn't create kcryptd queue"; 1239 - goto bad_crypt_queue; 1240 } 1241 1242 ti->num_flush_requests = 1; 1243 - ti->private = cc; 1244 return 0; 1245 1246 - bad_crypt_queue: 1247 - destroy_workqueue(cc->io_queue); 1248 - bad_io_queue: 1249 - kfree(cc->iv_mode); 1250 - 
bad_ivmode_string: 1251 - dm_put_device(ti, cc->dev); 1252 - bad_device: 1253 - bioset_free(cc->bs); 1254 - bad_bs: 1255 - mempool_destroy(cc->page_pool); 1256 - bad_page_pool: 1257 - mempool_destroy(cc->req_pool); 1258 - bad_req_pool: 1259 - mempool_destroy(cc->io_pool); 1260 - bad_slab_pool: 1261 - if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1262 - cc->iv_gen_ops->dtr(cc); 1263 - bad_ivmode: 1264 - crypto_free_ablkcipher(tfm); 1265 - bad_cipher: 1266 - /* Must zero key material before freeing */ 1267 - kzfree(cc); 1268 - return -EINVAL; 1269 - } 1270 - 1271 - static void crypt_dtr(struct dm_target *ti) 1272 - { 1273 - struct crypt_config *cc = (struct crypt_config *) ti->private; 1274 - 1275 - destroy_workqueue(cc->io_queue); 1276 - destroy_workqueue(cc->crypt_queue); 1277 - 1278 - if (cc->req) 1279 - mempool_free(cc->req, cc->req_pool); 1280 - 1281 - bioset_free(cc->bs); 1282 - mempool_destroy(cc->page_pool); 1283 - mempool_destroy(cc->req_pool); 1284 - mempool_destroy(cc->io_pool); 1285 - 1286 - kfree(cc->iv_mode); 1287 - if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1288 - cc->iv_gen_ops->dtr(cc); 1289 - crypto_free_ablkcipher(cc->tfm); 1290 - dm_put_device(ti, cc->dev); 1291 - 1292 - /* Must zero key material before freeing */ 1293 - kzfree(cc); 1294 } 1295 1296 static int crypt_map(struct dm_target *ti, struct bio *bio, ··· 1284 return DM_MAPIO_REMAPPED; 1285 } 1286 1287 - io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin); 1288 1289 if (bio_data_dir(io->base_bio) == READ) 1290 kcryptd_queue_io(io); ··· 1297 static int crypt_status(struct dm_target *ti, status_type_t type, 1298 char *result, unsigned int maxlen) 1299 { 1300 - struct crypt_config *cc = (struct crypt_config *) ti->private; 1301 unsigned int sz = 0; 1302 1303 switch (type) { ··· 1306 break; 1307 1308 case STATUSTYPE_TABLE: 1309 - if (cc->iv_mode) 1310 - DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode, 1311 - cc->iv_mode); 1312 else 1313 - DMEMIT("%s-%s ", cc->cipher, cc->chainmode); 1314 1315 if (cc->key_size > 0) { 1316 if ((maxlen - sz) < ((cc->key_size << 1) + 1)) ··· 1406 return max_size; 1407 1408 bvm->bi_bdev = cc->dev->bdev; 1409 - bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin; 1410 1411 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 1412 }
··· 107 struct workqueue_struct *io_queue; 108 struct workqueue_struct *crypt_queue; 109 110 + char *cipher; 111 + char *cipher_mode; 112 + 113 struct crypt_iv_operations *iv_gen_ops; 114 union { 115 struct iv_essiv_private essiv; 116 struct iv_benbi_private benbi; ··· 135 unsigned int dmreq_start; 136 struct ablkcipher_request *req; 137 138 struct crypto_ablkcipher *tfm; 139 unsigned long flags; 140 unsigned int key_size; ··· 999 return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size); 1000 } 1001 1002 + static void crypt_dtr(struct dm_target *ti) 1003 { 1004 + struct crypt_config *cc = ti->private; 1005 1006 + ti->private = NULL; 1007 + 1008 + if (!cc) 1009 + return; 1010 + 1011 + if (cc->io_queue) 1012 + destroy_workqueue(cc->io_queue); 1013 + if (cc->crypt_queue) 1014 + destroy_workqueue(cc->crypt_queue); 1015 + 1016 + if (cc->bs) 1017 + bioset_free(cc->bs); 1018 + 1019 + if (cc->page_pool) 1020 + mempool_destroy(cc->page_pool); 1021 + if (cc->req_pool) 1022 + mempool_destroy(cc->req_pool); 1023 + if (cc->io_pool) 1024 + mempool_destroy(cc->io_pool); 1025 + 1026 + if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) 1027 + cc->iv_gen_ops->dtr(cc); 1028 + 1029 + if (cc->tfm && !IS_ERR(cc->tfm)) 1030 + crypto_free_ablkcipher(cc->tfm); 1031 + 1032 + if (cc->dev) 1033 + dm_put_device(ti, cc->dev); 1034 + 1035 + kzfree(cc->cipher); 1036 + kzfree(cc->cipher_mode); 1037 + 1038 + /* Must zero key material before freeing */ 1039 + kzfree(cc); 1040 + } 1041 + 1042 + static int crypt_ctr_cipher(struct dm_target *ti, 1043 + char *cipher_in, char *key) 1044 + { 1045 + struct crypt_config *cc = ti->private; 1046 + char *tmp, *cipher, *chainmode, *ivmode, *ivopts; 1047 + char *cipher_api = NULL; 1048 + int ret = -EINVAL; 1049 + 1050 + /* Convert to crypto api definition? 
*/ 1051 + if (strchr(cipher_in, '(')) { 1052 + ti->error = "Bad cipher specification"; 1053 return -EINVAL; 1054 } 1055 1056 + /* 1057 + * Legacy dm-crypt cipher specification 1058 + * cipher-mode-iv:ivopts 1059 + */ 1060 + tmp = cipher_in; 1061 cipher = strsep(&tmp, "-"); 1062 + 1063 + cc->cipher = kstrdup(cipher, GFP_KERNEL); 1064 + if (!cc->cipher) 1065 + goto bad_mem; 1066 + 1067 + if (tmp) { 1068 + cc->cipher_mode = kstrdup(tmp, GFP_KERNEL); 1069 + if (!cc->cipher_mode) 1070 + goto bad_mem; 1071 + } 1072 + 1073 chainmode = strsep(&tmp, "-"); 1074 ivopts = strsep(&tmp, "-"); 1075 ivmode = strsep(&ivopts, ":"); 1076 1077 if (tmp) 1078 + DMWARN("Ignoring unexpected additional cipher options"); 1079 1080 + /* Compatibility mode for old dm-crypt mappings */ 1081 + if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) { 1082 + kfree(cc->cipher_mode); 1083 + cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL); 1084 chainmode = "cbc"; 1085 ivmode = "plain"; 1086 } 1087 1088 if (strcmp(chainmode, "ecb") && !ivmode) { 1089 + ti->error = "IV mechanism required"; 1090 + return -EINVAL; 1091 } 1092 1093 + cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL); 1094 + if (!cipher_api) 1095 + goto bad_mem; 1096 + 1097 + ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME, 1098 + "%s(%s)", chainmode, cipher); 1099 + if (ret < 0) { 1100 + kfree(cipher_api); 1101 + goto bad_mem; 1102 } 1103 1104 + /* Allocate cipher */ 1105 + cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0); 1106 + if (IS_ERR(cc->tfm)) { 1107 + ret = PTR_ERR(cc->tfm); 1108 ti->error = "Error allocating crypto tfm"; 1109 + goto bad; 1110 } 1111 1112 + /* Initialize and set key */ 1113 + ret = crypt_set_key(cc, key); 1114 + if (ret < 0) { 1115 ti->error = "Error decoding and setting key"; 1116 + goto bad; 1117 } 1118 1119 + /* Initialize IV */ 1120 + cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm); 1121 + if (cc->iv_size) 1122 + /* at least a 64 bit sector number should fit in our buffer */ 1123 + cc->iv_size = max(cc->iv_size, 1124 + (unsigned int)(sizeof(u64) / sizeof(u8))); 1125 + else if (ivmode) { 1126 + DMWARN("Selected cipher does not support IVs"); 1127 + ivmode = NULL; 1128 + } 1129 1130 + /* Choose ivmode, see comments at iv code. 
*/ 1131 if (ivmode == NULL) 1132 cc->iv_gen_ops = NULL; 1133 else if (strcmp(ivmode, "plain") == 0) ··· 1088 else if (strcmp(ivmode, "null") == 0) 1089 cc->iv_gen_ops = &crypt_iv_null_ops; 1090 else { 1091 + ret = -EINVAL; 1092 ti->error = "Invalid IV mode"; 1093 + goto bad; 1094 } 1095 1096 + /* Allocate IV */ 1097 + if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) { 1098 + ret = cc->iv_gen_ops->ctr(cc, ti, ivopts); 1099 + if (ret < 0) { 1100 + ti->error = "Error creating IV"; 1101 + goto bad; 1102 } 1103 } 1104 1105 + /* Initialize IV (set keys for ESSIV etc) */ 1106 + if (cc->iv_gen_ops && cc->iv_gen_ops->init) { 1107 + ret = cc->iv_gen_ops->init(cc); 1108 + if (ret < 0) { 1109 + ti->error = "Error initialising IV"; 1110 + goto bad; 1111 + } 1112 + } 1113 + 1114 + ret = 0; 1115 + bad: 1116 + kfree(cipher_api); 1117 + return ret; 1118 + 1119 + bad_mem: 1120 + ti->error = "Cannot allocate cipher strings"; 1121 + return -ENOMEM; 1122 + } 1123 + 1124 + /* 1125 + * Construct an encryption mapping: 1126 + * <cipher> <key> <iv_offset> <dev_path> <start> 1127 + */ 1128 + static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1129 + { 1130 + struct crypt_config *cc; 1131 + unsigned int key_size; 1132 + unsigned long long tmpll; 1133 + int ret; 1134 + 1135 + if (argc != 5) { 1136 + ti->error = "Not enough arguments"; 1137 + return -EINVAL; 1138 + } 1139 + 1140 + key_size = strlen(argv[1]) >> 1; 1141 + 1142 + cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL); 1143 + if (!cc) { 1144 + ti->error = "Cannot allocate encryption context"; 1145 + return -ENOMEM; 1146 + } 1147 + 1148 + ti->private = cc; 1149 + ret = crypt_ctr_cipher(ti, argv[0], argv[1]); 1150 + if (ret < 0) 1151 + goto bad; 1152 + 1153 + ret = -ENOMEM; 1154 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); 1155 if (!cc->io_pool) { 1156 ti->error = "Cannot allocate crypt io mempool"; 1157 + goto bad; 1158 } 1159 1160 cc->dmreq_start = sizeof(struct ablkcipher_request); 1161 + cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm); 1162 cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment()); 1163 + cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) & 1164 ~(crypto_tfm_ctx_alignment() - 1); 1165 1166 cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + 1167 sizeof(struct dm_crypt_request) + cc->iv_size); 1168 if (!cc->req_pool) { 1169 ti->error = "Cannot allocate crypt request mempool"; 1170 + goto bad; 1171 } 1172 cc->req = NULL; 1173 1174 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); 1175 if (!cc->page_pool) { 1176 ti->error = "Cannot allocate page mempool"; 1177 + goto bad; 1178 } 1179 1180 cc->bs = bioset_create(MIN_IOS, 0); 1181 if (!cc->bs) { 1182 ti->error = "Cannot allocate crypt bioset"; 1183 + goto bad; 1184 } 1185 1186 + ret = -EINVAL; 1187 if (sscanf(argv[2], "%llu", &tmpll) != 1) { 1188 ti->error = "Invalid iv_offset sector"; 1189 + goto bad; 1190 } 1191 cc->iv_offset = tmpll; 1192 1193 + if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) { 1194 + ti->error = "Device lookup failed"; 1195 + goto bad; 1196 + } 1197 + 1198 if (sscanf(argv[4], "%llu", &tmpll) != 1) { 1199 ti->error = "Invalid device sector"; 1200 + goto bad; 1201 } 1202 cc->start = tmpll; 1203 1204 + ret = -ENOMEM; 1205 cc->io_queue = create_singlethread_workqueue("kcryptd_io"); 1206 if (!cc->io_queue) { 1207 ti->error = "Couldn't create kcryptd io queue"; 1208 + goto bad; 1209 } 1210 1211 cc->crypt_queue = create_singlethread_workqueue("kcryptd"); 1212 
if (!cc->crypt_queue) { 1213 ti->error = "Couldn't create kcryptd queue"; 1214 + goto bad; 1215 } 1216 1217 ti->num_flush_requests = 1; 1218 return 0; 1219 1220 + bad: 1221 + crypt_dtr(ti); 1222 + return ret; 1223 } 1224 1225 static int crypt_map(struct dm_target *ti, struct bio *bio, ··· 1255 return DM_MAPIO_REMAPPED; 1256 } 1257 1258 + io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); 1259 1260 if (bio_data_dir(io->base_bio) == READ) 1261 kcryptd_queue_io(io); ··· 1268 static int crypt_status(struct dm_target *ti, status_type_t type, 1269 char *result, unsigned int maxlen) 1270 { 1271 + struct crypt_config *cc = ti->private; 1272 unsigned int sz = 0; 1273 1274 switch (type) { ··· 1277 break; 1278 1279 case STATUSTYPE_TABLE: 1280 + if (cc->cipher_mode) 1281 + DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode); 1282 else 1283 + DMEMIT("%s ", cc->cipher); 1284 1285 if (cc->key_size > 0) { 1286 if ((maxlen - sz) < ((cc->key_size << 1) + 1)) ··· 1378 return max_size; 1379 1380 bvm->bi_bdev = cc->dev->bdev; 1381 + bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector); 1382 1383 return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 1384 }
+3 -3
drivers/md/dm-delay.c
···
198     atomic_set(&dc->may_delay, 1);
199 
200     ti->num_flush_requests = 1;
201     ti->private = dc;
202     return 0;
203 
···
282         bio->bi_bdev = dc->dev_write->bdev;
283         if (bio_sectors(bio))
284             bio->bi_sector = dc->start_write +
285 -               (bio->bi_sector - ti->begin);
286 
287         return delay_bio(dc, dc->write_delay, bio);
288     }
289 
290     bio->bi_bdev = dc->dev_read->bdev;
291 -   bio->bi_sector = dc->start_read +
292 -       (bio->bi_sector - ti->begin);
293 
294     return delay_bio(dc, dc->read_delay, bio);
295 }
···
198     atomic_set(&dc->may_delay, 1);
199 
200     ti->num_flush_requests = 1;
201 +   ti->num_discard_requests = 1;
202     ti->private = dc;
203     return 0;
204 
···
281         bio->bi_bdev = dc->dev_write->bdev;
282         if (bio_sectors(bio))
283             bio->bi_sector = dc->start_write +
284 +               dm_target_offset(ti, bio->bi_sector);
285 
286         return delay_bio(dc, dc->write_delay, bio);
287     }
288 
289     bio->bi_bdev = dc->dev_read->bdev;
290 +   bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
291 
292     return delay_bio(dc, dc->read_delay, bio);
293 }
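The dm-delay hunks above show the pattern most targets in this series follow: a bio-based target opts into discards by setting ti->num_discard_requests in its constructor, and its map function then remaps discard bios like any other bio. Below is a minimal sketch of such a target, assuming the 2.6.36-era interface visible in these hunks; the example_c context and function names are illustrative, not taken from any driver.

#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/slab.h>

/* Hypothetical per-target context; not from dm-delay itself. */
struct example_c {
	struct dm_dev *dev;
	sector_t start;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_c *ec;

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	/* ... parse argv, dm_get_device(), set ec->start ... */

	ti->num_flush_requests = 1;
	/* Opt in: dm core may now hand REQ_DISCARD bios to ->map(). */
	ti->num_discard_requests = 1;
	ti->private = ec;
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio,
		       union map_info *map_context)
{
	struct example_c *ec = ti->private;

	/* Discards are remapped exactly like reads and writes here. */
	bio->bi_bdev = ec->dev->bdev;
	if (bio_sectors(bio))
		bio->bi_sector = ec->start + dm_target_offset(ti, bio->bi_sector);

	return DM_MAPIO_REMAPPED;
}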
+3 -1
drivers/md/dm-exception-store.c
···
173 
174     /* Validate the chunk size against the device block size */
175     if (chunk_size %
176 -       (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) {
177         *error = "Chunk size is not a multiple of device blocksize";
178         return -EINVAL;
179     }
···
173 
174     /* Validate the chunk size against the device block size */
175     if (chunk_size %
176 +       (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
177 +       chunk_size %
178 +       (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
179         *error = "Chunk size is not a multiple of device blocksize";
180         return -EINVAL;
181     }
+2 -1
drivers/md/dm-exception-store.h
···
126 };
127 
128 /*
129 -  * Obtain the cow device used by a given snapshot.
130  */
131 struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
132 
133 /*
···
126 };
127 
128 /*
129 +  * Obtain the origin or cow device used by a given snapshot.
130  */
131 + struct dm_dev *dm_snap_origin(struct dm_snapshot *snap);
132 struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
133 
134 /*
+106 -103
drivers/md/dm-ioctl.c
··· 249 250 static void dm_hash_remove_all(int keep_open_devices) 251 { 252 - int i, dev_skipped, dev_removed; 253 struct hash_cell *hc; 254 - struct list_head *tmp, *n; 255 256 down_write(&_hash_lock); 257 258 - retry: 259 - dev_skipped = dev_removed = 0; 260 for (i = 0; i < NUM_BUCKETS; i++) { 261 - list_for_each_safe (tmp, n, _name_buckets + i) { 262 - hc = list_entry(tmp, struct hash_cell, name_list); 263 264 - if (keep_open_devices && 265 - dm_lock_for_deletion(hc->md)) { 266 dev_skipped++; 267 continue; 268 } 269 __hash_remove(hc); 270 - dev_removed = 1; 271 } 272 } 273 274 - /* 275 - * Some mapped devices may be using other mapped devices, so if any 276 - * still exist, repeat until we make no further progress. 277 - */ 278 - if (dev_skipped) { 279 - if (dev_removed) 280 - goto retry; 281 - 282 - DMWARN("remove_all left %d open device(s)", dev_skipped); 283 - } 284 - 285 up_write(&_hash_lock); 286 } 287 288 - static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old, 289 - const char *new) 290 { 291 char *new_name, *old_name; 292 struct hash_cell *hc; 293 struct dm_table *table; 294 295 /* 296 * duplicate new. 297 */ 298 new_name = kstrdup(new, GFP_KERNEL); 299 if (!new_name) 300 - return -ENOMEM; 301 302 down_write(&_hash_lock); 303 ··· 317 */ 318 hc = __get_name_cell(new); 319 if (hc) { 320 - DMWARN("asked to rename to an already existing name %s -> %s", 321 - old, new); 322 dm_put(hc->md); 323 up_write(&_hash_lock); 324 kfree(new_name); 325 - return -EBUSY; 326 } 327 328 /* 329 * Is there such a device as 'old' ? 330 */ 331 - hc = __get_name_cell(old); 332 if (!hc) { 333 - DMWARN("asked to rename a non existent device %s -> %s", 334 - old, new); 335 up_write(&_hash_lock); 336 kfree(new_name); 337 - return -ENXIO; 338 } 339 340 /* ··· 356 dm_table_put(table); 357 } 358 359 - if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie)) 360 - *flags |= DM_UEVENT_GENERATED_FLAG; 361 362 - dm_put(hc->md); 363 up_write(&_hash_lock); 364 kfree(old_name); 365 - return 0; 366 } 367 368 /*----------------------------------------------------------------- ··· 585 * Fills in a dm_ioctl structure, ready for sending back to 586 * userland. 587 */ 588 - static int __dev_status(struct mapped_device *md, struct dm_ioctl *param) 589 { 590 struct gendisk *disk = dm_disk(md); 591 struct dm_table *table; ··· 629 dm_table_put(table); 630 } 631 } 632 - 633 - return 0; 634 } 635 636 static int dev_create(struct dm_ioctl *param, size_t param_size) ··· 650 r = dm_hash_insert(param->name, *param->uuid ? 
param->uuid : NULL, md); 651 if (r) { 652 dm_put(md); 653 return r; 654 } 655 656 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; 657 658 - r = __dev_status(md, param); 659 dm_put(md); 660 661 - return r; 662 } 663 664 /* ··· 754 param->flags |= DM_UEVENT_GENERATED_FLAG; 755 756 dm_put(md); 757 return 0; 758 } 759 ··· 775 { 776 int r; 777 char *new_name = (char *) param + param->data_start; 778 779 if (new_name < param->data || 780 invalid_str(new_name, (void *) param + param_size) || ··· 788 if (r) 789 return r; 790 791 - param->data_size = 0; 792 793 - return dm_hash_rename(param->event_nr, &param->flags, param->name, 794 - new_name); 795 } 796 797 static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) ··· 836 geometry.start = indata[3]; 837 838 r = dm_set_geometry(md, &geometry); 839 - if (!r) 840 - r = __dev_status(md, param); 841 842 param->data_size = 0; 843 ··· 859 if (param->flags & DM_NOFLUSH_FLAG) 860 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; 861 862 - if (!dm_suspended_md(md)) 863 r = dm_suspend(md, suspend_flags); 864 865 - if (!r) 866 - r = __dev_status(md, param); 867 868 dm_put(md); 869 return r; 870 } 871 ··· 931 dm_table_destroy(old_map); 932 933 if (!r) 934 - r = __dev_status(md, param); 935 936 dm_put(md); 937 return r; ··· 955 */ 956 static int dev_status(struct dm_ioctl *param, size_t param_size) 957 { 958 - int r; 959 struct mapped_device *md; 960 961 md = find_device(param); 962 if (!md) 963 return -ENXIO; 964 965 - r = __dev_status(md, param); 966 dm_put(md); 967 - return r; 968 } 969 970 /* ··· 1039 */ 1040 static int dev_wait(struct dm_ioctl *param, size_t param_size) 1041 { 1042 - int r; 1043 struct mapped_device *md; 1044 struct dm_table *table; 1045 ··· 1060 * changed to trigger the event, so we may as well tell 1061 * him and save an ioctl. 
1062 */ 1063 - r = __dev_status(md, param); 1064 - if (r) 1065 - goto out; 1066 1067 table = dm_get_live_or_inactive_table(md, param); 1068 if (table) { ··· 1068 dm_table_put(table); 1069 } 1070 1071 - out: 1072 dm_put(md); 1073 return r; 1074 } 1075 ··· 1131 next = spec->next; 1132 } 1133 1134 - r = dm_table_set_type(table); 1135 - if (r) { 1136 - DMWARN("unable to set table type"); 1137 - return r; 1138 - } 1139 - 1140 return dm_table_complete(table); 1141 - } 1142 - 1143 - static int table_prealloc_integrity(struct dm_table *t, 1144 - struct mapped_device *md) 1145 - { 1146 - struct list_head *devices = dm_table_get_devices(t); 1147 - struct dm_dev_internal *dd; 1148 - 1149 - list_for_each_entry(dd, devices, list) 1150 - if (bdev_get_integrity(dd->dm_dev.bdev)) 1151 - return blk_integrity_register(dm_disk(md), NULL); 1152 - 1153 - return 0; 1154 } 1155 1156 static int table_load(struct dm_ioctl *param, size_t param_size) ··· 1155 goto out; 1156 } 1157 1158 - r = table_prealloc_integrity(t, md); 1159 - if (r) { 1160 - DMERR("%s: could not register integrity profile.", 1161 - dm_device_name(md)); 1162 dm_table_destroy(t); 1163 goto out; 1164 } 1165 1166 - r = dm_table_alloc_md_mempools(t); 1167 if (r) { 1168 - DMWARN("unable to allocate mempools for this table"); 1169 dm_table_destroy(t); 1170 goto out; 1171 } 1172 1173 down_write(&_hash_lock); 1174 hc = dm_get_mdptr(md); 1175 if (!hc || hc->md != md) { ··· 1195 up_write(&_hash_lock); 1196 1197 param->flags |= DM_INACTIVE_PRESENT_FLAG; 1198 - r = __dev_status(md, param); 1199 1200 out: 1201 dm_put(md); ··· 1205 1206 static int table_clear(struct dm_ioctl *param, size_t param_size) 1207 { 1208 - int r; 1209 struct hash_cell *hc; 1210 struct mapped_device *md; 1211 ··· 1224 1225 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; 1226 1227 - r = __dev_status(hc->md, param); 1228 md = hc->md; 1229 up_write(&_hash_lock); 1230 dm_put(md); 1231 - return r; 1232 } 1233 1234 /* ··· 1274 1275 static int table_deps(struct dm_ioctl *param, size_t param_size) 1276 { 1277 - int r = 0; 1278 struct mapped_device *md; 1279 struct dm_table *table; 1280 ··· 1281 if (!md) 1282 return -ENXIO; 1283 1284 - r = __dev_status(md, param); 1285 - if (r) 1286 - goto out; 1287 1288 table = dm_get_live_or_inactive_table(md, param); 1289 if (table) { ··· 1289 dm_table_put(table); 1290 } 1291 1292 - out: 1293 dm_put(md); 1294 - return r; 1295 } 1296 1297 /* ··· 1300 */ 1301 static int table_status(struct dm_ioctl *param, size_t param_size) 1302 { 1303 - int r; 1304 struct mapped_device *md; 1305 struct dm_table *table; 1306 ··· 1307 if (!md) 1308 return -ENXIO; 1309 1310 - r = __dev_status(md, param); 1311 - if (r) 1312 - goto out; 1313 1314 table = dm_get_live_or_inactive_table(md, param); 1315 if (table) { ··· 1315 dm_table_put(table); 1316 } 1317 1318 - out: 1319 dm_put(md); 1320 - return r; 1321 } 1322 1323 /* ··· 1335 md = find_device(param); 1336 if (!md) 1337 return -ENXIO; 1338 - 1339 - r = __dev_status(md, param); 1340 - if (r) 1341 - goto out; 1342 1343 if (tmsg < (struct dm_target_msg *) param->data || 1344 invalid_str(tmsg->message, (void *) param + param_size)) { ··· 1592 #endif 1593 1594 static const struct file_operations _ctl_fops = { 1595 .unlocked_ioctl = dm_ctl_ioctl, 1596 .compat_ioctl = dm_compat_ctl_ioctl, 1597 .owner = THIS_MODULE, 1598 }; 1599 1600 static struct miscdevice _dm_misc = { 1601 - .minor = MISC_DYNAMIC_MINOR, 1602 .name = DM_NAME, 1603 - .nodename = "mapper/control", 1604 .fops = &_ctl_fops 1605 }; 1606 1607 /* 1608 * Create misc character 
device and link to DM_DIR/control.
··· 249 250 static void dm_hash_remove_all(int keep_open_devices) 251 { 252 + int i, dev_skipped; 253 struct hash_cell *hc; 254 + struct mapped_device *md; 255 + 256 + retry: 257 + dev_skipped = 0; 258 259 down_write(&_hash_lock); 260 261 for (i = 0; i < NUM_BUCKETS; i++) { 262 + list_for_each_entry(hc, _name_buckets + i, name_list) { 263 + md = hc->md; 264 + dm_get(md); 265 266 + if (keep_open_devices && dm_lock_for_deletion(md)) { 267 + dm_put(md); 268 dev_skipped++; 269 continue; 270 } 271 + 272 __hash_remove(hc); 273 + 274 + up_write(&_hash_lock); 275 + 276 + dm_put(md); 277 + if (likely(keep_open_devices)) 278 + dm_destroy(md); 279 + else 280 + dm_destroy_immediate(md); 281 + 282 + /* 283 + * Some mapped devices may be using other mapped 284 + * devices, so repeat until we make no further 285 + * progress. If a new mapped device is created 286 + * here it will also get removed. 287 + */ 288 + goto retry; 289 } 290 } 291 292 up_write(&_hash_lock); 293 + 294 + if (dev_skipped) 295 + DMWARN("remove_all left %d open device(s)", dev_skipped); 296 } 297 298 + static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, 299 + const char *new) 300 { 301 char *new_name, *old_name; 302 struct hash_cell *hc; 303 struct dm_table *table; 304 + struct mapped_device *md; 305 306 /* 307 * duplicate new. 308 */ 309 new_name = kstrdup(new, GFP_KERNEL); 310 if (!new_name) 311 + return ERR_PTR(-ENOMEM); 312 313 down_write(&_hash_lock); 314 ··· 306 */ 307 hc = __get_name_cell(new); 308 if (hc) { 309 + DMWARN("asked to rename to an already-existing name %s -> %s", 310 + param->name, new); 311 dm_put(hc->md); 312 up_write(&_hash_lock); 313 kfree(new_name); 314 + return ERR_PTR(-EBUSY); 315 } 316 317 /* 318 * Is there such a device as 'old' ? 319 */ 320 + hc = __get_name_cell(param->name); 321 if (!hc) { 322 + DMWARN("asked to rename a non-existent device %s -> %s", 323 + param->name, new); 324 up_write(&_hash_lock); 325 kfree(new_name); 326 + return ERR_PTR(-ENXIO); 327 } 328 329 /* ··· 345 dm_table_put(table); 346 } 347 348 + if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr)) 349 + param->flags |= DM_UEVENT_GENERATED_FLAG; 350 351 + md = hc->md; 352 up_write(&_hash_lock); 353 kfree(old_name); 354 + 355 + return md; 356 } 357 358 /*----------------------------------------------------------------- ··· 573 * Fills in a dm_ioctl structure, ready for sending back to 574 * userland. 575 */ 576 + static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) 577 { 578 struct gendisk *disk = dm_disk(md); 579 struct dm_table *table; ··· 617 dm_table_put(table); 618 } 619 } 620 } 621 622 static int dev_create(struct dm_ioctl *param, size_t param_size) ··· 640 r = dm_hash_insert(param->name, *param->uuid ? 
param->uuid : NULL, md); 641 if (r) { 642 dm_put(md); 643 + dm_destroy(md); 644 return r; 645 } 646 647 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; 648 649 + __dev_status(md, param); 650 + 651 dm_put(md); 652 653 + return 0; 654 } 655 656 /* ··· 742 param->flags |= DM_UEVENT_GENERATED_FLAG; 743 744 dm_put(md); 745 + dm_destroy(md); 746 return 0; 747 } 748 ··· 762 { 763 int r; 764 char *new_name = (char *) param + param->data_start; 765 + struct mapped_device *md; 766 767 if (new_name < param->data || 768 invalid_str(new_name, (void *) param + param_size) || ··· 774 if (r) 775 return r; 776 777 + md = dm_hash_rename(param, new_name); 778 + if (IS_ERR(md)) 779 + return PTR_ERR(md); 780 781 + __dev_status(md, param); 782 + dm_put(md); 783 + 784 + return 0; 785 } 786 787 static int dev_set_geometry(struct dm_ioctl *param, size_t param_size) ··· 818 geometry.start = indata[3]; 819 820 r = dm_set_geometry(md, &geometry); 821 822 param->data_size = 0; 823 ··· 843 if (param->flags & DM_NOFLUSH_FLAG) 844 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; 845 846 + if (!dm_suspended_md(md)) { 847 r = dm_suspend(md, suspend_flags); 848 + if (r) 849 + goto out; 850 + } 851 852 + __dev_status(md, param); 853 854 + out: 855 dm_put(md); 856 + 857 return r; 858 } 859 ··· 911 dm_table_destroy(old_map); 912 913 if (!r) 914 + __dev_status(md, param); 915 916 dm_put(md); 917 return r; ··· 935 */ 936 static int dev_status(struct dm_ioctl *param, size_t param_size) 937 { 938 struct mapped_device *md; 939 940 md = find_device(param); 941 if (!md) 942 return -ENXIO; 943 944 + __dev_status(md, param); 945 dm_put(md); 946 + 947 + return 0; 948 } 949 950 /* ··· 1019 */ 1020 static int dev_wait(struct dm_ioctl *param, size_t param_size) 1021 { 1022 + int r = 0; 1023 struct mapped_device *md; 1024 struct dm_table *table; 1025 ··· 1040 * changed to trigger the event, so we may as well tell 1041 * him and save an ioctl. 1042 */ 1043 + __dev_status(md, param); 1044 1045 table = dm_get_live_or_inactive_table(md, param); 1046 if (table) { ··· 1050 dm_table_put(table); 1051 } 1052 1053 + out: 1054 dm_put(md); 1055 + 1056 return r; 1057 } 1058 ··· 1112 next = spec->next; 1113 } 1114 1115 return dm_table_complete(table); 1116 } 1117 1118 static int table_load(struct dm_ioctl *param, size_t param_size) ··· 1155 goto out; 1156 } 1157 1158 + /* Protect md->type and md->queue against concurrent table loads. */ 1159 + dm_lock_md_type(md); 1160 + if (dm_get_md_type(md) == DM_TYPE_NONE) 1161 + /* Initial table load: acquire type of table. 
*/ 1162 + dm_set_md_type(md, dm_table_get_type(t)); 1163 + else if (dm_get_md_type(md) != dm_table_get_type(t)) { 1164 + DMWARN("can't change device type after initial table load."); 1165 dm_table_destroy(t); 1166 + dm_unlock_md_type(md); 1167 + r = -EINVAL; 1168 goto out; 1169 } 1170 1171 + /* setup md->queue to reflect md's type (may block) */ 1172 + r = dm_setup_md_queue(md); 1173 if (r) { 1174 + DMWARN("unable to set up device queue for new table."); 1175 dm_table_destroy(t); 1176 + dm_unlock_md_type(md); 1177 goto out; 1178 } 1179 + dm_unlock_md_type(md); 1180 1181 + /* stage inactive table */ 1182 down_write(&_hash_lock); 1183 hc = dm_get_mdptr(md); 1184 if (!hc || hc->md != md) { ··· 1186 up_write(&_hash_lock); 1187 1188 param->flags |= DM_INACTIVE_PRESENT_FLAG; 1189 + __dev_status(md, param); 1190 1191 out: 1192 dm_put(md); ··· 1196 1197 static int table_clear(struct dm_ioctl *param, size_t param_size) 1198 { 1199 struct hash_cell *hc; 1200 struct mapped_device *md; 1201 ··· 1216 1217 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; 1218 1219 + __dev_status(hc->md, param); 1220 md = hc->md; 1221 up_write(&_hash_lock); 1222 dm_put(md); 1223 + 1224 + return 0; 1225 } 1226 1227 /* ··· 1265 1266 static int table_deps(struct dm_ioctl *param, size_t param_size) 1267 { 1268 struct mapped_device *md; 1269 struct dm_table *table; 1270 ··· 1273 if (!md) 1274 return -ENXIO; 1275 1276 + __dev_status(md, param); 1277 1278 table = dm_get_live_or_inactive_table(md, param); 1279 if (table) { ··· 1283 dm_table_put(table); 1284 } 1285 1286 dm_put(md); 1287 + 1288 + return 0; 1289 } 1290 1291 /* ··· 1294 */ 1295 static int table_status(struct dm_ioctl *param, size_t param_size) 1296 { 1297 struct mapped_device *md; 1298 struct dm_table *table; 1299 ··· 1302 if (!md) 1303 return -ENXIO; 1304 1305 + __dev_status(md, param); 1306 1307 table = dm_get_live_or_inactive_table(md, param); 1308 if (table) { ··· 1312 dm_table_put(table); 1313 } 1314 1315 dm_put(md); 1316 + 1317 + return 0; 1318 } 1319 1320 /* ··· 1332 md = find_device(param); 1333 if (!md) 1334 return -ENXIO; 1335 1336 if (tmsg < (struct dm_target_msg *) param->data || 1337 invalid_str(tmsg->message, (void *) param + param_size)) { ··· 1593 #endif 1594 1595 static const struct file_operations _ctl_fops = { 1596 + .open = nonseekable_open, 1597 .unlocked_ioctl = dm_ctl_ioctl, 1598 .compat_ioctl = dm_compat_ctl_ioctl, 1599 .owner = THIS_MODULE, 1600 }; 1601 1602 static struct miscdevice _dm_misc = { 1603 + .minor = MAPPER_CTRL_MINOR, 1604 .name = DM_NAME, 1605 + .nodename = DM_DIR "/" DM_CONTROL_NODE, 1606 .fops = &_ctl_fops 1607 }; 1608 + 1609 + MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR); 1610 + MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE); 1611 1612 /* 1613 * Create misc character device and link to DM_DIR/control.
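The tail of the dm-ioctl hunk is what makes "dm: allow autoloading of dm mod" work: the control node moves from MISC_DYNAMIC_MINOR to a fixed minor and module aliases are declared for both the minor and the device name. A condensed sketch of that registration follows, assuming MAPPER_CTRL_MINOR, DM_NAME, DM_DIR and DM_CONTROL_NODE are provided by the miscdevice and dm-ioctl headers as the diff implies; the structure names are illustrative.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/dm-ioctl.h>	/* DM_NAME, DM_DIR, DM_CONTROL_NODE (assumed) */

static const struct file_operations example_ctl_fops = {
	.open	 = nonseekable_open,
	/* .unlocked_ioctl / .compat_ioctl as in the real driver */
	.owner	 = THIS_MODULE,
};

static struct miscdevice example_dm_misc = {
	.minor	  = MAPPER_CTRL_MINOR,		/* fixed minor, not MISC_DYNAMIC_MINOR */
	.name	  = DM_NAME,
	.nodename = DM_DIR "/" DM_CONTROL_NODE,	/* "mapper/control" */
	.fops	  = &example_ctl_fops,
};

/* A fixed minor lets the first open of the pre-created node load the module. */
MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);

/* Somewhere in module init: misc_register(&example_dm_misc); */

Together with the devices.txt hunk above reserving minor 236, this lets udev or devtmpfs create mapper/control ahead of time and have the kernel request dm-mod on first open, without a modprobe rule.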
+2 -1
drivers/md/dm-linear.c
···
53      }
54 
55      ti->num_flush_requests = 1;
56      ti->private = lc;
57      return 0;
58 
···
74 {
75      struct linear_c *lc = ti->private;
76 
77 -    return lc->start + (bi_sector - ti->begin);
78 }
79 
80 static void linear_map_bio(struct dm_target *ti, struct bio *bio)
···
53      }
54 
55      ti->num_flush_requests = 1;
56 +    ti->num_discard_requests = 1;
57      ti->private = lc;
58      return 0;
59 
···
73 {
74      struct linear_c *lc = ti->private;
75 
76 +    return lc->start + dm_target_offset(ti, bi_sector);
77 }
78 
79 static void linear_map_bio(struct dm_target *ti, struct bio *bio)
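Several hunks in this merge replace open-coded "bio->bi_sector - ti->begin" arithmetic with dm_target_offset(). A sketch of what that helper amounts to, with the definition assumed from the converted call sites rather than quoted from include/linux/device-mapper.h:

/*
 * Assumed definition, inferred from the converted call sites; the real
 * macro lives in include/linux/device-mapper.h.
 */
#define dm_target_offset(ti, sector)	((sector) - (ti)->begin)

/*
 * linear_map_sector() above then reads: take the sector's offset within
 * this target and rebase it onto the underlying device at lc->start.
 */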
+11
drivers/md/dm-mpath.c
···
706 
707     if (as->argc < nr_params) {
708         ti->error = "not enough path parameters";
709         goto bad;
710     }
711 
···
893     }
894 
895     ti->num_flush_requests = 1;
896 
897     return 0;
898 
···
1271        return 0;   /* I/O complete */
1272 
1273    if (error == -EOPNOTSUPP)
1274        return error;
1275 
1276    if (mpio->pgpath)
···
706 
707     if (as->argc < nr_params) {
708         ti->error = "not enough path parameters";
709 +       r = -EINVAL;
710         goto bad;
711     }
712 
···
892     }
893 
894     ti->num_flush_requests = 1;
895 +   ti->num_discard_requests = 1;
896 
897     return 0;
898 
···
1269        return 0;   /* I/O complete */
1270 
1271    if (error == -EOPNOTSUPP)
1272 +       return error;
1273 + 
1274 +   if (clone->cmd_flags & REQ_DISCARD)
1275 +       /*
1276 +        * Pass all discard request failures up.
1277 +        * FIXME: only fail_path if the discard failed due to a
1278 +        * transport problem. This requires precise understanding
1279 +        * of the underlying failure (e.g. the SCSI sense).
1280 +        */
1281        return error;
1282 
1283    if (mpio->pgpath)
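Multipath is request based, so its end_io hook tests clone->cmd_flags; the bio-based targets below test bio->bi_rw. Both work because the 2.6.36 block layer unified the bio and request flag namespaces, so REQ_DISCARD is valid in either field. A small sketch of the two checks; the helper names are illustrative:

#include <linux/types.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Request-based target (multipath): the clone is a struct request. */
static inline bool example_rq_is_discard(struct request *clone)
{
	return clone->cmd_flags & REQ_DISCARD;
}

/* Bio-based target (linear, stripe, ...): the same flag sits in bi_rw. */
static inline bool example_bio_is_discard(struct bio *bio)
{
	return bio->bi_rw & REQ_DISCARD;
}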
+1 -1
drivers/md/dm-raid1.c
···
445 {
446     if (unlikely(!bio->bi_size))
447         return 0;
448 -   return m->offset + (bio->bi_sector - m->ms->ti->begin);
449 }
450 
451 static void map_bio(struct mirror *m, struct bio *bio)
···
445 {
446     if (unlikely(!bio->bi_size))
447         return 0;
448 +   return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
449 }
450 
451 static void map_bio(struct mirror *m, struct bio *bio)
+3 -3
drivers/md/dm-snap-persistent.c
···
266  */
267 static chunk_t area_location(struct pstore *ps, chunk_t area)
268 {
269 -   return 1 + ((ps->exceptions_per_area + 1) * area);
270 }
271 
272 /*
···
780      * ps->current_area does not get reduced by prepare_merge() until
781      * after commit_merge() has removed the nr_merged previous exceptions.
782      */
783 -   ps->next_free = (area_location(ps, ps->current_area) - 1) +
784 -           (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
785 
786     return 0;
787 }
···
266  */
267 static chunk_t area_location(struct pstore *ps, chunk_t area)
268 {
269 +   return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
270 }
271 
272 /*
···
780      * ps->current_area does not get reduced by prepare_merge() until
781      * after commit_merge() has removed the nr_merged previous exceptions.
782      */
783 +   ps->next_free = area_location(ps, ps->current_area) +
784 +           ps->current_committed + 1;
785 
786     return 0;
787 }
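These two hunks are an algebraic cleanup rather than a behaviour change: with NUM_SNAPSHOT_HDR_CHUNKS assumed to be 1 (the store header occupies the first chunk), the old and new expressions are identical; the new form just states where the constant comes from. Worked out:

/* Assumed from dm-snap-persistent.c: the on-disk header occupies one chunk. */
#define NUM_SNAPSHOT_HDR_CHUNKS 1

/*
 * area_location():
 *   old:  1 + (exceptions_per_area + 1) * area
 *   new:  NUM_SNAPSHOT_HDR_CHUNKS + (exceptions_per_area + 1) * area
 *   identical when NUM_SNAPSHOT_HDR_CHUNKS == 1, but no longer a magic 1.
 *
 * next_free:
 *   old:  (area_location - 1) + (current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS
 *      =   area_location + current_committed + NUM_SNAPSHOT_HDR_CHUNKS
 *      =   area_location + current_committed + 1
 *   new:   area_location + current_committed + 1
 */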
+45 -17
drivers/md/dm-snap.c
··· 148 #define RUNNING_MERGE 0 149 #define SHUTDOWN_MERGE 1 150 151 struct dm_dev *dm_snap_cow(struct dm_snapshot *s) 152 { 153 return s->cow; ··· 1071 origin_mode = FMODE_WRITE; 1072 } 1073 1074 - origin_path = argv[0]; 1075 - argv++; 1076 - argc--; 1077 - 1078 s = kmalloc(sizeof(*s), GFP_KERNEL); 1079 if (!s) { 1080 ti->error = "Cannot allocate snapshot context private " 1081 "structure"; 1082 r = -ENOMEM; 1083 goto bad; 1084 } 1085 1086 cow_path = argv[0]; ··· 1108 1109 argv += args_used; 1110 argc -= args_used; 1111 - 1112 - r = dm_get_device(ti, origin_path, origin_mode, &s->origin); 1113 - if (r) { 1114 - ti->error = "Cannot get origin device"; 1115 - goto bad_origin; 1116 - } 1117 1118 s->ti = ti; 1119 s->valid = 1; ··· 1218 dm_exception_table_exit(&s->complete, exception_cache); 1219 1220 bad_hash_tables: 1221 - dm_put_device(ti, s->origin); 1222 - 1223 - bad_origin: 1224 dm_exception_store_destroy(s->store); 1225 1226 bad_store: 1227 dm_put_device(ti, s->cow); 1228 1229 bad_cow: 1230 kfree(s); 1231 1232 bad: ··· 1320 1321 mempool_destroy(s->pending_pool); 1322 1323 - dm_put_device(ti, s->origin); 1324 - 1325 dm_exception_store_destroy(s->store); 1326 1327 dm_put_device(ti, s->cow); 1328 1329 kfree(s); 1330 } ··· 1692 chunk_t chunk; 1693 1694 if (unlikely(bio_empty_barrier(bio))) { 1695 - if (!map_context->flush_request) 1696 bio->bi_bdev = s->origin->bdev; 1697 else 1698 bio->bi_bdev = s->cow->bdev; ··· 1905 iterate_devices_callout_fn fn, void *data) 1906 { 1907 struct dm_snapshot *snap = ti->private; 1908 1909 - return fn(ti, snap->origin, 0, ti->len, data); 1910 } 1911 1912 ··· 2171 return 0; 2172 } 2173 2174 static int origin_iterate_devices(struct dm_target *ti, 2175 iterate_devices_callout_fn fn, void *data) 2176 { ··· 2203 .map = origin_map, 2204 .resume = origin_resume, 2205 .status = origin_status, 2206 .iterate_devices = origin_iterate_devices, 2207 }; 2208
··· 148 #define RUNNING_MERGE 0 149 #define SHUTDOWN_MERGE 1 150 151 + struct dm_dev *dm_snap_origin(struct dm_snapshot *s) 152 + { 153 + return s->origin; 154 + } 155 + EXPORT_SYMBOL(dm_snap_origin); 156 + 157 struct dm_dev *dm_snap_cow(struct dm_snapshot *s) 158 { 159 return s->cow; ··· 1065 origin_mode = FMODE_WRITE; 1066 } 1067 1068 s = kmalloc(sizeof(*s), GFP_KERNEL); 1069 if (!s) { 1070 ti->error = "Cannot allocate snapshot context private " 1071 "structure"; 1072 r = -ENOMEM; 1073 goto bad; 1074 + } 1075 + 1076 + origin_path = argv[0]; 1077 + argv++; 1078 + argc--; 1079 + 1080 + r = dm_get_device(ti, origin_path, origin_mode, &s->origin); 1081 + if (r) { 1082 + ti->error = "Cannot get origin device"; 1083 + goto bad_origin; 1084 } 1085 1086 cow_path = argv[0]; ··· 1096 1097 argv += args_used; 1098 argc -= args_used; 1099 1100 s->ti = ti; 1101 s->valid = 1; ··· 1212 dm_exception_table_exit(&s->complete, exception_cache); 1213 1214 bad_hash_tables: 1215 dm_exception_store_destroy(s->store); 1216 1217 bad_store: 1218 dm_put_device(ti, s->cow); 1219 1220 bad_cow: 1221 + dm_put_device(ti, s->origin); 1222 + 1223 + bad_origin: 1224 kfree(s); 1225 1226 bad: ··· 1314 1315 mempool_destroy(s->pending_pool); 1316 1317 dm_exception_store_destroy(s->store); 1318 1319 dm_put_device(ti, s->cow); 1320 + 1321 + dm_put_device(ti, s->origin); 1322 1323 kfree(s); 1324 } ··· 1686 chunk_t chunk; 1687 1688 if (unlikely(bio_empty_barrier(bio))) { 1689 + if (!map_context->target_request_nr) 1690 bio->bi_bdev = s->origin->bdev; 1691 else 1692 bio->bi_bdev = s->cow->bdev; ··· 1899 iterate_devices_callout_fn fn, void *data) 1900 { 1901 struct dm_snapshot *snap = ti->private; 1902 + int r; 1903 1904 + r = fn(ti, snap->origin, 0, ti->len, data); 1905 + 1906 + if (!r) 1907 + r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); 1908 + 1909 + return r; 1910 } 1911 1912 ··· 2159 return 0; 2160 } 2161 2162 + static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm, 2163 + struct bio_vec *biovec, int max_size) 2164 + { 2165 + struct dm_dev *dev = ti->private; 2166 + struct request_queue *q = bdev_get_queue(dev->bdev); 2167 + 2168 + if (!q->merge_bvec_fn) 2169 + return max_size; 2170 + 2171 + bvm->bi_bdev = dev->bdev; 2172 + bvm->bi_sector = bvm->bi_sector; 2173 + 2174 + return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); 2175 + } 2176 + 2177 static int origin_iterate_devices(struct dm_target *ti, 2178 iterate_devices_callout_fn fn, void *data) 2179 { ··· 2176 .map = origin_map, 2177 .resume = origin_resume, 2178 .status = origin_status, 2179 + .merge = origin_merge, 2180 .iterate_devices = origin_iterate_devices, 2181 }; 2182
+74 -13
drivers/md/dm-stripe.c
··· 25 26 struct stripe_c { 27 uint32_t stripes; 28 29 /* The size of this target / num. stripes */ 30 sector_t stripe_width; ··· 164 165 /* Set pointer to dm target; used in trigger_event */ 166 sc->ti = ti; 167 - 168 sc->stripes = stripes; 169 sc->stripe_width = width; 170 ti->split_io = chunk_size; 171 ti->num_flush_requests = stripes; 172 173 sc->chunk_mask = ((sector_t) chunk_size) - 1; 174 - for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++) 175 - chunk_size >>= 1; 176 - sc->chunk_shift--; 177 178 /* 179 * Get the stripe destinations. ··· 215 kfree(sc); 216 } 217 218 static int stripe_map(struct dm_target *ti, struct bio *bio, 219 union map_info *map_context) 220 { 221 - struct stripe_c *sc = (struct stripe_c *) ti->private; 222 - sector_t offset, chunk; 223 uint32_t stripe; 224 225 if (unlikely(bio_empty_barrier(bio))) { 226 - BUG_ON(map_context->flush_request >= sc->stripes); 227 - bio->bi_bdev = sc->stripe[map_context->flush_request].dev->bdev; 228 return DM_MAPIO_REMAPPED; 229 } 230 231 - offset = bio->bi_sector - ti->begin; 232 - chunk = offset >> sc->chunk_shift; 233 - stripe = sector_div(chunk, sc->stripes); 234 235 bio->bi_bdev = sc->stripe[stripe].dev->bdev; 236 - bio->bi_sector = sc->stripe[stripe].physical_start + 237 - (chunk << sc->chunk_shift) + (offset & sc->chunk_mask); 238 return DM_MAPIO_REMAPPED; 239 } 240
··· 25 26 struct stripe_c { 27 uint32_t stripes; 28 + int stripes_shift; 29 + sector_t stripes_mask; 30 31 /* The size of this target / num. stripes */ 32 sector_t stripe_width; ··· 162 163 /* Set pointer to dm target; used in trigger_event */ 164 sc->ti = ti; 165 sc->stripes = stripes; 166 sc->stripe_width = width; 167 + 168 + if (stripes & (stripes - 1)) 169 + sc->stripes_shift = -1; 170 + else { 171 + sc->stripes_shift = ffs(stripes) - 1; 172 + sc->stripes_mask = ((sector_t) stripes) - 1; 173 + } 174 + 175 ti->split_io = chunk_size; 176 ti->num_flush_requests = stripes; 177 + ti->num_discard_requests = stripes; 178 179 + sc->chunk_shift = ffs(chunk_size) - 1; 180 sc->chunk_mask = ((sector_t) chunk_size) - 1; 181 182 /* 183 * Get the stripe destinations. ··· 207 kfree(sc); 208 } 209 210 + static void stripe_map_sector(struct stripe_c *sc, sector_t sector, 211 + uint32_t *stripe, sector_t *result) 212 + { 213 + sector_t offset = dm_target_offset(sc->ti, sector); 214 + sector_t chunk = offset >> sc->chunk_shift; 215 + 216 + if (sc->stripes_shift < 0) 217 + *stripe = sector_div(chunk, sc->stripes); 218 + else { 219 + *stripe = chunk & sc->stripes_mask; 220 + chunk >>= sc->stripes_shift; 221 + } 222 + 223 + *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask); 224 + } 225 + 226 + static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, 227 + uint32_t target_stripe, sector_t *result) 228 + { 229 + uint32_t stripe; 230 + 231 + stripe_map_sector(sc, sector, &stripe, result); 232 + if (stripe == target_stripe) 233 + return; 234 + *result &= ~sc->chunk_mask; /* round down */ 235 + if (target_stripe < stripe) 236 + *result += sc->chunk_mask + 1; /* next chunk */ 237 + } 238 + 239 + static int stripe_map_discard(struct stripe_c *sc, struct bio *bio, 240 + uint32_t target_stripe) 241 + { 242 + sector_t begin, end; 243 + 244 + stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); 245 + stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio), 246 + target_stripe, &end); 247 + if (begin < end) { 248 + bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; 249 + bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; 250 + bio->bi_size = to_bytes(end - begin); 251 + return DM_MAPIO_REMAPPED; 252 + } else { 253 + /* The range doesn't map to the target stripe */ 254 + bio_endio(bio, 0); 255 + return DM_MAPIO_SUBMITTED; 256 + } 257 + } 258 + 259 static int stripe_map(struct dm_target *ti, struct bio *bio, 260 union map_info *map_context) 261 { 262 + struct stripe_c *sc = ti->private; 263 uint32_t stripe; 264 + unsigned target_request_nr; 265 266 if (unlikely(bio_empty_barrier(bio))) { 267 + target_request_nr = map_context->target_request_nr; 268 + BUG_ON(target_request_nr >= sc->stripes); 269 + bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; 270 return DM_MAPIO_REMAPPED; 271 } 272 + if (unlikely(bio->bi_rw & REQ_DISCARD)) { 273 + target_request_nr = map_context->target_request_nr; 274 + BUG_ON(target_request_nr >= sc->stripes); 275 + return stripe_map_discard(sc, bio, target_request_nr); 276 + } 277 278 + stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); 279 280 + bio->bi_sector += sc->stripe[stripe].physical_start; 281 bio->bi_bdev = sc->stripe[stripe].dev->bdev; 282 + 283 return DM_MAPIO_REMAPPED; 284 } 285
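The stripe constructor hunk caches a shift/mask pair when the stripe count is a power of two, so stripe_map_sector() can replace the 64-bit sector_div() with a mask and a shift (the chunk size is already required to be a power of two). A self-contained sketch of that fast path with a worked example; the struct and function names are illustrative, not the driver's own:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>	/* ffs() */

/* Illustrative stand-in for the relevant stripe_c fields. */
struct stripe_geom {
	uint32_t stripes;
	int	 stripes_shift;		/* -1: stripe count is not a power of two */
	sector_t stripes_mask;
	int	 chunk_shift;
	sector_t chunk_mask;
};

static void stripe_geom_init(struct stripe_geom *g, uint32_t stripes,
			     uint32_t chunk_size)
{
	g->stripes = stripes;
	if (stripes & (stripes - 1))
		g->stripes_shift = -1;			/* keep the sector_div() path */
	else {
		g->stripes_shift = ffs(stripes) - 1;	/* log2(stripes) */
		g->stripes_mask = ((sector_t) stripes) - 1;
	}
	g->chunk_shift = ffs(chunk_size) - 1;
	g->chunk_mask = ((sector_t) chunk_size) - 1;
}

static void stripe_locate(struct stripe_geom *g, sector_t offset,
			  uint32_t *stripe, sector_t *result)
{
	sector_t chunk = offset >> g->chunk_shift;

	if (g->stripes_shift < 0)
		*stripe = sector_div(chunk, g->stripes);	/* slow: 64-bit divide */
	else {
		*stripe = chunk & g->stripes_mask;		/* chunk % stripes */
		chunk >>= g->stripes_shift;			/* chunk / stripes */
	}

	*result = (chunk << g->chunk_shift) | (offset & g->chunk_mask);
}

/*
 * Worked example: 4 stripes (shift 2, mask 3), 8-sector chunks (shift 3,
 * mask 7), offset 100:  chunk = 100 >> 3 = 12, stripe = 12 & 3 = 0,
 * chunk >>= 2 -> 3, result = (3 << 3) | (100 & 7) = 28.  The sector_div()
 * path gives the same answer: 12 / 4 = 3 remainder 0.
 */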
+96 -3
drivers/md/dm-table.c
··· 54 sector_t *highs; 55 struct dm_target *targets; 56 57 /* 58 * Indicates the rw permissions for the new logical 59 * device. This should be a combination of FMODE_READ ··· 205 206 INIT_LIST_HEAD(&t->devices); 207 atomic_set(&t->holders, 0); 208 209 if (!num_targets) 210 num_targets = KEYS_PER_NODE; ··· 248 msleep(1); 249 smp_mb(); 250 251 - /* free the indexes (see dm_table_complete) */ 252 if (t->depth >= 2) 253 vfree(t->index[t->depth - 2]); 254 ··· 773 774 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; 775 776 return 0; 777 778 bad: ··· 784 return r; 785 } 786 787 - int dm_table_set_type(struct dm_table *t) 788 { 789 unsigned i; 790 unsigned bio_based = 0, request_based = 0; ··· 906 /* 907 * Builds the btree to index the map. 908 */ 909 - int dm_table_complete(struct dm_table *t) 910 { 911 int r = 0; 912 unsigned int leaf_nodes; ··· 921 922 if (t->depth >= 2) 923 r = setup_indexes(t); 924 925 return r; 926 } ··· 1141 else 1142 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); 1143 1144 dm_table_set_integrity(t); 1145 1146 /* ··· 1290 struct mapped_device *dm_table_get_md(struct dm_table *t) 1291 { 1292 return t->md; 1293 } 1294 1295 EXPORT_SYMBOL(dm_vcalloc);
··· 54 sector_t *highs; 55 struct dm_target *targets; 56 57 + unsigned discards_supported:1; 58 + 59 /* 60 * Indicates the rw permissions for the new logical 61 * device. This should be a combination of FMODE_READ ··· 203 204 INIT_LIST_HEAD(&t->devices); 205 atomic_set(&t->holders, 0); 206 + t->discards_supported = 1; 207 208 if (!num_targets) 209 num_targets = KEYS_PER_NODE; ··· 245 msleep(1); 246 smp_mb(); 247 248 + /* free the indexes */ 249 if (t->depth >= 2) 250 vfree(t->index[t->depth - 2]); 251 ··· 770 771 t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; 772 773 + if (!tgt->num_discard_requests) 774 + t->discards_supported = 0; 775 + 776 return 0; 777 778 bad: ··· 778 return r; 779 } 780 781 + static int dm_table_set_type(struct dm_table *t) 782 { 783 unsigned i; 784 unsigned bio_based = 0, request_based = 0; ··· 900 /* 901 * Builds the btree to index the map. 902 */ 903 + static int dm_table_build_index(struct dm_table *t) 904 { 905 int r = 0; 906 unsigned int leaf_nodes; ··· 915 916 if (t->depth >= 2) 917 r = setup_indexes(t); 918 + 919 + return r; 920 + } 921 + 922 + /* 923 + * Register the mapped device for blk_integrity support if 924 + * the underlying devices support it. 925 + */ 926 + static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md) 927 + { 928 + struct list_head *devices = dm_table_get_devices(t); 929 + struct dm_dev_internal *dd; 930 + 931 + list_for_each_entry(dd, devices, list) 932 + if (bdev_get_integrity(dd->dm_dev.bdev)) 933 + return blk_integrity_register(dm_disk(md), NULL); 934 + 935 + return 0; 936 + } 937 + 938 + /* 939 + * Prepares the table for use by building the indices, 940 + * setting the type, and allocating mempools. 941 + */ 942 + int dm_table_complete(struct dm_table *t) 943 + { 944 + int r; 945 + 946 + r = dm_table_set_type(t); 947 + if (r) { 948 + DMERR("unable to set table type"); 949 + return r; 950 + } 951 + 952 + r = dm_table_build_index(t); 953 + if (r) { 954 + DMERR("unable to build btrees"); 955 + return r; 956 + } 957 + 958 + r = dm_table_prealloc_integrity(t, t->md); 959 + if (r) { 960 + DMERR("could not register integrity profile."); 961 + return r; 962 + } 963 + 964 + r = dm_table_alloc_md_mempools(t); 965 + if (r) 966 + DMERR("unable to allocate mempools"); 967 968 return r; 969 } ··· 1086 else 1087 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); 1088 1089 + if (!dm_table_supports_discards(t)) 1090 + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); 1091 + else 1092 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); 1093 + 1094 dm_table_set_integrity(t); 1095 1096 /* ··· 1230 struct mapped_device *dm_table_get_md(struct dm_table *t) 1231 { 1232 return t->md; 1233 + } 1234 + 1235 + static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, 1236 + sector_t start, sector_t len, void *data) 1237 + { 1238 + struct request_queue *q = bdev_get_queue(dev->bdev); 1239 + 1240 + return q && blk_queue_discard(q); 1241 + } 1242 + 1243 + bool dm_table_supports_discards(struct dm_table *t) 1244 + { 1245 + struct dm_target *ti; 1246 + unsigned i = 0; 1247 + 1248 + if (!t->discards_supported) 1249 + return 0; 1250 + 1251 + /* 1252 + * Ensure that at least one underlying device supports discards. 1253 + * t->devices includes internal dm devices such as mirror logs 1254 + * so we need to use iterate_devices here, which targets 1255 + * supporting discard must provide. 
1256 + */ 1257 + while (i < dm_table_get_num_targets(t)) { 1258 + ti = dm_table_get_target(t, i++); 1259 + 1260 + if (ti->type->iterate_devices && 1261 + ti->type->iterate_devices(ti, device_discard_capable, NULL)) 1262 + return 1; 1263 + } 1264 + 1265 + return 0; 1266 } 1267 1268 EXPORT_SYMBOL(dm_vcalloc);
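dm_table_supports_discards() above can only see underlying devices through each target's iterate_devices method, so a target that wants discards passed down must implement it. A minimal sketch for a single-device, linear-style target (names are illustrative):

#include <linux/device-mapper.h>

/* Hypothetical single-device target context. */
struct onedev_c {
	struct dm_dev *dev;
	sector_t start;
};

static int onedev_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct onedev_c *oc = ti->private;

	/* Report the one underlying device and the range this target maps. */
	return fn(ti, oc->dev, oc->start, ti->len, data);
}

device_discard_capable() in the hunk above is then called once per reported device, and dm_table_set_restrictions() sets QUEUE_FLAG_DISCARD on the mapped device as soon as any underlying queue advertises discard support.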
+5
drivers/md/dm-target.c
···
113  */
114 static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
115 {
116     return 0;
117 }
118 
···
113  */
114 static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
115 {
116 +   /*
117 +    * Return error for discards instead of -EOPNOTSUPP
118 +    */
119 +   tt->num_discard_requests = 1;
120 + 
121     return 0;
122 }
123 
+5
drivers/md/dm-zero.c
···
22          return -EINVAL;
23      }
24 
25      return 0;
26 }
27 
···
22          return -EINVAL;
23      }
24 
25 +    /*
26 +     * Silently drop discards, avoiding -EOPNOTSUPP.
27 +     */
28 +    ti->num_discard_requests = 1;
29 
30      return 0;
31 }
32 
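For dm-zero the opt-in above is the whole change: the existing map function already completes every non-read bio itself, so a discard is acknowledged and dropped without ever producing -EOPNOTSUPP. A rough sketch of that map path, assuming the long-standing dm-zero behaviour (reads are zero-filled, everything else is ended immediately); the function name is illustrative:

#include <linux/device-mapper.h>
#include <linux/bio.h>
#include <linux/fs.h>

static int example_zero_map(struct dm_target *ti, struct bio *bio,
			    union map_info *map_context)
{
	switch (bio_rw(bio)) {
	case READA:
		/* readahead of null bytes only wastes buffer cache */
		return -EIO;
	case READ:
		zero_fill_bio(bio);
		break;
	default:
		/* writes, and now discards, are silently dropped */
		break;
	}

	bio_endio(bio, 0);

	/* accepted bio, don't make new request */
	return DM_MAPIO_SUBMITTED;
}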
+250 -79
drivers/md/dm.c
··· 20 #include <linux/slab.h> 21 #include <linux/idr.h> 22 #include <linux/hdreg.h> 23 24 #include <trace/events/block.h> 25 ··· 125 unsigned long flags; 126 127 struct request_queue *queue; 128 struct gendisk *disk; 129 char name[16]; 130 ··· 643 * There can be just one barrier request so we use 644 * a per-device variable for error reporting. 645 * Note that you can't touch the bio after end_io_acct 646 */ 647 - if (!md->barrier_error && io_error != -EOPNOTSUPP) 648 md->barrier_error = io_error; 649 end_io_acct(io); 650 free_io(md, io); ··· 1030 dm_complete_request(clone, error); 1031 } 1032 1033 - static sector_t max_io_len(struct mapped_device *md, 1034 - sector_t sector, struct dm_target *ti) 1035 { 1036 - sector_t offset = sector - ti->begin; 1037 - sector_t len = ti->len - offset; 1038 1039 /* 1040 * Does the target need to split even further ? 1041 */ 1042 if (ti->split_io) { 1043 sector_t boundary; 1044 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) 1045 - offset; 1046 if (len > boundary) ··· 1192 return tio; 1193 } 1194 1195 - static void __flush_target(struct clone_info *ci, struct dm_target *ti, 1196 - unsigned flush_nr) 1197 { 1198 struct dm_target_io *tio = alloc_tio(ci, ti); 1199 struct bio *clone; 1200 1201 - tio->info.flush_request = flush_nr; 1202 1203 - clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs); 1204 __bio_clone(clone, ci->bio); 1205 clone->bi_destructor = dm_bio_destructor; 1206 1207 __map_bio(ti, clone, tio); 1208 } 1209 1210 static int __clone_and_map_empty_barrier(struct clone_info *ci) 1211 { 1212 - unsigned target_nr = 0, flush_nr; 1213 struct dm_target *ti; 1214 1215 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1216 - for (flush_nr = 0; flush_nr < ti->num_flush_requests; 1217 - flush_nr++) 1218 - __flush_target(ci, ti, flush_nr); 1219 1220 ci->sector_count = 0; 1221 1222 return 0; 1223 } ··· 1292 if (unlikely(bio_empty_barrier(bio))) 1293 return __clone_and_map_empty_barrier(ci); 1294 1295 ti = dm_table_find_target(ci->map, ci->sector); 1296 if (!dm_target_is_valid(ti)) 1297 return -EIO; 1298 1299 - max = max_io_len(ci->md, ci->sector, ti); 1300 - 1301 - /* 1302 - * Allocate a target io object. 1303 - */ 1304 - tio = alloc_tio(ci, ti); 1305 1306 if (ci->sector_count <= max) { 1307 /* 1308 * Optimise for the simple case where we can do all of 1309 * the remaining io with a single clone. 
1310 */ 1311 - clone = clone_bio(bio, ci->sector, ci->idx, 1312 - bio->bi_vcnt - ci->idx, ci->sector_count, 1313 - ci->md->bs); 1314 - __map_bio(ti, clone, tio); 1315 - ci->sector_count = 0; 1316 1317 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { 1318 /* ··· 1327 len += bv_len; 1328 } 1329 1330 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, 1331 ci->md->bs); 1332 __map_bio(ti, clone, tio); ··· 1350 if (!dm_target_is_valid(ti)) 1351 return -EIO; 1352 1353 - max = max_io_len(ci->md, ci->sector, ti); 1354 - 1355 - tio = alloc_tio(ci, ti); 1356 } 1357 1358 len = min(remaining, max); 1359 1360 clone = split_bvec(bio, ci->sector, ci->idx, 1361 bv->bv_offset + offset, len, 1362 ci->md->bs); ··· 1437 /* 1438 * Find maximum amount of I/O that won't need splitting 1439 */ 1440 - max_sectors = min(max_io_len(md, bvm->bi_sector, ti), 1441 (sector_t) BIO_MAX_SECTORS); 1442 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1443 if (max_size < 0) ··· 1920 static void dm_wq_work(struct work_struct *work); 1921 static void dm_rq_barrier_work(struct work_struct *work); 1922 1923 /* 1924 * Allocate and initialise a blank device with a given minor. 1925 */ ··· 1967 if (r < 0) 1968 goto bad_minor; 1969 1970 init_rwsem(&md->io_lock); 1971 mutex_init(&md->suspend_lock); 1972 spin_lock_init(&md->deferred_lock); 1973 spin_lock_init(&md->barrier_error_lock); 1974 rwlock_init(&md->map_lock); ··· 1981 INIT_LIST_HEAD(&md->uevent_list); 1982 spin_lock_init(&md->uevent_lock); 1983 1984 - md->queue = blk_init_queue(dm_request_fn, NULL); 1985 if (!md->queue) 1986 goto bad_queue; 1987 1988 - /* 1989 - * Request-based dm devices cannot be stacked on top of bio-based dm 1990 - * devices. The type of this dm device has not been decided yet, 1991 - * although we initialized the queue using blk_init_queue(). 1992 - * The type is decided at the first table loading time. 1993 - * To prevent problematic device stacking, clear the queue flag 1994 - * for request stacking support until then. 1995 - * 1996 - * This queue is new, so no concurrency on the queue_flags. 
1997 - */ 1998 - queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1999 - md->saved_make_request_fn = md->queue->make_request_fn; 2000 - md->queue->queuedata = md; 2001 - md->queue->backing_dev_info.congested_fn = dm_any_congested; 2002 - md->queue->backing_dev_info.congested_data = md; 2003 - blk_queue_make_request(md->queue, dm_request); 2004 - blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 2005 - md->queue->unplug_fn = dm_unplug_all; 2006 - blk_queue_merge_bvec(md->queue, dm_merge_bvec); 2007 - blk_queue_softirq_done(md->queue, dm_softirq_done); 2008 - blk_queue_prep_rq(md->queue, dm_prep_fn); 2009 - blk_queue_lld_busy(md->queue, dm_lld_busy); 2010 - blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH); 2011 2012 md->disk = alloc_disk(1); 2013 if (!md->disk) ··· 2200 return 0; 2201 } 2202 2203 static struct mapped_device *dm_find_md(dev_t dev) 2204 { 2205 struct mapped_device *md; ··· 2279 md = idr_find(&_minor_idr, minor); 2280 if (md && (md == MINOR_ALLOCED || 2281 (MINOR(disk_devt(dm_disk(md))) != minor) || 2282 test_bit(DMF_FREEING, &md->flags))) { 2283 md = NULL; 2284 goto out; ··· 2314 void dm_get(struct mapped_device *md) 2315 { 2316 atomic_inc(&md->holders); 2317 } 2318 2319 const char *dm_device_name(struct mapped_device *md) ··· 2323 } 2324 EXPORT_SYMBOL_GPL(dm_device_name); 2325 2326 - void dm_put(struct mapped_device *md) 2327 { 2328 struct dm_table *map; 2329 2330 - BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2331 2332 - if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { 2333 - map = dm_get_live_table(md); 2334 - idr_replace(&_minor_idr, MINOR_ALLOCED, 2335 - MINOR(disk_devt(dm_disk(md)))); 2336 - set_bit(DMF_FREEING, &md->flags); 2337 - spin_unlock(&_minor_lock); 2338 - if (!dm_suspended_md(md)) { 2339 - dm_table_presuspend_targets(map); 2340 - dm_table_postsuspend_targets(map); 2341 - } 2342 - dm_sysfs_exit(md); 2343 - dm_table_put(map); 2344 - dm_table_destroy(__unbind(md)); 2345 - free_dev(md); 2346 } 2347 } 2348 EXPORT_SYMBOL_GPL(dm_put); 2349 ··· 2426 2427 if (!bio_empty_barrier(bio)) { 2428 __split_and_process_bio(md, bio); 2429 - dm_flush(md); 2430 } 2431 2432 if (md->barrier_error != DM_ENDIO_REQUEUE) ··· 2488 queue_work(md->wq, &md->work); 2489 } 2490 2491 - static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr) 2492 { 2493 struct dm_rq_target_io *tio = clone->end_io_data; 2494 2495 - tio->info.flush_request = flush_nr; 2496 } 2497 2498 /* Issue barrier requests to targets and wait for their completion. */ ··· 2510 ti = dm_table_get_target(map, i); 2511 for (j = 0; j < ti->num_flush_requests; j++) { 2512 clone = clone_rq(md->flush_request, md, GFP_NOIO); 2513 - dm_rq_set_flush_nr(clone, j); 2514 atomic_inc(&md->pending[rq_data_dir(clone)]); 2515 map_request(ti, clone, md); 2516 } ··· 2573 r = dm_calculate_queue_limits(table, &limits); 2574 if (r) { 2575 map = ERR_PTR(r); 2576 - goto out; 2577 - } 2578 - 2579 - /* cannot change the device type, once a table is bound */ 2580 - if (md->map && 2581 - (dm_table_get_type(md->map) != dm_table_get_type(table))) { 2582 - DMWARN("can't change the device type after a table is bound"); 2583 goto out; 2584 } 2585
··· 20 #include <linux/slab.h> 21 #include <linux/idr.h> 22 #include <linux/hdreg.h> 23 + #include <linux/delay.h> 24 25 #include <trace/events/block.h> 26 ··· 124 unsigned long flags; 125 126 struct request_queue *queue; 127 + unsigned type; 128 + /* Protect queue and type against concurrent access. */ 129 + struct mutex type_lock; 130 + 131 struct gendisk *disk; 132 char name[16]; 133 ··· 638 * There can be just one barrier request so we use 639 * a per-device variable for error reporting. 640 * Note that you can't touch the bio after end_io_acct 641 + * 642 + * We ignore -EOPNOTSUPP for empty flush reported by 643 + * underlying devices. We assume that if the device 644 + * doesn't support empty barriers, it doesn't need 645 + * cache flushing commands. 646 */ 647 + if (!md->barrier_error && 648 + !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP)) 649 md->barrier_error = io_error; 650 end_io_acct(io); 651 free_io(md, io); ··· 1019 dm_complete_request(clone, error); 1020 } 1021 1022 + /* 1023 + * Return maximum size of I/O possible at the supplied sector up to the current 1024 + * target boundary. 1025 + */ 1026 + static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti) 1027 { 1028 + sector_t target_offset = dm_target_offset(ti, sector); 1029 + 1030 + return ti->len - target_offset; 1031 + } 1032 + 1033 + static sector_t max_io_len(sector_t sector, struct dm_target *ti) 1034 + { 1035 + sector_t len = max_io_len_target_boundary(sector, ti); 1036 1037 /* 1038 * Does the target need to split even further ? 1039 */ 1040 if (ti->split_io) { 1041 sector_t boundary; 1042 + sector_t offset = dm_target_offset(ti, sector); 1043 boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) 1044 - offset; 1045 if (len > boundary) ··· 1171 return tio; 1172 } 1173 1174 + static void __issue_target_request(struct clone_info *ci, struct dm_target *ti, 1175 + unsigned request_nr, sector_t len) 1176 { 1177 struct dm_target_io *tio = alloc_tio(ci, ti); 1178 struct bio *clone; 1179 1180 + tio->info.target_request_nr = request_nr; 1181 1182 + /* 1183 + * Discard requests require the bio's inline iovecs be initialized. 1184 + * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 1185 + * and discard, so no need for concern about wasted bvec allocations. 1186 + */ 1187 + clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs); 1188 __bio_clone(clone, ci->bio); 1189 clone->bi_destructor = dm_bio_destructor; 1190 + if (len) { 1191 + clone->bi_sector = ci->sector; 1192 + clone->bi_size = to_bytes(len); 1193 + } 1194 1195 __map_bio(ti, clone, tio); 1196 } 1197 1198 + static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, 1199 + unsigned num_requests, sector_t len) 1200 + { 1201 + unsigned request_nr; 1202 + 1203 + for (request_nr = 0; request_nr < num_requests; request_nr++) 1204 + __issue_target_request(ci, ti, request_nr, len); 1205 + } 1206 + 1207 static int __clone_and_map_empty_barrier(struct clone_info *ci) 1208 { 1209 + unsigned target_nr = 0; 1210 struct dm_target *ti; 1211 1212 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1213 + __issue_target_requests(ci, ti, ti->num_flush_requests, 0); 1214 1215 ci->sector_count = 0; 1216 + 1217 + return 0; 1218 + } 1219 + 1220 + /* 1221 + * Perform all io with a single clone. 
1222 + */ 1223 + static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti) 1224 + { 1225 + struct bio *clone, *bio = ci->bio; 1226 + struct dm_target_io *tio; 1227 + 1228 + tio = alloc_tio(ci, ti); 1229 + clone = clone_bio(bio, ci->sector, ci->idx, 1230 + bio->bi_vcnt - ci->idx, ci->sector_count, 1231 + ci->md->bs); 1232 + __map_bio(ti, clone, tio); 1233 + ci->sector_count = 0; 1234 + } 1235 + 1236 + static int __clone_and_map_discard(struct clone_info *ci) 1237 + { 1238 + struct dm_target *ti; 1239 + sector_t len; 1240 + 1241 + do { 1242 + ti = dm_table_find_target(ci->map, ci->sector); 1243 + if (!dm_target_is_valid(ti)) 1244 + return -EIO; 1245 + 1246 + /* 1247 + * Even though the device advertised discard support, 1248 + * reconfiguration might have changed that since the 1249 + * check was performed. 1250 + */ 1251 + if (!ti->num_discard_requests) 1252 + return -EOPNOTSUPP; 1253 + 1254 + len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1255 + 1256 + __issue_target_requests(ci, ti, ti->num_discard_requests, len); 1257 + 1258 + ci->sector += len; 1259 + } while (ci->sector_count -= len); 1260 1261 return 0; 1262 } ··· 1211 if (unlikely(bio_empty_barrier(bio))) 1212 return __clone_and_map_empty_barrier(ci); 1213 1214 + if (unlikely(bio->bi_rw & REQ_DISCARD)) 1215 + return __clone_and_map_discard(ci); 1216 + 1217 ti = dm_table_find_target(ci->map, ci->sector); 1218 if (!dm_target_is_valid(ti)) 1219 return -EIO; 1220 1221 + max = max_io_len(ci->sector, ti); 1222 1223 if (ci->sector_count <= max) { 1224 /* 1225 * Optimise for the simple case where we can do all of 1226 * the remaining io with a single clone. 1227 */ 1228 + __clone_and_map_simple(ci, ti); 1229 1230 } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { 1231 /* ··· 1252 len += bv_len; 1253 } 1254 1255 + tio = alloc_tio(ci, ti); 1256 clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len, 1257 ci->md->bs); 1258 __map_bio(ti, clone, tio); ··· 1274 if (!dm_target_is_valid(ti)) 1275 return -EIO; 1276 1277 + max = max_io_len(ci->sector, ti); 1278 } 1279 1280 len = min(remaining, max); 1281 1282 + tio = alloc_tio(ci, ti); 1283 clone = split_bvec(bio, ci->sector, ci->idx, 1284 bv->bv_offset + offset, len, 1285 ci->md->bs); ··· 1362 /* 1363 * Find maximum amount of I/O that won't need splitting 1364 */ 1365 + max_sectors = min(max_io_len(bvm->bi_sector, ti), 1366 (sector_t) BIO_MAX_SECTORS); 1367 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1368 if (max_size < 0) ··· 1845 static void dm_wq_work(struct work_struct *work); 1846 static void dm_rq_barrier_work(struct work_struct *work); 1847 1848 + static void dm_init_md_queue(struct mapped_device *md) 1849 + { 1850 + /* 1851 + * Request-based dm devices cannot be stacked on top of bio-based dm 1852 + * devices. The type of this dm device has not been decided yet. 1853 + * The type is decided at the first table loading time. 1854 + * To prevent problematic device stacking, clear the queue flag 1855 + * for request stacking support until then. 1856 + * 1857 + * This queue is new, so no concurrency on the queue_flags. 
1858 + */ 1859 + queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue); 1860 + 1861 + md->queue->queuedata = md; 1862 + md->queue->backing_dev_info.congested_fn = dm_any_congested; 1863 + md->queue->backing_dev_info.congested_data = md; 1864 + blk_queue_make_request(md->queue, dm_request); 1865 + blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); 1866 + md->queue->unplug_fn = dm_unplug_all; 1867 + blk_queue_merge_bvec(md->queue, dm_merge_bvec); 1868 + } 1869 + 1870 /* 1871 * Allocate and initialise a blank device with a given minor. 1872 */ ··· 1870 if (r < 0) 1871 goto bad_minor; 1872 1873 + md->type = DM_TYPE_NONE; 1874 init_rwsem(&md->io_lock); 1875 mutex_init(&md->suspend_lock); 1876 + mutex_init(&md->type_lock); 1877 spin_lock_init(&md->deferred_lock); 1878 spin_lock_init(&md->barrier_error_lock); 1879 rwlock_init(&md->map_lock); ··· 1882 INIT_LIST_HEAD(&md->uevent_list); 1883 spin_lock_init(&md->uevent_lock); 1884 1885 + md->queue = blk_alloc_queue(GFP_KERNEL); 1886 if (!md->queue) 1887 goto bad_queue; 1888 1889 + dm_init_md_queue(md); 1890 1891 md->disk = alloc_disk(1); 1892 if (!md->disk) ··· 2123 return 0; 2124 } 2125 2126 + /* 2127 + * Functions to manage md->type. 2128 + * All are required to hold md->type_lock. 2129 + */ 2130 + void dm_lock_md_type(struct mapped_device *md) 2131 + { 2132 + mutex_lock(&md->type_lock); 2133 + } 2134 + 2135 + void dm_unlock_md_type(struct mapped_device *md) 2136 + { 2137 + mutex_unlock(&md->type_lock); 2138 + } 2139 + 2140 + void dm_set_md_type(struct mapped_device *md, unsigned type) 2141 + { 2142 + md->type = type; 2143 + } 2144 + 2145 + unsigned dm_get_md_type(struct mapped_device *md) 2146 + { 2147 + return md->type; 2148 + } 2149 + 2150 + /* 2151 + * Fully initialize a request-based queue (->elevator, ->request_fn, etc). 
2152 + */ 2153 + static int dm_init_request_based_queue(struct mapped_device *md) 2154 + { 2155 + struct request_queue *q = NULL; 2156 + 2157 + if (md->queue->elevator) 2158 + return 1; 2159 + 2160 + /* Fully initialize the queue */ 2161 + q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); 2162 + if (!q) 2163 + return 0; 2164 + 2165 + md->queue = q; 2166 + md->saved_make_request_fn = md->queue->make_request_fn; 2167 + dm_init_md_queue(md); 2168 + blk_queue_softirq_done(md->queue, dm_softirq_done); 2169 + blk_queue_prep_rq(md->queue, dm_prep_fn); 2170 + blk_queue_lld_busy(md->queue, dm_lld_busy); 2171 + blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH); 2172 + 2173 + elv_register_queue(md->queue); 2174 + 2175 + return 1; 2176 + } 2177 + 2178 + /* 2179 + * Setup the DM device's queue based on md's type 2180 + */ 2181 + int dm_setup_md_queue(struct mapped_device *md) 2182 + { 2183 + if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) && 2184 + !dm_init_request_based_queue(md)) { 2185 + DMWARN("Cannot initialize queue for request-based mapped device"); 2186 + return -EINVAL; 2187 + } 2188 + 2189 + return 0; 2190 + } 2191 + 2192 static struct mapped_device *dm_find_md(dev_t dev) 2193 { 2194 struct mapped_device *md; ··· 2136 md = idr_find(&_minor_idr, minor); 2137 if (md && (md == MINOR_ALLOCED || 2138 (MINOR(disk_devt(dm_disk(md))) != minor) || 2139 + dm_deleting_md(md) || 2140 test_bit(DMF_FREEING, &md->flags))) { 2141 md = NULL; 2142 goto out; ··· 2170 void dm_get(struct mapped_device *md) 2171 { 2172 atomic_inc(&md->holders); 2173 + BUG_ON(test_bit(DMF_FREEING, &md->flags)); 2174 } 2175 2176 const char *dm_device_name(struct mapped_device *md) ··· 2178 } 2179 EXPORT_SYMBOL_GPL(dm_device_name); 2180 2181 + static void __dm_destroy(struct mapped_device *md, bool wait) 2182 { 2183 struct dm_table *map; 2184 2185 + might_sleep(); 2186 2187 + spin_lock(&_minor_lock); 2188 + map = dm_get_live_table(md); 2189 + idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2190 + set_bit(DMF_FREEING, &md->flags); 2191 + spin_unlock(&_minor_lock); 2192 + 2193 + if (!dm_suspended_md(md)) { 2194 + dm_table_presuspend_targets(map); 2195 + dm_table_postsuspend_targets(map); 2196 } 2197 + 2198 + /* 2199 + * Rare, but there may be I/O requests still going to complete, 2200 + * for example. Wait for all references to disappear. 2201 + * No one should increment the reference count of the mapped_device, 2202 + * after the mapped_device state becomes DMF_FREEING. 2203 + */ 2204 + if (wait) 2205 + while (atomic_read(&md->holders)) 2206 + msleep(1); 2207 + else if (atomic_read(&md->holders)) 2208 + DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", 2209 + dm_device_name(md), atomic_read(&md->holders)); 2210 + 2211 + dm_sysfs_exit(md); 2212 + dm_table_put(map); 2213 + dm_table_destroy(__unbind(md)); 2214 + free_dev(md); 2215 + } 2216 + 2217 + void dm_destroy(struct mapped_device *md) 2218 + { 2219 + __dm_destroy(md, true); 2220 + } 2221 + 2222 + void dm_destroy_immediate(struct mapped_device *md) 2223 + { 2224 + __dm_destroy(md, false); 2225 + } 2226 + 2227 + void dm_put(struct mapped_device *md) 2228 + { 2229 + atomic_dec(&md->holders); 2230 } 2231 EXPORT_SYMBOL_GPL(dm_put); 2232 ··· 2253 2254 if (!bio_empty_barrier(bio)) { 2255 __split_and_process_bio(md, bio); 2256 + /* 2257 + * If the request isn't supported, don't waste time with 2258 + * the second flush. 
2259 + */ 2260 + if (md->barrier_error != -EOPNOTSUPP) 2261 + dm_flush(md); 2262 } 2263 2264 if (md->barrier_error != DM_ENDIO_REQUEUE) ··· 2310 queue_work(md->wq, &md->work); 2311 } 2312 2313 + static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr) 2314 { 2315 struct dm_rq_target_io *tio = clone->end_io_data; 2316 2317 + tio->info.target_request_nr = request_nr; 2318 } 2319 2320 /* Issue barrier requests to targets and wait for their completion. */ ··· 2332 ti = dm_table_get_target(map, i); 2333 for (j = 0; j < ti->num_flush_requests; j++) { 2334 clone = clone_rq(md->flush_request, md, GFP_NOIO); 2335 + dm_rq_set_target_request_nr(clone, j); 2336 atomic_inc(&md->pending[rq_data_dir(clone)]); 2337 map_request(ti, clone, md); 2338 } ··· 2395 r = dm_calculate_queue_limits(table, &limits); 2396 if (r) { 2397 map = ERR_PTR(r); 2398 goto out; 2399 } 2400
+13 -1
drivers/md/dm.h
··· 59 int dm_table_resume_targets(struct dm_table *t); 60 int dm_table_any_congested(struct dm_table *t, int bdi_bits); 61 int dm_table_any_busy_target(struct dm_table *t); 62 - int dm_table_set_type(struct dm_table *t); 63 unsigned dm_table_get_type(struct dm_table *t); 64 bool dm_table_request_based(struct dm_table *t); 65 int dm_table_alloc_md_mempools(struct dm_table *t); 66 void dm_table_free_md_mempools(struct dm_table *t); 67 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); 68 69 /* 70 * To check the return value from dm_table_find_target(). ··· 129 int dm_stripe_init(void); 130 void dm_stripe_exit(void); 131 132 int dm_open_count(struct mapped_device *md); 133 int dm_lock_for_deletion(struct mapped_device *md); 134
··· 59 int dm_table_resume_targets(struct dm_table *t); 60 int dm_table_any_congested(struct dm_table *t, int bdi_bits); 61 int dm_table_any_busy_target(struct dm_table *t); 62 unsigned dm_table_get_type(struct dm_table *t); 63 bool dm_table_request_based(struct dm_table *t); 64 + bool dm_table_supports_discards(struct dm_table *t); 65 int dm_table_alloc_md_mempools(struct dm_table *t); 66 void dm_table_free_md_mempools(struct dm_table *t); 67 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); 68 + 69 + void dm_lock_md_type(struct mapped_device *md); 70 + void dm_unlock_md_type(struct mapped_device *md); 71 + void dm_set_md_type(struct mapped_device *md, unsigned type); 72 + unsigned dm_get_md_type(struct mapped_device *md); 73 + 74 + int dm_setup_md_queue(struct mapped_device *md); 75 76 /* 77 * To check the return value from dm_table_find_target(). ··· 122 int dm_stripe_init(void); 123 void dm_stripe_exit(void); 124 125 + /* 126 + * mapped_device operations 127 + */ 128 + void dm_destroy(struct mapped_device *md); 129 + void dm_destroy_immediate(struct mapped_device *md); 130 int dm_open_count(struct mapped_device *md); 131 int dm_lock_for_deletion(struct mapped_device *md); 132
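dm_put() is now just a reference drop; the actual teardown moved into dm_destroy() (or dm_destroy_immediate() when waiting is not an option), so a caller that previously relied on the final dm_put() freeing the mapped_device has to perform both steps. A sketch of the expected pairing (the function name example_remove is illustrative):

static void example_remove(struct mapped_device *md)
{
	/* drop the reference taken when the device was looked up ... */
	dm_put(md);

	/* ... then wait for the remaining holders and free the device */
	dm_destroy(md);
}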
+14 -2
include/linux/device-mapper.h
··· 22 union map_info { 23 void *ptr; 24 unsigned long long ll; 25 - unsigned flush_request; 26 }; 27 28 /* ··· 174 * A number of zero-length barrier requests that will be submitted 175 * to the target for the purpose of flushing cache. 176 * 177 - * The request number will be placed in union map_info->flush_request. 178 * It is a responsibility of the target driver to remap these requests 179 * to the real underlying devices. 180 */ 181 unsigned num_flush_requests; 182 183 /* target specific data */ 184 void *private; ··· 397 398 #define dm_array_too_big(fixed, obj, num) \ 399 ((num) > (UINT_MAX - (fixed)) / (obj)) 400 401 static inline sector_t to_sector(unsigned long n) 402 {
··· 22 union map_info { 23 void *ptr; 24 unsigned long long ll; 25 + unsigned target_request_nr; 26 }; 27 28 /* ··· 174 * A number of zero-length barrier requests that will be submitted 175 * to the target for the purpose of flushing cache. 176 * 177 + * The request number will be placed in union map_info->target_request_nr. 178 * It is a responsibility of the target driver to remap these requests 179 * to the real underlying devices. 180 */ 181 unsigned num_flush_requests; 182 + 183 + /* 184 + * The number of discard requests that will be submitted to the 185 + * target. map_info->request_nr is used just like num_flush_requests. 186 + */ 187 + unsigned num_discard_requests; 188 189 /* target specific data */ 190 void *private; ··· 391 392 #define dm_array_too_big(fixed, obj, num) \ 393 ((num) > (UINT_MAX - (fixed)) / (obj)) 394 + 395 + /* 396 + * Sector offset taken relative to the start of the target instead of 397 + * relative to the start of the device. 398 + */ 399 + #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) 400 401 static inline sector_t to_sector(unsigned long n) 402 {
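dm_target_offset() and target_request_nr are the pieces a target uses when remapping: the macro turns a device-relative sector into a target-relative one, and the request number tells a multi-copy target which of its num_flush_requests/num_discard_requests clones it is looking at. A sketch of a linear-style remap built on the macro (example_ctx and its fields are assumptions, not code from this merge):

#include <linux/device-mapper.h>

struct example_ctx {
	struct dm_dev *dev;
	sector_t start;		/* offset into the underlying device */
};

static int example_map(struct dm_target *ti, struct bio *bio,
		       union map_info *map_context)
{
	struct example_ctx *ec = ti->private;

	bio->bi_bdev = ec->dev->bdev;
	if (bio_sectors(bio))
		/* target-relative first, then shift onto the backing device */
		bio->bi_sector = ec->start +
				 dm_target_offset(ti, bio->bi_sector);

	return DM_MAPIO_REMAPPED;
}

A target spanning several devices would additionally read map_context->target_request_nr to decide which copy a zero-length flush or a boundary-split discard clone is aimed at.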
+3 -2
include/linux/dm-ioctl.h
··· 11 #include <linux/types.h> 12 13 #define DM_DIR "mapper" /* Slashes not supported */ 14 #define DM_MAX_TYPE_NAME 16 15 #define DM_NAME_LEN 128 16 #define DM_UUID_LEN 129 ··· 267 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 268 269 #define DM_VERSION_MAJOR 4 270 - #define DM_VERSION_MINOR 17 271 #define DM_VERSION_PATCHLEVEL 0 272 - #define DM_VERSION_EXTRA "-ioctl (2010-03-05)" 273 274 /* Status bits */ 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
··· 11 #include <linux/types.h> 12 13 #define DM_DIR "mapper" /* Slashes not supported */ 14 + #define DM_CONTROL_NODE "control" 15 #define DM_MAX_TYPE_NAME 16 16 #define DM_NAME_LEN 128 17 #define DM_UUID_LEN 129 ··· 266 #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 267 268 #define DM_VERSION_MAJOR 4 269 + #define DM_VERSION_MINOR 18 270 #define DM_VERSION_PATCHLEVEL 0 271 + #define DM_VERSION_EXTRA "-ioctl (2010-06-29)" 272 273 /* Status bits */ 274 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
+1
include/linux/miscdevice.h
··· 38 #define KVM_MINOR 232 39 #define BTRFS_MINOR 234 40 #define AUTOFS_MINOR 235 41 #define MISC_DYNAMIC_MINOR 255 42 43 struct device;
··· 38 #define KVM_MINOR 232 39 #define BTRFS_MINOR 234 40 #define AUTOFS_MINOR 235 41 + #define MAPPER_CTRL_MINOR 236 42 #define MISC_DYNAMIC_MINOR 255 43 44 struct device;
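The fixed MAPPER_CTRL_MINOR and the DM_CONTROL_NODE name exist so that the first open of /dev/mapper/control can autoload dm-mod through the standard misc-device and devname module aliases. A sketch of what the control-device registration is expected to look like (the "device-mapper" name, the _ctl_fops placeholder and the alias strings are assumptions, not part of this diff):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/dm-ioctl.h>

static const struct file_operations _ctl_fops = {
	.owner	= THIS_MODULE,
	/* .unlocked_ioctl/.compat_ioctl would carry the real dm ioctl entry points */
};

static struct miscdevice _dm_misc = {
	.minor		= MAPPER_CTRL_MINOR,
	.name		= "device-mapper",
	.nodename	= DM_DIR "/" DM_CONTROL_NODE,	/* "mapper/control" */
	.fops		= &_ctl_fops,
};

/* Let udev/devtmpfs create the node and modprobe resolve it to dm-mod. */
MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);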