Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm: introduce dm_accept_partial_bio

The function dm_accept_partial_bio allows the target to specify how many
sectors of the current bio it will process. If the target only wants to
accept part of the bio, it calls dm_accept_partial_bio and the DM core
sends the rest of the data in a subsequent bio.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Mikulas Patocka and committed by Mike Snitzer
1dd40c3e e0d6609a

+53 -8
+51 -8
drivers/md/dm.c
··· 1110 1110 } 1111 1111 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); 1112 1112 1113 + /* 1114 + * A target may call dm_accept_partial_bio only from the map routine. It is 1115 + * allowed for all bio types except REQ_FLUSH. 1116 + * 1117 + * dm_accept_partial_bio informs the dm that the target only wants to process 1118 + * additional n_sectors sectors of the bio and the rest of the data should be 1119 + * sent in a next bio. 1120 + * 1121 + * A diagram that explains the arithmetics: 1122 + * +--------------------+---------------+-------+ 1123 + * | 1 | 2 | 3 | 1124 + * +--------------------+---------------+-------+ 1125 + * 1126 + * <-------------- *tio->len_ptr ---------------> 1127 + * <------- bi_size -------> 1128 + * <-- n_sectors --> 1129 + * 1130 + * Region 1 was already iterated over with bio_advance or similar function. 1131 + * (it may be empty if the target doesn't use bio_advance) 1132 + * Region 2 is the remaining bio size that the target wants to process. 1133 + * (it may be empty if region 1 is non-empty, although there is no reason 1134 + * to make it empty) 1135 + * The target requires that region 3 is to be sent in the next bio. 1136 + * 1137 + * If the target wants to receive multiple copies of the bio (via num_*bios, etc), 1138 + * the partially processed part (the sum of regions 1+2) must be the same for all 1139 + * copies of the bio. 
1140 + */ 1141 + void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) 1142 + { 1143 + struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); 1144 + unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; 1145 + BUG_ON(bio->bi_rw & REQ_FLUSH); 1146 + BUG_ON(bi_size > *tio->len_ptr); 1147 + BUG_ON(n_sectors > bi_size); 1148 + *tio->len_ptr -= bi_size - n_sectors; 1149 + bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; 1150 + } 1151 + EXPORT_SYMBOL_GPL(dm_accept_partial_bio); 1152 + 1113 1153 static void __map_bio(struct dm_target_io *tio) 1114 1154 { 1115 1155 int r; ··· 1240 1200 1241 1201 static void __clone_and_map_simple_bio(struct clone_info *ci, 1242 1202 struct dm_target *ti, 1243 - unsigned target_bio_nr, unsigned len) 1203 + unsigned target_bio_nr, unsigned *len) 1244 1204 { 1245 1205 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr); 1246 1206 struct bio *clone = &tio->clone; 1207 + 1208 + tio->len_ptr = len; 1247 1209 1248 1210 /* 1249 1211 * Discard requests require the bio's inline iovecs be initialized. 
··· 1254 1212 */ 1255 1213 __bio_clone_fast(clone, ci->bio); 1256 1214 if (len) 1257 - bio_setup_sector(clone, ci->sector, len); 1215 + bio_setup_sector(clone, ci->sector, *len); 1258 1216 1259 1217 __map_bio(tio); 1260 1218 } 1261 1219 1262 1220 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, 1263 - unsigned num_bios, unsigned len) 1221 + unsigned num_bios, unsigned *len) 1264 1222 { 1265 1223 unsigned target_bio_nr; 1266 1224 ··· 1275 1233 1276 1234 BUG_ON(bio_has_data(ci->bio)); 1277 1235 while ((ti = dm_table_get_target(ci->map, target_nr++))) 1278 - __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0); 1236 + __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); 1279 1237 1280 1238 return 0; 1281 1239 } 1282 1240 1283 1241 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1284 - sector_t sector, unsigned len) 1242 + sector_t sector, unsigned *len) 1285 1243 { 1286 1244 struct bio *bio = ci->bio; 1287 1245 struct dm_target_io *tio; ··· 1296 1254 1297 1255 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1298 1256 tio = alloc_tio(ci, ti, 0, target_bio_nr); 1299 - clone_bio(tio, bio, sector, len); 1257 + tio->len_ptr = len; 1258 + clone_bio(tio, bio, sector, *len); 1300 1259 __map_bio(tio); 1301 1260 } 1302 1261 } ··· 1349 1306 else 1350 1307 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); 1351 1308 1352 - __send_duplicate_bios(ci, ti, num_bios, len); 1309 + __send_duplicate_bios(ci, ti, num_bios, &len); 1353 1310 1354 1311 ci->sector += len; 1355 1312 } while (ci->sector_count -= len); ··· 1388 1345 1389 1346 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); 1390 1347 1391 - __clone_and_map_data_bio(ci, ti, ci->sector, len); 1348 + __clone_and_map_data_bio(ci, ti, ci->sector, &len); 1392 1349 1393 1350 ci->sector += len; 1394 1351 ci->sector_count -= len;
+2
include/linux/device-mapper.h
··· 291 291 struct dm_io *io; 292 292 struct dm_target *ti; 293 293 unsigned target_bio_nr; 294 + unsigned *len_ptr; 294 295 struct bio clone; 295 296 }; 296 297 ··· 402 401 struct gendisk *dm_disk(struct mapped_device *md); 403 402 int dm_suspended(struct dm_target *ti); 404 403 int dm_noflush_suspending(struct dm_target *ti); 404 + void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); 405 405 union map_info *dm_get_rq_mapinfo(struct request *rq); 406 406 407 407 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);