Merge git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm

* git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-2.6-dm:
dm snapshot: fix on disk chunk size validation
dm exception store: split set_chunk_size
dm snapshot: fix header corruption race on invalidation
dm snapshot: refactor zero_disk_area to use chunk_io
dm log: userspace add luid to distinguish between concurrent log instances
dm raid1: do not allow log_failure variable to unset after being set
dm log: remove incorrect field from userspace table output
dm log: fix userspace status output
dm stripe: expose correct io hints
dm table: add more context to terse warning messages
dm table: fix queue_limit checking device iterator
dm snapshot: implement iterate devices
dm multipath: fix oops when request based io fails when no paths

 13 files changed, 198 insertions(+), 81 deletions(-)
drivers/md/dm-exception-store.c (+13)

···
 	 */
 	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
 
+	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+						 error);
+}
+
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error)
+{
 	/* Check chunk_size is a power of 2 */
 	if (!is_power_of_2(chunk_size_ulong)) {
 		*error = "Chunk size is not a power of 2";
···
 	/* Validate the chunk size against the device block size */
 	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
+		return -EINVAL;
+	}
+
+	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+		*error = "Chunk size is too high";
 		return -EINVAL;
 	}
 
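The split means a chunk size read from disk in read_header() passes through the same checks as one supplied on the table line. A minimal userspace sketch of those rules; the function name, sector math and sample values here are illustrative, not the kernel API:

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

/* Illustrative stand-in for dm_exception_store_set_chunk_size():
 * chunk_size is in 512-byte sectors, block_sectors is the device's
 * logical block size in sectors. */
static int validate_chunk_size(unsigned long chunk_size,
			       unsigned long block_sectors,
			       const char **error)
{
	/* Check chunk_size is a power of 2 */
	if (!chunk_size || (chunk_size & (chunk_size - 1))) {
		*error = "Chunk size is not a power of 2";
		return -1;
	}

	/* Check it is a multiple of the device block size */
	if (chunk_size % block_sectors) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -1;
	}

	/* The new upper bound: the byte count must fit in an int */
	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
		*error = "Chunk size is too high";
		return -1;
	}

	return 0;
}

int main(void)
{
	const char *err = NULL;

	/* 2^23 sectors = 4 GiB: overflows an int once shifted to bytes */
	if (validate_chunk_size(1UL << 23, 1, &err))
		printf("rejected: %s\n", err);
	return 0;
}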
drivers/md/dm-exception-store.h (+4)

···
 int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error);
+
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 			      unsigned *args_used,
 			      struct dm_exception_store **store);
drivers/md/dm-log-userspace-base.c (+24 -15)

···
 	struct dm_target *ti;
 	uint32_t region_size;
 	region_t region_count;
+	uint64_t luid;
 	char uuid[DM_UUID_LEN];
 
 	char *usr_argv_str;
···
 	 * restored.
 	 */
 retry:
-	r = dm_consult_userspace(uuid, request_type, data,
+	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
 				 data_size, rdata, rdata_size);
 
 	if (r != -ESRCH)
···
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(2*HZ);
 		DMWARN("Attempting to contact userspace log server...");
-		r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
+					 lc->usr_argv_str,
 					 strlen(lc->usr_argv_str) + 1,
 					 NULL, NULL);
 		if (!r)
 			break;
 	}
 	DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
-	r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+	r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
 				 0, NULL, NULL);
 	if (!r)
 		goto retry;
···
 		return -ENOMEM;
 	}
 
-	for (i = 0, str_size = 0; i < argc; i++)
-		str_size += sprintf(str + str_size, "%s ", argv[i]);
-	str_size += sprintf(str + str_size, "%llu",
-			    (unsigned long long)ti->len);
+	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
+	for (i = 0; i < argc; i++)
+		str_size += sprintf(str + str_size, " %s", argv[i]);
 
 	*ctr_str = str;
 	return str_size;
···
 		return -ENOMEM;
 	}
 
+	/* The ptr value is sufficient for local unique id */
+	lc->luid = (uint64_t)lc;
+
 	lc->ti = ti;
 
 	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
···
 	}
 
 	/* Send table string */
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 				 ctr_str, str_size, NULL, NULL);
 
 	if (r == -ESRCH) {
···
 
 	/* Since the region size does not change, get it now */
 	rdata_size = sizeof(rdata);
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
 				 NULL, 0, (char *)&rdata, &rdata_size);
 
 	if (r) {
···
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
 				 NULL, 0,
 				 NULL, NULL);
 
···
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
 				 NULL, 0,
 				 NULL, NULL);
 
···
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
 				 NULL, 0,
 				 NULL, NULL);
 
···
 	struct log_c *lc = log->context;
 
 	lc->in_sync_hint = 0;
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
 				 NULL, 0,
 				 NULL, NULL);
 
···
 			 char *result, unsigned maxlen)
 {
 	int r = 0;
+	char *table_args;
 	size_t sz = (size_t)maxlen;
 	struct log_c *lc = log->context;
 
···
 		break;
 	case STATUSTYPE_TABLE:
 		sz = 0;
-		DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
-		       lc->uuid, lc->usr_argv_str);
+		table_args = strstr(lc->usr_argv_str, " ");
+		BUG_ON(!table_args); /* There will always be a ' ' */
+		table_args++;
+
+		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
+		       lc->uuid, table_args);
 		break;
 	}
 	return (r) ? 0 : (int)sz;
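The constructor-string reorder and the status fix work as a pair: the target length now leads the string, so STATUSTYPE_TABLE output can skip it with strstr() and report only the user-supplied arguments with the correct argument count. A small userspace sketch, with made-up log arguments:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char str[64];
	const char *argv[] = { "clustered-disk", "8:11", "1024" };
	unsigned long long ti_len = 102400;
	int i, str_size;

	/* build: "<len> <argv...>" (was "<argv...> <len>" before the fix) */
	str_size = sprintf(str, "%llu", ti_len);
	for (i = 0; i < 3; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);

	/* status table output: skip past the length, as the kernel does */
	char *table_args = strstr(str, " ");
	assert(table_args);	/* there will always be a ' ' */
	table_args++;

	printf("full ctr string: %s\n", str);
	printf("status args:     %s\n", table_args);
	return 0;
}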
drivers/md/dm-log-userspace-transfer.c (+4 -2)

···
 
 /**
  * dm_consult_userspace
- * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
+ * @luid: log's local unique identifier
  * @request_type: found in include/linux/dm-log-userspace.h
  * @data: data to tx to the server
  * @data_size: size of data in bytes
···
  *
  * Returns: 0 on success, -EXXX on failure
  **/
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size)
 {
···
 
 	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
 
 	/*
drivers/md/dm-log-userspace-transfer.h (+1 -1)

···
 
 int dm_ulog_tfr_init(void);
 void dm_ulog_tfr_exit(void);
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size);
 
drivers/md/dm-raid1.c (+7 -1)

···
 	 */
 	dm_rh_inc_pending(ms->rh, &sync);
 	dm_rh_inc_pending(ms->rh, &nosync);
-	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
+
+	/*
+	 * If the flush fails on a previous call and succeeds here,
+	 * we must not reset the log_failure variable. We need
+	 * userspace interaction to do that.
+	 */
+	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
 
 	/*
 	 * Dispatch io.
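The one-line fix turns log_failure into a latch: a later successful flush can no longer clear an earlier failure, which now needs userspace intervention instead. A toy demonstration of the difference, with simulated flush results:

#include <stdio.h>

/* Simulated flush results over successive calls: fail once, then succeed */
static int flush_results[] = { -1, 0, 0 };

int main(void)
{
	int log_failure = 0;

	for (int i = 0; i < 3; i++) {
		int r = flush_results[i];

		/* old (buggy): log_failure = r ? 1 : 0;
		 * would silently clear the error on the next good flush */
		log_failure = r ? 1 : log_failure;
		printf("call %d: flush=%d log_failure=%d\n", i, r, log_failure);
	}
	return 0;
}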
drivers/md/dm-snap-persistent.c (+53 -35)

···
 	void *zero_area;
 
 	/*
+	 * An area used for header. The header can be written
+	 * concurrently with metadata (when invalidating the snapshot),
+	 * so it needs a separate buffer.
+	 */
+	void *header_area;
+
+	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
···
 	 */
 	ps->area = vmalloc(len);
 	if (!ps->area)
-		return r;
+		goto err_area;
 
 	ps->zero_area = vmalloc(len);
-	if (!ps->zero_area) {
-		vfree(ps->area);
-		return r;
-	}
+	if (!ps->zero_area)
+		goto err_zero_area;
 	memset(ps->zero_area, 0, len);
 
+	ps->header_area = vmalloc(len);
+	if (!ps->header_area)
+		goto err_header_area;
+
 	return 0;
+
+err_header_area:
+	vfree(ps->zero_area);
+
+err_zero_area:
+	vfree(ps->area);
+
+err_area:
+	return r;
 }
 
 static void free_area(struct pstore *ps)
···
 	if (ps->zero_area)
 		vfree(ps->zero_area);
 	ps->zero_area = NULL;
+
+	if (ps->header_area)
+		vfree(ps->header_area);
+	ps->header_area = NULL;
 }
 
 struct mdata_req {
···
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+		    int metadata)
 {
 	struct dm_io_region where = {
 		.bdev = ps->store->cow->bdev,
···
 	struct dm_io_request io_req = {
 		.bi_rw = rw,
 		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->area,
+		.mem.ptr.vma = area,
 		.client = ps->io_client,
 		.notify.fn = NULL,
 	};
···
 
 	chunk = area_location(ps, ps->current_area);
 
-	r = chunk_io(ps, chunk, rw, 0);
+	r = chunk_io(ps, ps->area, chunk, rw, 0);
 	if (r)
 		return r;
 
···
 
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-	struct dm_io_region where = {
-		.bdev = ps->store->cow->bdev,
-		.sector = ps->store->chunk_size * area_location(ps, area),
-		.count = ps->store->chunk_size,
-	};
-	struct dm_io_request io_req = {
-		.bi_rw = WRITE,
-		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->zero_area,
-		.client = ps->io_client,
-		.notify.fn = NULL,
-	};
-
-	return dm_io(&io_req, 1, &where, NULL);
+	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
···
 	struct disk_header *dh;
 	chunk_t chunk_size;
 	int chunk_size_supplied = 1;
+	char *chunk_err;
 
 	/*
 	 * Use default chunk size (or hardsect_size, if larger) if none supplied
···
 	if (r)
 		return r;
 
-	r = chunk_io(ps, 0, READ, 1);
+	r = chunk_io(ps, ps->header_area, 0, READ, 1);
 	if (r)
 		goto bad;
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 
 	if (le32_to_cpu(dh->magic) == 0) {
 		*new_snapshot = 1;
···
 	ps->version = le32_to_cpu(dh->version);
 	chunk_size = le32_to_cpu(dh->chunk_size);
 
-	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
+	if (ps->store->chunk_size == chunk_size)
 		return 0;
 
-	DMWARN("chunk size %llu in device metadata overrides "
-	       "table chunk size of %llu.",
-	       (unsigned long long)chunk_size,
-	       (unsigned long long)ps->store->chunk_size);
+	if (chunk_size_supplied)
+		DMWARN("chunk size %llu in device metadata overrides "
+		       "table chunk size of %llu.",
+		       (unsigned long long)chunk_size,
+		       (unsigned long long)ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
 
-	ps->store->chunk_size = chunk_size;
-	ps->store->chunk_mask = chunk_size - 1;
-	ps->store->chunk_shift = ffs(chunk_size) - 1;
+	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+					      &chunk_err);
+	if (r) {
+		DMERR("invalid on-disk chunk size %llu: %s.",
+		      (unsigned long long)chunk_size, chunk_err);
+		return r;
+	}
 
 	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
 				ps->io_client);
···
 {
 	struct disk_header *dh;
 
-	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 	dh->magic = cpu_to_le32(SNAP_MAGIC);
 	dh->valid = cpu_to_le32(ps->valid);
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-	return chunk_io(ps, 0, WRITE, 1);
+	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }
 
 /*
···
 	ps->valid = 1;
 	ps->version = SNAPSHOT_DISK_VERSION;
 	ps->area = NULL;
+	ps->zero_area = NULL;
+	ps->header_area = NULL;
 	ps->next_free = 2;	/* skipping the header and first area */
 	ps->current_committed = 0;
 
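Two changes interlock here: chunk_io() now takes the buffer explicitly, letting zero_disk_area() collapse to a one-liner, and the header gets a dedicated header_area so an invalidation write can no longer race with metadata still in flight in ps->area. A loose userspace analogue of the refactored helper, with plain file I/O standing in for dm-io and "cow.img" plus the sizes as demo values:

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define CHUNK_BYTES 4096

/* Userspace analogue of the refactored chunk_io(): the caller names
 * the buffer, so header, zeroing and metadata I/O never share one. */
static int chunk_io(int fd, void *area, off_t chunk, int do_write)
{
	off_t off = chunk * CHUNK_BYTES;
	ssize_t n = do_write ? pwrite(fd, area, CHUNK_BYTES, off)
			     : pread(fd, area, CHUNK_BYTES, off);

	return n == CHUNK_BYTES ? 0 : -1;
}

int main(void)
{
	int fd = open("cow.img", O_RDWR | O_CREAT, 0600);
	void *area = malloc(CHUNK_BYTES);	  /* metadata area */
	void *zero_area = calloc(1, CHUNK_BYTES); /* always zero */
	void *header_area = malloc(CHUNK_BYTES);  /* header only */

	if (fd < 0 || !area || !zero_area || !header_area)
		return 1;

	memset(header_area, 0, CHUNK_BYTES);
	/* the header write no longer reuses 'area', so an invalidation
	 * cannot corrupt metadata that is mid-flight in 'area' */
	chunk_io(fd, header_area, 0, 1);
	chunk_io(fd, zero_area, 1, 1);	/* zero_disk_area() equivalent */
	chunk_io(fd, area, 2, 0);	/* read back a metadata area */

	close(fd);
	return 0;
}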
drivers/md/dm-snap.c (+21 -2)

···
 	return 0;
 }
 
+static int snapshot_iterate_devices(struct dm_target *ti,
+				    iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_snapshot *snap = ti->private;
+
+	return fn(ti, snap->origin, 0, ti->len, data);
+}
+
+
 /*-----------------------------------------------------------------
  * Origin methods
  *---------------------------------------------------------------*/
···
 	return 0;
 }
 
+static int origin_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_dev *dev = ti->private;
+
+	return fn(ti, dev, 0, ti->len, data);
+}
+
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
 	.map     = origin_map,
 	.resume  = origin_resume,
 	.status  = origin_status,
+	.iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
···
 	.end_io  = snapshot_end_io,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
+	.iterate_devices = snapshot_iterate_devices,
 };
 
 static int __init dm_snapshot_init(void)
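Both callbacks follow the iterate_devices contract: hand each underlying device, start and length to the callout and return its verdict, so table-load checks such as device_area_is_invalid() (see dm-table.c below) run over every device. A schematic of the pattern with simplified stand-in types, not the dm API:

#include <stdio.h>

/* Simplified stand-ins for dm_target / dm_dev */
struct dev { const char *name; unsigned long long size; };
struct target { struct dev *private; unsigned long long len; };

/* callout returns nonzero to report a problem, matching the inverted
 * device_area_is_invalid() convention */
typedef int (*callout_fn)(struct target *t, struct dev *d,
			  unsigned long long start,
			  unsigned long long len, void *data);

static int iterate_devices(struct target *t, callout_fn fn, void *data)
{
	/* single-device target: the shape of origin_iterate_devices() */
	return fn(t, t->private, 0, t->len, data);
}

static int area_is_invalid(struct target *t, struct dev *d,
			   unsigned long long start,
			   unsigned long long len, void *data)
{
	(void)t; (void)data;
	return start + len > d->size;	/* runs off the end of the device */
}

int main(void)
{
	struct dev cow = { "cow", 1000 };
	struct target t = { &cow, 2000 };	/* deliberately too long */

	if (iterate_devices(&t, area_is_invalid, NULL))
		printf("%s too small for target\n", cow.name);
	return 0;
}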
drivers/md/dm-stripe.c (+12 -1)

···
 	return ret;
 }
 
+static void stripe_io_hints(struct dm_target *ti,
+			    struct queue_limits *limits)
+{
+	struct stripe_c *sc = ti->private;
+	unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+
+	blk_limits_io_min(limits, chunk_size);
+	limits->io_opt = chunk_size * sc->stripes;
+}
+
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
···
 	.end_io = stripe_end_io,
 	.status = stripe_status,
 	.iterate_devices = stripe_iterate_devices,
+	.io_hints = stripe_io_hints,
 };
 
 int __init dm_stripe_init(void)
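The hints are straight arithmetic: minimum I/O is one chunk, optimal I/O is one chunk across every stripe. A worked example with assumed values (128-sector chunks, 4 stripes):

#include <stdio.h>

int main(void)
{
	/* chunk_mask is chunk-size-in-sectors minus 1; 128 sectors = 64 KiB */
	unsigned chunk_mask = 127;
	unsigned stripes = 4;

	unsigned chunk_size = (chunk_mask + 1) << 9;	/* bytes: 65536 */
	unsigned io_min = chunk_size;			/* one chunk */
	unsigned io_opt = chunk_size * stripes;		/* full stripe: 256 KiB */

	printf("io_min=%u io_opt=%u\n", io_min, io_opt);
	return 0;
}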
drivers/md/dm-table.c (+33 -18)

···
 }
 
 /*
- * If possible, this checks an area of a destination device is valid.
+ * If possible, this checks an area of a destination device is invalid.
  */
-static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-				sector_t start, sector_t len, void *data)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
···
 	char b[BDEVNAME_SIZE];
 
 	if (!dev_size)
-		return 1;
+		return 0;
 
 	if ((start >= dev_size) || (start + len > dev_size)) {
-		DMWARN("%s: %s too small for target",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
-		return 0;
+		DMWARN("%s: %s too small for target: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
 	}
 
 	if (logical_block_size_sectors <= 1)
-		return 1;
+		return 0;
 
 	if (start & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: start=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)start,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}
 
 	if (len & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: len=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)len,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 /*
···
 	}
 
 	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-		DMWARN("%s: target device %s is misaligned",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		DMWARN("%s: target device %s is misaligned: "
+		       "physical_block_size=%u, logical_block_size=%u, "
+		       "alignment_offset=%u, start=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       q->limits.physical_block_size,
+		       q->limits.logical_block_size,
+		       q->limits.alignment_offset,
+		       (unsigned long long) start << 9);
+
 
 	/*
 	 * Check if merge fn is supported.
···
 
 	if (remaining) {
 		DMWARN("%s: table line %u (start sect %llu len %llu) "
-		       "not aligned to h/w logical block size %hu",
+		       "not aligned to h/w logical block size %u",
 		       dm_device_name(table->md), i,
 		       (unsigned long long) ti->begin,
 		       (unsigned long long) ti->len,
···
 		ti->type->iterate_devices(ti, dm_set_device_limits,
 					  &ti_limits);
 
+		/* Set I/O hints portion of queue limits */
+		if (ti->type->io_hints)
+			ti->type->io_hints(ti, &ti_limits);
+
 		/*
 		 * Check each device area is consistent with the target's
 		 * overall queue limits.
 		 */
-		if (!ti->type->iterate_devices(ti, device_area_is_valid,
-					       &ti_limits))
+		if (ti->type->iterate_devices(ti, device_area_is_invalid,
+					      &ti_limits))
 			return -EINVAL;
 
 combine_limits:
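The polarity flip is the heart of the iterator fix: multi-device targets stop iterating on the first nonzero callout return, so an "is_valid" callout made iteration stop at the first good device and skip the rest. Returning nonzero for invalid areas makes the short-circuit land on the first bad device instead. A compact sketch of that behaviour, with stand-in types rather than the dm API:

#include <stdio.h>

typedef int (*callout_fn)(int dev_ok);

/* Multi-device iteration in the style of stripe_iterate_devices():
 * stop as soon as the callout returns nonzero. */
static int iterate(int ndevs, int *devs, callout_fn fn)
{
	int i = 0, ret = 0;

	do {
		ret = fn(devs[i]);
	} while (!ret && ++i < ndevs);

	return ret;
}

static int is_invalid(int dev_ok)
{
	return !dev_ok;
}

int main(void)
{
	int devs[] = { 1, 1, 0 };	/* third device is bad */

	/* an "is_valid" callout would have stopped at devs[0] and
	 * reported success without ever looking at devs[2] */
	if (iterate(3, devs, is_invalid))
		printf("table rejected: some device area is invalid\n");
	return 0;
}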
drivers/md/dm.c (+10 -5)

···
 	dm_put(md);
 }
 
+static void free_rq_clone(struct request *clone)
+{
+	struct dm_rq_target_io *tio = clone->end_io_data;
+
+	blk_rq_unprep_clone(clone);
+	free_rq_tio(tio);
+}
+
 static void dm_unprep_request(struct request *rq)
 {
 	struct request *clone = rq->special;
-	struct dm_rq_target_io *tio = clone->end_io_data;
 
 	rq->special = NULL;
 	rq->cmd_flags &= ~REQ_DONTPREP;
 
-	blk_rq_unprep_clone(clone);
-	free_rq_tio(tio);
+	free_rq_clone(clone);
 }
 
 /*
···
 		rq->sense_len = clone->sense_len;
 	}
 
-	BUG_ON(clone->bio);
-	free_rq_tio(tio);
+	free_rq_clone(clone);
 
 	blk_end_request_all(rq, error);
 
include/linux/device-mapper.h (+4)

···
 				      iterate_devices_callout_fn fn,
 				      void *data);
 
+typedef void (*dm_io_hints_fn) (struct dm_target *ti,
+				struct queue_limits *limits);
+
 /*
  * Returns:
  * 0: The target can handle the next I/O immediately.
···
 	dm_merge_fn merge;
 	dm_busy_fn busy;
 	dm_iterate_devices_fn iterate_devices;
+	dm_io_hints_fn io_hints;
 
 	/* For internal device-mapper use. */
 	struct list_head list;
include/linux/dm-log-userspace.h (+12 -1)

···
 	(DM_ULOG_REQUEST_MASK & (request_type))
 
 struct dm_ulog_request {
-	char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+	/*
+	 * The local unique identifier (luid) and the universally unique
+	 * identifier (uuid) are used to tie a request to a specific
+	 * mirror log. A single machine log could probably make due with
+	 * just the 'luid', but a cluster-aware log must use the 'uuid' and
+	 * the 'luid'. The uuid is what is required for node to node
+	 * communication concerning a particular log, but the 'luid' helps
+	 * differentiate between logs that are being swapped and have the
+	 * same 'uuid'. (Think "live" and "inactive" device-mapper tables.)
+	 */
+	uint64_t luid;
+	char uuid[DM_UUID_LEN];
 	char padding[7];	/* Padding because DM_UUID_LEN = 129 */
 
 	int32_t error;		/* Used to report back processing errors */
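On the kernel side the luid is simply the address of the private log_c structure (see dm-log-userspace-base.c above), which is unique per live instance and costs nothing. A sketch of how a userspace server might key logs by the (uuid, luid) pair so a live table and its inactive replacement, which share a uuid during a table swap, stay distinct; the struct and lookup below are illustrative, not the actual log daemon's code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DM_UUID_LEN 129

struct log_instance {
	char uuid[DM_UUID_LEN];
	uint64_t luid;		/* distinguishes concurrent instances */
	int in_use;
};

static struct log_instance logs[16];

/* Look up a log by both identifiers; uuid alone would conflate the
 * "live" and "inactive" tables created during a table swap. */
static struct log_instance *find_log(const char *uuid, uint64_t luid)
{
	for (unsigned i = 0; i < 16; i++)
		if (logs[i].in_use && logs[i].luid == luid &&
		    !strncmp(logs[i].uuid, uuid, DM_UUID_LEN))
			return &logs[i];
	return NULL;
}

int main(void)
{
	/* two instances with the same uuid but different luids coexist */
	snprintf(logs[0].uuid, DM_UUID_LEN, "LVM-abc123");
	logs[0].luid = 0x1000;
	logs[0].in_use = 1;
	snprintf(logs[1].uuid, DM_UUID_LEN, "LVM-abc123");
	logs[1].luid = 0x2000;
	logs[1].in_use = 1;

	printf("found: %p vs %p\n",
	       (void *)find_log("LVM-abc123", 0x1000),
	       (void *)find_log("LVM-abc123", 0x2000));
	return 0;
}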