Merge tag 'dm-3.8-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm

Pull dm update from Alasdair G Kergon:
"Miscellaneous device-mapper fixes, cleanups and performance
improvements.

Of particular note:
- Disable broken WRITE SAME support in all targets except linear and
striped. Use it when kcopyd is zeroing blocks.
 - Remove several mempools from targets by moving the data into the
   bio's new front_pad area (which dm calls 'per_bio_data'); see the
   sketch that follows this list.
- Fix a race in thin provisioning if discards are misused.
- Prevent userspace from interfering with the ioctl parameters and
use kmalloc for the data buffer if it's small instead of vmalloc.
- Throttle some annoying error messages when I/O fails."
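
A minimal sketch (not taken from this pull) of how a bio-based target opts
in to per_bio_data, following the pattern the dm-flakey and dm-raid1 hunks
below use; the "example" names and the 'submitted' field are illustrative:

	#include <linux/device-mapper.h>

	/* Illustrative per-bio state; a real target defines what it needs. */
	struct example_per_bio_data {
		bool submitted;
	};

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		/* Ask the dm core to reserve this much space in each bio's front_pad. */
		ti->per_bio_data_size = sizeof(struct example_per_bio_data);
		return 0;
	}

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_per_bio_data *pb =
			dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

		/* State set here is visible again in the target's end_io hook. */
		pb->submitted = true;

		/* A real map method would also remap bio->bi_bdev, as dm-linear does. */
		return DM_MAPIO_REMAPPED;
	}

No mempool or kmem_cache is needed; the dm core sizes the bioset's front_pad
from the largest per_bio_data_size in the table (see the dm-table.c hunk below).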

* tag 'dm-3.8-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (36 commits)
dm stripe: add WRITE SAME support
dm: remove map_info
dm snapshot: do not use map_context
dm thin: dont use map_context
dm raid1: dont use map_context
dm flakey: dont use map_context
dm raid1: rename read_record to bio_record
dm: move target request nr to dm_target_io
dm snapshot: use per_bio_data
dm verity: use per_bio_data
dm raid1: use per_bio_data
dm: introduce per_bio_data
dm kcopyd: add WRITE SAME support to dm_kcopyd_zero
dm linear: add WRITE SAME support
dm: add WRITE SAME support
dm: prepare to support WRITE SAME
dm ioctl: use kmalloc if possible
dm ioctl: remove PF_MEMALLOC
dm persistent data: improve improve space map block alloc failure message
dm thin: use DMERR_LIMIT for errors
...

-25
drivers/md/dm-bio-prison.c
··· 208 EXPORT_SYMBOL_GPL(dm_cell_release); 209 210 /* 211 - * There are a couple of places where we put a bio into a cell briefly 212 - * before taking it out again. In these situations we know that no other 213 - * bio may be in the cell. This function releases the cell, and also does 214 - * a sanity check. 215 - */ 216 - static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) 217 - { 218 - BUG_ON(cell->holder != bio); 219 - BUG_ON(!bio_list_empty(&cell->bios)); 220 - 221 - __cell_release(cell, NULL); 222 - } 223 - 224 - void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio) 225 - { 226 - unsigned long flags; 227 - struct dm_bio_prison *prison = cell->prison; 228 - 229 - spin_lock_irqsave(&prison->lock, flags); 230 - __cell_release_singleton(cell, bio); 231 - spin_unlock_irqrestore(&prison->lock, flags); 232 - } 233 - EXPORT_SYMBOL_GPL(dm_cell_release_singleton); 234 - 235 - /* 236 * Sometimes we don't want the holder, just the additional bios. 237 */ 238 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
··· 208 EXPORT_SYMBOL_GPL(dm_cell_release); 209 210 /* 211 * Sometimes we don't want the holder, just the additional bios. 212 */ 213 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
-1
drivers/md/dm-bio-prison.h
··· 44 struct bio *inmate, struct dm_bio_prison_cell **ref); 45 46 void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios); 47 - void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed 48 void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates); 49 void dm_cell_error(struct dm_bio_prison_cell *cell); 50
··· 44 struct bio *inmate, struct dm_bio_prison_cell **ref); 45 46 void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios); 47 void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates); 48 void dm_cell_error(struct dm_bio_prison_cell *cell); 49
+2 -3
drivers/md/dm-crypt.c
··· 1689 return ret; 1690 } 1691 1692 - static int crypt_map(struct dm_target *ti, struct bio *bio, 1693 - union map_info *map_context) 1694 { 1695 struct dm_crypt_io *io; 1696 struct crypt_config *cc = ti->private; ··· 1845 1846 static struct target_type crypt_target = { 1847 .name = "crypt", 1848 - .version = {1, 11, 0}, 1849 .module = THIS_MODULE, 1850 .ctr = crypt_ctr, 1851 .dtr = crypt_dtr,
··· 1689 return ret; 1690 } 1691 1692 + static int crypt_map(struct dm_target *ti, struct bio *bio) 1693 { 1694 struct dm_crypt_io *io; 1695 struct crypt_config *cc = ti->private; ··· 1846 1847 static struct target_type crypt_target = { 1848 .name = "crypt", 1849 + .version = {1, 12, 0}, 1850 .module = THIS_MODULE, 1851 .ctr = crypt_ctr, 1852 .dtr = crypt_dtr,
+2 -3
drivers/md/dm-delay.c
··· 274 atomic_set(&dc->may_delay, 1); 275 } 276 277 - static int delay_map(struct dm_target *ti, struct bio *bio, 278 - union map_info *map_context) 279 { 280 struct delay_c *dc = ti->private; 281 ··· 337 338 static struct target_type delay_target = { 339 .name = "delay", 340 - .version = {1, 1, 0}, 341 .module = THIS_MODULE, 342 .ctr = delay_ctr, 343 .dtr = delay_dtr,
··· 274 atomic_set(&dc->may_delay, 1); 275 } 276 277 + static int delay_map(struct dm_target *ti, struct bio *bio) 278 { 279 struct delay_c *dc = ti->private; 280 ··· 338 339 static struct target_type delay_target = { 340 .name = "delay", 341 + .version = {1, 2, 0}, 342 .module = THIS_MODULE, 343 .ctr = delay_ctr, 344 .dtr = delay_dtr,
+13 -8
drivers/md/dm-flakey.c
··· 39 DROP_WRITES 40 }; 41 42 static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, 43 struct dm_target *ti) 44 { ··· 218 219 ti->num_flush_requests = 1; 220 ti->num_discard_requests = 1; 221 ti->private = fc; 222 return 0; 223 ··· 270 } 271 } 272 273 - static int flakey_map(struct dm_target *ti, struct bio *bio, 274 - union map_info *map_context) 275 { 276 struct flakey_c *fc = ti->private; 277 unsigned elapsed; 278 279 /* Are we alive ? */ 280 elapsed = (jiffies - fc->start_time) / HZ; ··· 283 /* 284 * Flag this bio as submitted while down. 285 */ 286 - map_context->ll = 1; 287 288 /* 289 * Map reads as normal. ··· 320 return DM_MAPIO_REMAPPED; 321 } 322 323 - static int flakey_end_io(struct dm_target *ti, struct bio *bio, 324 - int error, union map_info *map_context) 325 { 326 struct flakey_c *fc = ti->private; 327 - unsigned bio_submitted_while_down = map_context->ll; 328 329 /* 330 * Corrupt successful READs while in down state. 331 * If flags were specified, only corrupt those that match. 332 */ 333 - if (fc->corrupt_bio_byte && !error && bio_submitted_while_down && 334 (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && 335 all_corrupt_bio_flags_match(bio, fc)) 336 corrupt_bio_data(bio, fc); ··· 411 412 static struct target_type flakey_target = { 413 .name = "flakey", 414 - .version = {1, 2, 0}, 415 .module = THIS_MODULE, 416 .ctr = flakey_ctr, 417 .dtr = flakey_dtr,
··· 39 DROP_WRITES 40 }; 41 42 + struct per_bio_data { 43 + bool bio_submitted; 44 + }; 45 + 46 static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, 47 struct dm_target *ti) 48 { ··· 214 215 ti->num_flush_requests = 1; 216 ti->num_discard_requests = 1; 217 + ti->per_bio_data_size = sizeof(struct per_bio_data); 218 ti->private = fc; 219 return 0; 220 ··· 265 } 266 } 267 268 + static int flakey_map(struct dm_target *ti, struct bio *bio) 269 { 270 struct flakey_c *fc = ti->private; 271 unsigned elapsed; 272 + struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); 273 + pb->bio_submitted = false; 274 275 /* Are we alive ? */ 276 elapsed = (jiffies - fc->start_time) / HZ; ··· 277 /* 278 * Flag this bio as submitted while down. 279 */ 280 + pb->bio_submitted = true; 281 282 /* 283 * Map reads as normal. ··· 314 return DM_MAPIO_REMAPPED; 315 } 316 317 + static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) 318 { 319 struct flakey_c *fc = ti->private; 320 + struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); 321 322 /* 323 * Corrupt successful READs while in down state. 324 * If flags were specified, only corrupt those that match. 325 */ 326 + if (fc->corrupt_bio_byte && !error && pb->bio_submitted && 327 (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && 328 all_corrupt_bio_flags_match(bio, fc)) 329 corrupt_bio_data(bio, fc); ··· 406 407 static struct target_type flakey_target = { 408 .name = "flakey", 409 + .version = {1, 3, 0}, 410 .module = THIS_MODULE, 411 .ctr = flakey_ctr, 412 .dtr = flakey_dtr,
+18 -5
drivers/md/dm-io.c
··· 287 unsigned num_bvecs; 288 sector_t remaining = where->count; 289 struct request_queue *q = bdev_get_queue(where->bdev); 290 - sector_t discard_sectors; 291 292 /* 293 * where->count may be zero if rw holds a flush and we need to ··· 298 /* 299 * Allocate a suitably sized-bio. 300 */ 301 - if (rw & REQ_DISCARD) 302 num_bvecs = 1; 303 else 304 num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), ··· 311 store_io_and_region_in_bio(bio, io, region); 312 313 if (rw & REQ_DISCARD) { 314 - discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 315 - bio->bi_size = discard_sectors << SECTOR_SHIFT; 316 - remaining -= discard_sectors; 317 } else while (remaining) { 318 /* 319 * Try and add as many pages as possible.
··· 287 unsigned num_bvecs; 288 sector_t remaining = where->count; 289 struct request_queue *q = bdev_get_queue(where->bdev); 290 + unsigned short logical_block_size = queue_logical_block_size(q); 291 + sector_t num_sectors; 292 293 /* 294 * where->count may be zero if rw holds a flush and we need to ··· 297 /* 298 * Allocate a suitably sized-bio. 299 */ 300 + if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME)) 301 num_bvecs = 1; 302 else 303 num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), ··· 310 store_io_and_region_in_bio(bio, io, region); 311 312 if (rw & REQ_DISCARD) { 313 + num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 314 + bio->bi_size = num_sectors << SECTOR_SHIFT; 315 + remaining -= num_sectors; 316 + } else if (rw & REQ_WRITE_SAME) { 317 + /* 318 + * WRITE SAME only uses a single page. 319 + */ 320 + dp->get_page(dp, &page, &len, &offset); 321 + bio_add_page(bio, page, logical_block_size, offset); 322 + num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 323 + bio->bi_size = num_sectors << SECTOR_SHIFT; 324 + 325 + offset = 0; 326 + remaining -= num_sectors; 327 + dp->next_page(dp); 328 } else while (remaining) { 329 /* 330 * Try and add as many pages as possible.
+43 -21
drivers/md/dm-ioctl.c
··· 1543 return r; 1544 } 1545 1546 - static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param) 1547 { 1548 struct dm_ioctl tmp, *dmi; 1549 int secure_data; ··· 1570 1571 secure_data = tmp.flags & DM_SECURE_DATA_FLAG; 1572 1573 - dmi = vmalloc(tmp.data_size); 1574 if (!dmi) { 1575 if (secure_data && clear_user(user, tmp.data_size)) 1576 return -EFAULT; ··· 1594 if (copy_from_user(dmi, user, tmp.data_size)) 1595 goto bad; 1596 1597 /* Wipe the user buffer so we do not return it to userspace */ 1598 if (secure_data && clear_user(user, tmp.data_size)) 1599 goto bad; ··· 1610 return 0; 1611 1612 bad: 1613 - if (secure_data) 1614 - memset(dmi, 0, tmp.data_size); 1615 - vfree(dmi); 1616 return -EFAULT; 1617 } 1618 ··· 1648 static int ctl_ioctl(uint command, struct dm_ioctl __user *user) 1649 { 1650 int r = 0; 1651 - int wipe_buffer; 1652 unsigned int cmd; 1653 struct dm_ioctl *uninitialized_var(param); 1654 ioctl_fn fn = NULL; ··· 1684 } 1685 1686 /* 1687 - * Trying to avoid low memory issues when a device is 1688 - * suspended. 1689 - */ 1690 - current->flags |= PF_MEMALLOC; 1691 - 1692 - /* 1693 * Copy the parameters into kernel space. 1694 */ 1695 - r = copy_params(user, &param); 1696 - 1697 - current->flags &= ~PF_MEMALLOC; 1698 1699 if (r) 1700 return r; 1701 1702 input_param_size = param->data_size; 1703 - wipe_buffer = param->flags & DM_SECURE_DATA_FLAG; 1704 - 1705 r = validate_params(cmd, param); 1706 if (r) 1707 goto out; ··· 1706 r = -EFAULT; 1707 1708 out: 1709 - if (wipe_buffer) 1710 - memset(param, 0, input_param_size); 1711 - 1712 - vfree(param); 1713 return r; 1714 } 1715
··· 1543 return r; 1544 } 1545 1546 + #define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */ 1547 + #define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */ 1548 + 1549 + static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags) 1550 + { 1551 + if (param_flags & DM_WIPE_BUFFER) 1552 + memset(param, 0, param_size); 1553 + 1554 + if (param_flags & DM_PARAMS_VMALLOC) 1555 + vfree(param); 1556 + else 1557 + kfree(param); 1558 + } 1559 + 1560 + static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags) 1561 { 1562 struct dm_ioctl tmp, *dmi; 1563 int secure_data; ··· 1556 1557 secure_data = tmp.flags & DM_SECURE_DATA_FLAG; 1558 1559 + *param_flags = secure_data ? DM_WIPE_BUFFER : 0; 1560 + 1561 + /* 1562 + * Try to avoid low memory issues when a device is suspended. 1563 + * Use kmalloc() rather than vmalloc() when we can. 1564 + */ 1565 + dmi = NULL; 1566 + if (tmp.data_size <= KMALLOC_MAX_SIZE) 1567 + dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); 1568 + 1569 + if (!dmi) { 1570 + dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL); 1571 + *param_flags |= DM_PARAMS_VMALLOC; 1572 + } 1573 + 1574 if (!dmi) { 1575 if (secure_data && clear_user(user, tmp.data_size)) 1576 return -EFAULT; ··· 1566 if (copy_from_user(dmi, user, tmp.data_size)) 1567 goto bad; 1568 1569 + /* 1570 + * Abort if something changed the ioctl data while it was being copied. 1571 + */ 1572 + if (dmi->data_size != tmp.data_size) { 1573 + DMERR("rejecting ioctl: data size modified while processing parameters"); 1574 + goto bad; 1575 + } 1576 + 1577 /* Wipe the user buffer so we do not return it to userspace */ 1578 if (secure_data && clear_user(user, tmp.data_size)) 1579 goto bad; ··· 1574 return 0; 1575 1576 bad: 1577 + free_params(dmi, tmp.data_size, *param_flags); 1578 + 1579 return -EFAULT; 1580 } 1581 ··· 1613 static int ctl_ioctl(uint command, struct dm_ioctl __user *user) 1614 { 1615 int r = 0; 1616 + int param_flags; 1617 unsigned int cmd; 1618 struct dm_ioctl *uninitialized_var(param); 1619 ioctl_fn fn = NULL; ··· 1649 } 1650 1651 /* 1652 * Copy the parameters into kernel space. 1653 */ 1654 + r = copy_params(user, &param, &param_flags); 1655 1656 if (r) 1657 return r; 1658 1659 input_param_size = param->data_size; 1660 r = validate_params(cmd, param); 1661 if (r) 1662 goto out; ··· 1681 r = -EFAULT; 1682 1683 out: 1684 + free_params(param, input_param_size, param_flags); 1685 return r; 1686 } 1687
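
The copy_params() change above reduces to a "kmalloc first, vmalloc as a
fallback" allocation, paired with a free helper that remembers which
allocator was used. A stand-alone sketch of that pattern using the same GFP
flags; the function names and the used_vmalloc flag are illustrative:

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *alloc_param_buffer(size_t size, bool *used_vmalloc)
	{
		void *p = NULL;

		*used_vmalloc = false;

		/* Small buffers: try kmalloc, but give up quickly under pressure. */
		if (size <= KMALLOC_MAX_SIZE)
			p = kmalloc(size, GFP_NOIO | __GFP_NORETRY |
				    __GFP_NOMEMALLOC | __GFP_NOWARN);

		/* Too big for kmalloc, or kmalloc failed: fall back to vmalloc. */
		if (!p) {
			p = __vmalloc(size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH,
				      PAGE_KERNEL);
			*used_vmalloc = true;
		}

		return p;
	}

	static void free_param_buffer(void *p, bool used_vmalloc)
	{
		if (used_vmalloc)
			vfree(p);
		else
			kfree(p);
	}
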
+14 -4
drivers/md/dm-kcopyd.c
··· 349 struct dm_kcopyd_client *kc = job->kc; 350 351 if (error) { 352 - if (job->rw == WRITE) 353 job->write_err |= error; 354 else 355 job->read_err = 1; ··· 361 } 362 } 363 364 - if (job->rw == WRITE) 365 push(&kc->complete_jobs, job); 366 367 else { ··· 432 433 if (r < 0) { 434 /* error this rogue job */ 435 - if (job->rw == WRITE) 436 job->write_err = (unsigned long) -1L; 437 else 438 job->read_err = 1; ··· 585 unsigned int flags, dm_kcopyd_notify_fn fn, void *context) 586 { 587 struct kcopyd_job *job; 588 589 /* 590 * Allocate an array of jobs consisting of one master job ··· 612 memset(&job->source, 0, sizeof job->source); 613 job->source.count = job->dests[0].count; 614 job->pages = &zero_page_list; 615 - job->rw = WRITE; 616 } 617 618 job->fn = fn;
··· 349 struct dm_kcopyd_client *kc = job->kc; 350 351 if (error) { 352 + if (job->rw & WRITE) 353 job->write_err |= error; 354 else 355 job->read_err = 1; ··· 361 } 362 } 363 364 + if (job->rw & WRITE) 365 push(&kc->complete_jobs, job); 366 367 else { ··· 432 433 if (r < 0) { 434 /* error this rogue job */ 435 + if (job->rw & WRITE) 436 job->write_err = (unsigned long) -1L; 437 else 438 job->read_err = 1; ··· 585 unsigned int flags, dm_kcopyd_notify_fn fn, void *context) 586 { 587 struct kcopyd_job *job; 588 + int i; 589 590 /* 591 * Allocate an array of jobs consisting of one master job ··· 611 memset(&job->source, 0, sizeof job->source); 612 job->source.count = job->dests[0].count; 613 job->pages = &zero_page_list; 614 + 615 + /* 616 + * Use WRITE SAME to optimize zeroing if all dests support it. 617 + */ 618 + job->rw = WRITE | REQ_WRITE_SAME; 619 + for (i = 0; i < job->num_dests; i++) 620 + if (!bdev_write_same(job->dests[i].bdev)) { 621 + job->rw = WRITE; 622 + break; 623 + } 624 } 625 626 job->fn = fn;
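
dm_kcopyd_zero() keeps its existing interface, so callers need no changes;
when every destination advertises WRITE SAME it now issues
WRITE | REQ_WRITE_SAME instead of writing pages of zeroes. A minimal caller
sketch, modeled on the zeroing call visible in the dm-thin.c hunk further
down (the wrapper and callback names are illustrative):

	#include <linux/blkdev.h>
	#include <linux/dm-io.h>
	#include <linux/dm-kcopyd.h>

	/* Zero 'len' sectors starting at 'sector' on 'bdev'. */
	static int zero_region(struct dm_kcopyd_client *kc,
			       struct block_device *bdev,
			       sector_t sector, sector_t len,
			       dm_kcopyd_notify_fn zero_done, void *context)
	{
		struct dm_io_region to;

		to.bdev = bdev;
		to.sector = sector;
		to.count = len;

		/* zero_done(read_err, write_err, context) runs on completion. */
		return dm_kcopyd_zero(kc, 1, &to, 0, zero_done, context);
	}
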
+3 -3
drivers/md/dm-linear.c
··· 55 56 ti->num_flush_requests = 1; 57 ti->num_discard_requests = 1; 58 ti->private = lc; 59 return 0; 60 ··· 88 bio->bi_sector = linear_map_sector(ti, bio->bi_sector); 89 } 90 91 - static int linear_map(struct dm_target *ti, struct bio *bio, 92 - union map_info *map_context) 93 { 94 linear_map_bio(ti, bio); 95 ··· 155 156 static struct target_type linear_target = { 157 .name = "linear", 158 - .version = {1, 1, 0}, 159 .module = THIS_MODULE, 160 .ctr = linear_ctr, 161 .dtr = linear_dtr,
··· 55 56 ti->num_flush_requests = 1; 57 ti->num_discard_requests = 1; 58 + ti->num_write_same_requests = 1; 59 ti->private = lc; 60 return 0; 61 ··· 87 bio->bi_sector = linear_map_sector(ti, bio->bi_sector); 88 } 89 90 + static int linear_map(struct dm_target *ti, struct bio *bio) 91 { 92 linear_map_bio(ti, bio); 93 ··· 155 156 static struct target_type linear_target = { 157 .name = "linear", 158 + .version = {1, 2, 0}, 159 .module = THIS_MODULE, 160 .ctr = linear_ctr, 161 .dtr = linear_dtr,
+5 -3
drivers/md/dm-raid.c
··· 295 * Choose a reasonable default. All figures in sectors. 296 */ 297 if (min_region_size > (1 << 13)) { 298 DMINFO("Choosing default region size of %lu sectors", 299 region_size); 300 - region_size = min_region_size; 301 } else { 302 DMINFO("Choosing default region size of 4MiB"); 303 region_size = 1 << 13; /* sectors */ ··· 1218 context_free(rs); 1219 } 1220 1221 - static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) 1222 { 1223 struct raid_set *rs = ti->private; 1224 struct mddev *mddev = &rs->md; ··· 1432 1433 static struct target_type raid_target = { 1434 .name = "raid", 1435 - .version = {1, 3, 1}, 1436 .module = THIS_MODULE, 1437 .ctr = raid_ctr, 1438 .dtr = raid_dtr,
··· 295 * Choose a reasonable default. All figures in sectors. 296 */ 297 if (min_region_size > (1 << 13)) { 298 + /* If not a power of 2, make it the next power of 2 */ 299 + if (min_region_size & (min_region_size - 1)) 300 + region_size = 1 << fls(region_size); 301 DMINFO("Choosing default region size of %lu sectors", 302 region_size); 303 } else { 304 DMINFO("Choosing default region size of 4MiB"); 305 region_size = 1 << 13; /* sectors */ ··· 1216 context_free(rs); 1217 } 1218 1219 + static int raid_map(struct dm_target *ti, struct bio *bio) 1220 { 1221 struct raid_set *rs = ti->private; 1222 struct mddev *mddev = &rs->md; ··· 1430 1431 static struct target_type raid_target = { 1432 .name = "raid", 1433 + .version = {1, 4, 0}, 1434 .module = THIS_MODULE, 1435 .ctr = raid_ctr, 1436 .dtr = raid_dtr,
+23 -52
drivers/md/dm-raid1.c
··· 61 struct dm_region_hash *rh; 62 struct dm_kcopyd_client *kcopyd_client; 63 struct dm_io_client *io_client; 64 - mempool_t *read_record_pool; 65 66 /* recovery */ 67 region_t nr_regions; ··· 138 queue_bio(ms, bio, WRITE); 139 } 140 141 - #define MIN_READ_RECORDS 20 142 - struct dm_raid1_read_record { 143 struct mirror *m; 144 struct dm_bio_details details; 145 }; 146 - 147 - static struct kmem_cache *_dm_raid1_read_record_cache; 148 149 /* 150 * Every mirror should look like this one. ··· 874 atomic_set(&ms->suspend, 0); 875 atomic_set(&ms->default_mirror, DEFAULT_MIRROR); 876 877 - ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS, 878 - _dm_raid1_read_record_cache); 879 - 880 - if (!ms->read_record_pool) { 881 - ti->error = "Error creating mirror read_record_pool"; 882 - kfree(ms); 883 - return NULL; 884 - } 885 - 886 ms->io_client = dm_io_client_create(); 887 if (IS_ERR(ms->io_client)) { 888 ti->error = "Error creating dm_io client"; 889 - mempool_destroy(ms->read_record_pool); 890 kfree(ms); 891 return NULL; 892 } ··· 888 if (IS_ERR(ms->rh)) { 889 ti->error = "Error creating dirty region hash"; 890 dm_io_client_destroy(ms->io_client); 891 - mempool_destroy(ms->read_record_pool); 892 kfree(ms); 893 return NULL; 894 } ··· 903 904 dm_io_client_destroy(ms->io_client); 905 dm_region_hash_destroy(ms->rh); 906 - mempool_destroy(ms->read_record_pool); 907 kfree(ms); 908 } 909 ··· 1074 1075 ti->num_flush_requests = 1; 1076 ti->num_discard_requests = 1; 1077 ti->discard_zeroes_data_unsupported = true; 1078 1079 ms->kmirrord_wq = alloc_workqueue("kmirrord", ··· 1142 /* 1143 * Mirror mapping function 1144 */ 1145 - static int mirror_map(struct dm_target *ti, struct bio *bio, 1146 - union map_info *map_context) 1147 { 1148 int r, rw = bio_rw(bio); 1149 struct mirror *m; 1150 struct mirror_set *ms = ti->private; 1151 - struct dm_raid1_read_record *read_record = NULL; 1152 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); 1153 1154 if (rw == WRITE) { 1155 /* Save region for mirror_end_io() handler */ 1156 - map_context->ll = dm_rh_bio_to_region(ms->rh, bio); 1157 queue_bio(ms, bio, rw); 1158 return DM_MAPIO_SUBMITTED; 1159 } ··· 1183 if (unlikely(!m)) 1184 return -EIO; 1185 1186 - read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO); 1187 - if (likely(read_record)) { 1188 - dm_bio_record(&read_record->details, bio); 1189 - map_context->ptr = read_record; 1190 - read_record->m = m; 1191 - } 1192 1193 map_bio(m, bio); 1194 1195 return DM_MAPIO_REMAPPED; 1196 } 1197 1198 - static int mirror_end_io(struct dm_target *ti, struct bio *bio, 1199 - int error, union map_info *map_context) 1200 { 1201 int rw = bio_rw(bio); 1202 struct mirror_set *ms = (struct mirror_set *) ti->private; 1203 struct mirror *m = NULL; 1204 struct dm_bio_details *bd = NULL; 1205 - struct dm_raid1_read_record *read_record = map_context->ptr; 1206 1207 /* 1208 * We need to dec pending if this was a write. 1209 */ 1210 if (rw == WRITE) { 1211 if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) 1212 - dm_rh_dec(ms->rh, map_context->ll); 1213 return error; 1214 } 1215 ··· 1216 goto out; 1217 1218 if (unlikely(error)) { 1219 - if (!read_record) { 1220 /* 1221 * There wasn't enough memory to record necessary 1222 * information for a retry or there was no other ··· 1226 return -EIO; 1227 } 1228 1229 - m = read_record->m; 1230 1231 DMERR("Mirror read failed from %s. Trying alternative device.", 1232 m->dev->name); ··· 1238 * mirror. 
1239 */ 1240 if (default_ok(m) || mirror_available(ms, bio)) { 1241 - bd = &read_record->details; 1242 1243 dm_bio_restore(bd, bio); 1244 - mempool_free(read_record, ms->read_record_pool); 1245 - map_context->ptr = NULL; 1246 queue_bio(ms, bio, rw); 1247 - return 1; 1248 } 1249 DMERR("All replicated volumes dead, failing I/O"); 1250 } 1251 1252 out: 1253 - if (read_record) { 1254 - mempool_free(read_record, ms->read_record_pool); 1255 - map_context->ptr = NULL; 1256 - } 1257 1258 return error; 1259 } ··· 1403 1404 static struct target_type mirror_target = { 1405 .name = "mirror", 1406 - .version = {1, 12, 1}, 1407 .module = THIS_MODULE, 1408 .ctr = mirror_ctr, 1409 .dtr = mirror_dtr, ··· 1420 { 1421 int r; 1422 1423 - _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0); 1424 - if (!_dm_raid1_read_record_cache) { 1425 - DMERR("Can't allocate dm_raid1_read_record cache"); 1426 - r = -ENOMEM; 1427 - goto bad_cache; 1428 - } 1429 - 1430 r = dm_register_target(&mirror_target); 1431 if (r < 0) { 1432 DMERR("Failed to register mirror target"); ··· 1429 return 0; 1430 1431 bad_target: 1432 - kmem_cache_destroy(_dm_raid1_read_record_cache); 1433 - bad_cache: 1434 return r; 1435 } 1436 1437 static void __exit dm_mirror_exit(void) 1438 { 1439 dm_unregister_target(&mirror_target); 1440 - kmem_cache_destroy(_dm_raid1_read_record_cache); 1441 } 1442 1443 /* Module hooks */
··· 61 struct dm_region_hash *rh; 62 struct dm_kcopyd_client *kcopyd_client; 63 struct dm_io_client *io_client; 64 65 /* recovery */ 66 region_t nr_regions; ··· 139 queue_bio(ms, bio, WRITE); 140 } 141 142 + struct dm_raid1_bio_record { 143 struct mirror *m; 144 + /* if details->bi_bdev == NULL, details were not saved */ 145 struct dm_bio_details details; 146 + region_t write_region; 147 }; 148 149 /* 150 * Every mirror should look like this one. ··· 876 atomic_set(&ms->suspend, 0); 877 atomic_set(&ms->default_mirror, DEFAULT_MIRROR); 878 879 ms->io_client = dm_io_client_create(); 880 if (IS_ERR(ms->io_client)) { 881 ti->error = "Error creating dm_io client"; 882 kfree(ms); 883 return NULL; 884 } ··· 900 if (IS_ERR(ms->rh)) { 901 ti->error = "Error creating dirty region hash"; 902 dm_io_client_destroy(ms->io_client); 903 kfree(ms); 904 return NULL; 905 } ··· 916 917 dm_io_client_destroy(ms->io_client); 918 dm_region_hash_destroy(ms->rh); 919 kfree(ms); 920 } 921 ··· 1088 1089 ti->num_flush_requests = 1; 1090 ti->num_discard_requests = 1; 1091 + ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record); 1092 ti->discard_zeroes_data_unsupported = true; 1093 1094 ms->kmirrord_wq = alloc_workqueue("kmirrord", ··· 1155 /* 1156 * Mirror mapping function 1157 */ 1158 + static int mirror_map(struct dm_target *ti, struct bio *bio) 1159 { 1160 int r, rw = bio_rw(bio); 1161 struct mirror *m; 1162 struct mirror_set *ms = ti->private; 1163 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); 1164 + struct dm_raid1_bio_record *bio_record = 1165 + dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1166 + 1167 + bio_record->details.bi_bdev = NULL; 1168 1169 if (rw == WRITE) { 1170 /* Save region for mirror_end_io() handler */ 1171 + bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); 1172 queue_bio(ms, bio, rw); 1173 return DM_MAPIO_SUBMITTED; 1174 } ··· 1194 if (unlikely(!m)) 1195 return -EIO; 1196 1197 + dm_bio_record(&bio_record->details, bio); 1198 + bio_record->m = m; 1199 1200 map_bio(m, bio); 1201 1202 return DM_MAPIO_REMAPPED; 1203 } 1204 1205 + static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) 1206 { 1207 int rw = bio_rw(bio); 1208 struct mirror_set *ms = (struct mirror_set *) ti->private; 1209 struct mirror *m = NULL; 1210 struct dm_bio_details *bd = NULL; 1211 + struct dm_raid1_bio_record *bio_record = 1212 + dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1213 1214 /* 1215 * We need to dec pending if this was a write. 1216 */ 1217 if (rw == WRITE) { 1218 if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) 1219 + dm_rh_dec(ms->rh, bio_record->write_region); 1220 return error; 1221 } 1222 ··· 1231 goto out; 1232 1233 if (unlikely(error)) { 1234 + if (!bio_record->details.bi_bdev) { 1235 /* 1236 * There wasn't enough memory to record necessary 1237 * information for a retry or there was no other ··· 1241 return -EIO; 1242 } 1243 1244 + m = bio_record->m; 1245 1246 DMERR("Mirror read failed from %s. Trying alternative device.", 1247 m->dev->name); ··· 1253 * mirror. 
1254 */ 1255 if (default_ok(m) || mirror_available(ms, bio)) { 1256 + bd = &bio_record->details; 1257 1258 dm_bio_restore(bd, bio); 1259 + bio_record->details.bi_bdev = NULL; 1260 queue_bio(ms, bio, rw); 1261 + return DM_ENDIO_INCOMPLETE; 1262 } 1263 DMERR("All replicated volumes dead, failing I/O"); 1264 } 1265 1266 out: 1267 + bio_record->details.bi_bdev = NULL; 1268 1269 return error; 1270 } ··· 1422 1423 static struct target_type mirror_target = { 1424 .name = "mirror", 1425 + .version = {1, 13, 1}, 1426 .module = THIS_MODULE, 1427 .ctr = mirror_ctr, 1428 .dtr = mirror_dtr, ··· 1439 { 1440 int r; 1441 1442 r = dm_register_target(&mirror_target); 1443 if (r < 0) { 1444 DMERR("Failed to register mirror target"); ··· 1455 return 0; 1456 1457 bad_target: 1458 return r; 1459 } 1460 1461 static void __exit dm_mirror_exit(void) 1462 { 1463 dm_unregister_target(&mirror_target); 1464 } 1465 1466 /* Module hooks */
+35 -57
drivers/md/dm-snap.c
··· 79 80 /* Chunks with outstanding reads */ 81 spinlock_t tracked_chunk_lock; 82 - mempool_t *tracked_chunk_pool; 83 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; 84 85 /* The on disk metadata handler */ ··· 190 chunk_t chunk; 191 }; 192 193 - static struct kmem_cache *tracked_chunk_cache; 194 - 195 - static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, 196 - chunk_t chunk) 197 { 198 - struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, 199 - GFP_NOIO); 200 - unsigned long flags; 201 202 c->chunk = chunk; 203 204 - spin_lock_irqsave(&s->tracked_chunk_lock, flags); 205 hlist_add_head(&c->node, 206 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); 207 - spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); 208 - 209 - return c; 210 } 211 212 - static void stop_tracking_chunk(struct dm_snapshot *s, 213 - struct dm_snap_tracked_chunk *c) 214 { 215 unsigned long flags; 216 217 spin_lock_irqsave(&s->tracked_chunk_lock, flags); 218 hlist_del(&c->node); 219 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); 220 - 221 - mempool_free(c, s->tracked_chunk_pool); 222 } 223 224 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) ··· 1122 goto bad_pending_pool; 1123 } 1124 1125 - s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS, 1126 - tracked_chunk_cache); 1127 - if (!s->tracked_chunk_pool) { 1128 - ti->error = "Could not allocate tracked_chunk mempool for " 1129 - "tracking reads"; 1130 - goto bad_tracked_chunk_pool; 1131 - } 1132 - 1133 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1134 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); 1135 ··· 1129 1130 ti->private = s; 1131 ti->num_flush_requests = num_flush_requests; 1132 1133 /* Add snapshot to the list of snapshots for this origin */ 1134 /* Exceptions aren't triggered till snapshot_resume() is called */ ··· 1178 unregister_snapshot(s); 1179 1180 bad_load_and_register: 1181 - mempool_destroy(s->tracked_chunk_pool); 1182 - 1183 - bad_tracked_chunk_pool: 1184 mempool_destroy(s->pending_pool); 1185 1186 bad_pending_pool: ··· 1281 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1282 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); 1283 #endif 1284 - 1285 - mempool_destroy(s->tracked_chunk_pool); 1286 1287 __free_exceptions(s); 1288 ··· 1567 s->store->chunk_mask); 1568 } 1569 1570 - static int snapshot_map(struct dm_target *ti, struct bio *bio, 1571 - union map_info *map_context) 1572 { 1573 struct dm_exception *e; 1574 struct dm_snapshot *s = ti->private; 1575 int r = DM_MAPIO_REMAPPED; 1576 chunk_t chunk; 1577 struct dm_snap_pending_exception *pe = NULL; 1578 1579 if (bio->bi_rw & REQ_FLUSH) { 1580 bio->bi_bdev = s->cow->bdev; ··· 1661 } 1662 } else { 1663 bio->bi_bdev = s->origin->bdev; 1664 - map_context->ptr = track_chunk(s, chunk); 1665 } 1666 1667 out_unlock: ··· 1682 * If merging is currently taking place on the chunk in question, the 1683 * I/O is deferred by adding it to s->bios_queued_during_merge. 
1684 */ 1685 - static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, 1686 - union map_info *map_context) 1687 { 1688 struct dm_exception *e; 1689 struct dm_snapshot *s = ti->private; 1690 int r = DM_MAPIO_REMAPPED; 1691 chunk_t chunk; 1692 1693 if (bio->bi_rw & REQ_FLUSH) { 1694 - if (!map_context->target_request_nr) 1695 bio->bi_bdev = s->origin->bdev; 1696 else 1697 bio->bi_bdev = s->cow->bdev; 1698 - map_context->ptr = NULL; 1699 return DM_MAPIO_REMAPPED; 1700 } 1701 ··· 1724 remap_exception(s, e, bio, chunk); 1725 1726 if (bio_rw(bio) == WRITE) 1727 - map_context->ptr = track_chunk(s, chunk); 1728 goto out_unlock; 1729 } 1730 ··· 1742 return r; 1743 } 1744 1745 - static int snapshot_end_io(struct dm_target *ti, struct bio *bio, 1746 - int error, union map_info *map_context) 1747 { 1748 struct dm_snapshot *s = ti->private; 1749 - struct dm_snap_tracked_chunk *c = map_context->ptr; 1750 1751 - if (c) 1752 - stop_tracking_chunk(s, c); 1753 1754 return 0; 1755 } ··· 2116 dm_put_device(ti, dev); 2117 } 2118 2119 - static int origin_map(struct dm_target *ti, struct bio *bio, 2120 - union map_info *map_context) 2121 { 2122 struct dm_dev *dev = ti->private; 2123 bio->bi_bdev = dev->bdev; ··· 2181 2182 static struct target_type origin_target = { 2183 .name = "snapshot-origin", 2184 - .version = {1, 7, 1}, 2185 .module = THIS_MODULE, 2186 .ctr = origin_ctr, 2187 .dtr = origin_dtr, ··· 2194 2195 static struct target_type snapshot_target = { 2196 .name = "snapshot", 2197 - .version = {1, 10, 0}, 2198 .module = THIS_MODULE, 2199 .ctr = snapshot_ctr, 2200 .dtr = snapshot_dtr, ··· 2208 2209 static struct target_type merge_target = { 2210 .name = dm_snapshot_merge_target_name, 2211 - .version = {1, 1, 0}, 2212 .module = THIS_MODULE, 2213 .ctr = snapshot_ctr, 2214 .dtr = snapshot_dtr, ··· 2269 goto bad_pending_cache; 2270 } 2271 2272 - tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); 2273 - if (!tracked_chunk_cache) { 2274 - DMERR("Couldn't create cache to track chunks in use."); 2275 - r = -ENOMEM; 2276 - goto bad_tracked_chunk_cache; 2277 - } 2278 - 2279 return 0; 2280 2281 - bad_tracked_chunk_cache: 2282 - kmem_cache_destroy(pending_cache); 2283 bad_pending_cache: 2284 kmem_cache_destroy(exception_cache); 2285 bad_exception_cache: ··· 2296 exit_origin_hash(); 2297 kmem_cache_destroy(pending_cache); 2298 kmem_cache_destroy(exception_cache); 2299 - kmem_cache_destroy(tracked_chunk_cache); 2300 2301 dm_exception_store_exit(); 2302 }
··· 79 80 /* Chunks with outstanding reads */ 81 spinlock_t tracked_chunk_lock; 82 struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; 83 84 /* The on disk metadata handler */ ··· 191 chunk_t chunk; 192 }; 193 194 + static void init_tracked_chunk(struct bio *bio) 195 { 196 + struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 197 + INIT_HLIST_NODE(&c->node); 198 + } 199 + 200 + static bool is_bio_tracked(struct bio *bio) 201 + { 202 + struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 203 + return !hlist_unhashed(&c->node); 204 + } 205 + 206 + static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) 207 + { 208 + struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 209 210 c->chunk = chunk; 211 212 + spin_lock_irq(&s->tracked_chunk_lock); 213 hlist_add_head(&c->node, 214 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); 215 + spin_unlock_irq(&s->tracked_chunk_lock); 216 } 217 218 + static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) 219 { 220 + struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); 221 unsigned long flags; 222 223 spin_lock_irqsave(&s->tracked_chunk_lock, flags); 224 hlist_del(&c->node); 225 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); 226 } 227 228 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) ··· 1120 goto bad_pending_pool; 1121 } 1122 1123 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1124 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); 1125 ··· 1135 1136 ti->private = s; 1137 ti->num_flush_requests = num_flush_requests; 1138 + ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk); 1139 1140 /* Add snapshot to the list of snapshots for this origin */ 1141 /* Exceptions aren't triggered till snapshot_resume() is called */ ··· 1183 unregister_snapshot(s); 1184 1185 bad_load_and_register: 1186 mempool_destroy(s->pending_pool); 1187 1188 bad_pending_pool: ··· 1289 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) 1290 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); 1291 #endif 1292 1293 __free_exceptions(s); 1294 ··· 1577 s->store->chunk_mask); 1578 } 1579 1580 + static int snapshot_map(struct dm_target *ti, struct bio *bio) 1581 { 1582 struct dm_exception *e; 1583 struct dm_snapshot *s = ti->private; 1584 int r = DM_MAPIO_REMAPPED; 1585 chunk_t chunk; 1586 struct dm_snap_pending_exception *pe = NULL; 1587 + 1588 + init_tracked_chunk(bio); 1589 1590 if (bio->bi_rw & REQ_FLUSH) { 1591 bio->bi_bdev = s->cow->bdev; ··· 1670 } 1671 } else { 1672 bio->bi_bdev = s->origin->bdev; 1673 + track_chunk(s, bio, chunk); 1674 } 1675 1676 out_unlock: ··· 1691 * If merging is currently taking place on the chunk in question, the 1692 * I/O is deferred by adding it to s->bios_queued_during_merge. 
1693 */ 1694 + static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) 1695 { 1696 struct dm_exception *e; 1697 struct dm_snapshot *s = ti->private; 1698 int r = DM_MAPIO_REMAPPED; 1699 chunk_t chunk; 1700 1701 + init_tracked_chunk(bio); 1702 + 1703 if (bio->bi_rw & REQ_FLUSH) { 1704 + if (!dm_bio_get_target_request_nr(bio)) 1705 bio->bi_bdev = s->origin->bdev; 1706 else 1707 bio->bi_bdev = s->cow->bdev; 1708 return DM_MAPIO_REMAPPED; 1709 } 1710 ··· 1733 remap_exception(s, e, bio, chunk); 1734 1735 if (bio_rw(bio) == WRITE) 1736 + track_chunk(s, bio, chunk); 1737 goto out_unlock; 1738 } 1739 ··· 1751 return r; 1752 } 1753 1754 + static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error) 1755 { 1756 struct dm_snapshot *s = ti->private; 1757 1758 + if (is_bio_tracked(bio)) 1759 + stop_tracking_chunk(s, bio); 1760 1761 return 0; 1762 } ··· 2127 dm_put_device(ti, dev); 2128 } 2129 2130 + static int origin_map(struct dm_target *ti, struct bio *bio) 2131 { 2132 struct dm_dev *dev = ti->private; 2133 bio->bi_bdev = dev->bdev; ··· 2193 2194 static struct target_type origin_target = { 2195 .name = "snapshot-origin", 2196 + .version = {1, 8, 0}, 2197 .module = THIS_MODULE, 2198 .ctr = origin_ctr, 2199 .dtr = origin_dtr, ··· 2206 2207 static struct target_type snapshot_target = { 2208 .name = "snapshot", 2209 + .version = {1, 11, 0}, 2210 .module = THIS_MODULE, 2211 .ctr = snapshot_ctr, 2212 .dtr = snapshot_dtr, ··· 2220 2221 static struct target_type merge_target = { 2222 .name = dm_snapshot_merge_target_name, 2223 + .version = {1, 2, 0}, 2224 .module = THIS_MODULE, 2225 .ctr = snapshot_ctr, 2226 .dtr = snapshot_dtr, ··· 2281 goto bad_pending_cache; 2282 } 2283 2284 return 0; 2285 2286 bad_pending_cache: 2287 kmem_cache_destroy(exception_cache); 2288 bad_exception_cache: ··· 2317 exit_origin_hash(); 2318 kmem_cache_destroy(pending_cache); 2319 kmem_cache_destroy(exception_cache); 2320 2321 dm_exception_store_exit(); 2322 }
+10 -10
drivers/md/dm-stripe.c
··· 162 163 ti->num_flush_requests = stripes; 164 ti->num_discard_requests = stripes; 165 166 sc->chunk_size = chunk_size; 167 if (chunk_size & (chunk_size - 1)) ··· 252 *result += sc->chunk_size; /* next chunk */ 253 } 254 255 - static int stripe_map_discard(struct stripe_c *sc, struct bio *bio, 256 - uint32_t target_stripe) 257 { 258 sector_t begin, end; 259 ··· 272 } 273 } 274 275 - static int stripe_map(struct dm_target *ti, struct bio *bio, 276 - union map_info *map_context) 277 { 278 struct stripe_c *sc = ti->private; 279 uint32_t stripe; 280 unsigned target_request_nr; 281 282 if (bio->bi_rw & REQ_FLUSH) { 283 - target_request_nr = map_context->target_request_nr; 284 BUG_ON(target_request_nr >= sc->stripes); 285 bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; 286 return DM_MAPIO_REMAPPED; 287 } 288 - if (unlikely(bio->bi_rw & REQ_DISCARD)) { 289 - target_request_nr = map_context->target_request_nr; 290 BUG_ON(target_request_nr >= sc->stripes); 291 - return stripe_map_discard(sc, bio, target_request_nr); 292 } 293 294 stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); ··· 343 return 0; 344 } 345 346 - static int stripe_end_io(struct dm_target *ti, struct bio *bio, 347 - int error, union map_info *map_context) 348 { 349 unsigned i; 350 char major_minor[16];
··· 162 163 ti->num_flush_requests = stripes; 164 ti->num_discard_requests = stripes; 165 + ti->num_write_same_requests = stripes; 166 167 sc->chunk_size = chunk_size; 168 if (chunk_size & (chunk_size - 1)) ··· 251 *result += sc->chunk_size; /* next chunk */ 252 } 253 254 + static int stripe_map_range(struct stripe_c *sc, struct bio *bio, 255 + uint32_t target_stripe) 256 { 257 sector_t begin, end; 258 ··· 271 } 272 } 273 274 + static int stripe_map(struct dm_target *ti, struct bio *bio) 275 { 276 struct stripe_c *sc = ti->private; 277 uint32_t stripe; 278 unsigned target_request_nr; 279 280 if (bio->bi_rw & REQ_FLUSH) { 281 + target_request_nr = dm_bio_get_target_request_nr(bio); 282 BUG_ON(target_request_nr >= sc->stripes); 283 bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; 284 return DM_MAPIO_REMAPPED; 285 } 286 + if (unlikely(bio->bi_rw & REQ_DISCARD) || 287 + unlikely(bio->bi_rw & REQ_WRITE_SAME)) { 288 + target_request_nr = dm_bio_get_target_request_nr(bio); 289 BUG_ON(target_request_nr >= sc->stripes); 290 + return stripe_map_range(sc, bio, target_request_nr); 291 } 292 293 stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); ··· 342 return 0; 343 } 344 345 + static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error) 346 { 347 unsigned i; 348 char major_minor[16];
+40 -1
drivers/md/dm-table.c
··· 967 int dm_table_alloc_md_mempools(struct dm_table *t) 968 { 969 unsigned type = dm_table_get_type(t); 970 971 if (unlikely(type == DM_TYPE_NONE)) { 972 DMWARN("no table type is set, can't allocate mempools"); 973 return -EINVAL; 974 } 975 976 - t->mempools = dm_alloc_md_mempools(type, t->integrity_supported); 977 if (!t->mempools) 978 return -ENOMEM; 979 ··· 1423 return 1; 1424 } 1425 1426 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1427 struct queue_limits *limits) 1428 { ··· 1480 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 1481 else 1482 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); 1483 1484 dm_table_set_integrity(t); 1485
··· 967 int dm_table_alloc_md_mempools(struct dm_table *t) 968 { 969 unsigned type = dm_table_get_type(t); 970 + unsigned per_bio_data_size = 0; 971 + struct dm_target *tgt; 972 + unsigned i; 973 974 if (unlikely(type == DM_TYPE_NONE)) { 975 DMWARN("no table type is set, can't allocate mempools"); 976 return -EINVAL; 977 } 978 979 + if (type == DM_TYPE_BIO_BASED) 980 + for (i = 0; i < t->num_targets; i++) { 981 + tgt = t->targets + i; 982 + per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size); 983 + } 984 + 985 + t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size); 986 if (!t->mempools) 987 return -ENOMEM; 988 ··· 1414 return 1; 1415 } 1416 1417 + static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, 1418 + sector_t start, sector_t len, void *data) 1419 + { 1420 + struct request_queue *q = bdev_get_queue(dev->bdev); 1421 + 1422 + return q && !q->limits.max_write_same_sectors; 1423 + } 1424 + 1425 + static bool dm_table_supports_write_same(struct dm_table *t) 1426 + { 1427 + struct dm_target *ti; 1428 + unsigned i = 0; 1429 + 1430 + while (i < dm_table_get_num_targets(t)) { 1431 + ti = dm_table_get_target(t, i++); 1432 + 1433 + if (!ti->num_write_same_requests) 1434 + return false; 1435 + 1436 + if (!ti->type->iterate_devices || 1437 + !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) 1438 + return false; 1439 + } 1440 + 1441 + return true; 1442 + } 1443 + 1444 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1445 struct queue_limits *limits) 1446 { ··· 1444 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 1445 else 1446 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); 1447 + 1448 + if (!dm_table_supports_write_same(t)) 1449 + q->limits.max_write_same_sectors = 0; 1450 1451 dm_table_set_integrity(t); 1452
+2 -3
drivers/md/dm-target.c
··· 126 /* empty */ 127 } 128 129 - static int io_err_map(struct dm_target *tt, struct bio *bio, 130 - union map_info *map_context) 131 { 132 return -EIO; 133 } 134 135 static struct target_type error_target = { 136 .name = "error", 137 - .version = {1, 0, 1}, 138 .ctr = io_err_ctr, 139 .dtr = io_err_dtr, 140 .map = io_err_map,
··· 126 /* empty */ 127 } 128 129 + static int io_err_map(struct dm_target *tt, struct bio *bio) 130 { 131 return -EIO; 132 } 133 134 static struct target_type error_target = { 135 .name = "error", 136 + .version = {1, 1, 0}, 137 .ctr = io_err_ctr, 138 .dtr = io_err_dtr, 139 .map = io_err_map,
+1 -1
drivers/md/dm-thin-metadata.c
··· 408 409 pmd->tl_info.tm = pmd->tm; 410 pmd->tl_info.levels = 1; 411 - pmd->tl_info.value_type.context = &pmd->info; 412 pmd->tl_info.value_type.size = sizeof(__le64); 413 pmd->tl_info.value_type.inc = subtree_inc; 414 pmd->tl_info.value_type.dec = subtree_dec;
··· 408 409 pmd->tl_info.tm = pmd->tm; 410 pmd->tl_info.levels = 1; 411 + pmd->tl_info.value_type.context = &pmd->bl_info; 412 pmd->tl_info.value_type.size = sizeof(__le64); 413 pmd->tl_info.value_type.inc = subtree_inc; 414 pmd->tl_info.value_type.dec = subtree_dec;
+120 -114
drivers/md/dm-thin.c
··· 186 187 struct dm_thin_new_mapping *next_mapping; 188 mempool_t *mapping_pool; 189 - mempool_t *endio_hook_pool; 190 191 process_bio_fn process_bio; 192 process_bio_fn process_discard; ··· 303 bio_list_init(master); 304 305 while ((bio = bio_list_pop(&bios))) { 306 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 307 308 if (h->tc == tc) 309 bio_endio(bio, DM_ENDIO_REQUEUE); ··· 365 { 366 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && 367 dm_thin_changed_this_transaction(tc->td); 368 } 369 370 static void issue(struct thin_c *tc, struct bio *bio) ··· 484 static void overwrite_endio(struct bio *bio, int err) 485 { 486 unsigned long flags; 487 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 488 struct dm_thin_new_mapping *m = h->overwrite_mapping; 489 struct pool *pool = m->tc->pool; 490 ··· 509 /* 510 * This sends the bios in the cell back to the deferred_bios list. 511 */ 512 - static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell, 513 - dm_block_t data_block) 514 { 515 struct pool *pool = tc->pool; 516 unsigned long flags; ··· 522 } 523 524 /* 525 - * Same as cell_defer above, except it omits one particular detainee, 526 - * a write bio that covers the block and has already been processed. 527 */ 528 - static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell) 529 { 530 - struct bio_list bios; 531 struct pool *pool = tc->pool; 532 unsigned long flags; 533 - 534 - bio_list_init(&bios); 535 536 spin_lock_irqsave(&pool->lock, flags); 537 dm_cell_release_no_holder(cell, &pool->deferred_bios); ··· 566 */ 567 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 568 if (r) { 569 - DMERR("dm_thin_insert_block() failed"); 570 dm_cell_error(m->cell); 571 goto out; 572 } ··· 578 * the bios in the cell. 579 */ 580 if (bio) { 581 - cell_defer_except(tc, m->cell); 582 bio_endio(bio, 0); 583 } else 584 - cell_defer(tc, m->cell, m->data_block); 585 586 out: 587 list_del(&m->list); ··· 593 struct thin_c *tc = m->tc; 594 595 bio_io_error(m->bio); 596 - cell_defer_except(tc, m->cell); 597 - cell_defer_except(tc, m->cell2); 598 mempool_free(m, tc->pool->mapping_pool); 599 } 600 ··· 602 { 603 struct thin_c *tc = m->tc; 604 605 if (m->pass_discard) 606 remap_and_issue(tc, m->bio, m->data_block); 607 else 608 bio_endio(m->bio, 0); 609 610 - cell_defer_except(tc, m->cell); 611 - cell_defer_except(tc, m->cell2); 612 mempool_free(m, tc->pool->mapping_pool); 613 } 614 ··· 621 622 r = dm_thin_remove_block(tc->td, m->virt_block); 623 if (r) 624 - DMERR("dm_thin_remove_block() failed"); 625 626 process_prepared_discard_passdown(m); 627 } ··· 713 * bio immediately. Otherwise we use kcopyd to clone the data first. 
714 */ 715 if (io_overwrites_block(pool, bio)) { 716 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 717 718 h->overwrite_mapping = m; 719 m->bio = bio; 720 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 721 remap_and_issue(tc, bio, data_dest); 722 } else { 723 struct dm_io_region from, to; ··· 735 0, copy_complete, m); 736 if (r < 0) { 737 mempool_free(m, pool->mapping_pool); 738 - DMERR("dm_kcopyd_copy() failed"); 739 dm_cell_error(cell); 740 } 741 } ··· 783 process_prepared_mapping(m); 784 785 else if (io_overwrites_block(pool, bio)) { 786 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 787 788 h->overwrite_mapping = m; 789 m->bio = bio; 790 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 791 remap_and_issue(tc, bio, data_block); 792 } else { 793 int r; ··· 801 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m); 802 if (r < 0) { 803 mempool_free(m, pool->mapping_pool); 804 - DMERR("dm_kcopyd_zero() failed"); 805 dm_cell_error(cell); 806 } 807 } ··· 813 814 r = dm_pool_commit_metadata(pool->pmd); 815 if (r) 816 - DMERR("commit failed, error = %d", r); 817 818 return r; 819 } ··· 898 */ 899 static void retry_on_resume(struct bio *bio) 900 { 901 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 902 struct thin_c *tc = h->tc; 903 struct pool *pool = tc->pool; 904 unsigned long flags; ··· 945 */ 946 build_data_key(tc->td, lookup_result.block, &key2); 947 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) { 948 - dm_cell_release_singleton(cell, bio); 949 break; 950 } 951 ··· 971 wake_worker(pool); 972 } 973 } else { 974 /* 975 * The DM core makes sure that the discard doesn't span 976 * a block boundary. So we submit the discard of a 977 * partial block appropriately. 978 */ 979 - dm_cell_release_singleton(cell, bio); 980 - dm_cell_release_singleton(cell2, bio); 981 if ((!lookup_result.shared) && pool->pf.discard_passdown) 982 remap_and_issue(tc, bio, lookup_result.block); 983 else ··· 991 /* 992 * It isn't provisioned, just forget it. 993 */ 994 - dm_cell_release_singleton(cell, bio); 995 bio_endio(bio, 0); 996 break; 997 998 default: 999 - DMERR("discard: find block unexpectedly returned %d", r); 1000 - dm_cell_release_singleton(cell, bio); 1001 bio_io_error(bio); 1002 break; 1003 } ··· 1024 break; 1025 1026 default: 1027 - DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1028 dm_cell_error(cell); 1029 break; 1030 } ··· 1050 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1051 break_sharing(tc, bio, block, &key, lookup_result, cell); 1052 else { 1053 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 1054 1055 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); 1056 1057 - dm_cell_release_singleton(cell, bio); 1058 remap_and_issue(tc, bio, lookup_result->block); 1059 } 1060 } ··· 1070 * Remap empty bios (flushes) immediately, without provisioning. 1071 */ 1072 if (!bio->bi_size) { 1073 - dm_cell_release_singleton(cell, bio); 1074 remap_and_issue(tc, bio, 0); 1075 return; 1076 } ··· 1082 */ 1083 if (bio_data_dir(bio) == READ) { 1084 zero_fill_bio(bio); 1085 - dm_cell_release_singleton(cell, bio); 1086 bio_endio(bio, 0); 1087 return; 1088 } ··· 1101 break; 1102 1103 default: 1104 - DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); 1105 set_pool_mode(tc->pool, PM_READ_ONLY); 1106 dm_cell_error(cell); 1107 break; ··· 1128 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1129 switch (r) { 1130 case 0: 1131 - /* 1132 - * We can release this cell now. 
This thread is the only 1133 - * one that puts bios into a cell, and we know there were 1134 - * no preceding bios. 1135 - */ 1136 - /* 1137 - * TODO: this will probably have to change when discard goes 1138 - * back in. 1139 - */ 1140 - dm_cell_release_singleton(cell, bio); 1141 - 1142 - if (lookup_result.shared) 1143 process_shared_bio(tc, bio, block, &lookup_result); 1144 - else 1145 remap_and_issue(tc, bio, lookup_result.block); 1146 break; 1147 1148 case -ENODATA: 1149 if (bio_data_dir(bio) == READ && tc->origin_dev) { 1150 - dm_cell_release_singleton(cell, bio); 1151 remap_to_origin_and_issue(tc, bio); 1152 } else 1153 provision_block(tc, bio, block, cell); 1154 break; 1155 1156 default: 1157 - DMERR("dm_thin_find_block() failed, error = %d", r); 1158 - dm_cell_release_singleton(cell, bio); 1159 bio_io_error(bio); 1160 break; 1161 } ··· 1170 case 0: 1171 if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1172 bio_io_error(bio); 1173 - else 1174 remap_and_issue(tc, bio, lookup_result.block); 1175 break; 1176 1177 case -ENODATA: ··· 1183 } 1184 1185 if (tc->origin_dev) { 1186 remap_to_origin_and_issue(tc, bio); 1187 break; 1188 } ··· 1193 break; 1194 1195 default: 1196 - DMERR("dm_thin_find_block() failed, error = %d", r); 1197 bio_io_error(bio); 1198 break; 1199 } ··· 1225 spin_unlock_irqrestore(&pool->lock, flags); 1226 1227 while ((bio = bio_list_pop(&bios))) { 1228 - struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; 1229 struct thin_c *tc = h->tc; 1230 1231 /* ··· 1358 wake_worker(pool); 1359 } 1360 1361 - static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) 1362 { 1363 - struct pool *pool = tc->pool; 1364 - struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); 1365 1366 h->tc = tc; 1367 h->shared_read_entry = NULL; 1368 - h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds); 1369 h->overwrite_mapping = NULL; 1370 - 1371 - return h; 1372 } 1373 1374 /* 1375 * Non-blocking function called from the thin target's map function. 1376 */ 1377 - static int thin_bio_map(struct dm_target *ti, struct bio *bio, 1378 - union map_info *map_context) 1379 { 1380 int r; 1381 struct thin_c *tc = ti->private; 1382 dm_block_t block = get_bio_block(tc, bio); 1383 struct dm_thin_device *td = tc->td; 1384 struct dm_thin_lookup_result result; 1385 1386 - map_context->ptr = thin_hook_bio(tc, bio); 1387 1388 if (get_pool_mode(tc->pool) == PM_FAIL) { 1389 bio_io_error(bio); ··· 1416 * shared flag will be set in their case. 1417 */ 1418 thin_defer_bio(tc, bio); 1419 - r = DM_MAPIO_SUBMITTED; 1420 - } else { 1421 - remap(tc, bio, result.block); 1422 - r = DM_MAPIO_REMAPPED; 1423 } 1424 - break; 1425 1426 case -ENODATA: 1427 if (get_pool_mode(tc->pool) == PM_READ_ONLY) { ··· 1443 * of doing so. Just error it. 1444 */ 1445 bio_io_error(bio); 1446 - r = DM_MAPIO_SUBMITTED; 1447 - break; 1448 } 1449 /* fall through */ 1450 ··· 1453 * provide the hint to load the metadata into cache. 1454 */ 1455 thin_defer_bio(tc, bio); 1456 - r = DM_MAPIO_SUBMITTED; 1457 - break; 1458 1459 default: 1460 /* ··· 1462 * pool is switched to fail-io mode. 
1463 */ 1464 bio_io_error(bio); 1465 - r = DM_MAPIO_SUBMITTED; 1466 - break; 1467 } 1468 - 1469 - return r; 1470 } 1471 1472 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) ··· 1590 if (pool->next_mapping) 1591 mempool_free(pool->next_mapping, pool->mapping_pool); 1592 mempool_destroy(pool->mapping_pool); 1593 - mempool_destroy(pool->endio_hook_pool); 1594 dm_deferred_set_destroy(pool->shared_read_ds); 1595 dm_deferred_set_destroy(pool->all_io_ds); 1596 kfree(pool); 1597 } 1598 1599 static struct kmem_cache *_new_mapping_cache; 1600 - static struct kmem_cache *_endio_hook_cache; 1601 1602 static struct pool *pool_create(struct mapped_device *pool_md, 1603 struct block_device *metadata_dev, ··· 1689 goto bad_mapping_pool; 1690 } 1691 1692 - pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE, 1693 - _endio_hook_cache); 1694 - if (!pool->endio_hook_pool) { 1695 - *error = "Error creating pool's endio_hook mempool"; 1696 - err_p = ERR_PTR(-ENOMEM); 1697 - goto bad_endio_hook_pool; 1698 - } 1699 pool->ref_count = 1; 1700 pool->last_commit_jiffies = jiffies; 1701 pool->pool_md = pool_md; ··· 1697 1698 return pool; 1699 1700 - bad_endio_hook_pool: 1701 - mempool_destroy(pool->mapping_pool); 1702 bad_mapping_pool: 1703 dm_deferred_set_destroy(pool->all_io_ds); 1704 bad_all_io_ds: ··· 1979 return r; 1980 } 1981 1982 - static int pool_map(struct dm_target *ti, struct bio *bio, 1983 - union map_info *map_context) 1984 { 1985 int r; 1986 struct pool_c *pt = ti->private; ··· 2370 else 2371 DMEMIT("rw "); 2372 2373 - if (pool->pf.discard_enabled && pool->pf.discard_passdown) 2374 DMEMIT("discard_passdown"); 2375 else 2376 DMEMIT("no_discard_passdown"); ··· 2468 .name = "thin-pool", 2469 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2470 DM_TARGET_IMMUTABLE, 2471 - .version = {1, 5, 0}, 2472 .module = THIS_MODULE, 2473 .ctr = pool_ctr, 2474 .dtr = pool_dtr, ··· 2590 2591 ti->num_flush_requests = 1; 2592 ti->flush_supported = true; 2593 2594 /* In case the pool supports discards, pass them on. 
*/ 2595 if (tc->pool->pf.discard_enabled) { ··· 2624 return r; 2625 } 2626 2627 - static int thin_map(struct dm_target *ti, struct bio *bio, 2628 - union map_info *map_context) 2629 { 2630 bio->bi_sector = dm_target_offset(ti, bio->bi_sector); 2631 2632 - return thin_bio_map(ti, bio, map_context); 2633 } 2634 2635 - static int thin_endio(struct dm_target *ti, 2636 - struct bio *bio, int err, 2637 - union map_info *map_context) 2638 { 2639 unsigned long flags; 2640 - struct dm_thin_endio_hook *h = map_context->ptr; 2641 struct list_head work; 2642 struct dm_thin_new_mapping *m, *tmp; 2643 struct pool *pool = h->tc->pool; ··· 2655 if (h->all_io_entry) { 2656 INIT_LIST_HEAD(&work); 2657 dm_deferred_entry_dec(h->all_io_entry, &work); 2658 - spin_lock_irqsave(&pool->lock, flags); 2659 - list_for_each_entry_safe(m, tmp, &work, list) 2660 - list_add(&m->list, &pool->prepared_discards); 2661 - spin_unlock_irqrestore(&pool->lock, flags); 2662 } 2663 - 2664 - mempool_free(h, pool->endio_hook_pool); 2665 2666 return 0; 2667 } ··· 2758 2759 static struct target_type thin_target = { 2760 .name = "thin", 2761 - .version = {1, 5, 0}, 2762 .module = THIS_MODULE, 2763 .ctr = thin_ctr, 2764 .dtr = thin_dtr, ··· 2792 if (!_new_mapping_cache) 2793 goto bad_new_mapping_cache; 2794 2795 - _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0); 2796 - if (!_endio_hook_cache) 2797 - goto bad_endio_hook_cache; 2798 - 2799 return 0; 2800 2801 - bad_endio_hook_cache: 2802 - kmem_cache_destroy(_new_mapping_cache); 2803 bad_new_mapping_cache: 2804 dm_unregister_target(&pool_target); 2805 bad_pool_target: ··· 2808 dm_unregister_target(&pool_target); 2809 2810 kmem_cache_destroy(_new_mapping_cache); 2811 - kmem_cache_destroy(_endio_hook_cache); 2812 } 2813 2814 module_init(dm_thin_init);
··· 186 187 struct dm_thin_new_mapping *next_mapping; 188 mempool_t *mapping_pool; 189 190 process_bio_fn process_bio; 191 process_bio_fn process_discard; ··· 304 bio_list_init(master); 305 306 while ((bio = bio_list_pop(&bios))) { 307 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 308 309 if (h->tc == tc) 310 bio_endio(bio, DM_ENDIO_REQUEUE); ··· 366 { 367 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && 368 dm_thin_changed_this_transaction(tc->td); 369 + } 370 + 371 + static void inc_all_io_entry(struct pool *pool, struct bio *bio) 372 + { 373 + struct dm_thin_endio_hook *h; 374 + 375 + if (bio->bi_rw & REQ_DISCARD) 376 + return; 377 + 378 + h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 379 + h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); 380 } 381 382 static void issue(struct thin_c *tc, struct bio *bio) ··· 474 static void overwrite_endio(struct bio *bio, int err) 475 { 476 unsigned long flags; 477 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 478 struct dm_thin_new_mapping *m = h->overwrite_mapping; 479 struct pool *pool = m->tc->pool; 480 ··· 499 /* 500 * This sends the bios in the cell back to the deferred_bios list. 501 */ 502 + static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell) 503 { 504 struct pool *pool = tc->pool; 505 unsigned long flags; ··· 513 } 514 515 /* 516 + * Same as cell_defer except it omits the original holder of the cell. 517 */ 518 + static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) 519 { 520 struct pool *pool = tc->pool; 521 unsigned long flags; 522 523 spin_lock_irqsave(&pool->lock, flags); 524 dm_cell_release_no_holder(cell, &pool->deferred_bios); ··· 561 */ 562 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block); 563 if (r) { 564 + DMERR_LIMIT("dm_thin_insert_block() failed"); 565 dm_cell_error(m->cell); 566 goto out; 567 } ··· 573 * the bios in the cell. 574 */ 575 if (bio) { 576 + cell_defer_no_holder(tc, m->cell); 577 bio_endio(bio, 0); 578 } else 579 + cell_defer(tc, m->cell); 580 581 out: 582 list_del(&m->list); ··· 588 struct thin_c *tc = m->tc; 589 590 bio_io_error(m->bio); 591 + cell_defer_no_holder(tc, m->cell); 592 + cell_defer_no_holder(tc, m->cell2); 593 mempool_free(m, tc->pool->mapping_pool); 594 } 595 ··· 597 { 598 struct thin_c *tc = m->tc; 599 600 + inc_all_io_entry(tc->pool, m->bio); 601 + cell_defer_no_holder(tc, m->cell); 602 + cell_defer_no_holder(tc, m->cell2); 603 + 604 if (m->pass_discard) 605 remap_and_issue(tc, m->bio, m->data_block); 606 else 607 bio_endio(m->bio, 0); 608 609 mempool_free(m, tc->pool->mapping_pool); 610 } 611 ··· 614 615 r = dm_thin_remove_block(tc->td, m->virt_block); 616 if (r) 617 + DMERR_LIMIT("dm_thin_remove_block() failed"); 618 619 process_prepared_discard_passdown(m); 620 } ··· 706 * bio immediately. Otherwise we use kcopyd to clone the data first. 
707 */ 708 if (io_overwrites_block(pool, bio)) { 709 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 710 711 h->overwrite_mapping = m; 712 m->bio = bio; 713 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 714 + inc_all_io_entry(pool, bio); 715 remap_and_issue(tc, bio, data_dest); 716 } else { 717 struct dm_io_region from, to; ··· 727 0, copy_complete, m); 728 if (r < 0) { 729 mempool_free(m, pool->mapping_pool); 730 + DMERR_LIMIT("dm_kcopyd_copy() failed"); 731 dm_cell_error(cell); 732 } 733 } ··· 775 process_prepared_mapping(m); 776 777 else if (io_overwrites_block(pool, bio)) { 778 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 779 780 h->overwrite_mapping = m; 781 m->bio = bio; 782 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 783 + inc_all_io_entry(pool, bio); 784 remap_and_issue(tc, bio, data_block); 785 } else { 786 int r; ··· 792 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m); 793 if (r < 0) { 794 mempool_free(m, pool->mapping_pool); 795 + DMERR_LIMIT("dm_kcopyd_zero() failed"); 796 dm_cell_error(cell); 797 } 798 } ··· 804 805 r = dm_pool_commit_metadata(pool->pmd); 806 if (r) 807 + DMERR_LIMIT("commit failed: error = %d", r); 808 809 return r; 810 } ··· 889 */ 890 static void retry_on_resume(struct bio *bio) 891 { 892 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 893 struct thin_c *tc = h->tc; 894 struct pool *pool = tc->pool; 895 unsigned long flags; ··· 936 */ 937 build_data_key(tc->td, lookup_result.block, &key2); 938 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) { 939 + cell_defer_no_holder(tc, cell); 940 break; 941 } 942 ··· 962 wake_worker(pool); 963 } 964 } else { 965 + inc_all_io_entry(pool, bio); 966 + cell_defer_no_holder(tc, cell); 967 + cell_defer_no_holder(tc, cell2); 968 + 969 /* 970 * The DM core makes sure that the discard doesn't span 971 * a block boundary. So we submit the discard of a 972 * partial block appropriately. 973 */ 974 if ((!lookup_result.shared) && pool->pf.discard_passdown) 975 remap_and_issue(tc, bio, lookup_result.block); 976 else ··· 980 /* 981 * It isn't provisioned, just forget it. 982 */ 983 + cell_defer_no_holder(tc, cell); 984 bio_endio(bio, 0); 985 break; 986 987 default: 988 + DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 989 + __func__, r); 990 + cell_defer_no_holder(tc, cell); 991 bio_io_error(bio); 992 break; 993 } ··· 1012 break; 1013 1014 default: 1015 + DMERR_LIMIT("%s: alloc_data_block() failed: error = %d", 1016 + __func__, r); 1017 dm_cell_error(cell); 1018 break; 1019 } ··· 1037 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1038 break_sharing(tc, bio, block, &key, lookup_result, cell); 1039 else { 1040 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1041 1042 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); 1043 + inc_all_io_entry(pool, bio); 1044 + cell_defer_no_holder(tc, cell); 1045 1046 remap_and_issue(tc, bio, lookup_result->block); 1047 } 1048 } ··· 1056 * Remap empty bios (flushes) immediately, without provisioning. 
1057 */ 1058 if (!bio->bi_size) { 1059 + inc_all_io_entry(tc->pool, bio); 1060 + cell_defer_no_holder(tc, cell); 1061 + 1062 remap_and_issue(tc, bio, 0); 1063 return; 1064 } ··· 1066 */ 1067 if (bio_data_dir(bio) == READ) { 1068 zero_fill_bio(bio); 1069 + cell_defer_no_holder(tc, cell); 1070 bio_endio(bio, 0); 1071 return; 1072 } ··· 1085 break; 1086 1087 default: 1088 + DMERR_LIMIT("%s: alloc_data_block() failed: error = %d", 1089 + __func__, r); 1090 set_pool_mode(tc->pool, PM_READ_ONLY); 1091 dm_cell_error(cell); 1092 break; ··· 1111 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1112 switch (r) { 1113 case 0: 1114 + if (lookup_result.shared) { 1115 process_shared_bio(tc, bio, block, &lookup_result); 1116 + cell_defer_no_holder(tc, cell); 1117 + } else { 1118 + inc_all_io_entry(tc->pool, bio); 1119 + cell_defer_no_holder(tc, cell); 1120 + 1121 remap_and_issue(tc, bio, lookup_result.block); 1122 + } 1123 break; 1124 1125 case -ENODATA: 1126 if (bio_data_dir(bio) == READ && tc->origin_dev) { 1127 + inc_all_io_entry(tc->pool, bio); 1128 + cell_defer_no_holder(tc, cell); 1129 + 1130 remap_to_origin_and_issue(tc, bio); 1131 } else 1132 provision_block(tc, bio, block, cell); 1133 break; 1134 1135 default: 1136 + DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 1137 + __func__, r); 1138 + cell_defer_no_holder(tc, cell); 1139 bio_io_error(bio); 1140 break; 1141 } ··· 1156 case 0: 1157 if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1158 bio_io_error(bio); 1159 + else { 1160 + inc_all_io_entry(tc->pool, bio); 1161 remap_and_issue(tc, bio, lookup_result.block); 1162 + } 1163 break; 1164 1165 case -ENODATA: ··· 1167 } 1168 1169 if (tc->origin_dev) { 1170 + inc_all_io_entry(tc->pool, bio); 1171 remap_to_origin_and_issue(tc, bio); 1172 break; 1173 } ··· 1176 break; 1177 1178 default: 1179 + DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d", 1180 + __func__, r); 1181 bio_io_error(bio); 1182 break; 1183 } ··· 1207 spin_unlock_irqrestore(&pool->lock, flags); 1208 1209 while ((bio = bio_list_pop(&bios))) { 1210 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1211 struct thin_c *tc = h->tc; 1212 1213 /* ··· 1340 wake_worker(pool); 1341 } 1342 1343 + static void thin_hook_bio(struct thin_c *tc, struct bio *bio) 1344 { 1345 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1346 1347 h->tc = tc; 1348 h->shared_read_entry = NULL; 1349 + h->all_io_entry = NULL; 1350 h->overwrite_mapping = NULL; 1351 } 1352 1353 /* 1354 * Non-blocking function called from the thin target's map function. 1355 */ 1356 + static int thin_bio_map(struct dm_target *ti, struct bio *bio) 1357 { 1358 int r; 1359 struct thin_c *tc = ti->private; 1360 dm_block_t block = get_bio_block(tc, bio); 1361 struct dm_thin_device *td = tc->td; 1362 struct dm_thin_lookup_result result; 1363 + struct dm_bio_prison_cell *cell1, *cell2; 1364 + struct dm_cell_key key; 1365 1366 + thin_hook_bio(tc, bio); 1367 1368 if (get_pool_mode(tc->pool) == PM_FAIL) { 1369 bio_io_error(bio); ··· 1400 * shared flag will be set in their case. 
1401 */ 1402 thin_defer_bio(tc, bio); 1403 + return DM_MAPIO_SUBMITTED; 1404 } 1405 + 1406 + build_virtual_key(tc->td, block, &key); 1407 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1)) 1408 + return DM_MAPIO_SUBMITTED; 1409 + 1410 + build_data_key(tc->td, result.block, &key); 1411 + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) { 1412 + cell_defer_no_holder(tc, cell1); 1413 + return DM_MAPIO_SUBMITTED; 1414 + } 1415 + 1416 + inc_all_io_entry(tc->pool, bio); 1417 + cell_defer_no_holder(tc, cell2); 1418 + cell_defer_no_holder(tc, cell1); 1419 + 1420 + remap(tc, bio, result.block); 1421 + return DM_MAPIO_REMAPPED; 1422 1423 case -ENODATA: 1424 if (get_pool_mode(tc->pool) == PM_READ_ONLY) { ··· 1414 * of doing so. Just error it. 1415 */ 1416 bio_io_error(bio); 1417 + return DM_MAPIO_SUBMITTED; 1418 } 1419 /* fall through */ 1420 ··· 1425 * provide the hint to load the metadata into cache. 1426 */ 1427 thin_defer_bio(tc, bio); 1428 + return DM_MAPIO_SUBMITTED; 1429 1430 default: 1431 /* ··· 1435 * pool is switched to fail-io mode. 1436 */ 1437 bio_io_error(bio); 1438 + return DM_MAPIO_SUBMITTED; 1439 } 1440 } 1441 1442 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) ··· 1566 if (pool->next_mapping) 1567 mempool_free(pool->next_mapping, pool->mapping_pool); 1568 mempool_destroy(pool->mapping_pool); 1569 dm_deferred_set_destroy(pool->shared_read_ds); 1570 dm_deferred_set_destroy(pool->all_io_ds); 1571 kfree(pool); 1572 } 1573 1574 static struct kmem_cache *_new_mapping_cache; 1575 1576 static struct pool *pool_create(struct mapped_device *pool_md, 1577 struct block_device *metadata_dev, ··· 1667 goto bad_mapping_pool; 1668 } 1669 1670 pool->ref_count = 1; 1671 pool->last_commit_jiffies = jiffies; 1672 pool->pool_md = pool_md; ··· 1682 1683 return pool; 1684 1685 bad_mapping_pool: 1686 dm_deferred_set_destroy(pool->all_io_ds); 1687 bad_all_io_ds: ··· 1966 return r; 1967 } 1968 1969 + static int pool_map(struct dm_target *ti, struct bio *bio) 1970 { 1971 int r; 1972 struct pool_c *pt = ti->private; ··· 2358 else 2359 DMEMIT("rw "); 2360 2361 + if (!pool->pf.discard_enabled) 2362 + DMEMIT("ignore_discard"); 2363 + else if (pool->pf.discard_passdown) 2364 DMEMIT("discard_passdown"); 2365 else 2366 DMEMIT("no_discard_passdown"); ··· 2454 .name = "thin-pool", 2455 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2456 DM_TARGET_IMMUTABLE, 2457 + .version = {1, 6, 0}, 2458 .module = THIS_MODULE, 2459 .ctr = pool_ctr, 2460 .dtr = pool_dtr, ··· 2576 2577 ti->num_flush_requests = 1; 2578 ti->flush_supported = true; 2579 + ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook); 2580 2581 /* In case the pool supports discards, pass them on. 
*/ 2582 if (tc->pool->pf.discard_enabled) { ··· 2609 return r; 2610 } 2611 2612 + static int thin_map(struct dm_target *ti, struct bio *bio) 2613 { 2614 bio->bi_sector = dm_target_offset(ti, bio->bi_sector); 2615 2616 + return thin_bio_map(ti, bio); 2617 } 2618 2619 + static int thin_endio(struct dm_target *ti, struct bio *bio, int err) 2620 { 2621 unsigned long flags; 2622 + struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 2623 struct list_head work; 2624 struct dm_thin_new_mapping *m, *tmp; 2625 struct pool *pool = h->tc->pool; ··· 2643 if (h->all_io_entry) { 2644 INIT_LIST_HEAD(&work); 2645 dm_deferred_entry_dec(h->all_io_entry, &work); 2646 + if (!list_empty(&work)) { 2647 + spin_lock_irqsave(&pool->lock, flags); 2648 + list_for_each_entry_safe(m, tmp, &work, list) 2649 + list_add(&m->list, &pool->prepared_discards); 2650 + spin_unlock_irqrestore(&pool->lock, flags); 2651 + wake_worker(pool); 2652 + } 2653 } 2654 2655 return 0; 2656 } ··· 2745 2746 static struct target_type thin_target = { 2747 .name = "thin", 2748 + .version = {1, 6, 0}, 2749 .module = THIS_MODULE, 2750 .ctr = thin_ctr, 2751 .dtr = thin_dtr, ··· 2779 if (!_new_mapping_cache) 2780 goto bad_new_mapping_cache; 2781 2782 return 0; 2783 2784 bad_new_mapping_cache: 2785 dm_unregister_target(&pool_target); 2786 bad_pool_target: ··· 2801 dm_unregister_target(&pool_target); 2802 2803 kmem_cache_destroy(_new_mapping_cache); 2804 } 2805 2806 module_init(dm_thin_init);
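The thin target above now keeps its endio hook in the bio's front_pad: thin_ctr sets ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook) and every access goes through dm_per_bio_data(), which is what lets the old endio_hook mempool and kmem cache be deleted. A minimal sketch of the same pattern for a hypothetical bio-based target, using the 3.8-era map/end_io prototypes from these hunks (all "example" names are illustrative, not from the tree):

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "example"

/* Hypothetical per-bio context, reserved in the clone bio's front_pad. */
struct example_per_bio {
	unsigned long start_jiffies;	/* e.g. for crude latency accounting */
};

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* Ask dm core to reserve space for us in front of every clone bio. */
	ti->per_bio_data_size = sizeof(struct example_per_bio);
	ti->num_flush_requests = 1;
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	/* No mempool_alloc(): the data already sits in front of the bio. */
	struct example_per_bio *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio));

	pb->start_jiffies = jiffies;

	/* A real target would remap bio->bi_bdev/bi_sector here. */
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct example_per_bio *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio));

	if (time_after(jiffies, pb->start_jiffies + HZ))
		DMWARN("bio took more than a second");

	return error;	/* nothing to free: the front_pad goes away with the bio */
}

static struct target_type example_target = {
	.name	 = "example",
	.version = {1, 0, 0},
	.module	 = THIS_MODULE,
	.ctr	 = example_ctr,
	.map	 = example_map,
	.end_io	 = example_end_io,
};
/* dm_register_target(&example_target) module glue omitted for brevity. */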
+5 -20
drivers/md/dm-verity.c
··· 55 unsigned shash_descsize;/* the size of temporary space for crypto */ 56 int hash_failed; /* set to 1 if hash of any block failed */ 57 58 - mempool_t *io_mempool; /* mempool of struct dm_verity_io */ 59 mempool_t *vec_mempool; /* mempool of bio vector */ 60 61 struct workqueue_struct *verify_wq; ··· 65 66 struct dm_verity_io { 67 struct dm_verity *v; 68 - struct bio *bio; 69 70 /* original values of bio->bi_end_io and bio->bi_private */ 71 bio_end_io_t *orig_bi_end_io; ··· 387 */ 388 static void verity_finish_io(struct dm_verity_io *io, int error) 389 { 390 - struct bio *bio = io->bio; 391 struct dm_verity *v = io->v; 392 393 bio->bi_end_io = io->orig_bi_end_io; 394 bio->bi_private = io->orig_bi_private; 395 396 if (io->io_vec != io->io_vec_inline) 397 mempool_free(io->io_vec, v->vec_mempool); 398 - 399 - mempool_free(io, v->io_mempool); 400 401 bio_endio(bio, error); 402 } ··· 458 * Bio map function. It allocates dm_verity_io structure and bio vector and 459 * fills them. Then it issues prefetches and the I/O. 460 */ 461 - static int verity_map(struct dm_target *ti, struct bio *bio, 462 - union map_info *map_context) 463 { 464 struct dm_verity *v = ti->private; 465 struct dm_verity_io *io; ··· 481 if (bio_data_dir(bio) == WRITE) 482 return -EIO; 483 484 - io = mempool_alloc(v->io_mempool, GFP_NOIO); 485 io->v = v; 486 - io->bio = bio; 487 io->orig_bi_end_io = bio->bi_end_io; 488 io->orig_bi_private = bio->bi_private; 489 io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); ··· 603 604 if (v->vec_mempool) 605 mempool_destroy(v->vec_mempool); 606 - 607 - if (v->io_mempool) 608 - mempool_destroy(v->io_mempool); 609 610 if (v->bufio) 611 dm_bufio_client_destroy(v->bufio); ··· 832 goto bad; 833 } 834 835 - v->io_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE, 836 - sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2); 837 - if (!v->io_mempool) { 838 - ti->error = "Cannot allocate io mempool"; 839 - r = -ENOMEM; 840 - goto bad; 841 - } 842 843 v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE, 844 BIO_MAX_PAGES * sizeof(struct bio_vec)); ··· 860 861 static struct target_type verity_target = { 862 .name = "verity", 863 - .version = {1, 0, 0}, 864 .module = THIS_MODULE, 865 .ctr = verity_ctr, 866 .dtr = verity_dtr,
··· 55 unsigned shash_descsize;/* the size of temporary space for crypto */ 56 int hash_failed; /* set to 1 if hash of any block failed */ 57 58 mempool_t *vec_mempool; /* mempool of bio vector */ 59 60 struct workqueue_struct *verify_wq; ··· 66 67 struct dm_verity_io { 68 struct dm_verity *v; 69 70 /* original values of bio->bi_end_io and bio->bi_private */ 71 bio_end_io_t *orig_bi_end_io; ··· 389 */ 390 static void verity_finish_io(struct dm_verity_io *io, int error) 391 { 392 struct dm_verity *v = io->v; 393 + struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); 394 395 bio->bi_end_io = io->orig_bi_end_io; 396 bio->bi_private = io->orig_bi_private; 397 398 if (io->io_vec != io->io_vec_inline) 399 mempool_free(io->io_vec, v->vec_mempool); 400 401 bio_endio(bio, error); 402 } ··· 462 * Bio map function. It allocates dm_verity_io structure and bio vector and 463 * fills them. Then it issues prefetches and the I/O. 464 */ 465 + static int verity_map(struct dm_target *ti, struct bio *bio) 466 { 467 struct dm_verity *v = ti->private; 468 struct dm_verity_io *io; ··· 486 if (bio_data_dir(bio) == WRITE) 487 return -EIO; 488 489 + io = dm_per_bio_data(bio, ti->per_bio_data_size); 490 io->v = v; 491 io->orig_bi_end_io = bio->bi_end_io; 492 io->orig_bi_private = bio->bi_private; 493 io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); ··· 609 610 if (v->vec_mempool) 611 mempool_destroy(v->vec_mempool); 612 613 if (v->bufio) 614 dm_bufio_client_destroy(v->bufio); ··· 841 goto bad; 842 } 843 844 + ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io)); 845 846 v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE, 847 BIO_MAX_PAGES * sizeof(struct bio_vec)); ··· 875 876 static struct target_type verity_target = { 877 .name = "verity", 878 + .version = {1, 1, 0}, 879 .module = THIS_MODULE, 880 .ctr = verity_ctr, 881 .dtr = verity_dtr,
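dm-verity's per-bio structure carries a variable-size tail (the shash descriptor plus two digests), so verity_ctr rounds the total up to __alignof__(struct dm_verity_io) before storing it in ti->per_bio_data_size; dm_per_bio_data() subtracts that size from the clone bio's address, so anything that is not a multiple of the alignment would hand back a misaligned pointer. The completion path then recovers the original bio with dm_bio_from_per_bio_data(). A condensed sketch of that pairing for a hypothetical target with a scratch tail (illustrative names, relying on the helpers from the include/linux/device-mapper.h hunk further down):

#include <linux/device-mapper.h>

/* Hypothetical per-bio structure with a variable-size scratch tail. */
struct example_io {
	struct dm_target *ti;
	bio_end_io_t *orig_bi_end_io;
	void *orig_bi_private;
	/* scratch_size bytes of digest/work space follow this struct */
};

static void example_set_per_bio_size(struct dm_target *ti, size_t scratch_size)
{
	/*
	 * Keep struct example_io aligned: dm_per_bio_data() hands back
	 * (char *)bio - offsetof(struct dm_target_io, clone) - data_size,
	 * so data_size must be a multiple of the structure's alignment.
	 */
	ti->per_bio_data_size = roundup(sizeof(struct example_io) + scratch_size,
					__alignof__(struct example_io));
}

static void example_finish_io(struct example_io *io, int error)
{
	/* Inverse of dm_per_bio_data(): recover the bio that carries io. */
	struct bio *bio = dm_bio_from_per_bio_data(io, io->ti->per_bio_data_size);

	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_private = io->orig_bi_private;
	bio_endio(bio, error);
}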
+2 -3
drivers/md/dm-zero.c
··· 33 /* 34 * Return zeros only on reads 35 */ 36 - static int zero_map(struct dm_target *ti, struct bio *bio, 37 - union map_info *map_context) 38 { 39 switch(bio_rw(bio)) { 40 case READ: ··· 55 56 static struct target_type zero_target = { 57 .name = "zero", 58 - .version = {1, 0, 0}, 59 .module = THIS_MODULE, 60 .ctr = zero_ctr, 61 .map = zero_map,
··· 33 /* 34 * Return zeros only on reads 35 */ 36 + static int zero_map(struct dm_target *ti, struct bio *bio) 37 { 38 switch(bio_rw(bio)) { 39 case READ: ··· 56 57 static struct target_type zero_target = { 58 .name = "zero", 59 + .version = {1, 1, 0}, 60 .module = THIS_MODULE, 61 .ctr = zero_ctr, 62 .map = zero_map,
+58 -26
drivers/md/dm.c
··· 63 }; 64 65 /* 66 - * For bio-based dm. 67 - * One of these is allocated per target within a bio. Hopefully 68 - * this will be simplified out one day. 69 - */ 70 - struct dm_target_io { 71 - struct dm_io *io; 72 - struct dm_target *ti; 73 - union map_info info; 74 - struct bio clone; 75 - }; 76 - 77 - /* 78 * For request-based dm. 79 * One of these is allocated per request. 80 */ ··· 645 error = -EIO; 646 647 if (endio) { 648 - r = endio(tio->ti, bio, error, &tio->info); 649 if (r < 0 || r == DM_ENDIO_REQUEUE) 650 /* 651 * error and requeue request are handled ··· 1004 */ 1005 atomic_inc(&tio->io->io_count); 1006 sector = clone->bi_sector; 1007 - r = ti->type->map(ti, clone, &tio->info); 1008 if (r == DM_MAPIO_REMAPPED) { 1009 /* the bio has been remapped so dispatch it */ 1010 ··· 1099 tio->io = ci->io; 1100 tio->ti = ti; 1101 memset(&tio->info, 0, sizeof(tio->info)); 1102 1103 return tio; 1104 } ··· 1110 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs); 1111 struct bio *clone = &tio->clone; 1112 1113 - tio->info.target_request_nr = request_nr; 1114 1115 /* 1116 * Discard requests require the bio's inline iovecs be initialized. ··· 1163 ci->sector_count = 0; 1164 } 1165 1166 - static int __clone_and_map_discard(struct clone_info *ci) 1167 { 1168 struct dm_target *ti; 1169 sector_t len; ··· 1195 return -EIO; 1196 1197 /* 1198 - * Even though the device advertised discard support, 1199 - * that does not mean every target supports it, and 1200 * reconfiguration might also have changed that since the 1201 * check was performed. 1202 */ 1203 - if (!ti->num_discard_requests) 1204 return -EOPNOTSUPP; 1205 1206 - if (!ti->split_discard_requests) 1207 len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1208 else 1209 len = min(ci->sector_count, max_io_len(ci->sector, ti)); ··· 1216 return 0; 1217 } 1218 1219 static int __clone_and_map(struct clone_info *ci) 1220 { 1221 struct bio *bio = ci->bio; ··· 1236 1237 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1238 return __clone_and_map_discard(ci); 1239 1240 ti = dm_table_find_target(ci->map, ci->sector); 1241 if (!dm_target_is_valid(ti)) ··· 1969 1970 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1971 { 1972 - struct dm_md_mempools *p; 1973 1974 - if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) 1975 - /* the md already has necessary mempools */ 1976 goto out; 1977 1978 - p = dm_table_get_md_mempools(t); 1979 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); 1980 1981 md->io_pool = p->io_pool; ··· 2741 } 2742 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2743 2744 - struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity) 2745 { 2746 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); 2747 unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS; 2748 2749 if (!pools) 2750 return NULL; 2751 2752 pools->io_pool = (type == DM_TYPE_BIO_BASED) ? 2753 mempool_create_slab_pool(MIN_IOS, _io_cache) : ··· 2766 2767 pools->bs = (type == DM_TYPE_BIO_BASED) ? 2768 bioset_create(pool_size, 2769 - offsetof(struct dm_target_io, clone)) : 2770 bioset_create(pool_size, 2771 offsetof(struct dm_rq_clone_bio_info, clone)); 2772 if (!pools->bs)
··· 63 }; 64 65 /* 66 * For request-based dm. 67 * One of these is allocated per request. 68 */ ··· 657 error = -EIO; 658 659 if (endio) { 660 + r = endio(tio->ti, bio, error); 661 if (r < 0 || r == DM_ENDIO_REQUEUE) 662 /* 663 * error and requeue request are handled ··· 1016 */ 1017 atomic_inc(&tio->io->io_count); 1018 sector = clone->bi_sector; 1019 + r = ti->type->map(ti, clone); 1020 if (r == DM_MAPIO_REMAPPED) { 1021 /* the bio has been remapped so dispatch it */ 1022 ··· 1111 tio->io = ci->io; 1112 tio->ti = ti; 1113 memset(&tio->info, 0, sizeof(tio->info)); 1114 + tio->target_request_nr = 0; 1115 1116 return tio; 1117 } ··· 1121 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs); 1122 struct bio *clone = &tio->clone; 1123 1124 + tio->target_request_nr = request_nr; 1125 1126 /* 1127 * Discard requests require the bio's inline iovecs be initialized. ··· 1174 ci->sector_count = 0; 1175 } 1176 1177 + typedef unsigned (*get_num_requests_fn)(struct dm_target *ti); 1178 + 1179 + static unsigned get_num_discard_requests(struct dm_target *ti) 1180 + { 1181 + return ti->num_discard_requests; 1182 + } 1183 + 1184 + static unsigned get_num_write_same_requests(struct dm_target *ti) 1185 + { 1186 + return ti->num_write_same_requests; 1187 + } 1188 + 1189 + typedef bool (*is_split_required_fn)(struct dm_target *ti); 1190 + 1191 + static bool is_split_required_for_discard(struct dm_target *ti) 1192 + { 1193 + return ti->split_discard_requests; 1194 + } 1195 + 1196 + static int __clone_and_map_changing_extent_only(struct clone_info *ci, 1197 + get_num_requests_fn get_num_requests, 1198 + is_split_required_fn is_split_required) 1199 { 1200 struct dm_target *ti; 1201 sector_t len; ··· 1185 return -EIO; 1186 1187 /* 1188 + * Even though the device advertised support for this type of 1189 + * request, that does not mean every target supports it, and 1190 * reconfiguration might also have changed that since the 1191 * check was performed. 1192 */ 1193 + if (!get_num_requests || !get_num_requests(ti)) 1194 return -EOPNOTSUPP; 1195 1196 + if (is_split_required && !is_split_required(ti)) 1197 len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); 1198 else 1199 len = min(ci->sector_count, max_io_len(ci->sector, ti)); ··· 1206 return 0; 1207 } 1208 1209 + static int __clone_and_map_discard(struct clone_info *ci) 1210 + { 1211 + return __clone_and_map_changing_extent_only(ci, get_num_discard_requests, 1212 + is_split_required_for_discard); 1213 + } 1214 + 1215 + static int __clone_and_map_write_same(struct clone_info *ci) 1216 + { 1217 + return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL); 1218 + } 1219 + 1220 static int __clone_and_map(struct clone_info *ci) 1221 { 1222 struct bio *bio = ci->bio; ··· 1215 1216 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1217 return __clone_and_map_discard(ci); 1218 + else if (unlikely(bio->bi_rw & REQ_WRITE_SAME)) 1219 + return __clone_and_map_write_same(ci); 1220 1221 ti = dm_table_find_target(ci->map, ci->sector); 1222 if (!dm_target_is_valid(ti)) ··· 1946 1947 static void __bind_mempools(struct mapped_device *md, struct dm_table *t) 1948 { 1949 + struct dm_md_mempools *p = dm_table_get_md_mempools(t); 1950 1951 + if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) { 1952 + /* 1953 + * The md already has necessary mempools. Reload just the 1954 + * bioset because front_pad may have changed because 1955 + * a different table was loaded. 
1956 + */ 1957 + bioset_free(md->bs); 1958 + md->bs = p->bs; 1959 + p->bs = NULL; 1960 goto out; 1961 + } 1962 1963 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs); 1964 1965 md->io_pool = p->io_pool; ··· 2711 } 2712 EXPORT_SYMBOL_GPL(dm_noflush_suspending); 2713 2714 + struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size) 2715 { 2716 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL); 2717 unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS; 2718 2719 if (!pools) 2720 return NULL; 2721 + 2722 + per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io)); 2723 2724 pools->io_pool = (type == DM_TYPE_BIO_BASED) ? 2725 mempool_create_slab_pool(MIN_IOS, _io_cache) : ··· 2734 2735 pools->bs = (type == DM_TYPE_BIO_BASED) ? 2736 bioset_create(pool_size, 2737 + per_bio_data_size + offsetof(struct dm_target_io, clone)) : 2738 bioset_create(pool_size, 2739 offsetof(struct dm_rq_clone_bio_info, clone)); 2740 if (!pools->bs)
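The dm.c hunk folds the discard-only clone path into __clone_and_map_changing_extent_only(), parameterized by a get_num_requests_fn and an optional is_split_required_fn, and routes REQ_WRITE_SAME bios through the same helper; a target that leaves num_write_same_requests at its default of 0 therefore gets -EOPNOTSUPP for WRITE SAME. A sketch of how a simple remapping target's constructor would opt in (illustrative; the dm-linear and dm-stripe patches in this series add this kind of flag):

static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	ti->num_flush_requests = 1;

	/* Discards are cloned through the shared extent-only path;
	 * split_discard_requests (left false here) would additionally
	 * split them at max_io_len granularity. */
	ti->num_discard_requests = 1;

	/* WRITE SAME: leaving this at 0 makes dm core fail such bios
	 * with -EOPNOTSUPP in __clone_and_map_changing_extent_only(). */
	ti->num_write_same_requests = 1;

	return 0;
}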
+1 -1
drivers/md/dm.h
··· 159 /* 160 * Mempool operations 161 */ 162 - struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity); 163 void dm_free_md_mempools(struct dm_md_mempools *pools); 164 165 #endif
··· 159 /* 160 * Mempool operations 161 */ 162 + struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size); 163 void dm_free_md_mempools(struct dm_md_mempools *pools); 164 165 #endif
+7 -5
drivers/md/persistent-data/dm-block-manager.c
··· 428 if (!v) 429 return 0; 430 r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio)); 431 - if (unlikely(r)) 432 return r; 433 aux->validator = v; 434 } else { 435 if (unlikely(aux->validator != v)) { 436 - DMERR("validator mismatch (old=%s vs new=%s) for block %llu", 437 - aux->validator->name, v ? v->name : "NULL", 438 - (unsigned long long) 439 - dm_bufio_get_block_number(buf)); 440 return -EINVAL; 441 } 442 }
··· 428 if (!v) 429 return 0; 430 r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio)); 431 + if (unlikely(r)) { 432 + DMERR_LIMIT("%s validator check failed for block %llu", v->name, 433 + (unsigned long long) dm_bufio_get_block_number(buf)); 434 return r; 435 + } 436 aux->validator = v; 437 } else { 438 if (unlikely(aux->validator != v)) { 439 + DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu", 440 + aux->validator->name, v ? v->name : "NULL", 441 + (unsigned long long) dm_bufio_get_block_number(buf)); 442 return -EINVAL; 443 } 444 }
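Both validator failure paths above now log through DMERR_LIMIT rather than DMERR (and the read-side check gains a message it previously lacked), so a corrupted or failing metadata device cannot flood the kernel log. Conceptually DMERR_LIMIT is DMERR behind a printk_ratelimit() check; a rough sketch of the idea, not the exact in-tree macro:

/*
 * Rough sketch of a rate-limited DM error macro (not the exact in-tree
 * definition): emit the message only when printk_ratelimit() allows it.
 */
#define EXAMPLE_DMERR_LIMIT(fmt, arg...)				\
	do {								\
		if (printk_ratelimit())					\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": "	\
			       fmt "\n", ## arg);			\
	} while (0)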
+8 -8
drivers/md/persistent-data/dm-btree-internal.h
··· 36 __le32 padding; 37 } __packed; 38 39 - struct node { 40 struct node_header header; 41 __le64 keys[0]; 42 } __packed; 43 44 45 - void inc_children(struct dm_transaction_manager *tm, struct node *n, 46 struct dm_btree_value_type *vt); 47 48 int new_block(struct dm_btree_info *info, struct dm_block **result); ··· 64 void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info); 65 int exit_ro_spine(struct ro_spine *s); 66 int ro_step(struct ro_spine *s, dm_block_t new_child); 67 - struct node *ro_node(struct ro_spine *s); 68 69 struct shadow_spine { 70 struct dm_btree_info *info; ··· 98 /* 99 * Some inlines. 100 */ 101 - static inline __le64 *key_ptr(struct node *n, uint32_t index) 102 { 103 return n->keys + index; 104 } 105 106 - static inline void *value_base(struct node *n) 107 { 108 return &n->keys[le32_to_cpu(n->header.max_entries)]; 109 } 110 111 - static inline void *value_ptr(struct node *n, uint32_t index) 112 { 113 uint32_t value_size = le32_to_cpu(n->header.value_size); 114 return value_base(n) + (value_size * index); ··· 117 /* 118 * Assumes the values are suitably-aligned and converts to core format. 119 */ 120 - static inline uint64_t value64(struct node *n, uint32_t index) 121 { 122 __le64 *values_le = value_base(n); 123 ··· 127 /* 128 * Searching for a key within a single node. 129 */ 130 - int lower_bound(struct node *n, uint64_t key); 131 132 extern struct dm_block_validator btree_node_validator; 133
··· 36 __le32 padding; 37 } __packed; 38 39 + struct btree_node { 40 struct node_header header; 41 __le64 keys[0]; 42 } __packed; 43 44 45 + void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 46 struct dm_btree_value_type *vt); 47 48 int new_block(struct dm_btree_info *info, struct dm_block **result); ··· 64 void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info); 65 int exit_ro_spine(struct ro_spine *s); 66 int ro_step(struct ro_spine *s, dm_block_t new_child); 67 + struct btree_node *ro_node(struct ro_spine *s); 68 69 struct shadow_spine { 70 struct dm_btree_info *info; ··· 98 /* 99 * Some inlines. 100 */ 101 + static inline __le64 *key_ptr(struct btree_node *n, uint32_t index) 102 { 103 return n->keys + index; 104 } 105 106 + static inline void *value_base(struct btree_node *n) 107 { 108 return &n->keys[le32_to_cpu(n->header.max_entries)]; 109 } 110 111 + static inline void *value_ptr(struct btree_node *n, uint32_t index) 112 { 113 uint32_t value_size = le32_to_cpu(n->header.value_size); 114 return value_base(n) + (value_size * index); ··· 117 /* 118 * Assumes the values are suitably-aligned and converts to core format. 119 */ 120 + static inline uint64_t value64(struct btree_node *n, uint32_t index) 121 { 122 __le64 *values_le = value_base(n); 123 ··· 127 /* 128 * Searching for a key within a single node. 129 */ 130 + int lower_bound(struct btree_node *n, uint64_t key); 131 132 extern struct dm_block_validator btree_node_validator; 133
+25 -25
drivers/md/persistent-data/dm-btree-remove.c
··· 53 /* 54 * Some little utilities for moving node data around. 55 */ 56 - static void node_shift(struct node *n, int shift) 57 { 58 uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); 59 uint32_t value_size = le32_to_cpu(n->header.value_size); ··· 79 } 80 } 81 82 - static void node_copy(struct node *left, struct node *right, int shift) 83 { 84 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 85 uint32_t value_size = le32_to_cpu(left->header.value_size); ··· 108 /* 109 * Delete a specific entry from a leaf node. 110 */ 111 - static void delete_at(struct node *n, unsigned index) 112 { 113 unsigned nr_entries = le32_to_cpu(n->header.nr_entries); 114 unsigned nr_to_copy = nr_entries - (index + 1); ··· 128 n->header.nr_entries = cpu_to_le32(nr_entries - 1); 129 } 130 131 - static unsigned merge_threshold(struct node *n) 132 { 133 return le32_to_cpu(n->header.max_entries) / 3; 134 } ··· 136 struct child { 137 unsigned index; 138 struct dm_block *block; 139 - struct node *n; 140 }; 141 142 static struct dm_btree_value_type le64_type = { ··· 147 .equal = NULL 148 }; 149 150 - static int init_child(struct dm_btree_info *info, struct node *parent, 151 unsigned index, struct child *result) 152 { 153 int r, inc; ··· 177 return dm_tm_unlock(info->tm, c->block); 178 } 179 180 - static void shift(struct node *left, struct node *right, int count) 181 { 182 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 183 uint32_t nr_right = le32_to_cpu(right->header.nr_entries); ··· 203 right->header.nr_entries = cpu_to_le32(nr_right + count); 204 } 205 206 - static void __rebalance2(struct dm_btree_info *info, struct node *parent, 207 struct child *l, struct child *r) 208 { 209 - struct node *left = l->n; 210 - struct node *right = r->n; 211 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 212 uint32_t nr_right = le32_to_cpu(right->header.nr_entries); 213 unsigned threshold = 2 * merge_threshold(left) + 1; ··· 239 unsigned left_index) 240 { 241 int r; 242 - struct node *parent; 243 struct child left, right; 244 245 parent = dm_block_data(shadow_current(s)); ··· 270 * in right, then rebalance2. This wastes some cpu, but I want something 271 * simple atm. 272 */ 273 - static void delete_center_node(struct dm_btree_info *info, struct node *parent, 274 struct child *l, struct child *c, struct child *r, 275 - struct node *left, struct node *center, struct node *right, 276 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) 277 { 278 uint32_t max_entries = le32_to_cpu(left->header.max_entries); ··· 301 /* 302 * Redistributes entries among 3 sibling nodes. 
303 */ 304 - static void redistribute3(struct dm_btree_info *info, struct node *parent, 305 struct child *l, struct child *c, struct child *r, 306 - struct node *left, struct node *center, struct node *right, 307 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) 308 { 309 int s; ··· 343 *key_ptr(parent, r->index) = right->keys[0]; 344 } 345 346 - static void __rebalance3(struct dm_btree_info *info, struct node *parent, 347 struct child *l, struct child *c, struct child *r) 348 { 349 - struct node *left = l->n; 350 - struct node *center = c->n; 351 - struct node *right = r->n; 352 353 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 354 uint32_t nr_center = le32_to_cpu(center->header.nr_entries); ··· 371 unsigned left_index) 372 { 373 int r; 374 - struct node *parent = dm_block_data(shadow_current(s)); 375 struct child left, center, right; 376 377 /* ··· 421 { 422 int r; 423 struct dm_block *block; 424 - struct node *n; 425 426 r = dm_tm_read_lock(tm, b, &btree_node_validator, &block); 427 if (r) ··· 438 { 439 int i, r, has_left_sibling, has_right_sibling; 440 uint32_t child_entries; 441 - struct node *n; 442 443 n = dm_block_data(shadow_current(s)); 444 ··· 483 return r; 484 } 485 486 - static int do_leaf(struct node *n, uint64_t key, unsigned *index) 487 { 488 int i = lower_bound(n, key); 489 ··· 506 uint64_t key, unsigned *index) 507 { 508 int i = *index, r; 509 - struct node *n; 510 511 for (;;) { 512 r = shadow_step(s, root, vt); ··· 556 unsigned level, last_level = info->levels - 1; 557 int index = 0, r = 0; 558 struct shadow_spine spine; 559 - struct node *n; 560 561 init_shadow_spine(&spine, info); 562 for (level = 0; level < info->levels; level++) {
··· 53 /* 54 * Some little utilities for moving node data around. 55 */ 56 + static void node_shift(struct btree_node *n, int shift) 57 { 58 uint32_t nr_entries = le32_to_cpu(n->header.nr_entries); 59 uint32_t value_size = le32_to_cpu(n->header.value_size); ··· 79 } 80 } 81 82 + static void node_copy(struct btree_node *left, struct btree_node *right, int shift) 83 { 84 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 85 uint32_t value_size = le32_to_cpu(left->header.value_size); ··· 108 /* 109 * Delete a specific entry from a leaf node. 110 */ 111 + static void delete_at(struct btree_node *n, unsigned index) 112 { 113 unsigned nr_entries = le32_to_cpu(n->header.nr_entries); 114 unsigned nr_to_copy = nr_entries - (index + 1); ··· 128 n->header.nr_entries = cpu_to_le32(nr_entries - 1); 129 } 130 131 + static unsigned merge_threshold(struct btree_node *n) 132 { 133 return le32_to_cpu(n->header.max_entries) / 3; 134 } ··· 136 struct child { 137 unsigned index; 138 struct dm_block *block; 139 + struct btree_node *n; 140 }; 141 142 static struct dm_btree_value_type le64_type = { ··· 147 .equal = NULL 148 }; 149 150 + static int init_child(struct dm_btree_info *info, struct btree_node *parent, 151 unsigned index, struct child *result) 152 { 153 int r, inc; ··· 177 return dm_tm_unlock(info->tm, c->block); 178 } 179 180 + static void shift(struct btree_node *left, struct btree_node *right, int count) 181 { 182 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 183 uint32_t nr_right = le32_to_cpu(right->header.nr_entries); ··· 203 right->header.nr_entries = cpu_to_le32(nr_right + count); 204 } 205 206 + static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, 207 struct child *l, struct child *r) 208 { 209 + struct btree_node *left = l->n; 210 + struct btree_node *right = r->n; 211 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 212 uint32_t nr_right = le32_to_cpu(right->header.nr_entries); 213 unsigned threshold = 2 * merge_threshold(left) + 1; ··· 239 unsigned left_index) 240 { 241 int r; 242 + struct btree_node *parent; 243 struct child left, right; 244 245 parent = dm_block_data(shadow_current(s)); ··· 270 * in right, then rebalance2. This wastes some cpu, but I want something 271 * simple atm. 272 */ 273 + static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent, 274 struct child *l, struct child *c, struct child *r, 275 + struct btree_node *left, struct btree_node *center, struct btree_node *right, 276 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) 277 { 278 uint32_t max_entries = le32_to_cpu(left->header.max_entries); ··· 301 /* 302 * Redistributes entries among 3 sibling nodes. 
303 */ 304 + static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, 305 struct child *l, struct child *c, struct child *r, 306 + struct btree_node *left, struct btree_node *center, struct btree_node *right, 307 uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) 308 { 309 int s; ··· 343 *key_ptr(parent, r->index) = right->keys[0]; 344 } 345 346 + static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent, 347 struct child *l, struct child *c, struct child *r) 348 { 349 + struct btree_node *left = l->n; 350 + struct btree_node *center = c->n; 351 + struct btree_node *right = r->n; 352 353 uint32_t nr_left = le32_to_cpu(left->header.nr_entries); 354 uint32_t nr_center = le32_to_cpu(center->header.nr_entries); ··· 371 unsigned left_index) 372 { 373 int r; 374 + struct btree_node *parent = dm_block_data(shadow_current(s)); 375 struct child left, center, right; 376 377 /* ··· 421 { 422 int r; 423 struct dm_block *block; 424 + struct btree_node *n; 425 426 r = dm_tm_read_lock(tm, b, &btree_node_validator, &block); 427 if (r) ··· 438 { 439 int i, r, has_left_sibling, has_right_sibling; 440 uint32_t child_entries; 441 + struct btree_node *n; 442 443 n = dm_block_data(shadow_current(s)); 444 ··· 483 return r; 484 } 485 486 + static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index) 487 { 488 int i = lower_bound(n, key); 489 ··· 506 uint64_t key, unsigned *index) 507 { 508 int i = *index, r; 509 + struct btree_node *n; 510 511 for (;;) { 512 r = shadow_step(s, root, vt); ··· 556 unsigned level, last_level = info->levels - 1; 557 int index = 0, r = 0; 558 struct shadow_spine spine; 559 + struct btree_node *n; 560 561 init_shadow_spine(&spine, info); 562 for (level = 0; level < info->levels; level++) {
+10 -10
drivers/md/persistent-data/dm-btree-spine.c
··· 23 struct dm_block *b, 24 size_t block_size) 25 { 26 - struct node *n = dm_block_data(b); 27 struct node_header *h = &n->header; 28 29 h->blocknr = cpu_to_le64(dm_block_location(b)); ··· 38 struct dm_block *b, 39 size_t block_size) 40 { 41 - struct node *n = dm_block_data(b); 42 struct node_header *h = &n->header; 43 size_t value_size; 44 __le32 csum_disk; 45 uint32_t flags; 46 47 if (dm_block_location(b) != le64_to_cpu(h->blocknr)) { 48 - DMERR("node_check failed blocknr %llu wanted %llu", 49 - le64_to_cpu(h->blocknr), dm_block_location(b)); 50 return -ENOTBLK; 51 } 52 ··· 54 block_size - sizeof(__le32), 55 BTREE_CSUM_XOR)); 56 if (csum_disk != h->csum) { 57 - DMERR("node_check failed csum %u wanted %u", 58 - le32_to_cpu(csum_disk), le32_to_cpu(h->csum)); 59 return -EILSEQ; 60 } 61 ··· 63 64 if (sizeof(struct node_header) + 65 (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { 66 - DMERR("node_check failed: max_entries too large"); 67 return -EILSEQ; 68 } 69 70 if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { 71 - DMERR("node_check failed, too many entries"); 72 return -EILSEQ; 73 } 74 ··· 77 */ 78 flags = le32_to_cpu(h->flags); 79 if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) { 80 - DMERR("node_check failed, node is neither INTERNAL or LEAF"); 81 return -EILSEQ; 82 } 83 ··· 164 return r; 165 } 166 167 - struct node *ro_node(struct ro_spine *s) 168 { 169 struct dm_block *block; 170
··· 23 struct dm_block *b, 24 size_t block_size) 25 { 26 + struct btree_node *n = dm_block_data(b); 27 struct node_header *h = &n->header; 28 29 h->blocknr = cpu_to_le64(dm_block_location(b)); ··· 38 struct dm_block *b, 39 size_t block_size) 40 { 41 + struct btree_node *n = dm_block_data(b); 42 struct node_header *h = &n->header; 43 size_t value_size; 44 __le32 csum_disk; 45 uint32_t flags; 46 47 if (dm_block_location(b) != le64_to_cpu(h->blocknr)) { 48 + DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu", 49 + le64_to_cpu(h->blocknr), dm_block_location(b)); 50 return -ENOTBLK; 51 } 52 ··· 54 block_size - sizeof(__le32), 55 BTREE_CSUM_XOR)); 56 if (csum_disk != h->csum) { 57 + DMERR_LIMIT("node_check failed: csum %u != wanted %u", 58 + le32_to_cpu(csum_disk), le32_to_cpu(h->csum)); 59 return -EILSEQ; 60 } 61 ··· 63 64 if (sizeof(struct node_header) + 65 (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { 66 + DMERR_LIMIT("node_check failed: max_entries too large"); 67 return -EILSEQ; 68 } 69 70 if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { 71 + DMERR_LIMIT("node_check failed: too many entries"); 72 return -EILSEQ; 73 } 74 ··· 77 */ 78 flags = le32_to_cpu(h->flags); 79 if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) { 80 + DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF"); 81 return -EILSEQ; 82 } 83 ··· 164 return r; 165 } 166 167 + struct btree_node *ro_node(struct ro_spine *s) 168 { 169 struct dm_block *block; 170
+18 -13
drivers/md/persistent-data/dm-btree.c
··· 38 /*----------------------------------------------------------------*/ 39 40 /* makes the assumption that no two keys are the same. */ 41 - static int bsearch(struct node *n, uint64_t key, int want_hi) 42 { 43 int lo = -1, hi = le32_to_cpu(n->header.nr_entries); 44 ··· 58 return want_hi ? hi : lo; 59 } 60 61 - int lower_bound(struct node *n, uint64_t key) 62 { 63 return bsearch(n, key, 0); 64 } 65 66 - void inc_children(struct dm_transaction_manager *tm, struct node *n, 67 struct dm_btree_value_type *vt) 68 { 69 unsigned i; ··· 77 vt->inc(vt->context, value_ptr(n, i)); 78 } 79 80 - static int insert_at(size_t value_size, struct node *node, unsigned index, 81 uint64_t key, void *value) 82 __dm_written_to_disk(value) 83 { ··· 122 { 123 int r; 124 struct dm_block *b; 125 - struct node *n; 126 size_t block_size; 127 uint32_t max_entries; 128 ··· 154 #define MAX_SPINE_DEPTH 64 155 struct frame { 156 struct dm_block *b; 157 - struct node *n; 158 unsigned level; 159 unsigned nr_children; 160 unsigned current_child; ··· 230 dm_tm_unlock(s->tm, f->b); 231 } 232 233 int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 234 { 235 int r; ··· 246 s->tm = info->tm; 247 s->top = -1; 248 249 - r = push_frame(s, root, 1); 250 if (r) 251 goto out; 252 ··· 272 if (r) 273 goto out; 274 275 - } else if (f->level != (info->levels - 1)) { 276 b = value64(f->n, f->current_child); 277 f->current_child++; 278 r = push_frame(s, b, f->level + 1); ··· 300 /*----------------------------------------------------------------*/ 301 302 static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key, 303 - int (*search_fn)(struct node *, uint64_t), 304 uint64_t *result_key, void *v, size_t value_size) 305 { 306 int i, r; ··· 411 size_t size; 412 unsigned nr_left, nr_right; 413 struct dm_block *left, *right, *parent; 414 - struct node *ln, *rn, *pn; 415 __le64 location; 416 417 left = shadow_current(s); ··· 496 size_t size; 497 unsigned nr_left, nr_right; 498 struct dm_block *left, *right, *new_parent; 499 - struct node *pn, *ln, *rn; 500 __le64 val; 501 502 new_parent = shadow_current(s); ··· 581 uint64_t key, unsigned *index) 582 { 583 int r, i = *index, top = 1; 584 - struct node *node; 585 586 for (;;) { 587 r = shadow_step(s, root, vt); ··· 648 unsigned level, index = -1, last_level = info->levels - 1; 649 dm_block_t block = root; 650 struct shadow_spine spine; 651 - struct node *n; 652 struct dm_btree_value_type le64_type; 653 654 le64_type.context = NULL;
··· 38 /*----------------------------------------------------------------*/ 39 40 /* makes the assumption that no two keys are the same. */ 41 + static int bsearch(struct btree_node *n, uint64_t key, int want_hi) 42 { 43 int lo = -1, hi = le32_to_cpu(n->header.nr_entries); 44 ··· 58 return want_hi ? hi : lo; 59 } 60 61 + int lower_bound(struct btree_node *n, uint64_t key) 62 { 63 return bsearch(n, key, 0); 64 } 65 66 + void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 67 struct dm_btree_value_type *vt) 68 { 69 unsigned i; ··· 77 vt->inc(vt->context, value_ptr(n, i)); 78 } 79 80 + static int insert_at(size_t value_size, struct btree_node *node, unsigned index, 81 uint64_t key, void *value) 82 __dm_written_to_disk(value) 83 { ··· 122 { 123 int r; 124 struct dm_block *b; 125 + struct btree_node *n; 126 size_t block_size; 127 uint32_t max_entries; 128 ··· 154 #define MAX_SPINE_DEPTH 64 155 struct frame { 156 struct dm_block *b; 157 + struct btree_node *n; 158 unsigned level; 159 unsigned nr_children; 160 unsigned current_child; ··· 230 dm_tm_unlock(s->tm, f->b); 231 } 232 233 + static bool is_internal_level(struct dm_btree_info *info, struct frame *f) 234 + { 235 + return f->level < (info->levels - 1); 236 + } 237 + 238 int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 239 { 240 int r; ··· 241 s->tm = info->tm; 242 s->top = -1; 243 244 + r = push_frame(s, root, 0); 245 if (r) 246 goto out; 247 ··· 267 if (r) 268 goto out; 269 270 + } else if (is_internal_level(info, f)) { 271 b = value64(f->n, f->current_child); 272 f->current_child++; 273 r = push_frame(s, b, f->level + 1); ··· 295 /*----------------------------------------------------------------*/ 296 297 static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key, 298 + int (*search_fn)(struct btree_node *, uint64_t), 299 uint64_t *result_key, void *v, size_t value_size) 300 { 301 int i, r; ··· 406 size_t size; 407 unsigned nr_left, nr_right; 408 struct dm_block *left, *right, *parent; 409 + struct btree_node *ln, *rn, *pn; 410 __le64 location; 411 412 left = shadow_current(s); ··· 491 size_t size; 492 unsigned nr_left, nr_right; 493 struct dm_block *left, *right, *new_parent; 494 + struct btree_node *pn, *ln, *rn; 495 __le64 val; 496 497 new_parent = shadow_current(s); ··· 576 uint64_t key, unsigned *index) 577 { 578 int r, i = *index, top = 1; 579 + struct btree_node *node; 580 581 for (;;) { 582 r = shadow_step(s, root, vt); ··· 643 unsigned level, index = -1, last_level = info->levels - 1; 644 dm_block_t block = root; 645 struct shadow_spine spine; 646 + struct btree_node *n; 647 struct dm_btree_value_type le64_type; 648 649 le64_type.context = NULL;
+8 -8
drivers/md/persistent-data/dm-space-map-common.c
··· 39 __le32 csum_disk; 40 41 if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) { 42 - DMERR("index_check failed blocknr %llu wanted %llu", 43 - le64_to_cpu(mi_le->blocknr), dm_block_location(b)); 44 return -ENOTBLK; 45 } 46 ··· 48 block_size - sizeof(__le32), 49 INDEX_CSUM_XOR)); 50 if (csum_disk != mi_le->csum) { 51 - DMERR("index_check failed csum %u wanted %u", 52 - le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum)); 53 return -EILSEQ; 54 } 55 ··· 89 __le32 csum_disk; 90 91 if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) { 92 - DMERR("bitmap check failed blocknr %llu wanted %llu", 93 - le64_to_cpu(disk_header->blocknr), dm_block_location(b)); 94 return -ENOTBLK; 95 } 96 ··· 98 block_size - sizeof(__le32), 99 BITMAP_CSUM_XOR)); 100 if (csum_disk != disk_header->csum) { 101 - DMERR("bitmap check failed csum %u wanted %u", 102 - le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum)); 103 return -EILSEQ; 104 } 105
··· 39 __le32 csum_disk; 40 41 if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) { 42 + DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu", 43 + le64_to_cpu(mi_le->blocknr), dm_block_location(b)); 44 return -ENOTBLK; 45 } 46 ··· 48 block_size - sizeof(__le32), 49 INDEX_CSUM_XOR)); 50 if (csum_disk != mi_le->csum) { 51 + DMERR_LIMIT("index_check failed: csum %u != wanted %u", 52 + le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum)); 53 return -EILSEQ; 54 } 55 ··· 89 __le32 csum_disk; 90 91 if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) { 92 + DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu", 93 + le64_to_cpu(disk_header->blocknr), dm_block_location(b)); 94 return -ENOTBLK; 95 } 96 ··· 98 block_size - sizeof(__le32), 99 BITMAP_CSUM_XOR)); 100 if (csum_disk != disk_header->csum) { 101 + DMERR_LIMIT("bitmap check failed: csum %u != wanted %u", 102 + le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum)); 103 return -EILSEQ; 104 } 105
+1 -1
drivers/md/persistent-data/dm-space-map-metadata.c
··· 337 { 338 int r = sm_metadata_new_block_(sm, b); 339 if (r) 340 - DMERR("out of metadata space"); 341 return r; 342 } 343
··· 337 { 338 int r = sm_metadata_new_block_(sm, b); 339 if (r) 340 + DMERR("unable to allocate new metadata block"); 341 return r; 342 } 343
+47 -8
include/linux/device-mapper.h
··· 23 union map_info { 24 void *ptr; 25 unsigned long long ll; 26 - unsigned target_request_nr; 27 }; 28 29 /* ··· 45 * = 1: simple remap complete 46 * = 2: The target wants to push back the io 47 */ 48 - typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio, 49 - union map_info *map_context); 50 typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, 51 union map_info *map_context); 52 ··· 58 * 2 : The target wants to push back the io 59 */ 60 typedef int (*dm_endio_fn) (struct dm_target *ti, 61 - struct bio *bio, int error, 62 - union map_info *map_context); 63 typedef int (*dm_request_endio_fn) (struct dm_target *ti, 64 struct request *clone, int error, 65 union map_info *map_context); ··· 190 * A number of zero-length barrier requests that will be submitted 191 * to the target for the purpose of flushing cache. 192 * 193 - * The request number will be placed in union map_info->target_request_nr. 194 * It is a responsibility of the target driver to remap these requests 195 * to the real underlying devices. 196 */ 197 unsigned num_flush_requests; 198 199 /* 200 - * The number of discard requests that will be submitted to the 201 - * target. map_info->request_nr is used just like num_flush_requests. 202 */ 203 unsigned num_discard_requests; 204 205 /* target specific data */ 206 void *private; ··· 249 struct list_head list; 250 int (*congested_fn) (struct dm_target_callbacks *, int); 251 }; 252 253 int dm_register_target(struct target_type *t); 254 void dm_unregister_target(struct target_type *t);
··· 23 union map_info { 24 void *ptr; 25 unsigned long long ll; 26 }; 27 28 /* ··· 46 * = 1: simple remap complete 47 * = 2: The target wants to push back the io 48 */ 49 + typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); 50 typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone, 51 union map_info *map_context); 52 ··· 60 * 2 : The target wants to push back the io 61 */ 62 typedef int (*dm_endio_fn) (struct dm_target *ti, 63 + struct bio *bio, int error); 64 typedef int (*dm_request_endio_fn) (struct dm_target *ti, 65 struct request *clone, int error, 66 union map_info *map_context); ··· 193 * A number of zero-length barrier requests that will be submitted 194 * to the target for the purpose of flushing cache. 195 * 196 + * The request number can be accessed with dm_bio_get_target_request_nr. 197 * It is a responsibility of the target driver to remap these requests 198 * to the real underlying devices. 199 */ 200 unsigned num_flush_requests; 201 202 /* 203 + * The number of discard requests that will be submitted to the target. 204 + * The request number can be accessed with dm_bio_get_target_request_nr. 205 */ 206 unsigned num_discard_requests; 207 + 208 + /* 209 + * The number of WRITE SAME requests that will be submitted to the target. 210 + * The request number can be accessed with dm_bio_get_target_request_nr. 211 + */ 212 + unsigned num_write_same_requests; 213 + 214 + /* 215 + * The minimum number of extra bytes allocated in each bio for the 216 + * target to use. dm_per_bio_data returns the data location. 217 + */ 218 + unsigned per_bio_data_size; 219 220 /* target specific data */ 221 void *private; ··· 240 struct list_head list; 241 int (*congested_fn) (struct dm_target_callbacks *, int); 242 }; 243 + 244 + /* 245 + * For bio-based dm. 246 + * One of these is allocated for each bio. 247 + * This structure shouldn't be touched directly by target drivers. 248 + * It is here so that we can inline dm_per_bio_data and 249 + * dm_bio_from_per_bio_data 250 + */ 251 + struct dm_target_io { 252 + struct dm_io *io; 253 + struct dm_target *ti; 254 + union map_info info; 255 + unsigned target_request_nr; 256 + struct bio clone; 257 + }; 258 + 259 + static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) 260 + { 261 + return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; 262 + } 263 + 264 + static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) 265 + { 266 + return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); 267 + } 268 + 269 + static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio) 270 + { 271 + return container_of(bio, struct dm_target_io, clone)->target_request_nr; 272 + } 273 274 int dm_register_target(struct target_type *t); 275 void dm_unregister_target(struct target_type *t);
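These inline helpers work because dm core lays out each clone bio's front_pad as the target's per-bio data followed immediately by struct dm_target_io, whose final member is the clone bio handed to the target; dm_alloc_md_mempools() in the drivers/md/dm.c hunk above sizes the bioset front_pad as per_bio_data_size + offsetof(struct dm_target_io, clone) to match. A small sketch of the round trip a target can rely on, assuming it set ti->per_bio_data_size in its constructor (the check function itself is illustrative):

/*
 * front_pad layout for a bio-based clone (sizes set up by dm core):
 *
 *   [ per-bio data (per_bio_data_size) ][ struct dm_target_io ... clone bio ]
 *   ^ dm_per_bio_data()                                        ^ bio seen by
 *                                                                the target
 */
static void example_check_round_trip(struct dm_target *ti, struct bio *bio)
{
	void *data = dm_per_bio_data(bio, ti->per_bio_data_size);

	/* dm_bio_from_per_bio_data() is the exact inverse. */
	BUG_ON(dm_bio_from_per_bio_data(data, ti->per_bio_data_size) != bio);
}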
+2 -2
include/uapi/linux/dm-ioctl.h
··· 268 269 #define DM_VERSION_MAJOR 4 270 #define DM_VERSION_MINOR 23 271 - #define DM_VERSION_PATCHLEVEL 0 272 - #define DM_VERSION_EXTRA "-ioctl (2012-07-25)" 273 274 /* Status bits */ 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
··· 268 269 #define DM_VERSION_MAJOR 4 270 #define DM_VERSION_MINOR 23 271 + #define DM_VERSION_PATCHLEVEL 1 272 + #define DM_VERSION_EXTRA "-ioctl (2012-12-18)" 273 274 /* Status bits */ 275 #define DM_READONLY_FLAG (1 << 0) /* In/Out */