Merge tag 'for-4.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

- Some request-based DM core and DM multipath fixes and cleanups

- Constify a few variables in DM core and DM integrity

- Add bufio optimization and checksum failure accounting to DM
integrity

- Fix DM integrity to avoid checking integrity of failed reads

- Fix DM integrity to use init_completion

- A couple of DM log-writes target fixes

- Simplify DAX flushing by eliminating the unnecessary flush
abstraction that was stood up for DM's use.

* tag 'for-4.14/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dax: remove the pmem_dax_ops->flush abstraction
dm integrity: use init_completion instead of COMPLETION_INITIALIZER_ONSTACK
dm integrity: make blk_integrity_profile structure const
dm integrity: do not check integrity for failed read operations
dm log writes: fix >512b sectorsize support
dm log writes: don't use all the cpu while waiting to log blocks
dm ioctl: constify ioctl lookup table
dm: constify argument arrays
dm integrity: count and display checksum failures
dm integrity: optimize writing dm-bufio buffers that are partially changed
dm rq: do not update rq partially in each ending bio
dm rq: make dm-sq requeuing behavior consistent with dm-mq behavior
dm mpath: complain about unsupported __multipath_map_bio() return values
dm mpath: avoid that building with W=1 causes gcc 7 to complain about fall-through

23 files changed, 189 insertions(+), 167 deletions(-)
+14 -7
drivers/dax/super.c
··· 201 if (!dax_dev) 202 return 0; 203 204 - if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush) 205 return 0; 206 return a->mode; 207 } 208 ··· 269 } 270 EXPORT_SYMBOL_GPL(dax_copy_from_iter); 271 272 - void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 273 - size_t size) 274 { 275 - if (!dax_alive(dax_dev)) 276 return; 277 278 - if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)) 279 return; 280 281 - if (dax_dev->ops->flush) 282 - dax_dev->ops->flush(dax_dev, pgoff, addr, size); 283 } 284 EXPORT_SYMBOL_GPL(dax_flush); 285 286 void dax_write_cache(struct dax_device *dax_dev, bool wc)
··· 201 if (!dax_dev) 202 return 0; 203 204 + #ifndef CONFIG_ARCH_HAS_PMEM_API 205 + if (a == &dev_attr_write_cache.attr) 206 return 0; 207 + #endif 208 return a->mode; 209 } 210 ··· 267 } 268 EXPORT_SYMBOL_GPL(dax_copy_from_iter); 269 270 + #ifdef CONFIG_ARCH_HAS_PMEM_API 271 + void arch_wb_cache_pmem(void *addr, size_t size); 272 + void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) 273 { 274 + if (unlikely(!dax_alive(dax_dev))) 275 return; 276 277 + if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))) 278 return; 279 280 + arch_wb_cache_pmem(addr, size); 281 } 282 + #else 283 + void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) 284 + { 285 + } 286 + #endif 287 EXPORT_SYMBOL_GPL(dax_flush); 288 289 void dax_write_cache(struct dax_device *dax_dev, bool wc)
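The drivers/dax/super.c hunk above is the heart of the flush-abstraction removal: dax_flush() now calls arch_wb_cache_pmem() directly (guarded by CONFIG_ARCH_HAS_PMEM_API) instead of dispatching through dax_dev->ops->flush, and it no longer takes a pgoff. A minimal caller-side sketch of the simplified interface, with the helper name invented for illustration; this is the call shape fs/dax.c adopts in the hunks further down:

    #include <linux/dax.h>

    /* hypothetical helper, not a symbol from the patch */
    static void example_sync_range(struct dax_device *dax_dev,
                                   void *kaddr, size_t len)
    {
            /* no-op without CONFIG_ARCH_HAS_PMEM_API or with write cache off */
            dax_flush(dax_dev, kaddr, len);
    }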
+67 -28
drivers/md/dm-bufio.c
··· 64 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1)) 65 66 /* 67 * dm_buffer->list_mode 68 */ 69 #define LIST_CLEAN 0 ··· 155 blk_status_t write_error; 156 unsigned long state; 157 unsigned long last_accessed; 158 struct dm_bufio_client *c; 159 struct list_head write_list; 160 struct bio bio; ··· 570 } 571 572 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, 573 - unsigned n_sectors, bio_end_io_t *end_io) 574 { 575 int r; 576 struct dm_io_request io_req = { ··· 588 589 if (b->data_mode != DATA_MODE_VMALLOC) { 590 io_req.mem.type = DM_IO_KMEM; 591 - io_req.mem.ptr.addr = b->data; 592 } else { 593 io_req.mem.type = DM_IO_VMA; 594 - io_req.mem.ptr.vma = b->data; 595 } 596 597 b->bio.bi_end_io = end_io; ··· 619 } 620 621 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, 622 - unsigned n_sectors, bio_end_io_t *end_io) 623 { 624 char *ptr; 625 - int len; 626 627 bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); 628 b->bio.bi_iter.bi_sector = sector; ··· 635 b->bio.bi_private = end_io; 636 bio_set_op_attrs(&b->bio, rw, 0); 637 638 - /* 639 - * We assume that if len >= PAGE_SIZE ptr is page-aligned. 640 - * If len < PAGE_SIZE the buffer doesn't cross page boundary. 641 - */ 642 - ptr = b->data; 643 len = n_sectors << SECTOR_SHIFT; 644 645 - if (len >= PAGE_SIZE) 646 - BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1)); 647 - else 648 - BUG_ON((unsigned long)ptr & (len - 1)); 649 - 650 do { 651 - if (!bio_add_page(&b->bio, virt_to_page(ptr), 652 - len < PAGE_SIZE ? len : PAGE_SIZE, 653 offset_in_page(ptr))) { 654 BUG_ON(b->c->block_size <= PAGE_SIZE); 655 - use_dmio(b, rw, sector, n_sectors, end_io); 656 return; 657 } 658 659 - len -= PAGE_SIZE; 660 - ptr += PAGE_SIZE; 661 } while (len > 0); 662 663 submit_bio(&b->bio); ··· 658 { 659 unsigned n_sectors; 660 sector_t sector; 661 - 662 - if (rw == WRITE && b->c->write_callback) 663 - b->c->write_callback(b); 664 665 sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; 666 - n_sectors = 1 << b->c->sectors_per_block_bits; 667 668 if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) && 669 b->data_mode != DATA_MODE_VMALLOC) 670 - use_inline_bio(b, rw, sector, n_sectors, end_io); 671 else 672 - use_dmio(b, rw, sector, n_sectors, end_io); 673 } 674 675 /*---------------------------------------------------------------- ··· 735 736 clear_bit(B_DIRTY, &b->state); 737 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); 738 739 if (!write_list) 740 submit_io(b, WRITE, write_endio); ··· 1240 } 1241 EXPORT_SYMBOL_GPL(dm_bufio_release); 1242 1243 - void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) 1244 { 1245 struct dm_bufio_client *c = b->c; 1246 1247 dm_bufio_lock(c); 1248 1249 BUG_ON(test_bit(B_READING, &b->state)); 1250 1251 - if (!test_and_set_bit(B_DIRTY, &b->state)) 1252 __relink_lru(b, LIST_DIRTY); 1253 1254 dm_bufio_unlock(c); 1255 } 1256 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); 1257 ··· 1435 wait_on_bit_io(&b->state, B_WRITING, 1436 TASK_UNINTERRUPTIBLE); 1437 set_bit(B_DIRTY, &b->state); 1438 __unlink_buffer(b); 1439 __link_buffer(b, new_block, LIST_DIRTY); 1440 } else {
··· 64 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1)) 65 66 /* 67 + * Align buffer writes to this boundary. 68 + * Tests show that SSDs have the highest IOPS when using 4k writes. 69 + */ 70 + #define DM_BUFIO_WRITE_ALIGN 4096 71 + 72 + /* 73 * dm_buffer->list_mode 74 */ 75 #define LIST_CLEAN 0 ··· 149 blk_status_t write_error; 150 unsigned long state; 151 unsigned long last_accessed; 152 + unsigned dirty_start; 153 + unsigned dirty_end; 154 + unsigned write_start; 155 + unsigned write_end; 156 struct dm_bufio_client *c; 157 struct list_head write_list; 158 struct bio bio; ··· 560 } 561 562 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector, 563 + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) 564 { 565 int r; 566 struct dm_io_request io_req = { ··· 578 579 if (b->data_mode != DATA_MODE_VMALLOC) { 580 io_req.mem.type = DM_IO_KMEM; 581 + io_req.mem.ptr.addr = (char *)b->data + offset; 582 } else { 583 io_req.mem.type = DM_IO_VMA; 584 + io_req.mem.ptr.vma = (char *)b->data + offset; 585 } 586 587 b->bio.bi_end_io = end_io; ··· 609 } 610 611 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector, 612 + unsigned n_sectors, unsigned offset, bio_end_io_t *end_io) 613 { 614 char *ptr; 615 + unsigned len; 616 617 bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); 618 b->bio.bi_iter.bi_sector = sector; ··· 625 b->bio.bi_private = end_io; 626 bio_set_op_attrs(&b->bio, rw, 0); 627 628 + ptr = (char *)b->data + offset; 629 len = n_sectors << SECTOR_SHIFT; 630 631 do { 632 + unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len); 633 + if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step, 634 offset_in_page(ptr))) { 635 BUG_ON(b->c->block_size <= PAGE_SIZE); 636 + use_dmio(b, rw, sector, n_sectors, offset, end_io); 637 return; 638 } 639 640 + len -= this_step; 641 + ptr += this_step; 642 } while (len > 0); 643 644 submit_bio(&b->bio); ··· 657 { 658 unsigned n_sectors; 659 sector_t sector; 660 + unsigned offset, end; 661 662 sector = (b->block << b->c->sectors_per_block_bits) + b->c->start; 663 + 664 + if (rw != WRITE) { 665 + n_sectors = 1 << b->c->sectors_per_block_bits; 666 + offset = 0; 667 + } else { 668 + if (b->c->write_callback) 669 + b->c->write_callback(b); 670 + offset = b->write_start; 671 + end = b->write_end; 672 + offset &= -DM_BUFIO_WRITE_ALIGN; 673 + end += DM_BUFIO_WRITE_ALIGN - 1; 674 + end &= -DM_BUFIO_WRITE_ALIGN; 675 + if (unlikely(end > b->c->block_size)) 676 + end = b->c->block_size; 677 + 678 + sector += offset >> SECTOR_SHIFT; 679 + n_sectors = (end - offset) >> SECTOR_SHIFT; 680 + } 681 682 if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) && 683 b->data_mode != DATA_MODE_VMALLOC) 684 + use_inline_bio(b, rw, sector, n_sectors, offset, end_io); 685 else 686 + use_dmio(b, rw, sector, n_sectors, offset, end_io); 687 } 688 689 /*---------------------------------------------------------------- ··· 719 720 clear_bit(B_DIRTY, &b->state); 721 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE); 722 + 723 + b->write_start = b->dirty_start; 724 + b->write_end = b->dirty_end; 725 726 if (!write_list) 727 submit_io(b, WRITE, write_endio); ··· 1221 } 1222 EXPORT_SYMBOL_GPL(dm_bufio_release); 1223 1224 + void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, 1225 + unsigned start, unsigned end) 1226 { 1227 struct dm_bufio_client *c = b->c; 1228 + 1229 + BUG_ON(start >= end); 1230 + BUG_ON(end > b->c->block_size); 1231 1232 dm_bufio_lock(c); 1233 1234 
BUG_ON(test_bit(B_READING, &b->state)); 1235 1236 + if (!test_and_set_bit(B_DIRTY, &b->state)) { 1237 + b->dirty_start = start; 1238 + b->dirty_end = end; 1239 __relink_lru(b, LIST_DIRTY); 1240 + } else { 1241 + if (start < b->dirty_start) 1242 + b->dirty_start = start; 1243 + if (end > b->dirty_end) 1244 + b->dirty_end = end; 1245 + } 1246 1247 dm_bufio_unlock(c); 1248 + } 1249 + EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty); 1250 + 1251 + void dm_bufio_mark_buffer_dirty(struct dm_buffer *b) 1252 + { 1253 + dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size); 1254 } 1255 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty); 1256 ··· 1398 wait_on_bit_io(&b->state, B_WRITING, 1399 TASK_UNINTERRUPTIBLE); 1400 set_bit(B_DIRTY, &b->state); 1401 + b->dirty_start = 0; 1402 + b->dirty_end = c->block_size; 1403 __unlink_buffer(b); 1404 __link_buffer(b, new_block, LIST_DIRTY); 1405 } else {
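The key piece of the partial-write optimization is the rounding in submit_io(): only the buffer's dirty byte range is written, expanded outward to DM_BUFIO_WRITE_ALIGN boundaries. A stand-alone userspace illustration of that arithmetic (the sample offsets are made up):

    #include <stdio.h>

    #define DM_BUFIO_WRITE_ALIGN 4096

    int main(void)
    {
            unsigned offset = 5000, end = 6000, block_size = 65536;

            offset &= -DM_BUFIO_WRITE_ALIGN;        /* round down -> 4096 */
            end += DM_BUFIO_WRITE_ALIGN - 1;
            end &= -DM_BUFIO_WRITE_ALIGN;           /* round up   -> 8192 */
            if (end > block_size)
                    end = block_size;

            printf("write bytes [%u, %u) of the block\n", offset, end);
            return 0;
    }

For a small metadata update inside a large block this collapses a full-block write down to a single aligned 4k write, which is what dm-integrity's tag writes exploit via dm_bufio_mark_partial_buffer_dirty().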
+9
drivers/md/dm-bufio.h
··· 94 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); 95 96 /* 97 * Initiate writing of dirty buffers, without waiting for completion. 98 */ 99 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
··· 94 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); 95 96 /* 97 + * Mark a part of the buffer dirty. 98 + * 99 + * The specified part of the buffer is scheduled to be written. dm-bufio may 100 + * write the specified part of the buffer or it may write a larger superset. 101 + */ 102 + void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, 103 + unsigned start, unsigned end); 104 + 105 + /* 106 * Initiate writing of dirty buffers, without waiting for completion. 107 */ 108 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
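A sketch of how a dm-bufio client might use the new call after it has already obtained the buffer; the helper and its arguments are hypothetical, only the two dm-bufio calls are real APIs:

    #include "dm-bufio.h"
    #include <linux/string.h>

    /* hypothetical client helper: patch bytes [off, off + len) of a buffer */
    static void update_tag(struct dm_buffer *b, unsigned off,
                           const void *tag, unsigned len)
    {
            memcpy((char *)dm_bufio_get_block_data(b) + off, tag, len);
            /* dm-bufio may write this range or any aligned superset of it */
            dm_bufio_mark_partial_buffer_dirty(b, off, off + len);
    }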
+2 -2
drivers/md/dm-cache-target.c
··· 2306 static int parse_features(struct cache_args *ca, struct dm_arg_set *as, 2307 char **error) 2308 { 2309 - static struct dm_arg _args[] = { 2310 {0, 2, "Invalid number of cache feature arguments"}, 2311 }; 2312 ··· 2348 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, 2349 char **error) 2350 { 2351 - static struct dm_arg _args[] = { 2352 {0, 1024, "Invalid number of policy arguments"}, 2353 }; 2354
··· 2306 static int parse_features(struct cache_args *ca, struct dm_arg_set *as, 2307 char **error) 2308 { 2309 + static const struct dm_arg _args[] = { 2310 {0, 2, "Invalid number of cache feature arguments"}, 2311 }; 2312 ··· 2348 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as, 2349 char **error) 2350 { 2351 + static const struct dm_arg _args[] = { 2352 {0, 1024, "Invalid number of policy arguments"}, 2353 }; 2354
+1 -1
drivers/md/dm-crypt.c
··· 2529 { 2530 struct crypt_config *cc = ti->private; 2531 struct dm_arg_set as; 2532 - static struct dm_arg _args[] = { 2533 {0, 6, "Invalid number of feature args"}, 2534 }; 2535 unsigned int opt_params, val;
··· 2529 { 2530 struct crypt_config *cc = ti->private; 2531 struct dm_arg_set as; 2532 + static const struct dm_arg _args[] = { 2533 {0, 6, "Invalid number of feature args"}, 2534 }; 2535 unsigned int opt_params, val;
+2 -2
drivers/md/dm-flakey.c
··· 51 unsigned argc; 52 const char *arg_name; 53 54 - static struct dm_arg _args[] = { 55 {0, 6, "Invalid number of feature args"}, 56 {1, UINT_MAX, "Invalid corrupt bio byte"}, 57 {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, ··· 178 */ 179 static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) 180 { 181 - static struct dm_arg _args[] = { 182 {0, UINT_MAX, "Invalid up interval"}, 183 {0, UINT_MAX, "Invalid down interval"}, 184 };
··· 51 unsigned argc; 52 const char *arg_name; 53 54 + static const struct dm_arg _args[] = { 55 {0, 6, "Invalid number of feature args"}, 56 {1, UINT_MAX, "Invalid corrupt bio byte"}, 57 {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, ··· 178 */ 179 static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) 180 { 181 + static const struct dm_arg _args[] = { 182 {0, UINT_MAX, "Invalid up interval"}, 183 {0, UINT_MAX, "Invalid down interval"}, 184 };
+26 -16
drivers/md/dm-integrity.c
··· 225 struct alg_spec internal_hash_alg; 226 struct alg_spec journal_crypt_alg; 227 struct alg_spec journal_mac_alg; 228 }; 229 230 struct dm_integrity_range { ··· 300 /* 301 * DM Integrity profile, protection is performed layer above (dm-crypt) 302 */ 303 - static struct blk_integrity_profile dm_integrity_profile = { 304 .name = "DM-DIF-EXT-TAG", 305 .generate_fn = NULL, 306 .verify_fn = NULL, ··· 312 313 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) 314 { 315 if (!cmpxchg(&ic->failed, 0, err)) 316 DMERR("Error on %s: %d", msg, err); 317 } ··· 774 unsigned i; 775 776 io_comp.ic = ic; 777 - io_comp.comp = COMPLETION_INITIALIZER_ONSTACK(io_comp.comp); 778 779 if (commit_start + commit_sections <= ic->journal_sections) { 780 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); 781 if (ic->journal_io) { 782 crypt_comp_1.ic = ic; 783 - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 784 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 785 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); 786 wait_for_completion_io(&crypt_comp_1.comp); ··· 796 to_end = ic->journal_sections - commit_start; 797 if (ic->journal_io) { 798 crypt_comp_1.ic = ic; 799 - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 800 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 801 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); 802 if (try_wait_for_completion(&crypt_comp_1.comp)) { 803 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 804 - crypt_comp_1.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_1.comp); 805 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 806 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); 807 wait_for_completion_io(&crypt_comp_1.comp); 808 } else { 809 crypt_comp_2.ic = ic; 810 - crypt_comp_2.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp_2.comp); 811 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); 812 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); 813 wait_for_completion_io(&crypt_comp_1.comp); ··· 1045 memcpy(tag, dp, to_copy); 1046 } else if (op == TAG_WRITE) { 1047 memcpy(dp, tag, to_copy); 1048 - dm_bufio_mark_buffer_dirty(b); 1049 } else { 1050 /* e.g.: op == TAG_CMP */ 1051 if (unlikely(memcmp(dp, tag, to_copy))) { ··· 1279 DMERR("Checksum failed at sector 0x%llx", 1280 (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); 1281 r = -EILSEQ; 1282 } 1283 if (likely(checksums != checksums_onstack)) 1284 kfree(checksums); ··· 1681 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 1682 1683 if (need_sync_io) { 1684 - read_comp = COMPLETION_INITIALIZER_ONSTACK(read_comp); 1685 dio->completion = &read_comp; 1686 } else 1687 dio->completion = NULL; ··· 1705 1706 if (need_sync_io) { 1707 wait_for_completion_io(&read_comp); 1708 - integrity_metadata(&dio->work); 1709 } else { 1710 INIT_WORK(&dio->work, integrity_metadata); 1711 queue_work(ic->metadata_wq, &dio->work); ··· 1843 1844 comp.ic = ic; 1845 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1846 - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 1847 1848 i = write_start; 1849 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { ··· 2070 if (ic->journal_io) { 2071 struct journal_completion crypt_comp; 2072 crypt_comp.ic = ic; 2073 - crypt_comp.comp = COMPLETION_INITIALIZER_ONSTACK(crypt_comp.comp); 2074 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); 2075 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); 
2076 wait_for_completion(&crypt_comp.comp); ··· 2242 2243 switch (type) { 2244 case STATUSTYPE_INFO: 2245 - result[0] = '\0'; 2246 break; 2247 2248 case STATUSTYPE_TABLE: { ··· 2643 memset(iv, 0x00, ivsize); 2644 2645 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2646 - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 2647 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2648 if (do_crypt(true, req, &comp)) 2649 wait_for_completion(&comp.comp); ··· 2700 2701 sg_init_one(&sg, crypt_data, crypt_len); 2702 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2703 - comp.comp = COMPLETION_INITIALIZER_ONSTACK(comp.comp); 2704 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2705 if (do_crypt(true, req, &comp)) 2706 wait_for_completion(&comp.comp); ··· 2787 int r; 2788 unsigned extra_args; 2789 struct dm_arg_set as; 2790 - static struct dm_arg _args[] = { 2791 {0, 9, "Invalid number of feature args"}, 2792 }; 2793 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; ··· 2815 bio_list_init(&ic->flush_bio_list); 2816 init_waitqueue_head(&ic->copy_to_journal_wait); 2817 init_completion(&ic->crypto_backoff); 2818 2819 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 2820 if (r) { ··· 3212 3213 static struct target_type integrity_target = { 3214 .name = "integrity", 3215 - .version = {1, 0, 0}, 3216 .module = THIS_MODULE, 3217 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 3218 .ctr = dm_integrity_ctr,
··· 225 struct alg_spec internal_hash_alg; 226 struct alg_spec journal_crypt_alg; 227 struct alg_spec journal_mac_alg; 228 + 229 + atomic64_t number_of_mismatches; 230 }; 231 232 struct dm_integrity_range { ··· 298 /* 299 * DM Integrity profile, protection is performed layer above (dm-crypt) 300 */ 301 + static const struct blk_integrity_profile dm_integrity_profile = { 302 .name = "DM-DIF-EXT-TAG", 303 .generate_fn = NULL, 304 .verify_fn = NULL, ··· 310 311 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) 312 { 313 + if (err == -EILSEQ) 314 + atomic64_inc(&ic->number_of_mismatches); 315 if (!cmpxchg(&ic->failed, 0, err)) 316 DMERR("Error on %s: %d", msg, err); 317 } ··· 770 unsigned i; 771 772 io_comp.ic = ic; 773 + init_completion(&io_comp.comp); 774 775 if (commit_start + commit_sections <= ic->journal_sections) { 776 io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); 777 if (ic->journal_io) { 778 crypt_comp_1.ic = ic; 779 + init_completion(&crypt_comp_1.comp); 780 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 781 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); 782 wait_for_completion_io(&crypt_comp_1.comp); ··· 792 to_end = ic->journal_sections - commit_start; 793 if (ic->journal_io) { 794 crypt_comp_1.ic = ic; 795 + init_completion(&crypt_comp_1.comp); 796 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 797 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); 798 if (try_wait_for_completion(&crypt_comp_1.comp)) { 799 rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); 800 + reinit_completion(&crypt_comp_1.comp); 801 crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); 802 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); 803 wait_for_completion_io(&crypt_comp_1.comp); 804 } else { 805 crypt_comp_2.ic = ic; 806 + init_completion(&crypt_comp_2.comp); 807 crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); 808 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); 809 wait_for_completion_io(&crypt_comp_1.comp); ··· 1041 memcpy(tag, dp, to_copy); 1042 } else if (op == TAG_WRITE) { 1043 memcpy(dp, tag, to_copy); 1044 + dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy); 1045 } else { 1046 /* e.g.: op == TAG_CMP */ 1047 if (unlikely(memcmp(dp, tag, to_copy))) { ··· 1275 DMERR("Checksum failed at sector 0x%llx", 1276 (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); 1277 r = -EILSEQ; 1278 + atomic64_inc(&ic->number_of_mismatches); 1279 } 1280 if (likely(checksums != checksums_onstack)) 1281 kfree(checksums); ··· 1676 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 1677 1678 if (need_sync_io) { 1679 + init_completion(&read_comp); 1680 dio->completion = &read_comp; 1681 } else 1682 dio->completion = NULL; ··· 1700 1701 if (need_sync_io) { 1702 wait_for_completion_io(&read_comp); 1703 + if (likely(!bio->bi_status)) 1704 + integrity_metadata(&dio->work); 1705 + else 1706 + dec_in_flight(dio); 1707 + 1708 } else { 1709 INIT_WORK(&dio->work, integrity_metadata); 1710 queue_work(ic->metadata_wq, &dio->work); ··· 1834 1835 comp.ic = ic; 1836 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1837 + init_completion(&comp.comp); 1838 1839 i = write_start; 1840 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { ··· 2061 if (ic->journal_io) { 2062 struct journal_completion crypt_comp; 2063 crypt_comp.ic = ic; 2064 + init_completion(&crypt_comp.comp); 2065 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0); 2066 
encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); 2067 wait_for_completion(&crypt_comp.comp); ··· 2233 2234 switch (type) { 2235 case STATUSTYPE_INFO: 2236 + DMEMIT("%llu", (unsigned long long)atomic64_read(&ic->number_of_mismatches)); 2237 break; 2238 2239 case STATUSTYPE_TABLE: { ··· 2634 memset(iv, 0x00, ivsize); 2635 2636 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2637 + init_completion(&comp.comp); 2638 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2639 if (do_crypt(true, req, &comp)) 2640 wait_for_completion(&comp.comp); ··· 2691 2692 sg_init_one(&sg, crypt_data, crypt_len); 2693 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2694 + init_completion(&comp.comp); 2695 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2696 if (do_crypt(true, req, &comp)) 2697 wait_for_completion(&comp.comp); ··· 2778 int r; 2779 unsigned extra_args; 2780 struct dm_arg_set as; 2781 + static const struct dm_arg _args[] = { 2782 {0, 9, "Invalid number of feature args"}, 2783 }; 2784 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; ··· 2806 bio_list_init(&ic->flush_bio_list); 2807 init_waitqueue_head(&ic->copy_to_journal_wait); 2808 init_completion(&ic->crypto_backoff); 2809 + atomic64_set(&ic->number_of_mismatches, 0); 2810 2811 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 2812 if (r) { ··· 3202 3203 static struct target_type integrity_target = { 3204 .name = "integrity", 3205 + .version = {1, 1, 0}, 3206 .module = THIS_MODULE, 3207 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 3208 .ctr = dm_integrity_ctr,
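Two behavioural notes on the dm-integrity hunks above. First, with the STATUSTYPE_INFO change the target's status line (e.g. from "dmsetup status") now reports the accumulated checksum-mismatch count instead of an empty string, and the target version is bumped to 1.1.0 accordingly. Second, the completion changes switch on-stack completions from COMPLETION_INITIALIZER_ONSTACK() assignment to in-place initialization, using reinit_completion() where the same object is waited on twice. A minimal sketch of that pattern, with the struct and helper names made up to mirror journal_completion:

    #include <linux/completion.h>
    #include <linux/atomic.h>

    /* made-up mirror of struct journal_completion from the diff */
    struct jc_example {
            struct completion comp;
            atomic_t in_flight;
    };

    static void wait_twice(struct jc_example *jc)
    {
            init_completion(&jc->comp);             /* first use on the stack */
            /* ... submit I/O that ends with complete(&jc->comp) ... */
            wait_for_completion_io(&jc->comp);

            reinit_completion(&jc->comp);           /* rearm for a second wait */
            /* ... second batch of I/O ... */
            wait_for_completion_io(&jc->comp);
    }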
+1 -1
drivers/md/dm-ioctl.c
··· 1629 *---------------------------------------------------------------*/ 1630 static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) 1631 { 1632 - static struct { 1633 int cmd; 1634 int flags; 1635 ioctl_fn fn;
··· 1629 *---------------------------------------------------------------*/ 1630 static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) 1631 { 1632 + static const struct { 1633 int cmd; 1634 int flags; 1635 ioctl_fn fn;
-15
drivers/md/dm-linear.c
··· 184 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); 185 } 186 187 - static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, 188 - size_t size) 189 - { 190 - struct linear_c *lc = ti->private; 191 - struct block_device *bdev = lc->dev->bdev; 192 - struct dax_device *dax_dev = lc->dev->dax_dev; 193 - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; 194 - 195 - dev_sector = linear_map_sector(ti, sector); 196 - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) 197 - return; 198 - dax_flush(dax_dev, pgoff, addr, size); 199 - } 200 - 201 static struct target_type linear_target = { 202 .name = "linear", 203 .version = {1, 4, 0}, ··· 198 .iterate_devices = linear_iterate_devices, 199 .direct_access = linear_dax_direct_access, 200 .dax_copy_from_iter = linear_dax_copy_from_iter, 201 - .dax_flush = linear_dax_flush, 202 }; 203 204 int __init dm_linear_init(void)
··· 184 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); 185 } 186 187 static struct target_type linear_target = { 188 .name = "linear", 189 .version = {1, 4, 0}, ··· 212 .iterate_devices = linear_iterate_devices, 213 .direct_access = linear_dax_direct_access, 214 .dax_copy_from_iter = linear_dax_copy_from_iter, 215 }; 216 217 int __init dm_linear_init(void)
+32 -12
drivers/md/dm-log-writes.c
··· 100 struct dm_dev *logdev; 101 u64 logged_entries; 102 u32 sectorsize; 103 atomic_t io_blocks; 104 atomic_t pending_blocks; 105 sector_t next_sector; ··· 128 struct per_bio_data { 129 struct pending_block *block; 130 }; 131 132 static void put_pending_block(struct log_writes_c *lc) 133 { ··· 266 267 if (!block->vec_cnt) 268 goto out; 269 - sector++; 270 271 atomic_inc(&lc->io_blocks); 272 bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); ··· 367 goto next; 368 369 sector = lc->next_sector; 370 - if (block->flags & LOG_DISCARD_FLAG) 371 - lc->next_sector++; 372 - else 373 - lc->next_sector += block->nr_sectors + 1; 374 375 /* 376 * Apparently the size of the device may not be known ··· 411 if (!try_to_freeze()) { 412 set_current_state(TASK_INTERRUPTIBLE); 413 if (!kthread_should_stop() && 414 - !atomic_read(&lc->pending_blocks)) 415 schedule(); 416 __set_current_state(TASK_RUNNING); 417 } ··· 447 INIT_LIST_HEAD(&lc->unflushed_blocks); 448 INIT_LIST_HEAD(&lc->logging_blocks); 449 init_waitqueue_head(&lc->wait); 450 - lc->sectorsize = 1 << SECTOR_SHIFT; 451 atomic_set(&lc->io_blocks, 0); 452 atomic_set(&lc->pending_blocks, 0); 453 ··· 466 goto bad; 467 } 468 469 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); 470 if (IS_ERR(lc->log_kthread)) { 471 ret = PTR_ERR(lc->log_kthread); ··· 477 goto bad; 478 } 479 480 - /* We put the super at sector 0, start logging at sector 1 */ 481 - lc->next_sector = 1; 482 lc->logging_enabled = true; 483 lc->end_sector = logdev_last_sector(lc); 484 lc->device_supports_discard = true; ··· 616 if (discard_bio) 617 block->flags |= LOG_DISCARD_FLAG; 618 619 - block->sector = bio->bi_iter.bi_sector; 620 - block->nr_sectors = bio_sectors(bio); 621 622 /* We don't need the data, just submit */ 623 if (discard_bio) { ··· 784 785 if (!q || !blk_queue_discard(q)) { 786 lc->device_supports_discard = false; 787 - limits->discard_granularity = 1 << SECTOR_SHIFT; 788 limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); 789 } 790 } 791 792 static struct target_type log_writes_target = {
··· 100 struct dm_dev *logdev; 101 u64 logged_entries; 102 u32 sectorsize; 103 + u32 sectorshift; 104 atomic_t io_blocks; 105 atomic_t pending_blocks; 106 sector_t next_sector; ··· 127 struct per_bio_data { 128 struct pending_block *block; 129 }; 130 + 131 + static inline sector_t bio_to_dev_sectors(struct log_writes_c *lc, 132 + sector_t sectors) 133 + { 134 + return sectors >> (lc->sectorshift - SECTOR_SHIFT); 135 + } 136 + 137 + static inline sector_t dev_to_bio_sectors(struct log_writes_c *lc, 138 + sector_t sectors) 139 + { 140 + return sectors << (lc->sectorshift - SECTOR_SHIFT); 141 + } 142 143 static void put_pending_block(struct log_writes_c *lc) 144 { ··· 253 254 if (!block->vec_cnt) 255 goto out; 256 + sector += dev_to_bio_sectors(lc, 1); 257 258 atomic_inc(&lc->io_blocks); 259 bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES)); ··· 354 goto next; 355 356 sector = lc->next_sector; 357 + if (!(block->flags & LOG_DISCARD_FLAG)) 358 + lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); 359 + lc->next_sector += dev_to_bio_sectors(lc, 1); 360 361 /* 362 * Apparently the size of the device may not be known ··· 399 if (!try_to_freeze()) { 400 set_current_state(TASK_INTERRUPTIBLE); 401 if (!kthread_should_stop() && 402 + list_empty(&lc->logging_blocks)) 403 schedule(); 404 __set_current_state(TASK_RUNNING); 405 } ··· 435 INIT_LIST_HEAD(&lc->unflushed_blocks); 436 INIT_LIST_HEAD(&lc->logging_blocks); 437 init_waitqueue_head(&lc->wait); 438 atomic_set(&lc->io_blocks, 0); 439 atomic_set(&lc->pending_blocks, 0); 440 ··· 455 goto bad; 456 } 457 458 + lc->sectorsize = bdev_logical_block_size(lc->dev->bdev); 459 + lc->sectorshift = ilog2(lc->sectorsize); 460 lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write"); 461 if (IS_ERR(lc->log_kthread)) { 462 ret = PTR_ERR(lc->log_kthread); ··· 464 goto bad; 465 } 466 467 + /* 468 + * next_sector is in 512b sectors to correspond to what bi_sector expects. 469 + * The super starts at sector 0, and the next_sector is the next logical 470 + * one based on the sectorsize of the device. 471 + */ 472 + lc->next_sector = lc->sectorsize >> SECTOR_SHIFT; 473 lc->logging_enabled = true; 474 lc->end_sector = logdev_last_sector(lc); 475 lc->device_supports_discard = true; ··· 599 if (discard_bio) 600 block->flags |= LOG_DISCARD_FLAG; 601 602 + block->sector = bio_to_dev_sectors(lc, bio->bi_iter.bi_sector); 603 + block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); 604 605 /* We don't need the data, just submit */ 606 if (discard_bio) { ··· 767 768 if (!q || !blk_queue_discard(q)) { 769 lc->device_supports_discard = false; 770 + limits->discard_granularity = lc->sectorsize; 771 limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); 772 } 773 + limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); 774 + limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); 775 + limits->io_min = limits->physical_block_size; 776 } 777 778 static struct target_type log_writes_target = {
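The >512b sector-size fix keeps bio sector numbers (always 512-byte units) and on-disk log sector numbers (logical-block-size units) in separate domains, converting with the two new helpers. A small userspace illustration of the shift arithmetic, assuming a 4096-byte logical block size:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
            uint32_t sectorshift = 12;              /* ilog2(4096) */
            uint64_t bio_sector = 2048;             /* 1 MiB in 512b sectors */

            uint64_t dev_sector = bio_sector >> (sectorshift - SECTOR_SHIFT);
            uint64_t back = dev_sector << (sectorshift - SECTOR_SHIFT);

            printf("bio %llu -> dev %llu -> bio %llu\n",
                   (unsigned long long)bio_sector,
                   (unsigned long long)dev_sector,
                   (unsigned long long)back);
            return 0;
    }

The constructor now picks the size up from bdev_logical_block_size() instead of hard-coding 512, and io_hints propagates the logical/physical block size of the underlying device.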
+10 -5
drivers/md/dm-mpath.c
··· 632 case DM_MAPIO_REMAPPED: 633 generic_make_request(bio); 634 break; 635 } 636 } 637 blk_finish_plug(&plug); ··· 702 struct path_selector_type *pst; 703 unsigned ps_argc; 704 705 - static struct dm_arg _args[] = { 706 {0, 1024, "invalid number of path selector args"}, 707 }; 708 ··· 826 static struct priority_group *parse_priority_group(struct dm_arg_set *as, 827 struct multipath *m) 828 { 829 - static struct dm_arg _args[] = { 830 {1, 1024, "invalid number of paths"}, 831 {0, 1024, "invalid number of selector args"} 832 }; ··· 902 int ret; 903 struct dm_target *ti = m->ti; 904 905 - static struct dm_arg _args[] = { 906 {0, 1024, "invalid number of hardware handler args"}, 907 }; 908 ··· 954 struct dm_target *ti = m->ti; 955 const char *arg_name; 956 957 - static struct dm_arg _args[] = { 958 {0, 8, "invalid number of feature args"}, 959 {1, 50, "pg_init_retries must be between 1 and 50"}, 960 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, ··· 1023 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) 1024 { 1025 /* target arguments */ 1026 - static struct dm_arg _args[] = { 1027 {0, 1024, "invalid number of priority groups"}, 1028 {0, 1024, "invalid initial priority group number"}, 1029 }; ··· 1383 case SCSI_DH_RETRY: 1384 /* Wait before retrying. */ 1385 delay_retry = 1; 1386 case SCSI_DH_IMM_RETRY: 1387 case SCSI_DH_RES_TEMP_UNAVAIL: 1388 if (pg_init_limit_reached(m, pgpath))
··· 632 case DM_MAPIO_REMAPPED: 633 generic_make_request(bio); 634 break; 635 + case 0: 636 + break; 637 + default: 638 + WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r); 639 } 640 } 641 blk_finish_plug(&plug); ··· 698 struct path_selector_type *pst; 699 unsigned ps_argc; 700 701 + static const struct dm_arg _args[] = { 702 {0, 1024, "invalid number of path selector args"}, 703 }; 704 ··· 822 static struct priority_group *parse_priority_group(struct dm_arg_set *as, 823 struct multipath *m) 824 { 825 + static const struct dm_arg _args[] = { 826 {1, 1024, "invalid number of paths"}, 827 {0, 1024, "invalid number of selector args"} 828 }; ··· 898 int ret; 899 struct dm_target *ti = m->ti; 900 901 + static const struct dm_arg _args[] = { 902 {0, 1024, "invalid number of hardware handler args"}, 903 }; 904 ··· 950 struct dm_target *ti = m->ti; 951 const char *arg_name; 952 953 + static const struct dm_arg _args[] = { 954 {0, 8, "invalid number of feature args"}, 955 {1, 50, "pg_init_retries must be between 1 and 50"}, 956 {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, ··· 1019 static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) 1020 { 1021 /* target arguments */ 1022 + static const struct dm_arg _args[] = { 1023 {0, 1024, "invalid number of priority groups"}, 1024 {0, 1024, "invalid initial priority group number"}, 1025 }; ··· 1379 case SCSI_DH_RETRY: 1380 /* Wait before retrying. */ 1381 delay_retry = 1; 1382 + /* fall through */ 1383 case SCSI_DH_IMM_RETRY: 1384 case SCSI_DH_RES_TEMP_UNAVAIL: 1385 if (pg_init_limit_reached(m, pgpath))
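The warning fix for the SCSI_DH_RETRY case is purely an annotation: gcc 7, as exercised by the kernel's W=1 builds, warns about a case label that falls into the next one unless a fall-through marker comment is present. A stand-alone illustration, unrelated to the multipath code itself; removing the marker comment makes gcc 7 warn at that point:

    /* compile with: gcc -Wextra -Wimplicit-fallthrough -c fallthrough.c */
    int classify(int r)
    {
            int delay = 0;

            switch (r) {
            case 1:
                    delay = 1;
                    /* fall through */
            case 2:
                    return delay + 10;
            default:
                    return 0;
            }
    }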
+12 -15
drivers/md/dm-rq.c
··· 117 struct dm_rq_clone_bio_info *info = 118 container_of(clone, struct dm_rq_clone_bio_info, clone); 119 struct dm_rq_target_io *tio = info->tio; 120 - struct bio *bio = info->orig; 121 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 122 blk_status_t error = clone->bi_status; 123 124 bio_put(clone); 125 ··· 137 * when the request is completed. 138 */ 139 tio->error = error; 140 - return; 141 } 142 143 /* 144 * I/O for the bio successfully completed. 145 * Notice the data completion to the upper layer. 146 */ 147 - 148 - /* 149 - * bios are processed from the head of the list. 150 - * So the completing bio should always be rq->bio. 151 - * If it's not, something wrong is happening. 152 - */ 153 - if (tio->orig->bio != bio) 154 - DMERR("bio completion is going in the middle of the request"); 155 156 /* 157 * Update the original request. 158 * Do not use blk_end_request() here, because it may complete 159 * the original request before the clone, and break the ordering. 160 */ 161 - blk_update_request(tio->orig, BLK_STS_OK, nr_bytes); 162 } 163 164 static struct dm_rq_target_io *tio_from_request(struct request *rq) ··· 232 /* 233 * Requeue the original request of a clone. 234 */ 235 - static void dm_old_requeue_request(struct request *rq) 236 { 237 struct request_queue *q = rq->q; 238 unsigned long flags; 239 240 spin_lock_irqsave(q->queue_lock, flags); 241 blk_requeue_request(q, rq); 242 - blk_run_queue_async(q); 243 spin_unlock_irqrestore(q->queue_lock, flags); 244 } 245 ··· 265 struct mapped_device *md = tio->md; 266 struct request *rq = tio->orig; 267 int rw = rq_data_dir(rq); 268 269 rq_end_stats(md, rq); 270 if (tio->clone) { ··· 274 } 275 276 if (!rq->q->mq_ops) 277 - dm_old_requeue_request(rq); 278 else 279 - dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0); 280 281 rq_completed(md, rw, false); 282 } ··· 451 tio->clone = NULL; 452 tio->orig = rq; 453 tio->error = 0; 454 /* 455 * Avoid initializing info for blk-mq; it passes 456 * target-specific data through info.ptr
··· 117 struct dm_rq_clone_bio_info *info = 118 container_of(clone, struct dm_rq_clone_bio_info, clone); 119 struct dm_rq_target_io *tio = info->tio; 120 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 121 blk_status_t error = clone->bi_status; 122 + bool is_last = !clone->bi_next; 123 124 bio_put(clone); 125 ··· 137 * when the request is completed. 138 */ 139 tio->error = error; 140 + goto exit; 141 } 142 143 /* 144 * I/O for the bio successfully completed. 145 * Notice the data completion to the upper layer. 146 */ 147 + tio->completed += nr_bytes; 148 149 /* 150 * Update the original request. 151 * Do not use blk_end_request() here, because it may complete 152 * the original request before the clone, and break the ordering. 153 */ 154 + if (is_last) 155 + exit: 156 + blk_update_request(tio->orig, BLK_STS_OK, tio->completed); 157 } 158 159 static struct dm_rq_target_io *tio_from_request(struct request *rq) ··· 237 /* 238 * Requeue the original request of a clone. 239 */ 240 + static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms) 241 { 242 struct request_queue *q = rq->q; 243 unsigned long flags; 244 245 spin_lock_irqsave(q->queue_lock, flags); 246 blk_requeue_request(q, rq); 247 + blk_delay_queue(q, delay_ms); 248 spin_unlock_irqrestore(q->queue_lock, flags); 249 } 250 ··· 270 struct mapped_device *md = tio->md; 271 struct request *rq = tio->orig; 272 int rw = rq_data_dir(rq); 273 + unsigned long delay_ms = delay_requeue ? 100 : 0; 274 275 rq_end_stats(md, rq); 276 if (tio->clone) { ··· 278 } 279 280 if (!rq->q->mq_ops) 281 + dm_old_requeue_request(rq, delay_ms); 282 else 283 + dm_mq_delay_requeue_request(rq, delay_ms); 284 285 rq_completed(md, rw, false); 286 } ··· 455 tio->clone = NULL; 456 tio->orig = rq; 457 tio->error = 0; 458 + tio->completed = 0; 459 /* 460 * Avoid initializing info for blk-mq; it passes 461 * target-specific data through info.ptr
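end_clone_bio() now accumulates per-bio byte counts in tio->completed (the field added to struct dm_rq_target_io just below) and advances the original request once, when the last bio of the clone completes, or immediately with whatever had completed so far when a bio fails; the other hunk makes the old request_fn path requeue via blk_delay_queue() so dm-sq and dm-mq behave the same. A userspace model of the completion bookkeeping, with all names invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct tio_model { unsigned completed; };

    static void end_clone_bio_model(struct tio_model *tio, unsigned nr_bytes,
                                    bool error, bool is_last)
    {
            if (error) {
                    /* error: report only what completed before this bio */
                    printf("advance original request by %u bytes\n",
                           tio->completed);
                    return;
            }

            tio->completed += nr_bytes;
            if (is_last)
                    printf("advance original request by %u bytes\n",
                           tio->completed);
    }

    int main(void)
    {
            struct tio_model tio = { 0 };

            end_clone_bio_model(&tio, 4096, false, false);
            end_clone_bio_model(&tio, 4096, false, false);
            end_clone_bio_model(&tio, 4096, false, true);   /* prints 12288 */
            return 0;
    }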
+1
drivers/md/dm-rq.h
··· 29 struct dm_stats_aux stats_aux; 30 unsigned long duration_jiffies; 31 unsigned n_sectors; 32 }; 33 34 /*
··· 29 struct dm_stats_aux stats_aux; 30 unsigned long duration_jiffies; 31 unsigned n_sectors; 32 + unsigned completed; 33 }; 34 35 /*
-20
drivers/md/dm-stripe.c
··· 351 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); 352 } 353 354 - static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr, 355 - size_t size) 356 - { 357 - sector_t dev_sector, sector = pgoff * PAGE_SECTORS; 358 - struct stripe_c *sc = ti->private; 359 - struct dax_device *dax_dev; 360 - struct block_device *bdev; 361 - uint32_t stripe; 362 - 363 - stripe_map_sector(sc, sector, &stripe, &dev_sector); 364 - dev_sector += sc->stripe[stripe].physical_start; 365 - dax_dev = sc->stripe[stripe].dev->dax_dev; 366 - bdev = sc->stripe[stripe].dev->bdev; 367 - 368 - if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff)) 369 - return; 370 - dax_flush(dax_dev, pgoff, addr, size); 371 - } 372 - 373 /* 374 * Stripe status: 375 * ··· 470 .io_hints = stripe_io_hints, 471 .direct_access = stripe_dax_direct_access, 472 .dax_copy_from_iter = stripe_dax_copy_from_iter, 473 - .dax_flush = stripe_dax_flush, 474 }; 475 476 int __init dm_stripe_init(void)
··· 351 return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); 352 } 353 354 /* 355 * Stripe status: 356 * ··· 489 .io_hints = stripe_io_hints, 490 .direct_access = stripe_dax_direct_access, 491 .dax_copy_from_iter = stripe_dax_copy_from_iter, 492 }; 493 494 int __init dm_stripe_init(void)
+1 -1
drivers/md/dm-switch.c
··· 251 */ 252 static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv) 253 { 254 - static struct dm_arg _args[] = { 255 {1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"}, 256 {1, UINT_MAX, "Invalid region size"}, 257 {0, 0, "Invalid number of optional args"},
··· 251 */ 252 static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv) 253 { 254 + static const struct dm_arg _args[] = { 255 {1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"}, 256 {1, UINT_MAX, "Invalid region size"}, 257 {0, 0, "Invalid number of optional args"},
+4 -3
drivers/md/dm-table.c
··· 806 /* 807 * Target argument parsing helpers. 808 */ 809 - static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, 810 unsigned *value, char **error, unsigned grouped) 811 { 812 const char *arg_str = dm_shift_arg(arg_set); ··· 825 return 0; 826 } 827 828 - int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, 829 unsigned *value, char **error) 830 { 831 return validate_next_arg(arg, arg_set, value, error, 0); 832 } 833 EXPORT_SYMBOL(dm_read_arg); 834 835 - int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, 836 unsigned *value, char **error) 837 { 838 return validate_next_arg(arg, arg_set, value, error, 1);
··· 806 /* 807 * Target argument parsing helpers. 808 */ 809 + static int validate_next_arg(const struct dm_arg *arg, 810 + struct dm_arg_set *arg_set, 811 unsigned *value, char **error, unsigned grouped) 812 { 813 const char *arg_str = dm_shift_arg(arg_set); ··· 824 return 0; 825 } 826 827 + int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, 828 unsigned *value, char **error) 829 { 830 return validate_next_arg(arg, arg_set, value, error, 0); 831 } 832 EXPORT_SYMBOL(dm_read_arg); 833 834 + int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, 835 unsigned *value, char **error) 836 { 837 return validate_next_arg(arg, arg_set, value, error, 1);
+1 -1
drivers/md/dm-thin.c
··· 3041 unsigned argc; 3042 const char *arg_name; 3043 3044 - static struct dm_arg _args[] = { 3045 {0, 4, "Invalid number of pool feature arguments"}, 3046 }; 3047
··· 3041 unsigned argc; 3042 const char *arg_name; 3043 3044 + static const struct dm_arg _args[] = { 3045 {0, 4, "Invalid number of pool feature arguments"}, 3046 }; 3047
+1 -1
drivers/md/dm-verity-target.c
··· 839 struct dm_target *ti = v->ti; 840 const char *arg_name; 841 842 - static struct dm_arg _args[] = { 843 {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"}, 844 }; 845
··· 839 struct dm_target *ti = v->ti; 840 const char *arg_name; 841 842 + static const struct dm_arg _args[] = { 843 {0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"}, 844 }; 845
-19
drivers/md/dm.c
··· 987 return ret; 988 } 989 990 - static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 991 - size_t size) 992 - { 993 - struct mapped_device *md = dax_get_private(dax_dev); 994 - sector_t sector = pgoff * PAGE_SECTORS; 995 - struct dm_target *ti; 996 - int srcu_idx; 997 - 998 - ti = dm_dax_get_live_target(md, sector, &srcu_idx); 999 - 1000 - if (!ti) 1001 - goto out; 1002 - if (ti->type->dax_flush) 1003 - ti->type->dax_flush(ti, pgoff, addr, size); 1004 - out: 1005 - dm_put_live_table(md, srcu_idx); 1006 - } 1007 - 1008 /* 1009 * A target may call dm_accept_partial_bio only from the map routine. It is 1010 * allowed for all bio types except REQ_PREFLUSH. ··· 2974 static const struct dax_operations dm_dax_ops = { 2975 .direct_access = dm_dax_direct_access, 2976 .copy_from_iter = dm_dax_copy_from_iter, 2977 - .flush = dm_dax_flush, 2978 }; 2979 2980 /*
··· 987 return ret; 988 } 989 990 /* 991 * A target may call dm_accept_partial_bio only from the map routine. It is 992 * allowed for all bio types except REQ_PREFLUSH. ··· 2992 static const struct dax_operations dm_dax_ops = { 2993 .direct_access = dm_dax_direct_access, 2994 .copy_from_iter = dm_dax_copy_from_iter, 2995 }; 2996 2997 /*
-7
drivers/nvdimm/pmem.c
··· 262 return copy_from_iter_flushcache(addr, bytes, i); 263 } 264 265 - static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, 266 - void *addr, size_t size) 267 - { 268 - arch_wb_cache_pmem(addr, size); 269 - } 270 - 271 static const struct dax_operations pmem_dax_ops = { 272 .direct_access = pmem_dax_direct_access, 273 .copy_from_iter = pmem_copy_from_iter, 274 - .flush = pmem_dax_flush, 275 }; 276 277 static const struct attribute_group *pmem_attribute_groups[] = {
··· 262 return copy_from_iter_flushcache(addr, bytes, i); 263 } 264 265 static const struct dax_operations pmem_dax_ops = { 266 .direct_access = pmem_dax_direct_access, 267 .copy_from_iter = pmem_copy_from_iter, 268 }; 269 270 static const struct attribute_group *pmem_attribute_groups[] = {
+2 -2
fs/dax.c
··· 734 } 735 736 dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn)); 737 - dax_flush(dax_dev, pgoff, kaddr, size); 738 /* 739 * After we have flushed the cache, we can clear the dirty tag. There 740 * cannot be new dirty data in the pfn after the flush has completed as ··· 929 return rc; 930 } 931 memset(kaddr + offset, 0, size); 932 - dax_flush(dax_dev, pgoff, kaddr + offset, size); 933 dax_read_unlock(id); 934 } 935 return 0;
··· 734 } 735 736 dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn)); 737 + dax_flush(dax_dev, kaddr, size); 738 /* 739 * After we have flushed the cache, we can clear the dirty tag. There 740 * cannot be new dirty data in the pfn after the flush has completed as ··· 929 return rc; 930 } 931 memset(kaddr + offset, 0, size); 932 + dax_flush(dax_dev, kaddr + offset, size); 933 dax_read_unlock(id); 934 } 935 return 0;
+1 -4
include/linux/dax.h
··· 19 /* copy_from_iter: required operation for fs-dax direct-i/o */ 20 size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t, 21 struct iov_iter *); 22 - /* flush: optional driver-specific cache management after writes */ 23 - void (*flush)(struct dax_device *, pgoff_t, void *, size_t); 24 }; 25 26 extern struct attribute_group dax_attribute_group; ··· 88 void **kaddr, pfn_t *pfn); 89 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 90 size_t bytes, struct iov_iter *i); 91 - void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 92 - size_t size); 93 void dax_write_cache(struct dax_device *dax_dev, bool wc); 94 bool dax_write_cache_enabled(struct dax_device *dax_dev); 95
··· 19 /* copy_from_iter: required operation for fs-dax direct-i/o */ 20 size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t, 21 struct iov_iter *); 22 }; 23 24 extern struct attribute_group dax_attribute_group; ··· 90 void **kaddr, pfn_t *pfn); 91 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, 92 size_t bytes, struct iov_iter *i); 93 + void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); 94 void dax_write_cache(struct dax_device *dax_dev, bool wc); 95 bool dax_write_cache_enabled(struct dax_device *dax_dev); 96
+2 -5
include/linux/device-mapper.h
··· 134 long nr_pages, void **kaddr, pfn_t *pfn); 135 typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff, 136 void *addr, size_t bytes, struct iov_iter *i); 137 - typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr, 138 - size_t size); 139 #define PAGE_SECTORS (PAGE_SIZE / 512) 140 141 void dm_error(const char *message); ··· 184 dm_io_hints_fn io_hints; 185 dm_dax_direct_access_fn direct_access; 186 dm_dax_copy_from_iter_fn dax_copy_from_iter; 187 - dm_dax_flush_fn dax_flush; 188 189 /* For internal device-mapper use. */ 190 struct list_head list; ··· 384 * Validate the next argument, either returning it as *value or, if invalid, 385 * returning -EINVAL and setting *error. 386 */ 387 - int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, 388 unsigned *value, char **error); 389 390 /* ··· 392 * arg->min and arg->max further arguments. Either return the size as 393 * *num_args or, if invalid, return -EINVAL and set *error. 394 */ 395 - int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, 396 unsigned *num_args, char **error); 397 398 /*
··· 134 long nr_pages, void **kaddr, pfn_t *pfn); 135 typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff, 136 void *addr, size_t bytes, struct iov_iter *i); 137 #define PAGE_SECTORS (PAGE_SIZE / 512) 138 139 void dm_error(const char *message); ··· 186 dm_io_hints_fn io_hints; 187 dm_dax_direct_access_fn direct_access; 188 dm_dax_copy_from_iter_fn dax_copy_from_iter; 189 190 /* For internal device-mapper use. */ 191 struct list_head list; ··· 387 * Validate the next argument, either returning it as *value or, if invalid, 388 * returning -EINVAL and setting *error. 389 */ 390 + int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, 391 unsigned *value, char **error); 392 393 /* ··· 395 * arg->min and arg->max further arguments. Either return the size as 396 * *num_args or, if invalid, return -EINVAL and set *error. 397 */ 398 + int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, 399 unsigned *num_args, char **error); 400 401 /*
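Taken together with the dm-table.c change, the constified prototypes let every target keep its dm_arg table in rodata, which is what the earlier per-target hunks do. A hypothetical constructor fragment showing the intended usage (the function name and error strings are invented):

    #include <linux/device-mapper.h>

    static int example_parse_features(struct dm_arg_set *as, struct dm_target *ti)
    {
            static const struct dm_arg _args[] = {
                    {0, 2, "Invalid number of feature args"},
            };
            unsigned argc;
            int r;

            /* reads the count and validates it against _args[0].min/max */
            r = dm_read_arg_group(_args, as, &argc, &ti->error);
            if (r)
                    return r;

            /* ... consume argc optional args with dm_shift_arg(as) ... */
            return 0;
    }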