Merge tag 'for-4.17/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

- a stable fix for DM integrity to use kvfree for kvmalloc'd memory

- fix for a 4.17-rc1 change that broke dm-bufio's buffer alignment

- fixes for a few sparse warnings

- remove VLA usage in DM mirror target

- improve DM thinp Documentation for the "read_only" feature

* tag 'for-4.17/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm thin: update Documentation to clarify when "read_only" is valid
dm mirror: remove VLA usage
dm: fix some sparse warnings and whitespace in dax methods
dm cache background tracker: fix sparse warning
dm bufio: fix buffer alignment
dm integrity: use kvfree for kvmalloc'd memory

Changed files (+19 -12):

Documentation/device-mapper/thin-provisioning.txt (+4 -1)

@@ -264,7 +264,10 @@
 	      data device, but just remove the mapping.
 
     read_only: Don't allow any changes to be made to the pool
-	       metadata.
+	       metadata. This mode is only available after the
+	       thin-pool has been created and first used in full
+	       read/write mode. It cannot be specified on initial
+	       thin-pool creation.
 
     error_if_no_space: Error IOs, instead of queueing, if no space.

drivers/md/dm-bufio.c (+3 -2)

@@ -1681,8 +1681,9 @@
 
 	if (block_size <= KMALLOC_MAX_SIZE &&
 	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
-		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size);
-		c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN,
+		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
+		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
 						  SLAB_RECLAIM_ACCOUNT, NULL);
 		if (!c->slab_cache) {
 			r = -ENOMEM;
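
The new alignment is the largest power of two that divides block_size, capped at PAGE_SIZE, so a buffer size such as 1536 (3 * 512) is carved from a slab aligned to 512 bytes rather than to the ARCH_KMALLOC_MINALIGN default. A minimal userspace sketch of that rule, assuming 4 KiB pages (slab_align() and the sample sizes are illustrative, not kernel code):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u	/* assumed page size for the demo */

/*
 * Largest power of two dividing block_size, capped at PAGE_SIZE.
 * Mirrors the kernel's min(1U << __ffs(block_size), (unsigned)PAGE_SIZE):
 * __ffs() returns the index of the lowest set bit, and x & -x isolates
 * that bit directly.
 */
static unsigned slab_align(unsigned block_size)
{
	unsigned align = block_size & -block_size;

	return align < PAGE_SIZE ? align : PAGE_SIZE;
}

int main(void)
{
	assert(slab_align(512) == 512);		/* power of two: unchanged */
	assert(slab_align(1536) == 512);	/* 3 * 512: aligned to 512 */
	assert(slab_align(12288) == 4096);	/* 3 * 4096: capped at a page */
	printf("alignment rule holds\n");
	return 0;
}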

drivers/md/dm-cache-background-tracker.c (+1 -1)

@@ -166,7 +166,7 @@
 		atomic_read(&b->pending_demotes) >= b->max_work;
 }
 
-struct bt_work *alloc_work(struct background_tracker *b)
+static struct bt_work *alloc_work(struct background_tracker *b)
 {
 	if (max_work_reached(b))
 		return NULL;
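
alloc_work() has no callers outside this file, so sparse emits its classic "symbol 'alloc_work' was not declared. Should it be static?" warning. The general pattern, with hypothetical names (helper() and public_api() are not from the patch):

/* file-local helper: static gives it internal linkage */
static int helper(int x)
{
	return x * 2;
}

/* exported function: its prototype lives in a shared header */
int public_api(int x)
{
	return helper(x) + 1;
}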

drivers/md/dm-integrity.c (+1 -1)

@@ -2440,7 +2440,7 @@
 	unsigned i;
 	for (i = 0; i < ic->journal_sections; i++)
 		kvfree(sl[i]);
-	kfree(sl);
+	kvfree(sl);
 }
 
 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
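
The scatterlist array sl was kvmalloc'd, and kvmalloc() falls back to vmalloc() for large requests, so the pointer is not guaranteed to be a slab object and a bare kfree() corrupts memory whenever the vmalloc path was taken. A kernel-style sketch of the pairing rule (build_table() is a hypothetical helper, not from the patch):

#include <linux/errno.h>
#include <linux/mm.h>	/* kvmalloc_array(), kvfree() */

/*
 * kvmalloc()-family memory must be released with kvfree(), which
 * dispatches to kfree() or vfree() depending on how the allocation
 * was actually satisfied.
 */
static int build_table(void ***out, size_t n)
{
	void **table = kvmalloc_array(n, sizeof(*table),
				      GFP_KERNEL | __GFP_ZERO);

	if (!table)
		return -ENOMEM;

	*out = table;	/* caller frees with kvfree(), never kfree() */
	return 0;
}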

drivers/md/dm-raid1.c (+6 -4)

@@ -23,6 +23,8 @@
 
 #define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
 
+#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)
+
 #define DM_RAID1_HANDLE_ERRORS	0x01
 #define DM_RAID1_KEEP_LOG	0x02
 #define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -255,7 +257,7 @@
 	unsigned long error_bits;
 
 	unsigned int i;
-	struct dm_io_region io[ms->nr_mirrors];
+	struct dm_io_region io[MAX_NR_MIRRORS];
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
@@ -651,7 +653,7 @@
 static void do_write(struct mirror_set *ms, struct bio *bio)
 {
 	unsigned int i;
-	struct dm_io_region io[ms->nr_mirrors], *dest = io;
+	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
@@ -1083,7 +1085,7 @@
 	argc -= args_used;
 
 	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
-	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
+	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
 		ti->error = "Invalid number of mirrors";
 		dm_dirty_log_destroy(dl);
 		return -EINVAL;
@@ -1404,7 +1406,7 @@
 	int num_feature_args = 0;
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
-	char buffer[ms->nr_mirrors + 1];
+	char buffer[MAX_NR_MIRRORS + 1];
 
 	switch (type) {
 	case STATUSTYPE_INFO:
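
The constructor already rejects nr_mirrors above DM_KCOPYD_MAX_REGIONS + 1, so the on-stack arrays can be sized by that compile-time bound instead of by a runtime value, making stack usage predictable and allowing VLA-free builds. A standalone sketch of the pattern (KCOPYD_MAX_REGIONS, struct io_region and write_mirrors() are illustrative stand-ins, not kernel code):

#include <stdio.h>

#define KCOPYD_MAX_REGIONS	8	/* stand-in for DM_KCOPYD_MAX_REGIONS */
#define MAX_NR_MIRRORS		(KCOPYD_MAX_REGIONS + 1)

struct io_region {
	unsigned long sector;
	unsigned long count;
};

/*
 * Instead of "struct io_region io[nr];" (a VLA whose stack footprint
 * depends on runtime input), size the array by the validated maximum
 * and reject out-of-range counts up front.
 */
static int write_mirrors(unsigned nr)
{
	struct io_region io[MAX_NR_MIRRORS];	/* fixed bound, no VLA */
	unsigned i;

	if (nr < 2 || nr > MAX_NR_MIRRORS)
		return -1;

	for (i = 0; i < nr; i++)
		io[i] = (struct io_region){ .sector = i * 8, .count = 8 };

	printf("dispatching to %u mirrors\n", nr);
	return 0;
}

int main(void)
{
	return write_mirrors(3);
}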

drivers/md/dm.c (+4 -3)

@@ -1020,7 +1020,8 @@
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
-		sector_t sector, int *srcu_idx)
+						sector_t sector, int *srcu_idx)
+	__acquires(md->io_barrier)
 {
 	struct dm_table *map;
 	struct dm_target *ti;
@@ -1037,7 +1038,7 @@
 }
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-		long nr_pages, void **kaddr, pfn_t *pfn)
+				 long nr_pages, void **kaddr, pfn_t *pfn)
 {
 	struct mapped_device *md = dax_get_private(dax_dev);
 	sector_t sector = pgoff * PAGE_SECTORS;
@@ -1065,7 +1066,7 @@
 }
 
 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-		void *addr, size_t bytes, struct iov_iter *i)
+				    void *addr, size_t bytes, struct iov_iter *i)
 {
 	struct mapped_device *md = dax_get_private(dax_dev);
 	sector_t sector = pgoff * PAGE_SECTORS;
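
dm_dax_get_live_target() returns with the device's SRCU read lock held and a later dm_put_live_table() call drops it; without an annotation, sparse's context checker reports an imbalance for any function that locks but does not unlock. A hypothetical sketch of the __acquires()/__releases() pair (struct dax_ctx and the ctx_* helpers are made-up names, not the dm.c API):

#include <linux/srcu.h>

struct dax_ctx {
	struct srcu_struct io_barrier;
	void __rcu *map;
};

/* Exits with the SRCU read lock held; __acquires() tells sparse so. */
static void *ctx_get_map(struct dax_ctx *c, int *srcu_idx)
	__acquires(c->io_barrier)
{
	*srcu_idx = srcu_read_lock(&c->io_barrier);
	return srcu_dereference(c->map, &c->io_barrier);
}

/* Drops the lock taken above; __releases() balances the context. */
static void ctx_put_map(struct dax_ctx *c, int srcu_idx)
	__releases(c->io_barrier)
{
	srcu_read_unlock(&c->io_barrier, srcu_idx);
}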