Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-6.3/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

- Fix DM thin to work as a swap device by using 'limit_swap_bios' DM
target flag (initially added to allow swap to dm-crypt) to throttle
the amount of outstanding swap bios.

- Fix DM crypt soft lockup warnings by calling cond_resched() from the
  CPU-intensive loop in dmcrypt_write().

- Fix DM crypt to not access an uninitialized tasklet. This fix allows
  for consistent handling of IO completion by _not_ needlessly punting
  to a workqueue when tasklets are not needed.

- Fix DM core's alloc_dev() initialization for DM stats to check for
and propagate alloc_percpu() failure.

* tag 'for-6.3/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm stats: check for and propagate alloc_percpu failure
dm crypt: avoid accessing uninitialized tasklet
dm crypt: add cond_resched() to dmcrypt_write()
dm thin: fix deadlock when swapping to thin device

+22 -9
+10 -6
drivers/md/dm-crypt.c
··· 72 72 struct crypt_config *cc; 73 73 struct bio *base_bio; 74 74 u8 *integrity_metadata; 75 - bool integrity_metadata_from_pool; 75 + bool integrity_metadata_from_pool:1; 76 + bool in_tasklet:1; 77 + 76 78 struct work_struct work; 77 79 struct tasklet_struct tasklet; 78 80 ··· 1732 1730 io->ctx.r.req = NULL; 1733 1731 io->integrity_metadata = NULL; 1734 1732 io->integrity_metadata_from_pool = false; 1733 + io->in_tasklet = false; 1735 1734 atomic_set(&io->io_pending, 0); 1736 1735 } 1737 1736 ··· 1779 1776 * our tasklet. In this case we need to delay bio_endio() 1780 1777 * execution to after the tasklet is done and dequeued. 1781 1778 */ 1782 - if (tasklet_trylock(&io->tasklet)) { 1783 - tasklet_unlock(&io->tasklet); 1784 - bio_endio(base_bio); 1779 + if (io->in_tasklet) { 1780 + INIT_WORK(&io->work, kcryptd_io_bio_endio); 1781 + queue_work(cc->io_queue, &io->work); 1785 1782 return; 1786 1783 } 1787 1784 1788 - INIT_WORK(&io->work, kcryptd_io_bio_endio); 1789 - queue_work(cc->io_queue, &io->work); 1785 + bio_endio(base_bio); 1790 1786 } 1791 1787 1792 1788 /* ··· 1938 1936 io = crypt_io_from_node(rb_first(&write_tree)); 1939 1937 rb_erase(&io->rb_node, &write_tree); 1940 1938 kcryptd_io_write(io); 1939 + cond_resched(); 1941 1940 } while (!RB_EMPTY_ROOT(&write_tree)); 1942 1941 blk_finish_plug(&plug); 1943 1942 } ··· 2233 2230 * it is being executed with irqs disabled. 2234 2231 */ 2235 2232 if (in_hardirq() || irqs_disabled()) { 2233 + io->in_tasklet = true; 2236 2234 tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); 2237 2235 tasklet_schedule(&io->tasklet); 2238 2236 return;
+6 -1
drivers/md/dm-stats.c
··· 188 188 atomic_read(&shared->in_flight[WRITE]); 189 189 } 190 190 191 - void dm_stats_init(struct dm_stats *stats) 191 + int dm_stats_init(struct dm_stats *stats) 192 192 { 193 193 int cpu; 194 194 struct dm_stats_last_position *last; ··· 197 197 INIT_LIST_HEAD(&stats->list); 198 198 stats->precise_timestamps = false; 199 199 stats->last = alloc_percpu(struct dm_stats_last_position); 200 + if (!stats->last) 201 + return -ENOMEM; 202 + 200 203 for_each_possible_cpu(cpu) { 201 204 last = per_cpu_ptr(stats->last, cpu); 202 205 last->last_sector = (sector_t)ULLONG_MAX; 203 206 last->last_rw = UINT_MAX; 204 207 } 208 + 209 + return 0; 205 210 } 206 211 207 212 void dm_stats_cleanup(struct dm_stats *stats)
+1 -1
drivers/md/dm-stats.h
··· 21 21 unsigned long long duration_ns; 22 22 }; 23 23 24 - void dm_stats_init(struct dm_stats *st); 24 + int dm_stats_init(struct dm_stats *st); 25 25 void dm_stats_cleanup(struct dm_stats *st); 26 26 27 27 struct mapped_device;
+2
drivers/md/dm-thin.c
··· 3369 3369 pt->low_water_blocks = low_water_blocks; 3370 3370 pt->adjusted_pf = pt->requested_pf = pf; 3371 3371 ti->num_flush_bios = 1; 3372 + ti->limit_swap_bios = true; 3372 3373 3373 3374 /* 3374 3375 * Only need to enable discards if the pool should pass ··· 4250 4249 goto bad; 4251 4250 4252 4251 ti->num_flush_bios = 1; 4252 + ti->limit_swap_bios = true; 4253 4253 ti->flush_supported = true; 4254 4254 ti->accounts_remapped_io = true; 4255 4255 ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
+3 -1
drivers/md/dm.c
··· 2097 2097 if (!md->pending_io) 2098 2098 goto bad; 2099 2099 2100 - dm_stats_init(&md->stats); 2100 + r = dm_stats_init(&md->stats); 2101 + if (r < 0) 2102 + goto bad; 2101 2103 2102 2104 /* Populate the mapping, nobody knows we exist yet */ 2103 2105 spin_lock(&_minor_lock);