Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm integrity: fix flush with external metadata device

With external metadata device, flush requests are not passed down to the
data device.

Fix this by submitting the flush request in dm_integrity_flush_buffers. In
order to not degrade performance, we overlap the data device flush with
the metadata device flush.

Reported-by: Lukas Straub <lukasstraub2@web.de>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Mikulas Patocka and committed by Mike Snitzer
9b594826 0378c625

+56 -11
+6
drivers/md/dm-bufio.c
··· 1534 1534 } 1535 1535 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); 1536 1536 1537 + struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) 1538 + { 1539 + return c->dm_io; 1540 + } 1541 + EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); 1542 + 1537 1543 sector_t dm_bufio_get_block_number(struct dm_buffer *b) 1538 1544 { 1539 1545 return b->block;
+49 -11
drivers/md/dm-integrity.c
··· 1379 1379 #undef MAY_BE_HASH 1380 1380 } 1381 1381 1382 - static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) 1382 + struct flush_request { 1383 + struct dm_io_request io_req; 1384 + struct dm_io_region io_reg; 1385 + struct dm_integrity_c *ic; 1386 + struct completion comp; 1387 + }; 1388 + 1389 + static void flush_notify(unsigned long error, void *fr_) 1390 + { 1391 + struct flush_request *fr = fr_; 1392 + if (unlikely(error != 0)) 1393 + dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); 1394 + complete(&fr->comp); 1395 + } 1396 + 1397 + static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) 1383 1398 { 1384 1399 int r; 1400 + 1401 + struct flush_request fr; 1402 + 1403 + if (!ic->meta_dev) 1404 + flush_data = false; 1405 + if (flush_data) { 1406 + fr.io_req.bi_op = REQ_OP_WRITE, 1407 + fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC, 1408 + fr.io_req.mem.type = DM_IO_KMEM, 1409 + fr.io_req.mem.ptr.addr = NULL, 1410 + fr.io_req.notify.fn = flush_notify, 1411 + fr.io_req.notify.context = &fr; 1412 + fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio), 1413 + fr.io_reg.bdev = ic->dev->bdev, 1414 + fr.io_reg.sector = 0, 1415 + fr.io_reg.count = 0, 1416 + fr.ic = ic; 1417 + init_completion(&fr.comp); 1418 + r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); 1419 + BUG_ON(r); 1420 + } 1421 + 1385 1422 r = dm_bufio_write_dirty_buffers(ic->bufio); 1386 1423 if (unlikely(r)) 1387 1424 dm_integrity_io_error(ic, "writing tags", r); 1425 + 1426 + if (flush_data) 1427 + wait_for_completion(&fr.comp); 1388 1428 } 1389 1429 1390 1430 static void sleep_on_endio_wait(struct dm_integrity_c *ic) ··· 2150 2110 2151 2111 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { 2152 2112 integrity_metadata(&dio->work); 2153 - dm_integrity_flush_buffers(ic); 2113 + dm_integrity_flush_buffers(ic, false); 2154 2114 2155 2115 dio->in_flight = (atomic_t)ATOMIC_INIT(1); 2156 2116 dio->completion = NULL; ··· 2235 2195 flushes 
= bio_list_get(&ic->flush_bio_list); 2236 2196 if (unlikely(ic->mode != 'J')) { 2237 2197 spin_unlock_irq(&ic->endio_wait.lock); 2238 - dm_integrity_flush_buffers(ic); 2198 + dm_integrity_flush_buffers(ic, true); 2239 2199 goto release_flush_bios; 2240 2200 } 2241 2201 ··· 2449 2409 complete_journal_op(&comp); 2450 2410 wait_for_completion_io(&comp.comp); 2451 2411 2452 - dm_integrity_flush_buffers(ic); 2412 + dm_integrity_flush_buffers(ic, true); 2453 2413 } 2454 2414 2455 2415 static void integrity_writer(struct work_struct *w) ··· 2491 2451 { 2492 2452 int r; 2493 2453 2494 - dm_integrity_flush_buffers(ic); 2454 + dm_integrity_flush_buffers(ic, false); 2495 2455 if (dm_integrity_failed(ic)) 2496 2456 return; 2497 2457 ··· 2694 2654 unsigned long limit; 2695 2655 struct bio *bio; 2696 2656 2697 - dm_integrity_flush_buffers(ic); 2657 + dm_integrity_flush_buffers(ic, false); 2698 2658 2699 2659 range.logical_sector = 0; 2700 2660 range.n_sectors = ic->provided_data_sectors; ··· 2703 2663 add_new_range_and_wait(ic, &range); 2704 2664 spin_unlock_irq(&ic->endio_wait.lock); 2705 2665 2706 - dm_integrity_flush_buffers(ic); 2707 - if (ic->meta_dev) 2708 - blkdev_issue_flush(ic->dev->bdev, GFP_NOIO); 2666 + dm_integrity_flush_buffers(ic, true); 2709 2667 2710 2668 limit = ic->provided_data_sectors; 2711 2669 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { ··· 2972 2934 if (ic->meta_dev) 2973 2935 queue_work(ic->writer_wq, &ic->writer_work); 2974 2936 drain_workqueue(ic->writer_wq); 2975 - dm_integrity_flush_buffers(ic); 2937 + dm_integrity_flush_buffers(ic, true); 2976 2938 } 2977 2939 2978 2940 if (ic->mode == 'B') { 2979 - dm_integrity_flush_buffers(ic); 2941 + dm_integrity_flush_buffers(ic, true); 2980 2942 #if 1 2981 2943 /* set to 0 to test bitmap replay code */ 2982 2944 init_journal(ic, 0, ic->journal_sections, 0);
+1
include/linux/dm-bufio.h
··· 150 150 151 151 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); 152 152 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); 153 + struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c); 153 154 sector_t dm_bufio_get_block_number(struct dm_buffer *b); 154 155 void *dm_bufio_get_block_data(struct dm_buffer *b); 155 156 void *dm_bufio_get_aux_data(struct dm_buffer *b);