Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-6.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

- Stable fixes for 3 DM targets (integrity, verity and crypt) to
address systemic failure that can occur if user-provided pages map to
the same block.

- Fix DM crypt to not allow modifying data that is being encrypted for
authenticated encryption.

- Fix DM crypt and verity targets to align their respective bvec_iter
struct members to avoid the need for byte level access (due to
__packed attribute) that is costly on some arches (like RISC).

* tag 'for-6.8/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm-crypt, dm-integrity, dm-verity: bump target version
dm-verity, dm-crypt: align "struct bvec_iter" correctly
dm-crypt: recheck the integrity tag after a failure
dm-crypt: don't modify the data when using authenticated encryption
dm-verity: recheck the hash after a failure
dm-integrity: recheck the integrity tag after a failure

+256 -38
+82 -19
drivers/md/dm-crypt.c
··· 53 53 struct convert_context { 54 54 struct completion restart; 55 55 struct bio *bio_in; 56 - struct bio *bio_out; 57 56 struct bvec_iter iter_in; 57 + struct bio *bio_out; 58 58 struct bvec_iter iter_out; 59 - u64 cc_sector; 60 59 atomic_t cc_pending; 60 + u64 cc_sector; 61 61 union { 62 62 struct skcipher_request *req; 63 63 struct aead_request *req_aead; 64 64 } r; 65 + bool aead_recheck; 66 + bool aead_failed; 65 67 66 68 }; 67 69 ··· 83 81 atomic_t io_pending; 84 82 blk_status_t error; 85 83 sector_t sector; 84 + 85 + struct bvec_iter saved_bi_iter; 86 86 87 87 struct rb_node rb_node; 88 88 } CRYPTO_MINALIGN_ATTR; ··· 1374 1370 if (r == -EBADMSG) { 1375 1371 sector_t s = le64_to_cpu(*sector); 1376 1372 1377 - DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", 1378 - ctx->bio_in->bi_bdev, s); 1379 - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", 1380 - ctx->bio_in, s, 0); 1373 + ctx->aead_failed = true; 1374 + if (ctx->aead_recheck) { 1375 + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", 1376 + ctx->bio_in->bi_bdev, s); 1377 + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", 1378 + ctx->bio_in, s, 0); 1379 + } 1381 1380 } 1382 1381 1383 1382 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post) ··· 1764 1757 io->base_bio = bio; 1765 1758 io->sector = sector; 1766 1759 io->error = 0; 1760 + io->ctx.aead_recheck = false; 1761 + io->ctx.aead_failed = false; 1767 1762 io->ctx.r.req = NULL; 1768 1763 io->integrity_metadata = NULL; 1769 1764 io->integrity_metadata_from_pool = false; ··· 1776 1767 { 1777 1768 atomic_inc(&io->io_pending); 1778 1769 } 1770 + 1771 + static void kcryptd_queue_read(struct dm_crypt_io *io); 1779 1772 1780 1773 /* 1781 1774 * One of the bios was finished. 
Check for completion of ··· 1791 1780 1792 1781 if (!atomic_dec_and_test(&io->io_pending)) 1793 1782 return; 1783 + 1784 + if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) && 1785 + cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) { 1786 + io->ctx.aead_recheck = true; 1787 + io->ctx.aead_failed = false; 1788 + io->error = 0; 1789 + kcryptd_queue_read(io); 1790 + return; 1791 + } 1794 1792 1795 1793 if (io->ctx.r.req) 1796 1794 crypt_free_req(cc, io->ctx.r.req, base_bio); ··· 1836 1816 struct dm_crypt_io *io = clone->bi_private; 1837 1817 struct crypt_config *cc = io->cc; 1838 1818 unsigned int rw = bio_data_dir(clone); 1839 - blk_status_t error; 1819 + blk_status_t error = clone->bi_status; 1820 + 1821 + if (io->ctx.aead_recheck && !error) { 1822 + kcryptd_queue_crypt(io); 1823 + return; 1824 + } 1840 1825 1841 1826 /* 1842 1827 * free the processed pages 1843 1828 */ 1844 - if (rw == WRITE) 1829 + if (rw == WRITE || io->ctx.aead_recheck) 1845 1830 crypt_free_buffer_pages(cc, clone); 1846 1831 1847 - error = clone->bi_status; 1848 1832 bio_put(clone); 1849 1833 1850 1834 if (rw == READ && !error) { ··· 1868 1844 { 1869 1845 struct crypt_config *cc = io->cc; 1870 1846 struct bio *clone; 1847 + 1848 + if (io->ctx.aead_recheck) { 1849 + if (!(gfp & __GFP_DIRECT_RECLAIM)) 1850 + return 1; 1851 + crypt_inc_pending(io); 1852 + clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); 1853 + if (unlikely(!clone)) { 1854 + crypt_dec_pending(io); 1855 + return 1; 1856 + } 1857 + clone->bi_iter.bi_sector = cc->start + io->sector; 1858 + crypt_convert_init(cc, &io->ctx, clone, clone, io->sector); 1859 + io->saved_bi_iter = clone->bi_iter; 1860 + dm_submit_bio_remap(io->base_bio, clone); 1861 + return 0; 1862 + } 1871 1863 1872 1864 /* 1873 1865 * We need the original biovec array in order to decrypt the whole bio ··· 2111 2071 io->ctx.bio_out = clone; 2112 2072 io->ctx.iter_out = clone->bi_iter; 2113 2073 2074 + if (crypt_integrity_aead(cc)) { 
2075 + bio_copy_data(clone, io->base_bio); 2076 + io->ctx.bio_in = clone; 2077 + io->ctx.iter_in = clone->bi_iter; 2078 + } 2079 + 2114 2080 sector += bio_sectors(clone); 2115 2081 2116 2082 crypt_inc_pending(io); ··· 2153 2107 2154 2108 static void kcryptd_crypt_read_done(struct dm_crypt_io *io) 2155 2109 { 2110 + if (io->ctx.aead_recheck) { 2111 + if (!io->error) { 2112 + io->ctx.bio_in->bi_iter = io->saved_bi_iter; 2113 + bio_copy_data(io->base_bio, io->ctx.bio_in); 2114 + } 2115 + crypt_free_buffer_pages(io->cc, io->ctx.bio_in); 2116 + bio_put(io->ctx.bio_in); 2117 + } 2156 2118 crypt_dec_pending(io); 2157 2119 } 2158 2120 ··· 2190 2136 2191 2137 crypt_inc_pending(io); 2192 2138 2193 - crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 2194 - io->sector); 2139 + if (io->ctx.aead_recheck) { 2140 + io->ctx.cc_sector = io->sector + cc->iv_offset; 2141 + r = crypt_convert(cc, &io->ctx, 2142 + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); 2143 + } else { 2144 + crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio, 2145 + io->sector); 2195 2146 2196 - r = crypt_convert(cc, &io->ctx, 2197 - test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); 2147 + r = crypt_convert(cc, &io->ctx, 2148 + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); 2149 + } 2198 2150 /* 2199 2151 * Crypto API backlogged the request, because its queue was full 2200 2152 * and we're in softirq context, so continue from a workqueue ··· 2242 2182 if (error == -EBADMSG) { 2243 2183 sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)); 2244 2184 2245 - DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", 2246 - ctx->bio_in->bi_bdev, s); 2247 - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", 2248 - ctx->bio_in, s, 0); 2185 + ctx->aead_failed = true; 2186 + if (ctx->aead_recheck) { 2187 + DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu", 2188 + ctx->bio_in->bi_bdev, s); 2189 + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead", 2190 + ctx->bio_in, s, 0); 
2191 + } 2249 2192 io->error = BLK_STS_PROTECTION; 2250 2193 } else if (error < 0) 2251 2194 io->error = BLK_STS_IOERR; ··· 3173 3110 sval = strchr(opt_string + strlen("integrity:"), ':') + 1; 3174 3111 if (!strcasecmp(sval, "aead")) { 3175 3112 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags); 3176 - } else if (strcasecmp(sval, "none")) { 3113 + } else if (strcasecmp(sval, "none")) { 3177 3114 ti->error = "Unknown integrity profile"; 3178 3115 return -EINVAL; 3179 3116 } ··· 3702 3639 3703 3640 static struct target_type crypt_target = { 3704 3641 .name = "crypt", 3705 - .version = {1, 24, 0}, 3642 + .version = {1, 25, 0}, 3706 3643 .module = THIS_MODULE, 3707 3644 .ctr = crypt_ctr, 3708 3645 .dtr = crypt_dtr,
+85 -10
drivers/md/dm-integrity.c
··· 278 278 279 279 atomic64_t number_of_mismatches; 280 280 281 + mempool_t recheck_pool; 282 + 281 283 struct notifier_block reboot_notifier; 282 284 }; 283 285 ··· 1691 1689 get_random_bytes(result, ic->tag_size); 1692 1690 } 1693 1691 1692 + static void integrity_recheck(struct dm_integrity_io *dio) 1693 + { 1694 + struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1695 + struct dm_integrity_c *ic = dio->ic; 1696 + struct bvec_iter iter; 1697 + struct bio_vec bv; 1698 + sector_t sector, logical_sector, area, offset; 1699 + char checksum_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)]; 1700 + struct page *page; 1701 + void *buffer; 1702 + 1703 + get_area_and_offset(ic, dio->range.logical_sector, &area, &offset); 1704 + dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, 1705 + &dio->metadata_offset); 1706 + sector = get_data_sector(ic, area, offset); 1707 + logical_sector = dio->range.logical_sector; 1708 + 1709 + page = mempool_alloc(&ic->recheck_pool, GFP_NOIO); 1710 + buffer = page_to_virt(page); 1711 + 1712 + __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { 1713 + unsigned pos = 0; 1714 + 1715 + do { 1716 + char *mem; 1717 + int r; 1718 + struct dm_io_request io_req; 1719 + struct dm_io_region io_loc; 1720 + io_req.bi_opf = REQ_OP_READ; 1721 + io_req.mem.type = DM_IO_KMEM; 1722 + io_req.mem.ptr.addr = buffer; 1723 + io_req.notify.fn = NULL; 1724 + io_req.client = ic->io; 1725 + io_loc.bdev = ic->dev->bdev; 1726 + io_loc.sector = sector; 1727 + io_loc.count = ic->sectors_per_block; 1728 + 1729 + r = dm_io(&io_req, 1, &io_loc, NULL); 1730 + if (unlikely(r)) { 1731 + dio->bi_status = errno_to_blk_status(r); 1732 + goto free_ret; 1733 + } 1734 + 1735 + integrity_sector_checksum(ic, logical_sector, buffer, 1736 + checksum_onstack); 1737 + r = dm_integrity_rw_tag(ic, checksum_onstack, &dio->metadata_block, 1738 + &dio->metadata_offset, ic->tag_size, TAG_CMP); 1739 + if (r) { 1740 + if (r > 
0) { 1741 + DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", 1742 + bio->bi_bdev, logical_sector); 1743 + atomic64_inc(&ic->number_of_mismatches); 1744 + dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", 1745 + bio, logical_sector, 0); 1746 + r = -EILSEQ; 1747 + } 1748 + dio->bi_status = errno_to_blk_status(r); 1749 + goto free_ret; 1750 + } 1751 + 1752 + mem = bvec_kmap_local(&bv); 1753 + memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT); 1754 + kunmap_local(mem); 1755 + 1756 + pos += ic->sectors_per_block << SECTOR_SHIFT; 1757 + sector += ic->sectors_per_block; 1758 + logical_sector += ic->sectors_per_block; 1759 + } while (pos < bv.bv_len); 1760 + } 1761 + free_ret: 1762 + mempool_free(page, &ic->recheck_pool); 1763 + } 1764 + 1694 1765 static void integrity_metadata(struct work_struct *w) 1695 1766 { 1696 1767 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work); ··· 1851 1776 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE); 1852 1777 if (unlikely(r)) { 1853 1778 if (r > 0) { 1854 - sector_t s; 1855 - 1856 - s = sector - ((r + ic->tag_size - 1) / ic->tag_size); 1857 - DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx", 1858 - bio->bi_bdev, s); 1859 - r = -EILSEQ; 1860 - atomic64_inc(&ic->number_of_mismatches); 1861 - dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum", 1862 - bio, s, 0); 1779 + integrity_recheck(dio); 1780 + goto skip_io; 1863 1781 } 1864 1782 if (likely(checksums != checksums_onstack)) 1865 1783 kfree(checksums); ··· 4329 4261 goto bad; 4330 4262 } 4331 4263 4264 + r = mempool_init_page_pool(&ic->recheck_pool, 1, 0); 4265 + if (r) { 4266 + ti->error = "Cannot allocate mempool"; 4267 + goto bad; 4268 + } 4269 + 4332 4270 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", 4333 4271 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE); 4334 4272 if (!ic->metadata_wq) { ··· 4683 4609 kvfree(ic->bbs); 4684 4610 if (ic->bufio) 4685 4611 dm_bufio_client_destroy(ic->bufio); 
4612 + mempool_exit(&ic->recheck_pool); 4686 4613 mempool_exit(&ic->journal_io_mempool); 4687 4614 if (ic->io) 4688 4615 dm_io_client_destroy(ic->io); ··· 4736 4661 4737 4662 static struct target_type integrity_target = { 4738 4663 .name = "integrity", 4739 - .version = {1, 10, 0}, 4664 + .version = {1, 11, 0}, 4740 4665 .module = THIS_MODULE, 4741 4666 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 4742 4667 .ctr = dm_integrity_ctr,
+81 -7
drivers/md/dm-verity-target.c
··· 482 482 return 0; 483 483 } 484 484 485 + static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io, 486 + u8 *data, size_t len) 487 + { 488 + memcpy(data, io->recheck_buffer, len); 489 + io->recheck_buffer += len; 490 + 491 + return 0; 492 + } 493 + 494 + static int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, 495 + struct bvec_iter start, sector_t cur_block) 496 + { 497 + struct page *page; 498 + void *buffer; 499 + int r; 500 + struct dm_io_request io_req; 501 + struct dm_io_region io_loc; 502 + 503 + page = mempool_alloc(&v->recheck_pool, GFP_NOIO); 504 + buffer = page_to_virt(page); 505 + 506 + io_req.bi_opf = REQ_OP_READ; 507 + io_req.mem.type = DM_IO_KMEM; 508 + io_req.mem.ptr.addr = buffer; 509 + io_req.notify.fn = NULL; 510 + io_req.client = v->io; 511 + io_loc.bdev = v->data_dev->bdev; 512 + io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT); 513 + io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT); 514 + r = dm_io(&io_req, 1, &io_loc, NULL); 515 + if (unlikely(r)) 516 + goto free_ret; 517 + 518 + r = verity_hash(v, verity_io_hash_req(v, io), buffer, 519 + 1 << v->data_dev_block_bits, 520 + verity_io_real_digest(v, io), true); 521 + if (unlikely(r)) 522 + goto free_ret; 523 + 524 + if (memcmp(verity_io_real_digest(v, io), 525 + verity_io_want_digest(v, io), v->digest_size)) { 526 + r = -EIO; 527 + goto free_ret; 528 + } 529 + 530 + io->recheck_buffer = buffer; 531 + r = verity_for_bv_block(v, io, &start, verity_recheck_copy); 532 + if (unlikely(r)) 533 + goto free_ret; 534 + 535 + r = 0; 536 + free_ret: 537 + mempool_free(page, &v->recheck_pool); 538 + 539 + return r; 540 + } 541 + 485 542 static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io, 486 543 u8 *data, size_t len) 487 544 { ··· 565 508 { 566 509 bool is_zero; 567 510 struct dm_verity *v = io->v; 568 - #if defined(CONFIG_DM_VERITY_FEC) 569 511 struct bvec_iter start; 570 - #endif 571 512 struct bvec_iter iter_copy; 
572 513 struct bvec_iter *iter; 573 514 struct crypto_wait wait; ··· 616 561 if (unlikely(r < 0)) 617 562 return r; 618 563 619 - #if defined(CONFIG_DM_VERITY_FEC) 620 - if (verity_fec_is_enabled(v)) 621 - start = *iter; 622 - #endif 564 + start = *iter; 623 565 r = verity_for_io_block(v, io, iter, &wait); 624 566 if (unlikely(r < 0)) 625 567 return r; ··· 638 586 * tasklet since it may sleep, so fallback to work-queue. 639 587 */ 640 588 return -EAGAIN; 589 + } else if (verity_recheck(v, io, start, cur_block) == 0) { 590 + if (v->validated_blocks) 591 + set_bit(cur_block, v->validated_blocks); 592 + continue; 641 593 #if defined(CONFIG_DM_VERITY_FEC) 642 594 } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, 643 595 cur_block, NULL, &start) == 0) { ··· 996 940 997 941 if (v->verify_wq) 998 942 destroy_workqueue(v->verify_wq); 943 + 944 + mempool_exit(&v->recheck_pool); 945 + if (v->io) 946 + dm_io_client_destroy(v->io); 999 947 1000 948 if (v->bufio) 1001 949 dm_bufio_client_destroy(v->bufio); ··· 1439 1379 } 1440 1380 v->hash_blocks = hash_position; 1441 1381 1382 + r = mempool_init_page_pool(&v->recheck_pool, 1, 0); 1383 + if (unlikely(r)) { 1384 + ti->error = "Cannot allocate mempool"; 1385 + goto bad; 1386 + } 1387 + 1388 + v->io = dm_io_client_create(); 1389 + if (IS_ERR(v->io)) { 1390 + r = PTR_ERR(v->io); 1391 + v->io = NULL; 1392 + ti->error = "Cannot allocate dm io"; 1393 + goto bad; 1394 + } 1395 + 1442 1396 v->bufio = dm_bufio_client_create(v->hash_dev->bdev, 1443 1397 1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux), 1444 1398 dm_bufio_alloc_callback, NULL, ··· 1560 1486 static struct target_type verity_target = { 1561 1487 .name = "verity", 1562 1488 .features = DM_TARGET_IMMUTABLE, 1563 - .version = {1, 9, 0}, 1489 + .version = {1, 10, 0}, 1564 1490 .module = THIS_MODULE, 1565 1491 .ctr = verity_ctr, 1566 1492 .dtr = verity_dtr,
+8 -2
drivers/md/dm-verity.h
··· 11 11 #ifndef DM_VERITY_H 12 12 #define DM_VERITY_H 13 13 14 + #include <linux/dm-io.h> 14 15 #include <linux/dm-bufio.h> 15 16 #include <linux/device-mapper.h> 16 17 #include <linux/interrupt.h> ··· 69 68 unsigned long *validated_blocks; /* bitset blocks validated */ 70 69 71 70 char *signature_key_desc; /* signature keyring reference */ 71 + 72 + struct dm_io_client *io; 73 + mempool_t recheck_pool; 72 74 }; 73 75 74 76 struct dm_verity_io { ··· 80 76 /* original value of bio->bi_end_io */ 81 77 bio_end_io_t *orig_bi_end_io; 82 78 79 + struct bvec_iter iter; 80 + 83 81 sector_t block; 84 82 unsigned int n_blocks; 85 83 bool in_tasklet; 86 84 87 - struct bvec_iter iter; 88 - 89 85 struct work_struct work; 86 + 87 + char *recheck_buffer; 90 88 91 89 /* 92 90 * Three variably-size fields follow this struct: