Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm integrity: add a bitmap mode

Introduce an alternate mode of operation where dm-integrity uses a
bitmap instead of a journal. If a bit in the bitmap is 1, the
corresponding region's data and integrity tags are not synchronized - if
the machine crashes, the unsynchronized regions will be recalculated.
The bitmap mode is faster than the journal mode, because we don't have
to write the data twice, but it is also less reliable, because if data
corruption happens when the machine crashes, it may not be detected.

Benchmark results for an SSD connected to a SATA300 port, when doing
large linear writes with dd:

buffered I/O:
raw device throughput - 245MB/s
dm-integrity with journaling - 120MB/s
dm-integrity with bitmap - 238MB/s

direct I/O with 1MB block size:
raw device throughput - 248MB/s
dm-integrity with journaling - 123MB/s
dm-integrity with bitmap - 223MB/s

For more information see Documentation/device-mapper/dm-integrity.txt

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>

Authored by Mikulas Patocka and committed by Mike Snitzer.
468dfca3 8b3bbd49

+524 -32
+22
Documentation/device-mapper/dm-integrity.txt
··· 21 21 mode, the dm-integrity target can be used to detect silent data 22 22 corruption on the disk or in the I/O path. 23 23 24 + There's an alternate mode of operation where dm-integrity uses bitmap 25 + instead of a journal. If a bit in the bitmap is 1, the corresponding 26 + region's data and integrity tags are not synchronized - if the machine 27 + crashes, the unsynchronized regions will be recalculated. The bitmap mode 28 + is faster than the journal mode, because we don't have to write the data 29 + twice, but it is also less reliable, because if data corruption happens 30 + when the machine crashes, it may not be detected. 24 31 25 32 When loading the target for the first time, the kernel driver will format 26 33 the device. But it will only format the device if the superblock contains ··· 66 59 either both data and tag or none of them are written. The 67 60 journaled mode degrades write throughput twice because the 68 61 data have to be written twice. 62 + B - bitmap mode - data and metadata are written without any 63 + synchronization, the driver maintains a bitmap of dirty 64 + regions where data and metadata don't match. This mode can 65 + only be used with internal hash. 69 66 R - recovery mode - in this mode, journal is not replayed, 70 67 checksums are not checked and writes to the device are not 71 68 allowed. This mode is useful for data recovery if the ··· 161 150 Supported values are 512, 1024, 2048 and 4096 bytes. If not 162 151 specified the default block size is 512 bytes. 163 152 153 + sectors_per_bit:number 154 + In the bitmap mode, this parameter specifies the number of 155 + 512-byte sectors that corresponds to one bitmap bit. 156 + 157 + bitmap_flush_interval:number 158 + The bitmap flush interval in milliseconds. The metadata buffers 159 + are synchronized when this interval expires. 
160 + 161 + 164 162 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can 165 163 be changed when reloading the target (load an inactive table and swap the 166 164 tables with suspend and resume). The other arguments should not be changed ··· 194 174 * flags 195 175 SB_FLAG_HAVE_JOURNAL_MAC - a flag is set if journal_mac is used 196 176 SB_FLAG_RECALCULATING - recalculating is in progress 177 + SB_FLAG_DIRTY_BITMAP - journal area contains the bitmap of dirty 178 + blocks 197 179 * log2(sectors per block) 198 180 * a position where recalculating finished 199 181 * journal
+502 -32
drivers/md/dm-integrity.c
··· 24 24 25 25 #define DEFAULT_INTERLEAVE_SECTORS 32768 26 26 #define DEFAULT_JOURNAL_SIZE_FACTOR 7 27 + #define DEFAULT_SECTORS_PER_BITMAP_BIT 32768 27 28 #define DEFAULT_BUFFER_SECTORS 128 28 29 #define DEFAULT_JOURNAL_WATERMARK 50 29 30 #define DEFAULT_SYNC_MSEC 10000 ··· 34 33 #define METADATA_WORKQUEUE_MAX_ACTIVE 16 35 34 #define RECALC_SECTORS 8192 36 35 #define RECALC_WRITE_SUPER 16 36 + #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ 37 + #define BITMAP_FLUSH_INTERVAL (10 * HZ) 37 38 38 39 /* 39 40 * Warning - DEBUG_PRINT prints security-sensitive data to the log, ··· 51 48 #define SB_MAGIC "integrt" 52 49 #define SB_VERSION_1 1 53 50 #define SB_VERSION_2 2 51 + #define SB_VERSION_3 3 54 52 #define SB_SECTORS 8 55 53 #define MAX_SECTORS_PER_BLOCK 8 56 54 ··· 64 60 __u64 provided_data_sectors; /* userspace uses this value */ 65 61 __u32 flags; 66 62 __u8 log2_sectors_per_block; 67 - __u8 pad[3]; 63 + __u8 log2_blocks_per_bitmap_bit; 64 + __u8 pad[2]; 68 65 __u64 recalc_sector; 69 66 }; 70 67 71 68 #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 72 69 #define SB_FLAG_RECALCULATING 0x2 70 + #define SB_FLAG_DIRTY_BITMAP 0x4 73 71 74 72 #define JOURNAL_ENTRY_ROUNDUP 8 75 73 ··· 161 155 struct workqueue_struct *metadata_wq; 162 156 struct superblock *sb; 163 157 unsigned journal_pages; 158 + unsigned n_bitmap_blocks; 159 + 164 160 struct page_list *journal; 165 161 struct page_list *journal_io; 166 162 struct page_list *journal_xor; 163 + struct page_list *recalc_bitmap; 164 + struct page_list *may_write_bitmap; 165 + struct bitmap_block_status *bbs; 166 + unsigned bitmap_flush_interval; 167 + struct delayed_work bitmap_flush_work; 167 168 168 169 struct crypto_skcipher *journal_crypt; 169 170 struct scatterlist **journal_scatterlist; ··· 197 184 __s8 log2_metadata_run; 198 185 __u8 log2_buffer_sectors; 199 186 __u8 sectors_per_block; 187 + __u8 log2_blocks_per_bitmap_bit; 200 188 201 189 unsigned char mode; 202 190 int suspending; ··· 250 236 251 237 bool 
journal_uptodate; 252 238 bool just_formatted; 239 + bool recalculate_flag; 253 240 254 241 struct alg_spec internal_hash_alg; 255 242 struct alg_spec journal_crypt_alg; ··· 305 290 struct journal_io { 306 291 struct dm_integrity_range range; 307 292 struct journal_completion *comp; 293 + }; 294 + 295 + struct bitmap_block_status { 296 + struct work_struct work; 297 + struct dm_integrity_c *ic; 298 + unsigned idx; 299 + unsigned long *bitmap; 300 + struct bio_list bio_queue; 301 + spinlock_t bio_queue_lock; 302 + 308 303 }; 309 304 310 305 static struct kmem_cache *journal_io_cache; ··· 452 427 453 428 static void sb_set_version(struct dm_integrity_c *ic) 454 429 { 455 - if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 430 + if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) 431 + ic->sb->version = SB_VERSION_3; 432 + else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 456 433 ic->sb->version = SB_VERSION_2; 457 434 else 458 435 ic->sb->version = SB_VERSION_1; ··· 476 449 io_loc.count = SB_SECTORS; 477 450 478 451 return dm_io(&io_req, 1, &io_loc, NULL); 452 + } 453 + 454 + #define BITMAP_OP_TEST_ALL_SET 0 455 + #define BITMAP_OP_TEST_ALL_CLEAR 1 456 + #define BITMAP_OP_SET 2 457 + #define BITMAP_OP_CLEAR 3 458 + 459 + static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, sector_t sector, sector_t n_sectors, int mode) 460 + { 461 + unsigned long bit, end_bit, this_end_bit, page, end_page; 462 + unsigned long *data; 463 + 464 + if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { 465 + DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)\n", 466 + (unsigned long long)sector, 467 + (unsigned long long)n_sectors, 468 + ic->sb->log2_sectors_per_block, 469 + ic->log2_blocks_per_bitmap_bit, 470 + mode); 471 + BUG(); 472 + } 473 + 474 + if (unlikely(!n_sectors)) 475 + return true; 476 + 477 + bit = sector >> (ic->sb->log2_sectors_per_block 
+ ic->log2_blocks_per_bitmap_bit); 478 + end_bit = (sector + n_sectors - 1) >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 479 + 480 + page = bit / (PAGE_SIZE * 8); 481 + bit %= PAGE_SIZE * 8; 482 + 483 + end_page = end_bit / (PAGE_SIZE * 8); 484 + end_bit %= PAGE_SIZE * 8; 485 + 486 + repeat: 487 + if (page < end_page) { 488 + this_end_bit = PAGE_SIZE * 8 - 1; 489 + } else { 490 + this_end_bit = end_bit; 491 + } 492 + 493 + data = lowmem_page_address(bitmap[page].page); 494 + 495 + if (mode == BITMAP_OP_TEST_ALL_SET) { 496 + while (bit <= this_end_bit) { 497 + if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 498 + do { 499 + if (data[bit / BITS_PER_LONG] != -1) 500 + return false; 501 + bit += BITS_PER_LONG; 502 + } while (this_end_bit >= bit + BITS_PER_LONG - 1); 503 + continue; 504 + } 505 + if (!test_bit(bit, data)) 506 + return false; 507 + bit++; 508 + } 509 + } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) { 510 + while (bit <= this_end_bit) { 511 + if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 512 + do { 513 + if (data[bit / BITS_PER_LONG] != 0) 514 + return false; 515 + bit += BITS_PER_LONG; 516 + } while (this_end_bit >= bit + BITS_PER_LONG - 1); 517 + continue; 518 + } 519 + if (test_bit(bit, data)) 520 + return false; 521 + bit++; 522 + } 523 + } else if (mode == BITMAP_OP_SET) { 524 + while (bit <= this_end_bit) { 525 + if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 526 + do { 527 + data[bit / BITS_PER_LONG] = -1; 528 + bit += BITS_PER_LONG; 529 + } while (this_end_bit >= bit + BITS_PER_LONG - 1); 530 + continue; 531 + } 532 + __set_bit(bit, data); 533 + bit++; 534 + } 535 + } else if (mode == BITMAP_OP_CLEAR) { 536 + if (!bit && this_end_bit == PAGE_SIZE * 8 - 1) 537 + clear_page(data); 538 + else while (bit <= this_end_bit) { 539 + if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { 540 + do { 541 + data[bit / BITS_PER_LONG] = 0; 542 
+ bit += BITS_PER_LONG; 543 + } while (this_end_bit >= bit + BITS_PER_LONG - 1); 544 + continue; 545 + } 546 + __clear_bit(bit, data); 547 + bit++; 548 + } 549 + } else { 550 + BUG(); 551 + } 552 + 553 + if (unlikely(page < end_page)) { 554 + bit = 0; 555 + page++; 556 + goto repeat; 557 + } 558 + 559 + return true; 560 + } 561 + 562 + static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src) 563 + { 564 + unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); 565 + unsigned i; 566 + 567 + for (i = 0; i < n_bitmap_pages; i++) { 568 + unsigned long *dst_data = lowmem_page_address(dst[i].page); 569 + unsigned long *src_data = lowmem_page_address(src[i].page); 570 + copy_page(dst_data, src_data); 571 + } 572 + } 573 + 574 + static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) 575 + { 576 + unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 577 + unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8); 578 + 579 + BUG_ON(bitmap_block >= ic->n_bitmap_blocks); 580 + return &ic->bbs[bitmap_block]; 479 581 } 480 582 481 583 static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset, ··· 1940 1784 goto journal_read_write; 1941 1785 } 1942 1786 1787 + if (ic->mode == 'B' && dio->write) { 1788 + if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { 1789 + struct bitmap_block_status *bbs = sector_to_bitmap_block(ic, dio->range.logical_sector); 1790 + 1791 + spin_lock(&bbs->bio_queue_lock); 1792 + bio_list_add(&bbs->bio_queue, bio); 1793 + spin_unlock(&bbs->bio_queue_lock); 1794 + 1795 + queue_work(ic->writer_wq, &bbs->work); 1796 + 1797 + return; 1798 + } 1799 + } 1800 + 1943 1801 dio->in_flight = (atomic_t)ATOMIC_INIT(2); 1944 1802 1945 1803 if (need_sync_io) { ··· 1980 1810 1981 1811 if (need_sync_io) { 
1982 1812 wait_for_completion_io(&read_comp); 1983 - if (unlikely(ic->recalc_wq != NULL) && 1984 - ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 1813 + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && 1985 1814 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)) 1986 1815 goto skip_check; 1816 + if (ic->mode == 'B') { 1817 + if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) 1818 + goto skip_check; 1819 + } 1820 + 1987 1821 if (likely(!bio->bi_status)) 1988 1822 integrity_metadata(&dio->work); 1989 1823 else ··· 2025 1851 wraparound_section(ic, &ic->free_section); 2026 1852 ic->n_uncommitted_sections++; 2027 1853 } 2028 - WARN_ON(ic->journal_sections * ic->journal_section_entries != 2029 - (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors); 1854 + if (WARN_ON(ic->journal_sections * ic->journal_section_entries != 1855 + (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors)) { 1856 + printk(KERN_CRIT "dm-integrity: " 1857 + "journal_sections %u, " 1858 + "journal_section_entries %u, " 1859 + "n_uncommitted_sections %u, " 1860 + "n_committed_sections %u, " 1861 + "journal_section_entries %u, " 1862 + "free_sectors %u\n", 1863 + ic->journal_sections, 1864 + ic->journal_section_entries, 1865 + ic->n_uncommitted_sections, 1866 + ic->n_committed_sections, 1867 + ic->journal_section_entries, 1868 + ic->free_sectors); 1869 + } 2030 1870 } 2031 1871 2032 1872 static void integrity_commit(struct work_struct *w) ··· 2327 2139 sector_t area, offset; 2328 2140 sector_t metadata_block; 2329 2141 unsigned metadata_offset; 2142 + sector_t logical_sector, n_sectors; 2330 2143 __u8 *t; 2331 2144 unsigned i; 2332 2145 int r; 2333 2146 unsigned super_counter = 0; 2147 + 2148 + DEBUG_print("start recalculation... 
(position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); 2334 2149 2335 2150 spin_lock_irq(&ic->endio_wait.lock); 2336 2151 ··· 2343 2152 goto unlock_ret; 2344 2153 2345 2154 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); 2346 - if (unlikely(range.logical_sector >= ic->provided_data_sectors)) 2155 + if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { 2156 + if (ic->mode == 'B') { 2157 + DEBUG_print("queue_delayed_work: bitmap_flush_work\n"); 2158 + queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); 2159 + } 2347 2160 goto unlock_ret; 2161 + } 2348 2162 2349 2163 get_area_and_offset(ic, range.logical_sector, &area, &offset); 2350 2164 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector); ··· 2357 2161 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset); 2358 2162 2359 2163 add_new_range_and_wait(ic, &range); 2360 - 2361 2164 spin_unlock_irq(&ic->endio_wait.lock); 2165 + logical_sector = range.logical_sector; 2166 + n_sectors = range.n_sectors; 2167 + 2168 + if (ic->mode == 'B') { 2169 + if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) { 2170 + goto advance_and_next; 2171 + } 2172 + while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { 2173 + logical_sector += ic->sectors_per_block; 2174 + n_sectors -= ic->sectors_per_block; 2175 + cond_resched(); 2176 + } 2177 + while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { 2178 + n_sectors -= ic->sectors_per_block; 2179 + cond_resched(); 2180 + } 2181 + get_area_and_offset(ic, logical_sector, &area, &offset); 2182 + } 2183 + 2184 + DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors); 2362 2185 2363 2186 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) { 2364 2187 
recalc_write_super(ic); 2188 + if (ic->mode == 'B') { 2189 + queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); 2190 + } 2365 2191 super_counter = 0; 2366 2192 } 2367 2193 ··· 2398 2180 io_req.client = ic->io; 2399 2181 io_loc.bdev = ic->dev->bdev; 2400 2182 io_loc.sector = get_data_sector(ic, area, offset); 2401 - io_loc.count = range.n_sectors; 2183 + io_loc.count = n_sectors; 2402 2184 2403 2185 r = dm_io(&io_req, 1, &io_loc, NULL); 2404 2186 if (unlikely(r)) { ··· 2407 2189 } 2408 2190 2409 2191 t = ic->recalc_tags; 2410 - for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) { 2411 - integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); 2192 + for (i = 0; i < n_sectors; i += ic->sectors_per_block) { 2193 + integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); 2412 2194 t += ic->tag_size; 2413 2195 } 2414 2196 ··· 2419 2201 dm_integrity_io_error(ic, "writing tags", r); 2420 2202 goto err; 2421 2203 } 2204 + 2205 + advance_and_next: 2206 + cond_resched(); 2422 2207 2423 2208 spin_lock_irq(&ic->endio_wait.lock); 2424 2209 remove_range_unlocked(ic, &range); ··· 2437 2216 2438 2217 recalc_write_super(ic); 2439 2218 } 2219 + 2220 + static void bitmap_block_work(struct work_struct *w) 2221 + { 2222 + struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work); 2223 + struct dm_integrity_c *ic = bbs->ic; 2224 + struct bio *bio; 2225 + struct bio_list bio_queue; 2226 + struct bio_list waiting; 2227 + 2228 + bio_list_init(&waiting); 2229 + 2230 + spin_lock(&bbs->bio_queue_lock); 2231 + bio_queue = bbs->bio_queue; 2232 + bio_list_init(&bbs->bio_queue); 2233 + spin_unlock(&bbs->bio_queue_lock); 2234 + 2235 + while ((bio = bio_list_pop(&bio_queue))) { 2236 + struct dm_integrity_io *dio; 2237 + 2238 + dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 2239 + 2240 + if (block_bitmap_op(ic, ic->may_write_bitmap, 
dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) { 2241 + remove_range(ic, &dio->range); 2242 + INIT_WORK(&dio->work, integrity_bio_wait); 2243 + queue_work(ic->wait_wq, &dio->work); 2244 + } else { 2245 + block_bitmap_op(ic, ic->journal, dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_SET); 2246 + bio_list_add(&waiting, bio); 2247 + } 2248 + } 2249 + 2250 + if (bio_list_empty(&waiting)) 2251 + return; 2252 + 2253 + rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL); 2254 + 2255 + while ((bio = bio_list_pop(&waiting))) { 2256 + struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); 2257 + 2258 + block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, dio->range.n_sectors, BITMAP_OP_SET); 2259 + 2260 + remove_range(ic, &dio->range); 2261 + INIT_WORK(&dio->work, integrity_bio_wait); 2262 + queue_work(ic->wait_wq, &dio->work); 2263 + } 2264 + 2265 + queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); 2266 + } 2267 + 2268 + static void bitmap_flush_work(struct work_struct *work) 2269 + { 2270 + struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); 2271 + struct dm_integrity_range range; 2272 + unsigned long limit; 2273 + 2274 + dm_integrity_flush_buffers(ic); 2275 + 2276 + range.logical_sector = 0; 2277 + range.n_sectors = ic->provided_data_sectors; 2278 + 2279 + spin_lock_irq(&ic->endio_wait.lock); 2280 + add_new_range_and_wait(ic, &range); 2281 + spin_unlock_irq(&ic->endio_wait.lock); 2282 + 2283 + dm_integrity_flush_buffers(ic); 2284 + if (ic->meta_dev) 2285 + blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL); 2286 + 2287 + limit = ic->provided_data_sectors; 2288 + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 2289 + limit = le64_to_cpu(ic->sb->recalc_sector) 2290 + >> (ic->sb->log2_sectors_per_block + 
ic->log2_blocks_per_bitmap_bit) 2291 + << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); 2292 + } 2293 + DEBUG_print("zeroing journal\n"); 2294 + block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); 2295 + block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); 2296 + 2297 + rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 2298 + 2299 + remove_range(ic, &range); 2300 + } 2301 + 2440 2302 2441 2303 static void init_journal(struct dm_integrity_c *ic, unsigned start_section, 2442 2304 unsigned n_sections, unsigned char commit_seq) ··· 2720 2416 static void dm_integrity_postsuspend(struct dm_target *ti) 2721 2417 { 2722 2418 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2419 + int r; 2723 2420 2724 2421 del_timer_sync(&ic->autocommit_timer); 2725 2422 ··· 2728 2423 2729 2424 if (ic->recalc_wq) 2730 2425 drain_workqueue(ic->recalc_wq); 2426 + 2427 + if (ic->mode == 'B') 2428 + cancel_delayed_work_sync(&ic->bitmap_flush_work); 2731 2429 2732 2430 queue_work(ic->commit_wq, &ic->commit_work); 2733 2431 drain_workqueue(ic->commit_wq); ··· 2740 2432 queue_work(ic->writer_wq, &ic->writer_work); 2741 2433 drain_workqueue(ic->writer_wq); 2742 2434 dm_integrity_flush_buffers(ic); 2435 + } 2436 + 2437 + if (ic->mode == 'B') { 2438 + dm_integrity_flush_buffers(ic); 2439 + #if 1 2440 + init_journal(ic, 0, ic->journal_sections, 0); 2441 + ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 2442 + r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); 2443 + if (unlikely(r)) 2444 + dm_integrity_io_error(ic, "writing superblock", r); 2445 + #endif 2743 2446 } 2744 2447 2745 2448 WRITE_ONCE(ic->suspending, 0); ··· 2763 2444 static void dm_integrity_resume(struct dm_target *ti) 2764 2445 { 2765 2446 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; 2447 + int r; 2448 + DEBUG_print("resume\n"); 2766 2449 2767 - replay_journal(ic); 2450 + if 
(ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { 2451 + DEBUG_print("resume dirty_bitmap\n"); 2452 + rw_journal_sectors(ic, REQ_OP_READ, 0, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 2453 + if (ic->mode == 'B') { 2454 + if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { 2455 + block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); 2456 + block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); 2457 + if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) { 2458 + ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 2459 + ic->sb->recalc_sector = cpu_to_le64(0); 2460 + } 2461 + } else { 2462 + DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n", ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit); 2463 + ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; 2464 + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); 2465 + block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); 2466 + block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); 2467 + rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 2468 + ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 2469 + ic->sb->recalc_sector = cpu_to_le64(0); 2470 + } 2471 + } else { 2472 + if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && 2473 + block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) { 2474 + ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 2475 + ic->sb->recalc_sector = cpu_to_le64(0); 2476 + } 2477 + init_journal(ic, 0, ic->journal_sections, 0); 2478 + replay_journal(ic); 2479 + ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 2480 + } 2481 + r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); 2482 + if (unlikely(r)) 2483 + dm_integrity_io_error(ic, 
"writing superblock", r); 2484 + } else { 2485 + replay_journal(ic); 2486 + if (ic->mode == 'B') { 2487 + int mode; 2488 + ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); 2489 + ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; 2490 + r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); 2491 + if (unlikely(r)) 2492 + dm_integrity_io_error(ic, "writing superblock", r); 2768 2493 2769 - if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 2494 + mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR; 2495 + block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode); 2496 + block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode); 2497 + block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode); 2498 + rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); 2499 + } 2500 + } 2501 + 2502 + DEBUG_print("testing recalc: %x\n", ic->sb->flags); 2503 + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 2770 2504 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); 2505 + DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors); 2771 2506 if (recalc_pos < ic->provided_data_sectors) { 2772 2507 queue_work(ic->recalc_wq, &ic->recalc_work); 2773 2508 } else if (recalc_pos > ic->provided_data_sectors) { ··· 2859 2486 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); 2860 2487 arg_count += ic->mode == 'J'; 2861 2488 arg_count += ic->mode == 'J'; 2489 + arg_count += ic->mode == 'B'; 2490 + arg_count += ic->mode == 'B'; 2862 2491 arg_count += !!ic->internal_hash_alg.alg_string; 2863 2492 arg_count += !!ic->journal_crypt_alg.alg_string; 2864 2493 arg_count += !!ic->journal_mac_alg.alg_string; ··· 2870 2495 DMEMIT(" meta_device:%s", ic->meta_dev->name); 2871 2496 if (ic->sectors_per_block != 1) 2872 2497 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); 2873 
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) 2498 + if (ic->recalculate_flag) 2874 2499 DMEMIT(" recalculate"); 2875 2500 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); 2876 2501 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); ··· 2878 2503 if (ic->mode == 'J') { 2879 2504 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage); 2880 2505 DMEMIT(" commit_time:%u", ic->autocommit_msec); 2506 + } 2507 + if (ic->mode == 'B') { 2508 + DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); 2509 + DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); 2881 2510 } 2882 2511 2883 2512 #define EMIT_ALG(a, n) \ ··· 3464 3085 * device 3465 3086 * offset from the start of the device 3466 3087 * tag size 3467 - * D - direct writes, J - journal writes, R - recovery mode 3088 + * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode 3468 3089 * number of optional arguments 3469 3090 * optional arguments: 3470 3091 * journal_sectors ··· 3474 3095 * commit_time 3475 3096 * meta_device 3476 3097 * block_size 3098 + * sectors_per_bit 3099 + * bitmap_flush_interval 3477 3100 * internal_hash 3478 3101 * journal_crypt 3479 3102 * journal_mac ··· 3492 3111 {0, 9, "Invalid number of feature args"}, 3493 3112 }; 3494 3113 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; 3495 - bool recalculate; 3496 3114 bool should_write_sb; 3497 3115 __u64 threshold; 3498 3116 unsigned long long start; 3117 + __s8 log2_sectors_per_bitmap_bit = -1; 3118 + __s8 log2_blocks_per_bitmap_bit; 3119 + __u64 bits_in_journal; 3120 + __u64 n_bitmap_bits; 3499 3121 3500 3122 #define DIRECT_ARGUMENTS 4 3501 3123 ··· 3522 3138 init_waitqueue_head(&ic->copy_to_journal_wait); 3523 3139 init_completion(&ic->crypto_backoff); 3524 3140 atomic64_set(&ic->number_of_mismatches, 0); 3141 + ic->bitmap_flush_interval = 
BITMAP_FLUSH_INTERVAL; 3525 3142 3526 3143 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); 3527 3144 if (r) { ··· 3545 3160 } 3546 3161 } 3547 3162 3548 - if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) 3163 + if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) { 3549 3164 ic->mode = argv[3][0]; 3550 - else { 3551 - ti->error = "Invalid mode (expecting J, D, R)"; 3165 + } else { 3166 + ti->error = "Invalid mode (expecting J, B, D, R)"; 3552 3167 r = -EINVAL; 3553 3168 goto bad; 3554 3169 } ··· 3558 3173 buffer_sectors = DEFAULT_BUFFER_SECTORS; 3559 3174 journal_watermark = DEFAULT_JOURNAL_WATERMARK; 3560 3175 sync_msec = DEFAULT_SYNC_MSEC; 3561 - recalculate = false; 3562 3176 ic->sectors_per_block = 1; 3563 3177 3564 3178 as.argc = argc - DIRECT_ARGUMENTS; ··· 3569 3185 while (extra_args--) { 3570 3186 const char *opt_string; 3571 3187 unsigned val; 3188 + unsigned long long llval; 3572 3189 opt_string = dm_shift_arg(&as); 3573 3190 if (!opt_string) { 3574 3191 r = -EINVAL; ··· 3605 3220 goto bad; 3606 3221 } 3607 3222 ic->sectors_per_block = val >> SECTOR_SHIFT; 3223 + } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) { 3224 + log2_sectors_per_bitmap_bit = !llval ? 
0 : __ilog2_u64(llval); 3225 + } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) { 3226 + if (val >= (uint64_t)UINT_MAX * 1000 / HZ) { 3227 + r = -EINVAL; 3228 + ti->error = "Invalid bitmap_flush_interval argument"; 3229 + } 3230 + ic->bitmap_flush_interval = msecs_to_jiffies(val); 3608 3231 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { 3609 3232 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, 3610 3233 "Invalid internal_hash argument"); ··· 3629 3236 if (r) 3630 3237 goto bad; 3631 3238 } else if (!strcmp(opt_string, "recalculate")) { 3632 - recalculate = true; 3239 + ic->recalculate_flag = true; 3633 3240 } else { 3634 3241 r = -EINVAL; 3635 3242 ti->error = "Invalid argument"; ··· 3680 3287 else 3681 3288 ic->log2_tag_size = -1; 3682 3289 3290 + if (ic->mode == 'B' && !ic->internal_hash) { 3291 + r = -EINVAL; 3292 + ti->error = "Bitmap mode can be only used with internal hash"; 3293 + goto bad; 3294 + } 3295 + 3683 3296 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); 3684 3297 ic->autocommit_msec = sync_msec; 3685 3298 timer_setup(&ic->autocommit_timer, autocommit_fn, 0); ··· 3731 3332 } 3732 3333 INIT_WORK(&ic->commit_work, integrity_commit); 3733 3334 3734 - if (ic->mode == 'J') { 3335 + if (ic->mode == 'J' || ic->mode == 'B') { 3735 3336 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); 3736 3337 if (!ic->writer_wq) { 3737 3338 ti->error = "Cannot allocate workqueue"; ··· 3772 3373 should_write_sb = true; 3773 3374 } 3774 3375 3775 - if (!ic->sb->version || ic->sb->version > SB_VERSION_2) { 3376 + if (!ic->sb->version || ic->sb->version > SB_VERSION_3) { 3776 3377 r = -EINVAL; 3777 3378 ti->error = "Unknown version"; 3778 3379 goto bad; ··· 3832 3433 ti->error = "The device is too small"; 3833 3434 goto bad; 3834 3435 } 3436 + 3437 + if (log2_sectors_per_bitmap_bit < 0) 3438 + log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT); 
3439 + if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) 3440 + log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; 3441 + 3442 + bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3); 3443 + if (bits_in_journal > UINT_MAX) 3444 + bits_in_journal = UINT_MAX; 3445 + while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit) 3446 + log2_sectors_per_bitmap_bit++; 3447 + 3448 + log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; 3449 + ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; 3450 + if (should_write_sb) { 3451 + ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; 3452 + } 3453 + n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) 3454 + + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit; 3455 + ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); 3456 + 3835 3457 if (!ic->meta_dev) 3836 3458 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); 3837 3459 ··· 3877 3457 DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); 3878 3458 DEBUG_print(" journal_entries %u\n", ic->journal_entries); 3879 3459 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); 3880 - DEBUG_print(" data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors); 3460 + DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT); 3881 3461 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); 3882 3462 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); 3883 3463 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); 3884 3464 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors, 3885 3465 
(unsigned long long)ic->provided_data_sectors); 3886 3466 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); 3467 + DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal); 3887 3468 3888 - if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { 3469 + if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { 3889 3470 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); 3890 3471 ic->sb->recalc_sector = cpu_to_le64(0); 3891 3472 } 3892 3473 3893 - if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { 3894 - if (!ic->internal_hash) { 3895 - r = -EINVAL; 3896 - ti->error = "Recalculate is only valid with internal hash"; 3897 - goto bad; 3898 - } 3474 + if (ic->internal_hash) { 3899 3475 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); 3900 3476 if (!ic->recalc_wq ) { 3901 3477 ti->error = "Cannot allocate workqueue"; ··· 3928 3512 r = create_journal(ic, &ti->error); 3929 3513 if (r) 3930 3514 goto bad; 3515 + 3516 + } 3517 + 3518 + if (ic->mode == 'B') { 3519 + unsigned i; 3520 + unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); 3521 + 3522 + ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); 3523 + if (!ic->recalc_bitmap) { 3524 + r = -ENOMEM; 3525 + goto bad; 3526 + } 3527 + ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); 3528 + if (!ic->may_write_bitmap) { 3529 + r = -ENOMEM; 3530 + goto bad; 3531 + } 3532 + ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); 3533 + if (!ic->bbs) { 3534 + r = -ENOMEM; 3535 + goto bad; 3536 + } 3537 + INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work); 3538 + for (i = 0; i < ic->n_bitmap_blocks; i++) { 3539 + struct bitmap_block_status *bbs = &ic->bbs[i]; 3540 + unsigned sector, pl_index, pl_offset; 3541 + 3542 + INIT_WORK(&bbs->work, bitmap_block_work); 3543 + bbs->ic = ic; 3544 + bbs->idx 
= i; 3545 + bio_list_init(&bbs->bio_queue); 3546 + spin_lock_init(&bbs->bio_queue_lock); 3547 + 3548 + sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT); 3549 + pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); 3550 + pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); 3551 + 3552 + bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset; 3553 + } 3931 3554 } 3932 3555 3933 3556 if (should_write_sb) { ··· 3991 3536 if (r) 3992 3537 goto bad; 3993 3538 } 3539 + if (ic->mode == 'B') { 3540 + unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8); 3541 + if (!max_io_len) 3542 + max_io_len = 1U << 31; 3543 + DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len); 3544 + if (!ti->max_io_len || ti->max_io_len > max_io_len) { 3545 + r = dm_set_target_max_io_len(ti, max_io_len); 3546 + if (r) 3547 + goto bad; 3548 + } 3549 + } 3994 3550 3995 3551 if (!ic->internal_hash) 3996 3552 dm_integrity_set(ti, ic); ··· 4010 3544 ti->flush_supported = true; 4011 3545 4012 3546 return 0; 3547 + 4013 3548 bad: 4014 3549 dm_integrity_dtr(ti); 4015 3550 return r; ··· 4035 3568 destroy_workqueue(ic->recalc_wq); 4036 3569 vfree(ic->recalc_buffer); 4037 3570 kvfree(ic->recalc_tags); 3571 + kvfree(ic->bbs); 4038 3572 if (ic->bufio) 4039 3573 dm_bufio_client_destroy(ic->bufio); 4040 3574 mempool_exit(&ic->journal_io_mempool); ··· 4048 3580 dm_integrity_free_page_list(ic->journal); 4049 3581 dm_integrity_free_page_list(ic->journal_io); 4050 3582 dm_integrity_free_page_list(ic->journal_xor); 3583 + dm_integrity_free_page_list(ic->recalc_bitmap); 3584 + dm_integrity_free_page_list(ic->may_write_bitmap); 4051 3585 if (ic->journal_scatterlist) 4052 3586 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); 4053 3587 if (ic->journal_io_scatterlist) ··· 4087 3617 4088 3618 static struct target_type integrity_target = { 4089 3619 .name = "integrity", 4090 - .version = {1, 2, 0}, 3620 + 
.version = {1, 3, 0}, 4091 3621 .module = THIS_MODULE, 4092 3622 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, 4093 3623 .ctr = dm_integrity_ctr,