// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	(IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			(IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6
#define SALT_SIZE			16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_VERSION_5			5
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__le16 integrity_tag_size;
	__le32 journal_sections;
	__le64 provided_data_sectors;	/* userspace uses this value */
	__le32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__le64 recalc_sector;
	__u8 pad2[8];
	__u8 salt[SALT_SIZE];
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8
#define SB_FLAG_FIXED_HMAC		0x10

#define JOURNAL_ENTRY_ROUNDUP		8

typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR		8

struct journal_entry {
	union {
		struct {
			__le32 sector_lo;
			__le32 sector_hi;
		} s;
		__le64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)	((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

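/*
 * Publishing a journal entry: the entry's data and tag are written first,
 * then smp_wmb() orders those stores before the store of the sector
 * number that makes the entry valid (pairing with the smp_rmb() in
 * __journal_read_write()). On 32-bit hosts the low word is written first
 * and the high word published last with WRITE_ONCE(), so a reader never
 * observes a half-written sector number. sector_hi also carries the
 * sentinel values: -1 means the entry is unused, -2 means in progress.
 */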
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		((je)->u.s.sector_hi = cpu_to_le32(-1))
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	((je)->u.s.sector_hi = cpu_to_le32(-2))

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	struct_group(sectors,
		__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
		__u8 mac[JOURNAL_MAC_PER_SECTOR];
	);
	commit_id_t commit_id;
};

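/*
 * One 512-byte sector of the journal: the last 8 bytes hold the
 * commit_id, the rest holds journal entries plus this sector's
 * JOURNAL_MAC_PER_SECTOR-byte slice of the section MAC (see
 * rw_section_mac()). struct_group() lets the entries and mac fields
 * be addressed as a single region.
 */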
#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned int key_size;
};

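/*
 * One alg_spec holds a parsed "internal_hash:", "journal_crypt:" or
 * "journal_mac:" target argument: the algorithm name and the key
 * argument as given on the table line, plus the decoded key bytes.
 */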
struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned int tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned int journal_pages;
	unsigned int n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned int bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned int journal_sections;
	unsigned int journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned int initial_sectors;
	unsigned int metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned int committed_section;
	unsigned int n_committed_sections;

	unsigned int uncommitted_section;
	unsigned int n_uncommitted_sections;

	unsigned int free_section;
	unsigned char free_section_entry;
	unsigned int free_sectors;

	unsigned int free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned int autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool wrote_to_journal;
	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool discard;
	bool fix_padding;
	bool fix_hmac;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

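/*
 * A range of sectors that an I/O holds exclusively. While active it
 * lives in the in_progress rb-tree (node); while blocked on an
 * overlapping range it sits on wait_list (task, wait_entry) until
 * remove_range_unlocked() re-adds it and wakes the owning task.
 */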
struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_op op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned int metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned int idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)			printk(KERN_DEBUG x, ##__VA_ARGS__)
#define DEBUG_bytes(bytes, len, msg, ...)	printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \
						       len ? ": " : "", len, bytes)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile; protection is performed by the layer above
 * (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if (ic->legacy_recalculate)
		return false;
	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
					  unsigned int j, unsigned char seq)
{
	/*
	 * XOR the commit number with the section and sector numbers, so
	 * that if a piece of the journal is written in the wrong place,
	 * it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1));	\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

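/*
 * Locate the tags for a data block: ms starts as the sector where the
 * area's metadata run begins (relative to the start of the metadata),
 * is converted to bufio buffer indexes, and is then advanced by the
 * whole buffers of tags that precede 'offset'; *metadata_offset
 * receives the byte offset of the block's tag within that buffer.
 */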
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned int *metadata_offset)
{
	__u64 ms;
	unsigned int mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

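/*
 * On the interleaved (single-device) layout the device looks like:
 * superblock + journal (initial_sectors), then for each area a
 * metadata run followed by 2^log2_interleave_sectors of data; hence
 * the data of 'area' starts after 'area' interleaves plus 'area' + 1
 * metadata runs.
 */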
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
		ic->sb->version = SB_VERSION_5;
	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

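/*
 * Compute (wr) or verify (!wr) the superblock MAC, used with the
 * fixed_hmac feature: the MAC is stored in the last bytes of the
 * superblock's first 512-byte sector and covers everything before it.
 */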
static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int mac_size = crypto_shash_digestsize(ic->journal_mac);
	__u8 *sb = (__u8 *)ic->sb;
	__u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size;

	if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
		return -EINVAL;
	}

	desc->tfm = ic->journal_mac;

	if (likely(wr)) {
		r = crypto_shash_digest(desc, sb, mac - sb, mac);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_digest", r);
			return r;
		}
	} else {
		__u8 actual_mac[HASH_MAX_DIGESTSIZE];

		r = crypto_shash_digest(desc, sb, mac - sb, actual_mac);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_digest", r);
			return r;
		}
		if (memcmp(mac, actual_mac, mac_size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
			return -EILSEQ;
		}
	}

	return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	const enum req_op op = opf & REQ_OP_MASK;
	int r;

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE) {
		sb_set_version(ic);
		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, true);
			if (unlikely(r))
				return r;
		}
	}

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r))
		return r;

	if (op == REQ_OP_READ) {
		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, false);
			if (unlikely(r))
				return r;
		}
	}

	return 0;
}

#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3

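/*
 * Test or modify a range of the in-memory bitmap. One bit covers
 * 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors;
 * TEST_ALL_SET/TEST_ALL_CLEAR return whether every bit in the range
 * has the expected value, SET/CLEAR always return true. Whole longs
 * (and, for CLEAR, whole pages) are handled at once when aligned.
 */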
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
		       sector,
		       n_sectors,
		       ic->sb->log2_sectors_per_block,
		       ic->log2_blocks_per_bitmap_bit,
		       mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page)
		this_end_bit = PAGE_SIZE * 8 - 1;
	else
		this_end_bit = end_bit;

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else {
			while (bit <= this_end_bit) {
				if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
					do {
						data[bit / BITS_PER_LONG] = 0;
						bit += BITS_PER_LONG;
					} while (this_end_bit >= bit + BITS_PER_LONG - 1);
					continue;
				}
				__clear_bit(bit, data);
				bit++;
			}
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned int i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);

		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			       unsigned int *pl_index, unsigned int *pl_offset)
{
	unsigned int sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
	unsigned int pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	unsigned int rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		__le64 section_le;

		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}

		section_le = cpu_to_le64(section);
		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof(section_le));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);

		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned int j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
			}
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;

	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

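/*
 * Encrypt or decrypt journal sections by XOR-ing them with the
 * pregenerated pad in ic->journal_xor. Since XOR is its own inverse,
 * the same operation serves both directions; only the source and
 * target page lists are swapped. Section MACs are written before a
 * section is encrypted, and the XOR itself is offloaded through the
 * async_tx API, with one completion counted per page.
 */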
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			unsigned int n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned int pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned int dummy;

			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(void *data, int err)
{
	struct journal_completion *comp = data;

	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			  unsigned int n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned int ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			    unsigned int n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;

	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
			       unsigned int sector, unsigned int n_sectors,
			       struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned int pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
				      "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
		       unsigned int section, unsigned int n_sections,
		       struct journal_completion *comp)
{
	unsigned int sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}

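/*
 * Write 'commit_sections' journal sections starting at 'commit_start'
 * to the device with FUA. Sections are first encrypted (or their MACs
 * computed) in memory; if the range wraps around the end of the
 * journal ring it is split into two writes whose completions are
 * counted by a single journal_completion.
 */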
static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned int i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned int to_end;

		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
					   commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			      unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned int sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = REQ_OP_WRITE;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;

		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
			n = &range->node.rb_left;
		else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
			n = &range->node.rb_right;
		else
			return false;
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;

		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;

		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND	(-1U)

static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned int found = NOT_FOUND;

	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);

		if (sector == j->sector)
			found = j - ic->journal_tree;

		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else
			n = j->node.rb_right;
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned int next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

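/*
 * Read, write or compare tags stored in the metadata area, walking
 * bufio buffers from *metadata_block/*metadata_offset (both are
 * advanced). TAG_WRITE only dirties a buffer when the tags actually
 * changed. TAG_CMP returns 0 on match; on a mismatching block it
 * returns the number of bytes that were still left to process, from
 * which the caller computes the failing sector. For discards, a tag
 * consisting of DISCARD_FILLER bytes is also accepted as matching.
 */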
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER		1
#define MAY_BE_HASH		2
	unsigned int hash_offset = 0;
	unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned int to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			if (memcmp(dp, tag, to_copy)) {
				memcpy(dp, tag, to_copy);
				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
			}
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)))
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
						goto thorough_test;
				}
			} else {
				unsigned int i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size)))
			hash_offset = (hash_offset + to_copy) % ic->tag_size;

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

struct flush_request {
	struct dm_io_request io_req;
	struct dm_io_region io_reg;
	struct dm_integrity_c *ic;
	struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
	struct flush_request *fr = fr_;

	if (unlikely(error != 0))
		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
	complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
	int r;
	struct flush_request fr;

	if (!ic->meta_dev)
		flush_data = false;
	if (flush_data) {
		fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		fr.io_req.mem.type = DM_IO_KMEM,
		fr.io_req.mem.ptr.addr = NULL,
		fr.io_req.notify.fn = flush_notify,
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
		fr.io_reg.bdev = ic->dev->bdev,
		fr.io_reg.sector = 0,
		fr.io_reg.count = 0,
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
		BUG_ON(r);
	}

	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);

	if (flush_data)
		wait_for_completion(&fr.comp);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r;

	r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;

		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

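/*
 * Compute the internal-hash checksum of one block: the hash covers the
 * superblock salt (with the fixed_hmac feature), the little-endian
 * sector number and the block data; the digest is zero-padded up to
 * tag_size. On (unexpected) hash failure the result is randomized so
 * that it cannot accidentally verify.
 */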
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__le64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned int digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto failed;
		}
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		sector_t sector;
		unsigned int sectors_to_process;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		if (likely(dio->op != REQ_OP_DISCARD))
			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		else
			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		if (unlikely(dio->op == REQ_OP_DISCARD)) {
			unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
			unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
			unsigned int max_blocks = max_size / ic->tag_size;

			memset(checksums, DISCARD_FILLER, max_size);

			while (bi_size) {
				unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);

				this_step_blocks = min(this_step_blocks, max_blocks);
				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
							this_step_blocks * ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					if (likely(checksums != checksums_onstack))
						kfree(checksums);
					goto error;
				}

				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
			}

			if (likely(checksums != checksums_onstack))
				kfree(checksums);
			goto skip_io;
		}

		sector = dio->range.logical_sector;
		sectors_to_process = dio->range.n_sectors;

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			struct bio_vec bv_copy = bv;
			unsigned int pos;
			char *mem, *checksums_ptr;

again:
			mem = bvec_kmap_local(&bv_copy);
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_local(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					sector_t s;

					s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
						    bio->bi_bdev, s);
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
							 bio, s, 0);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv_copy.bv_len)) {
				bv_copy.bv_offset += pos;
				bv_copy.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned int data_to_process = dio->range.n_sectors;

			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned int this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = bvec_virt(&biv);
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;
	dio->op = bio_op(bio);

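	/*
	 * Split discards so that they do not cross a max_io_len (i.e.
	 * interleave) boundary; dm_accept_partial_bio() truncates this
	 * bio at the boundary and DM core resubmits the remainder.
	 */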
1854 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1855 if (ti->max_io_len) {
1856 sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1857 unsigned int log2_max_io_len = __fls(ti->max_io_len);
1858 sector_t start_boundary = sec >> log2_max_io_len;
1859 sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1860
1861 if (start_boundary < end_boundary) {
1862 sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1863
1864 dm_accept_partial_bio(bio, len);
1865 }
1866 }
1867 }
1868
1869 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1870 submit_flush_bio(ic, dio);
1871 return DM_MAPIO_SUBMITTED;
1872 }
1873
1874 dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1875 dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1876 if (unlikely(dio->fua)) {
1877 /*
1878 * Don't pass down the FUA flag because we have to flush
1879 * disk cache anyway.
1880 */
1881 bio->bi_opf &= ~REQ_FUA;
1882 }
1883 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1884 DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
1885 dio->range.logical_sector, bio_sectors(bio),
1886 ic->provided_data_sectors);
1887 return DM_MAPIO_KILL;
1888 }
1889 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
1890 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1891 ic->sectors_per_block,
1892 dio->range.logical_sector, bio_sectors(bio));
1893 return DM_MAPIO_KILL;
1894 }
1895
1896 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1897 struct bvec_iter iter;
1898 struct bio_vec bv;
1899
1900 bio_for_each_segment(bv, bio, iter) {
1901 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1902 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1903 bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1904 return DM_MAPIO_KILL;
1905 }
1906 }
1907 }
1908
1909 bip = bio_integrity(bio);
1910 if (!ic->internal_hash) {
1911 if (bip) {
1912 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1913
1914 if (ic->log2_tag_size >= 0)
1915 wanted_tag_size <<= ic->log2_tag_size;
1916 else
1917 wanted_tag_size *= ic->tag_size;
1918 if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1919 DMERR("Invalid integrity data size %u, expected %u",
1920 bip->bip_iter.bi_size, wanted_tag_size);
1921 return DM_MAPIO_KILL;
1922 }
1923 }
1924 } else {
1925 if (unlikely(bip != NULL)) {
1926 DMERR("Unexpected integrity data when using internal hash");
1927 return DM_MAPIO_KILL;
1928 }
1929 }
1930
1931 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1932 return DM_MAPIO_KILL;
1933
1934 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1935 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1936 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1937
1938 dm_integrity_map_continue(dio, true);
1939 return DM_MAPIO_SUBMITTED;
1940}
1941
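/*
 * Copy data between the bio and the in-memory journal, one block at a
 * time. For writes, the data and the tag (computed by the internal
 * hash, or taken from the integrity payload) are stored in the journal
 * entry, and the entry's sector number is written last to publish it;
 * for reads, data is copied out of the journal, waiting for entries
 * that are still marked in-progress. Returns true if the bio is larger
 * than the allocated journal range, in which case the caller must
 * retake the lock and retry with the updated range.
 */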
1942static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1943 unsigned int journal_section, unsigned int journal_entry)
1944{
1945 struct dm_integrity_c *ic = dio->ic;
1946 sector_t logical_sector;
1947 unsigned int n_sectors;
1948
1949 logical_sector = dio->range.logical_sector;
1950 n_sectors = dio->range.n_sectors;
1951 do {
1952 struct bio_vec bv = bio_iovec(bio);
1953 char *mem;
1954
1955 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1956 bv.bv_len = n_sectors << SECTOR_SHIFT;
1957 n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1958 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1959retry_kmap:
1960 mem = kmap_local_page(bv.bv_page);
1961 if (likely(dio->op == REQ_OP_WRITE))
1962 flush_dcache_page(bv.bv_page);
1963
1964 do {
1965 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1966
1967 if (unlikely(dio->op == REQ_OP_READ)) {
1968 struct journal_sector *js;
1969 char *mem_ptr;
1970 unsigned int s;
1971
1972 if (unlikely(journal_entry_is_inprogress(je))) {
1973 flush_dcache_page(bv.bv_page);
1974 kunmap_local(mem);
1975
1976 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1977 goto retry_kmap;
1978 }
1979 smp_rmb();
1980 BUG_ON(journal_entry_get_sector(je) != logical_sector);
1981 js = access_journal_data(ic, journal_section, journal_entry);
1982 mem_ptr = mem + bv.bv_offset;
1983 s = 0;
1984 do {
1985 memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1986 *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1987 js++;
1988 mem_ptr += 1 << SECTOR_SHIFT;
1989 } while (++s < ic->sectors_per_block);
1990#ifdef INTERNAL_VERIFY
1991 if (ic->internal_hash) {
1992 char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1993
1994 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
1995 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
1996 DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
1997 logical_sector);
1998 dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
1999 bio, logical_sector, 0);
2000 }
2001 }
2002#endif
2003 }
2004
2005 if (!ic->internal_hash) {
2006 struct bio_integrity_payload *bip = bio_integrity(bio);
2007 unsigned int tag_todo = ic->tag_size;
2008 char *tag_ptr = journal_entry_tag(ic, je);
2009
2010 if (bip) {
2011 do {
2012 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
2013 unsigned int tag_now = min(biv.bv_len, tag_todo);
2014 char *tag_addr;
2015
2016 BUG_ON(PageHighMem(biv.bv_page));
2017 tag_addr = bvec_virt(&biv);
2018 if (likely(dio->op == REQ_OP_WRITE))
2019 memcpy(tag_ptr, tag_addr, tag_now);
2020 else
2021 memcpy(tag_addr, tag_ptr, tag_now);
2022 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
2023 tag_ptr += tag_now;
2024 tag_todo -= tag_now;
2025 } while (unlikely(tag_todo));
2026 } else if (likely(dio->op == REQ_OP_WRITE))
2027 memset(tag_ptr, 0, tag_todo);
2028 }
2029
2030 if (likely(dio->op == REQ_OP_WRITE)) {
2031 struct journal_sector *js;
2032 unsigned int s;
2033
2034 js = access_journal_data(ic, journal_section, journal_entry);
2035 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
2036
2037 s = 0;
2038 do {
2039 je->last_bytes[s] = js[s].commit_id;
2040 } while (++s < ic->sectors_per_block);
2041
2042 if (ic->internal_hash) {
2043 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
2044
2045 if (unlikely(digest_size > ic->tag_size)) {
2046 char checksums_onstack[HASH_MAX_DIGESTSIZE];
2047
2048 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
2049 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
2050 } else
2051 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
2052 }
2053
2054 journal_entry_set_sector(je, logical_sector);
2055 }
2056 logical_sector += ic->sectors_per_block;
2057
2058 journal_entry++;
2059 if (unlikely(journal_entry == ic->journal_section_entries)) {
2060 journal_entry = 0;
2061 journal_section++;
2062 wraparound_section(ic, &journal_section);
2063 }
2064
2065 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
2066 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
2067
2068 if (unlikely(dio->op == REQ_OP_READ))
2069 flush_dcache_page(bv.bv_page);
2070 kunmap_local(mem);
2071 } while (n_sectors);
2072
2073 if (likely(dio->op == REQ_OP_WRITE)) {
2074 smp_mb();
2075 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
2076 wake_up(&ic->copy_to_journal_wait);
2077 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2078 queue_work(ic->commit_wq, &ic->commit_work);
2079 else
2080 schedule_autocommit(ic);
2081 } else
2082 remove_range(ic, &dio->range);
2083
2084 if (unlikely(bio->bi_iter.bi_size)) {
2085 sector_t area, offset;
2086
2087 dio->range.logical_sector = logical_sector;
2088 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2089 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2090 return true;
2091 }
2092
2093 return false;
2094}
2095
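/*
 * The main I/O path. In journal mode, writes allocate journal entries
 * under endio_wait.lock and are copied in by __journal_read_write();
 * reads that hit the journal are served from it. Otherwise the range
 * is locked against concurrent access, the bio is remapped to the data
 * device, and the tags are verified or updated in integrity_metadata(),
 * inline for reads with an internal hash and for discards, or from the
 * metadata workqueue for everything else. As noted below, this function
 * must not sleep when called from the map routine, so in that case it
 * offloads the bio to a workqueue instead of blocking.
 */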
2096static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2097{
2098 struct dm_integrity_c *ic = dio->ic;
2099 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2100 unsigned int journal_section, journal_entry;
2101 unsigned int journal_read_pos;
2102 struct completion read_comp;
2103 bool discard_retried = false;
2104 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2105
2106 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2107 need_sync_io = true;
2108
2109 if (need_sync_io && from_map) {
2110 INIT_WORK(&dio->work, integrity_bio_wait);
2111 queue_work(ic->offload_wq, &dio->work);
2112 return;
2113 }
2114
2115lock_retry:
2116 spin_lock_irq(&ic->endio_wait.lock);
2117retry:
2118 if (unlikely(dm_integrity_failed(ic))) {
2119 spin_unlock_irq(&ic->endio_wait.lock);
2120 do_endio(ic, bio);
2121 return;
2122 }
2123 dio->range.n_sectors = bio_sectors(bio);
2124 journal_read_pos = NOT_FOUND;
2125 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2126 if (dio->op == REQ_OP_WRITE) {
2127 unsigned int next_entry, i, pos;
2128 unsigned int ws, we, range_sectors;
2129
2130 dio->range.n_sectors = min(dio->range.n_sectors,
2131 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2132 if (unlikely(!dio->range.n_sectors)) {
2133 if (from_map)
2134 goto offload_to_thread;
2135 sleep_on_endio_wait(ic);
2136 goto retry;
2137 }
2138 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2139 ic->free_sectors -= range_sectors;
2140 journal_section = ic->free_section;
2141 journal_entry = ic->free_section_entry;
2142
2143 next_entry = ic->free_section_entry + range_sectors;
2144 ic->free_section_entry = next_entry % ic->journal_section_entries;
2145 ic->free_section += next_entry / ic->journal_section_entries;
2146 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2147 wraparound_section(ic, &ic->free_section);
2148
2149 pos = journal_section * ic->journal_section_entries + journal_entry;
2150 ws = journal_section;
2151 we = journal_entry;
2152 i = 0;
2153 do {
2154 struct journal_entry *je;
2155
2156 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2157 pos++;
2158 if (unlikely(pos >= ic->journal_entries))
2159 pos = 0;
2160
2161 je = access_journal_entry(ic, ws, we);
2162 BUG_ON(!journal_entry_is_unused(je));
2163 journal_entry_set_inprogress(je);
2164 we++;
2165 if (unlikely(we == ic->journal_section_entries)) {
2166 we = 0;
2167 ws++;
2168 wraparound_section(ic, &ws);
2169 }
2170 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2171
2172 spin_unlock_irq(&ic->endio_wait.lock);
2173 goto journal_read_write;
2174 } else {
2175 sector_t next_sector;
2176
2177 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2178 if (likely(journal_read_pos == NOT_FOUND)) {
2179 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2180 dio->range.n_sectors = next_sector - dio->range.logical_sector;
2181 } else {
2182 unsigned int i;
2183 unsigned int jp = journal_read_pos + 1;
2184
2185 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2186 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2187 break;
2188 }
2189 dio->range.n_sectors = i;
2190 }
2191 }
2192 }
2193 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2194 /*
2195 * We must not sleep in the request routine because it could
2196 * stall bios on current->bio_list.
2197 * So, we offload the bio to a workqueue if we have to sleep.
2198 */
2199 if (from_map) {
2200offload_to_thread:
2201 spin_unlock_irq(&ic->endio_wait.lock);
2202 INIT_WORK(&dio->work, integrity_bio_wait);
2203 queue_work(ic->wait_wq, &dio->work);
2204 return;
2205 }
2206 if (journal_read_pos != NOT_FOUND)
2207 dio->range.n_sectors = ic->sectors_per_block;
2208 wait_and_add_new_range(ic, &dio->range);
2209 /*
2210 * wait_and_add_new_range drops the spinlock, so the journal
2211 * may have been changed arbitrarily. We need to recheck.
2212 * To simplify the code, we restrict I/O size to just one block.
2213 */
2214 if (journal_read_pos != NOT_FOUND) {
2215 sector_t next_sector;
2216 unsigned int new_pos;
2217
2218 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2219 if (unlikely(new_pos != journal_read_pos)) {
2220 remove_range_unlocked(ic, &dio->range);
2221 goto retry;
2222 }
2223 }
2224 }
2225 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2226 sector_t next_sector;
2227 unsigned int new_pos;
2228
2229 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2230 if (unlikely(new_pos != NOT_FOUND) ||
2231 unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2232 remove_range_unlocked(ic, &dio->range);
2233 spin_unlock_irq(&ic->endio_wait.lock);
2234 queue_work(ic->commit_wq, &ic->commit_work);
2235 flush_workqueue(ic->commit_wq);
2236 queue_work(ic->writer_wq, &ic->writer_work);
2237 flush_workqueue(ic->writer_wq);
2238 discard_retried = true;
2239 goto lock_retry;
2240 }
2241 }
2242 spin_unlock_irq(&ic->endio_wait.lock);
2243
2244 if (unlikely(journal_read_pos != NOT_FOUND)) {
2245 journal_section = journal_read_pos / ic->journal_section_entries;
2246 journal_entry = journal_read_pos % ic->journal_section_entries;
2247 goto journal_read_write;
2248 }
2249
2250 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2251 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2252 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2253 struct bitmap_block_status *bbs;
2254
2255 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2256 spin_lock(&bbs->bio_queue_lock);
2257 bio_list_add(&bbs->bio_queue, bio);
2258 spin_unlock(&bbs->bio_queue_lock);
2259 queue_work(ic->writer_wq, &bbs->work);
2260 return;
2261 }
2262 }
2263
2264 dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2265
2266 if (need_sync_io) {
2267 init_completion(&read_comp);
2268 dio->completion = &read_comp;
2269 } else
2270 dio->completion = NULL;
2271
2272 dm_bio_record(&dio->bio_details, bio);
2273 bio_set_dev(bio, ic->dev->bdev);
2274 bio->bi_integrity = NULL;
2275 bio->bi_opf &= ~REQ_INTEGRITY;
2276 bio->bi_end_io = integrity_end_io;
2277 bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2278
2279 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2280 integrity_metadata(&dio->work);
2281 dm_integrity_flush_buffers(ic, false);
2282
2283 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2284 dio->completion = NULL;
2285
2286 submit_bio_noacct(bio);
2287
2288 return;
2289 }
2290
2291 submit_bio_noacct(bio);
2292
2293 if (need_sync_io) {
2294 wait_for_completion_io(&read_comp);
2295 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2296 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2297 goto skip_check;
2298 if (ic->mode == 'B') {
2299 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2300 dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2301 goto skip_check;
2302 }
2303
2304 if (likely(!bio->bi_status))
2305 integrity_metadata(&dio->work);
2306 else
2307skip_check:
2308 dec_in_flight(dio);
2309 } else {
2310 INIT_WORK(&dio->work, integrity_metadata);
2311 queue_work(ic->metadata_wq, &dio->work);
2312 }
2313
2314 return;
2315
2316journal_read_write:
2317 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2318 goto lock_retry;
2319
2320 do_endio_flush(ic, dio);
2321}
2322
2323
2324static void integrity_bio_wait(struct work_struct *w)
2325{
2326 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2327
2328 dm_integrity_map_continue(dio, false);
2329}
2330
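/*
 * Advance the free position to the next section boundary so that a
 * partially filled section is committed as a whole; the skipped
 * entries are accounted as used. The WARN_ON checks the invariant
 * that committed, uncommitted and free entries always add up to the
 * total journal capacity.
 */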
2331static void pad_uncommitted(struct dm_integrity_c *ic)
2332{
2333 if (ic->free_section_entry) {
2334 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2335 ic->free_section_entry = 0;
2336 ic->free_section++;
2337 wraparound_section(ic, &ic->free_section);
2338 ic->n_uncommitted_sections++;
2339 }
2340 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2341 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2342 ic->journal_section_entries + ic->free_sectors)) {
2343 DMCRIT("journal_sections %u, journal_section_entries %u, "
2344 "n_uncommitted_sections %u, n_committed_sections %u, "
2345 "journal_section_entries %u, free_sectors %u",
2346 ic->journal_sections, ic->journal_section_entries,
2347 ic->n_uncommitted_sections, ic->n_committed_sections,
2348 ic->journal_section_entries, ic->free_sectors);
2349 }
2350}
2351
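/*
 * Commit work: pad the current section, wait until all in-flight
 * copies into the journal have finished, stamp every journal sector
 * with the commit id of the current sequence and write the sections
 * out. Queued flush bios are completed only after the journal is
 * durable, and the writer work is kicked when free space runs low.
 */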
2352static void integrity_commit(struct work_struct *w)
2353{
2354 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2355 unsigned int commit_start, commit_sections;
2356 unsigned int i, j, n;
2357 struct bio *flushes;
2358
2359 del_timer(&ic->autocommit_timer);
2360
2361 spin_lock_irq(&ic->endio_wait.lock);
2362 flushes = bio_list_get(&ic->flush_bio_list);
2363 if (unlikely(ic->mode != 'J')) {
2364 spin_unlock_irq(&ic->endio_wait.lock);
2365 dm_integrity_flush_buffers(ic, true);
2366 goto release_flush_bios;
2367 }
2368
2369 pad_uncommitted(ic);
2370 commit_start = ic->uncommitted_section;
2371 commit_sections = ic->n_uncommitted_sections;
2372 spin_unlock_irq(&ic->endio_wait.lock);
2373
2374 if (!commit_sections)
2375 goto release_flush_bios;
2376
2377 ic->wrote_to_journal = true;
2378
2379 i = commit_start;
2380 for (n = 0; n < commit_sections; n++) {
2381 for (j = 0; j < ic->journal_section_entries; j++) {
2382 struct journal_entry *je;
2383
2384 je = access_journal_entry(ic, i, j);
2385 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2386 }
2387 for (j = 0; j < ic->journal_section_sectors; j++) {
2388 struct journal_sector *js;
2389
2390 js = access_journal(ic, i, j);
2391 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2392 }
2393 i++;
2394 if (unlikely(i >= ic->journal_sections))
2395 ic->commit_seq = next_commit_seq(ic->commit_seq);
2396 wraparound_section(ic, &i);
2397 }
2398 smp_rmb();
2399
2400 write_journal(ic, commit_start, commit_sections);
2401
2402 spin_lock_irq(&ic->endio_wait.lock);
2403 ic->uncommitted_section += commit_sections;
2404 wraparound_section(ic, &ic->uncommitted_section);
2405 ic->n_uncommitted_sections -= commit_sections;
2406 ic->n_committed_sections += commit_sections;
2407 spin_unlock_irq(&ic->endio_wait.lock);
2408
2409 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2410 queue_work(ic->writer_wq, &ic->writer_work);
2411
2412release_flush_bios:
2413 while (flushes) {
2414 struct bio *next = flushes->bi_next;
2415
2416 flushes->bi_next = NULL;
2417 do_endio(ic, flushes);
2418 flushes = next;
2419 }
2420}
2421
2422static void complete_copy_from_journal(unsigned long error, void *context)
2423{
2424 struct journal_io *io = context;
2425 struct journal_completion *comp = io->comp;
2426 struct dm_integrity_c *ic = comp->ic;
2427
2428 remove_range(ic, &io->range);
2429 mempool_free(io, &ic->journal_io_mempool);
2430 if (unlikely(error != 0))
2431 dm_integrity_io_error(ic, "copying from journal", -EIO);
2432 complete_journal_op(comp);
2433}
2434
2435static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2436 struct journal_entry *je)
2437{
2438 unsigned int s = 0;
2439
2440 do {
2441 js->commit_id = je->last_bytes[s];
2442 js++;
2443 } while (++s < ic->sectors_per_block);
2444}
2445
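/*
 * Write committed journal sections to their final location on the
 * data device. Consecutive entries that target adjacent device blocks
 * are merged into a single copy operation. In normal operation
 * (!from_replay), entries superseded by a newer committed entry for
 * the same sector are skipped; during replay, the tags are first
 * verified against the journal data. The copies run asynchronously
 * under a block plug, and the function waits for all of them before
 * flushing the buffered metadata.
 */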
2446static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
2447 unsigned int write_sections, bool from_replay)
2448{
2449 unsigned int i, j, n;
2450 struct journal_completion comp;
2451 struct blk_plug plug;
2452
2453 blk_start_plug(&plug);
2454
2455 comp.ic = ic;
2456 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2457 init_completion(&comp.comp);
2458
2459 i = write_start;
2460 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2461#ifndef INTERNAL_VERIFY
2462 if (unlikely(from_replay))
2463#endif
2464 rw_section_mac(ic, i, false);
2465 for (j = 0; j < ic->journal_section_entries; j++) {
2466 struct journal_entry *je = access_journal_entry(ic, i, j);
2467 sector_t sec, area, offset;
2468 unsigned int k, l, next_loop;
2469 sector_t metadata_block;
2470 unsigned int metadata_offset;
2471 struct journal_io *io;
2472
2473 if (journal_entry_is_unused(je))
2474 continue;
2475 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2476 sec = journal_entry_get_sector(je);
2477 if (unlikely(from_replay)) {
2478 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
2479 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2480 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2481 }
2482 if (unlikely(sec >= ic->provided_data_sectors)) {
2483 journal_entry_set_unused(je);
2484 continue;
2485 }
2486 }
2487 get_area_and_offset(ic, sec, &area, &offset);
2488 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2489 for (k = j + 1; k < ic->journal_section_entries; k++) {
2490 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2491 sector_t sec2, area2, offset2;
2492
2493 if (journal_entry_is_unused(je2))
2494 break;
2495 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2496 sec2 = journal_entry_get_sector(je2);
2497 if (unlikely(sec2 >= ic->provided_data_sectors))
2498 break;
2499 get_area_and_offset(ic, sec2, &area2, &offset2);
2500 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2501 break;
2502 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2503 }
2504 next_loop = k - 1;
2505
2506 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2507 io->comp = &comp;
2508 io->range.logical_sector = sec;
2509 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2510
2511 spin_lock_irq(&ic->endio_wait.lock);
2512 add_new_range_and_wait(ic, &io->range);
2513
2514 if (likely(!from_replay)) {
2515 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2516
2517 /* don't write if there is a newer committed sector */
2518 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2519 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2520
2521 journal_entry_set_unused(je2);
2522 remove_journal_node(ic, &section_node[j]);
2523 j++;
2524 sec += ic->sectors_per_block;
2525 offset += ic->sectors_per_block;
2526 }
2527 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2528 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2529
2530 journal_entry_set_unused(je2);
2531 remove_journal_node(ic, &section_node[k - 1]);
2532 k--;
2533 }
2534 if (j == k) {
2535 remove_range_unlocked(ic, &io->range);
2536 spin_unlock_irq(&ic->endio_wait.lock);
2537 mempool_free(io, &ic->journal_io_mempool);
2538 goto skip_io;
2539 }
2540 for (l = j; l < k; l++)
2541 remove_journal_node(ic, &section_node[l]);
2542 }
2543 spin_unlock_irq(&ic->endio_wait.lock);
2544
2545 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2546 for (l = j; l < k; l++) {
2547 int r;
2548 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2549
2550 if (
2551#ifndef INTERNAL_VERIFY
2552 unlikely(from_replay) &&
2553#endif
2554 ic->internal_hash) {
2555 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2556
2557 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2558 (char *)access_journal_data(ic, i, l), test_tag);
2559 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
2560 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2561 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
2562 }
2563 }
2564
2565 journal_entry_set_unused(je2);
2566 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2567 ic->tag_size, TAG_WRITE);
2568 if (unlikely(r))
2569 dm_integrity_io_error(ic, "writing tags", r);
2570 }
2571
2572 atomic_inc(&comp.in_flight);
2573 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2574 (k - j) << ic->sb->log2_sectors_per_block,
2575 get_data_sector(ic, area, offset),
2576 complete_copy_from_journal, io);
2577skip_io:
2578 j = next_loop;
2579 }
2580 }
2581
2582 dm_bufio_write_dirty_buffers_async(ic->bufio);
2583
2584 blk_finish_plug(&plug);
2585
2586 complete_journal_op(&comp);
2587 wait_for_completion_io(&comp.comp);
2588
2589 dm_integrity_flush_buffers(ic, true);
2590}
2591
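/*
 * Writer work: snapshot the committed sections under the lock, write
 * them back via do_journal_write() and only then return the entries
 * to the free pool, waking up writers that were blocked on a
 * completely full journal.
 */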
2592static void integrity_writer(struct work_struct *w)
2593{
2594 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2595 unsigned int write_start, write_sections;
2596 unsigned int prev_free_sectors;
2597
2598 spin_lock_irq(&ic->endio_wait.lock);
2599 write_start = ic->committed_section;
2600 write_sections = ic->n_committed_sections;
2601 spin_unlock_irq(&ic->endio_wait.lock);
2602
2603 if (!write_sections)
2604 return;
2605
2606 do_journal_write(ic, write_start, write_sections, false);
2607
2608 spin_lock_irq(&ic->endio_wait.lock);
2609
2610 ic->committed_section += write_sections;
2611 wraparound_section(ic, &ic->committed_section);
2612 ic->n_committed_sections -= write_sections;
2613
2614 prev_free_sectors = ic->free_sectors;
2615 ic->free_sectors += write_sections * ic->journal_section_entries;
2616 if (unlikely(!prev_free_sectors))
2617 wake_up_locked(&ic->endio_wait);
2618
2619 spin_unlock_irq(&ic->endio_wait.lock);
2620}
2621
2622static void recalc_write_super(struct dm_integrity_c *ic)
2623{
2624 int r;
2625
2626 dm_integrity_flush_buffers(ic, false);
2627 if (dm_integrity_failed(ic))
2628 return;
2629
2630 r = sync_rw_sb(ic, REQ_OP_WRITE);
2631 if (unlikely(r))
2632 dm_integrity_io_error(ic, "writing superblock", r);
2633}
2634
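/*
 * Background recalculation of integrity tags for regions that are not
 * known to be valid (the RECALCULATING flag is set, e.g., after an
 * unclean shutdown in bitmap mode or when recalculation is requested).
 * Data is read in chunks of up to RECALC_SECTORS (the buffer is halved
 * on allocation failure), checksummed, and the tags are written; the
 * position is persisted in the superblock every RECALC_WRITE_SUPER
 * chunks so that the work can resume where it stopped.
 */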
2635static void integrity_recalc(struct work_struct *w)
2636{
2637 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2638 size_t recalc_tags_size;
2639 u8 *recalc_buffer = NULL;
2640 u8 *recalc_tags = NULL;
2641 struct dm_integrity_range range;
2642 struct dm_io_request io_req;
2643 struct dm_io_region io_loc;
2644 sector_t area, offset;
2645 sector_t metadata_block;
2646 unsigned int metadata_offset;
2647 sector_t logical_sector, n_sectors;
2648 __u8 *t;
2649 unsigned int i;
2650 int r;
2651 unsigned int super_counter = 0;
2652 unsigned int recalc_sectors = RECALC_SECTORS;
2653
2654retry:
2655 recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
2656 if (!recalc_buffer) {
2657oom:
2658 recalc_sectors >>= 1;
2659 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
2660 goto retry;
2661 DMCRIT("out of memory for recalculate buffer - recalculation disabled");
2662 goto free_ret;
2663 }
2664 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
2665 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
2666 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
2667 recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
2668 if (!recalc_tags) {
2669 vfree(recalc_buffer);
2670 recalc_buffer = NULL;
2671 goto oom;
2672 }
2673
2674 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2675
2676 spin_lock_irq(&ic->endio_wait.lock);
2677
2678next_chunk:
2679
2680 if (unlikely(dm_post_suspending(ic->ti)))
2681 goto unlock_ret;
2682
2683 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2684 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2685 if (ic->mode == 'B') {
2686 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2687 DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2688 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2689 }
2690 goto unlock_ret;
2691 }
2692
2693 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2694 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
2695 if (!ic->meta_dev)
2696 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
2697
2698 add_new_range_and_wait(ic, &range);
2699 spin_unlock_irq(&ic->endio_wait.lock);
2700 logical_sector = range.logical_sector;
2701 n_sectors = range.n_sectors;
2702
2703 if (ic->mode == 'B') {
2704 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2705 goto advance_and_next;
2706
2707 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2708 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2709 logical_sector += ic->sectors_per_block;
2710 n_sectors -= ic->sectors_per_block;
2711 cond_resched();
2712 }
2713 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2714 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2715 n_sectors -= ic->sectors_per_block;
2716 cond_resched();
2717 }
2718 get_area_and_offset(ic, logical_sector, &area, &offset);
2719 }
2720
2721 DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2722
2723 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2724 recalc_write_super(ic);
2725 if (ic->mode == 'B')
2726 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2727
2728 super_counter = 0;
2729 }
2730
2731 if (unlikely(dm_integrity_failed(ic)))
2732 goto err;
2733
2734 io_req.bi_opf = REQ_OP_READ;
2735 io_req.mem.type = DM_IO_VMA;
2736 io_req.mem.ptr.addr = recalc_buffer;
2737 io_req.notify.fn = NULL;
2738 io_req.client = ic->io;
2739 io_loc.bdev = ic->dev->bdev;
2740 io_loc.sector = get_data_sector(ic, area, offset);
2741 io_loc.count = n_sectors;
2742
2743 r = dm_io(&io_req, 1, &io_loc, NULL);
2744 if (unlikely(r)) {
2745 dm_integrity_io_error(ic, "reading data", r);
2746 goto err;
2747 }
2748
2749 t = recalc_tags;
2750 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2751 integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
2752 t += ic->tag_size;
2753 }
2754
2755 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2756
2757 r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
2758 if (unlikely(r)) {
2759 dm_integrity_io_error(ic, "writing tags", r);
2760 goto err;
2761 }
2762
2763 if (ic->mode == 'B') {
2764 sector_t start, end;
2765
2766 start = (range.logical_sector >>
2767 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2768 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2769 end = ((range.logical_sector + range.n_sectors) >>
2770 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2771 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2772 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2773 }
2774
2775advance_and_next:
2776 cond_resched();
2777
2778 spin_lock_irq(&ic->endio_wait.lock);
2779 remove_range_unlocked(ic, &range);
2780 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2781 goto next_chunk;
2782
2783err:
2784 remove_range(ic, &range);
2785 goto free_ret;
2786
2787unlock_ret:
2788 spin_unlock_irq(&ic->endio_wait.lock);
2789
2790 recalc_write_super(ic);
2791
2792free_ret:
2793 vfree(recalc_buffer);
2794 kvfree(recalc_tags);
2795}
2796
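/*
 * Bitmap-mode write path for bios whose may_write bits are not yet
 * set: the corresponding bits are marked in the on-disk bitmap, that
 * bitmap block is written with FUA, and only then are the queued bios
 * allowed to proceed. Bios whose bits were set in the meantime are
 * resubmitted immediately.
 */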
2797static void bitmap_block_work(struct work_struct *w)
2798{
2799 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2800 struct dm_integrity_c *ic = bbs->ic;
2801 struct bio *bio;
2802 struct bio_list bio_queue;
2803 struct bio_list waiting;
2804
2805 bio_list_init(&waiting);
2806
2807 spin_lock(&bbs->bio_queue_lock);
2808 bio_queue = bbs->bio_queue;
2809 bio_list_init(&bbs->bio_queue);
2810 spin_unlock(&bbs->bio_queue_lock);
2811
2812 while ((bio = bio_list_pop(&bio_queue))) {
2813 struct dm_integrity_io *dio;
2814
2815 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2816
2817 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2818 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2819 remove_range(ic, &dio->range);
2820 INIT_WORK(&dio->work, integrity_bio_wait);
2821 queue_work(ic->offload_wq, &dio->work);
2822 } else {
2823 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2824 dio->range.n_sectors, BITMAP_OP_SET);
2825 bio_list_add(&waiting, bio);
2826 }
2827 }
2828
2829 if (bio_list_empty(&waiting))
2830 return;
2831
2832 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
2833 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2834 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2835
2836 while ((bio = bio_list_pop(&waiting))) {
2837 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2838
2839 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2840 dio->range.n_sectors, BITMAP_OP_SET);
2841
2842 remove_range(ic, &dio->range);
2843 INIT_WORK(&dio->work, integrity_bio_wait);
2844 queue_work(ic->offload_wq, &dio->work);
2845 }
2846
2847 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2848}
2849
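/*
 * Periodic bitmap flush: lock the whole device range, flush dirty
 * buffers, clear the dirty bits below the recalculation cursor in
 * both the in-memory and on-disk bitmaps, and complete any bios that
 * wait for synchronous-mode writeback.
 */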
2850static void bitmap_flush_work(struct work_struct *work)
2851{
2852 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2853 struct dm_integrity_range range;
2854 unsigned long limit;
2855 struct bio *bio;
2856
2857 dm_integrity_flush_buffers(ic, false);
2858
2859 range.logical_sector = 0;
2860 range.n_sectors = ic->provided_data_sectors;
2861
2862 spin_lock_irq(&ic->endio_wait.lock);
2863 add_new_range_and_wait(ic, &range);
2864 spin_unlock_irq(&ic->endio_wait.lock);
2865
2866 dm_integrity_flush_buffers(ic, true);
2867
2868 limit = ic->provided_data_sectors;
2869 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2870 limit = le64_to_cpu(ic->sb->recalc_sector)
2871 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2872 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2873 }
2874 /*DEBUG_print("zeroing journal\n");*/
2875 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2876 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2877
2878 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
2879 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2880
2881 spin_lock_irq(&ic->endio_wait.lock);
2882 remove_range_unlocked(ic, &range);
2883 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2884 bio_endio(bio);
2885 spin_unlock_irq(&ic->endio_wait.lock);
2886 spin_lock_irq(&ic->endio_wait.lock);
2887 }
2888 spin_unlock_irq(&ic->endio_wait.lock);
2889}
2890
2891
2892static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
2893 unsigned int n_sections, unsigned char commit_seq)
2894{
2895 unsigned int i, j, n;
2896
2897 if (!n_sections)
2898 return;
2899
2900 for (n = 0; n < n_sections; n++) {
2901 i = start_section + n;
2902 wraparound_section(ic, &i);
2903 for (j = 0; j < ic->journal_section_sectors; j++) {
2904 struct journal_sector *js = access_journal(ic, i, j);
2905
2906 BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
2907 memset(&js->sectors, 0, sizeof(js->sectors));
2908 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2909 }
2910 for (j = 0; j < ic->journal_section_entries; j++) {
2911 struct journal_entry *je = access_journal_entry(ic, i, j);
2912
2913 journal_entry_set_unused(je);
2914 }
2915 }
2916
2917 write_journal(ic, start_section, n_sections);
2918}
2919
2920static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
2921{
2922 unsigned char k;
2923
2924 for (k = 0; k < N_COMMIT_IDS; k++) {
2925 if (dm_integrity_commit_id(ic, i, j, k) == id)
2926 return k;
2927 }
2928 dm_integrity_io_error(ic, "journal commit id", -EIO);
2929 return -EIO;
2930}
2931
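/*
 * Determine, from the commit ids found on disk, which journal
 * sections form the last consistent commit and replay them. A section
 * with an unexpected commit id marks the point where a crash
 * interrupted a journal write, and nothing from that point on is
 * replayed. If the journal is inconsistent, it is reinitialized with
 * a fresh erase sequence; in all cases the in-memory journal state
 * (section pointers, free counters and the journal tree) is reset.
 */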
2932static void replay_journal(struct dm_integrity_c *ic)
2933{
2934 unsigned int i, j;
2935 bool used_commit_ids[N_COMMIT_IDS];
2936 unsigned int max_commit_id_sections[N_COMMIT_IDS];
2937 unsigned int write_start, write_sections;
2938 unsigned int continue_section;
2939 bool journal_empty;
2940 unsigned char unused, last_used, want_commit_seq;
2941
2942 if (ic->mode == 'R')
2943 return;
2944
2945 if (ic->journal_uptodate)
2946 return;
2947
2948 last_used = 0;
2949 write_start = 0;
2950
2951 if (!ic->just_formatted) {
2952 DEBUG_print("reading journal\n");
2953 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
2954 if (ic->journal_io)
2955 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2956 if (ic->journal_io) {
2957 struct journal_completion crypt_comp;
2958
2959 crypt_comp.ic = ic;
2960 init_completion(&crypt_comp.comp);
2961 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2962 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2963 wait_for_completion(&crypt_comp.comp);
2964 }
2965 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2966 }
2967
2968 if (dm_integrity_failed(ic))
2969 goto clear_journal;
2970
2971 journal_empty = true;
2972 memset(used_commit_ids, 0, sizeof(used_commit_ids));
2973 memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
2974 for (i = 0; i < ic->journal_sections; i++) {
2975 for (j = 0; j < ic->journal_section_sectors; j++) {
2976 int k;
2977 struct journal_sector *js = access_journal(ic, i, j);
2978
2979 k = find_commit_seq(ic, i, j, js->commit_id);
2980 if (k < 0)
2981 goto clear_journal;
2982 used_commit_ids[k] = true;
2983 max_commit_id_sections[k] = i;
2984 }
2985 if (journal_empty) {
2986 for (j = 0; j < ic->journal_section_entries; j++) {
2987 struct journal_entry *je = access_journal_entry(ic, i, j);
2988
2989 if (!journal_entry_is_unused(je)) {
2990 journal_empty = false;
2991 break;
2992 }
2993 }
2994 }
2995 }
2996
2997 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2998 unused = N_COMMIT_IDS - 1;
2999 while (unused && !used_commit_ids[unused - 1])
3000 unused--;
3001 } else {
3002 for (unused = 0; unused < N_COMMIT_IDS; unused++)
3003 if (!used_commit_ids[unused])
3004 break;
3005 if (unused == N_COMMIT_IDS) {
3006 dm_integrity_io_error(ic, "journal commit ids", -EIO);
3007 goto clear_journal;
3008 }
3009 }
3010 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
3011 unused, used_commit_ids[0], used_commit_ids[1],
3012 used_commit_ids[2], used_commit_ids[3]);
3013
3014 last_used = prev_commit_seq(unused);
3015 want_commit_seq = prev_commit_seq(last_used);
3016
3017 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
3018 journal_empty = true;
3019
3020 write_start = max_commit_id_sections[last_used] + 1;
3021 if (unlikely(write_start >= ic->journal_sections))
3022 want_commit_seq = next_commit_seq(want_commit_seq);
3023 wraparound_section(ic, &write_start);
3024
3025 i = write_start;
3026 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
3027 for (j = 0; j < ic->journal_section_sectors; j++) {
3028 struct journal_sector *js = access_journal(ic, i, j);
3029
3030 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
3031 /*
3032 * This could be caused by a crash during writing.
3033 * We won't replay the inconsistent part of the
3034 * journal.
3035 */
3036 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
3037 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
3038 goto brk;
3039 }
3040 }
3041 i++;
3042 if (unlikely(i >= ic->journal_sections))
3043 want_commit_seq = next_commit_seq(want_commit_seq);
3044 wraparound_section(ic, &i);
3045 }
3046brk:
3047
3048 if (!journal_empty) {
3049 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
3050 write_sections, write_start, want_commit_seq);
3051 do_journal_write(ic, write_start, write_sections, true);
3052 }
3053
3054 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3055 continue_section = write_start;
3056 ic->commit_seq = want_commit_seq;
3057 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3058 } else {
3059 unsigned int s;
3060 unsigned char erase_seq;
3061
3062clear_journal:
3063 DEBUG_print("clearing journal\n");
3064
3065 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3066 s = write_start;
3067 init_journal(ic, s, 1, erase_seq);
3068 s++;
3069 wraparound_section(ic, &s);
3070 if (ic->journal_sections >= 2) {
3071 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3072 s += ic->journal_sections - 2;
3073 wraparound_section(ic, &s);
3074 init_journal(ic, s, 1, erase_seq);
3075 }
3076
3077 continue_section = 0;
3078 ic->commit_seq = next_commit_seq(erase_seq);
3079 }
3080
3081 ic->committed_section = continue_section;
3082 ic->n_committed_sections = 0;
3083
3084 ic->uncommitted_section = continue_section;
3085 ic->n_uncommitted_sections = 0;
3086
3087 ic->free_section = continue_section;
3088 ic->free_section_entry = 0;
3089 ic->free_sectors = ic->journal_entries;
3090
3091 ic->journal_tree_root = RB_ROOT;
3092 for (i = 0; i < ic->journal_entries; i++)
3093 init_journal_node(&ic->journal_tree[i]);
3094}
3095
3096static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3097{
3098 DEBUG_print("%s\n", __func__);
3099
3100 if (ic->mode == 'B') {
3101 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3102 ic->synchronous_mode = 1;
3103
3104 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3105 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3106 flush_workqueue(ic->commit_wq);
3107 }
3108}
3109
3110static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3111{
3112 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3113
3114 DEBUG_print("%s\n", __func__);
3115
3116 dm_integrity_enter_synchronous_mode(ic);
3117
3118 return NOTIFY_DONE;
3119}
3120
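/*
 * Postsuspend: stop the timers and background work, flush everything
 * out and, if the journal was written to, reinitialize it with the
 * next commit sequence so that a later resume does not replay stale
 * entries. In bitmap mode the bitmap area is reinitialized and the
 * DIRTY_BITMAP flag is cleared, marking a clean shutdown.
 */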
3121static void dm_integrity_postsuspend(struct dm_target *ti)
3122{
3123 struct dm_integrity_c *ic = ti->private;
3124 int r;
3125
3126 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3127
3128 del_timer_sync(&ic->autocommit_timer);
3129
3130 if (ic->recalc_wq)
3131 drain_workqueue(ic->recalc_wq);
3132
3133 if (ic->mode == 'B')
3134 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3135
3136 queue_work(ic->commit_wq, &ic->commit_work);
3137 drain_workqueue(ic->commit_wq);
3138
3139 if (ic->mode == 'J') {
3140 queue_work(ic->writer_wq, &ic->writer_work);
3141 drain_workqueue(ic->writer_wq);
3142 dm_integrity_flush_buffers(ic, true);
3143 if (ic->wrote_to_journal) {
3144 init_journal(ic, ic->free_section,
3145 ic->journal_sections - ic->free_section, ic->commit_seq);
3146 if (ic->free_section) {
3147 init_journal(ic, 0, ic->free_section,
3148 next_commit_seq(ic->commit_seq));
3149 }
3150 }
3151 }
3152
3153 if (ic->mode == 'B') {
3154 dm_integrity_flush_buffers(ic, true);
3155#if 1
3156 /* set to 0 to test bitmap replay code */
3157 init_journal(ic, 0, ic->journal_sections, 0);
3158 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3159 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3160 if (unlikely(r))
3161 dm_integrity_io_error(ic, "writing superblock", r);
3162#endif
3163 }
3164
3165 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3166
3167 ic->journal_uptodate = true;
3168}
3169
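/*
 * Resume: extend the bitmap if the device grew, then bring the
 * metadata into a consistent state, either by processing a dirty
 * bitmap (everything still marked dirty is scheduled for
 * recalculation) or by replaying the journal. Finally, the
 * recalculation work is restarted if the superblock says it was
 * interrupted, and the reboot notifier that switches bitmap mode to
 * synchronous operation is registered.
 */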
3170static void dm_integrity_resume(struct dm_target *ti)
3171{
3172 struct dm_integrity_c *ic = ti->private;
3173 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3174 int r;
3175
3176 DEBUG_print("resume\n");
3177
3178 ic->wrote_to_journal = false;
3179
3180 if (ic->provided_data_sectors != old_provided_data_sectors) {
3181 if (ic->provided_data_sectors > old_provided_data_sectors &&
3182 ic->mode == 'B' &&
3183 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3184 rw_journal_sectors(ic, REQ_OP_READ, 0,
3185 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3186 block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3187 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3188 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3189 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3190 }
3191
3192 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3193 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3194 if (unlikely(r))
3195 dm_integrity_io_error(ic, "writing superblock", r);
3196 }
3197
3198 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3199 DEBUG_print("resume dirty_bitmap\n");
3200 rw_journal_sectors(ic, REQ_OP_READ, 0,
3201 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3202 if (ic->mode == 'B') {
3203 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3204 !ic->reset_recalculate_flag) {
3205 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3206 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3207 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3208 BITMAP_OP_TEST_ALL_CLEAR)) {
3209 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3210 ic->sb->recalc_sector = cpu_to_le64(0);
3211 }
3212 } else {
3213 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3214 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3215 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3216 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3217 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3218 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3219 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3220 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3221 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3222 ic->sb->recalc_sector = cpu_to_le64(0);
3223 }
3224 } else {
3225 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3226 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3227 ic->reset_recalculate_flag) {
3228 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3229 ic->sb->recalc_sector = cpu_to_le64(0);
3230 }
3231 init_journal(ic, 0, ic->journal_sections, 0);
3232 replay_journal(ic);
3233 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3234 }
3235 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3236 if (unlikely(r))
3237 dm_integrity_io_error(ic, "writing superblock", r);
3238 } else {
3239 replay_journal(ic);
3240 if (ic->reset_recalculate_flag) {
3241 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3242 ic->sb->recalc_sector = cpu_to_le64(0);
3243 }
3244 if (ic->mode == 'B') {
3245 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3246 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3247 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3248 if (unlikely(r))
3249 dm_integrity_io_error(ic, "writing superblock", r);
3250
3251 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3252 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3253 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3254 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3255 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3256 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3257 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3258 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3259 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3260 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3261 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3262 }
3263 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3264 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3265 }
3266 }
3267
3268 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3269 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3270 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3271
3272 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3273 if (recalc_pos < ic->provided_data_sectors) {
3274 queue_work(ic->recalc_wq, &ic->recalc_work);
3275 } else if (recalc_pos > ic->provided_data_sectors) {
3276 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3277 recalc_write_super(ic);
3278 }
3279 }
3280
3281 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3282 ic->reboot_notifier.next = NULL;
3283 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
3284 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3285
3286#if 0
3287 /* set to 1 to stress test synchronous mode */
3288 dm_integrity_enter_synchronous_mode(ic);
3289#endif
3290}
3291
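/*
 * Status reporting. STATUSTYPE_TABLE reconstructs the target line
 * from the live configuration; purely as an illustration (the device
 * name and numbers are made up), a minimal journal-mode table emitted
 * here could look like:
 *
 *   /dev/sdb 0 4 J 5 journal_sectors:1024 interleave_sectors:32768
 *       buffer_sectors:128 journal_watermark:50 commit_time:10000
 *
 * STATUSTYPE_INFO reports the mismatch count, the provided data
 * sectors and the recalculation position (or '-' when no
 * recalculation is in progress).
 */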
3292static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3293 unsigned int status_flags, char *result, unsigned int maxlen)
3294{
3295 struct dm_integrity_c *ic = ti->private;
3296 unsigned int arg_count;
3297 size_t sz = 0;
3298
3299 switch (type) {
3300 case STATUSTYPE_INFO:
3301 DMEMIT("%llu %llu",
3302 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3303 ic->provided_data_sectors);
3304 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3305 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3306 else
3307 DMEMIT(" -");
3308 break;
3309
3310 case STATUSTYPE_TABLE: {
3311 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3312
3313 watermark_percentage += ic->journal_entries / 2;
3314 do_div(watermark_percentage, ic->journal_entries);
3315 arg_count = 3;
3316 arg_count += !!ic->meta_dev;
3317 arg_count += ic->sectors_per_block != 1;
3318 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3319 arg_count += ic->reset_recalculate_flag;
3320 arg_count += ic->discard;
3321 arg_count += ic->mode == 'J';
3322 arg_count += ic->mode == 'J';
3323 arg_count += ic->mode == 'B';
3324 arg_count += ic->mode == 'B';
3325 arg_count += !!ic->internal_hash_alg.alg_string;
3326 arg_count += !!ic->journal_crypt_alg.alg_string;
3327 arg_count += !!ic->journal_mac_alg.alg_string;
3328 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3329 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3330 arg_count += ic->legacy_recalculate;
3331 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3332 ic->tag_size, ic->mode, arg_count);
3333 if (ic->meta_dev)
3334 DMEMIT(" meta_device:%s", ic->meta_dev->name);
3335 if (ic->sectors_per_block != 1)
3336 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3337 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3338 DMEMIT(" recalculate");
3339 if (ic->reset_recalculate_flag)
3340 DMEMIT(" reset_recalculate");
3341 if (ic->discard)
3342 DMEMIT(" allow_discards");
3343 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3344 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3345 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3346 if (ic->mode == 'J') {
3347 DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
3348 DMEMIT(" commit_time:%u", ic->autocommit_msec);
3349 }
3350 if (ic->mode == 'B') {
3351 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3352 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3353 }
3354 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3355 DMEMIT(" fix_padding");
3356 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3357 DMEMIT(" fix_hmac");
3358 if (ic->legacy_recalculate)
3359 DMEMIT(" legacy_recalculate");
3360
3361#define EMIT_ALG(a, n) \
3362 do { \
3363 if (ic->a.alg_string) { \
3364 DMEMIT(" %s:%s", n, ic->a.alg_string); \
3365 if (ic->a.key_string) \
3366 DMEMIT(":%s", ic->a.key_string);\
3367 } \
3368 } while (0)
3369 EMIT_ALG(internal_hash_alg, "internal_hash");
3370 EMIT_ALG(journal_crypt_alg, "journal_crypt");
3371 EMIT_ALG(journal_mac_alg, "journal_mac");
3372 break;
3373 }
3374 case STATUSTYPE_IMA:
3375 DMEMIT_TARGET_NAME_VERSION(ti->type);
3376 DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
3377 ic->dev->name, ic->start, ic->tag_size, ic->mode);
3378
3379 if (ic->meta_dev)
3380 DMEMIT(",meta_device=%s", ic->meta_dev->name);
3381 if (ic->sectors_per_block != 1)
3382 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
3383
3384 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
3385 'y' : 'n');
3386 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
3387 DMEMIT(",fix_padding=%c",
3388 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
3389 DMEMIT(",fix_hmac=%c",
3390 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
3391 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
3392
3393 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
3394 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
3395 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
3396 DMEMIT(";");
3397 break;
3398 }
3399}
3400
3401static int dm_integrity_iterate_devices(struct dm_target *ti,
3402 iterate_devices_callout_fn fn, void *data)
3403{
3404 struct dm_integrity_c *ic = ti->private;
3405
3406 if (!ic->meta_dev)
3407 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3408 else
3409 return fn(ti, ic->dev, 0, ti->len, data);
3410}
3411
3412static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3413{
3414 struct dm_integrity_c *ic = ti->private;
3415
3416 if (ic->sectors_per_block > 1) {
3417 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3418 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3419 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3420 limits->dma_alignment = limits->logical_block_size - 1;
3421 }
3422}
3423
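/*
 * Journal geometry. As a worked example (assuming JOURNAL_SECTOR_DATA
 * is 504, i.e. a 512-byte sector minus the 8-byte commit id, and
 * JOURNAL_BLOCK_SECTORS is 8): with a 4-byte tag and one sector per
 * block, journal_entry_size = roundup(16 + 4, 8) = 24, so a journal
 * sector holds 504 / 24 = 21 entries, a section holds 21 * 8 = 168
 * entries, and journal_section_sectors = 168 + 8 = 176. Enabling a
 * journal MAC reduces the per-sector space to 496 bytes and therefore
 * to 20 entries per sector.
 */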
3424static void calculate_journal_section_size(struct dm_integrity_c *ic)
3425{
3426 unsigned int sector_space = JOURNAL_SECTOR_DATA;
3427
3428 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3429 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3430 JOURNAL_ENTRY_ROUNDUP);
3431
3432 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3433 sector_space -= JOURNAL_MAC_PER_SECTOR;
3434 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3435 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3436 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3437 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3438}
3439
3440static int calculate_device_limits(struct dm_integrity_c *ic)
3441{
3442 __u64 initial_sectors;
3443
3444 calculate_journal_section_size(ic);
3445 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3446 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3447 return -EINVAL;
3448 ic->initial_sectors = initial_sectors;
3449
3450 if (!ic->meta_dev) {
3451 sector_t last_sector, last_area, last_offset;
3452
3453 /* we have to maintain excessive padding for compatibility with existing volumes */
3454 __u64 metadata_run_padding =
3455 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3456 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3457 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3458
3459 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3460 metadata_run_padding) >> SECTOR_SHIFT;
3461 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3462 ic->log2_metadata_run = __ffs(ic->metadata_run);
3463 else
3464 ic->log2_metadata_run = -1;
3465
3466 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3467 last_sector = get_data_sector(ic, last_area, last_offset);
3468 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3469 return -EINVAL;
3470 } else {
3471 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3472
3473 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3474 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3475 meta_size <<= ic->log2_buffer_sectors;
3476 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3477 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3478 return -EINVAL;
3479 ic->metadata_run = 1;
3480 ic->log2_metadata_run = 0;
3481 }
3482
3483 return 0;
3484}
3485
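/*
 * For the interleaved layout, the usable data size is found by a
 * bitwise binary search: each bit of provided_data_sectors is tried
 * from the highest downwards and kept only if the resulting layout
 * still fits on the device according to calculate_device_limits().
 */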
3486static void get_provided_data_sectors(struct dm_integrity_c *ic)
3487{
3488 if (!ic->meta_dev) {
3489 int test_bit;
3490
3491 ic->provided_data_sectors = 0;
3492 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3493 __u64 prev_data_sectors = ic->provided_data_sectors;
3494
3495 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3496 if (calculate_device_limits(ic))
3497 ic->provided_data_sectors = prev_data_sectors;
3498 }
3499 } else {
3500 ic->provided_data_sectors = ic->data_device_sectors;
3501 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3502 }
3503}
3504
3505static int initialize_superblock(struct dm_integrity_c *ic,
3506 unsigned int journal_sectors, unsigned int interleave_sectors)
3507{
3508 unsigned int journal_sections;
3509 int test_bit;
3510
3511 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3512 memcpy(ic->sb->magic, SB_MAGIC, 8);
3513 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3514 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3515 if (ic->journal_mac_alg.alg_string)
3516 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3517
3518 calculate_journal_section_size(ic);
3519 journal_sections = journal_sectors / ic->journal_section_sectors;
3520 if (!journal_sections)
3521 journal_sections = 1;
3522
3523 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3524 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3525 get_random_bytes(ic->sb->salt, SALT_SIZE);
3526 }
3527
3528 if (!ic->meta_dev) {
3529 if (ic->fix_padding)
3530 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3531 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3532 if (!interleave_sectors)
3533 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3534 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3535 ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3536 ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3537
3538 get_provided_data_sectors(ic);
3539 if (!ic->provided_data_sectors)
3540 return -EINVAL;
3541 } else {
3542 ic->sb->log2_interleave_sectors = 0;
3543
3544 get_provided_data_sectors(ic);
3545 if (!ic->provided_data_sectors)
3546 return -EINVAL;
3547
3548try_smaller_buffer:
3549 ic->sb->journal_sections = cpu_to_le32(0);
3550 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3551 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3552 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3553
3554 if (test_journal_sections > journal_sections)
3555 continue;
3556 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3557 if (calculate_device_limits(ic))
3558 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3560 }
3561 if (!le32_to_cpu(ic->sb->journal_sections)) {
3562 if (ic->log2_buffer_sectors > 3) {
3563 ic->log2_buffer_sectors--;
3564 goto try_smaller_buffer;
3565 }
3566 return -EINVAL;
3567 }
3568 }
3569
3570 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3571
3572 sb_set_version(ic);
3573
3574 return 0;
3575}
3576
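/*
 * Register a blk_integrity profile for the exported device, so that the
 * block layer attaches integrity tags to bios. This is only used when
 * there is no internal hash and the tags are supplied by the upper layer.
 */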
3577static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3578{
3579 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3580 struct blk_integrity bi;
3581
3582 memset(&bi, 0, sizeof(bi));
3583 bi.profile = &dm_integrity_profile;
3584 bi.tuple_size = ic->tag_size;
3585 bi.tag_size = bi.tuple_size;
3586 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3587
3588 blk_integrity_register(disk, &bi);
3589 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3590}
3591
3592static void dm_integrity_free_page_list(struct page_list *pl)
3593{
3594 unsigned int i;
3595
3596 if (!pl)
3597 return;
3598 for (i = 0; pl[i].page; i++)
3599 __free_page(pl[i].page);
3600 kvfree(pl);
3601}
3602
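/*
 * Allocate a page_list with n_pages pages; the array is terminated by a
 * NULL page, which is how dm_integrity_free_page_list() finds the end.
 */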
3603static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
3604{
3605 struct page_list *pl;
3606 unsigned int i;
3607
3608 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3609 if (!pl)
3610 return NULL;
3611
3612 for (i = 0; i < n_pages; i++) {
3613 pl[i].page = alloc_page(GFP_KERNEL);
3614 if (!pl[i].page) {
3615 dm_integrity_free_page_list(pl);
3616 return NULL;
3617 }
3618 if (i)
3619 pl[i - 1].next = &pl[i];
3620 }
3621 pl[i].page = NULL;
3622 pl[i].next = NULL;
3623
3624 return pl;
3625}
3626
3627static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3628{
3629 unsigned int i;
3630
3631 for (i = 0; i < ic->journal_sections; i++)
3632 kvfree(sl[i]);
3633 kvfree(sl);
3634}
3635
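/*
 * Build one scatterlist per journal section, mapping the section's
 * sectors onto the underlying journal pages (a section may begin and end
 * in the middle of a page), so that each section can be encrypted in
 * place.
 */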
3636static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3637 struct page_list *pl)
3638{
3639 struct scatterlist **sl;
3640 unsigned int i;
3641
3642 sl = kvmalloc_array(ic->journal_sections,
3643 sizeof(struct scatterlist *),
3644 GFP_KERNEL | __GFP_ZERO);
3645 if (!sl)
3646 return NULL;
3647
3648 for (i = 0; i < ic->journal_sections; i++) {
3649 struct scatterlist *s;
3650 unsigned int start_index, start_offset;
3651 unsigned int end_index, end_offset;
3652 unsigned int n_pages;
3653 unsigned int idx;
3654
3655 page_list_location(ic, i, 0, &start_index, &start_offset);
3656 page_list_location(ic, i, ic->journal_section_sectors - 1,
3657 &end_index, &end_offset);
3658
3659 n_pages = (end_index - start_index + 1);
3660
3661 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3662 GFP_KERNEL);
3663 if (!s) {
3664 dm_integrity_free_journal_scatterlist(ic, sl);
3665 return NULL;
3666 }
3667
3668 sg_init_table(s, n_pages);
3669 for (idx = start_index; idx <= end_index; idx++) {
3670 char *va = lowmem_page_address(pl[idx].page);
3671 unsigned int start = 0, end = PAGE_SIZE;
3672
3673 if (idx == start_index)
3674 start = start_offset;
3675 if (idx == end_index)
3676 end = end_offset + (1 << SECTOR_SHIFT);
3677 sg_set_buf(&s[idx - start_index], va + start, end - start);
3678 }
3679
3680 sl[i] = s;
3681 }
3682
3683 return sl;
3684}
3685
3686static void free_alg(struct alg_spec *a)
3687{
3688 kfree_sensitive(a->alg_string);
3689 kfree_sensitive(a->key);
3690 memset(a, 0, sizeof(*a));
3691}
3692
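/*
 * Parse an "option:algorithm:key" argument: the algorithm name follows
 * the first ':', an optional hex-encoded key follows the second one.
 */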
3693static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3694{
3695 char *k;
3696
3697 free_alg(a);
3698
3699 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3700 if (!a->alg_string)
3701 goto nomem;
3702
3703 k = strchr(a->alg_string, ':');
3704 if (k) {
3705 *k = 0;
3706 a->key_string = k + 1;
3707 if (strlen(a->key_string) & 1)
3708 goto inval;
3709
3710 a->key_size = strlen(a->key_string) / 2;
3711 a->key = kmalloc(a->key_size, GFP_KERNEL);
3712 if (!a->key)
3713 goto nomem;
3714 if (hex2bin(a->key, a->key_string, a->key_size))
3715 goto inval;
3716 }
3717
3718 return 0;
3719inval:
3720 *error = error_inval;
3721 return -EINVAL;
3722nomem:
3723 *error = "Out of memory for an argument";
3724 return -ENOMEM;
3725}
3726
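/*
 * Allocate the shash transform named in the alg_spec and set its key, if
 * one was supplied. An algorithm that requires a key but was given none
 * is rejected with -ENOKEY.
 */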
3727static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3728 char *error_alg, char *error_key)
3729{
3730 int r;
3731
3732 if (a->alg_string) {
3733 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3734 if (IS_ERR(*hash)) {
3735 *error = error_alg;
3736 r = PTR_ERR(*hash);
3737 *hash = NULL;
3738 return r;
3739 }
3740
3741 if (a->key) {
3742 r = crypto_shash_setkey(*hash, a->key, a->key_size);
3743 if (r) {
3744 *error = error_key;
3745 return r;
3746 }
3747 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3748 *error = error_key;
3749 return -ENOKEY;
3750 }
3751 }
3752
3753 return 0;
3754}
3755
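/*
 * Allocate the in-memory journal and, if journal encryption was requested,
 * set it up: a cipher with block size 1 is handled by precomputing a
 * keystream that the journal is XORed with; a block cipher gets one
 * skcipher request per section, with an IV derived from the section
 * number, for encrypting sections as they are written out. Also pick four
 * distinct commit ids and allocate the journal node tree.
 */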
3756static int create_journal(struct dm_integrity_c *ic, char **error)
3757{
3758 int r = 0;
3759 unsigned int i;
3760 __u64 journal_pages, journal_desc_size, journal_tree_size;
3761 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3762 struct skcipher_request *req = NULL;
3763
3764 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3765 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3766 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3767 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3768
3769 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3770 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3771 journal_desc_size = journal_pages * sizeof(struct page_list);
3772 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3773 *error = "Journal doesn't fit into memory";
3774 r = -ENOMEM;
3775 goto bad;
3776 }
3777 ic->journal_pages = journal_pages;
3778
3779 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3780 if (!ic->journal) {
3781 *error = "Could not allocate memory for journal";
3782 r = -ENOMEM;
3783 goto bad;
3784 }
3785 if (ic->journal_crypt_alg.alg_string) {
3786 unsigned int ivsize, blocksize;
3787 struct journal_completion comp;
3788
3789 comp.ic = ic;
3790 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3791 if (IS_ERR(ic->journal_crypt)) {
3792 *error = "Invalid journal cipher";
3793 r = PTR_ERR(ic->journal_crypt);
3794 ic->journal_crypt = NULL;
3795 goto bad;
3796 }
3797 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3798 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3799
3800 if (ic->journal_crypt_alg.key) {
3801 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3802 ic->journal_crypt_alg.key_size);
3803 if (r) {
3804 *error = "Error setting encryption key";
3805 goto bad;
3806 }
3807 }
3808 DEBUG_print("cipher %s, block size %u iv size %u\n",
3809 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3810
3811 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3812 if (!ic->journal_io) {
3813 *error = "Could not allocate memory for journal io";
3814 r = -ENOMEM;
3815 goto bad;
3816 }
3817
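		/*
		 * A cipher with block size 1 (i.e. a stream cipher): encrypt
		 * zeroed pages to precompute the keystream into journal_xor,
		 * then free the transform; the journal is later XORed with
		 * this keystream. Other ciphers keep the transform and get
		 * per-section requests below.
		 */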
3818 if (blocksize == 1) {
3819 struct scatterlist *sg;
3820
3821 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3822 if (!req) {
3823 *error = "Could not allocate crypt request";
3824 r = -ENOMEM;
3825 goto bad;
3826 }
3827
3828 crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3829 if (!crypt_iv) {
3830 *error = "Could not allocate iv";
3831 r = -ENOMEM;
3832 goto bad;
3833 }
3834
3835 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3836 if (!ic->journal_xor) {
3837 *error = "Could not allocate memory for journal xor";
3838 r = -ENOMEM;
3839 goto bad;
3840 }
3841
3842 sg = kvmalloc_array(ic->journal_pages + 1,
3843 sizeof(struct scatterlist),
3844 GFP_KERNEL);
3845 if (!sg) {
3846 *error = "Unable to allocate sg list";
3847 r = -ENOMEM;
3848 goto bad;
3849 }
3850 sg_init_table(sg, ic->journal_pages + 1);
3851 for (i = 0; i < ic->journal_pages; i++) {
3852 char *va = lowmem_page_address(ic->journal_xor[i].page);
3853
3854 clear_page(va);
3855 sg_set_buf(&sg[i], va, PAGE_SIZE);
3856 }
3857 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
3858
3859 skcipher_request_set_crypt(req, sg, sg,
3860 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
3861 init_completion(&comp.comp);
3862 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3863 if (do_crypt(true, req, &comp))
3864 wait_for_completion(&comp.comp);
3865 kvfree(sg);
3866 r = dm_integrity_failed(ic);
3867 if (r) {
3868 *error = "Unable to encrypt journal";
3869 goto bad;
3870 }
3871 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3872
3873 crypto_free_skcipher(ic->journal_crypt);
3874 ic->journal_crypt = NULL;
3875 } else {
3876 unsigned int crypt_len = roundup(ivsize, blocksize);
3877
3878 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3879 if (!req) {
3880 *error = "Could not allocate crypt request";
3881 r = -ENOMEM;
3882 goto bad;
3883 }
3884
3885 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3886 if (!crypt_iv) {
3887 *error = "Could not allocate iv";
3888 r = -ENOMEM;
3889 goto bad;
3890 }
3891
3892 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3893 if (!crypt_data) {
3894 *error = "Unable to allocate crypt data";
3895 r = -ENOMEM;
3896 goto bad;
3897 }
3898
3899 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3900 if (!ic->journal_scatterlist) {
3901 *error = "Unable to allocate sg list";
3902 r = -ENOMEM;
3903 goto bad;
3904 }
3905 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3906 if (!ic->journal_io_scatterlist) {
3907 *error = "Unable to allocate sg list";
3908 r = -ENOMEM;
3909 goto bad;
3910 }
3911 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3912 sizeof(struct skcipher_request *),
3913 GFP_KERNEL | __GFP_ZERO);
3914 if (!ic->sk_requests) {
3915 *error = "Unable to allocate sk requests";
3916 r = -ENOMEM;
3917 goto bad;
3918 }
3919 for (i = 0; i < ic->journal_sections; i++) {
3920 struct scatterlist sg;
3921 struct skcipher_request *section_req;
3922 __le32 section_le = cpu_to_le32(i);
3923
3924 memset(crypt_iv, 0x00, ivsize);
3925 memset(crypt_data, 0x00, crypt_len);
3926 			memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));
3927
3928 sg_init_one(&sg, crypt_data, crypt_len);
3929 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3930 init_completion(&comp.comp);
3931 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3932 if (do_crypt(true, req, &comp))
3933 wait_for_completion(&comp.comp);
3934
3935 r = dm_integrity_failed(ic);
3936 if (r) {
3937 *error = "Unable to generate iv";
3938 goto bad;
3939 }
3940
3941 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3942 if (!section_req) {
3943 *error = "Unable to allocate crypt request";
3944 r = -ENOMEM;
3945 goto bad;
3946 }
3947 section_req->iv = kmalloc_array(ivsize, 2,
3948 GFP_KERNEL);
3949 if (!section_req->iv) {
3950 skcipher_request_free(section_req);
3951 *error = "Unable to allocate iv";
3952 r = -ENOMEM;
3953 goto bad;
3954 }
3955 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3956 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3957 ic->sk_requests[i] = section_req;
3958 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3959 }
3960 }
3961 }
3962
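	/*
	 * Make sure all commit ids are distinct (the encryption above may
	 * have mapped two of them to the same value); duplicates are bumped
	 * until unique, because equal commit ids would make it impossible
	 * to tell journal generations apart.
	 */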
3963 for (i = 0; i < N_COMMIT_IDS; i++) {
3964 unsigned int j;
3965
3966retest_commit_id:
3967 for (j = 0; j < i; j++) {
3968 if (ic->commit_ids[j] == ic->commit_ids[i]) {
3969 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3970 goto retest_commit_id;
3971 }
3972 }
3973 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3974 }
3975
3976 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3977 if (journal_tree_size > ULONG_MAX) {
3978 *error = "Journal doesn't fit into memory";
3979 r = -ENOMEM;
3980 goto bad;
3981 }
3982 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3983 if (!ic->journal_tree) {
3984 *error = "Could not allocate memory for journal tree";
3985 r = -ENOMEM;
3986 }
3987bad:
3988 kfree(crypt_data);
3989 kfree(crypt_iv);
3990 skcipher_request_free(req);
3991
3992 return r;
3993}
3994
3995/*
3996  * Construct an integrity mapping
3997 *
3998 * Arguments:
3999 * device
4000 * offset from the start of the device
4001 * tag size
4002 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
4003 * number of optional arguments
4004 * optional arguments:
4005 * journal_sectors
4006 * interleave_sectors
4007 * buffer_sectors
4008 * journal_watermark
4009 * commit_time
4010 * meta_device
4011 * block_size
4012 * sectors_per_bit
4013 * bitmap_flush_interval
4014 * internal_hash
4015 * journal_crypt
4016 * journal_mac
4017  *	recalculate
 *	reset_recalculate
 *	allow_discards
 *	fix_padding
 *	fix_hmac
 *	legacy_recalculate
4018 */
4019static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4020{
4021 struct dm_integrity_c *ic;
4022 char dummy;
4023 int r;
4024 unsigned int extra_args;
4025 struct dm_arg_set as;
4026 static const struct dm_arg _args[] = {
4027 {0, 18, "Invalid number of feature args"},
4028 };
4029 unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
4030 bool should_write_sb;
4031 __u64 threshold;
4032 unsigned long long start;
4033 __s8 log2_sectors_per_bitmap_bit = -1;
4034 __s8 log2_blocks_per_bitmap_bit;
4035 __u64 bits_in_journal;
4036 __u64 n_bitmap_bits;
4037
4038#define DIRECT_ARGUMENTS 4
4039
4040 if (argc <= DIRECT_ARGUMENTS) {
4041 ti->error = "Invalid argument count";
4042 return -EINVAL;
4043 }
4044
4045 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
4046 if (!ic) {
4047 ti->error = "Cannot allocate integrity context";
4048 return -ENOMEM;
4049 }
4050 ti->private = ic;
4051 ti->per_io_data_size = sizeof(struct dm_integrity_io);
4052 ic->ti = ti;
4053
4054 ic->in_progress = RB_ROOT;
4055 INIT_LIST_HEAD(&ic->wait_list);
4056 init_waitqueue_head(&ic->endio_wait);
4057 bio_list_init(&ic->flush_bio_list);
4058 init_waitqueue_head(&ic->copy_to_journal_wait);
4059 init_completion(&ic->crypto_backoff);
4060 atomic64_set(&ic->number_of_mismatches, 0);
4061 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
4062
4063 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
4064 if (r) {
4065 ti->error = "Device lookup failed";
4066 goto bad;
4067 }
4068
4069 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
4070 ti->error = "Invalid starting offset";
4071 r = -EINVAL;
4072 goto bad;
4073 }
4074 ic->start = start;
4075
4076 if (strcmp(argv[2], "-")) {
4077 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
4078 ti->error = "Invalid tag size";
4079 r = -EINVAL;
4080 goto bad;
4081 }
4082 }
4083
4084 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
4085 !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
4086 ic->mode = argv[3][0];
4087 } else {
4088 ti->error = "Invalid mode (expecting J, B, D, R)";
4089 r = -EINVAL;
4090 goto bad;
4091 }
4092
4093 journal_sectors = 0;
4094 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
4095 buffer_sectors = DEFAULT_BUFFER_SECTORS;
4096 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
4097 sync_msec = DEFAULT_SYNC_MSEC;
4098 ic->sectors_per_block = 1;
4099
4100 as.argc = argc - DIRECT_ARGUMENTS;
4101 as.argv = argv + DIRECT_ARGUMENTS;
4102 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
4103 if (r)
4104 goto bad;
4105
4106 while (extra_args--) {
4107 const char *opt_string;
4108 unsigned int val;
4109 unsigned long long llval;
4110
4111 opt_string = dm_shift_arg(&as);
4112 if (!opt_string) {
4113 r = -EINVAL;
4114 ti->error = "Not enough feature arguments";
4115 goto bad;
4116 }
4117 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
4118 journal_sectors = val ? val : 1;
4119 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
4120 interleave_sectors = val;
4121 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
4122 buffer_sectors = val;
4123 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
4124 journal_watermark = val;
4125 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
4126 sync_msec = val;
4127 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
4128 if (ic->meta_dev) {
4129 dm_put_device(ti, ic->meta_dev);
4130 ic->meta_dev = NULL;
4131 }
4132 r = dm_get_device(ti, strchr(opt_string, ':') + 1,
4133 dm_table_get_mode(ti->table), &ic->meta_dev);
4134 if (r) {
4135 ti->error = "Device lookup failed";
4136 goto bad;
4137 }
4138 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
4139 if (val < 1 << SECTOR_SHIFT ||
4140 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
4141 (val & (val - 1))) {
4142 r = -EINVAL;
4143 ti->error = "Invalid block_size argument";
4144 goto bad;
4145 }
4146 ic->sectors_per_block = val >> SECTOR_SHIFT;
4147 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
4148 log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
4149 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
4150 if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
4151 r = -EINVAL;
4152 ti->error = "Invalid bitmap_flush_interval argument";
4153 goto bad;
4154 }
4155 ic->bitmap_flush_interval = msecs_to_jiffies(val);
4156 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
4157 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4158 "Invalid internal_hash argument");
4159 if (r)
4160 goto bad;
4161 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
4162 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4163 "Invalid journal_crypt argument");
4164 if (r)
4165 goto bad;
4166 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
4167 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4168 "Invalid journal_mac argument");
4169 if (r)
4170 goto bad;
4171 } else if (!strcmp(opt_string, "recalculate")) {
4172 ic->recalculate_flag = true;
4173 } else if (!strcmp(opt_string, "reset_recalculate")) {
4174 ic->recalculate_flag = true;
4175 ic->reset_recalculate_flag = true;
4176 } else if (!strcmp(opt_string, "allow_discards")) {
4177 ic->discard = true;
4178 } else if (!strcmp(opt_string, "fix_padding")) {
4179 ic->fix_padding = true;
4180 } else if (!strcmp(opt_string, "fix_hmac")) {
4181 ic->fix_hmac = true;
4182 } else if (!strcmp(opt_string, "legacy_recalculate")) {
4183 ic->legacy_recalculate = true;
4184 } else {
4185 r = -EINVAL;
4186 ti->error = "Invalid argument";
4187 goto bad;
4188 }
4189 }
4190
4191 ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
4192 if (!ic->meta_dev)
4193 ic->meta_device_sectors = ic->data_device_sectors;
4194 else
4195 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
4196
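	/*
	 * Default journal size: 1/128 of the data device, capped at
	 * DEFAULT_MAX_JOURNAL_SECTORS.
	 */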
4197 if (!journal_sectors) {
4198 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
4199 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4200 }
4201
4202 if (!buffer_sectors)
4203 buffer_sectors = 1;
4204 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
4205
4206 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
4207 "Invalid internal hash", "Error setting internal hash key");
4208 if (r)
4209 goto bad;
4210
4211 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4212 "Invalid journal mac", "Error setting journal mac key");
4213 if (r)
4214 goto bad;
4215
4216 if (!ic->tag_size) {
4217 if (!ic->internal_hash) {
4218 ti->error = "Unknown tag size";
4219 r = -EINVAL;
4220 goto bad;
4221 }
4222 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4223 }
4224 if (ic->tag_size > MAX_TAG_SIZE) {
4225 ti->error = "Too big tag size";
4226 r = -EINVAL;
4227 goto bad;
4228 }
4229 if (!(ic->tag_size & (ic->tag_size - 1)))
4230 ic->log2_tag_size = __ffs(ic->tag_size);
4231 else
4232 ic->log2_tag_size = -1;
4233
4234 if (ic->mode == 'B' && !ic->internal_hash) {
4235 r = -EINVAL;
4236 		ti->error = "Bitmap mode can only be used with internal hash";
4237 goto bad;
4238 }
4239
4240 if (ic->discard && !ic->internal_hash) {
4241 r = -EINVAL;
4242 		ti->error = "Discard can only be used with internal hash";
4243 goto bad;
4244 }
4245
4246 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4247 ic->autocommit_msec = sync_msec;
4248 timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4249
4250 ic->io = dm_io_client_create();
4251 if (IS_ERR(ic->io)) {
4252 r = PTR_ERR(ic->io);
4253 ic->io = NULL;
4254 ti->error = "Cannot allocate dm io";
4255 goto bad;
4256 }
4257
4258 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4259 if (r) {
4260 ti->error = "Cannot allocate mempool";
4261 goto bad;
4262 }
4263
4264 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4265 WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4266 if (!ic->metadata_wq) {
4267 ti->error = "Cannot allocate workqueue";
4268 r = -ENOMEM;
4269 goto bad;
4270 }
4271
4272 	/*
4273 	 * This workqueue must be ordered: otherwise bios could be
4274 	 * reordered, reducing performance.
4275 	 */
4276 ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
4277 if (!ic->wait_wq) {
4278 ti->error = "Cannot allocate workqueue";
4279 r = -ENOMEM;
4280 goto bad;
4281 }
4282
4283 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4284 METADATA_WORKQUEUE_MAX_ACTIVE);
4285 if (!ic->offload_wq) {
4286 ti->error = "Cannot allocate workqueue";
4287 r = -ENOMEM;
4288 goto bad;
4289 }
4290
4291 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4292 if (!ic->commit_wq) {
4293 ti->error = "Cannot allocate workqueue";
4294 r = -ENOMEM;
4295 goto bad;
4296 }
4297 INIT_WORK(&ic->commit_work, integrity_commit);
4298
4299 if (ic->mode == 'J' || ic->mode == 'B') {
4300 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4301 if (!ic->writer_wq) {
4302 ti->error = "Cannot allocate workqueue";
4303 r = -ENOMEM;
4304 goto bad;
4305 }
4306 INIT_WORK(&ic->writer_work, integrity_writer);
4307 }
4308
4309 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4310 if (!ic->sb) {
4311 r = -ENOMEM;
4312 ti->error = "Cannot allocate superblock area";
4313 goto bad;
4314 }
4315
4316 r = sync_rw_sb(ic, REQ_OP_READ);
4317 if (r) {
4318 ti->error = "Error reading superblock";
4319 goto bad;
4320 }
4321 should_write_sb = false;
4322 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4323 if (ic->mode != 'R') {
4324 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4325 r = -EINVAL;
4326 ti->error = "The device is not initialized";
4327 goto bad;
4328 }
4329 }
4330
4331 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4332 if (r) {
4333 ti->error = "Could not initialize superblock";
4334 goto bad;
4335 }
4336 if (ic->mode != 'R')
4337 should_write_sb = true;
4338 }
4339
4340 if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
4341 r = -EINVAL;
4342 ti->error = "Unknown version";
4343 goto bad;
4344 }
4345 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4346 r = -EINVAL;
4347 ti->error = "Tag size doesn't match the information in superblock";
4348 goto bad;
4349 }
4350 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4351 r = -EINVAL;
4352 ti->error = "Block size doesn't match the information in superblock";
4353 goto bad;
4354 }
4355 if (!le32_to_cpu(ic->sb->journal_sections)) {
4356 r = -EINVAL;
4357 ti->error = "Corrupted superblock, journal_sections is 0";
4358 goto bad;
4359 }
4360 /* make sure that ti->max_io_len doesn't overflow */
4361 if (!ic->meta_dev) {
4362 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4363 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4364 r = -EINVAL;
4365 ti->error = "Invalid interleave_sectors in the superblock";
4366 goto bad;
4367 }
4368 } else {
4369 if (ic->sb->log2_interleave_sectors) {
4370 r = -EINVAL;
4371 ti->error = "Invalid interleave_sectors in the superblock";
4372 goto bad;
4373 }
4374 }
4375 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4376 r = -EINVAL;
4377 ti->error = "Journal mac mismatch";
4378 goto bad;
4379 }
4380
4381 get_provided_data_sectors(ic);
4382 if (!ic->provided_data_sectors) {
4383 r = -EINVAL;
4384 ti->error = "The device is too small";
4385 goto bad;
4386 }
4387
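	/*
	 * If the metadata doesn't fit with the current buffer size, retry
	 * with progressively smaller dm-bufio buffers (separate metadata
	 * device only), down to 8 sectors per buffer.
	 */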
4388try_smaller_buffer:
4389 r = calculate_device_limits(ic);
4390 if (r) {
4391 if (ic->meta_dev) {
4392 if (ic->log2_buffer_sectors > 3) {
4393 ic->log2_buffer_sectors--;
4394 goto try_smaller_buffer;
4395 }
4396 }
4397 ti->error = "The device is too small";
4398 goto bad;
4399 }
4400
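	/*
	 * Pick the bitmap granularity: each bitmap bit covers
	 * 2^log2_sectors_per_bitmap_bit sectors, and the granularity is
	 * increased until the whole bitmap fits into the journal area
	 * (which holds the bitmap in 'B' mode).
	 */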
4401 if (log2_sectors_per_bitmap_bit < 0)
4402 log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4403 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4404 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4405
4406 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4407 if (bits_in_journal > UINT_MAX)
4408 bits_in_journal = UINT_MAX;
4409 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4410 log2_sectors_per_bitmap_bit++;
4411
4412 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4413 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4414 if (should_write_sb)
4415 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4416
4417 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4418 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4419 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
4420
4421 if (!ic->meta_dev)
4422 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4423
4424 if (ti->len > ic->provided_data_sectors) {
4425 r = -EINVAL;
4426 ti->error = "Not enough provided sectors for requested mapping size";
4427 goto bad;
4428 }
4430
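	/*
	 * Convert the journal watermark from a percentage to an absolute
	 * count of free journal entries, rounded to the nearest entry; a
	 * journal commit is started when free entries drop below it.
	 */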
4431 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4432 threshold += 50;
4433 do_div(threshold, 100);
4434 ic->free_sectors_threshold = threshold;
4435
4436 DEBUG_print("initialized:\n");
4437 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4438 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
4439 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4440 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
4441 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
4442 DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
4443 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
4444 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4445 DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
4446 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
4447 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
4448 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
4449 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4450 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4451 DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);
4452
4453 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4454 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4455 ic->sb->recalc_sector = cpu_to_le64(0);
4456 }
4457
4458 if (ic->internal_hash) {
4459 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4460 if (!ic->recalc_wq) {
4461 ti->error = "Cannot allocate workqueue";
4462 r = -ENOMEM;
4463 goto bad;
4464 }
4465 INIT_WORK(&ic->recalc_work, integrity_recalc);
4466 } else {
4467 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4468 ti->error = "Recalculate can only be specified with internal_hash";
4469 r = -EINVAL;
4470 goto bad;
4471 }
4472 }
4473
4474 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4475 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4476 dm_integrity_disable_recalculate(ic)) {
4477 ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4478 r = -EOPNOTSUPP;
4479 goto bad;
4480 }
4481
4482 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4483 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
4484 if (IS_ERR(ic->bufio)) {
4485 r = PTR_ERR(ic->bufio);
4486 ti->error = "Cannot initialize dm-bufio";
4487 ic->bufio = NULL;
4488 goto bad;
4489 }
4490 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4491
4492 if (ic->mode != 'R') {
4493 r = create_journal(ic, &ti->error);
4494 if (r)
4495 goto bad;
4497 }
4498
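	/*
	 * Bitmap mode: allocate the recalc and may-write bitmaps and one
	 * bitmap_block_status per BITMAP_BLOCK_SIZE block. The live bitmap
	 * is kept in the journal pages, so bbs->bitmap points into
	 * ic->journal.
	 */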
4499 if (ic->mode == 'B') {
4500 unsigned int i;
4501 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4502
4503 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4504 if (!ic->recalc_bitmap) {
4505 r = -ENOMEM;
4506 goto bad;
4507 }
4508 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4509 if (!ic->may_write_bitmap) {
4510 r = -ENOMEM;
4511 goto bad;
4512 }
4513 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4514 if (!ic->bbs) {
4515 r = -ENOMEM;
4516 goto bad;
4517 }
4518 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4519 for (i = 0; i < ic->n_bitmap_blocks; i++) {
4520 struct bitmap_block_status *bbs = &ic->bbs[i];
4521 unsigned int sector, pl_index, pl_offset;
4522
4523 INIT_WORK(&bbs->work, bitmap_block_work);
4524 bbs->ic = ic;
4525 bbs->idx = i;
4526 bio_list_init(&bbs->bio_queue);
4527 spin_lock_init(&bbs->bio_queue_lock);
4528
4529 sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4530 pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4531 pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4532
4533 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4534 }
4535 }
4536
4537 if (should_write_sb) {
4538 init_journal(ic, 0, ic->journal_sections, 0);
4539 r = dm_integrity_failed(ic);
4540 if (unlikely(r)) {
4541 ti->error = "Error initializing journal";
4542 goto bad;
4543 }
4544 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
4545 if (r) {
4546 ti->error = "Error initializing superblock";
4547 goto bad;
4548 }
4549 ic->just_formatted = true;
4550 }
4551
4552 if (!ic->meta_dev) {
4553 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4554 if (r)
4555 goto bad;
4556 }
4557 if (ic->mode == 'B') {
4558 unsigned int max_io_len;
4559
4560 max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4561 if (!max_io_len)
4562 max_io_len = 1U << 31;
4563 DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4564 if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4565 r = dm_set_target_max_io_len(ti, max_io_len);
4566 if (r)
4567 goto bad;
4568 }
4569 }
4570
4571 if (!ic->internal_hash)
4572 dm_integrity_set(ti, ic);
4573
4574 ti->num_flush_bios = 1;
4575 ti->flush_supported = true;
4576 if (ic->discard)
4577 ti->num_discard_bios = 1;
4578
4579 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
4580 return 0;
4581
4582bad:
4583 dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
4584 dm_integrity_dtr(ti);
4585 return r;
4586}
4587
4588static void dm_integrity_dtr(struct dm_target *ti)
4589{
4590 struct dm_integrity_c *ic = ti->private;
4591
4592 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4593 BUG_ON(!list_empty(&ic->wait_list));
4594
4595 if (ic->mode == 'B')
4596 cancel_delayed_work_sync(&ic->bitmap_flush_work);
4597 if (ic->metadata_wq)
4598 destroy_workqueue(ic->metadata_wq);
4599 if (ic->wait_wq)
4600 destroy_workqueue(ic->wait_wq);
4601 if (ic->offload_wq)
4602 destroy_workqueue(ic->offload_wq);
4603 if (ic->commit_wq)
4604 destroy_workqueue(ic->commit_wq);
4605 if (ic->writer_wq)
4606 destroy_workqueue(ic->writer_wq);
4607 if (ic->recalc_wq)
4608 destroy_workqueue(ic->recalc_wq);
4609 kvfree(ic->bbs);
4610 if (ic->bufio)
4611 dm_bufio_client_destroy(ic->bufio);
4612 mempool_exit(&ic->journal_io_mempool);
4613 if (ic->io)
4614 dm_io_client_destroy(ic->io);
4615 if (ic->dev)
4616 dm_put_device(ti, ic->dev);
4617 if (ic->meta_dev)
4618 dm_put_device(ti, ic->meta_dev);
4619 dm_integrity_free_page_list(ic->journal);
4620 dm_integrity_free_page_list(ic->journal_io);
4621 dm_integrity_free_page_list(ic->journal_xor);
4622 dm_integrity_free_page_list(ic->recalc_bitmap);
4623 dm_integrity_free_page_list(ic->may_write_bitmap);
4624 if (ic->journal_scatterlist)
4625 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4626 if (ic->journal_io_scatterlist)
4627 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4628 if (ic->sk_requests) {
4629 unsigned int i;
4630
4631 for (i = 0; i < ic->journal_sections; i++) {
4632 struct skcipher_request *req;
4633
4634 req = ic->sk_requests[i];
4635 if (req) {
4636 kfree_sensitive(req->iv);
4637 skcipher_request_free(req);
4638 }
4639 }
4640 kvfree(ic->sk_requests);
4641 }
4642 kvfree(ic->journal_tree);
4643 if (ic->sb)
4644 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4645
4646 if (ic->internal_hash)
4647 crypto_free_shash(ic->internal_hash);
4648 free_alg(&ic->internal_hash_alg);
4649
4650 if (ic->journal_crypt)
4651 crypto_free_skcipher(ic->journal_crypt);
4652 free_alg(&ic->journal_crypt_alg);
4653
4654 if (ic->journal_mac)
4655 crypto_free_shash(ic->journal_mac);
4656 free_alg(&ic->journal_mac_alg);
4657
4658 kfree(ic);
4659 dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
4660}
4661
4662static struct target_type integrity_target = {
4663 .name = "integrity",
4664 .version = {1, 10, 0},
4665 .module = THIS_MODULE,
4666 .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4667 .ctr = dm_integrity_ctr,
4668 .dtr = dm_integrity_dtr,
4669 .map = dm_integrity_map,
4670 .postsuspend = dm_integrity_postsuspend,
4671 .resume = dm_integrity_resume,
4672 .status = dm_integrity_status,
4673 .iterate_devices = dm_integrity_iterate_devices,
4674 .io_hints = dm_integrity_io_hints,
4675};
4676
4677static int __init dm_integrity_init(void)
4678{
4679 int r;
4680
4681 journal_io_cache = kmem_cache_create("integrity_journal_io",
4682 sizeof(struct journal_io), 0, 0, NULL);
4683 if (!journal_io_cache) {
4684 DMERR("can't allocate journal io cache");
4685 return -ENOMEM;
4686 }
4687
4688 r = dm_register_target(&integrity_target);
4689 if (r < 0) {
4690 kmem_cache_destroy(journal_io_cache);
4691 return r;
4692 }
4693
4694 return 0;
4695}
4696
4697static void __exit dm_integrity_exit(void)
4698{
4699 dm_unregister_target(&integrity_target);
4700 kmem_cache_destroy(journal_io_cache);
4701}
4702
4703module_init(dm_integrity_init);
4704module_exit(dm_integrity_exit);
4705
4706MODULE_AUTHOR("Milan Broz");
4707MODULE_AUTHOR("Mikulas Patocka");
4708MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4709MODULE_LICENSE("GPL");