Merge tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:
"All fixes marked for stable:

- Fix DM thinp btree corruption seen when inserting a new key/value
pair into a full root node.

- Fix DM thinp btree removal deadlock due to an artificially low
number of allowed concurrent locks.

- Fix possible DM crypt corruption if the kernel keyring service is
used. Only affects ciphers using the following IVs: essiv, lmk and
tcw.

- Two DM crypt device initialization error-checking fixes.

- Fix DM integrity to allow use of async ciphers that require DMA"

* tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm crypt: fix error return code in crypt_ctr()
dm crypt: wipe kernel key copy after IV initialization
dm integrity: don't store cipher request on the stack
dm crypt: fix crash by adding missing check for auth key size
dm btree: fix serious bug in btree_split_beneath()
dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6

+59 -35
+15 -5
drivers/md/dm-crypt.c
···
         /* Ignore extra keys (which are used for IV etc) */
         subkey_size = crypt_subkey_size(cc);
 
-        if (crypt_integrity_hmac(cc))
+        if (crypt_integrity_hmac(cc)) {
+                if (subkey_size < cc->key_mac_size)
+                        return -EINVAL;
+
                 crypt_copy_authenckey(cc->authenc_key, cc->key,
                                       subkey_size - cc->key_mac_size,
                                       cc->key_mac_size);
+        }
+
         for (i = 0; i < cc->tfms_count; i++) {
                 if (crypt_integrity_hmac(cc))
                         r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
···
         clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
         ret = crypt_setkey(cc);
-
-        /* wipe the kernel key payload copy in each case */
-        memset(cc->key, 0, cc->key_size * sizeof(u8));
 
         if (!ret) {
                 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
···
                 }
         }
 
+        /* wipe the kernel key payload copy */
+        if (cc->key_string)
+                memset(cc->key, 0, cc->key_size * sizeof(u8));
+
         return ret;
 }
 
···
                                 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
                 if (!cc->tag_pool) {
                         ti->error = "Cannot allocate integrity tags mempool";
+                        ret = -ENOMEM;
                         goto bad;
                 }
 
···
                         return ret;
                 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                         ret = cc->iv_gen_ops->init(cc);
+                /* wipe the kernel key payload copy */
+                if (cc->key_string)
+                        memset(cc->key, 0, cc->key_size * sizeof(u8));
                 return ret;
         }
         if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
···
 
 static struct target_type crypt_target = {
         .name = "crypt",
-        .version = {1, 18, 0},
+        .version = {1, 18, 1},
         .module = THIS_MODULE,
         .ctr = crypt_ctr,
         .dtr = crypt_dtr,
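Two things are going on above. The new subkey_size bound check prevents an unsigned underflow in "subkey_size - cc->key_mac_size" when a too-short authenticated key is supplied, which previously crashed the kernel. The moved memset is about ordering: the essiv, lmk and tcw IV generators read cc->key in their init hook, so the old code, which zeroed the key immediately after crypt_setkey(), handed those generators an already-wiped key. A minimal sketch of the corrected ordering, using a hypothetical helper name (the real logic is inline in crypt_set_key() and crypt_message(), not factored out like this):

/*
 * Sketch only -- hypothetical helper, not the actual dm-crypt code.
 * Program the tfms first, let the IV generator consume cc->key, and
 * only then wipe the keyring-sourced copy.
 */
static int crypt_set_key_and_wipe(struct crypt_config *cc)
{
        int ret = crypt_setkey(cc);             /* program the cipher tfms */

        if (!ret && cc->iv_gen_ops && cc->iv_gen_ops->init)
                ret = cc->iv_gen_ops->init(cc); /* essiv/lmk/tcw read cc->key here */

        if (cc->key_string)                     /* key payload came from the keyring */
                memset(cc->key, 0, cc->key_size * sizeof(u8));

        return ret;
}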
+37 -12
drivers/md/dm-integrity.c
···
         int r = 0;
         unsigned i;
         __u64 journal_pages, journal_desc_size, journal_tree_size;
-        unsigned char *crypt_data = NULL;
+        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+        struct skcipher_request *req = NULL;
 
         ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
         ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
···
 
                 if (blocksize == 1) {
                         struct scatterlist *sg;
-                        SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                        unsigned char iv[ivsize];
-                        skcipher_request_set_tfm(req, ic->journal_crypt);
+
+                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                        if (!req) {
+                                *error = "Could not allocate crypt request";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
+
+                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                        if (!crypt_iv) {
+                                *error = "Could not allocate iv";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
 
                         ic->journal_xor = dm_integrity_alloc_page_list(ic);
                         if (!ic->journal_xor) {
···
                                 sg_set_buf(&sg[i], va, PAGE_SIZE);
                         }
                         sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-                        memset(iv, 0x00, ivsize);
+                        memset(crypt_iv, 0x00, ivsize);
 
-                        skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+                        skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
                         init_completion(&comp.comp);
                         comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                         if (do_crypt(true, req, &comp))
···
                         crypto_free_skcipher(ic->journal_crypt);
                         ic->journal_crypt = NULL;
                 } else {
-                        SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                        unsigned char iv[ivsize];
                         unsigned crypt_len = roundup(ivsize, blocksize);
+
+                        req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                        if (!req) {
+                                *error = "Could not allocate crypt request";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
+
+                        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                        if (!crypt_iv) {
+                                *error = "Could not allocate iv";
+                                r = -ENOMEM;
+                                goto bad;
+                        }
 
                         crypt_data = kmalloc(crypt_len, GFP_KERNEL);
                         if (!crypt_data) {
···
                                 r = -ENOMEM;
                                 goto bad;
                         }
-
-                        skcipher_request_set_tfm(req, ic->journal_crypt);
 
                         ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
                         if (!ic->journal_scatterlist) {
···
                                 struct skcipher_request *section_req;
                                 __u32 section_le = cpu_to_le32(i);
 
-                                memset(iv, 0x00, ivsize);
+                                memset(crypt_iv, 0x00, ivsize);
                                 memset(crypt_data, 0x00, crypt_len);
                                 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
 
                                 sg_init_one(&sg, crypt_data, crypt_len);
-                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+                                skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
                                 init_completion(&comp.comp);
                                 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                                 if (do_crypt(true, req, &comp))
···
         }
 bad:
         kfree(crypt_data);
+        kfree(crypt_iv);
+        skcipher_request_free(req);
+
         return r;
 }
 
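The reason the request and IV had to move off the stack: with CONFIG_VMAP_STACK the kernel stack lives in vmalloc space, which cannot be handed to DMA, so giving an async (typically hardware-offloaded) cipher an SKCIPHER_REQUEST_ON_STACK request or an on-stack IV buffer can fail or corrupt memory. A sketch of the resulting allocate/free pattern, with a hypothetical function name and tfm/ivsize standing in for ic->journal_crypt and the computed IV size:

/*
 * Pattern sketch, not the dm-integrity code itself. Heap allocations
 * are DMA-safe; the old on-stack request and VLA IV were not.
 */
static int do_journal_crypt(struct crypto_skcipher *tfm, unsigned ivsize)
{
        struct skcipher_request *req = NULL;
        unsigned char *crypt_iv = NULL;
        int r = 0;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        crypt_iv = kmalloc(ivsize, GFP_KERNEL);
        if (!req || !crypt_iv) {
                r = -ENOMEM;
                goto bad;
        }
        /* ... build the scatterlist and submit the request ... */
bad:
        kfree(crypt_iv);                /* kfree(NULL) is a no-op */
        skcipher_request_free(req);     /* likewise NULL-safe */
        return r;
}

This is also why the real patch initializes both pointers to NULL at the top of create_journal(): the existing bad: label frees them unconditionally on every exit path.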
+5 -1
drivers/md/dm-thin-metadata.c
···
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128
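The arithmetic behind the bump: insertion takes at most 3 + 2 = 5 concurrent locks, but removal takes 2 (shadow spine) + 4 (rebalancing three child nodes) = 6, one more than the old limit, so a removal could block forever waiting for a sixth lock that could never be granted. The same budget restated as a compile-time check (hypothetical macro names, not kernel code):

/* Hypothetical restatement of the comment's lock budget. */
#define LOCKS_FOR_BTREE_INSERT  (3 /* insert */ + 2 /* space-map lookup */)             /* = 5 */
#define LOCKS_FOR_BTREE_REMOVE  (2 /* shadow spine */ + 4 /* rebalance 3 children */)   /* = 6 */

#define THIN_MAX_CONCURRENT_LOCKS \
        (LOCKS_FOR_BTREE_REMOVE > LOCKS_FOR_BTREE_INSERT ? \
         LOCKS_FOR_BTREE_REMOVE : LOCKS_FOR_BTREE_INSERT)

_Static_assert(THIN_MAX_CONCURRENT_LOCKS == 6,
               "btree removal is the worst case and needs six concurrent locks");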
+2 -17
drivers/md/persistent-data/dm-btree.c
···
         pn->keys[1] = rn->keys[0];
         memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 
-        /*
-         * rejig the spine. This is ugly, since it knows too
-         * much about the spine
-         */
-        if (s->nodes[0] != new_parent) {
-                unlock_block(s->info, s->nodes[0]);
-                s->nodes[0] = new_parent;
-        }
-        if (key < le64_to_cpu(rn->keys[0])) {
-                unlock_block(s->info, right);
-                s->nodes[1] = left;
-        } else {
-                unlock_block(s->info, left);
-                s->nodes[1] = right;
-        }
-        s->count = 2;
-
+        unlock_block(s->info, left);
+        unlock_block(s->info, right);
         return 0;
 }
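For context on why deleting the "rejig" is the fix: walking down the spine on insert performs two operations per node, making space for the new entry and adjusting the node's first key if the new key is lower than any present. When the root is full, btree_split_beneath() redistributes its entries between two new children under the old root. The removed code jumped the spine straight into one child, so that first-key adjustment could be skipped on the freshly split nodes, corrupting the tree when the inserted key was lower than every existing key. Unlocking both children and returning lets the main loop in btree_insert_raw() descend again from the parent through the normal path. A simplified sketch of the resulting control flow, with hypothetical helper names rather than the real dm-btree internals:

/* Hypothetical sketch of the insert walk after the fix. */
for (;;) {
        node = shadow_current_node();           /* starts at the (possibly new) parent */
        if (node_is_full(node)) {
                split_beneath(node);            /* unlock both children, keep parent */
                continue;                       /* re-descend, redoing per-node work */
        }
        adjust_lowest_key(node, key);           /* the step the old rejig skipped */
        if (node_is_leaf(node))
                return insert_entry(node, key);
        descend_to_child(node, key);            /* push the right child onto the spine */
}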