Merge tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:
"All fixes marked for stable:

- Fix DM thinp btree corruption seen when inserting a new key/value
pair into a full root node.

- Fix DM thinp btree removal deadlock due to an artificially low
number of allowed concurrent locks.

- Fix possible DM crypt corruption if kernel keyring service is used.
Only affects ciphers using the following IVs: essiv, lmk and tcw.

- Two DM crypt device initialization error checking fixes.

- Fix DM integrity to allow use of async ciphers that require DMA"

* tag 'for-4.15/dm-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm crypt: fix error return code in crypt_ctr()
dm crypt: wipe kernel key copy after IV initialization
dm integrity: don't store cipher request on the stack
dm crypt: fix crash by adding missing check for auth key size
dm btree: fix serious bug in btree_split_beneath()
dm thin metadata: THIN_MAX_CONCURRENT_LOCKS should be 6

4 files changed, 59 insertions(+), 35 deletions(-)
drivers/md/dm-crypt.c (+15 -5)

···
         /* Ignore extra keys (which are used for IV etc) */
         subkey_size = crypt_subkey_size(cc);
 
-        if (crypt_integrity_hmac(cc))
+        if (crypt_integrity_hmac(cc)) {
+                if (subkey_size < cc->key_mac_size)
+                        return -EINVAL;
+
                 crypt_copy_authenckey(cc->authenc_key, cc->key,
                                       subkey_size - cc->key_mac_size,
                                       cc->key_mac_size);
+        }
+
         for (i = 0; i < cc->tfms_count; i++) {
                 if (crypt_integrity_hmac(cc))
                         r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
···
         clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
         ret = crypt_setkey(cc);
-
-        /* wipe the kernel key payload copy in each case */
-        memset(cc->key, 0, cc->key_size * sizeof(u8));
 
         if (!ret) {
                 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
···
                 }
         }
 
+        /* wipe the kernel key payload copy */
+        if (cc->key_string)
+                memset(cc->key, 0, cc->key_size * sizeof(u8));
+
         return ret;
 }
···
                                          cc->tag_pool_max_sectors * cc->on_disk_tag_size);
                 if (!cc->tag_pool) {
                         ti->error = "Cannot allocate integrity tags mempool";
+                        ret = -ENOMEM;
                         goto bad;
                 }
 
···
                         return ret;
                 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
                         ret = cc->iv_gen_ops->init(cc);
+                /* wipe the kernel key payload copy */
+                if (cc->key_string)
+                        memset(cc->key, 0, cc->key_size * sizeof(u8));
                 return ret;
         }
         if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
···
 
 static struct target_type crypt_target = {
         .name   = "crypt",
-        .version = {1, 18, 0},
+        .version = {1, 18, 1},
         .module = THIS_MODULE,
         .ctr    = crypt_ctr,
         .dtr    = crypt_dtr,
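Note on the two dm-crypt key hunks above: the unconditional wipe right after crypt_setkey() is removed, and the in-core key copy is instead wiped once the IV generator has been initialized, and only when the key came from the kernel keyring. A minimal sketch of that ordering, with an invented helper name (example_set_key_and_init_iv() does not exist in dm-crypt.c):

/*
 * Illustrative only -- this helper is not in the tree; it just restates the
 * ordering the fix enforces.  essiv/lmk/tcw IV generators read the raw key
 * from cc->key in their ->init() hook, so a keyring-sourced copy may only be
 * wiped after that hook has run.
 */
static int example_set_key_and_init_iv(struct crypt_config *cc)
{
        int ret = crypt_setkey(cc);                     /* program the cipher transforms */

        if (!ret && cc->iv_gen_ops && cc->iv_gen_ops->init)
                ret = cc->iv_gen_ops->init(cc);         /* may still dereference cc->key */

        if (cc->key_string)                             /* key was taken from the keyring */
                memset(cc->key, 0, cc->key_size * sizeof(u8));

        return ret;
}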
drivers/md/dm-integrity.c (+37 -12)

···
         int r = 0;
         unsigned i;
         __u64 journal_pages, journal_desc_size, journal_tree_size;
-        unsigned char *crypt_data = NULL;
+        unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+        struct skcipher_request *req = NULL;
 
         ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
         ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
···
 
         if (blocksize == 1) {
                 struct scatterlist *sg;
-                SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                unsigned char iv[ivsize];
-                skcipher_request_set_tfm(req, ic->journal_crypt);
+
+                req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                if (!req) {
+                        *error = "Could not allocate crypt request";
+                        r = -ENOMEM;
+                        goto bad;
+                }
+
+                crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                if (!crypt_iv) {
+                        *error = "Could not allocate iv";
+                        r = -ENOMEM;
+                        goto bad;
+                }
 
                 ic->journal_xor = dm_integrity_alloc_page_list(ic);
                 if (!ic->journal_xor) {
···
                         sg_set_buf(&sg[i], va, PAGE_SIZE);
                 }
                 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
-                memset(iv, 0x00, ivsize);
+                memset(crypt_iv, 0x00, ivsize);
 
-                skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv);
+                skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
                 init_completion(&comp.comp);
                 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                 if (do_crypt(true, req, &comp))
···
                 crypto_free_skcipher(ic->journal_crypt);
                 ic->journal_crypt = NULL;
         } else {
-                SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
-                unsigned char iv[ivsize];
                 unsigned crypt_len = roundup(ivsize, blocksize);
+
+                req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+                if (!req) {
+                        *error = "Could not allocate crypt request";
+                        r = -ENOMEM;
+                        goto bad;
+                }
+
+                crypt_iv = kmalloc(ivsize, GFP_KERNEL);
+                if (!crypt_iv) {
+                        *error = "Could not allocate iv";
+                        r = -ENOMEM;
+                        goto bad;
+                }
 
                 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
                 if (!crypt_data) {
···
                         r = -ENOMEM;
                         goto bad;
                 }
-
-                skcipher_request_set_tfm(req, ic->journal_crypt);
 
                 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
                 if (!ic->journal_scatterlist) {
···
                         struct skcipher_request *section_req;
                         __u32 section_le = cpu_to_le32(i);
 
-                        memset(iv, 0x00, ivsize);
+                        memset(crypt_iv, 0x00, ivsize);
                         memset(crypt_data, 0x00, crypt_len);
                         memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
 
                         sg_init_one(&sg, crypt_data, crypt_len);
-                        skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+                        skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
                         init_completion(&comp.comp);
                         comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                         if (do_crypt(true, req, &comp))
···
         }
 bad:
         kfree(crypt_data);
+        kfree(crypt_iv);
+        skcipher_request_free(req);
+
         return r;
 }
 
drivers/md/dm-thin-metadata.c (+5 -1)

···
 #define SECTOR_TO_BLOCK_SHIFT 3
 
 /*
+ * For btree insert:
  *  3 for btree insert +
  *  2 for btree lookup used within space map
+ * For btree remove:
+ *  2 for shadow spine +
+ *  4 for rebalance 3 child node
  */
-#define THIN_MAX_CONCURRENT_LOCKS 5
+#define THIN_MAX_CONCURRENT_LOCKS 6
 
 /* This should be plenty */
 #define SPACE_MAP_ROOT_SIZE 128
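For reference, the arithmetic behind the new thin-metadata limit, restated as an illustrative sketch (these macro names are invented and not part of dm-thin-metadata.c): an insert holds at most 3 + 2 = 5 concurrent metadata block locks, while a remove can hold 2 + 4 = 6, so the previous limit of 5 could deadlock the remove path.

/* Illustrative only -- these macros are not in the kernel source. */
#define EXAMPLE_LOCKS_FOR_INSERT (3 /* btree insert */ + 2 /* space-map lookup */)     /* = 5 */
#define EXAMPLE_LOCKS_FOR_REMOVE (2 /* shadow spine */ + 4 /* rebalance 3 children */) /* = 6 */

/* The limit must cover the worse of the two paths, hence the bump from 5 to 6. */
#define EXAMPLE_MAX_CONCURRENT_LOCKS \
        (EXAMPLE_LOCKS_FOR_INSERT > EXAMPLE_LOCKS_FOR_REMOVE ? \
         EXAMPLE_LOCKS_FOR_INSERT : EXAMPLE_LOCKS_FOR_REMOVE)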
drivers/md/persistent-data/dm-btree.c (+2 -17)

···
         pn->keys[1] = rn->keys[0];
         memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
 
-        /*
-         * rejig the spine. This is ugly, since it knows too
-         * much about the spine
-         */
-        if (s->nodes[0] != new_parent) {
-                unlock_block(s->info, s->nodes[0]);
-                s->nodes[0] = new_parent;
-        }
-        if (key < le64_to_cpu(rn->keys[0])) {
-                unlock_block(s->info, right);
-                s->nodes[1] = left;
-        } else {
-                unlock_block(s->info, left);
-                s->nodes[1] = right;
-        }
-        s->count = 2;
-
+        unlock_block(s->info, left);
+        unlock_block(s->info, right);
         return 0;
 }
 