Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Topics: kernel, os, linux

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
"This fixes the following issues:
- Zero-length DMA mapping in caam
- Invalidly mapping stack memory for DMA in talitos
- Use after free in cavium/nitrox
- Key parsing in authenc
- Undefined shift in sm3
- Bogus completion call in authencesn
- SHA support detection in caam"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
crypto: sm3 - fix undefined shift by >= width of value
crypto: talitos - fix ablkcipher for CONFIG_VMAP_STACK
crypto: talitos - reorder code in talitos_edesc_alloc()
crypto: adiantum - initialize crypto_spawn::inst
crypto: cavium/nitrox - Use after free in process_response_list()
crypto: authencesn - Avoid twice completion call in decrypt path
crypto: caam - fix SHA support detection
crypto: caam - fix zero-length buffer DMA mapping
crypto: ccree - convert to use crypto_authenc_extractkeys()
crypto: bcm - convert to use crypto_authenc_extractkeys()
crypto: authenc - fix parsing key with misaligned rta_len

+80 -82
+4
crypto/adiantum.c
··· 539 539 ictx = skcipher_instance_ctx(inst); 540 540 541 541 /* Stream cipher, e.g. "xchacha12" */ 542 + crypto_set_skcipher_spawn(&ictx->streamcipher_spawn, 543 + skcipher_crypto_instance(inst)); 542 544 err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, 543 545 0, crypto_requires_sync(algt->type, 544 546 algt->mask)); ··· 549 547 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); 550 548 551 549 /* Block cipher, e.g. "aes" */ 550 + crypto_set_spawn(&ictx->blockcipher_spawn, 551 + skcipher_crypto_instance(inst)); 552 552 err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, 553 553 CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); 554 554 if (err)
+11 -3
crypto/authenc.c
··· 58 58 return -EINVAL; 59 59 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 60 60 return -EINVAL; 61 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 61 + 62 + /* 63 + * RTA_OK() didn't align the rtattr's payload when validating that it 64 + * fits in the buffer. Yet, the keys should start on the next 4-byte 65 + * aligned boundary. To avoid confusion, require that the rtattr 66 + * payload be exactly the param struct, which has a 4-byte aligned size. 67 + */ 68 + if (RTA_PAYLOAD(rta) != sizeof(*param)) 62 69 return -EINVAL; 70 + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); 63 71 64 72 param = RTA_DATA(rta); 65 73 keys->enckeylen = be32_to_cpu(param->enckeylen); 66 74 67 - key += RTA_ALIGN(rta->rta_len); 68 - keylen -= RTA_ALIGN(rta->rta_len); 75 + key += rta->rta_len; 76 + keylen -= rta->rta_len; 69 77 70 78 if (keylen < keys->enckeylen) 71 79 return -EINVAL;
+1 -1
crypto/authencesn.c
··· 279 279 struct aead_request *req = areq->data; 280 280 281 281 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); 282 - aead_request_complete(req, err); 282 + authenc_esn_request_complete(req, err); 283 283 } 284 284 285 285 static int crypto_authenc_esn_decrypt(struct aead_request *req)
+1 -1
crypto/sm3_generic.c
··· 100 100 101 101 for (i = 0; i <= 63; i++) { 102 102 103 - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); 103 + ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); 104 104 105 105 ss2 = ss1 ^ rol32(a, 12); 106 106
+1
drivers/crypto/Kconfig
··· 692 692 depends on ARCH_BCM_IPROC 693 693 depends on MAILBOX 694 694 default m 695 + select CRYPTO_AUTHENC 695 696 select CRYPTO_DES 696 697 select CRYPTO_MD5 697 698 select CRYPTO_SHA1
+13 -31
drivers/crypto/bcm/cipher.c
··· 2845 2845 struct spu_hw *spu = &iproc_priv.spu; 2846 2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2847 2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2848 - struct rtattr *rta = (void *)key; 2849 - struct crypto_authenc_key_param *param; 2850 - const u8 *origkey = key; 2851 - const unsigned int origkeylen = keylen; 2852 - 2853 - int ret = 0; 2848 + struct crypto_authenc_keys keys; 2849 + int ret; 2854 2850 2855 2851 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2856 2852 keylen); 2857 2853 flow_dump(" key: ", key, keylen); 2858 2854 2859 - if (!RTA_OK(rta, keylen)) 2860 - goto badkey; 2861 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 2862 - goto badkey; 2863 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 2855 + ret = crypto_authenc_extractkeys(&keys, key, keylen); 2856 + if (ret) 2864 2857 goto badkey; 2865 2858 2866 - param = RTA_DATA(rta); 2867 - ctx->enckeylen = be32_to_cpu(param->enckeylen); 2868 - 2869 - key += RTA_ALIGN(rta->rta_len); 2870 - keylen -= RTA_ALIGN(rta->rta_len); 2871 - 2872 - if (keylen < ctx->enckeylen) 2873 - goto badkey; 2874 - if (ctx->enckeylen > MAX_KEY_SIZE) 2859 + if (keys.enckeylen > MAX_KEY_SIZE || 2860 + keys.authkeylen > MAX_KEY_SIZE) 2875 2861 goto badkey; 2876 2862 2877 - ctx->authkeylen = keylen - ctx->enckeylen; 2863 + ctx->enckeylen = keys.enckeylen; 2864 + ctx->authkeylen = keys.authkeylen; 2878 2865 2879 - if (ctx->authkeylen > MAX_KEY_SIZE) 2880 - goto badkey; 2881 - 2882 - memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2866 + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); 2883 2867 /* May end up padding auth key. So make sure it's zeroed. 
*/ 2884 2868 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2885 - memcpy(ctx->authkey, key, ctx->authkeylen); 2869 + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); 2886 2870 2887 2871 switch (ctx->alg->cipher_info.alg) { 2888 2872 case CIPHER_ALG_DES: ··· 2874 2890 u32 tmp[DES_EXPKEY_WORDS]; 2875 2891 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2876 2892 2877 - if (des_ekey(tmp, key) == 0) { 2893 + if (des_ekey(tmp, keys.enckey) == 0) { 2878 2894 if (crypto_aead_get_flags(cipher) & 2879 2895 CRYPTO_TFM_REQ_WEAK_KEY) { 2880 2896 crypto_aead_set_flags(cipher, flags); ··· 2889 2905 break; 2890 2906 case CIPHER_ALG_3DES: 2891 2907 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2892 - const u32 *K = (const u32 *)key; 2908 + const u32 *K = (const u32 *)keys.enckey; 2893 2909 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2894 2910 2895 2911 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || ··· 2940 2956 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2941 2957 ctx->fallback_cipher->base.crt_flags |= 2942 2958 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2943 - ret = 2944 - crypto_aead_setkey(ctx->fallback_cipher, origkey, 2945 - origkeylen); 2959 + ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); 2946 2960 if (ret) { 2947 2961 flow_log(" fallback setkey() returned:%d\n", ret); 2948 2962 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+1 -1
drivers/crypto/caam/caamalg.c
··· 3476 3476 * Skip algorithms requiring message digests 3477 3477 * if MD or MD size is not supported by device. 3478 3478 */ 3479 - if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && 3479 + if (is_mdha(c2_alg_sel) && 3480 3480 (!md_inst || t_alg->aead.maxauthsize > md_limit)) 3481 3481 continue; 3482 3482
+9 -6
drivers/crypto/caam/caamhash.c
··· 1072 1072 1073 1073 desc = edesc->hw_desc; 1074 1074 1075 - state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1076 - if (dma_mapping_error(jrdev, state->buf_dma)) { 1077 - dev_err(jrdev, "unable to map src\n"); 1078 - goto unmap; 1079 - } 1075 + if (buflen) { 1076 + state->buf_dma = dma_map_single(jrdev, buf, buflen, 1077 + DMA_TO_DEVICE); 1078 + if (dma_mapping_error(jrdev, state->buf_dma)) { 1079 + dev_err(jrdev, "unable to map src\n"); 1080 + goto unmap; 1081 + } 1080 1082 1081 - append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1083 + append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1084 + } 1082 1085 1083 1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1084 1087 digestsize);
+1
drivers/crypto/caam/desc.h
··· 1155 1155 #define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) 1156 1156 #define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) 1157 1157 #define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) 1158 + #define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT) 1158 1159 #define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) 1159 1160 #define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) 1160 1161 #define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+9
drivers/crypto/caam/error.h
··· 7 7 8 8 #ifndef CAAM_ERROR_H 9 9 #define CAAM_ERROR_H 10 + 11 + #include "desc.h" 12 + 10 13 #define CAAM_ERROR_STR_MAX 302 11 14 12 15 void caam_strstatus(struct device *dev, u32 status, bool qi_v2); ··· 20 17 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 21 18 int rowsize, int groupsize, struct scatterlist *sg, 22 19 size_t tlen, bool ascii); 20 + 21 + static inline bool is_mdha(u32 algtype) 22 + { 23 + return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) == 24 + OP_ALG_CHA_MDHA; 25 + } 23 26 #endif /* CAAM_ERROR_H */
+1 -1
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
··· 567 567 568 568 /* ORH error code */ 569 569 err = READ_ONCE(*sr->resp.orh) & 0xff; 570 - softreq_destroy(sr); 571 570 572 571 if (sr->callback) 573 572 sr->callback(sr->cb_arg, err); 573 + softreq_destroy(sr); 574 574 575 575 req_completed++; 576 576 }
+19 -21
drivers/crypto/ccree/cc_aead.c
··· 549 549 unsigned int keylen) 550 550 { 551 551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 552 - struct rtattr *rta = (struct rtattr *)key; 553 552 struct cc_crypto_req cc_req = {}; 554 - struct crypto_authenc_key_param *param; 555 553 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 556 - int rc = -EINVAL; 557 554 unsigned int seq_len = 0; 558 555 struct device *dev = drvdata_to_dev(ctx->drvdata); 556 + const u8 *enckey, *authkey; 557 + int rc; 559 558 560 559 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", 561 560 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); ··· 562 563 /* STAT_PHASE_0: Init and sanity checks */ 563 564 564 565 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ 565 - if (!RTA_OK(rta, keylen)) 566 + struct crypto_authenc_keys keys; 567 + 568 + rc = crypto_authenc_extractkeys(&keys, key, keylen); 569 + if (rc) 566 570 goto badkey; 567 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 568 - goto badkey; 569 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 570 - goto badkey; 571 - param = RTA_DATA(rta); 572 - ctx->enc_keylen = be32_to_cpu(param->enckeylen); 573 - key += RTA_ALIGN(rta->rta_len); 574 - keylen -= RTA_ALIGN(rta->rta_len); 575 - if (keylen < ctx->enc_keylen) 576 - goto badkey; 577 - ctx->auth_keylen = keylen - ctx->enc_keylen; 571 + enckey = keys.enckey; 572 + authkey = keys.authkey; 573 + ctx->enc_keylen = keys.enckeylen; 574 + ctx->auth_keylen = keys.authkeylen; 578 575 579 576 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 580 577 /* the nonce is stored in bytes at end of key */ 578 + rc = -EINVAL; 581 579 if (ctx->enc_keylen < 582 580 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) 583 581 goto badkey; 584 582 /* Copy nonce from last 4 bytes in CTR key to 585 583 * first 4 bytes in CTR IV 586 584 */ 587 - memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + 588 - ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, 589 - CTR_RFC3686_NONCE_SIZE); 585 + memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - 586 + 
CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); 590 587 /* Set CTR key size */ 591 588 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; 592 589 } 593 590 } else { /* non-authenc - has just one key */ 591 + enckey = key; 592 + authkey = NULL; 594 593 ctx->enc_keylen = keylen; 595 594 ctx->auth_keylen = 0; 596 595 } ··· 600 603 /* STAT_PHASE_1: Copy key to ctx */ 601 604 602 605 /* Get key material */ 603 - memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); 606 + memcpy(ctx->enckey, enckey, ctx->enc_keylen); 604 607 if (ctx->enc_keylen == 24) 605 608 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 606 609 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 607 - memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); 610 + memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, 611 + ctx->auth_keylen); 608 612 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ 609 - rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); 613 + rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); 610 614 if (rc) 611 615 goto badkey; 612 616 }
+9 -17
drivers/crypto/talitos.c
··· 1361 1361 struct talitos_private *priv = dev_get_drvdata(dev); 1362 1362 bool is_sec1 = has_ftr_sec1(priv); 1363 1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; 1364 - void *err; 1365 1364 1366 1365 if (cryptlen + authsize > max_len) { 1367 1366 dev_err(dev, "length exceeds h/w max limit\n"); 1368 1367 return ERR_PTR(-EINVAL); 1369 1368 } 1370 1369 1371 - if (ivsize) 1372 - iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1373 - 1374 1370 if (!dst || dst == src) { 1375 1371 src_len = assoclen + cryptlen + authsize; 1376 1372 src_nents = sg_nents_for_len(src, src_len); 1377 1373 if (src_nents < 0) { 1378 1374 dev_err(dev, "Invalid number of src SG.\n"); 1379 - err = ERR_PTR(-EINVAL); 1380 - goto error_sg; 1375 + return ERR_PTR(-EINVAL); 1381 1376 } 1382 1377 src_nents = (src_nents == 1) ? 0 : src_nents; 1383 1378 dst_nents = dst ? src_nents : 0; ··· 1382 1387 src_nents = sg_nents_for_len(src, src_len); 1383 1388 if (src_nents < 0) { 1384 1389 dev_err(dev, "Invalid number of src SG.\n"); 1385 - err = ERR_PTR(-EINVAL); 1386 - goto error_sg; 1390 + return ERR_PTR(-EINVAL); 1387 1391 } 1388 1392 src_nents = (src_nents == 1) ? 0 : src_nents; 1389 1393 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); 1390 1394 dst_nents = sg_nents_for_len(dst, dst_len); 1391 1395 if (dst_nents < 0) { 1392 1396 dev_err(dev, "Invalid number of dst SG.\n"); 1393 - err = ERR_PTR(-EINVAL); 1394 - goto error_sg; 1397 + return ERR_PTR(-EINVAL); 1395 1398 } 1396 1399 dst_nents = (dst_nents == 1) ? 
0 : dst_nents; 1397 1400 } ··· 1416 1423 /* if its a ahash, add space for a second desc next to the first one */ 1417 1424 if (is_sec1 && !dst) 1418 1425 alloc_len += sizeof(struct talitos_desc); 1426 + alloc_len += ivsize; 1419 1427 1420 1428 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1421 - if (!edesc) { 1422 - err = ERR_PTR(-ENOMEM); 1423 - goto error_sg; 1429 + if (!edesc) 1430 + return ERR_PTR(-ENOMEM); 1431 + if (ivsize) { 1432 + iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); 1433 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1424 1434 } 1425 1435 memset(&edesc->desc, 0, sizeof(edesc->desc)); 1426 1436 ··· 1441 1445 DMA_BIDIRECTIONAL); 1442 1446 } 1443 1447 return edesc; 1444 - error_sg: 1445 - if (iv_dma) 1446 - dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE); 1447 - return err; 1448 1448 } 1449 1449 1450 1450 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,