Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccree - don't mangle the request assoclen

We were modifying the assoclen field of the caller's request struct in place
(e.g. subtracting the RFC4309/RFC4106 IV size), corrupting the caller's view
of the request. Fix it by keeping an internal copy of assoclen in the
per-request context and operating on that copy instead.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Gilad Ben-Yossef and committed by
Herbert Xu
da3cf67f c9877cbc

+37 -26
+25 -15
drivers/crypto/ccree/cc_aead.c
··· 764 764 dev_dbg(dev, "ASSOC buffer type DLLI\n"); 765 765 hw_desc_init(&desc[idx]); 766 766 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src), 767 - areq->assoclen, NS_BIT); 767 + areq_ctx->assoclen, NS_BIT); 768 768 set_flow_mode(&desc[idx], flow_mode); 769 769 if (ctx->auth_mode == DRV_HASH_XCBC_MAC && 770 770 areq_ctx->cryptlen > 0) ··· 1113 1113 struct cc_hw_desc desc[], 1114 1114 unsigned int *seq_size) 1115 1115 { 1116 + struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1116 1117 unsigned int idx = *seq_size; 1118 + 1117 1119 /* Hash associated data */ 1118 - if (req->assoclen > 0) 1120 + if (areq_ctx->assoclen > 0) 1119 1121 cc_set_assoc_desc(req, DIN_HASH, desc, &idx); 1120 1122 1121 1123 /* Hash IV */ ··· 1345 1343 { 1346 1344 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 1347 1345 struct device *dev = drvdata_to_dev(ctx->drvdata); 1348 - unsigned int assoclen = req->assoclen; 1346 + unsigned int assoclen = areq_ctx->assoclen; 1349 1347 unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ? 1350 1348 (req->cryptlen - ctx->authsize) : req->cryptlen; 1351 1349 ··· 1504 1502 idx++; 1505 1503 1506 1504 /* process assoc data */ 1507 - if (req->assoclen > 0) { 1505 + if (req_ctx->assoclen > 0) { 1508 1506 cc_set_assoc_desc(req, DIN_HASH, desc, &idx); 1509 1507 } else { 1510 1508 hw_desc_init(&desc[idx]); ··· 1596 1594 * NIST Special Publication 800-38C 1597 1595 */ 1598 1596 *b0 |= (8 * ((m - 2) / 2)); 1599 - if (req->assoclen > 0) 1597 + if (req_ctx->assoclen > 0) 1600 1598 *b0 |= 64; /* Enable bit 6 if Adata exists. */ 1601 1599 1602 1600 rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */ ··· 1607 1605 /* END of "taken from crypto/ccm.c" */ 1608 1606 1609 1607 /* l(a) - size of associated data. 
*/ 1610 - req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen); 1608 + req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen); 1611 1609 1612 1610 memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1); 1613 1611 req->iv[15] = 1; ··· 1639 1637 memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, 1640 1638 CCM_BLOCK_IV_SIZE); 1641 1639 req->iv = areq_ctx->ctr_iv; 1642 - req->assoclen -= CCM_BLOCK_IV_SIZE; 1640 + areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE; 1643 1641 } 1644 1642 1645 1643 static void cc_set_ghash_desc(struct aead_request *req, ··· 1847 1845 // for gcm and rfc4106. 1848 1846 cc_set_ghash_desc(req, desc, seq_size); 1849 1847 /* process(ghash) assoc data */ 1850 - if (req->assoclen > 0) 1848 + if (req_ctx->assoclen > 0) 1851 1849 cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); 1852 1850 cc_set_gctr_desc(req, desc, seq_size); 1853 1851 /* process(gctr+ghash) */ ··· 1871 1869 (req->cryptlen - ctx->authsize); 1872 1870 __be32 counter = cpu_to_be32(2); 1873 1871 1874 - dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", 1875 - __func__, cryptlen, req->assoclen, ctx->authsize); 1872 + dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n", 1873 + __func__, cryptlen, req_ctx->assoclen, ctx->authsize); 1876 1874 1877 1875 memset(req_ctx->hkey, 0, AES_BLOCK_SIZE); 1878 1876 ··· 1888 1886 if (!req_ctx->plaintext_authenticate_only) { 1889 1887 __be64 temp64; 1890 1888 1891 - temp64 = cpu_to_be64(req->assoclen * 8); 1889 + temp64 = cpu_to_be64(req_ctx->assoclen * 8); 1892 1890 memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); 1893 1891 temp64 = cpu_to_be64(cryptlen * 8); 1894 1892 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); ··· 1898 1896 */ 1899 1897 __be64 temp64; 1900 1898 1901 - temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + 1902 - cryptlen) * 8); 1899 + temp64 = cpu_to_be64((req_ctx->assoclen + 1900 + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8); 1903 1901 
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); 1904 1902 temp64 = 0; 1905 1903 memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); ··· 1919 1917 memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, 1920 1918 GCM_BLOCK_RFC4_IV_SIZE); 1921 1919 req->iv = areq_ctx->ctr_iv; 1922 - req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; 1920 + areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE; 1923 1921 } 1924 1922 1925 1923 static int cc_proc_aead(struct aead_request *req, ··· 1944 1942 /* Check data length according to mode */ 1945 1943 if (validate_data_size(ctx, direct, req)) { 1946 1944 dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", 1947 - req->cryptlen, req->assoclen); 1945 + req->cryptlen, areq_ctx->assoclen); 1948 1946 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN); 1949 1947 return -EINVAL; 1950 1948 } ··· 2097 2095 2098 2096 /* No generated IV required */ 2099 2097 areq_ctx->backup_iv = req->iv; 2098 + areq_ctx->assoclen = req->assoclen; 2100 2099 areq_ctx->backup_giv = NULL; 2101 2100 areq_ctx->is_gcm4543 = false; 2102 2101 ··· 2129 2126 2130 2127 /* No generated IV required */ 2131 2128 areq_ctx->backup_iv = req->iv; 2129 + areq_ctx->assoclen = req->assoclen; 2132 2130 areq_ctx->backup_giv = NULL; 2133 2131 areq_ctx->is_gcm4543 = true; 2134 2132 ··· 2151 2147 2152 2148 /* No generated IV required */ 2153 2149 areq_ctx->backup_iv = req->iv; 2150 + areq_ctx->assoclen = req->assoclen; 2154 2151 areq_ctx->backup_giv = NULL; 2155 2152 areq_ctx->is_gcm4543 = false; 2156 2153 ··· 2181 2176 2182 2177 /* No generated IV required */ 2183 2178 areq_ctx->backup_iv = req->iv; 2179 + areq_ctx->assoclen = req->assoclen; 2184 2180 areq_ctx->backup_giv = NULL; 2185 2181 2186 2182 areq_ctx->is_gcm4543 = true; ··· 2301 2295 2302 2296 /* No generated IV required */ 2303 2297 areq_ctx->backup_iv = req->iv; 2298 + areq_ctx->assoclen = req->assoclen; 2304 2299 areq_ctx->backup_giv = NULL; 2305 2300 2306 2301 areq_ctx->plaintext_authenticate_only = false; ··· 
2330 2323 2331 2324 /* No generated IV required */ 2332 2325 areq_ctx->backup_iv = req->iv; 2326 + areq_ctx->assoclen = req->assoclen; 2333 2327 areq_ctx->backup_giv = NULL; 2334 2328 2335 2329 cc_proc_rfc4_gcm(req); ··· 2362 2354 2363 2355 /* No generated IV required */ 2364 2356 areq_ctx->backup_iv = req->iv; 2357 + areq_ctx->assoclen = req->assoclen; 2365 2358 areq_ctx->backup_giv = NULL; 2366 2359 2367 2360 areq_ctx->plaintext_authenticate_only = false; ··· 2391 2382 2392 2383 /* No generated IV required */ 2393 2384 areq_ctx->backup_iv = req->iv; 2385 + areq_ctx->assoclen = req->assoclen; 2394 2386 areq_ctx->backup_giv = NULL; 2395 2387 2396 2388 cc_proc_rfc4_gcm(req);
+1
drivers/crypto/ccree/cc_aead.h
··· 67 67 u8 backup_mac[MAX_MAC_SIZE]; 68 68 u8 *backup_iv; /*store iv for generated IV flow*/ 69 69 u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/ 70 + u32 assoclen; /* internal assoclen */ 70 71 dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */ 71 72 /* buffer for internal ccm configurations */ 72 73 dma_addr_t ccm_iv0_dma_addr;
+11 -11
drivers/crypto/ccree/cc_buffer_mgr.c
··· 65 65 { 66 66 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 67 67 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 68 - u32 skip = req->assoclen + req->cryptlen; 68 + u32 skip = areq_ctx->assoclen + req->cryptlen; 69 69 70 70 if (areq_ctx->is_gcm4543) 71 71 skip += crypto_aead_ivsize(tfm); ··· 575 575 576 576 dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", 577 577 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, 578 - req->assoclen, req->cryptlen); 579 - size_to_unmap = req->assoclen + req->cryptlen; 578 + areq_ctx->assoclen, req->cryptlen); 579 + size_to_unmap = areq_ctx->assoclen + req->cryptlen; 580 580 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) 581 581 size_to_unmap += areq_ctx->req_authsize; 582 582 if (areq_ctx->is_gcm4543) ··· 663 663 struct scatterlist *current_sg = req->src; 664 664 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 665 665 unsigned int sg_index = 0; 666 - u32 size_of_assoc = req->assoclen; 666 + u32 size_of_assoc = areq_ctx->assoclen; 667 667 struct device *dev = drvdata_to_dev(drvdata); 668 668 669 669 if (areq_ctx->is_gcm4543) ··· 674 674 goto chain_assoc_exit; 675 675 } 676 676 677 - if (req->assoclen == 0) { 677 + if (areq_ctx->assoclen == 0) { 678 678 areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; 679 679 areq_ctx->assoc.nents = 0; 680 680 areq_ctx->assoc.mlli_nents = 0; ··· 734 734 cc_dma_buf_type(areq_ctx->assoc_buff_type), 735 735 areq_ctx->assoc.nents); 736 736 cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, 737 - req->assoclen, 0, is_last, 737 + areq_ctx->assoclen, 0, is_last, 738 738 &areq_ctx->assoc.mlli_nents); 739 739 areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; 740 740 } ··· 893 893 u32 src_mapped_nents = 0, dst_mapped_nents = 0; 894 894 u32 offset = 0; 895 895 /* non-inplace mode */ 896 - unsigned int size_for_map = req->assoclen + req->cryptlen; 896 + unsigned int size_for_map = 
areq_ctx->assoclen + req->cryptlen; 897 897 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 898 898 u32 sg_index = 0; 899 899 bool is_gcm4543 = areq_ctx->is_gcm4543; 900 - u32 size_to_skip = req->assoclen; 900 + u32 size_to_skip = areq_ctx->assoclen; 901 901 902 902 if (is_gcm4543) 903 903 size_to_skip += crypto_aead_ivsize(tfm); ··· 941 941 areq_ctx->src_offset = offset; 942 942 943 943 if (req->src != req->dst) { 944 - size_for_map = req->assoclen + req->cryptlen; 944 + size_for_map = areq_ctx->assoclen + req->cryptlen; 945 945 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 946 946 authsize : 0; 947 947 if (is_gcm4543) ··· 1107 1107 areq_ctx->ccm_iv0_dma_addr = dma_addr; 1108 1108 1109 1109 rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, 1110 - &sg_data, req->assoclen); 1110 + &sg_data, areq_ctx->assoclen); 1111 1111 if (rc) 1112 1112 goto aead_map_failure; 1113 1113 } ··· 1158 1158 areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; 1159 1159 } 1160 1160 1161 - size_to_map = req->cryptlen + req->assoclen; 1161 + size_to_map = req->cryptlen + areq_ctx->assoclen; 1162 1162 if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) 1163 1163 size_to_map += authsize; 1164 1164