Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccree - protect against empty or NULL scatterlists

Deal gracefully with a NULL or empty scatterlist which can happen
if both cryptlen and assoclen are zero and we're doing in-place
AEAD encryption.

This fixes a crash when this causes us to try and map a NULL page,
at least with some platforms / DMA mapping configs.

Cc: stable@vger.kernel.org # v4.19+
Reported-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Gilad Ben-Yossef and committed by Herbert Xu

Commit ce0fc6db (parent 07b586fe)

+28 -35
+27 -35
drivers/crypto/ccree/cc_buffer_mgr.c
··· 87 87 { 88 88 unsigned int nents = 0; 89 89 90 + *lbytes = 0; 91 + 90 92 while (nbytes && sg_list) { 91 93 nents++; 92 94 /* get the number of bytes in the last entry */ ··· 97 95 nbytes : sg_list->length; 98 96 sg_list = sg_next(sg_list); 99 97 } 98 + 100 99 dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); 101 100 return nents; 102 101 } ··· 293 290 unsigned int nbytes, int direction, u32 *nents, 294 291 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) 295 292 { 296 - if (sg_is_last(sg)) { 297 - /* One entry only case -set to DLLI */ 298 - if (dma_map_sg(dev, sg, 1, direction) != 1) { 299 - dev_err(dev, "dma_map_sg() single buffer failed\n"); 300 - return -ENOMEM; 301 - } 302 - dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", 303 - &sg_dma_address(sg), sg_page(sg), sg_virt(sg), 304 - sg->offset, sg->length); 305 - *lbytes = nbytes; 306 - *nents = 1; 307 - *mapped_nents = 1; 308 - } else { /*sg_is_last*/ 309 - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); 310 - if (*nents > max_sg_nents) { 311 - *nents = 0; 312 - dev_err(dev, "Too many fragments. current %d max %d\n", 313 - *nents, max_sg_nents); 314 - return -ENOMEM; 315 - } 316 - /* In case of mmu the number of mapped nents might 317 - * be changed from the original sgl nents 318 - */ 319 - *mapped_nents = dma_map_sg(dev, sg, *nents, direction); 320 - if (*mapped_nents == 0) { 321 - *nents = 0; 322 - dev_err(dev, "dma_map_sg() sg buffer failed\n"); 323 - return -ENOMEM; 324 - } 293 + int ret = 0; 294 + 295 + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); 296 + if (*nents > max_sg_nents) { 297 + *nents = 0; 298 + dev_err(dev, "Too many fragments. current %d max %d\n", 299 + *nents, max_sg_nents); 300 + return -ENOMEM; 325 301 } 302 + 303 + ret = dma_map_sg(dev, sg, *nents, direction); 304 + if (dma_mapping_error(dev, ret)) { 305 + *nents = 0; 306 + dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); 307 + return -ENOMEM; 308 + } 309 + 310 + *mapped_nents = ret; 326 311 327 312 return 0; 328 313 } ··· 546 555 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, 547 556 areq_ctx->assoclen, req->cryptlen); 548 557 549 - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); 558 + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, 559 + DMA_BIDIRECTIONAL); 550 560 if (req->src != req->dst) { 551 561 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", 552 562 sg_virt(req->dst)); 553 - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), 563 + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, 554 564 DMA_BIDIRECTIONAL); 555 565 } 556 566 if (drvdata->coherent && ··· 873 881 &src_last_bytes); 874 882 sg_index = areq_ctx->src_sgl->length; 875 883 //check where the data starts 876 - while (sg_index <= size_to_skip) { 884 + while (src_mapped_nents && (sg_index <= size_to_skip)) { 877 885 src_mapped_nents--; 878 886 offset -= areq_ctx->src_sgl->length; 879 887 sgl = sg_next(areq_ctx->src_sgl); ··· 900 908 size_for_map += crypto_aead_ivsize(tfm); 901 909 902 910 rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, 903 - &areq_ctx->dst.nents, 911 + &areq_ctx->dst.mapped_nents, 904 912 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, 905 913 &dst_mapped_nents); 906 914 if (rc) ··· 913 921 offset = size_to_skip; 914 922 915 923 //check where the data starts 916 - while (sg_index <= size_to_skip) { 924 + while (dst_mapped_nents && sg_index <= size_to_skip) { 917 925 dst_mapped_nents--; 918 926 offset -= areq_ctx->dst_sgl->length; 919 927 sgl = sg_next(areq_ctx->dst_sgl); ··· 1115 1123 if (is_gcm4543) 1116 1124 size_to_map += crypto_aead_ivsize(tfm); 1117 1125 rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, 1118 - &areq_ctx->src.nents, 1126 + &areq_ctx->src.mapped_nents, 1119 1127 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + 1120 1128 LLI_MAX_NUM_OF_DATA_ENTRIES), 1121 1129 &dummy, &mapped_nents);
+1
drivers/crypto/ccree/cc_buffer_mgr.h
··· 25 25 26 26 struct cc_mlli { 27 27 cc_sram_addr_t sram_addr; 28 + unsigned int mapped_nents; 28 29 unsigned int nents; //sg nents 29 30 unsigned int mlli_nents; //mlli nents might be different than the above 30 31 };