Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccree - remove special handling of chained sg

We were handling chained scattergather lists with specialized code
needlessly as the regular sg APIs handle them just fine. The code
handling this also had an (unused) code path with a use-before-init
error, flagged by Coverity.

Remove all special handling of chained sg and leave their handling
to the regular sg APIs.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Gilad Ben-Yossef and committed by Herbert Xu.
c4b22bf5 151ded73

+22 -76
+22 -76
drivers/crypto/ccree/cc_buffer_mgr.c
··· 83 83 */ 84 84 static unsigned int cc_get_sgl_nents(struct device *dev, 85 85 struct scatterlist *sg_list, 86 - unsigned int nbytes, u32 *lbytes, 87 - bool *is_chained) 86 + unsigned int nbytes, u32 *lbytes) 88 87 { 89 88 unsigned int nents = 0; 90 89 91 90 while (nbytes && sg_list) { 92 - if (sg_list->length) { 93 - nents++; 94 - /* get the number of bytes in the last entry */ 95 - *lbytes = nbytes; 96 - nbytes -= (sg_list->length > nbytes) ? 97 - nbytes : sg_list->length; 98 - sg_list = sg_next(sg_list); 99 - } else { 100 - sg_list = (struct scatterlist *)sg_page(sg_list); 101 - if (is_chained) 102 - *is_chained = true; 103 - } 91 + nents++; 92 + /* get the number of bytes in the last entry */ 93 + *lbytes = nbytes; 94 + nbytes -= (sg_list->length > nbytes) ? 95 + nbytes : sg_list->length; 96 + sg_list = sg_next(sg_list); 104 97 } 105 98 dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); 106 99 return nents; ··· 135 142 { 136 143 u32 nents, lbytes; 137 144 138 - nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL); 145 + nents = cc_get_sgl_nents(dev, sg, end, &lbytes); 139 146 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip, 140 147 (direct == CC_SG_TO_BUF)); 141 148 } ··· 307 314 sgl_data->num_of_buffers++; 308 315 } 309 316 310 - static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents, 311 - enum dma_data_direction direction) 312 - { 313 - u32 i, j; 314 - struct scatterlist *l_sg = sg; 315 - 316 - for (i = 0; i < nents; i++) { 317 - if (!l_sg) 318 - break; 319 - if (dma_map_sg(dev, l_sg, 1, direction) != 1) { 320 - dev_err(dev, "dma_map_page() sg buffer failed\n"); 321 - goto err; 322 - } 323 - l_sg = sg_next(l_sg); 324 - } 325 - return nents; 326 - 327 - err: 328 - /* Restore mapped parts */ 329 - for (j = 0; j < i; j++) { 330 - if (!sg) 331 - break; 332 - dma_unmap_sg(dev, sg, 1, direction); 333 - sg = sg_next(sg); 334 - } 335 - return 0; 336 - } 337 - 338 317 static int cc_map_sg(struct device *dev, 
struct scatterlist *sg, 339 318 unsigned int nbytes, int direction, u32 *nents, 340 319 u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) 341 320 { 342 - bool is_chained = false; 343 - 344 321 if (sg_is_last(sg)) { 345 322 /* One entry only case -set to DLLI */ 346 323 if (dma_map_sg(dev, sg, 1, direction) != 1) { ··· 324 361 *nents = 1; 325 362 *mapped_nents = 1; 326 363 } else { /*sg_is_last*/ 327 - *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes, 328 - &is_chained); 364 + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); 329 365 if (*nents > max_sg_nents) { 330 366 *nents = 0; 331 367 dev_err(dev, "Too many fragments. current %d max %d\n", 332 368 *nents, max_sg_nents); 333 369 return -ENOMEM; 334 370 } 335 - if (!is_chained) { 336 - /* In case of mmu the number of mapped nents might 337 - * be changed from the original sgl nents 338 - */ 339 - *mapped_nents = dma_map_sg(dev, sg, *nents, direction); 340 - if (*mapped_nents == 0) { 341 - *nents = 0; 342 - dev_err(dev, "dma_map_sg() sg buffer failed\n"); 343 - return -ENOMEM; 344 - } 345 - } else { 346 - /*In this case the driver maps entry by entry so it 347 - * must have the same nents before and after map 348 - */ 349 - *mapped_nents = cc_dma_map_sg(dev, sg, *nents, 350 - direction); 351 - if (*mapped_nents != *nents) { 352 - *nents = *mapped_nents; 353 - dev_err(dev, "dma_map_sg() sg buffer failed\n"); 354 - return -ENOMEM; 355 - } 371 + /* In case of mmu the number of mapped nents might 372 + * be changed from the original sgl nents 373 + */ 374 + *mapped_nents = dma_map_sg(dev, sg, *nents, direction); 375 + if (*mapped_nents == 0) { 376 + *nents = 0; 377 + dev_err(dev, "dma_map_sg() sg buffer failed\n"); 378 + return -ENOMEM; 356 379 } 357 380 } 358 381 ··· 520 571 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 521 572 struct cc_drvdata *drvdata = dev_get_drvdata(dev); 522 573 u32 dummy; 523 - bool chained; 524 574 u32 size_to_unmap = 0; 525 575 526 576 if (areq_ctx->mac_buf_dma_addr) { ··· 584 636 
size_to_unmap += crypto_aead_ivsize(tfm); 585 637 586 638 dma_unmap_sg(dev, req->src, 587 - cc_get_sgl_nents(dev, req->src, size_to_unmap, 588 - &dummy, &chained), 639 + cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy), 589 640 DMA_BIDIRECTIONAL); 590 641 if (req->src != req->dst) { 591 642 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", 592 643 sg_virt(req->dst)); 593 644 dma_unmap_sg(dev, req->dst, 594 645 cc_get_sgl_nents(dev, req->dst, size_to_unmap, 595 - &dummy, &chained), 646 + &dummy), 596 647 DMA_BIDIRECTIONAL); 597 648 } 598 649 if (drvdata->coherent && ··· 969 1022 unsigned int size_for_map = req->assoclen + req->cryptlen; 970 1023 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 971 1024 u32 sg_index = 0; 972 - bool chained = false; 973 1025 bool is_gcm4543 = areq_ctx->is_gcm4543; 974 1026 u32 size_to_skip = req->assoclen; 975 1027 ··· 989 1043 size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 990 1044 authsize : 0; 991 1045 src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, 992 - &src_last_bytes, &chained); 1046 + &src_last_bytes); 993 1047 sg_index = areq_ctx->src_sgl->length; 994 1048 //check where the data starts 995 1049 while (sg_index <= size_to_skip) { ··· 1029 1083 } 1030 1084 1031 1085 dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, 1032 - &dst_last_bytes, &chained); 1086 + &dst_last_bytes); 1033 1087 sg_index = areq_ctx->dst_sgl->length; 1034 1088 offset = size_to_skip; 1035 1089 ··· 1430 1484 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", 1431 1485 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); 1432 1486 areq_ctx->in_nents = 1433 - cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL); 1487 + cc_get_sgl_nents(dev, src, nbytes, &dummy); 1434 1488 sg_copy_to_buffer(src, areq_ctx->in_nents, 1435 1489 &curr_buff[*curr_buff_cnt], nbytes); 1436 1490 *curr_buff_cnt += nbytes;