Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: ccree - use fine grained DMA mapping dir

Use a fine grained specification of DMA mapping directions
in certain cases, allowing both a more optimized operation
as well as silencing a harmless, though pesky
dma-debug warning.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reported-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Gilad Ben-Yossef and committed by
Herbert Xu
a260436c 476c9ab7

+15 -12
+15 -12
drivers/crypto/ccree/cc_buffer_mgr.c
··· 356 356 req_ctx->mlli_params.mlli_dma_addr); 357 357 } 358 358 359 - dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); 360 - dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); 361 - 362 359 if (src != dst) { 363 - dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL); 360 + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); 361 + dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); 364 362 dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); 363 + dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); 364 + } else { 365 + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); 366 + dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); 365 367 } 366 368 } 367 369 ··· 379 377 u32 dummy = 0; 380 378 int rc = 0; 381 379 u32 mapped_nents = 0; 380 + int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); 382 381 383 382 req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; 384 383 mlli_params->curr_pool = NULL; ··· 402 399 } 403 400 404 401 /* Map the src SGL */ 405 - rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, 402 + rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, 406 403 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); 407 404 if (rc) 408 405 goto cipher_exit; ··· 419 416 } 420 417 } else { 421 418 /* Map the dst sg */ 422 - rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, 419 + rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE, 423 420 &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, 424 421 &dummy, &mapped_nents); 425 422 if (rc) ··· 459 456 struct aead_req_ctx *areq_ctx = aead_request_ctx(req); 460 457 unsigned int hw_iv_size = areq_ctx->hw_iv_size; 461 458 struct cc_drvdata *drvdata = dev_get_drvdata(dev); 459 + int src_direction = (req->src != req->dst ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); 462 460 463 461 if (areq_ctx->mac_buf_dma_addr) { 464 462 dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, ··· 518 514 sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, 519 515 areq_ctx->assoclen, req->cryptlen); 520 516 521 - dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, 522 - DMA_BIDIRECTIONAL); 517 + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); 523 518 if (req->src != req->dst) { 524 519 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", 525 520 sg_virt(req->dst)); 526 - dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, 527 - DMA_BIDIRECTIONAL); 521 + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); 528 522 } 529 523 if (drvdata->coherent && 530 524 areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && ··· 845 843 else 846 844 size_for_map -= authsize; 847 845 848 - rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, 846 + rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, 849 847 &areq_ctx->dst.mapped_nents, 850 848 LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, 851 849 &dst_mapped_nents); ··· 1058 1056 size_to_map += authsize; 1059 1057 } 1060 1058 1061 - rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, 1059 + rc = cc_map_sg(dev, req->src, size_to_map, 1060 + (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL), 1062 1061 &areq_ctx->src.mapped_nents, 1063 1062 (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + 1064 1063 LLI_MAX_NUM_OF_DATA_ENTRIES),