Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

crypto: caam - use len instead of nents for building HW S/G table

Currently, conversion of the SW S/G table into the HW S/G layout relies on
the nents returned by sg_nents_for_len(sg, len).
However, this leaves the possibility of the HW S/G table referencing more data
than needed: since the buffer length in HW S/G entries is filled using
sg_dma_len(sg), the last entry in the HW S/G table might have a length
that is bigger than needed for the crypto request.

This way of converting the S/G table is fine, unless more entries have to be
appended to the HW S/G table after the conversion.
In that case, the crypto engine would access data from the S/G entry having
the incorrect (too large) length, instead of advancing in the S/G table.
No such case exists today, but the upcoming implementation of
IV update for skcipher algorithms needs to add an S/G entry after
the req->dst S/G (corresponding to the output IV).

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Horia Geantă, committed by Herbert Xu
059d73ee 1fa6d053

+105 -99
+17 -18
drivers/crypto/caam/caamalg.c
@@ -1284 +1284 @@
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
 	unsigned int authsize = ctx->authsize;
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			return ERR_PTR(dst_nents);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 	}
@@ -1385 +1386 @@
 
 	sec4_sg_index = 0;
 	if (mapped_src_nents > 1) {
-		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+		sg_to_sec4_sg_last(req->src, src_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 		sec4_sg_index += mapped_src_nents;
 	}
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
 
@@ -1755 +1756 @@
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
 	if (dst_sg_idx)
-		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+		sg_to_sec4_sg_last(req->src, req->cryptlen, edesc->sec4_sg +
 				   !!ivsize, 0);
 
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->cryptlen,
 				   edesc->sec4_sg + dst_sg_idx, 0);
 	}
 
+17 -19
drivers/crypto/caam/caamalg_qi.c
@@ -917 +917 @@
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -939 +938 @@
 	}
 
 	if (likely(req->src == req->dst)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -958 +957 @@
 			return ERR_PTR(-ENOMEM);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -1081 +1082 @@
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(qidev, qm_sg_dma)) {
@@ -1338 +1340 @@
 	edesc->drv_req.drv_ctx = drv_ctx;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
 				 dst_sg_idx, 0);
 
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+29 -31
drivers/crypto/caam/caamalg_qi2.c
@@ -371 +371 @@
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -388 +387 @@
 	}
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -433 +434 @@
 			mapped_dst_nents = 0;
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -535 +536 @@
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, qm_sg_dma)) {
@@ -1157 +1159 @@
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
 				 dst_sg_idx, 0);
 
 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
@@ -3420 +3422 @@
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3463 +3465 @@
 			goto unmap_ctx;
 
 		if (mapped_nents) {
-			sg_to_qm_sg_last(req->src, mapped_nents,
+			sg_to_qm_sg_last(req->src, src_len,
 					 sg_table + qm_sg_src_index, 0);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
@@ -3651 +3653 @@
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3737 +3739 @@
 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
 
 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 						  qm_sg_bytes, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3880 +3882 @@
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3916 +3918 @@
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
 	if (*next_buflen)
 		scatterwalk_map_and_copy(next_buf, req->src,
@@ -4035 +4037 @@
 	if (ret)
 		goto unmap;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -4105 +4107 @@
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -4142 +4144 @@
 	if (mapped_nents > 1) {
 		int qm_sg_bytes;
 
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+		sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
 		qm_sg_bytes = pad_sg_nents(mapped_nents) *
 			      sizeof(*sg_table);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+7 -8
drivers/crypto/caam/caamhash.c
@@ -729 +729 @@
 	unsigned int sgsize = sizeof(*sg) *
			      pad_sg_nents(first_sg + nents);
 
-	sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+	sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
 
 	src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
 	if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -788 +788 @@
 
 	if (to_hash) {
 		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -835 +835 @@
 			goto unmap_ctx;
 
 		if (mapped_nents)
-			sg_to_sec4_sg_last(req->src, mapped_nents,
+			sg_to_sec4_sg_last(req->src, src_len,
 					   edesc->sec4_sg + sec4_sg_src_index,
 					   0);
 		else
@@ -1208 +1208 @@
 
 	if (to_hash) {
 		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -1250 +1250 @@
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_sec4_sg_last(req->src, mapped_nents,
-				   edesc->sec4_sg + 1, 0);
+		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
 
 		if (*next_buflen) {
 			scatterwalk_map_and_copy(next_buf, req->src,
+2 -2
drivers/crypto/caam/caampkc.c
@@ -306 +306 @@
 		       0);
 
 	if (sec4_sg_index)
-		sg_to_sec4_sg_last(req_ctx->fixup_src, src_nents,
+		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
 				   edesc->sec4_sg + !!diff_size, 0);
 
 	if (dst_nents > 1)
-		sg_to_sec4_sg_last(req->dst, dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 
 	/* Save nents for later use in Job Descriptor */
+11 -7
drivers/crypto/caam/sg_sw_qm.h
@@ -54 +54 @@
  * but does not have final bit; instead, returns last entry
  */
 static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -75 +71 @@
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
 }
 
+11 -7
drivers/crypto/caam/sg_sw_qm2.h
@@ -25 +25 @@
  * but does not have final bit; instead, returns last entry
  */
 static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -46 +42 @@
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct dpaa2_sg_entry *qm_sg_ptr,
 				    u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	dpaa2_sg_set_final(qm_sg_ptr, true);
 }
 
+11 -7
drivers/crypto/caam/sg_sw_sec4.h
@@ -45 +45 @@
  * but does not have final bit; instead, returns last entry
  */
 static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
 	      struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
 {
-	while (sg_count) {
-		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
-				   sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+				   offset);
 		sec4_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return sec4_sg_ptr - 1;
 }
@@ -74 +70 @@
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
 				      struct sec4_sg_entry *sec4_sg_ptr,
 				      u16 offset)
 {
-	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+	sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
 	sg_to_sec4_set_last(sec4_sg_ptr);
 }
 