
crypto: caam - chaining support

Support chained scatterlists for aead, ablkcipher and ahash.

Signed-off-by: Yuan Kang <Yuan.Kang@freescale.com>

- fix a dma unmap leak
- drop the unlikely() from src == dst checks, since experience with AF_ALG shows in-place (src == dst) requests are common

Signed-off-by: Kudupudi Ugendreshwar <B38865@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by Yuan Kang, committed by Herbert Xu (643b39b0, b0e09bae)
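Background for the diffs that follow: a chained scatterlist links several scatterlist tables together, with the last entry of one table acting as a pointer to the next table rather than describing data. Before this patch the driver's __sg_count() would BUG() on such lists; afterwards it records the condition in a bool and the mapping helpers walk across the links. The stand-alone C model below sketches that walk-and-count logic; mini_sg, mini_sg_next() and mini_sg_count() are illustrative stand-ins, not the kernel's struct scatterlist API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy scatterlist: a zero-length entry with a chain pointer links tables. */
struct mini_sg {
        size_t length;          /* 0 marks a chain link in this model */
        struct mini_sg *chain;  /* next table, valid only on chain links */
        bool last;              /* terminator, like sg_is_last() */
};

/* Advance to the next data entry, hopping over chain links, in the
 * spirit of scatterwalk_sg_next(). */
static struct mini_sg *mini_sg_next(struct mini_sg *sg)
{
        if (sg->last)
                return NULL;
        sg++;
        return sg->length == 0 ? sg->chain : sg;
}

/* Count data entries and note whether a chain link was crossed,
 * mirroring what the reworked __sg_count() reports via *chained. */
static int mini_sg_count(struct mini_sg *sg, bool *chained)
{
        int nents = 0;

        while (sg) {
                nents++;
                if (!sg->last && (sg + 1)->length == 0)
                        *chained = true;
                sg = mini_sg_next(sg);
        }
        return nents;
}

int main(void)
{
        struct mini_sg tail[2] = {
                { .length = 512 },
                { .length = 256, .last = true },
        };
        struct mini_sg head[3] = {
                { .length = 4096 },
                { .length = 1024 },
                { .length = 0, .chain = tail },  /* link, not data */
        };
        bool chained = false;

        /* Prints "4 entries, chained=1": the link entry is not counted. */
        printf("%d entries, chained=%d\n", mini_sg_count(head, &chained),
               chained);
        return 0;
}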

3 files changed: +147 -64
drivers/crypto/caam/caamalg.c: +70 -44
···
 /*
  * aead_edesc - s/w-extended aead descriptor
  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @assoc_chained: if associated data is chained
  * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
···
  */
 struct aead_edesc {
         int assoc_nents;
+        bool assoc_chained;
         int src_nents;
+        bool src_chained;
         int dst_nents;
+        bool dst_chained;
         dma_addr_t iv_dma;
         int sec4_sg_bytes;
         dma_addr_t sec4_sg_dma;
···
 /*
  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
···
  */
 struct ablkcipher_edesc {
         int src_nents;
+        bool src_chained;
         int dst_nents;
+        bool dst_chained;
         dma_addr_t iv_dma;
         int sec4_sg_bytes;
         dma_addr_t sec4_sg_dma;
···
 };
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
-                       struct scatterlist *dst, int src_nents, int dst_nents,
+                       struct scatterlist *dst, int src_nents,
+                       bool src_chained, int dst_nents, bool dst_chained,
                        dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
                        int sec4_sg_bytes)
 {
-        if (unlikely(dst != src)) {
-                dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-                dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+        if (dst != src) {
+                dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
+                                     src_chained);
+                dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
+                                     dst_chained);
         } else {
-                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+                dma_unmap_sg_chained(dev, src, src_nents ? : 1,
+                                     DMA_BIDIRECTIONAL, src_chained);
         }
 
         if (iv_dma)
···
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         int ivsize = crypto_aead_ivsize(aead);
 
-        dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+        dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
+                             DMA_TO_DEVICE, edesc->assoc_chained);
 
         caam_unmap(dev, req->src, req->dst,
-                   edesc->src_nents, edesc->dst_nents,
-                   edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
-                   edesc->sec4_sg_bytes);
+                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+                   edesc->dst_chained, edesc->iv_dma, ivsize,
+                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
 static void ablkcipher_unmap(struct device *dev,
···
         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
         caam_unmap(dev, req->src, req->dst,
-                   edesc->src_nents, edesc->dst_nents,
-                   edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
-                   edesc->sec4_sg_bytes);
+                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+                   edesc->dst_chained, edesc->iv_dma, ivsize,
+                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
···
         dma_addr_t iv_dma = 0;
         int sgc;
         bool all_contig = true;
+        bool assoc_chained = false, src_chained = false, dst_chained = false;
         int ivsize = crypto_aead_ivsize(aead);
         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 
-        assoc_nents = sg_count(req->assoc, req->assoclen);
-        src_nents = sg_count(req->src, req->cryptlen);
+        assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+        src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
         if (unlikely(req->dst != req->src))
-                dst_nents = sg_count(req->dst, req->cryptlen);
+                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
-        sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
-                         DMA_BIDIRECTIONAL);
+        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+                                 DMA_BIDIRECTIONAL, assoc_chained);
         if (likely(req->src == req->dst)) {
-                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                 DMA_BIDIRECTIONAL);
+                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                         DMA_BIDIRECTIONAL, src_chained);
         } else {
-                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                 DMA_TO_DEVICE);
-                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-                                 DMA_FROM_DEVICE);
+                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                         DMA_TO_DEVICE, src_chained);
+                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+                                         DMA_FROM_DEVICE, dst_chained);
         }
 
         /* Check if data are contiguous */
···
         }
 
         edesc->assoc_nents = assoc_nents;
+        edesc->assoc_chained = assoc_chained;
         edesc->src_nents = src_nents;
+        edesc->src_chained = src_chained;
         edesc->dst_nents = dst_nents;
+        edesc->dst_chained = dst_chained;
         edesc->iv_dma = iv_dma;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
···
         int sgc;
         u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
         int ivsize = crypto_aead_ivsize(aead);
+        bool assoc_chained = false, src_chained = false, dst_chained = false;
         int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 
-        assoc_nents = sg_count(req->assoc, req->assoclen);
-        src_nents = sg_count(req->src, req->cryptlen);
+        assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+        src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
         if (unlikely(req->dst != req->src))
-                dst_nents = sg_count(req->dst, req->cryptlen);
+                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
 
-        sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
-                         DMA_BIDIRECTIONAL);
+        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+                                 DMA_BIDIRECTIONAL, assoc_chained);
         if (likely(req->src == req->dst)) {
-                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                 DMA_BIDIRECTIONAL);
+                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                         DMA_BIDIRECTIONAL, src_chained);
         } else {
-                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                 DMA_TO_DEVICE);
-                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-                                 DMA_FROM_DEVICE);
+                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                         DMA_TO_DEVICE, src_chained);
+                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+                                         DMA_FROM_DEVICE, dst_chained);
         }
 
         /* Check if data are contiguous */
···
         }
 
         edesc->assoc_nents = assoc_nents;
+        edesc->assoc_chained = assoc_chained;
         edesc->src_nents = src_nents;
+        edesc->src_chained = src_chained;
         edesc->dst_nents = dst_nents;
+        edesc->dst_chained = dst_chained;
         edesc->iv_dma = iv_dma;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
···
         bool iv_contig = false;
         int sgc;
         int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+        bool src_chained = false, dst_chained = false;
         int sec4_sg_index;
 
-        src_nents = sg_count(req->src, req->nbytes);
+        src_nents = sg_count(req->src, req->nbytes, &src_chained);
 
-        if (unlikely(req->dst != req->src))
-                dst_nents = sg_count(req->dst, req->nbytes);
+        if (req->dst != req->src)
+                dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
 
         if (likely(req->src == req->dst)) {
-                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                 DMA_BIDIRECTIONAL);
+                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                         DMA_BIDIRECTIONAL, src_chained);
         } else {
-                sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-                                 DMA_TO_DEVICE);
-                sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-                                 DMA_FROM_DEVICE);
+                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                         DMA_TO_DEVICE, src_chained);
+                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+                                         DMA_FROM_DEVICE, dst_chained);
         }
 
         /*
···
         }
 
         edesc->src_nents = src_nents;
+        edesc->src_chained = src_chained;
         edesc->dst_nents = dst_nents;
+        edesc->dst_chained = dst_chained;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                          desc_bytes;
···
                 sec4_sg_index += 1 + src_nents;
         }
 
-        if (unlikely(dst_nents)) {
+        if (dst_nents) {
                 sg_to_sec4_sg_last(req->dst, dst_nents,
                                    edesc->sec4_sg + sec4_sg_index, 0);
         }
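A note on the "src_nents ? : 1" expressions in the mapping calls above: sg_count() intentionally returns 0 for a single-segment list (see sg_sw_sec4.h below), but the DMA helpers still need an entry count of at least one, and "a ? : b" is the GNU C conditional with the middle operand omitted, yielding a when it is nonzero and b otherwise. A minimal stand-alone illustration; map_nents() is a hypothetical name, and this needs gcc or clang since the idiom is a GNU extension:

#include <stdio.h>

/* Turn sg_count()'s "0 means one segment" convention back into a
 * usable entry count, exactly as "src_nents ? : 1" does above. */
static int map_nents(int sg_count_result)
{
        return sg_count_result ? : 1;
}

int main(void)
{
        printf("%d\n", map_nents(0));  /* single segment -> map 1 entry */
        printf("%d\n", map_nents(3));  /* three segments -> map 3 entries */
        return 0;
}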
drivers/crypto/caam/caamhash.c: +38 -15
···
 /* Map req->src and put it in link table */
 static inline void src_map_to_sec4_sg(struct device *jrdev,
                                       struct scatterlist *src, int src_nents,
-                                      struct sec4_sg_entry *sec4_sg)
+                                      struct sec4_sg_entry *sec4_sg,
+                                      bool chained)
 {
-        dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
+        dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
         sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
 }
···
  * ahash_edesc - s/w-extended ahash descriptor
  * @dst_dma: physical mapped address of req->result
  * @sec4_sg_dma: physical mapped address of h/w link table
+ * @chained: if source is chained
  * @src_nents: number of segments in input scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg: pointer to h/w link table
···
 struct ahash_edesc {
         dma_addr_t dst_dma;
         dma_addr_t sec4_sg_dma;
+        bool chained;
         int src_nents;
         int sec4_sg_bytes;
         struct sec4_sg_entry *sec4_sg;
···
                         struct ahash_request *req, int dst_len)
 {
         if (edesc->src_nents)
-                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+                dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
+                                     DMA_TO_DEVICE, edesc->chained);
         if (edesc->dst_dma)
                 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
···
         dma_addr_t ptr = ctx->sh_desc_update_dma;
         int src_nents, sec4_sg_bytes, sec4_sg_src_index;
         struct ahash_edesc *edesc;
+        bool chained = false;
         int ret = 0;
         int sh_len;
···
         to_hash = in_len - *next_buflen;
 
         if (to_hash) {
-                src_nents = __sg_count(req->src, req->nbytes - (*next_buflen));
+                src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+                                       &chained);
                 sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                 sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                                 sizeof(struct sec4_sg_entry);
···
         }
 
         edesc->src_nents = src_nents;
+        edesc->chained = chained;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
···
 
         if (src_nents) {
                 src_map_to_sec4_sg(jrdev, req->src, src_nents,
-                                   edesc->sec4_sg + sec4_sg_src_index);
+                                   edesc->sec4_sg + sec4_sg_src_index,
+                                   chained);
                 if (*next_buflen) {
                         sg_copy_part(next_buf, req->src, to_hash -
                                      *buflen, req->nbytes);
···
         int src_nents;
         int digestsize = crypto_ahash_digestsize(ahash);
         struct ahash_edesc *edesc;
+        bool chained = false;
         int ret = 0;
         int sh_len;
 
-        src_nents = __sg_count(req->src, req->nbytes);
+        src_nents = __sg_count(req->src, req->nbytes, &chained);
         sec4_sg_src_index = 1 + (buflen ? 1 : 0);
         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);
···
         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
         edesc->src_nents = src_nents;
+        edesc->chained = chained;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
···
                          last_buflen);
 
         src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
-                           sec4_sg_src_index);
+                           sec4_sg_src_index, chained);
 
         append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                           buflen + req->nbytes, LDST_SGF);
···
         int src_nents, sec4_sg_bytes;
         dma_addr_t src_dma;
         struct ahash_edesc *edesc;
+        bool chained = false;
         int ret = 0;
         u32 options;
         int sh_len;
 
-        src_nents = sg_count(req->src, req->nbytes);
-        dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
+        src_nents = sg_count(req->src, req->nbytes, &chained);
+        dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
+                           chained);
         sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
         /* allocate space for base edesc and hw desc commands, link tables */
···
         edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                             sec4_sg_bytes, DMA_TO_DEVICE);
         edesc->src_nents = src_nents;
+        edesc->chained = chained;
 
         sh_len = desc_len(sh_desc);
         desc = edesc->hw_desc;
···
         struct ahash_edesc *edesc;
         u32 *desc, *sh_desc = ctx->sh_desc_update_first;
         dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+        bool chained = false;
         int ret = 0;
         int sh_len;
···
         to_hash = in_len - *next_buflen;
 
         if (to_hash) {
-                src_nents = __sg_count(req->src, req->nbytes - (*next_buflen));
+                src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+                                       &chained);
                 sec4_sg_bytes = (1 + src_nents) *
                                 sizeof(struct sec4_sg_entry);
···
         }
 
         edesc->src_nents = src_nents;
+        edesc->chained = chained;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
···
         state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
                                             buf, *buflen);
         src_map_to_sec4_sg(jrdev, req->src, src_nents,
-                           edesc->sec4_sg + 1);
+                           edesc->sec4_sg + 1, chained);
         if (*next_buflen) {
                 sg_copy_part(next_buf, req->src, to_hash - *buflen,
                              req->nbytes);
···
         int sec4_sg_bytes, sec4_sg_src_index, src_nents;
         int digestsize = crypto_ahash_digestsize(ahash);
         struct ahash_edesc *edesc;
+        bool chained = false;
         int sh_len;
         int ret = 0;
 
-        src_nents = __sg_count(req->src, req->nbytes);
+        src_nents = __sg_count(req->src, req->nbytes, &chained);
         sec4_sg_src_index = 2;
         sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
                         sizeof(struct sec4_sg_entry);
···
         init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
         edesc->src_nents = src_nents;
+        edesc->chained = chained;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
···
                          state->buf_dma, buflen,
                          last_buflen);
 
-        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
+        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
+                           chained);
 
         append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
                           req->nbytes, LDST_SGF);
···
         dma_addr_t src_dma;
         u32 options;
         struct ahash_edesc *edesc;
+        bool chained = false;
         int ret = 0;
         int sh_len;
···
         to_hash = req->nbytes - *next_buflen;
 
         if (to_hash) {
-                src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
-                dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
+                src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
+                                     &chained);
+                dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+                                   DMA_TO_DEVICE, chained);
                 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
                 /*
···
         }
 
         edesc->src_nents = src_nents;
+        edesc->chained = chained;
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
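The hash paths above use two counting helpers on purpose: __sg_count() returns the true number of segments, because the update/final/finup paths always build a link table around the buffered bytes, while ahash_digest() uses sg_count(), which collapses a single segment to 0 so the hardware can be pointed at the buffer directly with no link table. A toy model of the two return conventions; the model_* names are illustrative only:

#include <stdio.h>

/* Like __sg_count(): always the true segment count. */
static int model_sg_count_full(int segments)
{
        return segments;
}

/* Like sg_count(): 0 for one segment means "no link table needed". */
static int model_sg_count(int segments)
{
        return segments == 1 ? 0 : segments;
}

int main(void)
{
        int n;

        for (n = 1; n <= 3; n++)
                printf("segments=%d  __sg_count=%d  sg_count=%d\n",
                       n, model_sg_count_full(n), model_sg_count(n));
        return 0;
}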
drivers/crypto/caam/sg_sw_sec4.h: +39 -5
···
                 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
                                    sg_dma_len(sg), offset);
                 sec4_sg_ptr++;
-                sg = sg_next(sg);
+                sg = scatterwalk_sg_next(sg);
                 sg_count--;
         }
         return sec4_sg_ptr - 1;
···
 }
 
 /* count number of elements in scatterlist */
-static inline int __sg_count(struct scatterlist *sg_list, int nbytes)
+static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
+                             bool *chained)
 {
         struct scatterlist *sg = sg_list;
         int sg_nents = 0;
···
                 sg_nents++;
                 nbytes -= sg->length;
                 if (!sg_is_last(sg) && (sg + 1)->length == 0)
-                        BUG(); /* Not support chaining */
+                        *chained = true;
                 sg = scatterwalk_sg_next(sg);
         }
···
 }
 
 /* derive number of elements in scatterlist, but return 0 for 1 */
-static inline int sg_count(struct scatterlist *sg_list, int nbytes)
+static inline int sg_count(struct scatterlist *sg_list, int nbytes,
+                           bool *chained)
 {
-        int sg_nents = __sg_count(sg_list, nbytes);
+        int sg_nents = __sg_count(sg_list, nbytes, chained);
 
         if (likely(sg_nents == 1))
                 return 0;
 
         return sg_nents;
+}
+
+static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
+                              unsigned int nents, enum dma_data_direction dir,
+                              bool chained)
+{
+        if (unlikely(chained)) {
+                int i;
+                for (i = 0; i < nents; i++) {
+                        dma_map_sg(dev, sg, 1, dir);
+                        sg = scatterwalk_sg_next(sg);
+                }
+        } else {
+                dma_map_sg(dev, sg, nents, dir);
+        }
+        return nents;
+}
+
+static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
+                                unsigned int nents, enum dma_data_direction dir,
+                                bool chained)
+{
+        if (unlikely(chained)) {
+                int i;
+                for (i = 0; i < nents; i++) {
+                        dma_unmap_sg(dev, sg, 1, dir);
+                        sg = scatterwalk_sg_next(sg);
+                }
+        } else {
+                dma_unmap_sg(dev, sg, nents, dir);
+        }
+        return nents;
 }
 
 /* Copy from len bytes of sg to dest, starting from beginning */
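Taken together, the contract is that whoever maps a list records the chained flag from sg_count() and replays the same count, direction and flag at unmap time. A sketch of that calling pattern, assembled from the calls this patch actually makes (driver context such as jrdev and req is assumed, as in caamalg.c):

        bool chained = false;
        int src_nents = sg_count(req->src, req->nbytes, &chained);

        /* 0 from sg_count() means one segment; still map one entry. */
        dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                           DMA_TO_DEVICE, chained);

        /* ... build the sec4 link table and run the CAAM job ... */

        /* Unmap with the same count, direction and chained flag. */
        dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
                             DMA_TO_DEVICE, chained);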