Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: caam - dma_map_sg can handle chained SG

The caam driver uses two dma_map_sg paths depending on whether the SG
list is chained or not.
Since dma_map_sg can handle both cases, clean up the code by removing
all references to SG chaining.

This removes the dma_map_sg_chained, dma_unmap_sg_chained
and __sg_count helper functions.

Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

LABBE Corentin and committed by
Herbert Xu
13fb8fd7 b8a011d4

+53 -168
+34 -60
drivers/crypto/caam/caamalg.c
··· 1708 1708 /* 1709 1709 * aead_edesc - s/w-extended aead descriptor 1710 1710 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist 1711 - * @assoc_chained: if source is chained 1712 1711 * @src_nents: number of segments in input scatterlist 1713 - * @src_chained: if source is chained 1714 1712 * @dst_nents: number of segments in output scatterlist 1715 - * @dst_chained: if destination is chained 1716 1713 * @iv_dma: dma address of iv for checking continuity and link table 1717 1714 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 1718 1715 * @sec4_sg_bytes: length of dma mapped sec4_sg space ··· 1718 1721 */ 1719 1722 struct aead_edesc { 1720 1723 int assoc_nents; 1721 - bool assoc_chained; 1722 1724 int src_nents; 1723 - bool src_chained; 1724 1725 int dst_nents; 1725 - bool dst_chained; 1726 1726 dma_addr_t iv_dma; 1727 1727 int sec4_sg_bytes; 1728 1728 dma_addr_t sec4_sg_dma; ··· 1730 1736 /* 1731 1737 * ablkcipher_edesc - s/w-extended ablkcipher descriptor 1732 1738 * @src_nents: number of segments in input scatterlist 1733 - * @src_chained: if source is chained 1734 1739 * @dst_nents: number of segments in output scatterlist 1735 - * @dst_chained: if destination is chained 1736 1740 * @iv_dma: dma address of iv for checking continuity and link table 1737 1741 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE) 1738 1742 * @sec4_sg_bytes: length of dma mapped sec4_sg space ··· 1739 1747 */ 1740 1748 struct ablkcipher_edesc { 1741 1749 int src_nents; 1742 - bool src_chained; 1743 1750 int dst_nents; 1744 - bool dst_chained; 1745 1751 dma_addr_t iv_dma; 1746 1752 int sec4_sg_bytes; 1747 1753 dma_addr_t sec4_sg_dma; ··· 1749 1759 1750 1760 static void caam_unmap(struct device *dev, struct scatterlist *src, 1751 1761 struct scatterlist *dst, int src_nents, 1752 - bool src_chained, int dst_nents, bool dst_chained, 1762 + int dst_nents, 1753 1763 dma_addr_t iv_dma, int ivsize, dma_addr_t 
sec4_sg_dma, 1754 1764 int sec4_sg_bytes) 1755 1765 { 1756 1766 if (dst != src) { 1757 - dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE, 1758 - src_chained); 1759 - dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE, 1760 - dst_chained); 1767 + dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE); 1768 + dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE); 1761 1769 } else { 1762 - dma_unmap_sg_chained(dev, src, src_nents ? : 1, 1763 - DMA_BIDIRECTIONAL, src_chained); 1770 + dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL); 1764 1771 } 1765 1772 1766 1773 if (iv_dma) ··· 1772 1785 struct aead_request *req) 1773 1786 { 1774 1787 caam_unmap(dev, req->src, req->dst, 1775 - edesc->src_nents, edesc->src_chained, edesc->dst_nents, 1776 - edesc->dst_chained, 0, 0, 1788 + edesc->src_nents, edesc->dst_nents, 0, 0, 1777 1789 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 1778 1790 } 1779 1791 ··· 1784 1798 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 1785 1799 1786 1800 caam_unmap(dev, req->src, req->dst, 1787 - edesc->src_nents, edesc->src_chained, edesc->dst_nents, 1788 - edesc->dst_chained, edesc->iv_dma, ivsize, 1801 + edesc->src_nents, edesc->dst_nents, 1802 + edesc->iv_dma, ivsize, 1789 1803 edesc->sec4_sg_dma, edesc->sec4_sg_bytes); 1790 1804 } 1791 1805 ··· 2155 2169 struct aead_edesc *edesc; 2156 2170 int sgc; 2157 2171 bool all_contig = true; 2158 - bool src_chained = false, dst_chained = false; 2159 2172 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; 2160 2173 unsigned int authsize = ctx->authsize; 2161 2174 2162 2175 if (unlikely(req->dst != req->src)) { 2163 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen, 2164 - &src_chained); 2176 + src_nents = sg_count(req->src, req->assoclen + req->cryptlen); 2165 2177 dst_nents = sg_count(req->dst, 2166 2178 req->assoclen + req->cryptlen + 2167 - (encrypt ? authsize : (-authsize)), 2168 - &dst_chained); 2179 + (encrypt ? 
authsize : (-authsize))); 2169 2180 } else { 2170 2181 src_nents = sg_count(req->src, 2171 2182 req->assoclen + req->cryptlen + 2172 - (encrypt ? authsize : 0), 2173 - &src_chained); 2183 + (encrypt ? authsize : 0)); 2174 2184 } 2175 2185 2176 2186 /* Check if data are contiguous. */ ··· 2189 2207 } 2190 2208 2191 2209 if (likely(req->src == req->dst)) { 2192 - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2193 - DMA_BIDIRECTIONAL, src_chained); 2210 + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 2211 + DMA_BIDIRECTIONAL); 2194 2212 if (unlikely(!sgc)) { 2195 2213 dev_err(jrdev, "unable to map source\n"); 2196 2214 kfree(edesc); 2197 2215 return ERR_PTR(-ENOMEM); 2198 2216 } 2199 2217 } else { 2200 - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2201 - DMA_TO_DEVICE, src_chained); 2218 + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 2219 + DMA_TO_DEVICE); 2202 2220 if (unlikely(!sgc)) { 2203 2221 dev_err(jrdev, "unable to map source\n"); 2204 2222 kfree(edesc); 2205 2223 return ERR_PTR(-ENOMEM); 2206 2224 } 2207 2225 2208 - sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 2209 - DMA_FROM_DEVICE, dst_chained); 2226 + sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 2227 + DMA_FROM_DEVICE); 2210 2228 if (unlikely(!sgc)) { 2211 2229 dev_err(jrdev, "unable to map destination\n"); 2212 - dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1, 2213 - DMA_TO_DEVICE, src_chained); 2230 + dma_unmap_sg(jrdev, req->src, src_nents ? 
: 1, 2231 + DMA_TO_DEVICE); 2214 2232 kfree(edesc); 2215 2233 return ERR_PTR(-ENOMEM); 2216 2234 } 2217 2235 } 2218 2236 2219 2237 edesc->src_nents = src_nents; 2220 - edesc->src_chained = src_chained; 2221 2238 edesc->dst_nents = dst_nents; 2222 - edesc->dst_chained = dst_chained; 2223 2239 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + 2224 2240 desc_bytes; 2225 2241 *all_contig_ptr = all_contig; ··· 2447 2467 bool iv_contig = false; 2448 2468 int sgc; 2449 2469 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 2450 - bool src_chained = false, dst_chained = false; 2451 2470 int sec4_sg_index; 2452 2471 2453 - src_nents = sg_count(req->src, req->nbytes, &src_chained); 2472 + src_nents = sg_count(req->src, req->nbytes); 2454 2473 2455 2474 if (req->dst != req->src) 2456 - dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); 2475 + dst_nents = sg_count(req->dst, req->nbytes); 2457 2476 2458 2477 if (likely(req->src == req->dst)) { 2459 - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2460 - DMA_BIDIRECTIONAL, src_chained); 2478 + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 2479 + DMA_BIDIRECTIONAL); 2461 2480 } else { 2462 - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2463 - DMA_TO_DEVICE, src_chained); 2464 - sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 2465 - DMA_FROM_DEVICE, dst_chained); 2481 + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 2482 + DMA_TO_DEVICE); 2483 + sgc = dma_map_sg(jrdev, req->dst, dst_nents ? 
: 1, 2484 + DMA_FROM_DEVICE); 2466 2485 } 2467 2486 2468 2487 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE); ··· 2490 2511 } 2491 2512 2492 2513 edesc->src_nents = src_nents; 2493 - edesc->src_chained = src_chained; 2494 2514 edesc->dst_nents = dst_nents; 2495 - edesc->dst_chained = dst_chained; 2496 2515 edesc->sec4_sg_bytes = sec4_sg_bytes; 2497 2516 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 2498 2517 desc_bytes; ··· 2623 2646 bool iv_contig = false; 2624 2647 int sgc; 2625 2648 int ivsize = crypto_ablkcipher_ivsize(ablkcipher); 2626 - bool src_chained = false, dst_chained = false; 2627 2649 int sec4_sg_index; 2628 2650 2629 - src_nents = sg_count(req->src, req->nbytes, &src_chained); 2651 + src_nents = sg_count(req->src, req->nbytes); 2630 2652 2631 2653 if (unlikely(req->dst != req->src)) 2632 - dst_nents = sg_count(req->dst, req->nbytes, &dst_chained); 2654 + dst_nents = sg_count(req->dst, req->nbytes); 2633 2655 2634 2656 if (likely(req->src == req->dst)) { 2635 - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2636 - DMA_BIDIRECTIONAL, src_chained); 2657 + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 2658 + DMA_BIDIRECTIONAL); 2637 2659 } else { 2638 - sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 2639 - DMA_TO_DEVICE, src_chained); 2640 - sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1, 2641 - DMA_FROM_DEVICE, dst_chained); 2660 + sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1, 2661 + DMA_TO_DEVICE); 2662 + sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1, 2663 + DMA_FROM_DEVICE); 2642 2664 } 2643 2665 2644 2666 /* ··· 2666 2690 } 2667 2691 2668 2692 edesc->src_nents = src_nents; 2669 - edesc->src_chained = src_chained; 2670 2693 edesc->dst_nents = dst_nents; 2671 - edesc->dst_chained = dst_chained; 2672 2694 edesc->sec4_sg_bytes = sec4_sg_bytes; 2673 2695 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) + 2674 2696 desc_bytes;
+17 -38
drivers/crypto/caam/caamhash.c
··· 181 181 /* Map req->src and put it in link table */ 182 182 static inline void src_map_to_sec4_sg(struct device *jrdev, 183 183 struct scatterlist *src, int src_nents, 184 - struct sec4_sg_entry *sec4_sg, 185 - bool chained) 184 + struct sec4_sg_entry *sec4_sg) 186 185 { 187 - dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained); 186 + dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE); 188 187 sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0); 189 188 } 190 189 ··· 584 585 * ahash_edesc - s/w-extended ahash descriptor 585 586 * @dst_dma: physical mapped address of req->result 586 587 * @sec4_sg_dma: physical mapped address of h/w link table 587 - * @chained: if source is chained 588 588 * @src_nents: number of segments in input scatterlist 589 589 * @sec4_sg_bytes: length of dma mapped sec4_sg space 590 590 * @sec4_sg: pointer to h/w link table ··· 592 594 struct ahash_edesc { 593 595 dma_addr_t dst_dma; 594 596 dma_addr_t sec4_sg_dma; 595 - bool chained; 596 597 int src_nents; 597 598 int sec4_sg_bytes; 598 599 struct sec4_sg_entry *sec4_sg; ··· 603 606 struct ahash_request *req, int dst_len) 604 607 { 605 608 if (edesc->src_nents) 606 - dma_unmap_sg_chained(dev, req->src, edesc->src_nents, 607 - DMA_TO_DEVICE, edesc->chained); 609 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); 608 610 if (edesc->dst_dma) 609 611 dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); 610 612 ··· 784 788 dma_addr_t ptr = ctx->sh_desc_update_dma; 785 789 int src_nents, sec4_sg_bytes, sec4_sg_src_index; 786 790 struct ahash_edesc *edesc; 787 - bool chained = false; 788 791 int ret = 0; 789 792 int sh_len; 790 793 ··· 792 797 to_hash = in_len - *next_buflen; 793 798 794 799 if (to_hash) { 795 - src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), 796 - &chained); 800 + src_nents = sg_nents_for_len(req->src, 801 + req->nbytes - (*next_buflen)); 797 802 sec4_sg_src_index = 1 + (*buflen ? 
1 : 0); 798 803 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 799 804 sizeof(struct sec4_sg_entry); ··· 811 816 } 812 817 813 818 edesc->src_nents = src_nents; 814 - edesc->chained = chained; 815 819 edesc->sec4_sg_bytes = sec4_sg_bytes; 816 820 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 817 821 DESC_JOB_IO_LEN; ··· 827 833 828 834 if (src_nents) { 829 835 src_map_to_sec4_sg(jrdev, req->src, src_nents, 830 - edesc->sec4_sg + sec4_sg_src_index, 831 - chained); 836 + edesc->sec4_sg + sec4_sg_src_index); 832 837 if (*next_buflen) 833 838 scatterwalk_map_and_copy(next_buf, req->src, 834 839 to_hash - *buflen, ··· 989 996 int src_nents; 990 997 int digestsize = crypto_ahash_digestsize(ahash); 991 998 struct ahash_edesc *edesc; 992 - bool chained = false; 993 999 int ret = 0; 994 1000 int sh_len; 995 1001 996 - src_nents = __sg_count(req->src, req->nbytes, &chained); 1002 + src_nents = sg_nents_for_len(req->src, req->nbytes); 997 1003 sec4_sg_src_index = 1 + (buflen ? 1 : 0); 998 1004 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 999 1005 sizeof(struct sec4_sg_entry); ··· 1010 1018 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1011 1019 1012 1020 edesc->src_nents = src_nents; 1013 - edesc->chained = chained; 1014 1021 edesc->sec4_sg_bytes = sec4_sg_bytes; 1015 1022 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1016 1023 DESC_JOB_IO_LEN; ··· 1024 1033 last_buflen); 1025 1034 1026 1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1027 - sec4_sg_src_index, chained); 1036 + sec4_sg_src_index); 1028 1037 1029 1038 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1030 1039 sec4_sg_bytes, DMA_TO_DEVICE); ··· 1072 1081 int src_nents, sec4_sg_bytes; 1073 1082 dma_addr_t src_dma; 1074 1083 struct ahash_edesc *edesc; 1075 - bool chained = false; 1076 1084 int ret = 0; 1077 1085 u32 options; 1078 1086 int sh_len; 1079 1087 1080 - src_nents = sg_count(req->src, req->nbytes, &chained); 
1081 - dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, 1082 - chained); 1088 + src_nents = sg_count(req->src, req->nbytes); 1089 + dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); 1083 1090 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); 1084 1091 1085 1092 /* allocate space for base edesc and hw desc commands, link tables */ ··· 1091 1102 DESC_JOB_IO_LEN; 1092 1103 edesc->sec4_sg_bytes = sec4_sg_bytes; 1093 1104 edesc->src_nents = src_nents; 1094 - edesc->chained = chained; 1095 1105 1096 1106 sh_len = desc_len(sh_desc); 1097 1107 desc = edesc->hw_desc; ··· 1216 1228 struct ahash_edesc *edesc; 1217 1229 u32 *desc, *sh_desc = ctx->sh_desc_update_first; 1218 1230 dma_addr_t ptr = ctx->sh_desc_update_first_dma; 1219 - bool chained = false; 1220 1231 int ret = 0; 1221 1232 int sh_len; 1222 1233 ··· 1223 1236 to_hash = in_len - *next_buflen; 1224 1237 1225 1238 if (to_hash) { 1226 - src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), 1227 - &chained); 1239 + src_nents = sg_nents_for_len(req->src, 1240 + req->nbytes - (*next_buflen)); 1228 1241 sec4_sg_bytes = (1 + src_nents) * 1229 1242 sizeof(struct sec4_sg_entry); 1230 1243 ··· 1241 1254 } 1242 1255 1243 1256 edesc->src_nents = src_nents; 1244 - edesc->chained = chained; 1245 1257 edesc->sec4_sg_bytes = sec4_sg_bytes; 1246 1258 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1247 1259 DESC_JOB_IO_LEN; ··· 1249 1263 state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, 1250 1264 buf, *buflen); 1251 1265 src_map_to_sec4_sg(jrdev, req->src, src_nents, 1252 - edesc->sec4_sg + 1, chained); 1266 + edesc->sec4_sg + 1); 1253 1267 if (*next_buflen) { 1254 1268 scatterwalk_map_and_copy(next_buf, req->src, 1255 1269 to_hash - *buflen, ··· 1329 1343 int sec4_sg_bytes, sec4_sg_src_index, src_nents; 1330 1344 int digestsize = crypto_ahash_digestsize(ahash); 1331 1345 struct ahash_edesc *edesc; 1332 - bool chained = false; 1333 1346 int sh_len; 1334 1347 int 
ret = 0; 1335 1348 1336 - src_nents = __sg_count(req->src, req->nbytes, &chained); 1349 + src_nents = sg_nents_for_len(req->src, req->nbytes); 1337 1350 sec4_sg_src_index = 2; 1338 1351 sec4_sg_bytes = (sec4_sg_src_index + src_nents) * 1339 1352 sizeof(struct sec4_sg_entry); ··· 1350 1365 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); 1351 1366 1352 1367 edesc->src_nents = src_nents; 1353 - edesc->chained = chained; 1354 1368 edesc->sec4_sg_bytes = sec4_sg_bytes; 1355 1369 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1356 1370 DESC_JOB_IO_LEN; ··· 1358 1374 state->buf_dma, buflen, 1359 1375 last_buflen); 1360 1376 1361 - src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, 1362 - chained); 1377 + src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1); 1363 1378 1364 1379 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 1365 1380 sec4_sg_bytes, DMA_TO_DEVICE); ··· 1412 1429 dma_addr_t src_dma; 1413 1430 u32 options; 1414 1431 struct ahash_edesc *edesc; 1415 - bool chained = false; 1416 1432 int ret = 0; 1417 1433 int sh_len; 1418 1434 ··· 1420 1438 to_hash = req->nbytes - *next_buflen; 1421 1439 1422 1440 if (to_hash) { 1423 - src_nents = sg_count(req->src, req->nbytes - (*next_buflen), 1424 - &chained); 1425 - dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, 1426 - DMA_TO_DEVICE, chained); 1441 + src_nents = sg_count(req->src, req->nbytes - (*next_buflen)); 1442 + dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE); 1427 1443 sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry); 1428 1444 1429 1445 /* ··· 1437 1457 } 1438 1458 1439 1459 edesc->src_nents = src_nents; 1440 - edesc->chained = chained; 1441 1460 edesc->sec4_sg_bytes = sec4_sg_bytes; 1442 1461 edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) + 1443 1462 DESC_JOB_IO_LEN;
+2 -70
drivers/crypto/caam/sg_sw_sec4.h
··· 69 69 return sec4_sg_ptr - 1; 70 70 } 71 71 72 - /* count number of elements in scatterlist */ 73 - static inline int __sg_count(struct scatterlist *sg_list, int nbytes, 74 - bool *chained) 75 - { 76 - struct scatterlist *sg = sg_list; 77 - int sg_nents = 0; 78 - 79 - while (nbytes > 0) { 80 - sg_nents++; 81 - nbytes -= sg->length; 82 - if (!sg_is_last(sg) && (sg + 1)->length == 0) 83 - *chained = true; 84 - sg = sg_next(sg); 85 - } 86 - 87 - return sg_nents; 88 - } 89 - 90 72 /* derive number of elements in scatterlist, but return 0 for 1 */ 91 - static inline int sg_count(struct scatterlist *sg_list, int nbytes, 92 - bool *chained) 73 + static inline int sg_count(struct scatterlist *sg_list, int nbytes) 93 74 { 94 - int sg_nents = __sg_count(sg_list, nbytes, chained); 75 + int sg_nents = sg_nents_for_len(sg_list, nbytes); 95 76 96 77 if (likely(sg_nents == 1)) 97 78 return 0; 98 79 99 80 return sg_nents; 100 - } 101 - 102 - static inline void dma_unmap_sg_chained( 103 - struct device *dev, struct scatterlist *sg, unsigned int nents, 104 - enum dma_data_direction dir, bool chained) 105 - { 106 - if (unlikely(chained)) { 107 - int i; 108 - struct scatterlist *tsg = sg; 109 - 110 - /* 111 - * Use a local copy of the sg pointer to avoid moving the 112 - * head of the list pointed to by sg as we walk the list. 113 - */ 114 - for (i = 0; i < nents; i++) { 115 - dma_unmap_sg(dev, tsg, 1, dir); 116 - tsg = sg_next(tsg); 117 - } 118 - } else if (nents) { 119 - dma_unmap_sg(dev, sg, nents, dir); 120 - } 121 - } 122 - 123 - static inline int dma_map_sg_chained( 124 - struct device *dev, struct scatterlist *sg, unsigned int nents, 125 - enum dma_data_direction dir, bool chained) 126 - { 127 - if (unlikely(chained)) { 128 - int i; 129 - struct scatterlist *tsg = sg; 130 - 131 - /* 132 - * Use a local copy of the sg pointer to avoid moving the 133 - * head of the list pointed to by sg as we walk the list. 
134 - */ 135 - for (i = 0; i < nents; i++) { 136 - if (!dma_map_sg(dev, tsg, 1, dir)) { 137 - dma_unmap_sg_chained(dev, sg, i, dir, 138 - chained); 139 - nents = 0; 140 - break; 141 - } 142 - 143 - tsg = sg_next(tsg); 144 - } 145 - } else 146 - nents = dma_map_sg(dev, sg, nents, dir); 147 - 148 - return nents; 149 81 }