Merge branch 'for-4.3/sg' of git://git.kernel.dk/linux-block

Pull SG updates from Jens Axboe:
"This contains a set of scatter-gather related changes/fixes for 4.3:

- Add support for limited chaining of sg tables even for
architectures that do not set ARCH_HAS_SG_CHAIN. From Christoph.

- Add sg chain support to target_rd. From Christoph.

- Fixup open coded sg->page_link in crypto/omap-sham. From
Christoph.

- Fixup open coded crypto ->page_link manipulation. From Dan.

- Also from Dan, automated fixup of manual sg_unmark_end()
manipulations.

- Also from Dan, automated fixup of open coded sg_phys()
implementations.

- From Robert Jarzmik, addition of an sg table splitting helper that
drivers can use"

* 'for-4.3/sg' of git://git.kernel.dk/linux-block:
lib: scatterlist: add sg splitting function
scatterlist: use sg_phys()
crypto/omap-sham: remove an open coded access to ->page_link
scatterlist: remove open coded sg_unmark_end instances
crypto: replace scatterwalk_sg_chain with sg_chain
target/rd: always chain S/G list
scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN

+234 -81
+1 -1
arch/arm/mm/dma-mapping.c
···
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = page_to_phys(sg_page(s));
+		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
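A note on the sg_phys() conversions in this pull: sg_phys() already folds sg->offset into the returned address, so call sites that want a page-aligned physical address (like the hunk above) mask the offset back off. A minimal sketch of the relationship, written as a hypothetical helper rather than anything in the diff:

static inline phys_addr_t sg_page_aligned_phys(struct scatterlist *sg)
{
	/*
	 * sg_phys(sg) == page_to_phys(sg_page(sg)) + sg->offset, so as long
	 * as sg->offset stays below PAGE_SIZE, masking with PAGE_MASK
	 * recovers the page-aligned address the old page_to_phys() call
	 * returned.
	 */
	return sg_phys(sg) & PAGE_MASK;
}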
+1 -2
arch/microblaze/kernel/dma.c
···
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-							sg->length, direction);
+		__dma_sync(sg_phys(sg), sg->length, direction);
 	}
 
 	return nents;
+1 -1
block/blk-merge.c
···
 		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-		sg->page_link &= ~0x02;
+		sg_unmark_end(sg);
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
 			    q->dma_drain_size,
+1 -1
crypto/algif_skcipher.c
···
 		sgl->cur = 0;
 
 		if (sg)
-			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
 
 		list_add_tail(&sgl->list, &ctx->tsgl);
 	}
+2 -2
crypto/gcm.c
···
 	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
 	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
 	if (sg != pctx->src + 1)
-		scatterwalk_sg_chain(pctx->src, 2, sg);
+		sg_chain(pctx->src, 2, sg);
 
 	if (req->src != req->dst) {
 		sg_init_table(pctx->dst, 3);
 		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
 		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
 		if (sg != pctx->dst + 1)
-			scatterwalk_sg_chain(pctx->dst, 2, sg);
+			sg_chain(pctx->dst, 2, sg);
 	}
 }
+1 -2
drivers/crypto/bfin_crc.c
···
 		sg_init_table(ctx->bufsl, nsg);
 		sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
 		if (nsg > 1)
-			scatterwalk_sg_chain(ctx->bufsl, nsg,
-					     req->src);
+			sg_chain(ctx->bufsl, nsg, req->src);
 		ctx->sg = ctx->bufsl;
 	} else
 		ctx->sg = req->src;
+1 -1
drivers/crypto/omap-sham.c
···
 	 * the dmaengine may try to DMA the incorrect amount of data.
 	 */
 	sg_init_table(&ctx->sgl, 1);
-	ctx->sgl.page_link = ctx->sg->page_link;
+	sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
 	ctx->sgl.offset = ctx->sg->offset;
 	sg_dma_len(&ctx->sgl) = len32;
 	sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
+1 -1
drivers/crypto/qce/sha.c
···
 	if (rctx->buflen) {
 		sg_init_table(rctx->sg, 2);
 		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
-		scatterwalk_sg_chain(rctx->sg, 2, req->src);
+		sg_chain(rctx->sg, 2, req->src);
 		req->src = rctx->sg;
 	}
+1 -1
drivers/crypto/sahara.c
···
 		sg_init_table(rctx->in_sg_chain, 2);
 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 
-		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+		sg_chain(rctx->in_sg_chain, 2, req->src);
 
 		rctx->total = req->nbytes + rctx->buf_cnt;
 		rctx->in_sg = rctx->in_sg_chain;
+1 -1
drivers/crypto/talitos.c
···
 		sg_init_table(req_ctx->bufsl, nsg);
 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
 		if (nsg > 1)
-			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+			sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
 	} else
 		req_ctx->psrc = areq->src;
+2 -2
drivers/iommu/intel-iommu.c
···
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = page_to_phys(sg_page(sg)) | prot;
+			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
···
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nelems;
+1 -1
drivers/iommu/iommu.c
···
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		phys_addr_t phys = sg_phys(s);
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
+2 -2
drivers/mmc/card/queue.c
···
 			sg_set_buf(__sg, buf + offset, len);
 			offset += len;
 			remain -= len;
-			(__sg++)->page_link &= ~0x02;
+			sg_unmark_end(__sg++);
 			sg_len++;
 		} while (remain);
 	}
···
 	list_for_each_entry(req, &packed->list, queuelist) {
 		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
 		__sg = sg + (sg_len - 1);
-		(__sg++)->page_link &= ~0x02;
+		sg_unmark_end(__sg++);
 	}
 	sg_mark_end(sg + (sg_len - 1));
 	return sg_len;
+2 -2
drivers/staging/android/ion/ion_chunk_heap.c
···
 err:
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 		sg = sg_next(sg);
 	}
···
 			       DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
-44
drivers/target/target_core_rd.c
···
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
 					max_sg_per_table : total_sg_needed;
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		/*
 		 * Reserve extra element for chain entry
 		 */
 		if (sg_per_table < total_sg_needed)
 			chain_entry = 1;
-
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
 
 		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
 				GFP_KERNEL);
···
 
 		sg_init_table(sg, sg_per_table + chain_entry);
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		if (i > 0) {
 			sg_chain(sg_table[i - 1].sg_table,
 				 max_sg_per_table + 1, sg);
 		}
-
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
 
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
···
 
 	prot_sg = &prot_table->sg_table[prot_page -
 					prot_table->page_start_offset];
-
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-				   PAGE_SIZE);
-
-	/*
-	 * Allocate temporaly contiguous scatterlist entries if prot pages
-	 * straddles multiple scatterlist tables.
-	 */
-	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-		int i;
-
-		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-		if (!prot_sg)
-			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-		need_to_release = true;
-		sg_init_table(prot_sg, prot_npages);
-
-		for (i = 0; i < prot_npages; i++) {
-			if (prot_page + i > prot_table->page_end_offset) {
-				prot_table = rd_get_prot_table(dev,
-						prot_page + i);
-				if (!prot_table) {
-					kfree(prot_sg);
-					return rc;
-				}
-				sg_unmark_end(&prot_sg[i - 1]);
-			}
-			prot_sg[i] = prot_table->sg_table[prot_page + i -
-					prot_table->page_start_offset];
-		}
-	}
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
 
 	if (is_read)
 		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+1 -9
include/crypto/scatterwalk.h
···
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-					struct scatterlist *sg2)
-{
-	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-	sg1[num - 1].page_link &= ~0x02;
-	sg1[num - 1].page_link |= 0x01;
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 					    struct scatterlist *sg,
 					    int chain, int num)
···
 	}
 
 	if (sg)
-		scatterwalk_sg_chain(head, num, sg);
+		sg_chain(head, num, sg);
 	else
 		sg_mark_end(head);
 }
+5 -4
include/linux/scatterlist.h
···
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
 			    struct scatterlist *sgl)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	BUG();
-#endif
-
 	/*
 	 * offset and length are unused for chain entry. Clear them.
 	 */
···
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
 void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask);
 
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
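With the BUG() gone, sg_chain() can be called even when ARCH_HAS_SG_CHAIN is unset; the caveat of this "limited chaining" is that such a list should only be walked with sg_next()/for_each_sg() and not handed to code that still indexes the scatterlist as a flat array (which is what ARCH_HAS_SG_CHAIN guards on some architectures). A minimal sketch of the usual calling convention, as a hypothetical caller rather than anything in the diff (assumes <linux/scatterlist.h>):

static void chain_two_tables(struct scatterlist *first /* [8] */,
			     struct scatterlist *second /* [4] */)
{
	sg_init_table(first, 8);	/* slot 7 is reserved for the chain entry */
	sg_init_table(second, 4);
	/* ... fill first[0..6] and second[0..3] with sg_set_buf()/sg_set_page() ... */
	sg_chain(first, 8, second);	/* turns first[7] into a link to second */
}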
+7
lib/Kconfig
···
 
 source "lib/fonts/Kconfig"
 
+config SG_SPLIT
+	def_bool n
+	help
+	 Provides a helper to split scatterlists into chunks, each chunk being
+	 a scatterlist. This should be selected by a driver or an API which
+	 wishes to split a scatterlist amongst multiple DMA channels.
+
 #
 # sg chaining option
 #
+1
lib/Makefile
···
 
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
-4
lib/scatterlist.c
···
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	struct scatterlist *ret = &sgl[nents - 1];
-#else
 	struct scatterlist *sg, *ret = NULL;
 	unsigned int i;
 
 	for_each_sg(sgl, sg, nents, i)
 		ret = sg;
 
-#endif
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 	BUG_ON(!sg_is_last(ret));
+202
lib/sg_split.c
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+	struct scatterlist *in_sg0;
+	int nents;
+	off_t skip_sg0;
+	unsigned int length_last_sg;
+
+	struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+			      off_t skip, const size_t *sizes,
+			      struct sg_splitter *splitters, bool mapped)
+{
+	int i;
+	unsigned int sglen;
+	size_t size = sizes[0], len;
+	struct sg_splitter *curr = splitters;
+	struct scatterlist *sg;
+
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].in_sg0 = NULL;
+		splitters[i].nents = 0;
+	}
+
+	for_each_sg(in, sg, nents, i) {
+		sglen = mapped ? sg_dma_len(sg) : sg->length;
+		if (skip > sglen) {
+			skip -= sglen;
+			continue;
+		}
+
+		len = min_t(size_t, size, sglen - skip);
+		if (!curr->in_sg0) {
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+		}
+		size -= len;
+		curr->nents++;
+		curr->length_last_sg = len;
+
+		while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+			curr++;
+			size = *(++sizes);
+			skip += len;
+			len = min_t(size_t, size, sglen - skip);
+
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+			curr->nents = 1;
+			curr->length_last_sg = len;
+			size -= len;
+		}
+		skip = 0;
+
+		if (!size && --nb_splits > 0) {
+			curr++;
+			size = *(++sizes);
+		}
+
+		if (!nb_splits)
+			break;
+	}
+
+	return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			*out_sg = *in_sg;
+			if (!j) {
+				out_sg->offset += split->skip_sg0;
+				out_sg->length -= split->skip_sg0;
+			} else {
+				out_sg->offset = 0;
+			}
+			sg_dma_address(out_sg) = 0;
+			sg_dma_len(out_sg) = 0;
+			in_sg = sg_next(in_sg);
+		}
+		out_sg[-1].length = split->length_last_sg;
+		sg_mark_end(out_sg - 1);
+	}
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			sg_dma_address(out_sg) = sg_dma_address(in_sg);
+			sg_dma_len(out_sg) = sg_dma_len(in_sg);
+			if (!j) {
+				sg_dma_address(out_sg) += split->skip_sg0;
+				sg_dma_len(out_sg) -= split->skip_sg0;
+			}
+			in_sg = sg_next(in_sg);
+		}
+		sg_dma_len(--out_sg) = split->length_last_sg;
+	}
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
+ *		      be NULL if sglist not already mapped (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * The @in is split into :
+ *  - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ *  - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ *             @skip + @split_sizes[0] + @split_sizes[1] - 1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask)
+{
+	int i, ret;
+	struct sg_splitter *splitters;
+
+	splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+	if (!splitters)
+		return -ENOMEM;
+
+	ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+				 splitters, false);
+	if (ret < 0)
+		goto err;
+
+	ret = -ENOMEM;
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+						    sizeof(struct scatterlist),
+						    gfp_mask);
+		if (!splitters[i].out_sg)
+			goto err;
+	}
+
+	/*
+	 * The order of these 3 calls is important and should be kept.
+	 */
+	sg_split_phys(splitters, nb_splits);
+	ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+				 split_sizes, splitters, true);
+	if (ret < 0)
+		goto err;
+	sg_split_mapped(splitters, nb_splits);
+
+	for (i = 0; i < nb_splits; i++) {
+		out[i] = splitters[i].out_sg;
+		if (out_mapped_nents)
+			out_mapped_nents[i] = splitters[i].nents;
+	}
+
+	kfree(splitters);
+	return 0;
+
+err:
+	for (i = 0; i < nb_splits; i++)
+		kfree(splitters[i].out_sg);
+	kfree(splitters);
+	return ret;
+}
+EXPORT_SYMBOL(sg_split);
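To make the new helper concrete, here is a hedged usage sketch (hypothetical driver code, not part of the series; assumes <linux/dma-mapping.h>, <linux/scatterlist.h> and <linux/slab.h>) that maps a request's scatterlist and splits it in two for a pair of DMA channels, following the kernel-doc above:

static int split_for_two_channels(struct device *dev, struct scatterlist *sg,
				  int nents, size_t len0, size_t len1)
{
	size_t sizes[2] = { len0, len1 };
	struct scatterlist *parts[2];
	int part_nents[2];
	int mapped, ret;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* skip = 0: split from the very start of the mapped list */
	ret = sg_split(sg, mapped, 0, 2, sizes, parts, part_nents, GFP_KERNEL);
	if (ret)
		goto out_unmap;

	/* ... submit parts[0]/part_nents[0] and parts[1]/part_nents[1] ... */

	kfree(parts[0]);	/* sg_split() leaves freeing the outputs to the caller */
	kfree(parts[1]);
out_unmap:
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return ret;
}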