Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine_topic_dma_vec' into togreg

Dmaengine topic
- New device_prep_peripheral_dma_vec, documentation and a user

+92
+9
Documentation/driver-api/dmaengine/client.rst
··· 80 80 81 81 - slave_sg: DMA a list of scatter gather buffers from/to a peripheral 82 82 83 + - peripheral_dma_vec: DMA an array of scatter gather buffers from/to a 84 + peripheral. Similar to slave_sg, but uses an array of dma_vec 85 + structures instead of a scatterlist. 86 + 83 87 - dma_cyclic: Perform a cyclic DMA operation from/to a peripheral till the 84 88 operation is explicitly stopped. 85 89 ··· 104 100 struct dma_async_tx_descriptor *dmaengine_prep_slave_sg( 105 101 struct dma_chan *chan, struct scatterlist *sgl, 106 102 unsigned int sg_len, enum dma_data_direction direction, 103 + unsigned long flags); 104 + 105 + struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec( 106 + struct dma_chan *chan, const struct dma_vec *vecs, 107 + size_t nents, enum dma_data_direction direction, 107 108 unsigned long flags); 108 109 109 110 struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+10
Documentation/driver-api/dmaengine/provider.rst
··· 433 433 - residue: Provides the residue bytes of the transfer for those that 434 434 support residue. 435 435 436 + - ``device_prep_peripheral_dma_vec`` 437 + 438 + - Similar to ``device_prep_slave_sg``, but it takes a pointer to an 439 + array of ``dma_vec`` structures, which (in the long run) will replace 440 + scatterlists. 441 + 436 442 - ``device_issue_pending`` 437 443 438 444 - Takes the first transaction descriptor in the pending queue, ··· 549 543 550 544 - Not really relevant any more since the introduction of ``virt-dma`` 551 545 that abstracts it away. 546 + 547 + dma_vec 548 + 549 + - A small structure that contains a DMA address and length. 552 550 553 551 DMA_CTRL_ACK 554 552
+40
drivers/dma/dma-axi-dmac.c
··· 620 620 return sg; 621 621 } 622 622 623 + static struct dma_async_tx_descriptor * 624 + axi_dmac_prep_peripheral_dma_vec(struct dma_chan *c, const struct dma_vec *vecs, 625 + size_t nb, enum dma_transfer_direction direction, 626 + unsigned long flags) 627 + { 628 + struct axi_dmac_chan *chan = to_axi_dmac_chan(c); 629 + struct axi_dmac_desc *desc; 630 + unsigned int num_sgs = 0; 631 + struct axi_dmac_sg *dsg; 632 + size_t i; 633 + 634 + if (direction != chan->direction) 635 + return NULL; 636 + 637 + for (i = 0; i < nb; i++) 638 + num_sgs += DIV_ROUND_UP(vecs[i].len, chan->max_length); 639 + 640 + desc = axi_dmac_alloc_desc(chan, num_sgs); 641 + if (!desc) 642 + return NULL; 643 + 644 + dsg = desc->sg; 645 + 646 + for (i = 0; i < nb; i++) { 647 + if (!axi_dmac_check_addr(chan, vecs[i].addr) || 648 + !axi_dmac_check_len(chan, vecs[i].len)) { 649 + kfree(desc); 650 + return NULL; 651 + } 652 + 653 + dsg = axi_dmac_fill_linear_sg(chan, direction, vecs[i].addr, 1, 654 + vecs[i].len, dsg); 655 + } 656 + 657 + desc->cyclic = false; 658 + 659 + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); 660 + } 661 + 623 662 static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg( 624 663 struct dma_chan *c, struct scatterlist *sgl, 625 664 unsigned int sg_len, enum dma_transfer_direction direction, ··· 1100 1061 dma_dev->device_tx_status = dma_cookie_status; 1101 1062 dma_dev->device_issue_pending = axi_dmac_issue_pending; 1102 1063 dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg; 1064 + dma_dev->device_prep_peripheral_dma_vec = axi_dmac_prep_peripheral_dma_vec; 1103 1065 dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic; 1104 1066 dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved; 1105 1067 dma_dev->device_terminate_all = axi_dmac_terminate_all;
+33
include/linux/dmaengine.h
··· 161 161 }; 162 162 163 163 /** 164 + * struct dma_vec - DMA vector 165 + * @addr: Bus address of the start of the vector 166 + * @len: Length in bytes of the DMA vector 167 + */ 168 + struct dma_vec { 169 + dma_addr_t addr; 170 + size_t len; 171 + }; 172 + 173 + /** 164 174 * enum dma_ctrl_flags - DMA flags to augment operation preparation, 165 175 * control completion, and communicate status. 166 176 * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of ··· 920 910 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 921 911 struct dma_chan *chan, unsigned long flags); 922 912 913 + struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)( 914 + struct dma_chan *chan, const struct dma_vec *vecs, 915 + size_t nents, enum dma_transfer_direction direction, 916 + unsigned long flags); 923 917 struct dma_async_tx_descriptor *(*device_prep_slave_sg)( 924 918 struct dma_chan *chan, struct scatterlist *sgl, 925 919 unsigned int sg_len, enum dma_transfer_direction direction, ··· 985 971 986 972 return chan->device->device_prep_slave_sg(chan, &sg, 1, 987 973 dir, flags, NULL); 974 + } 975 + 976 + /** 977 + * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor 978 + * @chan: The channel to be used for this descriptor 979 + * @vecs: The array of DMA vectors that should be transferred 980 + * @nents: The number of DMA vectors in the array 981 + * @dir: Specifies the direction of the data transfer 982 + * @flags: DMA engine flags 983 + */ 984 + static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec( 985 + struct dma_chan *chan, const struct dma_vec *vecs, size_t nents, 986 + enum dma_transfer_direction dir, unsigned long flags) 987 + { 988 + if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec) 989 + return NULL; 990 + 991 + return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents, 992 + dir, flags); 988 993 } 989 994 990 995 static inline struct 
dma_async_tx_descriptor *dmaengine_prep_slave_sg(