Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: xilinx_dma: Support descriptor setup from dma_vecs

The DMAEngine provides an interface for obtaining DMA transaction
descriptors from an array of scatter gather buffers represented by
struct dma_vec. This interface is used in the DMABUF API of the IIO
framework [1][2].
To enable DMABUF support through the IIO framework for the Xilinx DMA,
implement callback .device_prep_peripheral_dma_vec() of struct
dma_device in the driver.

[1]: 7a86d469983a ("iio: buffer-dmaengine: Support new DMABUF based userspace API")
[2]: 5878853fc938 ("dmaengine: Add API function dmaengine_prep_peripheral_dma_vec()")

Signed-off-by: Folker Schwesinger <dev@folker-schwesinger.de>
Reviewed-by: Suraj Gupta <suraj.gupta2@amd.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Link: https://lore.kernel.org/r/DCCKQLKOZC06.2H6LJ8RJQJNV2@folker-schwesinger.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Folker Schwesinger and committed by Vinod Koul (parent 38433a6f, commit d9a3e992).

drivers/dma/xilinx/xilinx_dma.c | 94 insertions(+), 0 deletions(-)
··· 2173 2173 } 2174 2174 2175 2175 /** 2176 + * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE 2177 + * transaction from DMA vectors 2178 + * @dchan: DMA channel 2179 + * @vecs: Array of DMA vectors that should be transferred 2180 + * @nb: number of entries in @vecs 2181 + * @direction: DMA direction 2182 + * @flags: transfer ack flags 2183 + * 2184 + * Return: Async transaction descriptor on success and NULL on failure 2185 + */ 2186 + static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec( 2187 + struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb, 2188 + enum dma_transfer_direction direction, unsigned long flags) 2189 + { 2190 + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2191 + struct xilinx_dma_tx_descriptor *desc; 2192 + struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL; 2193 + size_t copy; 2194 + size_t sg_used; 2195 + unsigned int i; 2196 + 2197 + if (!is_slave_direction(direction) || direction != chan->direction) 2198 + return NULL; 2199 + 2200 + desc = xilinx_dma_alloc_tx_descriptor(chan); 2201 + if (!desc) 2202 + return NULL; 2203 + 2204 + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2205 + desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2206 + 2207 + /* Build transactions using information from DMA vectors */ 2208 + for (i = 0; i < nb; i++) { 2209 + sg_used = 0; 2210 + 2211 + /* Loop until the entire dma_vec entry is used */ 2212 + while (sg_used < vecs[i].len) { 2213 + struct xilinx_axidma_desc_hw *hw; 2214 + 2215 + /* Get a free segment */ 2216 + segment = xilinx_axidma_alloc_tx_segment(chan); 2217 + if (!segment) 2218 + goto error; 2219 + 2220 + /* 2221 + * Calculate the maximum number of bytes to transfer, 2222 + * making sure it is less than the hw limit 2223 + */ 2224 + copy = xilinx_dma_calc_copysize(chan, vecs[i].len, 2225 + sg_used); 2226 + hw = &segment->hw; 2227 + 2228 + /* Fill in the descriptor */ 2229 + xilinx_axidma_buf(chan, hw, vecs[i].addr, 
sg_used, 0); 2230 + hw->control = copy; 2231 + 2232 + if (prev) 2233 + prev->hw.next_desc = segment->phys; 2234 + 2235 + prev = segment; 2236 + sg_used += copy; 2237 + 2238 + /* 2239 + * Insert the segment into the descriptor segments 2240 + * list. 2241 + */ 2242 + list_add_tail(&segment->node, &desc->segments); 2243 + } 2244 + } 2245 + 2246 + head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); 2247 + desc->async_tx.phys = head->phys; 2248 + 2249 + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2250 + if (chan->direction == DMA_MEM_TO_DEV) { 2251 + segment->hw.control |= XILINX_DMA_BD_SOP; 2252 + segment = list_last_entry(&desc->segments, 2253 + struct xilinx_axidma_tx_segment, 2254 + node); 2255 + segment->hw.control |= XILINX_DMA_BD_EOP; 2256 + } 2257 + 2258 + if (chan->xdev->has_axistream_connected) 2259 + desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops; 2260 + 2261 + return &desc->async_tx; 2262 + 2263 + error: 2264 + xilinx_dma_free_tx_descriptor(chan, desc); 2265 + return NULL; 2266 + } 2267 + 2268 + /** 2176 2269 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 2177 2270 * @dchan: DMA channel 2178 2271 * @sgl: scatterlist to transfer to/from ··· 3273 3180 xdev->common.device_config = xilinx_dma_device_config; 3274 3181 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 3275 3182 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 3183 + xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec; 3276 3184 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 3277 3185 xdev->common.device_prep_dma_cyclic = 3278 3186 xilinx_dma_prep_dma_cyclic;