Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: remove DMA_MEMCPY_SG once again

This was removed before due to the complete lack of users, but
3218910fd585 ("dmaengine: Add core function and capability check for
DMA_MEMCPY_SG") and 29cf37fa6dd9 ("dmaengine: Add consumer for the new
DMA_MEMCPY_SG API function.") added it back despite still not having
any users whatsoever.

Fixes: 3218910fd585 ("dmaengine: Add core function and capability check for DMA_MEMCPY_SG")
Fixes: 29cf37fa6dd9 ("dmaengine: Add consumer for the new DMA_MEMCPY_SG API function.")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Michal Simek <michal.simek@amd.com>
Link: https://lore.kernel.org/r/20220606074733.622616-1-hch@lst.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Christoph Hellwig and committed by Vinod Koul.
0cae0437 f7a03501

-159
-10
Documentation/driver-api/dmaengine/provider.rst
··· 162 162 163 163 - The device is able to do memory to memory copies 164 164 165 - - - DMA_MEMCPY_SG 166 - 167 - - The device supports memory to memory scatter-gather transfers. 168 - 169 - - Even though a plain memcpy can look like a particular case of a 170 - scatter-gather transfer, with a single chunk to copy, it's a distinct 171 - transaction type in the mem2mem transfer case. This is because some very 172 - simple devices might be able to do contiguous single-chunk memory copies, 173 - but have no support for more complex SG transfers. 174 - 175 165 - No matter what the overall size of the combined chunks for source and 176 166 destination is, only as many bytes as the smallest of the two will be 177 167 transmitted. That means the number and size of the scatter-gather buffers in
-7
drivers/dma/dmaengine.c
··· 1153 1153 return -EIO; 1154 1154 } 1155 1155 1156 - if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) { 1157 - dev_err(device->dev, 1158 - "Device claims capability %s, but op is not defined\n", 1159 - "DMA_MEMCPY_SG"); 1160 - return -EIO; 1161 - } 1162 - 1163 1156 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { 1164 1157 dev_err(device->dev, 1165 1158 "Device claims capability %s, but op is not defined\n",
-122
drivers/dma/xilinx/xilinx_dma.c
··· 2128 2128 } 2129 2129 2130 2130 /** 2131 - * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction 2132 - * @dchan: DMA channel 2133 - * @dst_sg: Destination scatter list 2134 - * @dst_sg_len: Number of entries in destination scatter list 2135 - * @src_sg: Source scatter list 2136 - * @src_sg_len: Number of entries in source scatter list 2137 - * @flags: transfer ack flags 2138 - * 2139 - * Return: Async transaction descriptor on success and NULL on failure 2140 - */ 2141 - static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg( 2142 - struct dma_chan *dchan, struct scatterlist *dst_sg, 2143 - unsigned int dst_sg_len, struct scatterlist *src_sg, 2144 - unsigned int src_sg_len, unsigned long flags) 2145 - { 2146 - struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2147 - struct xilinx_dma_tx_descriptor *desc; 2148 - struct xilinx_cdma_tx_segment *segment, *prev = NULL; 2149 - struct xilinx_cdma_desc_hw *hw; 2150 - size_t len, dst_avail, src_avail; 2151 - dma_addr_t dma_dst, dma_src; 2152 - 2153 - if (unlikely(dst_sg_len == 0 || src_sg_len == 0)) 2154 - return NULL; 2155 - 2156 - if (unlikely(!dst_sg || !src_sg)) 2157 - return NULL; 2158 - 2159 - desc = xilinx_dma_alloc_tx_descriptor(chan); 2160 - if (!desc) 2161 - return NULL; 2162 - 2163 - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2164 - desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2165 - 2166 - dst_avail = sg_dma_len(dst_sg); 2167 - src_avail = sg_dma_len(src_sg); 2168 - /* 2169 - * loop until there is either no more source or no more destination 2170 - * scatterlist entry 2171 - */ 2172 - while (true) { 2173 - len = min_t(size_t, src_avail, dst_avail); 2174 - len = min_t(size_t, len, chan->xdev->max_buffer_len); 2175 - if (len == 0) 2176 - goto fetch; 2177 - 2178 - /* Allocate the link descriptor from DMA pool */ 2179 - segment = xilinx_cdma_alloc_tx_segment(chan); 2180 - if (!segment) 2181 - goto error; 2182 - 2183 - dma_dst = 
sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - 2184 - dst_avail; 2185 - dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - 2186 - src_avail; 2187 - hw = &segment->hw; 2188 - hw->control = len; 2189 - hw->src_addr = dma_src; 2190 - hw->dest_addr = dma_dst; 2191 - if (chan->ext_addr) { 2192 - hw->src_addr_msb = upper_32_bits(dma_src); 2193 - hw->dest_addr_msb = upper_32_bits(dma_dst); 2194 - } 2195 - 2196 - if (prev) { 2197 - prev->hw.next_desc = segment->phys; 2198 - if (chan->ext_addr) 2199 - prev->hw.next_desc_msb = 2200 - upper_32_bits(segment->phys); 2201 - } 2202 - 2203 - prev = segment; 2204 - dst_avail -= len; 2205 - src_avail -= len; 2206 - list_add_tail(&segment->node, &desc->segments); 2207 - 2208 - fetch: 2209 - /* Fetch the next dst scatterlist entry */ 2210 - if (dst_avail == 0) { 2211 - if (dst_sg_len == 0) 2212 - break; 2213 - dst_sg = sg_next(dst_sg); 2214 - if (dst_sg == NULL) 2215 - break; 2216 - dst_sg_len--; 2217 - dst_avail = sg_dma_len(dst_sg); 2218 - } 2219 - /* Fetch the next src scatterlist entry */ 2220 - if (src_avail == 0) { 2221 - if (src_sg_len == 0) 2222 - break; 2223 - src_sg = sg_next(src_sg); 2224 - if (src_sg == NULL) 2225 - break; 2226 - src_sg_len--; 2227 - src_avail = sg_dma_len(src_sg); 2228 - } 2229 - } 2230 - 2231 - if (list_empty(&desc->segments)) { 2232 - dev_err(chan->xdev->dev, 2233 - "%s: Zero-size SG transfer requested\n", __func__); 2234 - goto error; 2235 - } 2236 - 2237 - /* Link the last hardware descriptor with the first. 
*/ 2238 - segment = list_first_entry(&desc->segments, 2239 - struct xilinx_cdma_tx_segment, node); 2240 - desc->async_tx.phys = segment->phys; 2241 - prev->hw.next_desc = segment->phys; 2242 - 2243 - return &desc->async_tx; 2244 - 2245 - error: 2246 - xilinx_dma_free_tx_descriptor(chan, desc); 2247 - return NULL; 2248 - } 2249 - 2250 - /** 2251 2131 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 2252 2132 * @dchan: DMA channel 2253 2133 * @sgl: scatterlist to transfer to/from ··· 3120 3240 DMA_RESIDUE_GRANULARITY_SEGMENT; 3121 3241 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { 3122 3242 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); 3123 - dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask); 3124 3243 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; 3125 - xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg; 3126 3244 /* Residue calculation is supported by only AXI DMA and CDMA */ 3127 3245 xdev->common.residue_granularity = 3128 3246 DMA_RESIDUE_GRANULARITY_SEGMENT;
-20
include/linux/dmaengine.h
··· 50 50 */ 51 51 enum dma_transaction_type { 52 52 DMA_MEMCPY, 53 - DMA_MEMCPY_SG, 54 53 DMA_XOR, 55 54 DMA_PQ, 56 55 DMA_XOR_VAL, ··· 886 887 struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( 887 888 struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, 888 889 size_t len, unsigned long flags); 889 - struct dma_async_tx_descriptor *(*device_prep_dma_memcpy_sg)( 890 - struct dma_chan *chan, 891 - struct scatterlist *dst_sg, unsigned int dst_nents, 892 - struct scatterlist *src_sg, unsigned int src_nents, 893 - unsigned long flags); 894 890 struct dma_async_tx_descriptor *(*device_prep_dma_xor)( 895 891 struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, 896 892 unsigned int src_cnt, size_t len, unsigned long flags); ··· 1052 1058 1053 1059 return chan->device->device_prep_dma_memcpy(chan, dest, src, 1054 1060 len, flags); 1055 - } 1056 - 1057 - static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy_sg( 1058 - struct dma_chan *chan, 1059 - struct scatterlist *dst_sg, unsigned int dst_nents, 1060 - struct scatterlist *src_sg, unsigned int src_nents, 1061 - unsigned long flags) 1062 - { 1063 - if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy_sg) 1064 - return NULL; 1065 - 1066 - return chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_nents, 1067 - src_sg, src_nents, 1068 - flags); 1069 1061 } 1070 1062 1071 1063 static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,