Merge tag 'dmaengine-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
"A couple of new device support and small driver updates for this
round.

New support:
- Intel idxd Wildcat Lake family support
- SpacemiT K1 PDMA controller support
- Renesas RZ/G3E family support

Updates:
- Xilinx updates: ZynqMP DMA shutdown support, xilinx_dma dma_vec
  descriptor support, and DMA client properties binding cleanup
- Synopsys DesignWare eDMA callback_result support"

* tag 'dmaengine-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
dt-bindings: dma: rz-dmac: Document RZ/G3E family of SoCs
dmaengine: dw-edma: Set status for callback_result
dmaengine: mv_xor: match alloc_wc and free_wc
dmaengine: mmp_pdma: Add SpacemiT K1 PDMA support with 64-bit addressing
dmaengine: mmp_pdma: Add operations structure for controller abstraction
dmaengine: mmp_pdma: Add reset controller support
dmaengine: mmp_pdma: Add clock support
dt-bindings: dma: Add SpacemiT K1 PDMA controller
dt-bindings: dmaengine: xilinx_dma: Remove DMA client properties
dmaengine: Fix dma_async_tx_descriptor->tx_submit documentation
dmaengine: xilinx_dma: Support descriptor setup from dma_vecs
dmaengine: sh: setup_xref error handling
dmaengine: Replace zero-length array with flexible-array
dmaengine: ppc4xx: Remove space before newline
dmaengine: idxd: Add a new IAA device ID for Wildcat Lake family platforms
dmaengine: idxd: Replace memset(0) + strscpy() with strscpy_pad()
dt-bindings: dma: nvidia,tegra20-apbdma: Add undocumented compatibles and "clock-names"
dmaengine: zynqmp_dma: Add shutdown operation support

+504 -85
+11 -1
Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml
··· 18 18 properties: 19 19 compatible: 20 20 oneOf: 21 - - const: nvidia,tegra20-apbdma 21 + - enum: 22 + - nvidia,tegra114-apbdma 23 + - nvidia,tegra20-apbdma 22 24 - items: 23 25 - const: nvidia,tegra30-apbdma 24 26 - const: nvidia,tegra20-apbdma 27 + - items: 28 + - enum: 29 + - nvidia,tegra124-apbdma 30 + - nvidia,tegra210-apbdma 31 + - const: nvidia,tegra148-apbdma 25 32 26 33 reg: 27 34 maxItems: 1 ··· 38 31 39 32 clocks: 40 33 maxItems: 1 34 + 35 + clock-names: 36 + const: dma 41 37 42 38 interrupts: 43 39 description:
+5
Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
··· 21 21 - renesas,r9a08g045-dmac # RZ/G3S 22 22 - const: renesas,rz-dmac 23 23 24 + - items: 25 + - enum: 26 + - renesas,r9a09g047-dmac # RZ/G3E 27 + - const: renesas,r9a09g057-dmac 28 + 24 29 - const: renesas,r9a09g057-dmac # RZ/V2H(P) 25 30 26 31 reg:
+68
Documentation/devicetree/bindings/dma/spacemit,k1-pdma.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/dma/spacemit,k1-pdma.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: SpacemiT K1 PDMA Controller 8 + 9 + maintainers: 10 + - Guodong Xu <guodong@riscstar.com> 11 + 12 + allOf: 13 + - $ref: dma-controller.yaml# 14 + 15 + properties: 16 + compatible: 17 + const: spacemit,k1-pdma 18 + 19 + reg: 20 + maxItems: 1 21 + 22 + interrupts: 23 + description: Shared interrupt for all DMA channels 24 + maxItems: 1 25 + 26 + clocks: 27 + maxItems: 1 28 + 29 + resets: 30 + maxItems: 1 31 + 32 + dma-channels: 33 + maximum: 16 34 + 35 + '#dma-cells': 36 + const: 1 37 + description: 38 + The DMA request number for the peripheral device. 39 + 40 + required: 41 + - compatible 42 + - reg 43 + - interrupts 44 + - clocks 45 + - resets 46 + - dma-channels 47 + - '#dma-cells' 48 + 49 + unevaluatedProperties: false 50 + 51 + examples: 52 + - | 53 + #include <dt-bindings/clock/spacemit,k1-syscon.h> 54 + 55 + soc { 56 + #address-cells = <2>; 57 + #size-cells = <2>; 58 + 59 + dma-controller@d4000000 { 60 + compatible = "spacemit,k1-pdma"; 61 + reg = <0x0 0xd4000000 0x0 0x4000>; 62 + interrupts = <72>; 63 + clocks = <&syscon_apmu CLK_DMA>; 64 + resets = <&syscon_apmu RESET_DMA>; 65 + dma-channels = <16>; 66 + #dma-cells = <1>; 67 + }; 68 + };
-23
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
··· 109 109 xlnx,datawidth = <0x40>; 110 110 } ; 111 111 } ; 112 - 113 - 114 - * DMA client 115 - 116 - Required properties: 117 - - dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs, 118 - where Channel ID is '0' for write/tx and '1' for read/rx 119 - channel. For MCMDA, MM2S channel(write/tx) ID start from 120 - '0' and is in [0-15] range. S2MM channel(read/rx) ID start 121 - from '16' and is in [16-31] range. These channels ID are 122 - fixed irrespective of IP configuration. 123 - 124 - - dma-names: a list of DMA channel names, one per "dmas" entry 125 - 126 - Example: 127 - ++++++++ 128 - 129 - vdmatest_0: vdmatest@0 { 130 - compatible ="xlnx,axi-vdma-test-1.00.a"; 131 - dmas = <&axi_vdma_0 0 132 - &axi_vdma_0 1>; 133 - dma-names = "vdma0", "vdma1"; 134 - } ;
+1 -1
drivers/dma/Kconfig
··· 450 450 451 451 config MMP_PDMA 452 452 tristate "MMP PDMA support" 453 - depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST 453 + depends on ARCH_MMP || ARCH_PXA || ARCH_SPACEMIT || COMPILE_TEST 454 454 select DMA_ENGINE 455 455 help 456 456 Support the MMP PDMA engine for PXA and MMP platform.
+22
drivers/dma/dw-edma/dw-edma-core.c
··· 584 584 return dw_edma_device_transfer(&xfer); 585 585 } 586 586 587 + static void dw_hdma_set_callback_result(struct virt_dma_desc *vd, 588 + enum dmaengine_tx_result result) 589 + { 590 + u32 residue = 0; 591 + struct dw_edma_desc *desc; 592 + struct dmaengine_result *res; 593 + 594 + if (!vd->tx.callback_result) 595 + return; 596 + 597 + desc = vd2dw_edma_desc(vd); 598 + if (desc) 599 + residue = desc->alloc_sz - desc->xfer_sz; 600 + 601 + res = &vd->tx_result; 602 + res->result = result; 603 + res->residue = residue; 604 + } 605 + 587 606 static void dw_edma_done_interrupt(struct dw_edma_chan *chan) 588 607 { 589 608 struct dw_edma_desc *desc; ··· 616 597 case EDMA_REQ_NONE: 617 598 desc = vd2dw_edma_desc(vd); 618 599 if (!desc->chunks_alloc) { 600 + dw_hdma_set_callback_result(vd, 601 + DMA_TRANS_NOERROR); 619 602 list_del(&vd->node); 620 603 vchan_cookie_complete(vd); 621 604 } ··· 654 633 spin_lock_irqsave(&chan->vc.lock, flags); 655 634 vd = vchan_next_desc(&chan->vc); 656 635 if (vd) { 636 + dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED); 657 637 list_del(&vd->node); 658 638 vchan_cookie_complete(vd); 659 639 }
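The dw-edma hunk above only fills in vd->tx_result; it is the client's callback_result hook that actually consumes the status and residue. A minimal client-side sketch of that generic dmaengine API follows (my_dma_done(), my_start_transfer() and the completion plumbing are illustrative, not dw-edma code):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/printk.h>

static void my_dma_done(void *param, const struct dmaengine_result *result)
{
	struct completion *done = param;

	if (result->result != DMA_TRANS_NOERROR)
		pr_warn("DMA ended early, %u bytes not transferred\n",
			result->residue);
	complete(done);
}

static int my_start_transfer(struct dma_chan *chan, dma_addr_t buf,
			     size_t len, struct completion *done)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = my_dma_done;	/* richer than ->callback */
	desc->callback_param = done;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}

Clients that only set ->callback keep working; ->callback_result is simply the richer variant that this change now feeds with DMA_TRANS_NOERROR or DMA_TRANS_ABORTED plus the residue.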
+2 -4
drivers/dma/idxd/defaults.c
··· 36 36 group->num_wqs++; 37 37 38 38 /* set name to "iaa_crypto" */ 39 - memset(wq->name, 0, WQ_NAME_SIZE + 1); 40 - strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1); 39 + strscpy_pad(wq->name, "iaa_crypto"); 41 40 42 41 /* set driver_name to "crypto" */ 43 - memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); 44 - strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1); 42 + strscpy_pad(wq->driver_name, "crypto"); 45 43 46 44 engine = idxd->engines[0]; 47 45
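For reference on the idxd change above: strscpy_pad() copies the string and zero-fills the remainder of the destination, and its two-argument form derives the buffer size from the array, which is why the memset() + strscpy() pair collapses into one call. A small equivalence sketch with a placeholder buffer (not an idxd structure member):

#include <linux/string.h>

static void example_set_name(void)
{
	char buf[32];	/* placeholder buffer, not an idxd field */

	/* Before: clear the whole buffer, then do a bounded copy. */
	memset(buf, 0, sizeof(buf));
	strscpy(buf, "iaa_crypto", sizeof(buf));

	/* After: one call copies and zero-fills the remainder; the
	 * two-argument form takes the size from the array itself. */
	strscpy_pad(buf, "iaa_crypto");
}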
+2
drivers/dma/idxd/init.c
··· 80 80 { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) }, 81 81 /* IAA PTL platforms */ 82 82 { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) }, 83 + /* IAA WCL platforms */ 84 + { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) }, 83 85 { 0, } 84 86 }; 85 87 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
+1
drivers/dma/idxd/registers.h
··· 14 14 #define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 15 15 #define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 16 16 #define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d 17 + #define PCI_DEVICE_ID_INTEL_IAA_WCL 0xfd2d 17 18 18 19 #define DEVICE_VERSION_1 0x100 19 20 #define DEVICE_VERSION_2 0x200
+1 -1
drivers/dma/imx-sdma.c
··· 256 256 /* End of v3 array */ 257 257 union { s32 v3_end; s32 mcu_2_zqspi_addr; }; 258 258 /* End of v4 array */ 259 - s32 v4_end[0]; 259 + s32 v4_end[]; 260 260 }; 261 261 262 262 /*
+256 -37
drivers/dma/mmp_pdma.c
··· 15 15 #include <linux/device.h> 16 16 #include <linux/platform_data/mmp_dma.h> 17 17 #include <linux/dmapool.h> 18 + #include <linux/clk.h> 19 + #include <linux/reset.h> 18 20 #include <linux/of_dma.h> 19 21 #include <linux/of.h> 20 22 ··· 25 23 #define DCSR 0x0000 26 24 #define DALGN 0x00a0 27 25 #define DINT 0x00f0 28 - #define DDADR 0x0200 26 + #define DDADR(n) (0x0200 + ((n) << 4)) 29 27 #define DSADR(n) (0x0204 + ((n) << 4)) 30 28 #define DTADR(n) (0x0208 + ((n) << 4)) 29 + #define DDADRH(n) (0x0300 + ((n) << 4)) 30 + #define DSADRH(n) (0x0304 + ((n) << 4)) 31 + #define DTADRH(n) (0x0308 + ((n) << 4)) 31 32 #define DCMD 0x020c 32 33 33 34 #define DCSR_RUN BIT(31) /* Run Bit (read / write) */ ··· 47 42 #define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ 48 43 #define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ 49 44 #define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ 45 + #define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */ 50 46 #define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ 51 47 #define DCSR_EORINTR BIT(9) /* The end of Receive */ 52 48 ··· 80 74 u32 dsadr; /* DSADR value for the current transfer */ 81 75 u32 dtadr; /* DTADR value for the current transfer */ 82 76 u32 dcmd; /* DCMD value for the current transfer */ 77 + /* 78 + * The following 32-bit words are only used in the 64-bit, ie. 79 + * LPAE (Long Physical Address Extension) mode. 80 + * They are used to specify the high 32 bits of the descriptor's 81 + * addresses. 82 + */ 83 + u32 ddadrh; /* High 32-bit of DDADR */ 84 + u32 dsadrh; /* High 32-bit of DSADR */ 85 + u32 dtadrh; /* High 32-bit of DTADR */ 86 + u32 rsvd; /* reserved */ 83 87 } __aligned(32); 84 88 85 89 struct mmp_pdma_desc_sw { ··· 134 118 struct mmp_pdma_chan *vchan; 135 119 }; 136 120 121 + /** 122 + * struct mmp_pdma_ops - Operations for the MMP PDMA controller 123 + * 124 + * Hardware Register Operations (read/write hardware registers): 125 + * @write_next_addr: Function to program address of next descriptor into 126 + * DDADR/DDADRH 127 + * @read_src_addr: Function to read the source address from DSADR/DSADRH 128 + * @read_dst_addr: Function to read the destination address from DTADR/DTADRH 129 + * 130 + * Descriptor Memory Operations (manipulate descriptor structs in memory): 131 + * @set_desc_next_addr: Function to set next descriptor address in descriptor 132 + * @set_desc_src_addr: Function to set the source address in descriptor 133 + * @set_desc_dst_addr: Function to set the destination address in descriptor 134 + * @get_desc_src_addr: Function to get the source address from descriptor 135 + * @get_desc_dst_addr: Function to get the destination address from descriptor 136 + * 137 + * Controller Configuration: 138 + * @run_bits: Control bits in DCSR register for channel start/stop 139 + * @dma_mask: DMA addressing capability of controller. 
0 to use OF/platform 140 + * settings, or explicit mask like DMA_BIT_MASK(32/64) 141 + */ 142 + struct mmp_pdma_ops { 143 + /* Hardware Register Operations */ 144 + void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr); 145 + u64 (*read_src_addr)(struct mmp_pdma_phy *phy); 146 + u64 (*read_dst_addr)(struct mmp_pdma_phy *phy); 147 + 148 + /* Descriptor Memory Operations */ 149 + void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc, 150 + dma_addr_t addr); 151 + void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc, 152 + dma_addr_t addr); 153 + void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc, 154 + dma_addr_t addr); 155 + u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc); 156 + u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc); 157 + 158 + /* Controller Configuration */ 159 + u32 run_bits; 160 + u64 dma_mask; 161 + }; 162 + 137 163 struct mmp_pdma_device { 138 164 int dma_channels; 139 165 void __iomem *base; 140 166 struct device *dev; 141 167 struct dma_device device; 142 168 struct mmp_pdma_phy *phy; 169 + const struct mmp_pdma_ops *ops; 143 170 spinlock_t phy_lock; /* protect alloc/free phy channels */ 144 171 }; 145 172 ··· 195 136 #define to_mmp_pdma_dev(dmadev) \ 196 137 container_of(dmadev, struct mmp_pdma_device, device) 197 138 198 - static int mmp_pdma_config_write(struct dma_chan *dchan, 199 - struct dma_slave_config *cfg, 200 - enum dma_transfer_direction direction); 201 - 202 - static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) 139 + /* For 32-bit PDMA */ 140 + static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr) 203 141 { 204 - u32 reg = (phy->idx << 4) + DDADR; 205 - 206 - writel(addr, phy->base + reg); 142 + writel(addr, phy->base + DDADR(phy->idx)); 207 143 } 144 + 145 + static u64 read_src_addr_32(struct mmp_pdma_phy *phy) 146 + { 147 + return readl(phy->base + DSADR(phy->idx)); 148 + } 149 + 150 + static u64 read_dst_addr_32(struct mmp_pdma_phy *phy) 151 + { 152 + return readl(phy->base + DTADR(phy->idx)); 153 + } 154 + 155 + static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr) 156 + { 157 + desc->ddadr = addr; 158 + } 159 + 160 + static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr) 161 + { 162 + desc->dsadr = addr; 163 + } 164 + 165 + static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr) 166 + { 167 + desc->dtadr = addr; 168 + } 169 + 170 + static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc) 171 + { 172 + return desc->dsadr; 173 + } 174 + 175 + static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc) 176 + { 177 + return desc->dtadr; 178 + } 179 + 180 + /* For 64-bit PDMA */ 181 + static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr) 182 + { 183 + writel(lower_32_bits(addr), phy->base + DDADR(phy->idx)); 184 + writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx)); 185 + } 186 + 187 + static u64 read_src_addr_64(struct mmp_pdma_phy *phy) 188 + { 189 + u32 low = readl(phy->base + DSADR(phy->idx)); 190 + u32 high = readl(phy->base + DSADRH(phy->idx)); 191 + 192 + return ((u64)high << 32) | low; 193 + } 194 + 195 + static u64 read_dst_addr_64(struct mmp_pdma_phy *phy) 196 + { 197 + u32 low = readl(phy->base + DTADR(phy->idx)); 198 + u32 high = readl(phy->base + DTADRH(phy->idx)); 199 + 200 + return ((u64)high << 32) | low; 201 + } 202 + 203 + static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr) 204 + { 205 + desc->ddadr = 
lower_32_bits(addr); 206 + desc->ddadrh = upper_32_bits(addr); 207 + } 208 + 209 + static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr) 210 + { 211 + desc->dsadr = lower_32_bits(addr); 212 + desc->dsadrh = upper_32_bits(addr); 213 + } 214 + 215 + static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr) 216 + { 217 + desc->dtadr = lower_32_bits(addr); 218 + desc->dtadrh = upper_32_bits(addr); 219 + } 220 + 221 + static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc) 222 + { 223 + return ((u64)desc->dsadrh << 32) | desc->dsadr; 224 + } 225 + 226 + static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc) 227 + { 228 + return ((u64)desc->dtadrh << 32) | desc->dtadr; 229 + } 230 + 231 + static int mmp_pdma_config_write(struct dma_chan *dchan, 232 + struct dma_slave_config *cfg, 233 + enum dma_transfer_direction direction); 208 234 209 235 static void enable_chan(struct mmp_pdma_phy *phy) 210 236 { 211 237 u32 reg, dalgn; 238 + struct mmp_pdma_device *pdev; 212 239 213 240 if (!phy->vchan) 214 241 return; 242 + 243 + pdev = to_mmp_pdma_dev(phy->vchan->chan.device); 215 244 216 245 reg = DRCMR(phy->vchan->drcmr); 217 246 writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); ··· 312 165 writel(dalgn, phy->base + DALGN); 313 166 314 167 reg = (phy->idx << 2) + DCSR; 315 - writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg); 168 + writel(readl(phy->base + reg) | pdev->ops->run_bits, 169 + phy->base + reg); 316 170 } 317 171 318 172 static void disable_chan(struct mmp_pdma_phy *phy) 319 173 { 320 - u32 reg; 174 + u32 reg, dcsr; 321 175 322 176 if (!phy) 323 177 return; 324 178 325 179 reg = (phy->idx << 2) + DCSR; 326 - writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg); 180 + dcsr = readl(phy->base + reg); 181 + 182 + if (phy->vchan) { 183 + struct mmp_pdma_device *pdev; 184 + 185 + pdev = to_mmp_pdma_dev(phy->vchan->chan.device); 186 + writel(dcsr & ~pdev->ops->run_bits, phy->base + reg); 187 + } else { 188 + /* If no vchan, just clear the RUN bit */ 189 + writel(dcsr & ~DCSR_RUN, phy->base + reg); 190 + } 327 191 } 328 192 329 193 static int clear_chan_irq(struct mmp_pdma_phy *phy) ··· 453 295 static void start_pending_queue(struct mmp_pdma_chan *chan) 454 296 { 455 297 struct mmp_pdma_desc_sw *desc; 298 + struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device); 456 299 457 300 /* still in running, irq will start the pending list */ 458 301 if (!chan->idle) { ··· 488 329 * Program the descriptor's address into the DMA controller, 489 330 * then start the DMA transaction 490 331 */ 491 - set_desc(chan->phy, desc->async_tx.phys); 332 + pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys); 492 333 enable_chan(chan->phy); 493 334 chan->idle = false; 494 335 } ··· 604 445 size_t len, unsigned long flags) 605 446 { 606 447 struct mmp_pdma_chan *chan; 448 + struct mmp_pdma_device *pdev; 607 449 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; 608 450 size_t copy = 0; 609 451 610 - if (!dchan) 452 + if (!dchan || !len) 611 453 return NULL; 612 454 613 - if (!len) 614 - return NULL; 615 - 455 + pdev = to_mmp_pdma_dev(dchan->device); 616 456 chan = to_mmp_pdma_chan(dchan); 617 457 chan->byte_align = false; 618 458 ··· 634 476 chan->byte_align = true; 635 477 636 478 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); 637 - new->desc.dsadr = dma_src; 638 - new->desc.dtadr = dma_dst; 479 + pdev->ops->set_desc_src_addr(&new->desc, dma_src); 480 + pdev->ops->set_desc_dst_addr(&new->desc, dma_dst); 
639 481 640 482 if (!first) 641 483 first = new; 642 484 else 643 - prev->desc.ddadr = new->async_tx.phys; 485 + pdev->ops->set_desc_next_addr(&prev->desc, 486 + new->async_tx.phys); 644 487 645 488 new->async_tx.cookie = 0; 646 489 async_tx_ack(&new->async_tx); ··· 685 526 unsigned long flags, void *context) 686 527 { 687 528 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 529 + struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device); 688 530 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; 689 531 size_t len, avail; 690 532 struct scatterlist *sg; ··· 717 557 718 558 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); 719 559 if (dir == DMA_MEM_TO_DEV) { 720 - new->desc.dsadr = addr; 560 + pdev->ops->set_desc_src_addr(&new->desc, addr); 721 561 new->desc.dtadr = chan->dev_addr; 722 562 } else { 723 563 new->desc.dsadr = chan->dev_addr; 724 - new->desc.dtadr = addr; 564 + pdev->ops->set_desc_dst_addr(&new->desc, addr); 725 565 } 726 566 727 567 if (!first) 728 568 first = new; 729 569 else 730 - prev->desc.ddadr = new->async_tx.phys; 570 + pdev->ops->set_desc_next_addr(&prev->desc, 571 + new->async_tx.phys); 731 572 732 573 new->async_tx.cookie = 0; 733 574 async_tx_ack(&new->async_tx); ··· 768 607 unsigned long flags) 769 608 { 770 609 struct mmp_pdma_chan *chan; 610 + struct mmp_pdma_device *pdev; 771 611 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; 772 612 dma_addr_t dma_src, dma_dst; 773 613 774 614 if (!dchan || !len || !period_len) 775 615 return NULL; 616 + 617 + pdev = to_mmp_pdma_dev(dchan->device); 776 618 777 619 /* the buffer length must be a multiple of period_len */ 778 620 if (len % period_len != 0) ··· 813 649 814 650 new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | 815 651 (DCMD_LENGTH & period_len)); 816 - new->desc.dsadr = dma_src; 817 - new->desc.dtadr = dma_dst; 652 + pdev->ops->set_desc_src_addr(&new->desc, dma_src); 653 + pdev->ops->set_desc_dst_addr(&new->desc, dma_dst); 818 654 819 655 if (!first) 820 656 first = new; 821 657 else 822 - prev->desc.ddadr = new->async_tx.phys; 658 + pdev->ops->set_desc_next_addr(&prev->desc, 659 + new->async_tx.phys); 823 660 824 661 new->async_tx.cookie = 0; 825 662 async_tx_ack(&new->async_tx); ··· 841 676 first->async_tx.cookie = -EBUSY; 842 677 843 678 /* make the cyclic link */ 844 - new->desc.ddadr = first->async_tx.phys; 679 + pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys); 845 680 chan->cyclic_first = first; 846 681 847 682 return &first->async_tx; ··· 927 762 dma_cookie_t cookie) 928 763 { 929 764 struct mmp_pdma_desc_sw *sw; 930 - u32 curr, residue = 0; 765 + struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device); 766 + u64 curr; 767 + u32 residue = 0; 931 768 bool passed = false; 932 769 bool cyclic = chan->cyclic_first != NULL; 933 770 ··· 941 774 return 0; 942 775 943 776 if (chan->dir == DMA_DEV_TO_MEM) 944 - curr = readl(chan->phy->base + DTADR(chan->phy->idx)); 777 + curr = pdev->ops->read_dst_addr(chan->phy); 945 778 else 946 - curr = readl(chan->phy->base + DSADR(chan->phy->idx)); 779 + curr = pdev->ops->read_src_addr(chan->phy); 947 780 948 781 list_for_each_entry(sw, &chan->chain_running, node) { 949 - u32 start, end, len; 782 + u64 start, end; 783 + u32 len; 950 784 951 785 if (chan->dir == DMA_DEV_TO_MEM) 952 - start = sw->desc.dtadr; 786 + start = pdev->ops->get_desc_dst_addr(&sw->desc); 953 787 else 954 - start = sw->desc.dsadr; 788 + start = pdev->ops->get_desc_src_addr(&sw->desc); 955 789 956 790 len = sw->desc.dcmd & DCMD_LENGTH; 957 791 
end = start + len; ··· 968 800 if (passed) { 969 801 residue += len; 970 802 } else if (curr >= start && curr <= end) { 971 - residue += end - curr; 803 + residue += (u32)(end - curr); 972 804 passed = true; 973 805 } 974 806 ··· 1162 994 return 0; 1163 995 } 1164 996 997 + static const struct mmp_pdma_ops marvell_pdma_v1_ops = { 998 + .write_next_addr = write_next_addr_32, 999 + .read_src_addr = read_src_addr_32, 1000 + .read_dst_addr = read_dst_addr_32, 1001 + .set_desc_next_addr = set_desc_next_addr_32, 1002 + .set_desc_src_addr = set_desc_src_addr_32, 1003 + .set_desc_dst_addr = set_desc_dst_addr_32, 1004 + .get_desc_src_addr = get_desc_src_addr_32, 1005 + .get_desc_dst_addr = get_desc_dst_addr_32, 1006 + .run_bits = (DCSR_RUN), 1007 + .dma_mask = 0, /* let OF/platform set DMA mask */ 1008 + }; 1009 + 1010 + static const struct mmp_pdma_ops spacemit_k1_pdma_ops = { 1011 + .write_next_addr = write_next_addr_64, 1012 + .read_src_addr = read_src_addr_64, 1013 + .read_dst_addr = read_dst_addr_64, 1014 + .set_desc_next_addr = set_desc_next_addr_64, 1015 + .set_desc_src_addr = set_desc_src_addr_64, 1016 + .set_desc_dst_addr = set_desc_dst_addr_64, 1017 + .get_desc_src_addr = get_desc_src_addr_64, 1018 + .get_desc_dst_addr = get_desc_dst_addr_64, 1019 + .run_bits = (DCSR_RUN | DCSR_LPAEEN), 1020 + .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */ 1021 + }; 1022 + 1165 1023 static const struct of_device_id mmp_pdma_dt_ids[] = { 1166 - { .compatible = "marvell,pdma-1.0", }, 1167 - {} 1024 + { 1025 + .compatible = "marvell,pdma-1.0", 1026 + .data = &marvell_pdma_v1_ops 1027 + }, { 1028 + .compatible = "spacemit,k1-pdma", 1029 + .data = &spacemit_k1_pdma_ops 1030 + }, { 1031 + /* sentinel */ 1032 + } 1168 1033 }; 1169 1034 MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); 1170 1035 ··· 1220 1019 { 1221 1020 struct mmp_pdma_device *pdev; 1222 1021 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); 1022 + struct clk *clk; 1023 + struct reset_control *rst; 1223 1024 int i, ret, irq = 0; 1224 1025 int dma_channels = 0, irq_num = 0; 1225 1026 const enum dma_slave_buswidth widths = ··· 1239 1036 pdev->base = devm_platform_ioremap_resource(op, 0); 1240 1037 if (IS_ERR(pdev->base)) 1241 1038 return PTR_ERR(pdev->base); 1039 + 1040 + clk = devm_clk_get_optional_enabled(pdev->dev, NULL); 1041 + if (IS_ERR(clk)) 1042 + return PTR_ERR(clk); 1043 + 1044 + rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev, 1045 + NULL); 1046 + if (IS_ERR(rst)) 1047 + return PTR_ERR(rst); 1048 + 1049 + pdev->ops = of_device_get_match_data(&op->dev); 1050 + if (!pdev->ops) 1051 + return -ENODEV; 1242 1052 1243 1053 if (pdev->dev->of_node) { 1244 1054 /* Parse new and deprecated dma-channels properties */ ··· 1314 1098 pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); 1315 1099 pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; 1316 1100 1317 - if (pdev->dev->coherent_dma_mask) 1101 + /* Set DMA mask based on ops->dma_mask, or OF/platform */ 1102 + if (pdev->ops->dma_mask) 1103 + dma_set_mask(pdev->dev, pdev->ops->dma_mask); 1104 + else if (pdev->dev->coherent_dma_mask) 1318 1105 dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); 1319 1106 else 1320 1107 dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+2 -2
drivers/dma/mv_xor.c
··· 1013 1013 1014 1014 dma_async_device_unregister(&mv_chan->dmadev); 1015 1015 1016 - dma_free_coherent(dev, MV_XOR_POOL_SIZE, 1016 + dma_free_wc(dev, MV_XOR_POOL_SIZE, 1017 1017 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1018 1018 dma_unmap_single(dev, mv_chan->dummy_src_addr, 1019 1019 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); ··· 1163 1163 err_free_irq: 1164 1164 free_irq(mv_chan->irq, mv_chan); 1165 1165 err_free_dma: 1166 - dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, 1166 + dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE, 1167 1167 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1168 1168 err_unmap_dst: 1169 1169 dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
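The rule behind the mv_xor pairing above is that buffers obtained with dma_alloc_wc() carry a write-combining mapping and must be released with the matching dma_free_wc(), not dma_free_coherent(). A minimal sketch of the paired calls, using a placeholder device and pool size rather than mv_xor's own identifiers:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define EXAMPLE_POOL_SIZE	PAGE_SIZE	/* placeholder, not MV_XOR_POOL_SIZE */

static int example_pool_roundtrip(struct device *dev)
{
	dma_addr_t pool_dma;
	void *pool;

	pool = dma_alloc_wc(dev, EXAMPLE_POOL_SIZE, &pool_dma, GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	/* ... fill descriptors through the write-combined mapping ... */

	dma_free_wc(dev, EXAMPLE_POOL_SIZE, pool, pool_dma);
	return 0;
}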
+2 -2
drivers/dma/ppc4xx/adma.c
··· 874 874 pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n", 875 875 __func__, src_cnt, state, addr_count, order); 876 876 for (i = 0; i < src_cnt; i++) 877 - pr_err("\t[%d] 0x%llx \n", i, srcs[i]); 877 + pr_err("\t[%d] 0x%llx\n", i, srcs[i]); 878 878 BUG(); 879 879 } 880 880 ··· 3636 3636 3637 3637 ppc440spe_chan = to_ppc440spe_adma_chan(chan); 3638 3638 dev_dbg(ppc440spe_chan->device->common.dev, 3639 - "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id, 3639 + "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id, 3640 3640 __func__, ppc440spe_chan->pending); 3641 3641 3642 3642 if (ppc440spe_chan->pending) {
+19 -6
drivers/dma/sh/shdma-base.c
··· 129 129 const struct shdma_ops *ops = sdev->ops; 130 130 dev_dbg(schan->dev, "Bring up channel %d\n", 131 131 schan->id); 132 - /* 133 - * TODO: .xfer_setup() might fail on some platforms. 134 - * Make it int then, on error remove chunks from the 135 - * queue again 136 - */ 137 - ops->setup_xfer(schan, schan->slave_id); 132 + 133 + ret = ops->setup_xfer(schan, schan->slave_id); 134 + if (ret < 0) { 135 + dev_err(schan->dev, "setup_xfer failed: %d\n", ret); 136 + 137 + /* Remove chunks from the queue and mark them as idle */ 138 + list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) { 139 + if (chunk->cookie == cookie) { 140 + chunk->mark = DESC_IDLE; 141 + list_move(&chunk->node, &schan->ld_free); 142 + } 143 + } 144 + 145 + schan->pm_state = SHDMA_PM_ESTABLISHED; 146 + ret = pm_runtime_put(schan->dev); 147 + 148 + spin_unlock_irq(&schan->chan_lock); 149 + return ret; 150 + } 138 151 139 152 if (schan->pm_state == SHDMA_PM_PENDING) 140 153 shdma_chan_xfer_ld_queue(schan);
+13 -4
drivers/dma/sh/shdmac.c
··· 300 300 return dmae_is_busy(sh_chan); 301 301 } 302 302 303 - static void sh_dmae_setup_xfer(struct shdma_chan *schan, 304 - int slave_id) 303 + static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id) 305 304 { 306 305 struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 307 306 shdma_chan); 308 307 308 + int ret = 0; 309 309 if (slave_id >= 0) { 310 310 const struct sh_dmae_slave_config *cfg = 311 311 sh_chan->config; 312 312 313 - dmae_set_dmars(sh_chan, cfg->mid_rid); 314 - dmae_set_chcr(sh_chan, cfg->chcr); 313 + ret = dmae_set_dmars(sh_chan, cfg->mid_rid); 314 + if (ret < 0) 315 + goto END; 316 + 317 + ret = dmae_set_chcr(sh_chan, cfg->chcr); 318 + if (ret < 0) 319 + goto END; 320 + 315 321 } else { 316 322 dmae_init(sh_chan); 317 323 } 324 + 325 + END: 326 + return ret; 318 327 } 319 328 320 329 /*
+94
drivers/dma/xilinx/xilinx_dma.c
··· 2173 2173 } 2174 2174 2175 2175 /** 2176 + * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE 2177 + * transaction from DMA vectors 2178 + * @dchan: DMA channel 2179 + * @vecs: Array of DMA vectors that should be transferred 2180 + * @nb: number of entries in @vecs 2181 + * @direction: DMA direction 2182 + * @flags: transfer ack flags 2183 + * 2184 + * Return: Async transaction descriptor on success and NULL on failure 2185 + */ 2186 + static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec( 2187 + struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb, 2188 + enum dma_transfer_direction direction, unsigned long flags) 2189 + { 2190 + struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2191 + struct xilinx_dma_tx_descriptor *desc; 2192 + struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL; 2193 + size_t copy; 2194 + size_t sg_used; 2195 + unsigned int i; 2196 + 2197 + if (!is_slave_direction(direction) || direction != chan->direction) 2198 + return NULL; 2199 + 2200 + desc = xilinx_dma_alloc_tx_descriptor(chan); 2201 + if (!desc) 2202 + return NULL; 2203 + 2204 + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 2205 + desc->async_tx.tx_submit = xilinx_dma_tx_submit; 2206 + 2207 + /* Build transactions using information from DMA vectors */ 2208 + for (i = 0; i < nb; i++) { 2209 + sg_used = 0; 2210 + 2211 + /* Loop until the entire dma_vec entry is used */ 2212 + while (sg_used < vecs[i].len) { 2213 + struct xilinx_axidma_desc_hw *hw; 2214 + 2215 + /* Get a free segment */ 2216 + segment = xilinx_axidma_alloc_tx_segment(chan); 2217 + if (!segment) 2218 + goto error; 2219 + 2220 + /* 2221 + * Calculate the maximum number of bytes to transfer, 2222 + * making sure it is less than the hw limit 2223 + */ 2224 + copy = xilinx_dma_calc_copysize(chan, vecs[i].len, 2225 + sg_used); 2226 + hw = &segment->hw; 2227 + 2228 + /* Fill in the descriptor */ 2229 + xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0); 2230 + hw->control = copy; 2231 + 2232 + if (prev) 2233 + prev->hw.next_desc = segment->phys; 2234 + 2235 + prev = segment; 2236 + sg_used += copy; 2237 + 2238 + /* 2239 + * Insert the segment into the descriptor segments 2240 + * list. 
2241 + */ 2242 + list_add_tail(&segment->node, &desc->segments); 2243 + } 2244 + } 2245 + 2246 + head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); 2247 + desc->async_tx.phys = head->phys; 2248 + 2249 + /* For the last DMA_MEM_TO_DEV transfer, set EOP */ 2250 + if (chan->direction == DMA_MEM_TO_DEV) { 2251 + segment->hw.control |= XILINX_DMA_BD_SOP; 2252 + segment = list_last_entry(&desc->segments, 2253 + struct xilinx_axidma_tx_segment, 2254 + node); 2255 + segment->hw.control |= XILINX_DMA_BD_EOP; 2256 + } 2257 + 2258 + if (chan->xdev->has_axistream_connected) 2259 + desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops; 2260 + 2261 + return &desc->async_tx; 2262 + 2263 + error: 2264 + xilinx_dma_free_tx_descriptor(chan, desc); 2265 + return NULL; 2266 + } 2267 + 2268 + /** 2176 2269 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 2177 2270 * @dchan: DMA channel 2178 2271 * @sgl: scatterlist to transfer to/from ··· 3273 3180 xdev->common.device_config = xilinx_dma_device_config; 3274 3181 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 3275 3182 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); 3183 + xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec; 3276 3184 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 3277 3185 xdev->common.device_prep_dma_cyclic = 3278 3186 xilinx_dma_prep_dma_cyclic;
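Clients reach the new xilinx_dma hook through the generic dmaengine_prep_peripheral_dma_vec() wrapper in dmaengine.h. A hypothetical client sketch (the function name and the two buffers are illustrative only):

#include <linux/dmaengine.h>
#include <linux/kernel.h>

static int my_submit_vecs(struct dma_chan *chan,
			  dma_addr_t a, size_t a_len,
			  dma_addr_t b, size_t b_len)
{
	struct dma_vec vecs[] = {
		{ .addr = a, .len = a_len },
		{ .addr = b, .len = b_len },
	};
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						 DMA_MEM_TO_DEV,
						 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}

Each dma_vec entry is a pre-mapped (addr, len) pair, so the driver-side loop in the diff above never has to walk a scatterlist.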
+3 -2
drivers/dma/xilinx/zynqmp_dma.c
··· 1173 1173 dma_async_device_unregister(&zdev->common); 1174 1174 1175 1175 zynqmp_dma_chan_remove(zdev->chan); 1176 - pm_runtime_disable(zdev->dev); 1177 - if (!pm_runtime_enabled(zdev->dev)) 1176 + if (pm_runtime_active(zdev->dev)) 1178 1177 zynqmp_dma_runtime_suspend(zdev->dev); 1178 + pm_runtime_disable(zdev->dev); 1179 1179 } 1180 1180 1181 1181 static const struct of_device_id zynqmp_dma_of_match[] = { ··· 1193 1193 }, 1194 1194 .probe = zynqmp_dma_probe, 1195 1195 .remove = zynqmp_dma_remove, 1196 + .shutdown = zynqmp_dma_remove, 1196 1197 }; 1197 1198 1198 1199 module_platform_driver(zynqmp_dma_driver);
+1 -1
include/linux/dmaengine.h
··· 594 594 * @phys: physical address of the descriptor 595 595 * @chan: target channel for this operation 596 596 * @tx_submit: accept the descriptor, assign ordered cookie and mark the 597 + * descriptor pending. To be pushed on .issue_pending() call 597 598 * @desc_free: driver's callback function to free a resusable descriptor 598 599 * after completion 599 - * descriptor pending. To be pushed on .issue_pending() call 600 600 * @callback: routine to call after this operation is complete 601 601 * @callback_result: error result from a DMA transaction 602 602 * @callback_param: general parameter to pass to the callback routine
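The corrected kerneldoc ordering restates the dmaengine contract: ->tx_submit merely assigns a cookie and queues the descriptor, and nothing reaches the hardware until ->device_issue_pending runs. For drivers built on virt-dma, vchan_tx_prep() wires ->tx_submit to vchan_tx_submit(), which does exactly that, so issue_pending is where the controller is actually kicked. A provider-side sketch under that assumption (my_issue_pending() and my_start_hw() are hypothetical, not from any driver in this pull):

#include <linux/spinlock.h>
#include "virt-dma.h"		/* drivers/dma/ private helpers */

static void my_start_hw(struct virt_dma_chan *vc)
{
	/* hypothetical: take the first issued descriptor and program it */
}

static void my_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	/* Move everything submitted so far onto the issued list ... */
	if (vchan_issue_pending(vc))
		my_start_hw(vc);	/* ... and only now touch the hardware */
	spin_unlock_irqrestore(&vc->lock, flags);
}

The client-visible half of that contract is the usual dmaengine_submit() / dma_async_issue_pending() two-step shown earlier for the dw-edma change.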
+1 -1
include/linux/shdma-base.h
··· 96 96 int (*desc_setup)(struct shdma_chan *, struct shdma_desc *, 97 97 dma_addr_t, dma_addr_t, size_t *); 98 98 int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool); 99 - void (*setup_xfer)(struct shdma_chan *, int); 99 + int (*setup_xfer)(struct shdma_chan *, int); 100 100 void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); 101 101 struct shdma_desc *(*embedded_desc)(void *, int); 102 102 bool (*chan_irq)(struct shdma_chan *, int);