Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
"Nothing special, this includes a couple of new device support and new
driver support and bunch of driver updates.

New support:

- Tegra gpcdma driver support

- Qualcomm SM8350, SM8450 and SC7280 device support

- Renesas RZ/N1 DMA and platform support

Updates:

- stm32 device pause/resume support and updates

- DMA memset ops Documentation and usage clarification

- deprecate '#dma-channels' & '#dma-requests' bindings

- driver updates for stm32, ptdma, idxd, etc"

* tag 'dmaengine-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (87 commits)
dmaengine: idxd: make idxd_wq_enable() return 0 if wq is already enabled
dmaengine: sun6i: Add support for the D1 variant
dmaengine: sun6i: Add support for 34-bit physical addresses
dmaengine: sun6i: Do not use virt_to_phys
dt-bindings: dma: sun50i-a64: Add compatible for D1
dmaengine: tegra: Remove unused switch case
dmaengine: tegra: Fix uninitialized variable usage
dmaengine: stm32-dma: add device_pause/device_resume support
dmaengine: stm32-dma: rename pm ops before dma pause/resume introduction
dmaengine: stm32-dma: pass DMA_SxSCR value to stm32_dma_handle_chan_done()
dmaengine: stm32-dma: introduce stm32_dma_sg_inc to manage chan->next_sg
dmaengine: stm32-dmamux: avoid reset of dmamux if used by coprocessor
dmaengine: qcom: gpi: Add support for sc7280
dt-bindings: dma: pl330: Add power-domains
dmaengine: stm32-mdma: use dev_dbg on non-busy channel spurious it
dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler()
dmaengine: stm32-mdma: remove GISR1 register
dmaengine: ti: deprecate '#dma-channels'
dmaengine: mmp: deprecate '#dma-channels'
dmaengine: pxa: deprecate '#dma-channels' and '#dma-requests'
...

+2745 -350
+11
Documentation/devicetree/bindings/clock/renesas,r9a06g032-sysctrl.yaml
··· 39 39 '#power-domain-cells': 40 40 const: 0 41 41 42 + '#address-cells': 43 + const: 1 44 + 45 + '#size-cells': 46 + const: 1 47 + 48 + patternProperties: 49 + "^dma-router@[a-f0-9]+$": 50 + type: object 51 + $ref: "../dma/renesas,rzn1-dmamux.yaml#" 52 + 42 53 required: 43 54 - compatible 44 55 - reg
+6 -3
Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
··· 20 20 21 21 compatible: 22 22 oneOf: 23 - - const: allwinner,sun50i-a64-dma 24 - - const: allwinner,sun50i-a100-dma 25 - - const: allwinner,sun50i-h6-dma 23 + - enum: 24 + - allwinner,sun20i-d1-dma 25 + - allwinner,sun50i-a64-dma 26 + - allwinner,sun50i-a100-dma 27 + - allwinner,sun50i-h6-dma 26 28 - items: 27 29 - const: allwinner,sun8i-r40-dma 28 30 - const: allwinner,sun50i-a64-dma ··· 60 58 properties: 61 59 compatible: 62 60 enum: 61 + - allwinner,sun20i-d1-dma 63 62 - allwinner,sun50i-a100-dma 64 63 - allwinner,sun50i-h6-dma 65 64
+1 -1
Documentation/devicetree/bindings/dma/altr,msgdma.yaml
··· 7 7 title: Altera mSGDMA IP core 8 8 9 9 maintainers: 10 - - Olivier Dautricourt <olivier.dautricourt@orolia.com> 10 + - Olivier Dautricourt <olivierdautricourt@gmail.com> 11 11 12 12 description: | 13 13 Altera / Intel modular Scatter-Gather Direct Memory Access (mSGDMA)
+3
Documentation/devicetree/bindings/dma/arm,pl330.yaml
··· 55 55 56 56 dma-coherent: true 57 57 58 + power-domains: 59 + maxItems: 1 60 + 58 61 resets: 59 62 minItems: 1 60 63 maxItems: 2
+6 -4
Documentation/devicetree/bindings/dma/mmp-dma.txt
··· 10 10 or one irq for pdma device 11 11 12 12 Optional properties: 13 - - #dma-channels: Number of DMA channels supported by the controller (defaults 13 + - dma-channels: Number of DMA channels supported by the controller (defaults 14 14 to 32 when not specified) 15 - - #dma-requests: Number of DMA requestor lines supported by the controller 15 + - #dma-channels: deprecated 16 + - dma-requests: Number of DMA requestor lines supported by the controller 16 17 (defaults to 32 when not specified) 18 + - #dma-requests: deprecated 17 19 18 20 "marvell,pdma-1.0" 19 21 Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688. ··· 35 33 reg = <0xd4000000 0x10000>; 36 34 interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>; 37 35 interrupt-parent = <&intcmux32>; 38 - #dma-channels = <16>; 36 + dma-channels = <16>; 39 37 }; 40 38 41 39 /* ··· 47 45 compatible = "marvell,pdma-1.0"; 48 46 reg = <0xd4000000 0x10000>; 49 47 interrupts = <47>; 50 - #dma-channels = <16>; 48 + dma-channels = <16>; 51 49 }; 52 50 53 51
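
The mmp_pdma and pxa_dma driver changes further down in this merge implement the deprecation above by trying the new property first and falling back to the legacy one. A condensed sketch of that lookup pattern (the helper name is made up for illustration, not part of this merge):

#include <linux/of.h>

/* Prefer the generic "dma-channels" property; fall back to the deprecated
 * "#dma-channels"; keep the binding's default of 32 if neither is present.
 * Illustrative helper only. */
static u32 parse_dma_channels(struct device_node *np)
{
	u32 dma_channels = 32;

	if (of_property_read_u32(np, "dma-channels", &dma_channels))
		of_property_read_u32(np, "#dma-channels", &dma_channels);

	return dma_channels;
}
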
+110
Documentation/devicetree/bindings/dma/nvidia,tegra186-gpc-dma.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/dma/nvidia,tegra186-gpc-dma.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: NVIDIA Tegra GPC DMA Controller Device Tree Bindings 8 + 9 + description: | 10 + The Tegra General Purpose Central (GPC) DMA controller is used for faster 11 + data transfers between memory to memory, memory to device and device to 12 + memory. 13 + 14 + maintainers: 15 + - Jon Hunter <jonathanh@nvidia.com> 16 + - Rajesh Gumasta <rgumasta@nvidia.com> 17 + 18 + allOf: 19 + - $ref: "dma-controller.yaml#" 20 + 21 + properties: 22 + compatible: 23 + oneOf: 24 + - const: nvidia,tegra186-gpcdma 25 + - items: 26 + - const: nvidia,tegra194-gpcdma 27 + - const: nvidia,tegra186-gpcdma 28 + 29 + "#dma-cells": 30 + const: 1 31 + 32 + reg: 33 + maxItems: 1 34 + 35 + interrupts: 36 + description: 37 + Should contain all of the per-channel DMA interrupts in 38 + ascending order with respect to the DMA channel index. 39 + minItems: 1 40 + maxItems: 31 41 + 42 + resets: 43 + maxItems: 1 44 + 45 + reset-names: 46 + const: gpcdma 47 + 48 + iommus: 49 + maxItems: 1 50 + 51 + dma-coherent: true 52 + 53 + required: 54 + - compatible 55 + - reg 56 + - interrupts 57 + - resets 58 + - reset-names 59 + - "#dma-cells" 60 + - iommus 61 + 62 + additionalProperties: false 63 + 64 + examples: 65 + - | 66 + #include <dt-bindings/interrupt-controller/arm-gic.h> 67 + #include <dt-bindings/memory/tegra186-mc.h> 68 + #include <dt-bindings/reset/tegra186-reset.h> 69 + 70 + dma-controller@2600000 { 71 + compatible = "nvidia,tegra186-gpcdma"; 72 + reg = <0x2600000 0x210000>; 73 + resets = <&bpmp TEGRA186_RESET_GPCDMA>; 74 + reset-names = "gpcdma"; 75 + interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>, 76 + <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>, 77 + <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>, 78 + <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>, 79 + <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>, 80 + <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>, 81 + <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>, 82 + <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>, 83 + <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>, 84 + <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>, 85 + <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>, 86 + <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>, 87 + <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>, 88 + <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>, 89 + <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>, 90 + <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, 91 + <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, 92 + <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>, 93 + <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>, 94 + <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>, 95 + <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, 96 + <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, 97 + <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, 98 + <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, 99 + <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, 100 + <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, 101 + <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, 102 + <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, 103 + <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, 104 + <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, 105 + <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; 106 + #dma-cells = <1>; 107 + iommus = <&smmu TEGRA186_SID_GPCDMA_0>; 108 + dma-coherent; 109 + }; 110 + ...
+3
Documentation/devicetree/bindings/dma/qcom,gpi.yaml
··· 19 19 properties: 20 20 compatible: 21 21 enum: 22 + - qcom,sc7280-gpi-dma 22 23 - qcom,sdm845-gpi-dma 23 24 - qcom,sm8150-gpi-dma 24 25 - qcom,sm8250-gpi-dma 26 + - qcom,sm8350-gpi-dma 27 + - qcom,sm8450-gpi-dma 25 28 26 29 reg: 27 30 maxItems: 1
+4 -6
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
··· 42 42 - const: renesas,rcar-dmac 43 43 44 44 - items: 45 - - const: renesas,dmac-r8a779a0 # R-Car V3U 46 - 47 - - items: 48 - - const: renesas,dmac-r8a779f0 # R-Car S4-8 49 - - const: renesas,rcar-gen4-dmac 45 + - enum: 46 + - renesas,dmac-r8a779a0 # R-Car V3U 47 + - renesas,dmac-r8a779f0 # R-Car S4-8 48 + - const: renesas,rcar-gen4-dmac # R-Car Gen4 50 49 51 50 reg: true 52 51 ··· 120 121 compatible: 121 122 contains: 122 123 enum: 123 - - renesas,dmac-r8a779a0 124 124 - renesas,rcar-gen4-dmac 125 125 then: 126 126 properties:
+51
Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/dma/renesas,rzn1-dmamux.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Renesas RZ/N1 DMA mux 8 + 9 + maintainers: 10 + - Miquel Raynal <miquel.raynal@bootlin.com> 11 + 12 + allOf: 13 + - $ref: "dma-router.yaml#" 14 + 15 + properties: 16 + compatible: 17 + const: renesas,rzn1-dmamux 18 + 19 + reg: 20 + maxItems: 1 21 + description: DMA mux first register offset within the system control parent. 22 + 23 + '#dma-cells': 24 + const: 6 25 + description: 26 + The first four cells are dedicated to the master DMA controller. The fifth 27 + cell gives the DMA mux bit index that must be set starting from 0. The 28 + sixth cell gives the binary value that must be written there, ie. 0 or 1. 29 + 30 + dma-masters: 31 + minItems: 1 32 + maxItems: 2 33 + 34 + dma-requests: 35 + const: 32 36 + 37 + required: 38 + - reg 39 + - dma-requests 40 + 41 + additionalProperties: false 42 + 43 + examples: 44 + - | 45 + dma-router@a0 { 46 + compatible = "renesas,rzn1-dmamux"; 47 + reg = <0xa0 4>; 48 + #dma-cells = <6>; 49 + dma-masters = <&dma0 &dma1>; 50 + dma-requests = <32>; 51 + };
+17 -2
Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
··· 28 28 properties: 29 29 compatible: 30 30 items: 31 - - const: sifive,fu540-c000-pdma 31 + - enum: 32 + - sifive,fu540-c000-pdma 33 + - const: sifive,pdma0 34 + description: 35 + Should be "sifive,<chip>-pdma" and "sifive,pdma<version>". 36 + Supported compatible strings are - 37 + "sifive,fu540-c000-pdma" for the SiFive PDMA v0 as integrated onto the 38 + SiFive FU540 chip resp and "sifive,pdma0" for the SiFive PDMA v0 IP block 39 + with no chip integration tweaks. 32 40 33 41 reg: 34 42 maxItems: 1 ··· 44 36 interrupts: 45 37 minItems: 1 46 38 maxItems: 8 39 + 40 + dma-channels: 41 + description: For backwards-compatibility, the default value is 4 42 + minimum: 1 43 + maximum: 4 44 + default: 4 47 45 48 46 '#dma-cells': 49 47 const: 1 ··· 64 50 examples: 65 51 - | 66 52 dma-controller@3000000 { 67 - compatible = "sifive,fu540-c000-pdma"; 53 + compatible = "sifive,fu540-c000-pdma", "sifive,pdma0"; 68 54 reg = <0x3000000 0x8000>; 55 + dma-channels = <4>; 69 56 interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, <30>; 70 57 #dma-cells = <1>; 71 58 };
+7 -1
Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
··· 15 15 16 16 properties: 17 17 compatible: 18 - const: snps,dma-spear1340 18 + oneOf: 19 + - const: snps,dma-spear1340 20 + - items: 21 + - enum: 22 + - renesas,r9a06g032-dma 23 + - const: renesas,rzn1-dma 24 + 19 25 20 26 "#dma-cells": 21 27 minimum: 3
+5 -2
Documentation/devicetree/bindings/dma/sprd-dma.txt
··· 8 8 - interrupts: Should contain one interrupt shared by all channel. 9 9 - #dma-cells: must be <1>. Used to represent the number of integer 10 10 cells in the dmas property of client device. 11 - - #dma-channels : Number of DMA channels supported. Should be 32. 11 + - dma-channels : Number of DMA channels supported. Should be 32. 12 12 - clock-names: Should contain the clock of the DMA controller. 13 13 - clocks: Should contain a clock specifier for each entry in clock-names. 14 + 15 + Deprecated properties: 16 + - #dma-channels : Number of DMA channels supported. Should be 32. 14 17 15 18 Example: 16 19 ··· 23 20 reg = <0x20100000 0x4000>; 24 21 interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>; 25 22 #dma-cells = <1>; 26 - #dma-channels = <32>; 23 + dma-channels = <32>; 27 24 clock-names = "enable"; 28 25 clocks = <&clk_ap_ahb_gates 5>; 29 26 };
+5 -1
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
··· 110 110 Required properties: 111 111 - dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs, 112 112 where Channel ID is '0' for write/tx and '1' for read/rx 113 - channel. 113 + channel. For MCMDA, MM2S channel(write/tx) ID start from 114 + '0' and is in [0-15] range. S2MM channel(read/rx) ID start 115 + from '16' and is in [16-31] range. These channels ID are 116 + fixed irrespective of IP configuration. 117 + 114 118 - dma-names: a list of DMA channel names, one per "dmas" entry 115 119 116 120 Example:
+7 -1
Documentation/driver-api/dmaengine/provider.rst
··· 206 206 - The device is able to perform parity check using RAID6 P+Q 207 207 algorithm against a memory buffer. 208 208 209 + - DMA_MEMSET 210 + 211 + - The device is able to fill memory with the provided pattern 212 + 213 + - The pattern is treated as a single byte signed value. 214 + 209 215 - DMA_INTERRUPT 210 216 211 217 - The device is able to trigger a dummy transfer that will ··· 463 457 - Should use dma_set_residue to report it 464 458 465 459 - In the case of a cyclic transfer, it should only take into 466 - account the current period. 460 + account the total size of the cyclic buffer. 467 461 468 462 - Should return DMA_OUT_OF_ORDER if the device does not support in order 469 463 completion and is completing the operation out of order.
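
The at_hdmac and at_xdmac patches in this merge apply the clarified DMA_MEMSET semantics by replicating the first byte of the requested value across the 32-bit fill word written into their descriptors. A minimal sketch of that replication (the helper name is hypothetical, not taken from either driver):

#include <linux/types.h>

/* Only the first byte of a DMA_MEMSET 'value' is meaningful; spread it
 * across a 32-bit word before programming the fill pattern. */
static u32 dma_memset_fill_word(int value)
{
	u8 pattern = value & 0xff;

	return (u32)pattern << 24 | (u32)pattern << 16 |
	       (u32)pattern << 8 | pattern;
}
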
+2 -1
MAINTAINERS
··· 820 820 F: drivers/mailbox/mailbox-altera.c 821 821 822 822 ALTERA MSGDMA IP CORE DRIVER 823 - M: Olivier Dautricourt <olivier.dautricourt@orolia.com> 823 + M: Olivier Dautricourt <olivierdautricourt@gmail.com> 824 824 R: Stefan Roese <sr@denx.de> 825 825 L: dmaengine@vger.kernel.org 826 826 S: Odd Fixes ··· 19202 19202 M: Viresh Kumar <vireshk@kernel.org> 19203 19203 R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 19204 19204 S: Maintained 19205 + F: Documentation/devicetree/bindings/dma/renesas,rzn1-dmamux.yaml 19205 19206 F: Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml 19206 19207 F: drivers/dma/dw/ 19207 19208 F: include/dt-bindings/dma/dw-dmac.h
+39 -1
drivers/clk/renesas/r9a06g032-clocks.c
··· 16 16 #include <linux/math64.h> 17 17 #include <linux/of.h> 18 18 #include <linux/of_address.h> 19 + #include <linux/of_platform.h> 19 20 #include <linux/platform_device.h> 20 21 #include <linux/pm_clock.h> 21 22 #include <linux/pm_domain.h> 22 23 #include <linux/slab.h> 24 + #include <linux/soc/renesas/r9a06g032-sysctrl.h> 23 25 #include <linux/spinlock.h> 24 26 #include <dt-bindings/clock/r9a06g032-sysctrl.h> 27 + 28 + #define R9A06G032_SYSCTRL_DMAMUX 0xA0 25 29 26 30 struct r9a06g032_gate { 27 31 u16 gate, reset, ready, midle, ··· 318 314 spinlock_t lock; /* protects concurrent access to gates */ 319 315 void __iomem *reg; 320 316 }; 317 + 318 + static struct r9a06g032_priv *sysctrl_priv; 319 + 320 + /* Exported helper to access the DMAMUX register */ 321 + int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) 322 + { 323 + unsigned long flags; 324 + u32 dmamux; 325 + 326 + if (!sysctrl_priv) 327 + return -EPROBE_DEFER; 328 + 329 + spin_lock_irqsave(&sysctrl_priv->lock, flags); 330 + 331 + dmamux = readl(sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX); 332 + dmamux &= ~mask; 333 + dmamux |= val & mask; 334 + writel(dmamux, sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX); 335 + 336 + spin_unlock_irqrestore(&sysctrl_priv->lock, flags); 337 + 338 + return 0; 339 + } 340 + EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux); 321 341 322 342 /* register/bit pairs are encoded as an uint16_t */ 323 343 static void ··· 991 963 if (error) 992 964 return error; 993 965 994 - return r9a06g032_add_clk_domain(dev); 966 + error = r9a06g032_add_clk_domain(dev); 967 + if (error) 968 + return error; 969 + 970 + sysctrl_priv = clocks; 971 + 972 + error = of_platform_populate(np, NULL, NULL, dev); 973 + if (error) 974 + dev_err(dev, "Failed to populate children (%d)\n", error); 975 + 976 + return 0; 995 977 } 996 978 997 979 static const struct of_device_id r9a06g032_match[] = {
+13 -1
drivers/dma/Kconfig
··· 163 163 164 164 config DMA_SUN6I 165 165 tristate "Allwinner A31 SoCs DMA support" 166 - depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST 166 + depends on ARCH_SUNXI || COMPILE_TEST 167 167 depends on RESET_CONTROLLER 168 168 select DMA_ENGINE 169 169 select DMA_VIRTUAL_CHANNELS ··· 628 628 help 629 629 Support the TXx9 SoC internal DMA controller. This can be 630 630 integrated in chips such as the Toshiba TX4927/38/39. 631 + 632 + config TEGRA186_GPC_DMA 633 + tristate "NVIDIA Tegra GPC DMA support" 634 + depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT 635 + depends on IOMMU_API 636 + select DMA_ENGINE 637 + help 638 + Support for the NVIDIA Tegra General Purpose Central DMA controller. 639 + The DMA controller has multiple DMA channels which can be configured 640 + for different peripherals like UART, SPI, etc which are on APB bus. 641 + This DMA controller transfers data from memory to peripheral FIFO 642 + or vice versa. It also supports memory to memory data transfer. 631 643 632 644 config TEGRA20_APB_DMA 633 645 tristate "NVIDIA Tegra20 APB DMA support"
+1
drivers/dma/Makefile
··· 72 72 obj-$(CONFIG_SPRD_DMA) += sprd-dma.o 73 73 obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o 74 74 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 75 + obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o 75 76 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o 76 77 obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o 77 78 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-11
drivers/dma/amba-pl08x.c
··· 1535 1535 vchan_free_chan_resources(to_virt_chan(chan)); 1536 1536 } 1537 1537 1538 - static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1539 - struct dma_chan *chan, unsigned long flags) 1540 - { 1541 - struct dma_async_tx_descriptor *retval = NULL; 1542 - 1543 - return retval; 1544 - } 1545 - 1546 1538 /* 1547 1539 * Code accessing dma_async_is_complete() in a tight loop may give problems. 1548 1540 * If slaves are relying on interrupts to signal completion this function ··· 2752 2760 pl08x->memcpy.dev = &adev->dev; 2753 2761 pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 2754 2762 pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 2755 - pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2756 2763 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2757 2764 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2758 2765 pl08x->memcpy.device_config = pl08x_config; ··· 2778 2787 pl08x->slave.dev = &adev->dev; 2779 2788 pl08x->slave.device_free_chan_resources = 2780 2789 pl08x_free_chan_resources; 2781 - pl08x->slave.device_prep_dma_interrupt = 2782 - pl08x_prep_dma_interrupt; 2783 2790 pl08x->slave.device_tx_status = pl08x_dma_tx_status; 2784 2791 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2785 2792 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+9 -1
drivers/dma/at_hdmac.c
··· 942 942 struct at_desc *desc; 943 943 void __iomem *vaddr; 944 944 dma_addr_t paddr; 945 + char fill_pattern; 945 946 946 947 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, 947 948 &dest, value, len, flags); ··· 964 963 __func__); 965 964 return NULL; 966 965 } 967 - *(u32*)vaddr = value; 966 + 967 + /* Only the first byte of value is to be used according to dmaengine */ 968 + fill_pattern = (char)value; 969 + 970 + *(u32*)vaddr = (fill_pattern << 24) | 971 + (fill_pattern << 16) | 972 + (fill_pattern << 8) | 973 + fill_pattern; 968 974 969 975 desc = atc_create_memset_desc(chan, paddr, dest, len); 970 976 if (!desc) {
+8 -1
drivers/dma/at_xdmac.c
··· 1202 1202 unsigned long flags; 1203 1203 size_t ublen; 1204 1204 u32 dwidth; 1205 + char pattern; 1205 1206 /* 1206 1207 * WARNING: The channel configuration is set here since there is no 1207 1208 * dmaengine_slave_config call in this case. Moreover we don't know the ··· 1245 1244 1246 1245 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); 1247 1246 1247 + /* Only the first byte of value is to be used according to dmaengine */ 1248 + pattern = (char)value; 1249 + 1248 1250 ublen = len >> dwidth; 1249 1251 1250 1252 desc->lld.mbr_da = dst_addr; 1251 - desc->lld.mbr_ds = value; 1253 + desc->lld.mbr_ds = (pattern << 24) | 1254 + (pattern << 16) | 1255 + (pattern << 8) | 1256 + pattern; 1252 1257 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 1253 1258 | AT_XDMAC_MBR_UBC_NDEN 1254 1259 | AT_XDMAC_MBR_UBC_NSEN
+2
drivers/dma/bestcomm/bestcomm.c
··· 17 17 #include <linux/kernel.h> 18 18 #include <linux/slab.h> 19 19 #include <linux/of.h> 20 + #include <linux/of_address.h> 20 21 #include <linux/of_device.h> 22 + #include <linux/of_irq.h> 21 23 #include <linux/of_platform.h> 22 24 #include <asm/io.h> 23 25 #include <asm/irq.h>
+9
drivers/dma/dma-jz4780.c
··· 8 8 9 9 #include <linux/clk.h> 10 10 #include <linux/dmapool.h> 11 + #include <linux/dma-mapping.h> 11 12 #include <linux/init.h> 12 13 #include <linux/interrupt.h> 13 14 #include <linux/module.h> ··· 911 910 0, &jzdma->chan_reserved); 912 911 913 912 dd = &jzdma->dma_device; 913 + 914 + /* 915 + * The real segment size limit is dependent on the size unit selected 916 + * for the transfer. Because the size unit is selected automatically 917 + * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to 918 + * ensure the 24-bit transfer count in the descriptor cannot overflow. 919 + */ 920 + dma_set_max_seg_size(dev, 0xffffff); 914 921 915 922 dma_cap_set(DMA_MEMCPY, dd->cap_mask); 916 923 dma_cap_set(DMA_SLAVE, dd->cap_mask);
-7
drivers/dma/dmaengine.c
··· 1053 1053 * When the chan_id is a negative value, we are dynamically adding 1054 1054 * the channel. Otherwise we are static enumerating. 1055 1055 */ 1056 - mutex_lock(&device->chan_mutex); 1057 1056 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); 1058 - mutex_unlock(&device->chan_mutex); 1059 1057 if (chan->chan_id < 0) { 1060 1058 pr_err("%s: unable to alloc ida for chan: %d\n", 1061 1059 __func__, chan->chan_id); ··· 1076 1078 return 0; 1077 1079 1078 1080 err_out_ida: 1079 - mutex_lock(&device->chan_mutex); 1080 1081 ida_free(&device->chan_ida, chan->chan_id); 1081 - mutex_unlock(&device->chan_mutex); 1082 1082 err_free_dev: 1083 1083 kfree(chan->dev); 1084 1084 err_free_local: ··· 1109 1113 device->chancnt--; 1110 1114 chan->dev->chan = NULL; 1111 1115 mutex_unlock(&dma_list_mutex); 1112 - mutex_lock(&device->chan_mutex); 1113 1116 ida_free(&device->chan_ida, chan->chan_id); 1114 - mutex_unlock(&device->chan_mutex); 1115 1117 device_unregister(&chan->dev->device); 1116 1118 free_percpu(chan->local); 1117 1119 } ··· 1244 1250 if (rc != 0) 1245 1251 return rc; 1246 1252 1247 - mutex_init(&device->chan_mutex); 1248 1253 ida_init(&device->chan_ida); 1249 1254 1250 1255 /* represent channels in sysfs. Probably want devs too */
+10 -3
drivers/dma/dmatest.c
··· 675 675 /* 676 676 * src and dst buffers are freed by ourselves below 677 677 */ 678 - if (params->polled) 678 + if (params->polled) { 679 679 flags = DMA_CTRL_ACK; 680 - else 681 - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 680 + } else { 681 + if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) { 682 + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 683 + } else { 684 + pr_err("Channel does not support interrupt!\n"); 685 + goto err_pq_array; 686 + } 687 + } 682 688 683 689 ktime = ktime_get(); 684 690 while (!(kthread_should_stop() || ··· 912 906 runtime = ktime_to_us(ktime); 913 907 914 908 ret = 0; 909 + err_pq_array: 915 910 kfree(dma_pq); 916 911 err_srcs_array: 917 912 kfree(srcs);
+9
drivers/dma/dw/Kconfig
··· 16 16 Support the Synopsys DesignWare AHB DMA controller. This 17 17 can be integrated in chips such as the Intel Cherrytrail. 18 18 19 + config RZN1_DMAMUX 20 + tristate "Renesas RZ/N1 DMAMUX driver" 21 + depends on DW_DMAC 22 + depends on ARCH_RZN1 || COMPILE_TEST 23 + help 24 + Support the Renesas RZ/N1 DMAMUX which is located in front of 25 + the Synopsys DesignWare AHB DMA controller located on Renesas 26 + SoCs. 27 + 19 28 config DW_DMAC_PCI 20 29 tristate "Synopsys DesignWare AHB DMA PCI driver" 21 30 depends on PCI
+2
drivers/dma/dw/Makefile
··· 9 9 10 10 obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o 11 11 dw_dmac_pci-y := pci.o 12 + 13 + obj-$(CONFIG_RZN1_DMAMUX) += rzn1-dmamux.o
+1
drivers/dma/dw/platform.c
··· 137 137 #ifdef CONFIG_OF 138 138 static const struct of_device_id dw_dma_of_id_table[] = { 139 139 { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata }, 140 + { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata }, 140 141 {} 141 142 }; 142 143 MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
+155
drivers/dma/dw/rzn1-dmamux.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2022 Schneider-Electric 4 + * Author: Miquel Raynal <miquel.raynal@bootlin.com 5 + * Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com> 6 + */ 7 + #include <linux/bitops.h> 8 + #include <linux/of_device.h> 9 + #include <linux/of_dma.h> 10 + #include <linux/slab.h> 11 + #include <linux/soc/renesas/r9a06g032-sysctrl.h> 12 + #include <linux/types.h> 13 + 14 + #define RNZ1_DMAMUX_NCELLS 6 15 + #define RZN1_DMAMUX_MAX_LINES 64 16 + #define RZN1_DMAMUX_LINES_PER_CTLR 16 17 + 18 + struct rzn1_dmamux_data { 19 + struct dma_router dmarouter; 20 + DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR); 21 + }; 22 + 23 + struct rzn1_dmamux_map { 24 + unsigned int req_idx; 25 + }; 26 + 27 + static void rzn1_dmamux_free(struct device *dev, void *route_data) 28 + { 29 + struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev); 30 + struct rzn1_dmamux_map *map = route_data; 31 + 32 + dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx); 33 + 34 + clear_bit(map->req_idx, dmamux->used_chans); 35 + 36 + kfree(map); 37 + } 38 + 39 + static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec, 40 + struct of_dma *ofdma) 41 + { 42 + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); 43 + struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev); 44 + struct rzn1_dmamux_map *map; 45 + unsigned int dmac_idx, chan, val; 46 + u32 mask; 47 + int ret; 48 + 49 + if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) 50 + return ERR_PTR(-EINVAL); 51 + 52 + map = kzalloc(sizeof(*map), GFP_KERNEL); 53 + if (!map) 54 + return ERR_PTR(-ENOMEM); 55 + 56 + chan = dma_spec->args[0]; 57 + map->req_idx = dma_spec->args[4]; 58 + val = dma_spec->args[5]; 59 + dma_spec->args_count -= 2; 60 + 61 + if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) { 62 + dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan); 63 + ret = -EINVAL; 64 + goto free_map; 65 + } 66 + 67 + if (map->req_idx >= RZN1_DMAMUX_MAX_LINES || 68 + (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) { 69 + dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx); 70 + ret = -EINVAL; 71 + goto free_map; 72 + } 73 + 74 + dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0; 75 + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx); 76 + if (!dma_spec->np) { 77 + dev_err(&pdev->dev, "Can't get DMA master\n"); 78 + ret = -EINVAL; 79 + goto free_map; 80 + } 81 + 82 + dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n", 83 + map->req_idx, dmac_idx, chan); 84 + 85 + if (test_and_set_bit(map->req_idx, dmamux->used_chans)) { 86 + ret = -EBUSY; 87 + goto free_map; 88 + } 89 + 90 + mask = BIT(map->req_idx); 91 + ret = r9a06g032_sysctrl_set_dmamux(mask, val ? 
mask : 0); 92 + if (ret) 93 + goto clear_bitmap; 94 + 95 + return map; 96 + 97 + clear_bitmap: 98 + clear_bit(map->req_idx, dmamux->used_chans); 99 + free_map: 100 + kfree(map); 101 + 102 + return ERR_PTR(ret); 103 + } 104 + 105 + static const struct of_device_id rzn1_dmac_match[] = { 106 + { .compatible = "renesas,rzn1-dma" }, 107 + {} 108 + }; 109 + 110 + static int rzn1_dmamux_probe(struct platform_device *pdev) 111 + { 112 + struct device_node *mux_node = pdev->dev.of_node; 113 + const struct of_device_id *match; 114 + struct device_node *dmac_node; 115 + struct rzn1_dmamux_data *dmamux; 116 + 117 + dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL); 118 + if (!dmamux) 119 + return -ENOMEM; 120 + 121 + dmac_node = of_parse_phandle(mux_node, "dma-masters", 0); 122 + if (!dmac_node) 123 + return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n"); 124 + 125 + match = of_match_node(rzn1_dmac_match, dmac_node); 126 + of_node_put(dmac_node); 127 + if (!match) 128 + return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n"); 129 + 130 + dmamux->dmarouter.dev = &pdev->dev; 131 + dmamux->dmarouter.route_free = rzn1_dmamux_free; 132 + 133 + platform_set_drvdata(pdev, dmamux); 134 + 135 + return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate, 136 + &dmamux->dmarouter); 137 + } 138 + 139 + static const struct of_device_id rzn1_dmamux_match[] = { 140 + { .compatible = "renesas,rzn1-dmamux" }, 141 + {} 142 + }; 143 + 144 + static struct platform_driver rzn1_dmamux_driver = { 145 + .driver = { 146 + .name = "renesas,rzn1-dmamux", 147 + .of_match_table = rzn1_dmamux_match, 148 + }, 149 + .probe = rzn1_dmamux_probe, 150 + }; 151 + module_platform_driver(rzn1_dmamux_driver); 152 + 153 + MODULE_LICENSE("GPL"); 154 + MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com"); 155 + MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver");
+1 -1
drivers/dma/ep93xx_dma.c
··· 132 132 /** 133 133 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel 134 134 * @chan: dmaengine API channel 135 - * @edma: pointer to to the engine device 135 + * @edma: pointer to the engine device 136 136 * @regs: memory mapped registers 137 137 * @irq: interrupt number of the channel 138 138 * @clk: clock used by this channel
+12 -6
drivers/dma/idxd/cdev.c
··· 99 99 ctx->wq = wq; 100 100 filp->private_data = ctx; 101 101 102 - if (device_pasid_enabled(idxd)) { 102 + if (device_user_pasid_enabled(idxd)) { 103 103 sva = iommu_sva_bind_device(dev, current->mm, NULL); 104 104 if (IS_ERR(sva)) { 105 105 rc = PTR_ERR(sva); ··· 152 152 if (wq_shared(wq)) { 153 153 idxd_device_drain_pasid(idxd, ctx->pasid); 154 154 } else { 155 - if (device_pasid_enabled(idxd)) { 155 + if (device_user_pasid_enabled(idxd)) { 156 156 /* The wq disable in the disable pasid function will drain the wq */ 157 157 rc = idxd_wq_disable_pasid(wq); 158 158 if (rc < 0) ··· 314 314 315 315 mutex_lock(&wq->wq_lock); 316 316 wq->type = IDXD_WQT_USER; 317 - rc = __drv_enable_wq(wq); 317 + rc = drv_enable_wq(wq); 318 318 if (rc < 0) 319 319 goto err; 320 320 ··· 329 329 return 0; 330 330 331 331 err_cdev: 332 - __drv_disable_wq(wq); 332 + drv_disable_wq(wq); 333 333 err: 334 334 wq->type = IDXD_WQT_NONE; 335 335 mutex_unlock(&wq->wq_lock); ··· 342 342 343 343 mutex_lock(&wq->wq_lock); 344 344 idxd_wq_del_cdev(wq); 345 - __drv_disable_wq(wq); 345 + drv_disable_wq(wq); 346 346 wq->type = IDXD_WQT_NONE; 347 347 mutex_unlock(&wq->wq_lock); 348 348 } ··· 369 369 rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, 370 370 ictx[i].name); 371 371 if (rc) 372 - return rc; 372 + goto err_free_chrdev_region; 373 373 } 374 374 375 375 return 0; 376 + 377 + err_free_chrdev_region: 378 + for (i--; i >= 0; i--) 379 + unregister_chrdev_region(ictx[i].devt, MINORMASK); 380 + 381 + return rc; 376 382 } 377 383 378 384 void idxd_cdev_remove(void)
+105 -46
drivers/dma/idxd/device.c
··· 184 184 185 185 if (wq->state == IDXD_WQ_ENABLED) { 186 186 dev_dbg(dev, "WQ %d already enabled\n", wq->id); 187 - return -ENXIO; 187 + return 0; 188 188 } 189 189 190 190 idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status); ··· 299 299 } 300 300 } 301 301 302 - int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) 302 + static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv) 303 303 { 304 304 struct idxd_device *idxd = wq->idxd; 305 - int rc; 306 305 union wqcfg wqcfg; 307 306 unsigned int offset; 308 307 309 - rc = idxd_wq_disable(wq, false); 310 - if (rc < 0) 311 - return rc; 308 + offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX); 309 + spin_lock(&idxd->dev_lock); 310 + wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset); 311 + wqcfg.priv = priv; 312 + wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX]; 313 + iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset); 314 + spin_unlock(&idxd->dev_lock); 315 + } 316 + 317 + static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) 318 + { 319 + struct idxd_device *idxd = wq->idxd; 320 + union wqcfg wqcfg; 321 + unsigned int offset; 312 322 313 323 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); 314 324 spin_lock(&idxd->dev_lock); 315 325 wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); 316 326 wqcfg.pasid_en = 1; 317 327 wqcfg.pasid = pasid; 328 + wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX]; 318 329 iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); 319 330 spin_unlock(&idxd->dev_lock); 331 + } 332 + 333 + int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) 334 + { 335 + int rc; 336 + 337 + rc = idxd_wq_disable(wq, false); 338 + if (rc < 0) 339 + return rc; 340 + 341 + __idxd_wq_set_pasid_locked(wq, pasid); 320 342 321 343 rc = idxd_wq_enable(wq); 322 344 if (rc < 0) ··· 577 555 return -ENXIO; 578 556 } 579 557 580 - spin_lock(&idxd->dev_lock); 581 558 idxd_device_clear_state(idxd); 582 - idxd->state = IDXD_DEV_DISABLED; 583 - spin_unlock(&idxd->dev_lock); 584 559 return 0; 585 560 } 586 561 587 562 void idxd_device_reset(struct idxd_device *idxd) 588 563 { 589 564 idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); 590 - spin_lock(&idxd->dev_lock); 591 565 idxd_device_clear_state(idxd); 592 - idxd->state = IDXD_DEV_DISABLED; 566 + spin_lock(&idxd->dev_lock); 593 567 idxd_unmask_error_interrupts(idxd); 594 568 spin_unlock(&idxd->dev_lock); 595 569 } ··· 712 694 { 713 695 int i; 714 696 715 - lockdep_assert_held(&idxd->dev_lock); 716 697 for (i = 0; i < idxd->max_wqs; i++) { 717 698 struct idxd_wq *wq = idxd->wqs[i]; 718 699 700 + mutex_lock(&wq->wq_lock); 719 701 if (wq->state == IDXD_WQ_ENABLED) { 720 702 idxd_wq_disable_cleanup(wq); 721 703 wq->state = IDXD_WQ_DISABLED; 722 704 } 723 705 idxd_wq_device_reset_cleanup(wq); 706 + mutex_unlock(&wq->wq_lock); 724 707 } 725 708 } 726 709 ··· 730 711 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 731 712 return; 732 713 714 + idxd_device_wqs_clear_state(idxd); 715 + spin_lock(&idxd->dev_lock); 733 716 idxd_groups_clear_state(idxd); 734 717 idxd_engines_clear_state(idxd); 735 - idxd_device_wqs_clear_state(idxd); 718 + idxd->state = IDXD_DEV_DISABLED; 719 + spin_unlock(&idxd->dev_lock); 736 720 } 737 721 738 722 static void idxd_group_config_write(struct idxd_group *group) ··· 821 799 */ 822 800 for (i = 0; i < WQCFG_STRIDES(idxd); i++) { 823 801 wq_offset = WQCFG_OFFSET(idxd, wq->id, i); 824 - wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset); 802 + 
wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset); 825 803 } 826 804 827 805 if (wq->size == 0 && wq->type != IDXD_WQT_NONE) ··· 837 815 if (wq_dedicated(wq)) 838 816 wq->wqcfg->mode = 1; 839 817 840 - if (device_pasid_enabled(idxd)) { 841 - wq->wqcfg->pasid_en = 1; 842 - if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq)) 843 - wq->wqcfg->pasid = idxd->pasid; 844 - } 845 - 846 818 /* 847 - * Here the priv bit is set depending on the WQ type. priv = 1 if the 819 + * The WQ priv bit is set depending on the WQ type. priv = 1 if the 848 820 * WQ type is kernel to indicate privileged access. This setting only 849 821 * matters for dedicated WQ. According to the DSA spec: 850 822 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the ··· 848 832 * In the case of a dedicated kernel WQ that is not able to support 849 833 * the PASID cap, then the configuration will be rejected. 850 834 */ 851 - wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL); 852 835 if (wq_dedicated(wq) && wq->wqcfg->pasid_en && 853 836 !idxd_device_pasid_priv_enabled(idxd) && 854 837 wq->type == IDXD_WQT_KERNEL) { ··· 968 953 if (!wq->group) 969 954 continue; 970 955 971 - if (wq_shared(wq) && !device_swq_supported(idxd)) { 956 + if (wq_shared(wq) && !wq_shared_supported(wq)) { 972 957 idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; 973 958 dev_warn(dev, "No shared wq support but configured.\n"); 974 959 return -EINVAL; ··· 1032 1017 set_bit(WQ_FLAG_DEDICATED, &wq->flags); 1033 1018 1034 1019 wq->priority = wq->wqcfg->priority; 1020 + 1021 + wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift; 1022 + wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift; 1035 1023 1036 1024 for (i = 0; i < WQCFG_STRIDES(idxd); i++) { 1037 1025 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); ··· 1179 1161 struct idxd_device *idxd = wq->idxd; 1180 1162 struct idxd_irq_entry *ie = &wq->ie; 1181 1163 1182 - synchronize_irq(ie->vector); 1164 + if (wq->type != IDXD_WQT_KERNEL) 1165 + return; 1166 + 1183 1167 free_irq(ie->vector, ie); 1184 1168 idxd_flush_pending_descs(ie); 1185 1169 if (idxd->request_int_handles) ··· 1199 1179 struct device *dev = &pdev->dev; 1200 1180 struct idxd_irq_entry *ie; 1201 1181 int rc; 1182 + 1183 + if (wq->type != IDXD_WQT_KERNEL) 1184 + return 0; 1202 1185 1203 1186 ie = &wq->ie; 1204 1187 ie->vector = pci_irq_vector(pdev, ie->id); ··· 1234 1211 return rc; 1235 1212 } 1236 1213 1237 - int __drv_enable_wq(struct idxd_wq *wq) 1214 + int drv_enable_wq(struct idxd_wq *wq) 1238 1215 { 1239 1216 struct idxd_device *idxd = wq->idxd; 1240 1217 struct device *dev = &idxd->pdev->dev; ··· 1268 1245 1269 1246 /* Shared WQ checks */ 1270 1247 if (wq_shared(wq)) { 1271 - if (!device_swq_supported(idxd)) { 1248 + if (!wq_shared_supported(wq)) { 1272 1249 idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM; 1273 1250 dev_dbg(dev, "PASID not enabled and shared wq.\n"); 1274 1251 goto err; ··· 1286 1263 dev_dbg(dev, "Shared wq and threshold 0.\n"); 1287 1264 goto err; 1288 1265 } 1266 + } 1267 + 1268 + /* 1269 + * In the event that the WQ is configurable for pasid and priv bits. 1270 + * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit. 1271 + * However, for non-kernel wq, the driver should only set the pasid_en bit for 1272 + * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and 1273 + * pasid_en later on so there is no need to setup. 
1274 + */ 1275 + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { 1276 + int priv = 0; 1277 + 1278 + if (wq_pasid_enabled(wq)) { 1279 + if (is_idxd_wq_kernel(wq) || wq_shared(wq)) { 1280 + u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0; 1281 + 1282 + __idxd_wq_set_pasid_locked(wq, pasid); 1283 + } 1284 + } 1285 + 1286 + if (is_idxd_wq_kernel(wq)) 1287 + priv = 1; 1288 + __idxd_wq_set_priv_locked(wq, priv); 1289 1289 } 1290 1290 1291 1291 rc = 0; ··· 1335 1289 } 1336 1290 1337 1291 wq->client_count = 0; 1292 + 1293 + rc = idxd_wq_request_irq(wq); 1294 + if (rc < 0) { 1295 + idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; 1296 + dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); 1297 + goto err_irq; 1298 + } 1299 + 1300 + rc = idxd_wq_alloc_resources(wq); 1301 + if (rc < 0) { 1302 + idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; 1303 + dev_dbg(dev, "WQ resource alloc failed\n"); 1304 + goto err_res_alloc; 1305 + } 1306 + 1307 + rc = idxd_wq_init_percpu_ref(wq); 1308 + if (rc < 0) { 1309 + idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; 1310 + dev_dbg(dev, "percpu_ref setup failed\n"); 1311 + goto err_ref; 1312 + } 1313 + 1338 1314 return 0; 1339 1315 1316 + err_ref: 1317 + idxd_wq_free_resources(wq); 1318 + err_res_alloc: 1319 + idxd_wq_free_irq(wq); 1320 + err_irq: 1321 + idxd_wq_unmap_portal(wq); 1340 1322 err_map_portal: 1341 1323 rc = idxd_wq_disable(wq, false); 1342 1324 if (rc < 0) ··· 1373 1299 return rc; 1374 1300 } 1375 1301 1376 - int drv_enable_wq(struct idxd_wq *wq) 1377 - { 1378 - int rc; 1379 - 1380 - mutex_lock(&wq->wq_lock); 1381 - rc = __drv_enable_wq(wq); 1382 - mutex_unlock(&wq->wq_lock); 1383 - return rc; 1384 - } 1385 - 1386 - void __drv_disable_wq(struct idxd_wq *wq) 1302 + void drv_disable_wq(struct idxd_wq *wq) 1387 1303 { 1388 1304 struct idxd_device *idxd = wq->idxd; 1389 1305 struct device *dev = &idxd->pdev->dev; ··· 1384 1320 dev_warn(dev, "Clients has claim on wq %d: %d\n", 1385 1321 wq->id, idxd_wq_refcount(wq)); 1386 1322 1323 + idxd_wq_free_resources(wq); 1387 1324 idxd_wq_unmap_portal(wq); 1388 - 1389 1325 idxd_wq_drain(wq); 1326 + idxd_wq_free_irq(wq); 1390 1327 idxd_wq_reset(wq); 1391 - 1328 + percpu_ref_exit(&wq->wq_active); 1329 + wq->type = IDXD_WQT_NONE; 1392 1330 wq->client_count = 0; 1393 - } 1394 - 1395 - void drv_disable_wq(struct idxd_wq *wq) 1396 - { 1397 - mutex_lock(&wq->wq_lock); 1398 - __drv_disable_wq(wq); 1399 - mutex_unlock(&wq->wq_lock); 1400 1331 } 1401 1332 1402 1333 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
+28 -37
drivers/dma/idxd/dma.c
··· 88 88 } 89 89 90 90 static struct dma_async_tx_descriptor * 91 + idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags) 92 + { 93 + struct idxd_wq *wq = to_idxd_wq(c); 94 + u32 desc_flags; 95 + struct idxd_desc *desc; 96 + 97 + if (wq->state != IDXD_WQ_ENABLED) 98 + return NULL; 99 + 100 + op_flag_setup(flags, &desc_flags); 101 + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); 102 + if (IS_ERR(desc)) 103 + return NULL; 104 + 105 + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, 106 + 0, 0, 0, desc->compl_dma, desc_flags); 107 + desc->txd.flags = flags; 108 + return &desc->txd; 109 + } 110 + 111 + static struct dma_async_tx_descriptor * 91 112 idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, 92 113 dma_addr_t dma_src, size_t len, unsigned long flags) 93 114 { ··· 214 193 INIT_LIST_HEAD(&dma->channels); 215 194 dma->dev = dev; 216 195 196 + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); 217 197 dma_cap_set(DMA_PRIVATE, dma->cap_mask); 218 198 dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); 219 199 dma->device_release = idxd_dma_release; 220 200 201 + dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt; 221 202 if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { 222 203 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 223 204 dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; ··· 250 227 dma_async_device_unregister(&idxd->idxd_dma->dma); 251 228 } 252 229 253 - int idxd_register_dma_channel(struct idxd_wq *wq) 230 + static int idxd_register_dma_channel(struct idxd_wq *wq) 254 231 { 255 232 struct idxd_device *idxd = wq->idxd; 256 233 struct dma_device *dma = &idxd->idxd_dma->dma; ··· 287 264 return 0; 288 265 } 289 266 290 - void idxd_unregister_dma_channel(struct idxd_wq *wq) 267 + static void idxd_unregister_dma_channel(struct idxd_wq *wq) 291 268 { 292 269 struct idxd_dma_chan *idxd_chan = wq->idxd_chan; 293 270 struct dma_chan *chan = &idxd_chan->chan; ··· 313 290 mutex_lock(&wq->wq_lock); 314 291 wq->type = IDXD_WQT_KERNEL; 315 292 316 - rc = idxd_wq_request_irq(wq); 317 - if (rc < 0) { 318 - idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; 319 - dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); 320 - goto err_irq; 321 - } 322 - 323 - rc = __drv_enable_wq(wq); 293 + rc = drv_enable_wq(wq); 324 294 if (rc < 0) { 325 295 dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); 326 296 rc = -ENXIO; 327 297 goto err; 328 - } 329 - 330 - rc = idxd_wq_alloc_resources(wq); 331 - if (rc < 0) { 332 - idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; 333 - dev_dbg(dev, "WQ resource alloc failed\n"); 334 - goto err_res_alloc; 335 - } 336 - 337 - rc = idxd_wq_init_percpu_ref(wq); 338 - if (rc < 0) { 339 - idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; 340 - dev_dbg(dev, "percpu_ref setup failed\n"); 341 - goto err_ref; 342 298 } 343 299 344 300 rc = idxd_register_dma_channel(wq); ··· 332 330 return 0; 333 331 334 332 err_dma: 335 - __idxd_wq_quiesce(wq); 336 - percpu_ref_exit(&wq->wq_active); 337 - err_ref: 338 - idxd_wq_free_resources(wq); 339 - err_res_alloc: 340 - __drv_disable_wq(wq); 333 + drv_disable_wq(wq); 341 334 err: 342 - idxd_wq_free_irq(wq); 343 - err_irq: 344 335 wq->type = IDXD_WQT_NONE; 345 336 mutex_unlock(&wq->wq_lock); 346 337 return rc; ··· 346 351 mutex_lock(&wq->wq_lock); 347 352 __idxd_wq_quiesce(wq); 348 353 idxd_unregister_dma_channel(wq); 349 - idxd_wq_free_resources(wq); 350 - __drv_disable_wq(wq); 351 - percpu_ref_exit(&wq->wq_active); 352 - idxd_wq_free_irq(wq); 353 - wq->type = IDXD_WQT_NONE; 354 + drv_disable_wq(wq); 354 355 mutex_unlock(&wq->wq_lock); 
355 356 } 356 357
+14 -6
drivers/dma/idxd/idxd.h
··· 239 239 IDXD_FLAG_CONFIGURABLE = 0, 240 240 IDXD_FLAG_CMD_RUNNING, 241 241 IDXD_FLAG_PASID_ENABLED, 242 + IDXD_FLAG_USER_PASID_ENABLED, 242 243 }; 243 244 244 245 struct idxd_dma_dev { ··· 470 469 return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); 471 470 } 472 471 473 - static inline bool device_swq_supported(struct idxd_device *idxd) 472 + static inline bool device_user_pasid_enabled(struct idxd_device *idxd) 474 473 { 475 - return (support_enqcmd && device_pasid_enabled(idxd)); 474 + return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); 475 + } 476 + 477 + static inline bool wq_pasid_enabled(struct idxd_wq *wq) 478 + { 479 + return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) || 480 + (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd)); 481 + } 482 + 483 + static inline bool wq_shared_supported(struct idxd_wq *wq) 484 + { 485 + return (support_enqcmd && wq_pasid_enabled(wq)); 476 486 } 477 487 478 488 enum idxd_portal_prot { ··· 571 559 int idxd_device_drv_probe(struct idxd_dev *idxd_dev); 572 560 void idxd_device_drv_remove(struct idxd_dev *idxd_dev); 573 561 int drv_enable_wq(struct idxd_wq *wq); 574 - int __drv_enable_wq(struct idxd_wq *wq); 575 562 void drv_disable_wq(struct idxd_wq *wq); 576 - void __drv_disable_wq(struct idxd_wq *wq); 577 563 int idxd_device_init_reset(struct idxd_device *idxd); 578 564 int idxd_device_enable(struct idxd_device *idxd); 579 565 int idxd_device_disable(struct idxd_device *idxd); ··· 612 602 /* dmaengine */ 613 603 int idxd_register_dma_device(struct idxd_device *idxd); 614 604 void idxd_unregister_dma_device(struct idxd_device *idxd); 615 - int idxd_register_dma_channel(struct idxd_wq *wq); 616 - void idxd_unregister_dma_channel(struct idxd_wq *wq); 617 605 void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); 618 606 void idxd_dma_complete_txd(struct idxd_desc *desc, 619 607 enum idxd_complete_type comp_type, bool free_desc);
+15 -15
drivers/dma/idxd/init.c
··· 512 512 dev_dbg(dev, "IDXD reset complete\n"); 513 513 514 514 if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { 515 - rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA); 516 - if (rc == 0) { 517 - rc = idxd_enable_system_pasid(idxd); 518 - if (rc < 0) { 519 - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 520 - dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc); 521 - } else { 522 - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); 523 - } 524 - } else { 525 - dev_warn(dev, "Unable to turn on SVA feature.\n"); 526 - } 515 + if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) 516 + dev_warn(dev, "Unable to turn on user SVA feature.\n"); 517 + else 518 + set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); 519 + 520 + if (idxd_enable_system_pasid(idxd)) 521 + dev_warn(dev, "No in-kernel DMA with PASID.\n"); 522 + else 523 + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); 527 524 } else if (!sva) { 528 525 dev_warn(dev, "User forced SVA off via module param.\n"); 529 526 } ··· 558 561 err: 559 562 if (device_pasid_enabled(idxd)) 560 563 idxd_disable_system_pasid(idxd); 561 - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 564 + if (device_user_pasid_enabled(idxd)) 565 + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 562 566 return rc; 563 567 } 564 568 ··· 572 574 idxd_cleanup_internals(idxd); 573 575 if (device_pasid_enabled(idxd)) 574 576 idxd_disable_system_pasid(idxd); 575 - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 577 + if (device_user_pasid_enabled(idxd)) 578 + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 576 579 } 577 580 578 581 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ··· 690 691 free_irq(irq_entry->vector, irq_entry); 691 692 pci_free_irq_vectors(pdev); 692 693 pci_iounmap(pdev, idxd->reg_base); 693 - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); 694 + if (device_user_pasid_enabled(idxd)) 695 + iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); 694 696 pci_disable_device(pdev); 695 697 destroy_workqueue(idxd->wq); 696 698 perfmon_pmu_remove(idxd);
+1
drivers/dma/idxd/registers.h
··· 353 353 } __packed; 354 354 355 355 #define WQCFG_PASID_IDX 2 356 + #define WQCFG_PRIVL_IDX 2 356 357 #define WQCFG_OCCUP_IDX 6 357 358 358 359 #define WQCFG_OCCUP_MASK 0xffff
+9 -3
drivers/dma/idxd/sysfs.c
··· 588 588 if (sysfs_streq(buf, "dedicated")) { 589 589 set_bit(WQ_FLAG_DEDICATED, &wq->flags); 590 590 wq->threshold = 0; 591 - } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) { 591 + } else if (sysfs_streq(buf, "shared")) { 592 592 clear_bit(WQ_FLAG_DEDICATED, &wq->flags); 593 593 } else { 594 594 return -EINVAL; ··· 832 832 size_t count) 833 833 { 834 834 struct idxd_wq *wq = confdev_to_wq(dev); 835 + char *input, *pos; 835 836 836 837 if (wq->state != IDXD_WQ_DISABLED) 837 838 return -EPERM; ··· 847 846 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) 848 847 return -EOPNOTSUPP; 849 848 849 + input = kstrndup(buf, count, GFP_KERNEL); 850 + if (!input) 851 + return -ENOMEM; 852 + 853 + pos = strim(input); 850 854 memset(wq->name, 0, WQ_NAME_SIZE + 1); 851 - strncpy(wq->name, buf, WQ_NAME_SIZE); 852 - strreplace(wq->name, '\n', '\0'); 855 + sprintf(wq->name, "%s", pos); 856 + kfree(input); 853 857 return count; 854 858 } 855 859
+4 -8
drivers/dma/mediatek/mtk-cqdma.c
··· 751 751 struct mtk_cqdma_device *cqdma; 752 752 struct mtk_cqdma_vchan *vc; 753 753 struct dma_device *dd; 754 - struct resource *res; 755 754 int err; 756 755 u32 i; 757 756 ··· 823 824 return PTR_ERR(cqdma->pc[i]->base); 824 825 825 826 /* allocate IRQ resource */ 826 - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 827 - if (!res) { 828 - dev_err(&pdev->dev, "No irq resource for %s\n", 829 - dev_name(&pdev->dev)); 830 - return -EINVAL; 831 - } 832 - cqdma->pc[i]->irq = res->start; 827 + err = platform_get_irq(pdev, i); 828 + if (err < 0) 829 + return err; 830 + cqdma->pc[i]->irq = err; 833 831 834 832 err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, 835 833 mtk_cqdma_irq, 0, dev_name(&pdev->dev),
+5 -8
drivers/dma/mediatek/mtk-hsdma.c
··· 601 601 cb->flag = 0; 602 602 } 603 603 604 - cb->vd = 0; 604 + cb->vd = NULL; 605 605 606 606 /* 607 607 * Recycle the RXD with the helper WRITE_ONCE that can ensure ··· 923 923 return PTR_ERR(hsdma->clk); 924 924 } 925 925 926 - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 927 - if (!res) { 928 - dev_err(&pdev->dev, "No irq resource for %s\n", 929 - dev_name(&pdev->dev)); 930 - return -EINVAL; 931 - } 932 - hsdma->irq = res->start; 926 + err = platform_get_irq(pdev, 0); 927 + if (err < 0) 928 + return err; 929 + hsdma->irq = err; 933 930 934 931 refcount_set(&hsdma->pc_refcnt, 0); 935 932 spin_lock_init(&hsdma->lock);
+9 -5
drivers/dma/mmp_pdma.c
··· 1043 1043 return PTR_ERR(pdev->base); 1044 1044 1045 1045 of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); 1046 - if (of_id) 1047 - of_property_read_u32(pdev->dev->of_node, "#dma-channels", 1048 - &dma_channels); 1049 - else if (pdata && pdata->dma_channels) 1046 + if (of_id) { 1047 + /* Parse new and deprecated dma-channels properties */ 1048 + if (of_property_read_u32(pdev->dev->of_node, "dma-channels", 1049 + &dma_channels)) 1050 + of_property_read_u32(pdev->dev->of_node, "#dma-channels", 1051 + &dma_channels); 1052 + } else if (pdata && pdata->dma_channels) { 1050 1053 dma_channels = pdata->dma_channels; 1051 - else 1054 + } else { 1052 1055 dma_channels = 32; /* default 32 channel */ 1056 + } 1053 1057 pdev->dma_channels = dma_channels; 1054 1058 1055 1059 for (i = 0; i < dma_channels; i++) {
+2 -2
drivers/dma/mv_xor_v2.c
··· 591 591 dma_run_dependencies(&next_pending_sw_desc->async_tx); 592 592 593 593 /* Lock the channel */ 594 - spin_lock_bh(&xor_dev->lock); 594 + spin_lock(&xor_dev->lock); 595 595 596 596 /* add the SW descriptor to the free descriptors list */ 597 597 list_add(&next_pending_sw_desc->free_list, 598 598 &xor_dev->free_sw_desc); 599 599 600 600 /* Release the channel */ 601 - spin_unlock_bh(&xor_dev->lock); 601 + spin_unlock(&xor_dev->lock); 602 602 603 603 /* increment the next descriptor */ 604 604 pending_ptr++;
+6 -8
drivers/dma/nbpfaxi.c
··· 1294 1294 struct device_node *np = dev->of_node; 1295 1295 struct nbpf_device *nbpf; 1296 1296 struct dma_device *dma_dev; 1297 - struct resource *iomem, *irq_res; 1297 + struct resource *iomem; 1298 1298 const struct nbpf_config *cfg; 1299 1299 int num_channels; 1300 1300 int ret, irq, eirq, i; ··· 1335 1335 nbpf->config = cfg; 1336 1336 1337 1337 for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { 1338 - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 1339 - if (!irq_res) 1340 - break; 1341 - 1342 - for (irq = irq_res->start; irq <= irq_res->end; 1343 - irq++, irqs++) 1344 - irqbuf[irqs] = irq; 1338 + irq = platform_get_irq_optional(pdev, i); 1339 + if (irq < 0 && irq != -ENXIO) 1340 + return irq; 1341 + if (irq > 0) 1342 + irqbuf[irqs++] = irq; 1345 1343 } 1346 1344 1347 1345 /*
+2 -2
drivers/dma/plx_dma.c
··· 137 137 struct plx_dma_desc *desc; 138 138 u32 flags; 139 139 140 - spin_lock_bh(&plxdev->ring_lock); 140 + spin_lock(&plxdev->ring_lock); 141 141 142 142 while (plxdev->tail != plxdev->head) { 143 143 desc = plx_dma_get_desc(plxdev, plxdev->tail); ··· 165 165 plxdev->tail++; 166 166 } 167 167 168 - spin_unlock_bh(&plxdev->ring_lock); 168 + spin_unlock(&plxdev->ring_lock); 169 169 } 170 170 171 171 static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
+19 -17
drivers/dma/ptdma/ptdma-dev.c
··· 100 100 struct pt_passthru_engine *pt_engine) 101 101 { 102 102 struct ptdma_desc desc; 103 + struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q); 103 104 104 105 cmd_q->cmd_error = 0; 105 106 cmd_q->total_pt_ops++; ··· 112 111 desc.dst_lo = lower_32_bits(pt_engine->dst_dma); 113 112 desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma); 114 113 114 + if (cmd_q->int_en) 115 + pt_core_enable_queue_interrupts(pt); 116 + else 117 + pt_core_disable_queue_interrupts(pt); 118 + 115 119 return pt_core_execute_cmd(&desc, cmd_q); 116 - } 117 - 118 - static inline void pt_core_disable_queue_interrupts(struct pt_device *pt) 119 - { 120 - iowrite32(0, pt->cmd_q.reg_control + 0x000C); 121 - } 122 - 123 - static inline void pt_core_enable_queue_interrupts(struct pt_device *pt) 124 - { 125 - iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C); 126 120 } 127 121 128 122 static void pt_do_cmd_complete(unsigned long data) ··· 140 144 cmd->pt_cmd_callback(cmd->data, cmd->ret); 141 145 } 142 146 143 - static irqreturn_t pt_core_irq_handler(int irq, void *data) 147 + void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q) 144 148 { 145 - struct pt_device *pt = data; 146 - struct pt_cmd_queue *cmd_q = &pt->cmd_q; 147 149 u32 status; 148 150 149 - pt_core_disable_queue_interrupts(pt); 150 - pt->total_interrupts++; 151 151 status = ioread32(cmd_q->reg_control + 0x0010); 152 152 if (status) { 153 153 cmd_q->int_status = status; ··· 154 162 if ((status & INT_ERROR) && !cmd_q->cmd_error) 155 163 cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); 156 164 157 - /* Acknowledge the interrupt */ 165 + /* Acknowledge the completion */ 158 166 iowrite32(status, cmd_q->reg_control + 0x0010); 159 - pt_core_enable_queue_interrupts(pt); 160 167 pt_do_cmd_complete((ulong)&pt->tdata); 161 168 } 169 + } 170 + 171 + static irqreturn_t pt_core_irq_handler(int irq, void *data) 172 + { 173 + struct pt_device *pt = data; 174 + struct pt_cmd_queue *cmd_q = &pt->cmd_q; 175 + 176 + pt_core_disable_queue_interrupts(pt); 177 + pt->total_interrupts++; 178 + pt_check_status_trans(pt, cmd_q); 179 + pt_core_enable_queue_interrupts(pt); 162 180 return IRQ_HANDLED; 163 181 } 164 182
+15 -1
drivers/dma/ptdma/ptdma-dmaengine.c
··· 171 171 vchan_tx_prep(&chan->vc, &desc->vd, flags); 172 172 173 173 desc->pt = chan->pt; 174 + desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT); 174 175 desc->issued_to_hw = 0; 175 176 desc->status = DMA_IN_PROGRESS; 176 177 ··· 258 257 pt_cmd_callback(desc, 0); 259 258 } 260 259 260 + static enum dma_status 261 + pt_tx_status(struct dma_chan *c, dma_cookie_t cookie, 262 + struct dma_tx_state *txstate) 263 + { 264 + struct pt_device *pt = to_pt_chan(c)->pt; 265 + struct pt_cmd_queue *cmd_q = &pt->cmd_q; 266 + 267 + pt_check_status_trans(pt, cmd_q); 268 + return dma_cookie_status(c, cookie, txstate); 269 + } 270 + 261 271 static int pt_pause(struct dma_chan *dma_chan) 262 272 { 263 273 struct pt_dma_chan *chan = to_pt_chan(dma_chan); ··· 303 291 { 304 292 struct pt_dma_chan *chan = to_pt_chan(dma_chan); 305 293 unsigned long flags; 294 + struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q; 306 295 LIST_HEAD(head); 307 296 297 + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010); 308 298 spin_lock_irqsave(&chan->vc.lock, flags); 309 299 vchan_get_all_descriptors(&chan->vc, &head); 310 300 spin_unlock_irqrestore(&chan->vc.lock, flags); ··· 376 362 dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy; 377 363 dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt; 378 364 dma_dev->device_issue_pending = pt_issue_pending; 379 - dma_dev->device_tx_status = dma_cookie_status; 365 + dma_dev->device_tx_status = pt_tx_status; 380 366 dma_dev->device_pause = pt_pause; 381 367 dma_dev->device_resume = pt_resume; 382 368 dma_dev->device_terminate_all = pt_terminate_all;
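
With this change the ptdma queue interrupt is only armed when a descriptor is prepared with DMA_PREP_INTERRUPT, and pt_tx_status() polls the hardware completion status on demand. A hypothetical client-side sketch of the polled path this enables (function name and error values are illustrative, not from this merge):

#include <linux/dmaengine.h>

/* Submit a memcpy without DMA_PREP_INTERRUPT and wait by polling; the
 * provider's device_tx_status() callback (pt_tx_status() above) reports
 * completion from hardware status rather than from an interrupt. */
static int memcpy_polled_example(struct dma_chan *chan, dma_addr_t dst,
				 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* dma_sync_wait() issues pending work and polls device_tx_status(). */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}
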
+13
drivers/dma/ptdma/ptdma.h
··· 206 206 unsigned int active; 207 207 unsigned int suspended; 208 208 209 + /* Interrupt flag */ 210 + bool int_en; 211 + 209 212 /* Register addresses for queue */ 210 213 void __iomem *reg_control; 211 214 u32 qcontrol; /* Cached control register */ ··· 321 318 int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, 322 319 struct pt_passthru_engine *pt_engine); 323 320 321 + void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q); 324 322 void pt_start_queue(struct pt_cmd_queue *cmd_q); 325 323 void pt_stop_queue(struct pt_cmd_queue *cmd_q); 326 324 325 + static inline void pt_core_disable_queue_interrupts(struct pt_device *pt) 326 + { 327 + iowrite32(0, pt->cmd_q.reg_control + 0x000C); 328 + } 329 + 330 + static inline void pt_core_enable_queue_interrupts(struct pt_device *pt) 331 + { 332 + iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C); 333 + } 327 334 #endif
+10 -3
drivers/dma/pxa_dma.c
··· 1365 1365 1366 1366 of_id = of_match_device(pxad_dt_ids, &op->dev); 1367 1367 if (of_id) { 1368 - of_property_read_u32(op->dev.of_node, "#dma-channels", 1369 - &dma_channels); 1370 - ret = of_property_read_u32(op->dev.of_node, "#dma-requests", 1368 + /* Parse new and deprecated dma-channels properties */ 1369 + if (of_property_read_u32(op->dev.of_node, "dma-channels", 1370 + &dma_channels)) 1371 + of_property_read_u32(op->dev.of_node, "#dma-channels", 1372 + &dma_channels); 1373 + /* Parse new and deprecated dma-requests properties */ 1374 + ret = of_property_read_u32(op->dev.of_node, "dma-requests", 1371 1375 &nb_requestors); 1376 + if (ret) 1377 + ret = of_property_read_u32(op->dev.of_node, "#dma-requests", 1378 + &nb_requestors); 1372 1379 if (ret) { 1373 1380 dev_warn(pdev->slave.dev, 1374 1381 "#dma-requests set to default 32 as missing in OF: %d",
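The pxa change above tries the new "dma-channels"/"dma-requests" property names first and only falls back to the deprecated "#"-prefixed ones. The same try-new-then-old pattern, modelled in plain C with a fake property table instead of a device tree:

    #include <stdio.h>
    #include <string.h>

    struct prop { const char *name; unsigned int value; };

    /* Old device tree that only carries the deprecated spelling. */
    static const struct prop props[] = {
            { "#dma-channels", 32 },
            { "#dma-requests", 75 },
    };

    static int read_u32(const char *name, unsigned int *out)
    {
            for (size_t i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                    if (!strcmp(props[i].name, name)) {
                            *out = props[i].value;
                            return 0;
                    }
            }
            return -1;                      /* property missing */
    }

    /* Prefer the new name, fall back to the deprecated one, then a default. */
    static unsigned int read_with_fallback(const char *new_name,
                                           const char *old_name,
                                           unsigned int def)
    {
            unsigned int v;

            if (!read_u32(new_name, &v) || !read_u32(old_name, &v))
                    return v;
            return def;
    }

    int main(void)
    {
            printf("dma-channels = %u\n",
                   read_with_fallback("dma-channels", "#dma-channels", 32));
            return 0;
    }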
+16 -5
drivers/dma/qcom/gpi.c
··· 1754 1754 tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN); 1755 1755 1756 1756 tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); 1757 - if (spi->cmd == SPI_RX) 1757 + if (spi->cmd == SPI_RX) { 1758 1758 tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB); 1759 - else 1759 + } else if (spi->cmd == SPI_TX) { 1760 1760 tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1761 + } else { /* SPI_DUPLEX */ 1762 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1763 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK); 1764 + } 1761 1765 } 1762 1766 1763 1767 /* create the dma tre */ ··· 2152 2148 { 2153 2149 struct gpi_dev *gpi_dev; 2154 2150 unsigned int i; 2151 + u32 ee_offset; 2155 2152 int ret; 2156 2153 2157 2154 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL); ··· 2179 2174 dev_err(gpi_dev->dev, "missing 'gpii-mask' DT node\n"); 2180 2175 return ret; 2181 2176 } 2177 + 2178 + ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev); 2179 + gpi_dev->ee_base = gpi_dev->ee_base - ee_offset; 2182 2180 2183 2181 gpi_dev->ev_factor = EV_FACTOR; 2184 2182 ··· 2286 2278 } 2287 2279 2288 2280 static const struct of_device_id gpi_of_match[] = { 2289 - { .compatible = "qcom,sdm845-gpi-dma" }, 2290 - { .compatible = "qcom,sm8150-gpi-dma" }, 2291 - { .compatible = "qcom,sm8250-gpi-dma" }, 2281 + { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 }, 2282 + { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 }, 2283 + { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 }, 2284 + { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 }, 2285 + { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 }, 2286 + { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 }, 2292 2287 { }, 2293 2288 }; 2294 2289 MODULE_DEVICE_TABLE(of, gpi_of_match);
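For gpi, the per-SoC EE register offset now comes from the OF match data and is subtracted from the mapped base. A standalone sketch of the same idea follows; the compatible table mirrors the diff but the addresses are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct match { const char *compatible; uintptr_t ee_offset; };

    static const struct match gpi_matches[] = {
            { "qcom,sdm845-gpi-dma", 0x0 },
            { "qcom,sm8350-gpi-dma", 0x10000 },
            { "qcom,sc7280-gpi-dma", 0x10000 },
    };

    static uintptr_t match_data(const char *compat)
    {
            for (size_t i = 0; i < sizeof(gpi_matches) / sizeof(gpi_matches[0]); i++)
                    if (!strcmp(gpi_matches[i].compatible, compat))
                            return gpi_matches[i].ee_offset;
            return 0;
    }

    int main(void)
    {
            uintptr_t mapped_base = 0x800000;   /* pretend ioremap() result */
            uintptr_t ee_base = mapped_base - match_data("qcom,sm8350-gpi-dma");

            printf("ee_base = 0x%lx\n", (unsigned long)ee_base);
            return 0;
    }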
+12 -1
drivers/dma/qcom/hidma.c
··· 431 431 struct hidma_desc *mdesc = NULL; 432 432 struct hidma_dev *mdma = mchan->dmadev; 433 433 unsigned long irqflags; 434 + u64 byte_pattern, fill_pattern; 434 435 435 436 /* Get free descriptor */ 436 437 spin_lock_irqsave(&mchan->lock, irqflags); ··· 444 443 if (!mdesc) 445 444 return NULL; 446 445 446 + byte_pattern = (char)value; 447 + fill_pattern = (byte_pattern << 56) | 448 + (byte_pattern << 48) | 449 + (byte_pattern << 40) | 450 + (byte_pattern << 32) | 451 + (byte_pattern << 24) | 452 + (byte_pattern << 16) | 453 + (byte_pattern << 8) | 454 + byte_pattern; 455 + 447 456 mdesc->desc.flags = flags; 448 457 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, 449 - value, dest, len, flags, 458 + fill_pattern, dest, len, flags, 450 459 HIDMA_TRE_MEMSET); 451 460 452 461 /* Place descriptor in prepared list */
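hidma now replicates the single memset byte across a 64-bit word before handing it to the hardware. The shift chain below performs the same replication in standalone C; the multiply by 0x0101010101010101 is an equivalent shortcut shown only to cross-check the result, it is not taken from the driver.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t replicate_shift(uint8_t byte)
    {
            uint64_t b = byte;

            return (b << 56) | (b << 48) | (b << 40) | (b << 32) |
                   (b << 24) | (b << 16) | (b << 8)  |  b;
    }

    static uint64_t replicate_mul(uint8_t byte)
    {
            return (uint64_t)byte * 0x0101010101010101ULL;
    }

    int main(void)
    {
            /* Both forms agree for every possible fill byte. */
            for (unsigned int v = 0; v <= 0xff; v++)
                    assert(replicate_shift(v) == replicate_mul(v));

            printf("0x%016llx\n", (unsigned long long)replicate_shift(0xab));
            return 0;
    }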
+16 -8
drivers/dma/sf-pdma/sf-pdma.c
··· 482 482 static int sf_pdma_probe(struct platform_device *pdev) 483 483 { 484 484 struct sf_pdma *pdma; 485 - struct sf_pdma_chan *chan; 486 485 struct resource *res; 487 - int len, chans; 488 - int ret; 486 + int ret, n_chans; 489 487 const enum dma_slave_buswidth widths = 490 488 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | 491 489 DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | 492 490 DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | 493 491 DMA_SLAVE_BUSWIDTH_64_BYTES; 494 492 495 - chans = PDMA_NR_CH; 496 - len = sizeof(*pdma) + sizeof(*chan) * chans; 497 - pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); 493 + ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans); 494 + if (ret) { 495 + /* backwards-compatibility for no dma-channels property */ 496 + dev_dbg(&pdev->dev, "set number of channels to default value: 4\n"); 497 + n_chans = PDMA_MAX_NR_CH; 498 + } else if (n_chans > PDMA_MAX_NR_CH) { 499 + dev_err(&pdev->dev, "the number of channels exceeds the maximum\n"); 500 + return -EINVAL; 501 + } 502 + 503 + pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans), 504 + GFP_KERNEL); 498 505 if (!pdma) 499 506 return -ENOMEM; 500 507 501 - pdma->n_chans = chans; 508 + pdma->n_chans = n_chans; 502 509 503 510 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 504 511 pdma->membase = devm_ioremap_resource(&pdev->dev, res); ··· 563 556 struct sf_pdma_chan *ch; 564 557 int i; 565 558 566 - for (i = 0; i < PDMA_NR_CH; i++) { 559 + for (i = 0; i < pdma->n_chans; i++) { 567 560 ch = &pdma->chans[i]; 568 561 569 562 devm_free_irq(&pdev->dev, ch->txirq, ch); ··· 581 574 582 575 static const struct of_device_id sf_pdma_dt_ids[] = { 583 576 { .compatible = "sifive,fu540-c000-pdma" }, 577 + { .compatible = "sifive,pdma0" }, 584 578 {}, 585 579 }; 586 580 MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
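sf-pdma now reads "dma-channels" from the device tree, bounds-checks it against PDMA_MAX_NR_CH, and sizes the device structure with struct_size() over a flexible array (see the sf-pdma.h hunk below). A small userspace equivalent of that allocation pattern, with a stand-in channel struct:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_NR_CH 4

    struct chan { int id; };

    struct pdma {
            unsigned int n_chans;
            struct chan chans[];            /* flexible array member */
    };

    static struct pdma *pdma_alloc(unsigned int n_chans)
    {
            struct pdma *p;

            if (n_chans == 0 || n_chans > MAX_NR_CH)
                    return NULL;            /* reject out-of-range DT values */

            /* Open-coded struct_size(p, chans, n_chans), minus overflow checks. */
            p = calloc(1, sizeof(*p) + n_chans * sizeof(p->chans[0]));
            if (p)
                    p->n_chans = n_chans;
            return p;
    }

    int main(void)
    {
            struct pdma *p = pdma_alloc(4);   /* default when DT omits dma-channels */

            for (unsigned int i = 0; p && i < p->n_chans; i++)
                    p->chans[i].id = i;
            printf("allocated %u channels\n", p ? p->n_chans : 0);
            free(p);
            return 0;
    }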
+2 -6
drivers/dma/sf-pdma/sf-pdma.h
··· 22 22 #include "../dmaengine.h" 23 23 #include "../virt-dma.h" 24 24 25 - #define PDMA_NR_CH 4 26 - 27 - #if (PDMA_NR_CH != 4) 28 - #error "Please define PDMA_NR_CH to 4" 29 - #endif 25 + #define PDMA_MAX_NR_CH 4 30 26 31 27 #define PDMA_BASE_ADDR 0x3000000 32 28 #define PDMA_CHAN_OFFSET 0x1000 ··· 114 118 void __iomem *membase; 115 119 void __iomem *mappedbase; 116 120 u32 n_chans; 117 - struct sf_pdma_chan chans[PDMA_NR_CH]; 121 + struct sf_pdma_chan chans[]; 118 122 }; 119 123 120 124 #endif /* _SF_PDMA_H */
+1 -1
drivers/dma/sh/Kconfig
··· 50 50 51 51 config RZ_DMAC 52 52 tristate "Renesas RZ/{G2L,V2L} DMA Controller" 53 - depends on ARCH_R9A07G044 || ARCH_R9A07G054 || COMPILE_TEST 53 + depends on ARCH_RZG2L || COMPILE_TEST 54 54 select RENESAS_DMA 55 55 select DMA_VIRTUAL_CHANNELS 56 56 help
+5 -1
drivers/dma/sprd-dma.c
··· 1117 1117 u32 chn_count; 1118 1118 int ret, i; 1119 1119 1120 - ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count); 1120 + /* Parse new and deprecated dma-channels properties */ 1121 + ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count); 1122 + if (ret) 1123 + ret = device_property_read_u32(&pdev->dev, "#dma-channels", 1124 + &chn_count); 1121 1125 if (ret) { 1122 1126 dev_err(&pdev->dev, "get dma channels count failed\n"); 1123 1127 return ret;
+269 -44
drivers/dma/stm32-dma.c
··· 208 208 u32 threshold; 209 209 u32 mem_burst; 210 210 u32 mem_width; 211 + enum dma_status status; 211 212 }; 212 213 213 214 struct stm32_dma_device { ··· 486 485 } 487 486 488 487 chan->busy = false; 488 + chan->status = DMA_COMPLETE; 489 489 } 490 490 491 491 static int stm32_dma_terminate_all(struct dma_chan *c) ··· 537 535 dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); 538 536 } 539 537 538 + static void stm32_dma_sg_inc(struct stm32_dma_chan *chan) 539 + { 540 + chan->next_sg++; 541 + if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs)) 542 + chan->next_sg = 0; 543 + } 544 + 540 545 static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan); 541 546 542 547 static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) ··· 584 575 stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar); 585 576 stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr); 586 577 587 - chan->next_sg++; 578 + stm32_dma_sg_inc(chan); 588 579 589 580 /* Clear interrupt status if it is there */ 590 581 status = stm32_dma_irq_status(chan); ··· 597 588 stm32_dma_dump_reg(chan); 598 589 599 590 /* Start DMA */ 591 + chan->busy = true; 592 + chan->status = DMA_IN_PROGRESS; 600 593 reg->dma_scr |= STM32_DMA_SCR_EN; 601 594 stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); 602 - 603 - chan->busy = true; 604 595 605 596 dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); 606 597 } ··· 614 605 id = chan->id; 615 606 dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); 616 607 617 - if (dma_scr & STM32_DMA_SCR_DBM) { 618 - if (chan->next_sg == chan->desc->num_sgs) 619 - chan->next_sg = 0; 608 + sg_req = &chan->desc->sg_req[chan->next_sg]; 620 609 621 - sg_req = &chan->desc->sg_req[chan->next_sg]; 622 - 623 - if (dma_scr & STM32_DMA_SCR_CT) { 624 - dma_sm0ar = sg_req->chan_reg.dma_sm0ar; 625 - stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); 626 - dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", 627 - stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); 628 - } else { 629 - dma_sm1ar = sg_req->chan_reg.dma_sm1ar; 630 - stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar); 631 - dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", 632 - stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); 633 - } 610 + if (dma_scr & STM32_DMA_SCR_CT) { 611 + dma_sm0ar = sg_req->chan_reg.dma_sm0ar; 612 + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); 613 + dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", 614 + stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); 615 + } else { 616 + dma_sm1ar = sg_req->chan_reg.dma_sm1ar; 617 + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar); 618 + dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", 619 + stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); 634 620 } 635 621 } 636 622 637 - static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) 623 + static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan) 638 624 { 639 - if (chan->desc) { 640 - if (chan->desc->cyclic) { 641 - vchan_cyclic_callback(&chan->desc->vdesc); 642 - chan->next_sg++; 625 + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); 626 + u32 dma_scr; 627 + 628 + /* 629 + * Read and store current remaining data items and peripheral/memory addresses to be 630 + * updated on resume 631 + */ 632 + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); 633 + /* 634 + * Transfer can be paused while between a previous resume and reconfiguration on transfer 635 + * complete. 
If transfer is cyclic and CIRC and DBM have been deactivated for resume, need 636 + * to set it here in SCR backup to ensure a good reconfiguration on transfer complete. 637 + */ 638 + if (chan->desc && chan->desc->cyclic) { 639 + if (chan->desc->num_sgs == 1) 640 + dma_scr |= STM32_DMA_SCR_CIRC; 641 + else 642 + dma_scr |= STM32_DMA_SCR_DBM; 643 + } 644 + chan->chan_reg.dma_scr = dma_scr; 645 + 646 + /* 647 + * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise 648 + * on resume NDTR autoreload value will be wrong (lower than the initial period length) 649 + */ 650 + if (chan->desc && chan->desc->cyclic) { 651 + dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC); 652 + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); 653 + } 654 + 655 + chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)); 656 + 657 + dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); 658 + } 659 + 660 + static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan) 661 + { 662 + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); 663 + struct stm32_dma_sg_req *sg_req; 664 + u32 dma_scr, status, id; 665 + 666 + id = chan->id; 667 + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); 668 + 669 + /* Clear interrupt status if it is there */ 670 + status = stm32_dma_irq_status(chan); 671 + if (status) 672 + stm32_dma_irq_clear(chan, status); 673 + 674 + if (!chan->next_sg) 675 + sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1]; 676 + else 677 + sg_req = &chan->desc->sg_req[chan->next_sg - 1]; 678 + 679 + /* Reconfigure NDTR with the initial value */ 680 + stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr); 681 + 682 + /* Restore SPAR */ 683 + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar); 684 + 685 + /* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */ 686 + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar); 687 + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar); 688 + 689 + /* Reactivate CIRC/DBM if needed */ 690 + if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) { 691 + dma_scr |= STM32_DMA_SCR_DBM; 692 + /* Restore CT */ 693 + if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT) 694 + dma_scr &= ~STM32_DMA_SCR_CT; 695 + else 696 + dma_scr |= STM32_DMA_SCR_CT; 697 + } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) { 698 + dma_scr |= STM32_DMA_SCR_CIRC; 699 + } 700 + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); 701 + 702 + stm32_dma_configure_next_sg(chan); 703 + 704 + stm32_dma_dump_reg(chan); 705 + 706 + dma_scr |= STM32_DMA_SCR_EN; 707 + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); 708 + 709 + dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan); 710 + } 711 + 712 + static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) 713 + { 714 + if (!chan->desc) 715 + return; 716 + 717 + if (chan->desc->cyclic) { 718 + vchan_cyclic_callback(&chan->desc->vdesc); 719 + stm32_dma_sg_inc(chan); 720 + /* cyclic while CIRC/DBM disable => post resume reconfiguration needed */ 721 + if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))) 722 + stm32_dma_post_resume_reconfigure(chan); 723 + else if (scr & STM32_DMA_SCR_DBM) 643 724 stm32_dma_configure_next_sg(chan); 644 - } else { 645 - chan->busy = false; 646 - if (chan->next_sg == chan->desc->num_sgs) { 647 - vchan_cookie_complete(&chan->desc->vdesc); 648 - chan->desc = NULL; 649 - } 
650 - stm32_dma_start_transfer(chan); 725 + } else { 726 + chan->busy = false; 727 + chan->status = DMA_COMPLETE; 728 + if (chan->next_sg == chan->desc->num_sgs) { 729 + vchan_cookie_complete(&chan->desc->vdesc); 730 + chan->desc = NULL; 651 731 } 732 + stm32_dma_start_transfer(chan); 652 733 } 653 734 } 654 735 ··· 774 675 775 676 if (status & STM32_DMA_TCI) { 776 677 stm32_dma_irq_clear(chan, STM32_DMA_TCI); 777 - if (scr & STM32_DMA_SCR_TCIE) 778 - stm32_dma_handle_chan_done(chan); 678 + if (scr & STM32_DMA_SCR_TCIE) { 679 + if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN)) 680 + stm32_dma_handle_chan_paused(chan); 681 + else 682 + stm32_dma_handle_chan_done(chan, scr); 683 + } 779 684 status &= ~STM32_DMA_TCI; 780 685 } 781 686 ··· 812 709 813 710 } 814 711 spin_unlock_irqrestore(&chan->vchan.lock, flags); 712 + } 713 + 714 + static int stm32_dma_pause(struct dma_chan *c) 715 + { 716 + struct stm32_dma_chan *chan = to_stm32_dma_chan(c); 717 + unsigned long flags; 718 + int ret; 719 + 720 + if (chan->status != DMA_IN_PROGRESS) 721 + return -EPERM; 722 + 723 + spin_lock_irqsave(&chan->vchan.lock, flags); 724 + ret = stm32_dma_disable_chan(chan); 725 + /* 726 + * A transfer complete flag is set to indicate the end of transfer due to the stream 727 + * interruption, so wait for interrupt 728 + */ 729 + if (!ret) 730 + chan->status = DMA_PAUSED; 731 + spin_unlock_irqrestore(&chan->vchan.lock, flags); 732 + 733 + return ret; 734 + } 735 + 736 + static int stm32_dma_resume(struct dma_chan *c) 737 + { 738 + struct stm32_dma_chan *chan = to_stm32_dma_chan(c); 739 + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); 740 + struct stm32_dma_chan_reg chan_reg = chan->chan_reg; 741 + u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar; 742 + struct stm32_dma_sg_req *sg_req; 743 + unsigned long flags; 744 + 745 + if (chan->status != DMA_PAUSED) 746 + return -EPERM; 747 + 748 + scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); 749 + if (WARN_ON(scr & STM32_DMA_SCR_EN)) 750 + return -EPERM; 751 + 752 + spin_lock_irqsave(&chan->vchan.lock, flags); 753 + 754 + /* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */ 755 + if (!chan->next_sg) 756 + sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1]; 757 + else 758 + sg_req = &chan->desc->sg_req[chan->next_sg - 1]; 759 + 760 + ndtr = sg_req->chan_reg.dma_sndtr; 761 + offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr); 762 + spar = sg_req->chan_reg.dma_spar; 763 + sm0ar = sg_req->chan_reg.dma_sm0ar; 764 + sm1ar = sg_req->chan_reg.dma_sm1ar; 765 + 766 + /* 767 + * The peripheral and/or memory addresses have to be updated in order to adjust the 768 + * address pointers. Need to check increment. 769 + */ 770 + if (chan_reg.dma_scr & STM32_DMA_SCR_PINC) 771 + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset); 772 + else 773 + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar); 774 + 775 + if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC)) 776 + offset = 0; 777 + 778 + /* 779 + * In case of DBM, the current target could be SM1AR. 780 + * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so 781 + * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1. 
782 + */ 783 + if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT)) 784 + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset); 785 + else 786 + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset); 787 + 788 + /* NDTR must be restored otherwise internal HW counter won't be correctly reset */ 789 + stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr); 790 + 791 + /* 792 + * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, 793 + * otherwise NDTR autoreload value will be wrong (lower than the initial period length) 794 + */ 795 + if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)) 796 + chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM); 797 + 798 + if (chan_reg.dma_scr & STM32_DMA_SCR_DBM) 799 + stm32_dma_configure_next_sg(chan); 800 + 801 + stm32_dma_dump_reg(chan); 802 + 803 + /* The stream may then be re-enabled to restart transfer from the point it was stopped */ 804 + chan->status = DMA_IN_PROGRESS; 805 + chan_reg.dma_scr |= STM32_DMA_SCR_EN; 806 + stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr); 807 + 808 + spin_unlock_irqrestore(&chan->vchan.lock, flags); 809 + 810 + dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan); 811 + 812 + return 0; 815 813 } 816 814 817 815 static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, ··· 1182 978 } 1183 979 1184 980 /* Enable Circular mode or double buffer mode */ 1185 - if (buf_len == period_len) 981 + if (buf_len == period_len) { 1186 982 chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC; 1187 - else 983 + } else { 1188 984 chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; 985 + chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT; 986 + } 1189 987 1190 988 /* Clear periph ctrl if client set it */ 1191 989 chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; ··· 1297 1091 { 1298 1092 struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); 1299 1093 struct stm32_dma_sg_req *sg_req; 1300 - u32 dma_scr, dma_smar, id; 1094 + u32 dma_scr, dma_smar, id, period_len; 1301 1095 1302 1096 id = chan->id; 1303 1097 dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); 1304 1098 1099 + /* In cyclic CIRC but not DBM, CT is not used */ 1305 1100 if (!(dma_scr & STM32_DMA_SCR_DBM)) 1306 1101 return true; 1307 1102 1308 1103 sg_req = &chan->desc->sg_req[chan->next_sg]; 1104 + period_len = sg_req->len; 1309 1105 1106 + /* DBM - take care of a previous pause/resume not yet post reconfigured */ 1310 1107 if (dma_scr & STM32_DMA_SCR_CT) { 1311 1108 dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)); 1312 - return (dma_smar == sg_req->chan_reg.dma_sm0ar); 1109 + /* 1110 + * If transfer has been pause/resumed, 1111 + * SM0AR is in the range of [SM0AR:SM0AR+period_len] 1112 + */ 1113 + return (dma_smar >= sg_req->chan_reg.dma_sm0ar && 1114 + dma_smar < sg_req->chan_reg.dma_sm0ar + period_len); 1313 1115 } 1314 1116 1315 1117 dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)); 1316 - 1317 - return (dma_smar == sg_req->chan_reg.dma_sm1ar); 1118 + /* 1119 + * If transfer has been pause/resumed, 1120 + * SM1AR is in the range of [SM1AR:SM1AR+period_len] 1121 + */ 1122 + return (dma_smar >= sg_req->chan_reg.dma_sm1ar && 1123 + dma_smar < sg_req->chan_reg.dma_sm1ar + period_len); 1318 1124 } 1319 1125 1320 1126 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, ··· 1366 1148 1367 1149 residue = stm32_dma_get_remaining_bytes(chan); 1368 1150 1369 - if (!stm32_dma_is_current_sg(chan)) { 1151 + if (chan->desc->cyclic && 
!stm32_dma_is_current_sg(chan)) { 1370 1152 n_sg++; 1371 1153 if (n_sg == chan->desc->num_sgs) 1372 1154 n_sg = 0; ··· 1406 1188 u32 residue = 0; 1407 1189 1408 1190 status = dma_cookie_status(c, cookie, state); 1409 - if (status == DMA_COMPLETE || !state) 1191 + if (status == DMA_COMPLETE) 1192 + return status; 1193 + 1194 + status = chan->status; 1195 + 1196 + if (!state) 1410 1197 return status; 1411 1198 1412 1199 spin_lock_irqsave(&chan->vchan.lock, flags); ··· 1600 1377 dd->device_prep_slave_sg = stm32_dma_prep_slave_sg; 1601 1378 dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic; 1602 1379 dd->device_config = stm32_dma_slave_config; 1380 + dd->device_pause = stm32_dma_pause; 1381 + dd->device_resume = stm32_dma_resume; 1603 1382 dd->device_terminate_all = stm32_dma_terminate_all; 1604 1383 dd->device_synchronize = stm32_dma_synchronize; 1605 1384 dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1707 1482 #endif 1708 1483 1709 1484 #ifdef CONFIG_PM_SLEEP 1710 - static int stm32_dma_suspend(struct device *dev) 1485 + static int stm32_dma_pm_suspend(struct device *dev) 1711 1486 { 1712 1487 struct stm32_dma_device *dmadev = dev_get_drvdata(dev); 1713 1488 int id, ret, scr; ··· 1731 1506 return 0; 1732 1507 } 1733 1508 1734 - static int stm32_dma_resume(struct device *dev) 1509 + static int stm32_dma_pm_resume(struct device *dev) 1735 1510 { 1736 1511 return pm_runtime_force_resume(dev); 1737 1512 } 1738 1513 #endif 1739 1514 1740 1515 static const struct dev_pm_ops stm32_dma_pm_ops = { 1741 - SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume) 1516 + SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume) 1742 1517 SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend, 1743 1518 stm32_dma_runtime_resume, NULL) 1744 1519 };
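The new stm32-dma resume path computes how much of the current period already ran as (initial NDTR minus the NDTR captured at pause) shifted by the programmed PSIZE, and advances the peripheral/memory pointers by that many bytes when their increment bits are set. A worked example with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ndtr_initial = 256;    /* items programmed for this period */
            uint32_t ndtr_at_pause = 96;    /* items still to go when paused */
            uint32_t psize_shift = 2;       /* 32-bit peripheral data width */
            uint32_t mem_base = 0xC0000000; /* memory address of the period */

            uint32_t offset = (ndtr_initial - ndtr_at_pause) << psize_shift;

            /* With MINC set the memory pointer resumes past the data already
             * moved; with PINC clear the peripheral address stays put. */
            printf("bytes done      = %u\n", offset);
            printf("resume mem addr = 0x%08x\n", mem_base + offset);
            return 0;
    }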
+1 -1
drivers/dma/stm32-dmamux.c
··· 267 267 ret = PTR_ERR(rst); 268 268 if (ret == -EPROBE_DEFER) 269 269 goto err_clk; 270 - } else { 270 + } else if (count > 1) { /* Don't reset if there is only one dma-master */ 271 271 reset_control_assert(rst); 272 272 udelay(2); 273 273 reset_control_deassert(rst);
+32 -21
drivers/dma/stm32-mdma.c
··· 34 34 #include "virt-dma.h" 35 35 36 36 #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ 37 - #define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ 38 37 39 38 /* MDMA Channel x interrupt/status register */ 40 39 #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ ··· 72 73 #define STM32_MDMA_CCR_WEX BIT(14) 73 74 #define STM32_MDMA_CCR_HEX BIT(13) 74 75 #define STM32_MDMA_CCR_BEX BIT(12) 76 + #define STM32_MDMA_CCR_SM BIT(8) 75 77 #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) 76 78 #define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n)) 77 79 #define STM32_MDMA_CCR_TCIE BIT(5) ··· 168 168 169 169 #define STM32_MDMA_MAX_BUF_LEN 128 170 170 #define STM32_MDMA_MAX_BLOCK_LEN 65536 171 - #define STM32_MDMA_MAX_CHANNELS 63 171 + #define STM32_MDMA_MAX_CHANNELS 32 172 172 #define STM32_MDMA_MAX_REQUESTS 256 173 173 #define STM32_MDMA_MAX_BURST 128 174 174 #define STM32_MDMA_VERY_HIGH_PRIORITY 0x3 ··· 248 248 u32 nr_channels; 249 249 u32 nr_requests; 250 250 u32 nr_ahb_addr_masks; 251 + u32 chan_reserved; 251 252 struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; 252 253 u32 ahb_addr_masks[]; 253 254 }; ··· 1318 1317 static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) 1319 1318 { 1320 1319 struct stm32_mdma_device *dmadev = devid; 1321 - struct stm32_mdma_chan *chan = devid; 1320 + struct stm32_mdma_chan *chan; 1322 1321 u32 reg, id, ccr, ien, status; 1323 1322 1324 1323 /* Find out which channel generates the interrupt */ 1325 1324 status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); 1326 - if (status) { 1327 - id = __ffs(status); 1328 - } else { 1329 - status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); 1330 - if (!status) { 1331 - dev_dbg(mdma2dev(dmadev), "spurious it\n"); 1332 - return IRQ_NONE; 1333 - } 1334 - id = __ffs(status); 1335 - /* 1336 - * As GISR0 provides status for channel id from 0 to 31, 1337 - * so GISR1 provides status for channel id from 32 to 62 1338 - */ 1339 - id += 32; 1325 + if (!status) { 1326 + dev_dbg(mdma2dev(dmadev), "spurious it\n"); 1327 + return IRQ_NONE; 1340 1328 } 1329 + id = __ffs(status); 1341 1330 1342 1331 chan = &dmadev->chan[id]; 1343 1332 if (!chan) { ··· 1345 1354 1346 1355 if (!(status & ien)) { 1347 1356 spin_unlock(&chan->vchan.lock); 1348 - dev_warn(chan2dev(chan), 1349 - "spurious it (status=0x%04x, ien=0x%04x)\n", 1350 - status, ien); 1357 + if (chan->busy) 1358 + dev_warn(chan2dev(chan), 1359 + "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien); 1360 + else 1361 + dev_dbg(chan2dev(chan), 1362 + "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien); 1351 1363 return IRQ_NONE; 1352 1364 } 1353 1365 ··· 1450 1456 chan->desc_pool = NULL; 1451 1457 } 1452 1458 1459 + static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param) 1460 + { 1461 + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); 1462 + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); 1463 + 1464 + /* Check if chan is marked Secure */ 1465 + if (dmadev->chan_reserved & BIT(chan->id)) 1466 + return false; 1467 + 1468 + return true; 1469 + } 1470 + 1453 1471 static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, 1454 1472 struct of_dma *ofdma) 1455 1473 { 1456 1474 struct stm32_mdma_device *dmadev = ofdma->of_dma_data; 1475 + dma_cap_mask_t mask = dmadev->ddev.cap_mask; 1457 1476 struct stm32_mdma_chan *chan; 1458 1477 struct dma_chan *c; 1459 1478 struct stm32_mdma_chan_config config; ··· 1492 1485 return NULL; 1493 1486 } 1494 1487 1495 - c = 
dma_get_any_slave_channel(&dmadev->ddev); 1488 + c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node); 1496 1489 if (!c) { 1497 1490 dev_err(mdma2dev(dmadev), "No more channels available\n"); 1498 1491 return NULL; ··· 1622 1615 for (i = 0; i < dmadev->nr_channels; i++) { 1623 1616 chan = &dmadev->chan[i]; 1624 1617 chan->id = i; 1618 + 1619 + if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM) 1620 + dmadev->chan_reserved |= BIT(i); 1621 + 1625 1622 chan->vchan.desc_free = stm32_mdma_desc_free; 1626 1623 vchan_init(&chan->vchan, dd); 1627 1624 }
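stm32-mdma now records channels whose CCR secure-mode bit is set in a chan_reserved bitmask and filters them out when a channel is requested. A userspace model of that reserve-and-filter scheme; the channel count and which bits are reserved are made up.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CHANNELS 8

    static unsigned long chan_reserved;

    static bool chan_usable(unsigned int id)
    {
            return !(chan_reserved & (1UL << id));   /* filter callback */
    }

    static int request_free_channel(void)
    {
            for (unsigned int id = 0; id < NR_CHANNELS; id++)
                    if (chan_usable(id))
                            return id;
            return -1;
    }

    int main(void)
    {
            /* Pretend channels 0 and 1 had their secure-mode bit set at probe. */
            chan_reserved = (1UL << 0) | (1UL << 1);

            printf("first usable channel: %d\n", request_free_channel());
            return 0;
    }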
+58 -34
drivers/dma/sun6i-dma.c
··· 90 90 91 91 #define DMA_CHAN_CUR_PARA 0x1c 92 92 93 + /* 94 + * LLI address mangling 95 + * 96 + * The LLI link physical address is also mangled, but we avoid dealing 97 + * with that by allocating LLIs from the DMA32 zone. 98 + */ 99 + #define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16) 100 + #define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18) 93 101 94 102 /* 95 103 * Various hardware related defines ··· 140 132 u32 dst_burst_lengths; 141 133 u32 src_addr_widths; 142 134 u32 dst_addr_widths; 135 + bool has_high_addr; 143 136 bool has_mbus_clk; 144 137 }; 145 138 ··· 250 241 static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, 251 242 struct sun6i_pchan *pchan) 252 243 { 253 - phys_addr_t reg = virt_to_phys(pchan->base); 254 - 255 - dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" 244 + dev_dbg(sdev->slave.dev, "Chan %d reg:\n" 256 245 "\t___en(%04x): \t0x%08x\n" 257 246 "\tpause(%04x): \t0x%08x\n" 258 247 "\tstart(%04x): \t0x%08x\n" ··· 259 252 "\t__dst(%04x): \t0x%08x\n" 260 253 "\tcount(%04x): \t0x%08x\n" 261 254 "\t_para(%04x): \t0x%08x\n\n", 262 - pchan->idx, &reg, 255 + pchan->idx, 263 256 DMA_CHAN_ENABLE, 264 257 readl(pchan->base + DMA_CHAN_ENABLE), 265 258 DMA_CHAN_PAUSE, ··· 392 385 } 393 386 394 387 static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, 395 - struct sun6i_dma_lli *lli) 388 + struct sun6i_dma_lli *v_lli, 389 + dma_addr_t p_lli) 396 390 { 397 - phys_addr_t p_lli = virt_to_phys(lli); 398 - 399 391 dev_dbg(chan2dev(&vchan->vc.chan), 400 - "\n\tdesc: p - %pa v - 0x%p\n" 392 + "\n\tdesc:\tp - %pad v - 0x%p\n" 401 393 "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" 402 394 "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", 403 - &p_lli, lli, 404 - lli->cfg, lli->src, lli->dst, 405 - lli->len, lli->para, lli->p_lli_next); 395 + &p_lli, v_lli, 396 + v_lli->cfg, v_lli->src, v_lli->dst, 397 + v_lli->len, v_lli->para, v_lli->p_lli_next); 406 398 } 407 399 408 400 static void sun6i_dma_free_desc(struct virt_dma_desc *vd) ··· 451 445 pchan->desc = to_sun6i_desc(&desc->tx); 452 446 pchan->done = NULL; 453 447 454 - sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); 448 + sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli); 455 449 456 450 irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; 457 451 irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; ··· 632 626 return 0; 633 627 } 634 628 629 + static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev, 630 + struct sun6i_dma_lli *v_lli, 631 + dma_addr_t src, dma_addr_t dst) 632 + { 633 + v_lli->src = lower_32_bits(src); 634 + v_lli->dst = lower_32_bits(dst); 635 + 636 + if (sdev->cfg->has_high_addr) 637 + v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) | 638 + DST_HIGH_ADDR(upper_32_bits(dst)); 639 + } 640 + 635 641 static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( 636 642 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 637 643 size_t len, unsigned long flags) ··· 666 648 if (!txd) 667 649 return NULL; 668 650 669 - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); 651 + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); 670 652 if (!v_lli) { 671 653 dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); 672 654 goto err_txd_free; 673 655 } 674 656 675 - v_lli->src = src; 676 - v_lli->dst = dest; 677 657 v_lli->len = len; 678 658 v_lli->para = NORMAL_WAIT; 659 + sun6i_dma_set_addr(sdev, v_lli, src, dest); 679 660 680 661 burst = convert_burst(8); 681 662 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); ··· 687 670 688 671 sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); 689 672 
690 - sun6i_dma_dump_lli(vchan, v_lli); 673 + sun6i_dma_dump_lli(vchan, v_lli, p_lli); 691 674 692 675 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 693 676 ··· 725 708 return NULL; 726 709 727 710 for_each_sg(sgl, sg, sg_len, i) { 728 - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); 711 + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); 729 712 if (!v_lli) 730 713 goto err_lli_free; 731 714 ··· 733 716 v_lli->para = NORMAL_WAIT; 734 717 735 718 if (dir == DMA_MEM_TO_DEV) { 736 - v_lli->src = sg_dma_address(sg); 737 - v_lli->dst = sconfig->dst_addr; 719 + sun6i_dma_set_addr(sdev, v_lli, 720 + sg_dma_address(sg), 721 + sconfig->dst_addr); 738 722 v_lli->cfg = lli_cfg; 739 723 sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 740 724 sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); ··· 747 729 sg_dma_len(sg), flags); 748 730 749 731 } else { 750 - v_lli->src = sconfig->src_addr; 751 - v_lli->dst = sg_dma_address(sg); 732 + sun6i_dma_set_addr(sdev, v_lli, 733 + sconfig->src_addr, 734 + sg_dma_address(sg)); 752 735 v_lli->cfg = lli_cfg; 753 736 sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); 754 737 sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); ··· 765 746 } 766 747 767 748 dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); 768 - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) 769 - sun6i_dma_dump_lli(vchan, prev); 749 + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; 750 + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) 751 + sun6i_dma_dump_lli(vchan, v_lli, p_lli); 770 752 771 753 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 772 754 773 755 err_lli_free: 774 - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) 775 - dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); 756 + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; 757 + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) 758 + dma_pool_free(sdev->pool, v_lli, p_lli); 776 759 kfree(txd); 777 760 return NULL; 778 761 } ··· 808 787 return NULL; 809 788 810 789 for (i = 0; i < periods; i++) { 811 - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); 790 + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); 812 791 if (!v_lli) { 813 792 dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); 814 793 goto err_lli_free; ··· 818 797 v_lli->para = NORMAL_WAIT; 819 798 820 799 if (dir == DMA_MEM_TO_DEV) { 821 - v_lli->src = buf_addr + period_len * i; 822 - v_lli->dst = sconfig->dst_addr; 800 + sun6i_dma_set_addr(sdev, v_lli, 801 + buf_addr + period_len * i, 802 + sconfig->dst_addr); 823 803 v_lli->cfg = lli_cfg; 824 804 sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 825 805 sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); 826 806 } else { 827 - v_lli->src = sconfig->src_addr; 828 - v_lli->dst = buf_addr + period_len * i; 807 + sun6i_dma_set_addr(sdev, v_lli, 808 + sconfig->src_addr, 809 + buf_addr + period_len * i); 829 810 v_lli->cfg = lli_cfg; 830 811 sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); 831 812 sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); ··· 843 820 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 844 821 845 822 err_lli_free: 846 - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) 847 - dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); 823 + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; 824 + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) 825 + dma_pool_free(sdev->pool, v_lli, p_lli); 848 826 kfree(txd); 849 827 return NULL; 850 828 } ··· 1198 
1174 }; 1199 1175 1200 1176 /* 1201 - * TODO: Add support for more than 4g physical addressing. 1202 - * 1203 1177 * The A100 binding uses the number of dma channels from the 1204 1178 * device tree node. 1205 1179 */ ··· 1216 1194 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1217 1195 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1218 1196 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1197 + .has_high_addr = true, 1219 1198 .has_mbus_clk = true, 1220 1199 }; 1221 1200 ··· 1271 1248 { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, 1272 1249 { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, 1273 1250 { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, 1251 + { .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg }, 1274 1252 { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, 1275 1253 { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg }, 1276 1254 { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
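For the 34-bit capable sun6i variants, the low 32 bits of each address go in the LLI src/dst words and the two high bits are folded into the "para" word via SRC_HIGH_ADDR/DST_HIGH_ADDR, while the LLIs themselves stay below 4 GiB thanks to GFP_DMA32. The standalone pack/unpack check below uses the same bit positions; the addresses are examples only.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SRC_HIGH_ADDR(x) (((uint32_t)(x) & 0x3U) << 16)
    #define DST_HIGH_ADDR(x) (((uint32_t)(x) & 0x3U) << 18)

    struct lli { uint32_t src, dst, para; };

    static void set_addr(struct lli *v, uint64_t src, uint64_t dst)
    {
            v->src = (uint32_t)src;                   /* lower_32_bits() */
            v->dst = (uint32_t)dst;
            v->para |= SRC_HIGH_ADDR(src >> 32) |     /* upper_32_bits() */
                       DST_HIGH_ADDR(dst >> 32);
    }

    int main(void)
    {
            struct lli v = { .para = 0 };
            uint64_t src = 0x123456780ULL;            /* 34-bit example address */
            uint64_t dst = 0x2FEDCBA98ULL;

            set_addr(&v, src, dst);

            /* Recombine and check nothing was lost. */
            uint64_t src_back = ((uint64_t)((v.para >> 16) & 0x3) << 32) | v.src;
            uint64_t dst_back = ((uint64_t)((v.para >> 18) & 0x3) << 32) | v.dst;
            assert(src_back == src && dst_back == dst);

            printf("para = 0x%08x\n", (unsigned int)v.para);
            return 0;
    }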
+1498
drivers/dma/tegra186-gpc-dma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * DMA driver for NVIDIA Tegra GPC DMA controller. 4 + * 5 + * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. 6 + */ 7 + 8 + #include <linux/bitfield.h> 9 + #include <linux/dmaengine.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/interrupt.h> 12 + #include <linux/iommu.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/minmax.h> 15 + #include <linux/module.h> 16 + #include <linux/of_device.h> 17 + #include <linux/of_dma.h> 18 + #include <linux/platform_device.h> 19 + #include <linux/reset.h> 20 + #include <linux/slab.h> 21 + #include <dt-bindings/memory/tegra186-mc.h> 22 + #include "virt-dma.h" 23 + 24 + /* CSR register */ 25 + #define TEGRA_GPCDMA_CHAN_CSR 0x00 26 + #define TEGRA_GPCDMA_CSR_ENB BIT(31) 27 + #define TEGRA_GPCDMA_CSR_IE_EOC BIT(30) 28 + #define TEGRA_GPCDMA_CSR_ONCE BIT(27) 29 + 30 + #define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24) 31 + #define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \ 32 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0) 33 + #define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \ 34 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1) 35 + #define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \ 36 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2) 37 + #define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \ 38 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3) 39 + 40 + #define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21) 41 + #define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \ 42 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0) 43 + #define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \ 44 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1) 45 + #define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \ 46 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2) 47 + #define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \ 48 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3) 49 + #define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \ 50 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4) 51 + #define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \ 52 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6) 53 + 54 + #define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16) 55 + #define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \ 56 + FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4) 57 + #define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15) 58 + #define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10) 59 + 60 + /* STATUS register */ 61 + #define TEGRA_GPCDMA_CHAN_STATUS 0x004 62 + #define TEGRA_GPCDMA_STATUS_BUSY BIT(31) 63 + #define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30) 64 + #define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28) 65 + #define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27) 66 + #define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26) 67 + #define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25) 68 + #define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24) 69 + #define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23) 70 + #define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21) 71 + #define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20) 72 + 73 + #define TEGRA_GPCDMA_CHAN_CSRE 0x008 74 + #define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31) 75 + 76 + /* Source address */ 77 + #define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C 78 + 79 + /* Destination address */ 80 + #define TEGRA_GPCDMA_CHAN_DST_PTR 0x010 81 + 82 + /* High address pointer */ 83 + #define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014 84 + #define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0) 85 + #define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16) 86 + 87 + /* MC sequence register */ 88 + #define TEGRA_GPCDMA_CHAN_MCSEQ 0x18 89 + #define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31) 90 + #define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25) 91 + #define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23) 92 + #define TEGRA_GPCDMA_MCSEQ_BURST_2 \ 93 + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0) 
94 + #define TEGRA_GPCDMA_MCSEQ_BURST_16 \ 95 + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3) 96 + #define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20) 97 + #define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17) 98 + #define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0 99 + 100 + #define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7) 101 + #define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0) 102 + 103 + /* MMIO sequence register */ 104 + #define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c 105 + #define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31) 106 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28) 107 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \ 108 + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0) 109 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \ 110 + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1) 111 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \ 112 + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2) 113 + #define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27) 114 + #define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23 115 + #define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U 116 + #define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U 117 + #define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \ 118 + (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT) 119 + #define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19) 120 + #define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16) 121 + #define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7) 122 + 123 + /* Channel WCOUNT */ 124 + #define TEGRA_GPCDMA_CHAN_WCOUNT 0x20 125 + 126 + /* Transfer count */ 127 + #define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24 128 + 129 + /* DMA byte count status */ 130 + #define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28 131 + 132 + /* Error Status Register */ 133 + #define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30 134 + #define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8 135 + #define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF 136 + #define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \ 137 + ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \ 138 + TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK) 139 + #define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF 140 + #define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE 141 + #define TEGRA_DMA_PERIPH_ID_ERR 0xD 142 + #define TEGRA_DMA_STREAM_ID_ERR 0xC 143 + #define TEGRA_DMA_MC_SLAVE_ERR 0xB 144 + #define TEGRA_DMA_MMIO_SLAVE_ERR 0xA 145 + 146 + /* Fixed Pattern */ 147 + #define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34 148 + 149 + #define TEGRA_GPCDMA_CHAN_TZ 0x38 150 + #define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0) 151 + #define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1) 152 + 153 + #define TEGRA_GPCDMA_CHAN_SPARE 0x3c 154 + #define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16) 155 + 156 + /* 157 + * If any burst is in flight and DMA paused then this is the time to complete 158 + * on-flight burst and update DMA status register. 159 + */ 160 + #define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20 161 + #define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100 162 + 163 + /* Channel base address offset from GPCDMA base address */ 164 + #define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000 165 + 166 + struct tegra_dma; 167 + struct tegra_dma_channel; 168 + 169 + /* 170 + * tegra_dma_chip_data Tegra chip specific DMA data 171 + * @nr_channels: Number of channels available in the controller. 172 + * @channel_reg_size: Channel register size. 173 + * @max_dma_count: Maximum DMA transfer count supported by DMA controller. 174 + * @hw_support_pause: DMA HW engine support pause of the channel. 
175 + */ 176 + struct tegra_dma_chip_data { 177 + bool hw_support_pause; 178 + unsigned int nr_channels; 179 + unsigned int channel_reg_size; 180 + unsigned int max_dma_count; 181 + int (*terminate)(struct tegra_dma_channel *tdc); 182 + }; 183 + 184 + /* DMA channel registers */ 185 + struct tegra_dma_channel_regs { 186 + u32 csr; 187 + u32 src_ptr; 188 + u32 dst_ptr; 189 + u32 high_addr_ptr; 190 + u32 mc_seq; 191 + u32 mmio_seq; 192 + u32 wcount; 193 + u32 fixed_pattern; 194 + }; 195 + 196 + /* 197 + * tegra_dma_sg_req: DMA request details to configure hardware. This 198 + * contains the details for one transfer to configure DMA hw. 199 + * The client's request for data transfer can be broken into multiple 200 + * sub-transfer as per requester details and hw support. This sub transfer 201 + * get added as an array in Tegra DMA desc which manages the transfer details. 202 + */ 203 + struct tegra_dma_sg_req { 204 + unsigned int len; 205 + struct tegra_dma_channel_regs ch_regs; 206 + }; 207 + 208 + /* 209 + * tegra_dma_desc: Tegra DMA descriptors which uses virt_dma_desc to 210 + * manage client request and keep track of transfer status, callbacks 211 + * and request counts etc. 212 + */ 213 + struct tegra_dma_desc { 214 + bool cyclic; 215 + unsigned int bytes_req; 216 + unsigned int bytes_xfer; 217 + unsigned int sg_idx; 218 + unsigned int sg_count; 219 + struct virt_dma_desc vd; 220 + struct tegra_dma_channel *tdc; 221 + struct tegra_dma_sg_req sg_req[]; 222 + }; 223 + 224 + /* 225 + * tegra_dma_channel: Channel specific information 226 + */ 227 + struct tegra_dma_channel { 228 + bool config_init; 229 + char name[30]; 230 + enum dma_transfer_direction sid_dir; 231 + int id; 232 + int irq; 233 + int slave_id; 234 + struct tegra_dma *tdma; 235 + struct virt_dma_chan vc; 236 + struct tegra_dma_desc *dma_desc; 237 + struct dma_slave_config dma_sconfig; 238 + unsigned int stream_id; 239 + unsigned long chan_base_offset; 240 + }; 241 + 242 + /* 243 + * tegra_dma: Tegra DMA specific information 244 + */ 245 + struct tegra_dma { 246 + const struct tegra_dma_chip_data *chip_data; 247 + unsigned long sid_m2d_reserved; 248 + unsigned long sid_d2m_reserved; 249 + void __iomem *base_addr; 250 + struct device *dev; 251 + struct dma_device dma_dev; 252 + struct reset_control *rst; 253 + struct tegra_dma_channel channels[]; 254 + }; 255 + 256 + static inline void tdc_write(struct tegra_dma_channel *tdc, 257 + u32 reg, u32 val) 258 + { 259 + writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); 260 + } 261 + 262 + static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) 263 + { 264 + return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg); 265 + } 266 + 267 + static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) 268 + { 269 + return container_of(dc, struct tegra_dma_channel, vc.chan); 270 + } 271 + 272 + static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd) 273 + { 274 + return container_of(vd, struct tegra_dma_desc, vd); 275 + } 276 + 277 + static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) 278 + { 279 + return tdc->vc.chan.device->dev; 280 + } 281 + 282 + static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc) 283 + { 284 + dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n", 285 + tdc->id, tdc->name); 286 + dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n", 287 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR), 288 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS), 289 + 
tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE), 290 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR), 291 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR) 292 + ); 293 + dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n", 294 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ), 295 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ), 296 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT), 297 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT), 298 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS) 299 + ); 300 + dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n", 301 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS)); 302 + } 303 + 304 + static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc, 305 + enum dma_transfer_direction direction) 306 + { 307 + struct tegra_dma *tdma = tdc->tdma; 308 + int sid = tdc->slave_id; 309 + 310 + if (!is_slave_direction(direction)) 311 + return 0; 312 + 313 + switch (direction) { 314 + case DMA_MEM_TO_DEV: 315 + if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) { 316 + dev_err(tdma->dev, "slave id already in use\n"); 317 + return -EINVAL; 318 + } 319 + break; 320 + case DMA_DEV_TO_MEM: 321 + if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) { 322 + dev_err(tdma->dev, "slave id already in use\n"); 323 + return -EINVAL; 324 + } 325 + break; 326 + default: 327 + break; 328 + } 329 + 330 + tdc->sid_dir = direction; 331 + 332 + return 0; 333 + } 334 + 335 + static void tegra_dma_sid_free(struct tegra_dma_channel *tdc) 336 + { 337 + struct tegra_dma *tdma = tdc->tdma; 338 + int sid = tdc->slave_id; 339 + 340 + switch (tdc->sid_dir) { 341 + case DMA_MEM_TO_DEV: 342 + clear_bit(sid, &tdma->sid_m2d_reserved); 343 + break; 344 + case DMA_DEV_TO_MEM: 345 + clear_bit(sid, &tdma->sid_d2m_reserved); 346 + break; 347 + default: 348 + break; 349 + } 350 + 351 + tdc->sid_dir = DMA_TRANS_NONE; 352 + } 353 + 354 + static void tegra_dma_desc_free(struct virt_dma_desc *vd) 355 + { 356 + kfree(container_of(vd, struct tegra_dma_desc, vd)); 357 + } 358 + 359 + static int tegra_dma_slave_config(struct dma_chan *dc, 360 + struct dma_slave_config *sconfig) 361 + { 362 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 363 + 364 + memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); 365 + tdc->config_init = true; 366 + 367 + return 0; 368 + } 369 + 370 + static int tegra_dma_pause(struct tegra_dma_channel *tdc) 371 + { 372 + int ret; 373 + u32 val; 374 + 375 + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); 376 + val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE; 377 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); 378 + 379 + /* Wait until busy bit is de-asserted */ 380 + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + 381 + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, 382 + val, 383 + !(val & TEGRA_GPCDMA_STATUS_BUSY), 384 + TEGRA_GPCDMA_BURST_COMPLETE_TIME, 385 + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); 386 + 387 + if (ret) { 388 + dev_err(tdc2dev(tdc), "DMA pause timed out\n"); 389 + tegra_dma_dump_chan_regs(tdc); 390 + } 391 + 392 + return ret; 393 + } 394 + 395 + static int tegra_dma_device_pause(struct dma_chan *dc) 396 + { 397 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 398 + unsigned long flags; 399 + int ret; 400 + 401 + if (!tdc->tdma->chip_data->hw_support_pause) 402 + return -ENOSYS; 403 + 404 + spin_lock_irqsave(&tdc->vc.lock, flags); 405 + ret = tegra_dma_pause(tdc); 406 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 407 + 408 + return ret; 409 + } 410 + 411 + static void tegra_dma_resume(struct tegra_dma_channel *tdc) 412 + { 413 + u32 val; 414 + 415 + val = tdc_read(tdc, 
TEGRA_GPCDMA_CHAN_CSRE); 416 + val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE; 417 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); 418 + } 419 + 420 + static int tegra_dma_device_resume(struct dma_chan *dc) 421 + { 422 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 423 + unsigned long flags; 424 + 425 + if (!tdc->tdma->chip_data->hw_support_pause) 426 + return -ENOSYS; 427 + 428 + spin_lock_irqsave(&tdc->vc.lock, flags); 429 + tegra_dma_resume(tdc); 430 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 431 + 432 + return 0; 433 + } 434 + 435 + static void tegra_dma_disable(struct tegra_dma_channel *tdc) 436 + { 437 + u32 csr, status; 438 + 439 + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); 440 + 441 + /* Disable interrupts */ 442 + csr &= ~TEGRA_GPCDMA_CSR_IE_EOC; 443 + 444 + /* Disable DMA */ 445 + csr &= ~TEGRA_GPCDMA_CSR_ENB; 446 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); 447 + 448 + /* Clear interrupt status if it is there */ 449 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); 450 + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) { 451 + dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); 452 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status); 453 + } 454 + } 455 + 456 + static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc) 457 + { 458 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 459 + struct tegra_dma_channel_regs *ch_regs; 460 + int ret; 461 + u32 val; 462 + 463 + dma_desc->sg_idx++; 464 + 465 + /* Reset the sg index for cyclic transfers */ 466 + if (dma_desc->sg_idx == dma_desc->sg_count) 467 + dma_desc->sg_idx = 0; 468 + 469 + /* Configure next transfer immediately after DMA is busy */ 470 + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + 471 + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, 472 + val, 473 + (val & TEGRA_GPCDMA_STATUS_BUSY), 0, 474 + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); 475 + if (ret) 476 + return; 477 + 478 + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; 479 + 480 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); 481 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); 482 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); 483 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); 484 + 485 + /* Start DMA */ 486 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 487 + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); 488 + } 489 + 490 + static void tegra_dma_start(struct tegra_dma_channel *tdc) 491 + { 492 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 493 + struct tegra_dma_channel_regs *ch_regs; 494 + struct virt_dma_desc *vdesc; 495 + 496 + if (!dma_desc) { 497 + vdesc = vchan_next_desc(&tdc->vc); 498 + if (!vdesc) 499 + return; 500 + 501 + dma_desc = vd_to_tegra_dma_desc(vdesc); 502 + list_del(&vdesc->node); 503 + dma_desc->tdc = tdc; 504 + tdc->dma_desc = dma_desc; 505 + 506 + tegra_dma_resume(tdc); 507 + } 508 + 509 + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; 510 + 511 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); 512 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0); 513 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); 514 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); 515 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); 516 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern); 517 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq); 518 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq); 519 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr); 520 
+ 521 + /* Start DMA */ 522 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 523 + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); 524 + } 525 + 526 + static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc) 527 + { 528 + vchan_cookie_complete(&tdc->dma_desc->vd); 529 + 530 + tegra_dma_sid_free(tdc); 531 + tdc->dma_desc = NULL; 532 + } 533 + 534 + static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc, 535 + unsigned int err_status) 536 + { 537 + switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) { 538 + case TEGRA_DMA_BM_FIFO_FULL_ERR: 539 + dev_err(tdc->tdma->dev, 540 + "GPCDMA CH%d bm fifo full\n", tdc->id); 541 + break; 542 + 543 + case TEGRA_DMA_PERIPH_FIFO_FULL_ERR: 544 + dev_err(tdc->tdma->dev, 545 + "GPCDMA CH%d peripheral fifo full\n", tdc->id); 546 + break; 547 + 548 + case TEGRA_DMA_PERIPH_ID_ERR: 549 + dev_err(tdc->tdma->dev, 550 + "GPCDMA CH%d illegal peripheral id\n", tdc->id); 551 + break; 552 + 553 + case TEGRA_DMA_STREAM_ID_ERR: 554 + dev_err(tdc->tdma->dev, 555 + "GPCDMA CH%d illegal stream id\n", tdc->id); 556 + break; 557 + 558 + case TEGRA_DMA_MC_SLAVE_ERR: 559 + dev_err(tdc->tdma->dev, 560 + "GPCDMA CH%d mc slave error\n", tdc->id); 561 + break; 562 + 563 + case TEGRA_DMA_MMIO_SLAVE_ERR: 564 + dev_err(tdc->tdma->dev, 565 + "GPCDMA CH%d mmio slave error\n", tdc->id); 566 + break; 567 + 568 + default: 569 + dev_err(tdc->tdma->dev, 570 + "GPCDMA CH%d security violation %x\n", tdc->id, 571 + err_status); 572 + } 573 + } 574 + 575 + static irqreturn_t tegra_dma_isr(int irq, void *dev_id) 576 + { 577 + struct tegra_dma_channel *tdc = dev_id; 578 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 579 + struct tegra_dma_sg_req *sg_req; 580 + u32 status; 581 + 582 + /* Check channel error status register */ 583 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS); 584 + if (status) { 585 + tegra_dma_chan_decode_error(tdc, status); 586 + tegra_dma_dump_chan_regs(tdc); 587 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF); 588 + } 589 + 590 + spin_lock(&tdc->vc.lock); 591 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); 592 + if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC)) 593 + goto irq_done; 594 + 595 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, 596 + TEGRA_GPCDMA_STATUS_ISE_EOC); 597 + 598 + if (!dma_desc) 599 + goto irq_done; 600 + 601 + sg_req = dma_desc->sg_req; 602 + dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len; 603 + 604 + if (dma_desc->cyclic) { 605 + vchan_cyclic_callback(&dma_desc->vd); 606 + tegra_dma_configure_next_sg(tdc); 607 + } else { 608 + dma_desc->sg_idx++; 609 + if (dma_desc->sg_idx == dma_desc->sg_count) 610 + tegra_dma_xfer_complete(tdc); 611 + else 612 + tegra_dma_start(tdc); 613 + } 614 + 615 + irq_done: 616 + spin_unlock(&tdc->vc.lock); 617 + return IRQ_HANDLED; 618 + } 619 + 620 + static void tegra_dma_issue_pending(struct dma_chan *dc) 621 + { 622 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 623 + unsigned long flags; 624 + 625 + if (tdc->dma_desc) 626 + return; 627 + 628 + spin_lock_irqsave(&tdc->vc.lock, flags); 629 + if (vchan_issue_pending(&tdc->vc)) 630 + tegra_dma_start(tdc); 631 + 632 + /* 633 + * For cyclic DMA transfers, program the second 634 + * transfer parameters as soon as the first DMA 635 + * transfer is started inorder for the DMA 636 + * controller to trigger the second transfer 637 + * with the correct parameters. 
638 + */ 639 + if (tdc->dma_desc && tdc->dma_desc->cyclic) 640 + tegra_dma_configure_next_sg(tdc); 641 + 642 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 643 + } 644 + 645 + static int tegra_dma_stop_client(struct tegra_dma_channel *tdc) 646 + { 647 + int ret; 648 + u32 status, csr; 649 + 650 + /* 651 + * Change the client associated with the DMA channel 652 + * to stop DMA engine from starting any more bursts for 653 + * the given client and wait for in flight bursts to complete 654 + */ 655 + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); 656 + csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK); 657 + csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED; 658 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); 659 + 660 + /* Wait for in flight data transfer to finish */ 661 + udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME); 662 + 663 + /* If TX/RX path is still active wait till it becomes 664 + * inactive 665 + */ 666 + 667 + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + 668 + tdc->chan_base_offset + 669 + TEGRA_GPCDMA_CHAN_STATUS, 670 + status, 671 + !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX | 672 + TEGRA_GPCDMA_STATUS_CHANNEL_RX)), 673 + 5, 674 + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); 675 + if (ret) { 676 + dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n"); 677 + tegra_dma_dump_chan_regs(tdc); 678 + } 679 + 680 + return ret; 681 + } 682 + 683 + static int tegra_dma_terminate_all(struct dma_chan *dc) 684 + { 685 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 686 + unsigned long flags; 687 + LIST_HEAD(head); 688 + int err; 689 + 690 + spin_lock_irqsave(&tdc->vc.lock, flags); 691 + 692 + if (tdc->dma_desc) { 693 + err = tdc->tdma->chip_data->terminate(tdc); 694 + if (err) { 695 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 696 + return err; 697 + } 698 + 699 + tegra_dma_disable(tdc); 700 + tdc->dma_desc = NULL; 701 + } 702 + 703 + tegra_dma_sid_free(tdc); 704 + vchan_get_all_descriptors(&tdc->vc, &head); 705 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 706 + 707 + vchan_dma_desc_free_list(&tdc->vc, &head); 708 + 709 + return 0; 710 + } 711 + 712 + static int tegra_dma_get_residual(struct tegra_dma_channel *tdc) 713 + { 714 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 715 + struct tegra_dma_sg_req *sg_req = dma_desc->sg_req; 716 + unsigned int bytes_xfer, residual; 717 + u32 wcount = 0, status; 718 + 719 + wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT); 720 + 721 + /* 722 + * Set wcount = 0 if EOC bit is set. The transfer would have 723 + * already completed and the CHAN_XFER_COUNT could have updated 724 + * for the next transfer, specifically in case of cyclic transfers. 
725 + */ 726 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); 727 + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) 728 + wcount = 0; 729 + 730 + bytes_xfer = dma_desc->bytes_xfer + 731 + sg_req[dma_desc->sg_idx].len - (wcount * 4); 732 + 733 + residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req); 734 + 735 + return residual; 736 + } 737 + 738 + static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, 739 + dma_cookie_t cookie, 740 + struct dma_tx_state *txstate) 741 + { 742 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 743 + struct tegra_dma_desc *dma_desc; 744 + struct virt_dma_desc *vd; 745 + unsigned int residual; 746 + unsigned long flags; 747 + enum dma_status ret; 748 + 749 + ret = dma_cookie_status(dc, cookie, txstate); 750 + if (ret == DMA_COMPLETE) 751 + return ret; 752 + 753 + spin_lock_irqsave(&tdc->vc.lock, flags); 754 + vd = vchan_find_desc(&tdc->vc, cookie); 755 + if (vd) { 756 + dma_desc = vd_to_tegra_dma_desc(vd); 757 + residual = dma_desc->bytes_req; 758 + dma_set_residue(txstate, residual); 759 + } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) { 760 + residual = tegra_dma_get_residual(tdc); 761 + dma_set_residue(txstate, residual); 762 + } else { 763 + dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie); 764 + } 765 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 766 + 767 + return ret; 768 + } 769 + 770 + static inline int get_bus_width(struct tegra_dma_channel *tdc, 771 + enum dma_slave_buswidth slave_bw) 772 + { 773 + switch (slave_bw) { 774 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 775 + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8; 776 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 777 + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16; 778 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 779 + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32; 780 + default: 781 + dev_err(tdc2dev(tdc), "given slave bus width is not supported\n"); 782 + return -EINVAL; 783 + } 784 + } 785 + 786 + static unsigned int get_burst_size(struct tegra_dma_channel *tdc, 787 + u32 burst_size, enum dma_slave_buswidth slave_bw, 788 + int len) 789 + { 790 + unsigned int burst_mmio_width, burst_byte; 791 + 792 + /* 793 + * burst_size from client is in terms of the bus_width. 794 + * convert that into words. 795 + * If burst_size is not specified from client, then use 796 + * len to calculate the optimum burst size 797 + */ 798 + burst_byte = burst_size ? 
burst_size * slave_bw : len; 799 + burst_mmio_width = burst_byte / 4; 800 + 801 + if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN) 802 + return 0; 803 + 804 + burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX); 805 + 806 + return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width); 807 + } 808 + 809 + static int get_transfer_param(struct tegra_dma_channel *tdc, 810 + enum dma_transfer_direction direction, 811 + u32 *apb_addr, 812 + u32 *mmio_seq, 813 + u32 *csr, 814 + unsigned int *burst_size, 815 + enum dma_slave_buswidth *slave_bw) 816 + { 817 + switch (direction) { 818 + case DMA_MEM_TO_DEV: 819 + *apb_addr = tdc->dma_sconfig.dst_addr; 820 + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); 821 + *burst_size = tdc->dma_sconfig.dst_maxburst; 822 + *slave_bw = tdc->dma_sconfig.dst_addr_width; 823 + *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC; 824 + return 0; 825 + case DMA_DEV_TO_MEM: 826 + *apb_addr = tdc->dma_sconfig.src_addr; 827 + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); 828 + *burst_size = tdc->dma_sconfig.src_maxburst; 829 + *slave_bw = tdc->dma_sconfig.src_addr_width; 830 + *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC; 831 + return 0; 832 + default: 833 + dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); 834 + } 835 + 836 + return -EINVAL; 837 + } 838 + 839 + static struct dma_async_tx_descriptor * 840 + tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value, 841 + size_t len, unsigned long flags) 842 + { 843 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 844 + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; 845 + struct tegra_dma_sg_req *sg_req; 846 + struct tegra_dma_desc *dma_desc; 847 + u32 csr, mc_seq; 848 + 849 + if ((len & 3) || (dest & 3) || len > max_dma_count) { 850 + dev_err(tdc2dev(tdc), 851 + "DMA length/memory address is not supported\n"); 852 + return NULL; 853 + } 854 + 855 + /* Set DMA mode to fixed pattern */ 856 + csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT; 857 + /* Enable once or continuous mode */ 858 + csr |= TEGRA_GPCDMA_CSR_ONCE; 859 + /* Enable IRQ mask */ 860 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 861 + /* Enable the DMA interrupt */ 862 + if (flags & DMA_PREP_INTERRUPT) 863 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 864 + /* Configure default priority weight for the channel */ 865 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 866 + 867 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 868 + /* retain stream-id and clean rest */ 869 + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; 870 + 871 + /* Set the address wrapping */ 872 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 873 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 874 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 875 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 876 + 877 + /* Program outstanding MC requests */ 878 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 879 + /* Set burst size */ 880 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 881 + 882 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); 883 + if (!dma_desc) 884 + return NULL; 885 + 886 + dma_desc->bytes_req = len; 887 + dma_desc->sg_count = 1; 888 + sg_req = dma_desc->sg_req; 889 + 890 + sg_req[0].ch_regs.src_ptr = 0; 891 + sg_req[0].ch_regs.dst_ptr = dest; 892 + sg_req[0].ch_regs.high_addr_ptr = 893 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); 894 + sg_req[0].ch_regs.fixed_pattern = value; 895 + /* Word count reg takes value as (N +1) words */ 896 + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); 897 + sg_req[0].ch_regs.csr = csr; 898 + 
sg_req[0].ch_regs.mmio_seq = 0; 899 + sg_req[0].ch_regs.mc_seq = mc_seq; 900 + sg_req[0].len = len; 901 + 902 + dma_desc->cyclic = false; 903 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 904 + } 905 + 906 + static struct dma_async_tx_descriptor * 907 + tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest, 908 + dma_addr_t src, size_t len, unsigned long flags) 909 + { 910 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 911 + struct tegra_dma_sg_req *sg_req; 912 + struct tegra_dma_desc *dma_desc; 913 + unsigned int max_dma_count; 914 + u32 csr, mc_seq; 915 + 916 + max_dma_count = tdc->tdma->chip_data->max_dma_count; 917 + if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) { 918 + dev_err(tdc2dev(tdc), 919 + "DMA length/memory address is not supported\n"); 920 + return NULL; 921 + } 922 + 923 + /* Set DMA mode to memory to memory transfer */ 924 + csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; 925 + /* Enable once or continuous mode */ 926 + csr |= TEGRA_GPCDMA_CSR_ONCE; 927 + /* Enable IRQ mask */ 928 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 929 + /* Enable the DMA interrupt */ 930 + if (flags & DMA_PREP_INTERRUPT) 931 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 932 + /* Configure default priority weight for the channel */ 933 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 934 + 935 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 936 + /* retain stream-id and clean rest */ 937 + mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) | 938 + (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); 939 + 940 + /* Set the address wrapping */ 941 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 942 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 943 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 944 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 945 + 946 + /* Program outstanding MC requests */ 947 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 948 + /* Set burst size */ 949 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 950 + 951 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); 952 + if (!dma_desc) 953 + return NULL; 954 + 955 + dma_desc->bytes_req = len; 956 + dma_desc->sg_count = 1; 957 + sg_req = dma_desc->sg_req; 958 + 959 + sg_req[0].ch_regs.src_ptr = src; 960 + sg_req[0].ch_regs.dst_ptr = dest; 961 + sg_req[0].ch_regs.high_addr_ptr = 962 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32)); 963 + sg_req[0].ch_regs.high_addr_ptr |= 964 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); 965 + /* Word count reg takes value as (N +1) words */ 966 + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); 967 + sg_req[0].ch_regs.csr = csr; 968 + sg_req[0].ch_regs.mmio_seq = 0; 969 + sg_req[0].ch_regs.mc_seq = mc_seq; 970 + sg_req[0].len = len; 971 + 972 + dma_desc->cyclic = false; 973 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 974 + } 975 + 976 + static struct dma_async_tx_descriptor * 977 + tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl, 978 + unsigned int sg_len, enum dma_transfer_direction direction, 979 + unsigned long flags, void *context) 980 + { 981 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 982 + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; 983 + enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED; 984 + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0; 985 + struct tegra_dma_sg_req *sg_req; 986 + struct tegra_dma_desc *dma_desc; 987 + struct scatterlist *sg; 988 + u32 burst_size; 989 + unsigned int i; 990 + int ret; 991 + 992 + if (!tdc->config_init) { 993 + dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); 
994 + return NULL; 995 + } 996 + if (sg_len < 1) { 997 + dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); 998 + return NULL; 999 + } 1000 + 1001 + ret = tegra_dma_sid_reserve(tdc, direction); 1002 + if (ret) 1003 + return NULL; 1004 + 1005 + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, 1006 + &burst_size, &slave_bw); 1007 + if (ret < 0) 1008 + return NULL; 1009 + 1010 + /* Enable once or continuous mode */ 1011 + csr |= TEGRA_GPCDMA_CSR_ONCE; 1012 + /* Program the slave id in requestor select */ 1013 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); 1014 + /* Enable IRQ mask */ 1015 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 1016 + /* Configure default priority weight for the channel*/ 1017 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 1018 + 1019 + /* Enable the DMA interrupt */ 1020 + if (flags & DMA_PREP_INTERRUPT) 1021 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 1022 + 1023 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 1024 + /* retain stream-id and clean rest */ 1025 + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; 1026 + 1027 + /* Set the address wrapping on both MC and MMIO side */ 1028 + 1029 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 1030 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1031 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 1032 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1033 + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); 1034 + 1035 + /* Program 2 MC outstanding requests by default. */ 1036 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 1037 + 1038 + /* Setting MC burst size depending on MMIO burst size */ 1039 + if (burst_size == 64) 1040 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 1041 + else 1042 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; 1043 + 1044 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT); 1045 + if (!dma_desc) 1046 + return NULL; 1047 + 1048 + dma_desc->sg_count = sg_len; 1049 + sg_req = dma_desc->sg_req; 1050 + 1051 + /* Make transfer requests */ 1052 + for_each_sg(sgl, sg, sg_len, i) { 1053 + u32 len; 1054 + dma_addr_t mem; 1055 + 1056 + mem = sg_dma_address(sg); 1057 + len = sg_dma_len(sg); 1058 + 1059 + if ((len & 3) || (mem & 3) || len > max_dma_count) { 1060 + dev_err(tdc2dev(tdc), 1061 + "DMA length/memory address is not supported\n"); 1062 + kfree(dma_desc); 1063 + return NULL; 1064 + } 1065 + 1066 + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1067 + dma_desc->bytes_req += len; 1068 + 1069 + if (direction == DMA_MEM_TO_DEV) { 1070 + sg_req[i].ch_regs.src_ptr = mem; 1071 + sg_req[i].ch_regs.dst_ptr = apb_ptr; 1072 + sg_req[i].ch_regs.high_addr_ptr = 1073 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); 1074 + } else if (direction == DMA_DEV_TO_MEM) { 1075 + sg_req[i].ch_regs.src_ptr = apb_ptr; 1076 + sg_req[i].ch_regs.dst_ptr = mem; 1077 + sg_req[i].ch_regs.high_addr_ptr = 1078 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); 1079 + } 1080 + 1081 + /* 1082 + * Word count register takes input in words. Writing a value 1083 + * of N into word count register means a req of (N+1) words. 
1084 + */ 1085 + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); 1086 + sg_req[i].ch_regs.csr = csr; 1087 + sg_req[i].ch_regs.mmio_seq = mmio_seq; 1088 + sg_req[i].ch_regs.mc_seq = mc_seq; 1089 + sg_req[i].len = len; 1090 + } 1091 + 1092 + dma_desc->cyclic = false; 1093 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 1094 + } 1095 + 1096 + static struct dma_async_tx_descriptor * 1097 + tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, 1098 + size_t period_len, enum dma_transfer_direction direction, 1099 + unsigned long flags) 1100 + { 1101 + enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED; 1102 + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size; 1103 + unsigned int max_dma_count, len, period_count, i; 1104 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1105 + struct tegra_dma_desc *dma_desc; 1106 + struct tegra_dma_sg_req *sg_req; 1107 + dma_addr_t mem = buf_addr; 1108 + int ret; 1109 + 1110 + if (!buf_len || !period_len) { 1111 + dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); 1112 + return NULL; 1113 + } 1114 + 1115 + if (!tdc->config_init) { 1116 + dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); 1117 + return NULL; 1118 + } 1119 + 1120 + ret = tegra_dma_sid_reserve(tdc, direction); 1121 + if (ret) 1122 + return NULL; 1123 + 1124 + /* 1125 + * We only support cycle transfer when buf_len is multiple of 1126 + * period_len. 1127 + */ 1128 + if (buf_len % period_len) { 1129 + dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); 1130 + return NULL; 1131 + } 1132 + 1133 + len = period_len; 1134 + max_dma_count = tdc->tdma->chip_data->max_dma_count; 1135 + if ((len & 3) || (buf_addr & 3) || len > max_dma_count) { 1136 + dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); 1137 + return NULL; 1138 + } 1139 + 1140 + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, 1141 + &burst_size, &slave_bw); 1142 + if (ret < 0) 1143 + return NULL; 1144 + 1145 + /* Enable once or continuous mode */ 1146 + csr &= ~TEGRA_GPCDMA_CSR_ONCE; 1147 + /* Program the slave id in requestor select */ 1148 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); 1149 + /* Enable IRQ mask */ 1150 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 1151 + /* Configure default priority weight for the channel*/ 1152 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 1153 + 1154 + /* Enable the DMA interrupt */ 1155 + if (flags & DMA_PREP_INTERRUPT) 1156 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 1157 + 1158 + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); 1159 + 1160 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 1161 + /* retain stream-id and clean rest */ 1162 + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; 1163 + 1164 + /* Set the address wrapping on both MC and MMIO side */ 1165 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 1166 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1167 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 1168 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1169 + 1170 + /* Program 2 MC outstanding requests by default. 
*/ 1171 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 1172 + /* Setting MC burst size depending on MMIO burst size */ 1173 + if (burst_size == 64) 1174 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 1175 + else 1176 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; 1177 + 1178 + period_count = buf_len / period_len; 1179 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count), 1180 + GFP_NOWAIT); 1181 + if (!dma_desc) 1182 + return NULL; 1183 + 1184 + dma_desc->bytes_req = buf_len; 1185 + dma_desc->sg_count = period_count; 1186 + sg_req = dma_desc->sg_req; 1187 + 1188 + /* Split transfer equal to period size */ 1189 + for (i = 0; i < period_count; i++) { 1190 + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1191 + if (direction == DMA_MEM_TO_DEV) { 1192 + sg_req[i].ch_regs.src_ptr = mem; 1193 + sg_req[i].ch_regs.dst_ptr = apb_ptr; 1194 + sg_req[i].ch_regs.high_addr_ptr = 1195 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); 1196 + } else if (direction == DMA_DEV_TO_MEM) { 1197 + sg_req[i].ch_regs.src_ptr = apb_ptr; 1198 + sg_req[i].ch_regs.dst_ptr = mem; 1199 + sg_req[i].ch_regs.high_addr_ptr = 1200 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); 1201 + } 1202 + /* 1203 + * Word count register takes input in words. Writing a value 1204 + * of N into word count register means a req of (N+1) words. 1205 + */ 1206 + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); 1207 + sg_req[i].ch_regs.csr = csr; 1208 + sg_req[i].ch_regs.mmio_seq = mmio_seq; 1209 + sg_req[i].ch_regs.mc_seq = mc_seq; 1210 + sg_req[i].len = len; 1211 + 1212 + mem += len; 1213 + } 1214 + 1215 + dma_desc->cyclic = true; 1216 + 1217 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 1218 + } 1219 + 1220 + static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) 1221 + { 1222 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1223 + int ret; 1224 + 1225 + ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc); 1226 + if (ret) { 1227 + dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name); 1228 + return ret; 1229 + } 1230 + 1231 + dma_cookie_init(&tdc->vc.chan); 1232 + tdc->config_init = false; 1233 + return 0; 1234 + } 1235 + 1236 + static void tegra_dma_chan_synchronize(struct dma_chan *dc) 1237 + { 1238 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1239 + 1240 + synchronize_irq(tdc->irq); 1241 + vchan_synchronize(&tdc->vc); 1242 + } 1243 + 1244 + static void tegra_dma_free_chan_resources(struct dma_chan *dc) 1245 + { 1246 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1247 + 1248 + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); 1249 + 1250 + tegra_dma_terminate_all(dc); 1251 + synchronize_irq(tdc->irq); 1252 + 1253 + tasklet_kill(&tdc->vc.task); 1254 + tdc->config_init = false; 1255 + tdc->slave_id = -1; 1256 + tdc->sid_dir = DMA_TRANS_NONE; 1257 + free_irq(tdc->irq, tdc); 1258 + 1259 + vchan_free_chan_resources(&tdc->vc); 1260 + } 1261 + 1262 + static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, 1263 + struct of_dma *ofdma) 1264 + { 1265 + struct tegra_dma *tdma = ofdma->of_dma_data; 1266 + struct tegra_dma_channel *tdc; 1267 + struct dma_chan *chan; 1268 + 1269 + chan = dma_get_any_slave_channel(&tdma->dma_dev); 1270 + if (!chan) 1271 + return NULL; 1272 + 1273 + tdc = to_tegra_dma_chan(chan); 1274 + tdc->slave_id = dma_spec->args[0]; 1275 + 1276 + return chan; 1277 + } 1278 + 1279 + static const struct tegra_dma_chip_data tegra186_dma_chip_data = { 1280 + .nr_channels = 31, 1281 + .channel_reg_size = 
SZ_64K, 1282 + .max_dma_count = SZ_1G, 1283 + .hw_support_pause = false, 1284 + .terminate = tegra_dma_stop_client, 1285 + }; 1286 + 1287 + static const struct tegra_dma_chip_data tegra194_dma_chip_data = { 1288 + .nr_channels = 31, 1289 + .channel_reg_size = SZ_64K, 1290 + .max_dma_count = SZ_1G, 1291 + .hw_support_pause = true, 1292 + .terminate = tegra_dma_pause, 1293 + }; 1294 + 1295 + static const struct of_device_id tegra_dma_of_match[] = { 1296 + { 1297 + .compatible = "nvidia,tegra186-gpcdma", 1298 + .data = &tegra186_dma_chip_data, 1299 + }, { 1300 + .compatible = "nvidia,tegra194-gpcdma", 1301 + .data = &tegra194_dma_chip_data, 1302 + }, { 1303 + }, 1304 + }; 1305 + MODULE_DEVICE_TABLE(of, tegra_dma_of_match); 1306 + 1307 + static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id) 1308 + { 1309 + unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 1310 + 1311 + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK); 1312 + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); 1313 + 1314 + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id); 1315 + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id); 1316 + 1317 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val); 1318 + return 0; 1319 + } 1320 + 1321 + static int tegra_dma_probe(struct platform_device *pdev) 1322 + { 1323 + const struct tegra_dma_chip_data *cdata = NULL; 1324 + struct iommu_fwspec *iommu_spec; 1325 + unsigned int stream_id, i; 1326 + struct tegra_dma *tdma; 1327 + int ret; 1328 + 1329 + cdata = of_device_get_match_data(&pdev->dev); 1330 + 1331 + tdma = devm_kzalloc(&pdev->dev, 1332 + struct_size(tdma, channels, cdata->nr_channels), 1333 + GFP_KERNEL); 1334 + if (!tdma) 1335 + return -ENOMEM; 1336 + 1337 + tdma->dev = &pdev->dev; 1338 + tdma->chip_data = cdata; 1339 + platform_set_drvdata(pdev, tdma); 1340 + 1341 + tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); 1342 + if (IS_ERR(tdma->base_addr)) 1343 + return PTR_ERR(tdma->base_addr); 1344 + 1345 + tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma"); 1346 + if (IS_ERR(tdma->rst)) { 1347 + return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst), 1348 + "Missing controller reset\n"); 1349 + } 1350 + reset_control_reset(tdma->rst); 1351 + 1352 + tdma->dma_dev.dev = &pdev->dev; 1353 + 1354 + iommu_spec = dev_iommu_fwspec_get(&pdev->dev); 1355 + if (!iommu_spec) { 1356 + dev_err(&pdev->dev, "Missing iommu stream-id\n"); 1357 + return -EINVAL; 1358 + } 1359 + stream_id = iommu_spec->ids[0] & 0xffff; 1360 + 1361 + INIT_LIST_HEAD(&tdma->dma_dev.channels); 1362 + for (i = 0; i < cdata->nr_channels; i++) { 1363 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1364 + 1365 + tdc->irq = platform_get_irq(pdev, i); 1366 + if (tdc->irq < 0) 1367 + return tdc->irq; 1368 + 1369 + tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET + 1370 + i * cdata->channel_reg_size; 1371 + snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i); 1372 + tdc->tdma = tdma; 1373 + tdc->id = i; 1374 + tdc->slave_id = -1; 1375 + 1376 + vchan_init(&tdc->vc, &tdma->dma_dev); 1377 + tdc->vc.desc_free = tegra_dma_desc_free; 1378 + 1379 + /* program stream-id for this channel */ 1380 + tegra_dma_program_sid(tdc, stream_id); 1381 + tdc->stream_id = stream_id; 1382 + } 1383 + 1384 + dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); 1385 + dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); 1386 + dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask); 1387 + dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask); 1388 + 
dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); 1389 + 1390 + /* 1391 + * Only word aligned transfers are supported. Set the copy 1392 + * alignment shift. 1393 + */ 1394 + tdma->dma_dev.copy_align = 2; 1395 + tdma->dma_dev.fill_align = 2; 1396 + tdma->dma_dev.device_alloc_chan_resources = 1397 + tegra_dma_alloc_chan_resources; 1398 + tdma->dma_dev.device_free_chan_resources = 1399 + tegra_dma_free_chan_resources; 1400 + tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; 1401 + tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy; 1402 + tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset; 1403 + tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; 1404 + tdma->dma_dev.device_config = tegra_dma_slave_config; 1405 + tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; 1406 + tdma->dma_dev.device_tx_status = tegra_dma_tx_status; 1407 + tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; 1408 + tdma->dma_dev.device_pause = tegra_dma_device_pause; 1409 + tdma->dma_dev.device_resume = tegra_dma_device_resume; 1410 + tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize; 1411 + tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1412 + 1413 + ret = dma_async_device_register(&tdma->dma_dev); 1414 + if (ret < 0) { 1415 + dev_err_probe(&pdev->dev, ret, 1416 + "GPC DMA driver registration failed\n"); 1417 + return ret; 1418 + } 1419 + 1420 + ret = of_dma_controller_register(pdev->dev.of_node, 1421 + tegra_dma_of_xlate, tdma); 1422 + if (ret < 0) { 1423 + dev_err_probe(&pdev->dev, ret, 1424 + "GPC DMA OF registration failed\n"); 1425 + 1426 + dma_async_device_unregister(&tdma->dma_dev); 1427 + return ret; 1428 + } 1429 + 1430 + dev_info(&pdev->dev, "GPC DMA driver register %d channels\n", 1431 + cdata->nr_channels); 1432 + 1433 + return 0; 1434 + } 1435 + 1436 + static int tegra_dma_remove(struct platform_device *pdev) 1437 + { 1438 + struct tegra_dma *tdma = platform_get_drvdata(pdev); 1439 + 1440 + of_dma_controller_free(pdev->dev.of_node); 1441 + dma_async_device_unregister(&tdma->dma_dev); 1442 + 1443 + return 0; 1444 + } 1445 + 1446 + static int __maybe_unused tegra_dma_pm_suspend(struct device *dev) 1447 + { 1448 + struct tegra_dma *tdma = dev_get_drvdata(dev); 1449 + unsigned int i; 1450 + 1451 + for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1452 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1453 + 1454 + if (tdc->dma_desc) { 1455 + dev_err(tdma->dev, "channel %u busy\n", i); 1456 + return -EBUSY; 1457 + } 1458 + } 1459 + 1460 + return 0; 1461 + } 1462 + 1463 + static int __maybe_unused tegra_dma_pm_resume(struct device *dev) 1464 + { 1465 + struct tegra_dma *tdma = dev_get_drvdata(dev); 1466 + unsigned int i; 1467 + 1468 + reset_control_reset(tdma->rst); 1469 + 1470 + for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1471 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1472 + 1473 + tegra_dma_program_sid(tdc, tdc->stream_id); 1474 + } 1475 + 1476 + return 0; 1477 + } 1478 + 1479 + static const struct dev_pm_ops tegra_dma_dev_pm_ops = { 1480 + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) 1481 + }; 1482 + 1483 + static struct platform_driver tegra_dma_driver = { 1484 + .driver = { 1485 + .name = "tegra-gpcdma", 1486 + .pm = &tegra_dma_dev_pm_ops, 1487 + .of_match_table = tegra_dma_of_match, 1488 + }, 1489 + .probe = tegra_dma_probe, 1490 + .remove = tegra_dma_remove, 1491 + }; 1492 + 1493 + module_platform_driver(tegra_dma_driver); 1494 + 1495 + 
MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver"); 1496 + MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>"); 1497 + MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>"); 1498 + MODULE_LICENSE("GPL");
+5 -1
drivers/dma/ti/cppi41.c
··· 1105 1105 cdd->qmgr_num_pend = glue_info->qmgr_num_pend; 1106 1106 cdd->first_completion_queue = glue_info->first_completion_queue; 1107 1107 1108 + /* Parse new and deprecated dma-channels properties */ 1108 1109 ret = of_property_read_u32(dev->of_node, 1109 - "#dma-channels", &cdd->n_chans); 1110 + "dma-channels", &cdd->n_chans); 1111 + if (ret) 1112 + ret = of_property_read_u32(dev->of_node, 1113 + "#dma-channels", &cdd->n_chans); 1110 1114 if (ret) 1111 1115 goto err_get_n_chans; 1112 1116
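The cppi41 hunk above is one instance of a pattern repeated across this series: read the new "dma-channels" property first and only then fall back to the deprecated "#dma-channels". A standalone sketch of that shape, with an assumed helper name:

#include <linux/of.h>

/* Hypothetical helper: read the channel count, preferring the new
 * "dma-channels" property and falling back to the deprecated
 * "#dma-channels" one. */
static int example_read_dma_channels(struct device_node *np, u32 *n_chans)
{
	int ret;

	ret = of_property_read_u32(np, "dma-channels", n_chans);
	if (ret)
		ret = of_property_read_u32(np, "#dma-channels", n_chans);

	return ret;
}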
+4 -4
drivers/dma/ti/k3-psil-am62.c
··· 70 70 /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ 71 71 static struct psil_ep am62_src_ep_map[] = { 72 72 /* SAUL */ 73 - PSIL_SAUL(0x7500, 20, 35, 8, 35, 0), 74 - PSIL_SAUL(0x7501, 21, 35, 8, 36, 0), 75 - PSIL_SAUL(0x7502, 22, 43, 8, 43, 0), 76 - PSIL_SAUL(0x7503, 23, 43, 8, 44, 0), 73 + PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), 74 + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), 75 + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), 76 + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), 77 77 /* PDMA_MAIN0 - SPI0-3 */ 78 78 PSIL_PDMA_XY_PKT(0x4302), 79 79 PSIL_PDMA_XY_PKT(0x4303),
+13 -4
drivers/dma/xilinx/zynqmp_dma.c
··· 229 229 bool is_dmacoherent; 230 230 struct tasklet_struct tasklet; 231 231 bool idle; 232 - u32 desc_size; 232 + size_t desc_size; 233 233 bool err; 234 234 u32 bus_width; 235 235 u32 src_burst_len; ··· 486 486 } 487 487 488 488 chan->desc_pool_v = dma_alloc_coherent(chan->dev, 489 - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 489 + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * 490 + ZYNQMP_DMA_NUM_DESCS), 490 491 &chan->desc_pool_p, GFP_KERNEL); 491 492 if (!chan->desc_pool_v) 492 493 return -ENOMEM; ··· 1078 1077 pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT); 1079 1078 pm_runtime_use_autosuspend(zdev->dev); 1080 1079 pm_runtime_enable(zdev->dev); 1081 - pm_runtime_get_sync(zdev->dev); 1080 + ret = pm_runtime_resume_and_get(zdev->dev); 1081 + if (ret < 0) { 1082 + dev_err(&pdev->dev, "device wakeup failed.\n"); 1083 + pm_runtime_disable(zdev->dev); 1084 + } 1082 1085 if (!pm_runtime_enabled(zdev->dev)) { 1083 1086 ret = zynqmp_dma_runtime_resume(zdev->dev); 1084 1087 if (ret) ··· 1098 1093 p->dst_addr_widths = BIT(zdev->chan->bus_width / 8); 1099 1094 p->src_addr_widths = BIT(zdev->chan->bus_width / 8); 1100 1095 1101 - dma_async_device_register(&zdev->common); 1096 + ret = dma_async_device_register(&zdev->common); 1097 + if (ret) { 1098 + dev_err(zdev->dev, "failed to register the dma device\n"); 1099 + goto free_chan_resources; 1100 + } 1102 1101 1103 1102 ret = of_dma_controller_register(pdev->dev.of_node, 1104 1103 of_zynqmp_dma_xlate, zdev);
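The switch from pm_runtime_get_sync() to pm_runtime_resume_and_get() matters because the latter drops the usage count itself when the resume fails, so the caller does not have to unwind it. A sketch of the usual pattern in new code (error handling simplified relative to the hunk above; the helper name is illustrative):

#include <linux/pm_runtime.h>

static int example_runtime_get(struct device *dev)
{
	int ret;

	/* Old style: pm_runtime_get_sync() can fail yet still leave the
	 * usage count raised, forcing a pm_runtime_put_noidle() here. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... device is now active; do probe-time setup ... */
	return 0;
}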
+8 -1
include/linux/dmaengine.h
··· 870 870 struct device *dev; 871 871 struct module *owner; 872 872 struct ida chan_ida; 873 - struct mutex chan_mutex; /* to protect chan_ida */ 874 873 875 874 u32 src_addr_widths; 876 875 u32 dst_addr_widths; ··· 1030 1031 return chan->device->device_prep_interleaved_dma(chan, xt, flags); 1031 1032 } 1032 1033 1034 + /** 1035 + * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor. 1036 + * @chan: The channel to be used for this descriptor 1037 + * @dest: Address of buffer to be set 1038 + * @value: Treated as a single byte value that fills the destination buffer 1039 + * @len: The total size of dest 1040 + * @flags: DMA engine flags 1041 + */ 1033 1042 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset( 1034 1043 struct dma_chan *chan, dma_addr_t dest, int value, size_t len, 1035 1044 unsigned long flags)
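For reference against the new kernel-doc, a minimal memset client sketch, assuming a channel that already advertises DMA_MEMSET; the fill byte, flags and helper name are illustrative:

#include <linux/dmaengine.h>

/* Hypothetical memset client: fill 'len' bytes at DMA address 'dst' with
 * the byte 0xA5. */
static int example_dma_memset(struct dma_chan *chan, dma_addr_t dst, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memset(chan, dst, 0xA5, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}

Per the documentation added above, only the low byte of 'value' is used, so passing 0xA5A5A5A5 would still fill the buffer with 0xA5.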
+11
include/linux/soc/renesas/r9a06g032-sysctrl.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ 3 + #define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ 4 + 5 + #ifdef CONFIG_CLK_R9A06G032 6 + int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val); 7 + #else 8 + static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; } 9 + #endif 10 + 11 + #endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
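The stub above lets the RZ/N1 DMA router code compile even when the R9A06G032 clock/sysctrl driver is disabled; the real implementation lives in that clock driver. A hypothetical caller sketch, where the one-mux-bit-per-request-line layout is an assumption for illustration:

#include <linux/bits.h>
#include <linux/types.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>

/* Route DMA request line 'line' to the second DMAC by setting its mux bit,
 * or back to the first by clearing it. */
static int example_set_route(unsigned int line, bool second_dmac)
{
	return r9a06g032_sysctrl_set_dmamux(BIT(line),
					    second_dmac ? BIT(line) : 0);
}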
+28 -3
include/uapi/linux/idxd.h
··· 53 53 54 54 /* IAX */ 55 55 #define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000 56 + #define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000 57 + #define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000 58 + #define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000 59 + #define IDXD_OP_FLAG_SRC2_STS 0x100000 60 + #define IDXD_OP_FLAG_CRC_RFC3720 0x200000 56 61 57 62 /* Opcode */ 58 63 enum dsa_opcode { ··· 86 81 IAX_OPCODE_MEMMOVE, 87 82 IAX_OPCODE_DECOMPRESS = 0x42, 88 83 IAX_OPCODE_COMPRESS, 84 + IAX_OPCODE_CRC64, 85 + IAX_OPCODE_ZERO_DECOMP_32 = 0x48, 86 + IAX_OPCODE_ZERO_DECOMP_16, 87 + IAX_OPCODE_DECOMP_32 = 0x4c, 88 + IAX_OPCODE_DECOMP_16, 89 + IAX_OPCODE_SCAN = 0x50, 90 + IAX_OPCODE_SET_MEMBER, 91 + IAX_OPCODE_EXTRACT, 92 + IAX_OPCODE_SELECT, 93 + IAX_OPCODE_RLE_BURST, 94 + IAX_OPCDE_FIND_UNIQUE, 95 + IAX_OPCODE_EXPAND, 89 96 }; 90 97 91 98 /* Completion record status */ ··· 137 120 IAX_COMP_NONE = 0, 138 121 IAX_COMP_SUCCESS, 139 122 IAX_COMP_PAGE_FAULT_IR = 0x04, 123 + IAX_COMP_ANALYTICS_ERROR = 0x0a, 140 124 IAX_COMP_OUTBUF_OVERFLOW, 141 125 IAX_COMP_BAD_OPCODE = 0x10, 142 126 IAX_COMP_INVALID_FLAGS, ··· 158 140 IAX_COMP_WATCHDOG, 159 141 IAX_COMP_INVALID_COMP_FLAG = 0x30, 160 142 IAX_COMP_INVALID_FILTER_FLAG, 161 - IAX_COMP_INVALID_NUM_ELEMS = 0x33, 143 + IAX_COMP_INVALID_INPUT_SIZE, 144 + IAX_COMP_INVALID_NUM_ELEMS, 145 + IAX_COMP_INVALID_SRC1_WIDTH, 146 + IAX_COMP_INVALID_INVERT_OUT, 162 147 }; 163 148 164 149 #define DSA_COMP_STATUS_MASK 0x7f ··· 340 319 uint32_t output_size; 341 320 uint8_t output_bits; 342 321 uint8_t rsvd3; 343 - uint16_t rsvd4; 344 - uint64_t rsvd5[4]; 322 + uint16_t xor_csum; 323 + uint32_t crc; 324 + uint32_t min; 325 + uint32_t max; 326 + uint32_t sum; 327 + uint64_t rsvd4[2]; 345 328 } __attribute__((packed)); 346 329 347 330 struct iax_raw_completion_record {
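Completion status codes in the DSA/IAX records occupy only the bits covered by DSA_COMP_STATUS_MASK, so user space should mask before comparing against the new IAX_COMP_* values. A small user-space sketch, assuming the record's first byte is its status field; the helper name is illustrative:

#include <stdint.h>
#include <linux/idxd.h>		/* the UAPI header patched above */

/* Mask off the bits above DSA_COMP_STATUS_MASK before comparing. */
static inline int example_iax_status_is(uint8_t status, uint8_t code)
{
	return (status & DSA_COMP_STATUS_MASK) == code;
}

/* e.g. example_iax_status_is(rec.status, IAX_COMP_ANALYTICS_ERROR), where
 * 'rec' is a struct iax_completion_record filled in by the device. */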