Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
"A fairly small update for the dmaengine subsystem. This has a new ARM
dmaengine driver, a couple of new device supports, and a few driver
changes:

New support:
- Renesas RZ/V2H(P) dma support for r9a09g057
- Arm DMA-350 driver
- Tegra Tegra264 ADMA support

Updates:
- AMD ptdma driver code removal and optimizations
- Freescale edma error interrupt handler support"

* tag 'dmaengine-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (27 commits)
dmaengine: idxd: Remove unused pointer and macro
arm64: dts: renesas: r9a09g057: Add DMAC nodes
dmaengine: sh: rz-dmac: Add RZ/V2H(P) support
dmaengine: sh: rz-dmac: Allow for multiple DMACs
irqchip/renesas-rzv2h: Add rzv2h_icu_register_dma_req()
dt-bindings: dma: rz-dmac: Document RZ/V2H(P) family of SoCs
dt-bindings: dma: rz-dmac: Restrict properties for RZ/A1H
dmaengine: idxd: Narrow the restriction on BATCH to ver. 1 only
dmaengine: ti: Add NULL check in udma_probe()
fsldma: Set correct dma_mask based on hw capability
dmaengine: idxd: Check availability of workqueue allocated by idxd wq driver before using
dmaengine: xilinx_dma: Set dma_device directions
dmaengine: tegra210-adma: Add Tegra264 support
dt-bindings: Document Tegra264 ADMA support
dmaengine: dw-edma: Add HDMA NATIVE map check
dmaegnine: fsl-edma: add edma error interrupt handler
dt-bindings: dma: fsl-edma: increase maxItems of interrupts and interrupt-names
dmaengine: ARM_DMA350 should depend on ARM/ARM64
dt-bindings: dma: qcom,bam: Document dma-coherent property
dmaengine: Add Arm DMA-350 driver
...

+1463 -106
+44
Documentation/devicetree/bindings/dma/arm,dma-350.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/dma/arm,dma-350.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Arm CoreLink DMA-350 Controller 8 + 9 + maintainers: 10 + - Robin Murphy <robin.murphy@arm.com> 11 + 12 + allOf: 13 + - $ref: dma-controller.yaml# 14 + 15 + properties: 16 + compatible: 17 + const: arm,dma-350 18 + 19 + reg: 20 + items: 21 + - description: Base and size of the full register map 22 + 23 + interrupts: 24 + minItems: 1 25 + items: 26 + - description: Channel 0 interrupt 27 + - description: Channel 1 interrupt 28 + - description: Channel 2 interrupt 29 + - description: Channel 3 interrupt 30 + - description: Channel 4 interrupt 31 + - description: Channel 5 interrupt 32 + - description: Channel 6 interrupt 33 + - description: Channel 7 interrupt 34 + 35 + "#dma-cells": 36 + const: 1 37 + description: The cell is the trigger input number 38 + 39 + required: 40 + - compatible 41 + - reg 42 + - interrupts 43 + 44 + unevaluatedProperties: false
+2 -2
Documentation/devicetree/bindings/dma/fsl,edma.yaml
··· 48 48 49 49 interrupts: 50 50 minItems: 1 51 - maxItems: 64 51 + maxItems: 65 52 52 53 53 interrupt-names: 54 54 minItems: 1 55 - maxItems: 64 55 + maxItems: 65 56 56 57 57 "#dma-cells": 58 58 description: |
+2
Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.yaml
··· 19 19 - enum: 20 20 - nvidia,tegra210-adma 21 21 - nvidia,tegra186-adma 22 + - nvidia,tegra264-adma 22 23 - items: 23 24 - enum: 24 25 - nvidia,tegra234-adma ··· 93 92 contains: 94 93 enum: 95 94 - nvidia,tegra186-adma 95 + - nvidia,tegra264-adma 96 96 then: 97 97 anyOf: 98 98 - properties:
+2
Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
··· 42 42 interrupts: 43 43 maxItems: 1 44 44 45 + dma-coherent: true 46 + 45 47 iommus: 46 48 minItems: 1 47 49 maxItems: 6
+89 -18
Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
··· 11 11 12 12 properties: 13 13 compatible: 14 - items: 15 - - enum: 16 - - renesas,r7s72100-dmac # RZ/A1H 17 - - renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five 18 - - renesas,r9a07g044-dmac # RZ/G2{L,LC} 19 - - renesas,r9a07g054-dmac # RZ/V2L 20 - - renesas,r9a08g045-dmac # RZ/G3S 21 - - const: renesas,rz-dmac 14 + oneOf: 15 + - items: 16 + - enum: 17 + - renesas,r7s72100-dmac # RZ/A1H 18 + - renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five 19 + - renesas,r9a07g044-dmac # RZ/G2{L,LC} 20 + - renesas,r9a07g054-dmac # RZ/V2L 21 + - renesas,r9a08g045-dmac # RZ/G3S 22 + - const: renesas,rz-dmac 23 + 24 + - const: renesas,r9a09g057-dmac # RZ/V2H(P) 22 25 23 26 reg: 24 27 items: 25 28 - description: Control and channel register block 26 29 - description: DMA extended resource selector block 30 + minItems: 1 27 31 28 32 interrupts: 29 33 maxItems: 17 ··· 56 52 items: 57 53 - description: DMA main clock 58 54 - description: DMA register access clock 55 + minItems: 1 59 56 60 57 clock-names: 61 58 items: ··· 66 61 '#dma-cells': 67 62 const: 1 68 63 description: 69 - The cell specifies the encoded MID/RID values of the DMAC port 70 - connected to the DMA client and the slave channel configuration 71 - parameters. 72 - bits[0:9] - Specifies MID/RID value 64 + The cell specifies the encoded MID/RID or the REQ No values of 65 + the DMAC port connected to the DMA client and the slave channel 66 + configuration parameters. 
67 + bits[0:9] - Specifies the MID/RID or the REQ No value 73 68 bit[10] - Specifies DMA request high enable (HIEN) 74 69 bit[11] - Specifies DMA request detection type (LVL) 75 70 bits[12:14] - Specifies DMAACK output mode (AM) ··· 85 80 items: 86 81 - description: Reset for DMA ARESETN reset terminal 87 82 - description: Reset for DMA RST_ASYNC reset terminal 83 + minItems: 1 88 84 89 85 reset-names: 90 86 items: 91 87 - const: arst 92 88 - const: rst_async 89 + 90 + renesas,icu: 91 + description: 92 + It must contain the phandle to the ICU and the index of the DMAC as seen 93 + from the ICU. 94 + $ref: /schemas/types.yaml#/definitions/phandle-array 95 + items: 96 + - items: 97 + - description: Phandle to the ICU node. 98 + - description: 99 + The number of the DMAC as seen from the ICU, i.e. parameter k from 100 + register ICU_DMkSELy. This may differ from the actual DMAC instance 101 + number. 93 102 94 103 required: 95 104 - compatible ··· 117 98 - $ref: dma-controller.yaml# 118 99 119 100 - if: 120 - not: 121 - properties: 122 - compatible: 123 - contains: 124 - enum: 125 - - renesas,r7s72100-dmac 101 + properties: 102 + compatible: 103 + contains: 104 + enum: 105 + - renesas,r9a07g043-dmac 106 + - renesas,r9a07g044-dmac 107 + - renesas,r9a07g054-dmac 108 + - renesas,r9a08g045-dmac 126 109 then: 110 + properties: 111 + reg: 112 + minItems: 2 113 + clocks: 114 + minItems: 2 115 + resets: 116 + minItems: 2 117 + 118 + renesas,icu: false 119 + 127 120 required: 128 121 - clocks 129 122 - clock-names 130 123 - power-domains 131 124 - resets 132 125 - reset-names 126 + 127 + - if: 128 + properties: 129 + compatible: 130 + contains: 131 + const: renesas,r7s72100-dmac 132 + then: 133 + properties: 134 + reg: 135 + minItems: 2 136 + 137 + clocks: false 138 + clock-names: false 139 + power-domains: false 140 + resets: false 141 + reset-names: false 142 + renesas,icu: false 143 + 144 + - if: 145 + properties: 146 + compatible: 147 + contains: 148 + const: 
renesas,r9a09g057-dmac 149 + then: 150 + properties: 151 + reg: 152 + maxItems: 1 153 + clocks: 154 + maxItems: 1 155 + resets: 156 + maxItems: 1 157 + 158 + clock-names: false 159 + reset-names: false 160 + 161 + required: 162 + - clocks 163 + - power-domains 164 + - renesas,icu 165 + - resets 133 166 134 167 additionalProperties: false 135 168
+2 -2
Documentation/driver-api/dmaengine/provider.rst
··· 172 172 - It's usually used for copying pixel data between host memory and 173 173 memory-mapped GPU device memory, such as found on modern PCI video graphics 174 174 cards. The most immediate example is the OpenGL API function 175 - ``glReadPielx()``, which might require a verbatim copy of a huge framebuffer 176 - from local device memory onto host memory. 175 + ``glReadPixels()``, which might require a verbatim copy of a huge 176 + framebuffer from local device memory onto host memory. 177 177 178 178 - DMA_XOR 179 179
+1 -1
MAINTAINERS
··· 10781 10781 10782 10782 HISILICON DMA DRIVER 10783 10783 M: Zhou Wang <wangzhou1@hisilicon.com> 10784 - M: Jie Hai <haijie1@huawei.com> 10784 + M: Longfang Liu <liulongfang@huawei.com> 10785 10785 L: dmaengine@vger.kernel.org 10786 10786 S: Maintained 10787 10787 F: drivers/dma/hisi_dma.c
+165
arch/arm64/boot/dts/renesas/r9a09g057.dtsi
··· 280 280 resets = <&cpg 0x30>; 281 281 }; 282 282 283 + dmac0: dma-controller@11400000 { 284 + compatible = "renesas,r9a09g057-dmac"; 285 + reg = <0 0x11400000 0 0x10000>; 286 + interrupts = <GIC_SPI 499 IRQ_TYPE_EDGE_RISING>, 287 + <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>, 288 + <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>, 289 + <GIC_SPI 91 IRQ_TYPE_EDGE_RISING>, 290 + <GIC_SPI 92 IRQ_TYPE_EDGE_RISING>, 291 + <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>, 292 + <GIC_SPI 94 IRQ_TYPE_EDGE_RISING>, 293 + <GIC_SPI 95 IRQ_TYPE_EDGE_RISING>, 294 + <GIC_SPI 96 IRQ_TYPE_EDGE_RISING>, 295 + <GIC_SPI 97 IRQ_TYPE_EDGE_RISING>, 296 + <GIC_SPI 98 IRQ_TYPE_EDGE_RISING>, 297 + <GIC_SPI 99 IRQ_TYPE_EDGE_RISING>, 298 + <GIC_SPI 100 IRQ_TYPE_EDGE_RISING>, 299 + <GIC_SPI 101 IRQ_TYPE_EDGE_RISING>, 300 + <GIC_SPI 102 IRQ_TYPE_EDGE_RISING>, 301 + <GIC_SPI 103 IRQ_TYPE_EDGE_RISING>, 302 + <GIC_SPI 104 IRQ_TYPE_EDGE_RISING>; 303 + interrupt-names = "error", 304 + "ch0", "ch1", "ch2", "ch3", 305 + "ch4", "ch5", "ch6", "ch7", 306 + "ch8", "ch9", "ch10", "ch11", 307 + "ch12", "ch13", "ch14", "ch15"; 308 + clocks = <&cpg CPG_MOD 0x0>; 309 + power-domains = <&cpg>; 310 + resets = <&cpg 0x31>; 311 + #dma-cells = <1>; 312 + dma-channels = <16>; 313 + renesas,icu = <&icu 4>; 314 + }; 315 + 316 + dmac1: dma-controller@14830000 { 317 + compatible = "renesas,r9a09g057-dmac"; 318 + reg = <0 0x14830000 0 0x10000>; 319 + interrupts = <GIC_SPI 495 IRQ_TYPE_EDGE_RISING>, 320 + <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>, 321 + <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>, 322 + <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>, 323 + <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>, 324 + <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>, 325 + <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>, 326 + <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>, 327 + <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>, 328 + <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>, 329 + <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>, 330 + <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>, 331 + <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>, 332 + <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>, 333 + <GIC_SPI 38 
IRQ_TYPE_EDGE_RISING>, 334 + <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>, 335 + <GIC_SPI 40 IRQ_TYPE_EDGE_RISING>; 336 + interrupt-names = "error", 337 + "ch0", "ch1", "ch2", "ch3", 338 + "ch4", "ch5", "ch6", "ch7", 339 + "ch8", "ch9", "ch10", "ch11", 340 + "ch12", "ch13", "ch14", "ch15"; 341 + clocks = <&cpg CPG_MOD 0x1>; 342 + power-domains = <&cpg>; 343 + resets = <&cpg 0x32>; 344 + #dma-cells = <1>; 345 + dma-channels = <16>; 346 + renesas,icu = <&icu 0>; 347 + }; 348 + 349 + dmac2: dma-controller@14840000 { 350 + compatible = "renesas,r9a09g057-dmac"; 351 + reg = <0 0x14840000 0 0x10000>; 352 + interrupts = <GIC_SPI 496 IRQ_TYPE_EDGE_RISING>, 353 + <GIC_SPI 41 IRQ_TYPE_EDGE_RISING>, 354 + <GIC_SPI 42 IRQ_TYPE_EDGE_RISING>, 355 + <GIC_SPI 43 IRQ_TYPE_EDGE_RISING>, 356 + <GIC_SPI 44 IRQ_TYPE_EDGE_RISING>, 357 + <GIC_SPI 45 IRQ_TYPE_EDGE_RISING>, 358 + <GIC_SPI 46 IRQ_TYPE_EDGE_RISING>, 359 + <GIC_SPI 47 IRQ_TYPE_EDGE_RISING>, 360 + <GIC_SPI 48 IRQ_TYPE_EDGE_RISING>, 361 + <GIC_SPI 49 IRQ_TYPE_EDGE_RISING>, 362 + <GIC_SPI 50 IRQ_TYPE_EDGE_RISING>, 363 + <GIC_SPI 51 IRQ_TYPE_EDGE_RISING>, 364 + <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>, 365 + <GIC_SPI 53 IRQ_TYPE_EDGE_RISING>, 366 + <GIC_SPI 54 IRQ_TYPE_EDGE_RISING>, 367 + <GIC_SPI 55 IRQ_TYPE_EDGE_RISING>, 368 + <GIC_SPI 56 IRQ_TYPE_EDGE_RISING>; 369 + interrupt-names = "error", 370 + "ch0", "ch1", "ch2", "ch3", 371 + "ch4", "ch5", "ch6", "ch7", 372 + "ch8", "ch9", "ch10", "ch11", 373 + "ch12", "ch13", "ch14", "ch15"; 374 + clocks = <&cpg CPG_MOD 0x2>; 375 + power-domains = <&cpg>; 376 + resets = <&cpg 0x33>; 377 + #dma-cells = <1>; 378 + dma-channels = <16>; 379 + renesas,icu = <&icu 1>; 380 + }; 381 + 382 + dmac3: dma-controller@12000000 { 383 + compatible = "renesas,r9a09g057-dmac"; 384 + reg = <0 0x12000000 0 0x10000>; 385 + interrupts = <GIC_SPI 497 IRQ_TYPE_EDGE_RISING>, 386 + <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>, 387 + <GIC_SPI 58 IRQ_TYPE_EDGE_RISING>, 388 + <GIC_SPI 59 IRQ_TYPE_EDGE_RISING>, 389 + <GIC_SPI 60 
IRQ_TYPE_EDGE_RISING>, 390 + <GIC_SPI 61 IRQ_TYPE_EDGE_RISING>, 391 + <GIC_SPI 62 IRQ_TYPE_EDGE_RISING>, 392 + <GIC_SPI 63 IRQ_TYPE_EDGE_RISING>, 393 + <GIC_SPI 64 IRQ_TYPE_EDGE_RISING>, 394 + <GIC_SPI 65 IRQ_TYPE_EDGE_RISING>, 395 + <GIC_SPI 66 IRQ_TYPE_EDGE_RISING>, 396 + <GIC_SPI 67 IRQ_TYPE_EDGE_RISING>, 397 + <GIC_SPI 68 IRQ_TYPE_EDGE_RISING>, 398 + <GIC_SPI 69 IRQ_TYPE_EDGE_RISING>, 399 + <GIC_SPI 70 IRQ_TYPE_EDGE_RISING>, 400 + <GIC_SPI 71 IRQ_TYPE_EDGE_RISING>, 401 + <GIC_SPI 72 IRQ_TYPE_EDGE_RISING>; 402 + interrupt-names = "error", 403 + "ch0", "ch1", "ch2", "ch3", 404 + "ch4", "ch5", "ch6", "ch7", 405 + "ch8", "ch9", "ch10", "ch11", 406 + "ch12", "ch13", "ch14", "ch15"; 407 + clocks = <&cpg CPG_MOD 0x3>; 408 + power-domains = <&cpg>; 409 + resets = <&cpg 0x34>; 410 + #dma-cells = <1>; 411 + dma-channels = <16>; 412 + renesas,icu = <&icu 2>; 413 + }; 414 + 415 + dmac4: dma-controller@12010000 { 416 + compatible = "renesas,r9a09g057-dmac"; 417 + reg = <0 0x12010000 0 0x10000>; 418 + interrupts = <GIC_SPI 498 IRQ_TYPE_EDGE_RISING>, 419 + <GIC_SPI 73 IRQ_TYPE_EDGE_RISING>, 420 + <GIC_SPI 74 IRQ_TYPE_EDGE_RISING>, 421 + <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>, 422 + <GIC_SPI 76 IRQ_TYPE_EDGE_RISING>, 423 + <GIC_SPI 77 IRQ_TYPE_EDGE_RISING>, 424 + <GIC_SPI 78 IRQ_TYPE_EDGE_RISING>, 425 + <GIC_SPI 79 IRQ_TYPE_EDGE_RISING>, 426 + <GIC_SPI 80 IRQ_TYPE_EDGE_RISING>, 427 + <GIC_SPI 81 IRQ_TYPE_EDGE_RISING>, 428 + <GIC_SPI 82 IRQ_TYPE_EDGE_RISING>, 429 + <GIC_SPI 83 IRQ_TYPE_EDGE_RISING>, 430 + <GIC_SPI 84 IRQ_TYPE_EDGE_RISING>, 431 + <GIC_SPI 85 IRQ_TYPE_EDGE_RISING>, 432 + <GIC_SPI 86 IRQ_TYPE_EDGE_RISING>, 433 + <GIC_SPI 87 IRQ_TYPE_EDGE_RISING>, 434 + <GIC_SPI 88 IRQ_TYPE_EDGE_RISING>; 435 + interrupt-names = "error", 436 + "ch0", "ch1", "ch2", "ch3", 437 + "ch4", "ch5", "ch6", "ch7", 438 + "ch8", "ch9", "ch10", "ch11", 439 + "ch12", "ch13", "ch14", "ch15"; 440 + clocks = <&cpg CPG_MOD 0x4>; 441 + power-domains = <&cpg>; 442 + resets = <&cpg 0x35>; 443 + #dma-cells = 
<1>; 444 + dma-channels = <16>; 445 + renesas,icu = <&icu 3>; 446 + }; 447 + 283 448 ostm0: timer@11800000 { 284 449 compatible = "renesas,r9a09g057-ostm", "renesas,ostm"; 285 450 reg = <0x0 0x11800000 0x0 0x1000>;
+8
drivers/dma/Kconfig
··· 93 93 help 94 94 Enable support for Audio DMA Controller found on Apple Silicon SoCs. 95 95 96 + config ARM_DMA350 97 + tristate "Arm DMA-350 support" 98 + depends on ARM || ARM64 || COMPILE_TEST 99 + select DMA_ENGINE 100 + select DMA_VIRTUAL_CHANNELS 101 + help 102 + Enable support for the Arm DMA-350 controller. 103 + 96 104 config AT_HDMAC 97 105 tristate "Atmel AHB DMA support" 98 106 depends on ARCH_AT91
+1
drivers/dma/Makefile
··· 17 17 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o 18 18 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 19 19 obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o 20 + obj-$(CONFIG_ARM_DMA350) += arm-dma350.o 20 21 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 21 22 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o 22 23 obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
+4 -19
drivers/dma/amd/ptdma/ptdma-dmaengine.c
··· 566 566 struct ae4_device *ae4 = NULL; 567 567 struct pt_dma_chan *chan; 568 568 char *desc_cache_name; 569 - char *cmd_cache_name; 570 569 int ret, i; 571 570 572 571 if (pt->ver == AE4_DMA_VERSION) ··· 581 582 if (!pt->pt_dma_chan) 582 583 return -ENOMEM; 583 584 584 - cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL, 585 - "%s-dmaengine-cmd-cache", 586 - dev_name(pt->dev)); 587 - if (!cmd_cache_name) 588 - return -ENOMEM; 589 - 590 585 desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL, 591 586 "%s-dmaengine-desc-cache", 592 587 dev_name(pt->dev)); 593 - if (!desc_cache_name) { 594 - ret = -ENOMEM; 595 - goto err_cache; 596 - } 588 + if (!desc_cache_name) 589 + return -ENOMEM; 597 590 598 591 pt->dma_desc_cache = kmem_cache_create(desc_cache_name, 599 592 sizeof(struct pt_dma_desc), 0, 600 593 SLAB_HWCACHE_ALIGN, NULL); 601 - if (!pt->dma_desc_cache) { 602 - ret = -ENOMEM; 603 - goto err_cache; 604 - } 594 + if (!pt->dma_desc_cache) 595 + return -ENOMEM; 605 596 606 597 dma_dev->dev = pt->dev; 607 598 dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES; ··· 645 656 err_reg: 646 657 kmem_cache_destroy(pt->dma_desc_cache); 647 658 648 - err_cache: 649 - kmem_cache_destroy(pt->dma_cmd_cache); 650 - 651 659 return ret; 652 660 } 653 661 EXPORT_SYMBOL_GPL(pt_dmaengine_register); ··· 656 670 dma_async_device_unregister(dma_dev); 657 671 658 672 kmem_cache_destroy(pt->dma_desc_cache); 659 - kmem_cache_destroy(pt->dma_cmd_cache); 660 673 }
-1
drivers/dma/amd/ptdma/ptdma.h
··· 254 254 /* Support for the DMA Engine capabilities */ 255 255 struct dma_device dma_dev; 256 256 struct pt_dma_chan *pt_dma_chan; 257 - struct kmem_cache *dma_cmd_cache; 258 257 struct kmem_cache *dma_desc_cache; 259 258 260 259 wait_queue_head_t lsb_queue;
+660
drivers/dma/arm-dma350.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2024-2025 Arm Limited 3 + // Arm DMA-350 driver 4 + 5 + #include <linux/bitfield.h> 6 + #include <linux/dmaengine.h> 7 + #include <linux/dma-mapping.h> 8 + #include <linux/io.h> 9 + #include <linux/of.h> 10 + #include <linux/module.h> 11 + #include <linux/platform_device.h> 12 + 13 + #include "dmaengine.h" 14 + #include "virt-dma.h" 15 + 16 + #define DMAINFO 0x0f00 17 + 18 + #define DMA_BUILDCFG0 0xb0 19 + #define DMA_CFG_DATA_WIDTH GENMASK(18, 16) 20 + #define DMA_CFG_ADDR_WIDTH GENMASK(15, 10) 21 + #define DMA_CFG_NUM_CHANNELS GENMASK(9, 4) 22 + 23 + #define DMA_BUILDCFG1 0xb4 24 + #define DMA_CFG_NUM_TRIGGER_IN GENMASK(8, 0) 25 + 26 + #define IIDR 0xc8 27 + #define IIDR_PRODUCTID GENMASK(31, 20) 28 + #define IIDR_VARIANT GENMASK(19, 16) 29 + #define IIDR_REVISION GENMASK(15, 12) 30 + #define IIDR_IMPLEMENTER GENMASK(11, 0) 31 + 32 + #define PRODUCTID_DMA350 0x3a0 33 + #define IMPLEMENTER_ARM 0x43b 34 + 35 + #define DMACH(n) (0x1000 + 0x0100 * (n)) 36 + 37 + #define CH_CMD 0x00 38 + #define CH_CMD_RESUME BIT(5) 39 + #define CH_CMD_PAUSE BIT(4) 40 + #define CH_CMD_STOP BIT(3) 41 + #define CH_CMD_DISABLE BIT(2) 42 + #define CH_CMD_CLEAR BIT(1) 43 + #define CH_CMD_ENABLE BIT(0) 44 + 45 + #define CH_STATUS 0x04 46 + #define CH_STAT_RESUMEWAIT BIT(21) 47 + #define CH_STAT_PAUSED BIT(20) 48 + #define CH_STAT_STOPPED BIT(19) 49 + #define CH_STAT_DISABLED BIT(18) 50 + #define CH_STAT_ERR BIT(17) 51 + #define CH_STAT_DONE BIT(16) 52 + #define CH_STAT_INTR_ERR BIT(1) 53 + #define CH_STAT_INTR_DONE BIT(0) 54 + 55 + #define CH_INTREN 0x08 56 + #define CH_INTREN_ERR BIT(1) 57 + #define CH_INTREN_DONE BIT(0) 58 + 59 + #define CH_CTRL 0x0c 60 + #define CH_CTRL_USEDESTRIGIN BIT(26) 61 + #define CH_CTRL_USESRCTRIGIN BIT(26) 62 + #define CH_CTRL_DONETYPE GENMASK(23, 21) 63 + #define CH_CTRL_REGRELOADTYPE GENMASK(20, 18) 64 + #define CH_CTRL_XTYPE GENMASK(11, 9) 65 + #define CH_CTRL_TRANSIZE GENMASK(2, 0) 66 + 67 + 
#define CH_SRCADDR 0x10 68 + #define CH_SRCADDRHI 0x14 69 + #define CH_DESADDR 0x18 70 + #define CH_DESADDRHI 0x1c 71 + #define CH_XSIZE 0x20 72 + #define CH_XSIZEHI 0x24 73 + #define CH_SRCTRANSCFG 0x28 74 + #define CH_DESTRANSCFG 0x2c 75 + #define CH_CFG_MAXBURSTLEN GENMASK(19, 16) 76 + #define CH_CFG_PRIVATTR BIT(11) 77 + #define CH_CFG_SHAREATTR GENMASK(9, 8) 78 + #define CH_CFG_MEMATTR GENMASK(7, 0) 79 + 80 + #define TRANSCFG_DEVICE \ 81 + FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \ 82 + FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \ 83 + FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE) 84 + #define TRANSCFG_NC \ 85 + FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \ 86 + FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \ 87 + FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC) 88 + #define TRANSCFG_WB \ 89 + FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \ 90 + FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) | \ 91 + FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB) 92 + 93 + #define CH_XADDRINC 0x30 94 + #define CH_XY_DES GENMASK(31, 16) 95 + #define CH_XY_SRC GENMASK(15, 0) 96 + 97 + #define CH_FILLVAL 0x38 98 + #define CH_SRCTRIGINCFG 0x4c 99 + #define CH_DESTRIGINCFG 0x50 100 + #define CH_LINKATTR 0x70 101 + #define CH_LINK_SHAREATTR GENMASK(9, 8) 102 + #define CH_LINK_MEMATTR GENMASK(7, 0) 103 + 104 + #define CH_AUTOCFG 0x74 105 + #define CH_LINKADDR 0x78 106 + #define CH_LINKADDR_EN BIT(0) 107 + 108 + #define CH_LINKADDRHI 0x7c 109 + #define CH_ERRINFO 0x90 110 + #define CH_ERRINFO_AXIRDPOISERR BIT(18) 111 + #define CH_ERRINFO_AXIWRRESPERR BIT(17) 112 + #define CH_ERRINFO_AXIRDRESPERR BIT(16) 113 + 114 + #define CH_BUILDCFG0 0xf8 115 + #define CH_CFG_INC_WIDTH GENMASK(29, 26) 116 + #define CH_CFG_DATA_WIDTH GENMASK(24, 22) 117 + #define CH_CFG_DATA_BUF_SIZE GENMASK(7, 0) 118 + 119 + #define CH_BUILDCFG1 0xfc 120 + #define CH_CFG_HAS_CMDLINK BIT(8) 121 + #define CH_CFG_HAS_TRIGSEL BIT(7) 122 + #define CH_CFG_HAS_TRIGIN BIT(5) 123 + #define CH_CFG_HAS_WRAP BIT(1) 124 + 125 + 126 + #define LINK_REGCLEAR BIT(0) 127 
+ #define LINK_INTREN BIT(2) 128 + #define LINK_CTRL BIT(3) 129 + #define LINK_SRCADDR BIT(4) 130 + #define LINK_SRCADDRHI BIT(5) 131 + #define LINK_DESADDR BIT(6) 132 + #define LINK_DESADDRHI BIT(7) 133 + #define LINK_XSIZE BIT(8) 134 + #define LINK_XSIZEHI BIT(9) 135 + #define LINK_SRCTRANSCFG BIT(10) 136 + #define LINK_DESTRANSCFG BIT(11) 137 + #define LINK_XADDRINC BIT(12) 138 + #define LINK_FILLVAL BIT(14) 139 + #define LINK_SRCTRIGINCFG BIT(19) 140 + #define LINK_DESTRIGINCFG BIT(20) 141 + #define LINK_AUTOCFG BIT(29) 142 + #define LINK_LINKADDR BIT(30) 143 + #define LINK_LINKADDRHI BIT(31) 144 + 145 + 146 + enum ch_ctrl_donetype { 147 + CH_CTRL_DONETYPE_NONE = 0, 148 + CH_CTRL_DONETYPE_CMD = 1, 149 + CH_CTRL_DONETYPE_CYCLE = 3 150 + }; 151 + 152 + enum ch_ctrl_xtype { 153 + CH_CTRL_XTYPE_DISABLE = 0, 154 + CH_CTRL_XTYPE_CONTINUE = 1, 155 + CH_CTRL_XTYPE_WRAP = 2, 156 + CH_CTRL_XTYPE_FILL = 3 157 + }; 158 + 159 + enum ch_cfg_shareattr { 160 + SHAREATTR_NSH = 0, 161 + SHAREATTR_OSH = 2, 162 + SHAREATTR_ISH = 3 163 + }; 164 + 165 + enum ch_cfg_memattr { 166 + MEMATTR_DEVICE = 0x00, 167 + MEMATTR_NC = 0x44, 168 + MEMATTR_WB = 0xff 169 + }; 170 + 171 + struct d350_desc { 172 + struct virt_dma_desc vd; 173 + u32 command[16]; 174 + u16 xsize; 175 + u16 xsizehi; 176 + u8 tsz; 177 + }; 178 + 179 + struct d350_chan { 180 + struct virt_dma_chan vc; 181 + struct d350_desc *desc; 182 + void __iomem *base; 183 + int irq; 184 + enum dma_status status; 185 + dma_cookie_t cookie; 186 + u32 residue; 187 + u8 tsz; 188 + bool has_trig; 189 + bool has_wrap; 190 + bool coherent; 191 + }; 192 + 193 + struct d350 { 194 + struct dma_device dma; 195 + int nchan; 196 + int nreq; 197 + struct d350_chan channels[] __counted_by(nchan); 198 + }; 199 + 200 + static inline struct d350_chan *to_d350_chan(struct dma_chan *chan) 201 + { 202 + return container_of(chan, struct d350_chan, vc.chan); 203 + } 204 + 205 + static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd) 206 + { 
207 + return container_of(vd, struct d350_desc, vd); 208 + } 209 + 210 + static void d350_desc_free(struct virt_dma_desc *vd) 211 + { 212 + kfree(to_d350_desc(vd)); 213 + } 214 + 215 + static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan, 216 + dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) 217 + { 218 + struct d350_chan *dch = to_d350_chan(chan); 219 + struct d350_desc *desc; 220 + u32 *cmd; 221 + 222 + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); 223 + if (!desc) 224 + return NULL; 225 + 226 + desc->tsz = __ffs(len | dest | src | (1 << dch->tsz)); 227 + desc->xsize = lower_16_bits(len >> desc->tsz); 228 + desc->xsizehi = upper_16_bits(len >> desc->tsz); 229 + 230 + cmd = desc->command; 231 + cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR | 232 + LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG | 233 + LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR; 234 + 235 + cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) | 236 + FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) | 237 + FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD); 238 + 239 + cmd[2] = lower_32_bits(src); 240 + cmd[3] = upper_32_bits(src); 241 + cmd[4] = lower_32_bits(dest); 242 + cmd[5] = upper_32_bits(dest); 243 + cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize); 244 + cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi); 245 + cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC; 246 + cmd[9] = dch->coherent ? 
TRANSCFG_WB : TRANSCFG_NC; 247 + cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1); 248 + cmd[11] = 0; 249 + 250 + return vchan_tx_prep(&dch->vc, &desc->vd, flags); 251 + } 252 + 253 + static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan, 254 + dma_addr_t dest, int value, size_t len, unsigned long flags) 255 + { 256 + struct d350_chan *dch = to_d350_chan(chan); 257 + struct d350_desc *desc; 258 + u32 *cmd; 259 + 260 + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); 261 + if (!desc) 262 + return NULL; 263 + 264 + desc->tsz = __ffs(len | dest | (1 << dch->tsz)); 265 + desc->xsize = lower_16_bits(len >> desc->tsz); 266 + desc->xsizehi = upper_16_bits(len >> desc->tsz); 267 + 268 + cmd = desc->command; 269 + cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI | 270 + LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG | 271 + LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR; 272 + 273 + cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) | 274 + FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) | 275 + FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD); 276 + 277 + cmd[2] = lower_32_bits(dest); 278 + cmd[3] = upper_32_bits(dest); 279 + cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize); 280 + cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi); 281 + cmd[6] = dch->coherent ? 
TRANSCFG_WB : TRANSCFG_NC; 282 + cmd[7] = FIELD_PREP(CH_XY_DES, 1); 283 + cmd[8] = (u8)value * 0x01010101; 284 + cmd[9] = 0; 285 + 286 + return vchan_tx_prep(&dch->vc, &desc->vd, flags); 287 + } 288 + 289 + static int d350_pause(struct dma_chan *chan) 290 + { 291 + struct d350_chan *dch = to_d350_chan(chan); 292 + unsigned long flags; 293 + 294 + spin_lock_irqsave(&dch->vc.lock, flags); 295 + if (dch->status == DMA_IN_PROGRESS) { 296 + writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD); 297 + dch->status = DMA_PAUSED; 298 + } 299 + spin_unlock_irqrestore(&dch->vc.lock, flags); 300 + 301 + return 0; 302 + } 303 + 304 + static int d350_resume(struct dma_chan *chan) 305 + { 306 + struct d350_chan *dch = to_d350_chan(chan); 307 + unsigned long flags; 308 + 309 + spin_lock_irqsave(&dch->vc.lock, flags); 310 + if (dch->status == DMA_PAUSED) { 311 + writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD); 312 + dch->status = DMA_IN_PROGRESS; 313 + } 314 + spin_unlock_irqrestore(&dch->vc.lock, flags); 315 + 316 + return 0; 317 + } 318 + 319 + static u32 d350_get_residue(struct d350_chan *dch) 320 + { 321 + u32 res, xsize, xsizehi, hi_new; 322 + int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */ 323 + 324 + hi_new = readl_relaxed(dch->base + CH_XSIZEHI); 325 + do { 326 + xsizehi = hi_new; 327 + xsize = readl_relaxed(dch->base + CH_XSIZE); 328 + hi_new = readl_relaxed(dch->base + CH_XSIZEHI); 329 + } while (xsizehi != hi_new && --retries); 330 + 331 + res = FIELD_GET(CH_XY_DES, xsize); 332 + res |= FIELD_GET(CH_XY_DES, xsizehi) << 16; 333 + 334 + return res << dch->desc->tsz; 335 + } 336 + 337 + static int d350_terminate_all(struct dma_chan *chan) 338 + { 339 + struct d350_chan *dch = to_d350_chan(chan); 340 + unsigned long flags; 341 + LIST_HEAD(list); 342 + 343 + spin_lock_irqsave(&dch->vc.lock, flags); 344 + writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD); 345 + if (dch->desc) { 346 + if (dch->status != DMA_ERROR) 347 + vchan_terminate_vdesc(&dch->desc->vd); 
348 + dch->desc = NULL; 349 + dch->status = DMA_COMPLETE; 350 + } 351 + vchan_get_all_descriptors(&dch->vc, &list); 352 + list_splice_tail(&list, &dch->vc.desc_terminated); 353 + spin_unlock_irqrestore(&dch->vc.lock, flags); 354 + 355 + return 0; 356 + } 357 + 358 + static void d350_synchronize(struct dma_chan *chan) 359 + { 360 + struct d350_chan *dch = to_d350_chan(chan); 361 + 362 + vchan_synchronize(&dch->vc); 363 + } 364 + 365 + static u32 d350_desc_bytes(struct d350_desc *desc) 366 + { 367 + return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz; 368 + } 369 + 370 + static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 371 + struct dma_tx_state *state) 372 + { 373 + struct d350_chan *dch = to_d350_chan(chan); 374 + struct virt_dma_desc *vd; 375 + enum dma_status status; 376 + unsigned long flags; 377 + u32 residue = 0; 378 + 379 + status = dma_cookie_status(chan, cookie, state); 380 + 381 + spin_lock_irqsave(&dch->vc.lock, flags); 382 + if (cookie == dch->cookie) { 383 + status = dch->status; 384 + if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) 385 + dch->residue = d350_get_residue(dch); 386 + residue = dch->residue; 387 + } else if ((vd = vchan_find_desc(&dch->vc, cookie))) { 388 + residue = d350_desc_bytes(to_d350_desc(vd)); 389 + } else if (status == DMA_IN_PROGRESS) { 390 + /* Somebody else terminated it? 
*/ 391 + status = DMA_ERROR; 392 + } 393 + spin_unlock_irqrestore(&dch->vc.lock, flags); 394 + 395 + dma_set_residue(state, residue); 396 + return status; 397 + } 398 + 399 + static void d350_start_next(struct d350_chan *dch) 400 + { 401 + u32 hdr, *reg; 402 + 403 + dch->desc = to_d350_desc(vchan_next_desc(&dch->vc)); 404 + if (!dch->desc) 405 + return; 406 + 407 + list_del(&dch->desc->vd.node); 408 + dch->status = DMA_IN_PROGRESS; 409 + dch->cookie = dch->desc->vd.tx.cookie; 410 + dch->residue = d350_desc_bytes(dch->desc); 411 + 412 + hdr = dch->desc->command[0]; 413 + reg = &dch->desc->command[1]; 414 + 415 + if (hdr & LINK_INTREN) 416 + writel_relaxed(*reg++, dch->base + CH_INTREN); 417 + if (hdr & LINK_CTRL) 418 + writel_relaxed(*reg++, dch->base + CH_CTRL); 419 + if (hdr & LINK_SRCADDR) 420 + writel_relaxed(*reg++, dch->base + CH_SRCADDR); 421 + if (hdr & LINK_SRCADDRHI) 422 + writel_relaxed(*reg++, dch->base + CH_SRCADDRHI); 423 + if (hdr & LINK_DESADDR) 424 + writel_relaxed(*reg++, dch->base + CH_DESADDR); 425 + if (hdr & LINK_DESADDRHI) 426 + writel_relaxed(*reg++, dch->base + CH_DESADDRHI); 427 + if (hdr & LINK_XSIZE) 428 + writel_relaxed(*reg++, dch->base + CH_XSIZE); 429 + if (hdr & LINK_XSIZEHI) 430 + writel_relaxed(*reg++, dch->base + CH_XSIZEHI); 431 + if (hdr & LINK_SRCTRANSCFG) 432 + writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG); 433 + if (hdr & LINK_DESTRANSCFG) 434 + writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG); 435 + if (hdr & LINK_XADDRINC) 436 + writel_relaxed(*reg++, dch->base + CH_XADDRINC); 437 + if (hdr & LINK_FILLVAL) 438 + writel_relaxed(*reg++, dch->base + CH_FILLVAL); 439 + if (hdr & LINK_SRCTRIGINCFG) 440 + writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG); 441 + if (hdr & LINK_DESTRIGINCFG) 442 + writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG); 443 + if (hdr & LINK_AUTOCFG) 444 + writel_relaxed(*reg++, dch->base + CH_AUTOCFG); 445 + if (hdr & LINK_LINKADDR) 446 + writel_relaxed(*reg++, dch->base + CH_LINKADDR); 447 + 
if (hdr & LINK_LINKADDRHI) 448 + writel_relaxed(*reg++, dch->base + CH_LINKADDRHI); 449 + 450 + writel(CH_CMD_ENABLE, dch->base + CH_CMD); 451 + } 452 + 453 + static void d350_issue_pending(struct dma_chan *chan) 454 + { 455 + struct d350_chan *dch = to_d350_chan(chan); 456 + unsigned long flags; 457 + 458 + spin_lock_irqsave(&dch->vc.lock, flags); 459 + if (vchan_issue_pending(&dch->vc) && !dch->desc) 460 + d350_start_next(dch); 461 + spin_unlock_irqrestore(&dch->vc.lock, flags); 462 + } 463 + 464 + static irqreturn_t d350_irq(int irq, void *data) 465 + { 466 + struct d350_chan *dch = data; 467 + struct device *dev = dch->vc.chan.device->dev; 468 + struct virt_dma_desc *vd = &dch->desc->vd; 469 + u32 ch_status; 470 + 471 + ch_status = readl(dch->base + CH_STATUS); 472 + if (!ch_status) 473 + return IRQ_NONE; 474 + 475 + if (ch_status & CH_STAT_INTR_ERR) { 476 + u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO); 477 + 478 + if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR)) 479 + vd->tx_result.result = DMA_TRANS_READ_FAILED; 480 + else if (errinfo & CH_ERRINFO_AXIWRRESPERR) 481 + vd->tx_result.result = DMA_TRANS_WRITE_FAILED; 482 + else 483 + vd->tx_result.result = DMA_TRANS_ABORTED; 484 + 485 + vd->tx_result.residue = d350_get_residue(dch); 486 + } else if (!(ch_status & CH_STAT_INTR_DONE)) { 487 + dev_warn(dev, "Unexpected IRQ source? 
0x%08x\n", ch_status); 488 + } 489 + writel_relaxed(ch_status, dch->base + CH_STATUS); 490 + 491 + spin_lock(&dch->vc.lock); 492 + vchan_cookie_complete(vd); 493 + if (ch_status & CH_STAT_INTR_DONE) { 494 + dch->status = DMA_COMPLETE; 495 + dch->residue = 0; 496 + d350_start_next(dch); 497 + } else { 498 + dch->status = DMA_ERROR; 499 + dch->residue = vd->tx_result.residue; 500 + } 501 + spin_unlock(&dch->vc.lock); 502 + 503 + return IRQ_HANDLED; 504 + } 505 + 506 + static int d350_alloc_chan_resources(struct dma_chan *chan) 507 + { 508 + struct d350_chan *dch = to_d350_chan(chan); 509 + int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED, 510 + dev_name(&dch->vc.chan.dev->device), dch); 511 + if (!ret) 512 + writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN); 513 + 514 + return ret; 515 + } 516 + 517 + static void d350_free_chan_resources(struct dma_chan *chan) 518 + { 519 + struct d350_chan *dch = to_d350_chan(chan); 520 + 521 + writel_relaxed(0, dch->base + CH_INTREN); 522 + free_irq(dch->irq, dch); 523 + vchan_free_chan_resources(&dch->vc); 524 + } 525 + 526 + static int d350_probe(struct platform_device *pdev) 527 + { 528 + struct device *dev = &pdev->dev; 529 + struct d350 *dmac; 530 + void __iomem *base; 531 + u32 reg; 532 + int ret, nchan, dw, aw, r, p; 533 + bool coherent, memset; 534 + 535 + base = devm_platform_ioremap_resource(pdev, 0); 536 + if (IS_ERR(base)) 537 + return PTR_ERR(base); 538 + 539 + reg = readl_relaxed(base + DMAINFO + IIDR); 540 + r = FIELD_GET(IIDR_VARIANT, reg); 541 + p = FIELD_GET(IIDR_REVISION, reg); 542 + if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM || 543 + FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350) 544 + return dev_err_probe(dev, -ENODEV, "Not a DMA-350!"); 545 + 546 + reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0); 547 + nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1; 548 + dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg); 549 + aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1; 550 + 551 + 
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw)); 552 + coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT; 553 + 554 + dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL); 555 + if (!dmac) 556 + return -ENOMEM; 557 + 558 + dmac->nchan = nchan; 559 + 560 + reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1); 561 + dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg); 562 + 563 + dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq); 564 + 565 + dmac->dma.dev = dev; 566 + for (int i = min(dw, 16); i > 0; i /= 2) { 567 + dmac->dma.src_addr_widths |= BIT(i); 568 + dmac->dma.dst_addr_widths |= BIT(i); 569 + } 570 + dmac->dma.directions = BIT(DMA_MEM_TO_MEM); 571 + dmac->dma.descriptor_reuse = true; 572 + dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 573 + dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources; 574 + dmac->dma.device_free_chan_resources = d350_free_chan_resources; 575 + dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask); 576 + dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy; 577 + dmac->dma.device_pause = d350_pause; 578 + dmac->dma.device_resume = d350_resume; 579 + dmac->dma.device_terminate_all = d350_terminate_all; 580 + dmac->dma.device_synchronize = d350_synchronize; 581 + dmac->dma.device_tx_status = d350_tx_status; 582 + dmac->dma.device_issue_pending = d350_issue_pending; 583 + INIT_LIST_HEAD(&dmac->dma.channels); 584 + 585 + /* Would be nice to have per-channel caps for this... 
*/ 586 + memset = true; 587 + for (int i = 0; i < nchan; i++) { 588 + struct d350_chan *dch = &dmac->channels[i]; 589 + 590 + dch->base = base + DMACH(i); 591 + writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD); 592 + 593 + reg = readl_relaxed(dch->base + CH_BUILDCFG1); 594 + if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) { 595 + dev_warn(dev, "No command link support on channel %d\n", i); 596 + continue; 597 + } 598 + dch->irq = platform_get_irq(pdev, i); 599 + if (dch->irq < 0) 600 + return dev_err_probe(dev, dch->irq, 601 + "Failed to get IRQ for channel %d\n", i); 602 + 603 + dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg); 604 + dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) & 605 + FIELD_GET(CH_CFG_HAS_TRIGSEL, reg); 606 + 607 + /* Fill is a special case of Wrap */ 608 + memset &= dch->has_wrap; 609 + 610 + reg = readl_relaxed(dch->base + CH_BUILDCFG0); 611 + dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg); 612 + 613 + reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH); 614 + reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? 
MEMATTR_WB : MEMATTR_NC); 615 + writel_relaxed(reg, dch->base + CH_LINKATTR); 616 + 617 + dch->vc.desc_free = d350_desc_free; 618 + vchan_init(&dch->vc, &dmac->dma); 619 + } 620 + 621 + if (memset) { 622 + dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask); 623 + dmac->dma.device_prep_dma_memset = d350_prep_memset; 624 + } 625 + 626 + platform_set_drvdata(pdev, dmac); 627 + 628 + ret = dma_async_device_register(&dmac->dma); 629 + if (ret) 630 + return dev_err_probe(dev, ret, "Failed to register DMA device\n"); 631 + 632 + return 0; 633 + } 634 + 635 + static void d350_remove(struct platform_device *pdev) 636 + { 637 + struct d350 *dmac = platform_get_drvdata(pdev); 638 + 639 + dma_async_device_unregister(&dmac->dma); 640 + } 641 + 642 + static const struct of_device_id d350_of_match[] __maybe_unused = { 643 + { .compatible = "arm,dma-350" }, 644 + {} 645 + }; 646 + MODULE_DEVICE_TABLE(of, d350_of_match); 647 + 648 + static struct platform_driver d350_driver = { 649 + .driver = { 650 + .name = "arm-dma350", 651 + .of_match_table = of_match_ptr(d350_of_match), 652 + }, 653 + .probe = d350_probe, 654 + .remove = d350_remove, 655 + }; 656 + module_platform_driver(d350_driver); 657 + 658 + MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>"); 659 + MODULE_DESCRIPTION("Arm DMA-350 driver"); 660 + MODULE_LICENSE("GPL v2");
+2 -4
drivers/dma/at_xdmac.c
··· 2033 2033 * at_xdmac_start_xfer() for this descriptor. Now it's time 2034 2034 * to release it. 2035 2035 */ 2036 - if (desc->active_xfer) { 2037 - pm_runtime_put_autosuspend(atxdmac->dev); 2038 - pm_runtime_mark_last_busy(atxdmac->dev); 2039 - } 2036 + if (desc->active_xfer) 2037 + pm_runtime_put_noidle(atxdmac->dev); 2040 2038 } 2041 2039 2042 2040 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+4 -1
drivers/dma/dw-edma/dw-edma-pcie.c
··· 136 136 map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val); 137 137 if (map != EDMA_MF_EDMA_LEGACY && 138 138 map != EDMA_MF_EDMA_UNROLL && 139 - map != EDMA_MF_HDMA_COMPAT) 139 + map != EDMA_MF_HDMA_COMPAT && 140 + map != EDMA_MF_HDMA_NATIVE) 140 141 return; 141 142 142 143 pdata->mf = map; ··· 292 291 pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf); 293 292 else if (chip->mf == EDMA_MF_HDMA_COMPAT) 294 293 pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf); 294 + else if (chip->mf == EDMA_MF_HDMA_NATIVE) 295 + pci_dbg(pdev, "Version:\tHDMA Native (0x%x)\n", chip->mf); 295 296 else 296 297 pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf); 297 298
+22 -8
drivers/dma/fsl-edma-common.c
··· 95 95 } 96 96 97 97 val = edma_readl_chreg(fsl_chan, ch_csr); 98 - val |= EDMA_V3_CH_CSR_ERQ; 98 + val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI; 99 99 edma_writel_chreg(fsl_chan, val, ch_csr); 100 100 } 101 101 ··· 821 821 int fsl_edma_alloc_chan_resources(struct dma_chan *chan) 822 822 { 823 823 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 824 - int ret; 824 + int ret = 0; 825 825 826 826 if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) 827 827 clk_prepare_enable(fsl_chan->clk); ··· 831 831 sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd), 832 832 32, 0); 833 833 834 - if (fsl_chan->txirq) { 834 + if (fsl_chan->txirq) 835 835 ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED, 836 836 fsl_chan->chan_name, fsl_chan); 837 837 838 - if (ret) { 839 - dma_pool_destroy(fsl_chan->tcd_pool); 840 - return ret; 841 - } 842 - } 838 + if (ret) 839 + goto err_txirq; 840 + 841 + if (fsl_chan->errirq > 0) 842 + ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED, 843 + fsl_chan->errirq_name, fsl_chan); 844 + 845 + if (ret) 846 + goto err_errirq; 843 847 844 848 return 0; 849 + 850 + err_errirq: 851 + if (fsl_chan->txirq) 852 + free_irq(fsl_chan->txirq, fsl_chan); 853 + err_txirq: 854 + dma_pool_destroy(fsl_chan->tcd_pool); 855 + 856 + return ret; 845 857 } 846 858 847 859 void fsl_edma_free_chan_resources(struct dma_chan *chan) ··· 874 862 875 863 if (fsl_chan->txirq) 876 864 free_irq(fsl_chan->txirq, fsl_chan); 865 + if (fsl_chan->errirq) 866 + free_irq(fsl_chan->errirq, fsl_chan); 877 867 878 868 vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 879 869 dma_pool_destroy(fsl_chan->tcd_pool);
+18
drivers/dma/fsl-edma-common.h
··· 71 71 #define EDMA_V3_CH_ES_ERR BIT(31) 72 72 #define EDMA_V3_MP_ES_VLD BIT(31) 73 73 74 + #define EDMA_V3_CH_ERR_DBE BIT(0) 75 + #define EDMA_V3_CH_ERR_SBE BIT(1) 76 + #define EDMA_V3_CH_ERR_SGE BIT(2) 77 + #define EDMA_V3_CH_ERR_NCE BIT(3) 78 + #define EDMA_V3_CH_ERR_DOE BIT(4) 79 + #define EDMA_V3_CH_ERR_DAE BIT(5) 80 + #define EDMA_V3_CH_ERR_SOE BIT(6) 81 + #define EDMA_V3_CH_ERR_SAE BIT(7) 82 + #define EDMA_V3_CH_ERR_ECX BIT(8) 83 + #define EDMA_V3_CH_ERR_UCE BIT(9) 84 + #define EDMA_V3_CH_ERR BIT(31) 85 + 74 86 enum fsl_edma_pm_state { 75 87 RUNNING = 0, 76 88 SUSPENDED, ··· 174 162 u32 dma_dev_size; 175 163 enum dma_data_direction dma_dir; 176 164 char chan_name[32]; 165 + char errirq_name[36]; 177 166 void __iomem *tcd; 178 167 void __iomem *mux_addr; 179 168 u32 real_count; ··· 187 174 int priority; 188 175 int hw_chanid; 189 176 int txirq; 177 + int errirq; 190 178 irqreturn_t (*irq_handler)(int irq, void *dev_id); 179 + irqreturn_t (*errirq_handler)(int irq, void *dev_id); 191 180 bool is_rxchan; 192 181 bool is_remote; 193 182 bool is_multi_fifo; ··· 223 208 /* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */ 224 209 #define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14) 225 210 #define FSL_EDMA_DRV_TCD64 BIT(15) 211 + /* All channel ERR IRQ share one IRQ line */ 212 + #define FSL_EDMA_DRV_ERRIRQ_SHARE BIT(16) 213 + 226 214 227 215 #define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \ 228 216 FSL_EDMA_DRV_BUS_8BYTE | \
+109 -5
drivers/dma/fsl-edma-main.c
··· 50 50 return IRQ_HANDLED; 51 51 } 52 52 53 + static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan) 54 + { 55 + unsigned int ch_err; 56 + u32 val; 57 + 58 + scoped_guard(spinlock, &fsl_chan->vchan.lock) { 59 + ch_err = edma_readl_chreg(fsl_chan, ch_es); 60 + if (!(ch_err & EDMA_V3_CH_ERR)) 61 + return; 62 + 63 + edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es); 64 + val = edma_readl_chreg(fsl_chan, ch_csr); 65 + val &= ~EDMA_V3_CH_CSR_ERQ; 66 + edma_writel_chreg(fsl_chan, val, ch_csr); 67 + } 68 + 69 + /* Ignore this interrupt since channel has been disabled already */ 70 + if (!fsl_chan->edesc) 71 + return; 72 + 73 + if (ch_err & EDMA_V3_CH_ERR_DBE) 74 + dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n"); 75 + 76 + if (ch_err & EDMA_V3_CH_ERR_SBE) 77 + dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n"); 78 + 79 + if (ch_err & EDMA_V3_CH_ERR_SGE) 80 + dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n"); 81 + 82 + if (ch_err & EDMA_V3_CH_ERR_NCE) 83 + dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n"); 84 + 85 + if (ch_err & EDMA_V3_CH_ERR_DOE) 86 + dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n"); 87 + 88 + if (ch_err & EDMA_V3_CH_ERR_DAE) 89 + dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n"); 90 + 91 + if (ch_err & EDMA_V3_CH_ERR_SOE) 92 + dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n"); 93 + 94 + if (ch_err & EDMA_V3_CH_ERR_SAE) 95 + dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n"); 96 + 97 + if (ch_err & EDMA_V3_CH_ERR_ECX) 98 + dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n"); 99 + 100 + if (ch_err & EDMA_V3_CH_ERR_UCE) 101 + dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n"); 102 + 103 + fsl_chan->status = DMA_ERROR; 104 + } 105 + 106 + static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id) 107 + { 108 + 
struct fsl_edma_chan *fsl_chan = dev_id; 109 + 110 + fsl_edma3_err_check(fsl_chan); 111 + 112 + return IRQ_HANDLED; 113 + } 114 + 115 + static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id) 116 + { 117 + struct fsl_edma_engine *fsl_edma = dev_id; 118 + unsigned int ch; 119 + 120 + for (ch = 0; ch < fsl_edma->n_chans; ch++) { 121 + if (fsl_edma->chan_masked & BIT(ch)) 122 + continue; 123 + 124 + fsl_edma3_err_check(&fsl_edma->chans[ch]); 125 + } 126 + 127 + return IRQ_HANDLED; 128 + } 129 + 53 130 static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id) 54 131 { 55 132 struct fsl_edma_chan *fsl_chan = dev_id; ··· 386 309 387 310 static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) 388 311 { 389 - int i; 312 + char *errirq_name; 313 + int i, ret; 390 314 391 315 for (i = 0; i < fsl_edma->n_chans; i++) { 392 316 ··· 402 324 return -EINVAL; 403 325 404 326 fsl_chan->irq_handler = fsl_edma3_tx_handler; 327 + 328 + if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) { 329 + fsl_chan->errirq = fsl_chan->txirq; 330 + fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan; 331 + } 332 + } 333 + 334 + /* All channel err use one irq number */ 335 + if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) { 336 + /* last one is error irq */ 337 + fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans); 338 + if (fsl_edma->errirq < 0) 339 + return 0; /* dts miss err irq, treat as no err irq case */ 340 + 341 + errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err", 342 + dev_name(&pdev->dev)); 343 + 344 + ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared, 345 + 0, errirq_name, fsl_edma); 346 + if (ret) 347 + return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n"); 405 348 } 406 349 407 350 return 0; ··· 563 464 }; 564 465 565 466 static struct fsl_edma_drvdata imx8qm_data = { 566 - .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | 
FSL_EDMA_DRV_MEM_REMOTE, 467 + .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE 468 + | FSL_EDMA_DRV_ERRIRQ_SHARE, 567 469 .chreg_space_sz = 0x10000, 568 470 .chreg_off = 0x10000, 569 471 .setup_irq = fsl_edma3_irq_init, ··· 581 481 }; 582 482 583 483 static struct fsl_edma_drvdata imx93_data3 = { 584 - .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3, 484 + .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE, 585 485 .chreg_space_sz = 0x10000, 586 486 .chreg_off = 0x10000, 587 487 .setup_irq = fsl_edma3_irq_init, 588 488 }; 589 489 590 490 static struct fsl_edma_drvdata imx93_data4 = { 591 - .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4, 491 + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 492 + | FSL_EDMA_DRV_ERRIRQ_SHARE, 592 493 .chreg_space_sz = 0x8000, 593 494 .chreg_off = 0x10000, 594 495 .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), ··· 599 498 600 499 static struct fsl_edma_drvdata imx95_data5 = { 601 500 .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 | 602 - FSL_EDMA_DRV_TCD64, 501 + FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE, 603 502 .chreg_space_sz = 0x8000, 604 503 .chreg_off = 0x10000, 605 504 .mux_off = 0x200, ··· 800 699 801 700 snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d", 802 701 dev_name(&pdev->dev), i); 702 + 703 + snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name), 704 + "%s-CH%02d-err", dev_name(&pdev->dev), i); 803 705 804 706 fsl_chan->edma = fsl_edma; 805 707 fsl_chan->pm_state = RUNNING;
+16 -4
drivers/dma/fsldma.c
··· 1226 1226 1227 1227 fdev->dev = &op->dev; 1228 1228 INIT_LIST_HEAD(&fdev->common.channels); 1229 + /* The DMA address bits supported for this device. */ 1230 + fdev->addr_bits = (long)device_get_match_data(fdev->dev); 1229 1231 1230 1232 /* ioremap the registers for use */ 1231 1233 fdev->regs = of_iomap(op->dev.of_node, 0); ··· 1256 1254 fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1257 1255 fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; 1258 1256 1259 - dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); 1257 + dma_set_mask(&(op->dev), DMA_BIT_MASK(fdev->addr_bits)); 1260 1258 1261 1259 platform_set_drvdata(op, fdev); 1262 1260 ··· 1389 1387 }; 1390 1388 #endif 1391 1389 1390 + /* The .data field is used for dma-bit-mask. */ 1392 1391 static const struct of_device_id fsldma_of_ids[] = { 1393 - { .compatible = "fsl,elo3-dma", }, 1394 - { .compatible = "fsl,eloplus-dma", }, 1395 - { .compatible = "fsl,elo-dma", }, 1392 + { 1393 + .compatible = "fsl,elo3-dma", 1394 + .data = (void *)40, 1395 + }, 1396 + { 1397 + .compatible = "fsl,eloplus-dma", 1398 + .data = (void *)36, 1399 + }, 1400 + { 1401 + .compatible = "fsl,elo-dma", 1402 + .data = (void *)32, 1403 + }, 1396 1404 {} 1397 1405 }; 1398 1406 MODULE_DEVICE_TABLE(of, fsldma_of_ids);
+1
drivers/dma/fsldma.h
··· 124 124 struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; 125 125 u32 feature; /* The same as DMA channels */ 126 126 int irq; /* Channel IRQ */ 127 + int addr_bits; /* DMA addressing bits supported */ 127 128 }; 128 129 129 130 /* Define macros for fsldma_chan->feature property */
+7 -3
drivers/dma/idxd/cdev.c
··· 349 349 set_bit(h, evl->bmap); 350 350 h = (h + 1) % size; 351 351 } 352 - drain_workqueue(wq->wq); 352 + if (wq->wq) 353 + drain_workqueue(wq->wq); 354 + 353 355 mutex_unlock(&evl->lock); 354 356 } 355 357 ··· 444 442 * DSA devices are capable of indirect ("batch") command submission. 445 443 * On devices where direct user submissions are not safe, we cannot 446 444 * allow this since there is no good way for us to verify these 447 - * indirect commands. 445 + * indirect commands. Narrow the restriction of operations with the 446 + * BATCH opcode to only DSA version 1 devices. 448 447 */ 449 448 if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH && 450 - !wq->idxd->user_submission_safe) 449 + wq->idxd->hw.version == DEVICE_VERSION_1 && 450 + !wq->idxd->user_submission_safe) 451 451 return -EINVAL; 452 452 /* 453 453 * As per the programming specification, the completion address must be
-2
drivers/dma/idxd/idxd.h
··· 19 19 20 20 #define IDXD_DRIVER_VERSION "1.00" 21 21 22 - extern struct kmem_cache *idxd_desc_pool; 23 22 extern bool tc_override; 24 23 25 24 struct idxd_wq; ··· 170 171 171 172 #define DRIVER_NAME_SIZE 128 172 173 173 - #define IDXD_ALLOCATED_BATCH_SIZE 128U 174 174 #define WQ_NAME_SIZE 1024 175 175 #define WQ_TYPE_SIZE 10 176 176
+4 -2
drivers/dma/idxd/sysfs.c
··· 1208 1208 1209 1209 /* On systems where direct user submissions are not safe, we need to clear out 1210 1210 * the BATCH capability from the capability mask in sysfs since we cannot support 1211 - * that command on such systems. 1211 + * that command on such systems. Narrow the restriction of operations with the 1212 + * BATCH opcode to only DSA version 1 devices. 1212 1213 */ 1213 - if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe) 1214 + if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe && 1215 + confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1) 1214 1216 clear_bit(DSA_OPCODE_BATCH % 64, &val); 1215 1217 1216 1218 pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);
+76 -8
drivers/dma/sh/rz-dmac.c
··· 14 14 #include <linux/dmaengine.h> 15 15 #include <linux/interrupt.h> 16 16 #include <linux/iopoll.h> 17 + #include <linux/irqchip/irq-renesas-rzv2h.h> 17 18 #include <linux/list.h> 18 19 #include <linux/module.h> 19 20 #include <linux/of.h> ··· 90 89 91 90 #define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan) 92 91 92 + struct rz_dmac_icu { 93 + struct platform_device *pdev; 94 + u8 dmac_index; 95 + }; 96 + 93 97 struct rz_dmac { 94 98 struct dma_device engine; 99 + struct rz_dmac_icu icu; 95 100 struct device *dev; 96 101 struct reset_control *rstc; 97 102 void __iomem *base; ··· 105 98 106 99 unsigned int n_channels; 107 100 struct rz_dmac_chan *channels; 101 + 102 + bool has_icu; 108 103 109 104 DECLARE_BITMAP(modules, 1024); 110 105 }; ··· 175 166 #define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16 176 167 #define RZ_DMAC_MAX_CHANNELS 16 177 168 #define DMAC_NR_LMDESC 64 169 + 170 + /* RZ/V2H ICU related */ 171 + #define RZV2H_MAX_DMAC_INDEX 4 178 172 179 173 /* 180 174 * ----------------------------------------------------------------------------- ··· 336 324 lmdesc->chext = 0; 337 325 lmdesc->header = HEADER_LV; 338 326 339 - rz_dmac_set_dmars_register(dmac, channel->index, 0); 327 + if (dmac->has_icu) { 328 + rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index, 329 + channel->index, 330 + RZV2H_ICU_DMAC_REQ_NO_DEFAULT); 331 + } else { 332 + rz_dmac_set_dmars_register(dmac, channel->index, 0); 333 + } 340 334 341 335 channel->chcfg = chcfg; 342 336 channel->chctrl = CHCTRL_STG | CHCTRL_SETEN; ··· 393 375 394 376 channel->lmdesc.tail = lmdesc; 395 377 396 - rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid); 378 + if (dmac->has_icu) { 379 + rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index, 380 + channel->index, channel->mid_rid); 381 + } else { 382 + rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid); 383 + } 384 + 397 385 channel->chctrl = CHCTRL_SETEN; 398 386 } 399 387 ··· 671 647 if 
(ret < 0) 672 648 dev_warn(dmac->dev, "DMA Timeout"); 673 649 674 - rz_dmac_set_dmars_register(dmac, channel->index, 0); 650 + if (dmac->has_icu) { 651 + rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index, 652 + channel->index, 653 + RZV2H_ICU_DMAC_REQ_NO_DEFAULT); 654 + } else { 655 + rz_dmac_set_dmars_register(dmac, channel->index, 0); 656 + } 675 657 } 676 658 677 659 /* ··· 778 748 dma_cap_zero(mask); 779 749 dma_cap_set(DMA_SLAVE, mask); 780 750 781 - return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec); 751 + return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec, 752 + ofdma->of_node); 782 753 } 783 754 784 755 /* ··· 854 823 return 0; 855 824 } 856 825 826 + static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac) 827 + { 828 + struct device_node *np = dev->of_node; 829 + struct of_phandle_args args; 830 + uint32_t dmac_index; 831 + int ret; 832 + 833 + ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args); 834 + if (ret == -ENOENT) 835 + return 0; 836 + if (ret) 837 + return ret; 838 + 839 + dmac->has_icu = true; 840 + 841 + dmac->icu.pdev = of_find_device_by_node(args.np); 842 + of_node_put(args.np); 843 + if (!dmac->icu.pdev) { 844 + dev_err(dev, "ICU device not found.\n"); 845 + return -ENODEV; 846 + } 847 + 848 + dmac_index = args.args[0]; 849 + if (dmac_index > RZV2H_MAX_DMAC_INDEX) { 850 + dev_err(dev, "DMAC index %u invalid.\n", dmac_index); 851 + return -EINVAL; 852 + } 853 + dmac->icu.dmac_index = dmac_index; 854 + 855 + return 0; 856 + } 857 + 857 858 static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac) 858 859 { 859 860 struct device_node *np = dev->of_node; ··· 902 839 return -EINVAL; 903 840 } 904 841 905 - return 0; 842 + return rz_dmac_parse_of_icu(dev, dmac); 906 843 } 907 844 908 845 static int rz_dmac_probe(struct platform_device *pdev) ··· 936 873 if (IS_ERR(dmac->base)) 937 874 return PTR_ERR(dmac->base); 938 875 939 - dmac->ext_base = 
devm_platform_ioremap_resource(pdev, 1); 940 - if (IS_ERR(dmac->ext_base)) 941 - return PTR_ERR(dmac->ext_base); 876 + if (!dmac->has_icu) { 877 + dmac->ext_base = devm_platform_ioremap_resource(pdev, 1); 878 + if (IS_ERR(dmac->ext_base)) 879 + return PTR_ERR(dmac->ext_base); 880 + } 942 881 943 882 /* Register interrupt handler for error */ 944 883 irq = platform_get_irq_byname(pdev, irqname); ··· 1055 990 reset_control_assert(dmac->rstc); 1056 991 pm_runtime_put(&pdev->dev); 1057 992 pm_runtime_disable(&pdev->dev); 993 + 994 + platform_device_put(dmac->icu.pdev); 1058 995 } 1059 996 1060 997 static const struct of_device_id of_rz_dmac_match[] = { 998 + { .compatible = "renesas,r9a09g057-dmac", }, 1061 999 { .compatible = "renesas,rz-dmac", }, 1062 1000 { /* Sentinel */ } 1063 1001 };
+160 -25
drivers/dma/tegra210-adma.c
··· 27 27 28 28 #define ADMA_CH_INT_CLEAR 0x1c 29 29 #define ADMA_CH_CTRL 0x24 30 - #define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12) 30 + #define ADMA_CH_CTRL_DIR(val, mask, shift) (((val) & (mask)) << (shift)) 31 31 #define ADMA_CH_CTRL_DIR_AHUB2MEM 2 32 32 #define ADMA_CH_CTRL_DIR_MEM2AHUB 4 33 - #define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8) 33 + #define ADMA_CH_CTRL_MODE_CONTINUOUS(shift) (2 << (shift)) 34 34 #define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1) 35 35 #define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0 36 36 ··· 41 41 #define ADMA_CH_CONFIG_MAX_BURST_SIZE 16 42 42 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf) 43 43 #define ADMA_CH_CONFIG_MAX_BUFS 8 44 - #define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4) 44 + #define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4) 45 + 46 + #define ADMA_GLOBAL_CH_CONFIG 0x400 47 + #define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0x7) 48 + #define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 8) 45 49 46 50 #define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30 47 51 #define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70 48 52 #define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84 53 + #define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 0x44 54 + #define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 0x48 55 + #define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 0x100 56 + #define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 0x104 57 + #define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 0x180 58 + #define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 0x184 59 + #define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET 0x8 49 60 50 61 #define ADMA_CH_FIFO_CTRL 0x2c 51 62 #define ADMA_CH_TX_FIFO_SIZE_SHIFT 8 52 63 #define ADMA_CH_RX_FIFO_SIZE_SHIFT 0 64 + #define ADMA_GLOBAL_CH_FIFO_CTRL 0x300 53 65 54 66 #define ADMA_CH_LOWER_SRC_ADDR 0x34 55 67 #define ADMA_CH_LOWER_TRG_ADDR 0x3c ··· 85 73 * @adma_get_burst_config: Function callback used to set DMA burst size. 86 74 * @global_reg_offset: Register offset of DMA global register. 
87 75 * @global_int_clear: Register offset of DMA global interrupt clear. 76 + * @global_ch_fifo_base: Global channel fifo ctrl base offset 77 + * @global_ch_config_base: Global channel config base offset 88 78 * @ch_req_tx_shift: Register offset for AHUB transmit channel select. 89 79 * @ch_req_rx_shift: Register offset for AHUB receive channel select. 80 + * @ch_dir_shift: Channel direction bit position. 81 + * @ch_mode_shift: Channel mode bit position. 90 82 * @ch_base_offset: Register offset of DMA channel registers. 83 + * @ch_tc_offset_diff: From TC register onwards offset differs for Tegra264 91 84 * @ch_fifo_ctrl: Default value for channel FIFO CTRL register. 85 + * @ch_config: Outstanding and WRR config values 92 86 * @ch_req_mask: Mask for Tx or Rx channel select. 87 + * @ch_dir_mask: Mask for channel direction. 93 88 * @ch_req_max: Maximum number of Tx or Rx channels available. 94 89 * @ch_reg_size: Size of DMA channel register space. 95 90 * @nr_channels: Number of DMA channels available. 96 91 * @ch_fifo_size_mask: Mask for FIFO size field. 97 92 * @sreq_index_offset: Slave channel index offset. 98 93 * @max_page: Maximum ADMA Channel Page. 99 - * @has_outstanding_reqs: If DMA channel can have outstanding requests. 100 94 * @set_global_pg_config: Global page programming. 
101 95 */ 102 96 struct tegra_adma_chip_data { 103 97 unsigned int (*adma_get_burst_config)(unsigned int burst_size); 104 98 unsigned int global_reg_offset; 105 99 unsigned int global_int_clear; 100 + unsigned int global_ch_fifo_base; 101 + unsigned int global_ch_config_base; 106 102 unsigned int ch_req_tx_shift; 107 103 unsigned int ch_req_rx_shift; 104 + unsigned int ch_dir_shift; 105 + unsigned int ch_mode_shift; 108 106 unsigned int ch_base_offset; 107 + unsigned int ch_tc_offset_diff; 109 108 unsigned int ch_fifo_ctrl; 109 + unsigned int ch_config; 110 110 unsigned int ch_req_mask; 111 + unsigned int ch_dir_mask; 111 112 unsigned int ch_req_max; 112 113 unsigned int ch_reg_size; 113 114 unsigned int nr_channels; 114 115 unsigned int ch_fifo_size_mask; 115 116 unsigned int sreq_index_offset; 116 117 unsigned int max_page; 117 - bool has_outstanding_reqs; 118 118 void (*set_global_pg_config)(struct tegra_adma *tdma); 119 119 }; 120 120 ··· 136 112 struct tegra_adma_chan_regs { 137 113 unsigned int ctrl; 138 114 unsigned int config; 115 + unsigned int global_config; 139 116 unsigned int src_addr; 140 117 unsigned int trg_addr; 141 118 unsigned int fifo_ctrl; ··· 175 150 /* Transfer count and position info */ 176 151 unsigned int tx_buf_count; 177 152 unsigned int tx_buf_pos; 153 + 154 + unsigned int global_ch_fifo_offset; 155 + unsigned int global_ch_config_offset; 178 156 }; 179 157 180 158 /* ··· 272 244 tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_CHGRP + (tdma->ch_page_no * 0x4), 0xff); 273 245 tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ + (tdma->ch_page_no * 0x4), 0x1ffffff); 274 246 tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff); 247 + } 248 + 249 + static void tegra264_adma_global_page_config(struct tegra_adma *tdma) 250 + { 251 + u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET; 252 + 253 + /* If the default page (page1) is not used, then clear page1 registers */ 254 + if 
(tdma->ch_page_no) { 255 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0); 256 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0); 257 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0); 258 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0); 259 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0); 260 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0); 261 + } 262 + 263 + /* Program global registers for selected page */ 264 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff); 265 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff); 266 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff); 267 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1); 268 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff); 269 + tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1); 275 270 } 276 271 277 272 static int tegra_adma_init(struct tegra_adma *tdma) ··· 455 404 456 405 tdc->tx_buf_pos = 0; 457 406 tdc->tx_buf_count = 0; 458 - tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc); 407 + tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc); 459 408 tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl); 460 - tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr); 461 - tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr); 462 - tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl); 409 + tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff, 410 + ch_regs->src_addr); 411 + tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff, 412 + ch_regs->trg_addr); 413 + 414 + if (!tdc->tdma->cdata->global_ch_fifo_base) 415 + tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl); 416 + else if (tdc->global_ch_fifo_offset) 417 + tdma_write(tdc->tdma, 
tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl); 418 + 419 + if (tdc->global_ch_config_offset) 420 + tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config); 421 + 463 422 tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config); 464 423 465 424 /* Start ADMA */ ··· 482 421 { 483 422 struct tegra_adma_desc *desc = tdc->desc; 484 423 unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1; 485 - unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS); 424 + unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS - 425 + tdc->tdma->cdata->ch_tc_offset_diff); 486 426 unsigned int periods_remaining; 487 427 488 428 /* ··· 689 627 return -EINVAL; 690 628 } 691 629 692 - ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) | 693 - ADMA_CH_CTRL_MODE_CONTINUOUS | 630 + ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask, 631 + cdata->ch_dir_shift) | 632 + ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) | 694 633 ADMA_CH_CTRL_FLOWCTRL_EN; 695 634 ch_regs->config |= cdata->adma_get_burst_config(burst_size); 696 - ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); 697 - if (cdata->has_outstanding_reqs) 698 - ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8); 635 + 636 + if (cdata->global_ch_config_base) 637 + ch_regs->global_config |= cdata->ch_config; 638 + else 639 + ch_regs->config |= cdata->ch_config; 699 640 700 641 /* 701 642 * 'sreq_index' represents the current ADMAIF channel number and as per ··· 853 788 /* skip if channel is not active */ 854 789 if (!ch_reg->cmd) 855 790 continue; 856 - ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC); 857 - ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR); 858 - ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR); 791 + ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff); 792 + ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR - 793 + tdma->cdata->ch_tc_offset_diff); 794 + ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR - 795 + 
tdma->cdata->ch_tc_offset_diff); 859 796 ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL); 860 - ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL); 797 + 798 + if (tdc->global_ch_config_offset) 799 + ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset); 800 + 801 + if (!tdc->tdma->cdata->global_ch_fifo_base) 802 + ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL); 803 + else if (tdc->global_ch_fifo_offset) 804 + ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset); 805 + 861 806 ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG); 807 + 862 808 } 863 809 864 810 clk_disable: ··· 908 832 /* skip if channel was not active earlier */ 909 833 if (!ch_reg->cmd) 910 834 continue; 911 - tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc); 912 - tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr); 913 - tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr); 835 + tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc); 836 + tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff, 837 + ch_reg->src_addr); 838 + tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff, 839 + ch_reg->trg_addr); 914 840 tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl); 915 - tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl); 841 + 842 + if (!tdc->tdma->cdata->global_ch_fifo_base) 843 + tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl); 844 + else if (tdc->global_ch_fifo_offset) 845 + tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl); 846 + 847 + if (tdc->global_ch_config_offset) 848 + tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config); 849 + 916 850 tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config); 851 + 917 852 tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd); 918 853 } 919 854 ··· 935 848 .adma_get_burst_config = tegra210_adma_get_burst_config, 936 849 .global_reg_offset = 0xc00, 937 850 .global_int_clear = 0x20, 851 + 
.global_ch_fifo_base = 0, 852 + .global_ch_config_base = 0, 938 853 .ch_req_tx_shift = 28, 939 854 .ch_req_rx_shift = 24, 855 + .ch_dir_shift = 12, 856 + .ch_mode_shift = 8, 940 857 .ch_base_offset = 0, 858 + .ch_tc_offset_diff = 0, 859 + .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1), 941 860 .ch_req_mask = 0xf, 861 + .ch_dir_mask = 0xf, 942 862 .ch_req_max = 10, 943 863 .ch_reg_size = 0x80, 944 864 .nr_channels = 22, 945 865 .ch_fifo_size_mask = 0xf, 946 866 .sreq_index_offset = 2, 947 867 .max_page = 0, 948 - .has_outstanding_reqs = false, 949 868 .set_global_pg_config = NULL, 950 869 }; 951 870 ··· 959 866 .adma_get_burst_config = tegra186_adma_get_burst_config, 960 867 .global_reg_offset = 0, 961 868 .global_int_clear = 0x402c, 869 + .global_ch_fifo_base = 0, 870 + .global_ch_config_base = 0, 962 871 .ch_req_tx_shift = 27, 963 872 .ch_req_rx_shift = 22, 873 + .ch_dir_shift = 12, 874 + .ch_mode_shift = 8, 964 875 .ch_base_offset = 0x10000, 876 + .ch_tc_offset_diff = 0, 877 + .ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) | 878 + TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8), 965 879 .ch_req_mask = 0x1f, 880 + .ch_dir_mask = 0xf, 966 881 .ch_req_max = 20, 967 882 .ch_reg_size = 0x100, 968 883 .nr_channels = 32, 969 884 .ch_fifo_size_mask = 0x1f, 970 885 .sreq_index_offset = 4, 971 886 .max_page = 4, 972 - .has_outstanding_reqs = true, 973 887 .set_global_pg_config = tegra186_adma_global_page_config, 888 + }; 889 + 890 + static const struct tegra_adma_chip_data tegra264_chip_data = { 891 + .adma_get_burst_config = tegra186_adma_get_burst_config, 892 + .global_reg_offset = 0, 893 + .global_int_clear = 0x800c, 894 + .global_ch_fifo_base = ADMA_GLOBAL_CH_FIFO_CTRL, 895 + .global_ch_config_base = ADMA_GLOBAL_CH_CONFIG, 896 + .ch_req_tx_shift = 26, 897 + .ch_req_rx_shift = 20, 898 + .ch_dir_shift = 10, 899 + .ch_mode_shift = 7, 900 + .ch_base_offset = 0x10000, 901 + .ch_tc_offset_diff = 4, 902 + .ch_config = ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) | 903 + 
ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8), 904 + .ch_req_mask = 0x3f, 905 + .ch_dir_mask = 7, 906 + .ch_req_max = 32, 907 + .ch_reg_size = 0x100, 908 + .nr_channels = 64, 909 + .ch_fifo_size_mask = 0x7f, 910 + .sreq_index_offset = 0, 911 + .max_page = 10, 912 + .set_global_pg_config = tegra264_adma_global_page_config, 974 913 }; 975 914 976 915 static const struct of_device_id tegra_adma_of_match[] = { 977 916 { .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data }, 978 917 { .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data }, 918 + { .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data }, 979 919 { }, 980 920 }; 981 921 MODULE_DEVICE_TABLE(of, tegra_adma_of_match); ··· 1110 984 continue; 1111 985 1112 986 tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i); 987 + 988 + if (tdma->base_addr) { 989 + if (cdata->global_ch_fifo_base) 990 + tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i); 991 + 992 + if (cdata->global_ch_config_base) 993 + tdc->global_ch_config_offset = 994 + cdata->global_ch_config_base + (4 * i); 995 + } 1113 996 1114 997 tdc->irq = of_irq_get(pdev->dev.of_node, i); 1115 998 if (tdc->irq <= 0) {
+2 -1
drivers/dma/ti/k3-udma.c
··· 5624 5624 uc->config.dir = DMA_MEM_TO_MEM; 5625 5625 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", 5626 5626 dev_name(dev), i); 5627 - 5627 + if (!uc->name) 5628 + return -ENOMEM; 5628 5629 vchan_init(&uc->vc, &ud->ddev); 5629 5630 /* Use custom vchan completion handling */ 5630 5631 tasklet_setup(&uc->vc.task, udma_vchan_complete);
+4
drivers/dma/xilinx/xilinx_dma.c
··· 2909 2909 return -EINVAL; 2910 2910 } 2911 2911 2912 + xdev->common.directions |= chan->direction; 2913 + 2912 2914 /* Request the interrupt */ 2913 2915 chan->irq = of_irq_get(node, chan->tdest); 2914 2916 if (chan->irq < 0) ··· 3116 3114 } 3117 3115 } 3118 3116 } 3117 + 3118 + dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len); 3119 3119 3120 3120 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 3121 3121 xdev->has_axistream_connected =
+35
drivers/irqchip/irq-renesas-rzv2h.c
··· 15 15 #include <linux/err.h> 16 16 #include <linux/io.h> 17 17 #include <linux/irqchip.h> 18 + #include <linux/irqchip/irq-renesas-rzv2h.h> 18 19 #include <linux/irqdomain.h> 19 20 #include <linux/of_address.h> 20 21 #include <linux/of_platform.h> ··· 42 41 #define ICU_TSCLR 0x24 43 42 #define ICU_TITSR(k) (0x28 + (k) * 4) 44 43 #define ICU_TSSR(k) (0x30 + (k) * 4) 44 + #define ICU_DMkSELy(k, y) (0x420 + (k) * 0x20 + (y) * 4) 45 + #define ICU_DMACKSELk(k) (0x500 + (k) * 4) 45 46 46 47 /* NMI */ 47 48 #define ICU_NMI_EDGE_FALLING 0 ··· 106 103 u8 field_width; 107 104 }; 108 105 106 + /* DMAC */ 107 + #define ICU_DMAC_DkRQ_SEL_MASK GENMASK(9, 0) 108 + 109 + #define ICU_DMAC_DMAREQ_SHIFT(up) ((up) * 16) 110 + #define ICU_DMAC_DMAREQ_MASK(up) (ICU_DMAC_DkRQ_SEL_MASK \ 111 + << ICU_DMAC_DMAREQ_SHIFT(up)) 112 + #define ICU_DMAC_PREP_DMAREQ(sel, up) (FIELD_PREP(ICU_DMAC_DkRQ_SEL_MASK, (sel)) \ 113 + << ICU_DMAC_DMAREQ_SHIFT(up)) 114 + 109 115 /** 110 116 * struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure. 
111 117 * @base: Controller's base address ··· 128 116 raw_spinlock_t lock; 129 117 const struct rzv2h_hw_info *info; 130 118 }; 119 + 120 + void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel, 121 + u16 req_no) 122 + { 123 + struct rzv2h_icu_priv *priv = platform_get_drvdata(icu_dev); 124 + u32 icu_dmksely, dmareq, dmareq_mask; 125 + u8 y, upper; 126 + 127 + y = dmac_channel / 2; 128 + upper = dmac_channel % 2; 129 + 130 + dmareq = ICU_DMAC_PREP_DMAREQ(req_no, upper); 131 + dmareq_mask = ICU_DMAC_DMAREQ_MASK(upper); 132 + 133 + guard(raw_spinlock_irqsave)(&priv->lock); 134 + 135 + icu_dmksely = readl(priv->base + ICU_DMkSELy(dmac_index, y)); 136 + icu_dmksely = (icu_dmksely & ~dmareq_mask) | dmareq; 137 + writel(icu_dmksely, priv->base + ICU_DMkSELy(dmac_index, y)); 138 + } 139 + EXPORT_SYMBOL_GPL(rzv2h_icu_register_dma_req); 131 140 132 141 static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data) 133 142 { ··· 523 490 rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL); 524 491 if (!rzv2h_icu_data) 525 492 return -ENOMEM; 493 + 494 + platform_set_drvdata(pdev, rzv2h_icu_data); 526 495 527 496 rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL); 528 497 if (IS_ERR(rzv2h_icu_data->base))
+23
include/linux/irqchip/irq-renesas-rzv2h.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Renesas RZ/V2H(P) Interrupt Control Unit (ICU) 4 + * 5 + * Copyright (C) 2025 Renesas Electronics Corporation. 6 + */ 7 + 8 + #ifndef __LINUX_IRQ_RENESAS_RZV2H 9 + #define __LINUX_IRQ_RENESAS_RZV2H 10 + 11 + #include <linux/platform_device.h> 12 + 13 + #define RZV2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff 14 + 15 + #ifdef CONFIG_RENESAS_RZV2H_ICU 16 + void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel, 17 + u16 req_no); 18 + #else 19 + static inline void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, 20 + u8 dmac_channel, u16 req_no) { } 21 + #endif 22 + 23 + #endif /* __LINUX_IRQ_RENESAS_RZV2H */