Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
"New hardware support:
- Allwinner H616 dma support
- Renesas r8a779h0 dma controller support
- TI CSI2RX dma support

Updates:
- Freescale edma driver updates for TCD64 support for i.MX95
- constify of pointers and args
- Yaml conversion for MediaTek High-Speed controller binding
- TI k3 udma support for TX/RX DMA channels for thread IDs"

* tag 'dmaengine-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
dmaengine: of: constify of_phandle_args in of_dma_find_controller()
dmaengine: pl08x: constify pointer to char in filter function
MAINTAINERS: change in AMD ptdma maintainer
MAINTAINERS: adjust file entry in MEDIATEK DMA DRIVER
dmaengine: idxd: constify the struct device_type usage
dt-bindings: renesas,rcar-dmac: Add r8a779h0 support
dt-bindings: dma: convert MediaTek High-Speed controller to the json-schema
dmaengine: idxd: make dsa_bus_type const
dmaengine: fsl-edma: integrate TCD64 support for i.MX95
dt-bindings: fsl-dma: fsl-edma: add fsl,imx95-edma5 compatible string
dmaengine: mcf-edma: utilize edma_write_tcdreg() macro for TCD Access
dmaengine: fsl-edma: add address for channel mux register in fsl_edma_chan
dmaengine: fsl-edma: fix spare build warning
dmaengine: fsl-edma: involve help macro fsl_edma_set(get)_tcd()
dt-bindings: mmp-dma: convert to YAML
dmaengine: ti: k3-psil-j721s2: Add entry for CSI2RX
dmaengine: ti: k3-udma-glue: Add function to request RX chan for thread ID
dmaengine: ti: k3-udma-glue: Add function to request TX chan for thread ID
dmaengine: ti: k3-udma-glue: Update name for remote RX channel device
dmaengine: ti: k3-udma-glue: Add function to parse channel by ID
...

+705 -301
+8 -4
Documentation/devicetree/bindings/dma/allwinner,sun50i-a64-dma.yaml
··· 28 28 - items: 29 29 - const: allwinner,sun8i-r40-dma 30 30 - const: allwinner,sun50i-a64-dma 31 + - items: 32 + - const: allwinner,sun50i-h616-dma 33 + - const: allwinner,sun50i-a100-dma 31 34 32 35 reg: 33 36 maxItems: 1 ··· 62 59 if: 63 60 properties: 64 61 compatible: 65 - enum: 66 - - allwinner,sun20i-d1-dma 67 - - allwinner,sun50i-a100-dma 68 - - allwinner,sun50i-h6-dma 62 + contains: 63 + enum: 64 + - allwinner,sun20i-d1-dma 65 + - allwinner,sun50i-a100-dma 66 + - allwinner,sun50i-h6-dma 69 67 70 68 then: 71 69 properties:
+2
Documentation/devicetree/bindings/dma/fsl,edma.yaml
··· 25 25 - fsl,imx8qm-edma 26 26 - fsl,imx93-edma3 27 27 - fsl,imx93-edma4 28 + - fsl,imx95-edma5 28 29 - items: 29 30 - const: fsl,ls1028a-edma 30 31 - const: fsl,vf610-edma ··· 84 83 - fsl,imx8qm-edma 85 84 - fsl,imx93-edma3 86 85 - fsl,imx93-edma4 86 + - fsl,imx95-edma5 87 87 then: 88 88 properties: 89 89 "#dma-cells":
+72
Documentation/devicetree/bindings/dma/marvell,mmp-dma.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/dma/marvell,mmp-dma.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Marvell MMP DMA controller 8 + 9 + maintainers: 10 + - Duje Mihanović <duje.mihanovic@skole.hr> 11 + 12 + description: 13 + Marvell MMP SoCs may have two types of DMA controllers, peripheral and audio. 14 + 15 + properties: 16 + compatible: 17 + enum: 18 + - marvell,pdma-1.0 19 + - marvell,adma-1.0 20 + - marvell,pxa910-squ 21 + 22 + reg: 23 + maxItems: 1 24 + 25 + interrupts: 26 + description: 27 + Interrupt lines for the controller, may be shared or one per DMA channel 28 + minItems: 1 29 + 30 + asram: 31 + description: 32 + A phandle to the SRAM pool 33 + $ref: /schemas/types.yaml#/definitions/phandle 34 + 35 + '#dma-channels': 36 + deprecated: true 37 + 38 + '#dma-requests': 39 + deprecated: true 40 + 41 + required: 42 + - compatible 43 + - reg 44 + - interrupts 45 + - '#dma-cells' 46 + 47 + allOf: 48 + - $ref: dma-controller.yaml# 49 + - if: 50 + properties: 51 + compatible: 52 + contains: 53 + enum: 54 + - marvell,pdma-1.0 55 + then: 56 + properties: 57 + asram: false 58 + else: 59 + required: 60 + - asram 61 + 62 + unevaluatedProperties: false 63 + 64 + examples: 65 + - | 66 + dma-controller@d4000000 { 67 + compatible = "marvell,pdma-1.0"; 68 + reg = <0xd4000000 0x10000>; 69 + interrupts = <47>; 70 + #dma-cells = <2>; 71 + dma-channels = <16>; 72 + };
+63
Documentation/devicetree/bindings/dma/mediatek,mt7622-hsdma.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/dma/mediatek,mt7622-hsdma.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: MediaTek High-Speed DMA Controller 8 + 9 + maintainers: 10 + - Sean Wang <sean.wang@mediatek.com> 11 + 12 + allOf: 13 + - $ref: dma-controller.yaml# 14 + 15 + properties: 16 + compatible: 17 + enum: 18 + - mediatek,mt7622-hsdma 19 + - mediatek,mt7623-hsdma 20 + 21 + reg: 22 + maxItems: 1 23 + 24 + interrupts: 25 + maxItems: 1 26 + 27 + clocks: 28 + maxItems: 1 29 + 30 + clock-names: 31 + const: hsdma 32 + 33 + power-domains: 34 + maxItems: 1 35 + 36 + "#dma-cells": 37 + description: Channel number 38 + const: 1 39 + 40 + required: 41 + - reg 42 + - interrupts 43 + - clocks 44 + - clock-names 45 + - power-domains 46 + 47 + unevaluatedProperties: false 48 + 49 + examples: 50 + - | 51 + #include <dt-bindings/clock/mt2701-clk.h> 52 + #include <dt-bindings/interrupt-controller/arm-gic.h> 53 + #include <dt-bindings/power/mt2701-power.h> 54 + 55 + dma-controller@1b007000 { 56 + compatible = "mediatek,mt7623-hsdma"; 57 + reg = <0x1b007000 0x1000>; 58 + interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>; 59 + clocks = <&ethsys CLK_ETHSYS_HSDMA>; 60 + clock-names = "hsdma"; 61 + power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; 62 + #dma-cells = <1>; 63 + };
-81
Documentation/devicetree/bindings/dma/mmp-dma.txt
··· 1 - * MARVELL MMP DMA controller 2 - 3 - Marvell Peripheral DMA Controller 4 - Used platforms: pxa688, pxa910, pxa3xx, etc 5 - 6 - Required properties: 7 - - compatible: Should be "marvell,pdma-1.0" 8 - - reg: Should contain DMA registers location and length. 9 - - interrupts: Either contain all of the per-channel DMA interrupts 10 - or one irq for pdma device 11 - 12 - Optional properties: 13 - - dma-channels: Number of DMA channels supported by the controller (defaults 14 - to 32 when not specified) 15 - - #dma-channels: deprecated 16 - - dma-requests: Number of DMA requestor lines supported by the controller 17 - (defaults to 32 when not specified) 18 - - #dma-requests: deprecated 19 - 20 - "marvell,pdma-1.0" 21 - Used platforms: pxa25x, pxa27x, pxa3xx, pxa93x, pxa168, pxa910, pxa688. 22 - 23 - Examples: 24 - 25 - /* 26 - * Each channel has specific irq 27 - * ICU parse out irq channel from ICU register, 28 - * while DMA controller may not able to distinguish the irq channel 29 - * Using this method, interrupt-parent is required as demuxer 30 - * For example, pxa688 icu register 0x128, bit 0~15 is PDMA channel irq, 31 - * 18~21 is ADMA irq 32 - */ 33 - pdma: dma-controller@d4000000 { 34 - compatible = "marvell,pdma-1.0"; 35 - reg = <0xd4000000 0x10000>; 36 - interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>; 37 - interrupt-parent = <&intcmux32>; 38 - dma-channels = <16>; 39 - }; 40 - 41 - /* 42 - * One irq for all channels 43 - * Dmaengine driver (DMA controller) distinguish irq channel via 44 - * parsing internal register 45 - */ 46 - pdma: dma-controller@d4000000 { 47 - compatible = "marvell,pdma-1.0"; 48 - reg = <0xd4000000 0x10000>; 49 - interrupts = <47>; 50 - dma-channels = <16>; 51 - }; 52 - 53 - 54 - Marvell Two Channel DMA Controller used specifically for audio 55 - Used platforms: pxa688, pxa910 56 - 57 - Required properties: 58 - - compatible: Should be "marvell,adma-1.0" or "marvell,pxa910-squ" 59 - - reg: Should contain DMA registers location 
and length. 60 - - interrupts: Either contain all of the per-channel DMA interrupts 61 - or one irq for dma device 62 - 63 - "marvell,adma-1.0" used on pxa688 64 - "marvell,pxa910-squ" used on pxa910 65 - 66 - Examples: 67 - 68 - /* each channel has specific irq */ 69 - adma0: dma-controller@d42a0800 { 70 - compatible = "marvell,adma-1.0"; 71 - reg = <0xd42a0800 0x100>; 72 - interrupts = <18 19>; 73 - interrupt-parent = <&intcmux32>; 74 - }; 75 - 76 - /* One irq for all channels */ 77 - squ: dma-controller@d42a0800 { 78 - compatible = "marvell,pxa910-squ"; 79 - reg = <0xd42a0800 0x100>; 80 - interrupts = <46>; 81 - };
-33
Documentation/devicetree/bindings/dma/mtk-hsdma.txt
··· 1 - MediaTek High-Speed DMA Controller 2 - ================================== 3 - 4 - This device follows the generic DMA bindings defined in dma/dma.txt. 5 - 6 - Required properties: 7 - 8 - - compatible: Must be one of 9 - "mediatek,mt7622-hsdma": for MT7622 SoC 10 - "mediatek,mt7623-hsdma": for MT7623 SoC 11 - - reg: Should contain the register's base address and length. 12 - - interrupts: Should contain a reference to the interrupt used by this 13 - device. 14 - - clocks: Should be the clock specifiers corresponding to the entry in 15 - clock-names property. 16 - - clock-names: Should contain "hsdma" entries. 17 - - power-domains: Phandle to the power domain that the device is part of 18 - - #dma-cells: The length of the DMA specifier, must be <1>. This one cell 19 - in dmas property of a client device represents the channel 20 - number. 21 - Example: 22 - 23 - hsdma: dma-controller@1b007000 { 24 - compatible = "mediatek,mt7623-hsdma"; 25 - reg = <0 0x1b007000 0 0x1000>; 26 - interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>; 27 - clocks = <&ethsys CLK_ETHSYS_HSDMA>; 28 - clock-names = "hsdma"; 29 - power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; 30 - #dma-cells = <1>; 31 - }; 32 - 33 - DMA clients must use the format described in dma/dma.txt file.
+1
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
··· 46 46 - renesas,dmac-r8a779a0 # R-Car V3U 47 47 - renesas,dmac-r8a779f0 # R-Car S4-8 48 48 - renesas,dmac-r8a779g0 # R-Car V4H 49 + - renesas,dmac-r8a779h0 # R-Car V4M 49 50 - const: renesas,rcar-gen4-dmac # R-Car Gen4 50 51 51 52 reg: true
+2 -2
MAINTAINERS
··· 1064 1064 F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py 1065 1065 1066 1066 AMD PTDMA DRIVER 1067 - M: Sanjay R Mehta <sanju.mehta@amd.com> 1067 + M: Basavaraj Natikar <Basavaraj.Natikar@amd.com> 1068 1068 L: dmaengine@vger.kernel.org 1069 1069 S: Maintained 1070 1070 F: drivers/dma/ptdma/ ··· 13719 13719 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 13720 13720 L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) 13721 13721 S: Maintained 13722 - F: Documentation/devicetree/bindings/dma/mtk-* 13722 + F: Documentation/devicetree/bindings/dma/mediatek,* 13723 13723 F: drivers/dma/mediatek/ 13724 13724 13725 13725 MEDIATEK ETHERNET DRIVER
+7 -7
drivers/dma/Kconfig
··· 643 643 644 644 config TEGRA210_ADMA 645 645 tristate "NVIDIA Tegra210 ADMA support" 646 - depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) 646 + depends on (ARCH_TEGRA || COMPILE_TEST) 647 647 select DMA_ENGINE 648 648 select DMA_VIRTUAL_CHANNELS 649 649 help 650 - Support for the NVIDIA Tegra210 ADMA controller driver. The 651 - DMA controller has multiple DMA channels and is used to service 652 - various audio clients in the Tegra210 audio processing engine 653 - (APE). This DMA controller transfers data from memory to 654 - peripheral and vice versa. It does not support memory to 655 - memory data transfer. 650 + Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA 651 + controller driver. The DMA controller has multiple DMA channels 652 + and is used to service various audio clients in the Tegra210 653 + audio processing engine (APE). This DMA controller transfers 654 + data from memory to peripheral and vice versa. It does not 655 + support memory to memory data transfer. 656 656 657 657 config TIMB_DMA 658 658 tristate "Timberdale FPGA DMA support"
+1 -1
drivers/dma/amba-pl08x.c
··· 2239 2239 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 2240 2240 { 2241 2241 struct pl08x_dma_chan *plchan; 2242 - char *name = chan_id; 2242 + const char *name = chan_id; 2243 2243 2244 2244 /* Reject channels for devices not bound to this driver */ 2245 2245 if (chan->device->dev->driver != &pl08x_amba_driver.drv)
-5
drivers/dma/bestcomm/sram.c
··· 90 90 bcom_sram->rh = rh_create(4); 91 91 92 92 /* Attach the free zones */ 93 - #if 0 94 - /* Currently disabled ... for future use only */ 95 - reg_addr_p = of_get_property(sram_node, "available", &psize); 96 - #else 97 93 regaddr_p = NULL; 98 94 psize = 0; 99 - #endif 100 95 101 96 if (!regaddr_p || !psize) { 102 97 /* Attach the whole zone */
+55 -46
drivers/dma/fsl-edma-common.c
··· 97 97 * ch_mux: With the exception of 0, attempts to write a value 98 98 * already in use will be forced to 0. 99 99 */ 100 - if (!edma_readl_chreg(fsl_chan, ch_mux)) 101 - edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux); 100 + if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr)) 101 + edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr); 102 102 } 103 103 104 104 val = edma_readl_chreg(fsl_chan, ch_csr); ··· 134 134 flags = fsl_edma_drvflags(fsl_chan); 135 135 136 136 if (flags & FSL_EDMA_DRV_HAS_CHMUX) 137 - edma_writel_chreg(fsl_chan, 0, ch_mux); 137 + edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr); 138 138 139 139 val &= ~EDMA_V3_CH_CSR_ERQ; 140 140 edma_writel_chreg(fsl_chan, val, ch_csr); ··· 351 351 { 352 352 struct fsl_edma_desc *edesc = fsl_chan->edesc; 353 353 enum dma_transfer_direction dir = edesc->dirn; 354 - dma_addr_t cur_addr, dma_addr; 354 + dma_addr_t cur_addr, dma_addr, old_addr; 355 355 size_t len, size; 356 356 u32 nbytes = 0; 357 357 int i; 358 358 359 359 /* calculate the total size in this desc */ 360 360 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) { 361 - nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes); 361 + nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes); 362 362 if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE)) 363 363 nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes); 364 - len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter); 364 + len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter); 365 365 } 366 366 367 367 if (!in_progress) 368 368 return len; 369 369 370 - if (dir == DMA_MEM_TO_DEV) 371 - cur_addr = edma_read_tcdreg(fsl_chan, saddr); 372 - else 373 - cur_addr = edma_read_tcdreg(fsl_chan, daddr); 370 + /* 64bit read is not atomic, need read retry when high 32bit changed */ 371 + do { 372 + if (dir == DMA_MEM_TO_DEV) { 373 + old_addr = edma_read_tcdreg(fsl_chan, saddr); 374 + cur_addr = edma_read_tcdreg(fsl_chan, saddr); 375 + } else 
{ 376 + old_addr = edma_read_tcdreg(fsl_chan, daddr); 377 + cur_addr = edma_read_tcdreg(fsl_chan, daddr); 378 + } 379 + } while (upper_32_bits(cur_addr) != upper_32_bits(old_addr)); 374 380 375 381 /* figure out the finished and calculate the residue */ 376 382 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { 377 - nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes); 383 + nbytes = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, nbytes); 378 384 if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE)) 379 385 nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes); 380 386 381 - size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter); 387 + size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter); 382 388 383 389 if (dir == DMA_MEM_TO_DEV) 384 - dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr); 390 + dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, saddr); 385 391 else 386 - dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr); 392 + dma_addr = fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, daddr); 387 393 388 394 len -= size; 389 395 if (cur_addr >= dma_addr && cur_addr < dma_addr + size) { ··· 432 426 return fsl_chan->status; 433 427 } 434 428 435 - static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, 436 - struct fsl_edma_hw_tcd *tcd) 429 + static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, void *tcd) 437 430 { 438 431 u16 csr = 0; 439 432 ··· 444 439 */ 445 440 edma_write_tcdreg(fsl_chan, 0, csr); 446 441 447 - edma_write_tcdreg(fsl_chan, tcd->saddr, saddr); 448 - edma_write_tcdreg(fsl_chan, tcd->daddr, daddr); 442 + edma_cp_tcd_to_reg(fsl_chan, tcd, saddr); 443 + edma_cp_tcd_to_reg(fsl_chan, tcd, daddr); 449 444 450 - edma_write_tcdreg(fsl_chan, tcd->attr, attr); 451 - edma_write_tcdreg(fsl_chan, tcd->soff, soff); 445 + edma_cp_tcd_to_reg(fsl_chan, tcd, attr); 446 + edma_cp_tcd_to_reg(fsl_chan, tcd, soff); 452 447 453 - edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes); 454 - 
edma_write_tcdreg(fsl_chan, tcd->slast, slast); 448 + edma_cp_tcd_to_reg(fsl_chan, tcd, nbytes); 449 + edma_cp_tcd_to_reg(fsl_chan, tcd, slast); 455 450 456 - edma_write_tcdreg(fsl_chan, tcd->citer, citer); 457 - edma_write_tcdreg(fsl_chan, tcd->biter, biter); 458 - edma_write_tcdreg(fsl_chan, tcd->doff, doff); 451 + edma_cp_tcd_to_reg(fsl_chan, tcd, citer); 452 + edma_cp_tcd_to_reg(fsl_chan, tcd, biter); 453 + edma_cp_tcd_to_reg(fsl_chan, tcd, doff); 459 454 460 - edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga); 455 + edma_cp_tcd_to_reg(fsl_chan, tcd, dlast_sga); 461 456 462 - csr = le16_to_cpu(tcd->csr); 457 + csr = fsl_edma_get_tcd_to_cpu(fsl_chan, tcd, csr); 463 458 464 459 if (fsl_chan->is_sw) { 465 460 csr |= EDMA_TCD_CSR_START; 466 - tcd->csr = cpu_to_le16(csr); 461 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr); 467 462 } 468 463 469 464 /* ··· 478 473 edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr); 479 474 480 475 481 - edma_write_tcdreg(fsl_chan, tcd->csr, csr); 476 + edma_cp_tcd_to_reg(fsl_chan, tcd, csr); 482 477 } 483 478 484 479 static inline 485 480 void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan, 486 - struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst, 487 - u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer, 488 - u16 biter, u16 doff, u32 dlast_sga, bool major_int, 481 + struct fsl_edma_hw_tcd *tcd, dma_addr_t src, dma_addr_t dst, 482 + u16 attr, u16 soff, u32 nbytes, dma_addr_t slast, u16 citer, 483 + u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int, 489 484 bool disable_req, bool enable_sg) 490 485 { 491 486 struct dma_slave_config *cfg = &fsl_chan->cfg; ··· 498 493 * So we put the value in little endian in memory, waiting 499 494 * for fsl_edma_set_tcd_regs doing the swap. 
500 495 */ 501 - tcd->saddr = cpu_to_le32(src); 502 - tcd->daddr = cpu_to_le32(dst); 496 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, src, saddr); 497 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, dst, daddr); 503 498 504 - tcd->attr = cpu_to_le16(attr); 499 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, attr, attr); 505 500 506 - tcd->soff = cpu_to_le16(soff); 501 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, soff, soff); 507 502 508 503 if (fsl_chan->is_multi_fifo) { 509 504 /* set mloff to support multiple fifo */ ··· 520 515 } 521 516 } 522 517 523 - tcd->nbytes = cpu_to_le32(nbytes); 524 - tcd->slast = cpu_to_le32(slast); 518 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, nbytes, nbytes); 519 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, slast, slast); 525 520 526 - tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer)); 527 - tcd->doff = cpu_to_le16(doff); 521 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_CITER_CITER(citer), citer); 522 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, doff, doff); 528 523 529 - tcd->dlast_sga = cpu_to_le32(dlast_sga); 524 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, dlast_sga, dlast_sga); 530 525 531 - tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter)); 526 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter); 527 + 532 528 if (major_int) 533 529 csr |= EDMA_TCD_CSR_INT_MAJOR; 534 530 ··· 545 539 if (fsl_chan->is_sw) 546 540 csr |= EDMA_TCD_CSR_START; 547 541 548 - tcd->csr = cpu_to_le16(csr); 542 + fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr); 549 543 } 550 544 551 545 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, ··· 586 580 dma_addr_t dma_buf_next; 587 581 bool major_int = true; 588 582 int sg_len, i; 589 - u32 src_addr, dst_addr, last_sg, nbytes; 583 + dma_addr_t src_addr, dst_addr, last_sg; 590 584 u16 soff, doff, iter; 585 + u32 nbytes; 591 586 592 587 if (!is_slave_direction(direction)) 593 588 return NULL; ··· 660 653 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 661 654 struct fsl_edma_desc 
*fsl_desc; 662 655 struct scatterlist *sg; 663 - u32 src_addr, dst_addr, last_sg, nbytes; 656 + dma_addr_t src_addr, dst_addr, last_sg; 664 657 u16 soff, doff, iter; 658 + u32 nbytes; 665 659 int i; 666 660 667 661 if (!is_slave_direction(direction)) ··· 811 803 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 812 804 813 805 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, 814 - sizeof(struct fsl_edma_hw_tcd), 806 + fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ? 807 + sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd), 815 808 32, 0); 816 809 return 0; 817 810 }
+146 -13
drivers/dma/fsl-edma-common.h
··· 88 88 __le16 biter; 89 89 }; 90 90 91 + struct fsl_edma_hw_tcd64 { 92 + __le64 saddr; 93 + __le16 soff; 94 + __le16 attr; 95 + __le32 nbytes; 96 + __le64 slast; 97 + __le64 daddr; 98 + __le64 dlast_sga; 99 + __le16 doff; 100 + __le16 citer; 101 + __le16 csr; 102 + __le16 biter; 103 + } __packed; 104 + 91 105 struct fsl_edma3_ch_reg { 92 106 __le32 ch_csr; 93 107 __le32 ch_es; ··· 111 97 __le32 ch_mux; 112 98 __le32 ch_mattr; /* edma4, reserved for edma3 */ 113 99 __le32 ch_reserved; 114 - struct fsl_edma_hw_tcd tcd; 100 + union { 101 + struct fsl_edma_hw_tcd tcd; 102 + struct fsl_edma_hw_tcd64 tcd64; 103 + }; 115 104 } __packed; 116 105 117 106 /* ··· 143 126 144 127 struct fsl_edma_sw_tcd { 145 128 dma_addr_t ptcd; 146 - struct fsl_edma_hw_tcd *vtcd; 129 + void *vtcd; 147 130 }; 148 131 149 132 struct fsl_edma_chan { ··· 162 145 u32 dma_dev_size; 163 146 enum dma_data_direction dma_dir; 164 147 char chan_name[32]; 165 - struct fsl_edma_hw_tcd __iomem *tcd; 148 + void __iomem *tcd; 149 + void __iomem *mux_addr; 166 150 u32 real_count; 167 151 struct work_struct issue_worker; 168 152 struct platform_device *pdev; ··· 206 188 #define FSL_EDMA_DRV_CLEAR_DONE_E_SG BIT(13) 207 189 /* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */ 208 190 #define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14) 191 + #define FSL_EDMA_DRV_TCD64 BIT(15) 209 192 210 193 #define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \ 211 194 FSL_EDMA_DRV_BUS_8BYTE | \ ··· 226 207 u32 chreg_off; 227 208 u32 chreg_space_sz; 228 209 u32 flags; 210 + u32 mux_off; /* channel mux register offset */ 211 + u32 mux_skip; /* how much skip for each channel */ 229 212 int (*setup_irq)(struct platform_device *pdev, 230 213 struct fsl_edma_engine *fsl_edma); 231 214 }; ··· 250 229 struct fsl_edma_chan chans[] __counted_by(n_chans); 251 230 }; 252 231 253 - #define edma_read_tcdreg(chan, __name) \ 254 - (sizeof(chan->tcd->__name) == sizeof(u32) ? 
\ 255 - edma_readl(chan->edma, &chan->tcd->__name) : \ 256 - edma_readw(chan->edma, &chan->tcd->__name)) 232 + #define edma_read_tcdreg_c(chan, _tcd, __name) \ 233 + (sizeof((_tcd)->__name) == sizeof(u64) ? \ 234 + edma_readq(chan->edma, &(_tcd)->__name) : \ 235 + ((sizeof((_tcd)->__name) == sizeof(u32)) ? \ 236 + edma_readl(chan->edma, &(_tcd)->__name) : \ 237 + edma_readw(chan->edma, &(_tcd)->__name) \ 238 + )) 257 239 258 - #define edma_write_tcdreg(chan, val, __name) \ 259 - (sizeof(chan->tcd->__name) == sizeof(u32) ? \ 260 - edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \ 261 - edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name)) 240 + #define edma_read_tcdreg(chan, __name) \ 241 + ((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \ 242 + edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd64 __iomem *)chan->tcd), __name) : \ 243 + edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \ 244 + ) 245 + 246 + #define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ 247 + do { \ 248 + switch (sizeof(_tcd->__name)) { \ 249 + case sizeof(u64): \ 250 + edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \ 251 + break; \ 252 + case sizeof(u32): \ 253 + edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \ 254 + break; \ 255 + case sizeof(u16): \ 256 + edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \ 257 + break; \ 258 + case sizeof(u8): \ 259 + edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \ 260 + break; \ 261 + } \ 262 + } while (0) 263 + 264 + #define edma_write_tcdreg(chan, val, __name) \ 265 + do { \ 266 + struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \ 267 + struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \ 268 + \ 269 + if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \ 270 + edma_write_tcdreg_c(chan, tcd64_r, val, __name); \ 271 + else \ 272 + edma_write_tcdreg_c(chan, tcd_r, val, __name); \ 
273 + } while (0) 274 + 275 + #define edma_cp_tcd_to_reg(chan, __tcd, __name) \ 276 + do { \ 277 + struct fsl_edma_hw_tcd64 __iomem *tcd64_r = (struct fsl_edma_hw_tcd64 __iomem *)chan->tcd; \ 278 + struct fsl_edma_hw_tcd __iomem *tcd_r = (struct fsl_edma_hw_tcd __iomem *)chan->tcd; \ 279 + struct fsl_edma_hw_tcd64 *tcd64_m = (struct fsl_edma_hw_tcd64 *)__tcd; \ 280 + struct fsl_edma_hw_tcd *tcd_m = (struct fsl_edma_hw_tcd *)__tcd; \ 281 + \ 282 + if (fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) \ 283 + edma_write_tcdreg_c(chan, tcd64_r, tcd64_m->__name, __name); \ 284 + else \ 285 + edma_write_tcdreg_c(chan, tcd_r, tcd_m->__name, __name); \ 286 + } while (0) 262 287 263 288 #define edma_readl_chreg(chan, __name) \ 264 289 edma_readl(chan->edma, \ 265 - (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name)) 290 + (void __iomem *)&(container_of(((__force void *)chan->tcd),\ 291 + struct fsl_edma3_ch_reg, tcd)->__name)) 266 292 267 293 #define edma_writel_chreg(chan, val, __name) \ 268 294 edma_writel(chan->edma, val, \ 269 - (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name)) 295 + (void __iomem *)&(container_of(((__force void *)chan->tcd),\ 296 + struct fsl_edma3_ch_reg, tcd)->__name)) 297 + 298 + #define fsl_edma_get_tcd(_chan, _tcd, _field) \ 299 + (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? (((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \ 300 + (((struct fsl_edma_hw_tcd *)_tcd)->_field)) 301 + 302 + #define fsl_edma_le_to_cpu(x) \ 303 + (sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \ 304 + (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \ 305 + le16_to_cpu((__force __le16)(x)))) 306 + 307 + #define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \ 308 + (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? 
\ 309 + fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd64 *)_tcd)->_field) : \ 310 + fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field)) 311 + 312 + #define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \ 313 + do { \ 314 + switch (sizeof((_tcd)->_field)) { \ 315 + case sizeof(u64): \ 316 + *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \ 317 + break; \ 318 + case sizeof(u32): \ 319 + *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \ 320 + break; \ 321 + case sizeof(u16): \ 322 + *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \ 323 + break; \ 324 + } \ 325 + } while (0) 326 + 327 + #define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \ 328 + do { \ 329 + if (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64) \ 330 + fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd64 *)_tcd, _val, _field); \ 331 + else \ 332 + fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \ 333 + } while (0) 270 334 271 335 /* 272 336 * R/W functions for big- or little-endian registers: ··· 359 253 * For the big-endian IP module, the offset for 8-bit or 16-bit registers 360 254 * should also be swapped opposite to that in little-endian IP. 
361 255 */ 256 + static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr) 257 + { 258 + u64 l, h; 259 + 260 + if (edma->big_endian) { 261 + l = ioread32be(addr); 262 + h = ioread32be(addr + 4); 263 + } else { 264 + l = ioread32(addr); 265 + h = ioread32(addr + 4); 266 + } 267 + 268 + return (h << 32) | l; 269 + } 270 + 362 271 static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) 363 272 { 364 273 if (edma->big_endian) ··· 417 296 iowrite32be(val, addr); 418 297 else 419 298 iowrite32(val, addr); 299 + } 300 + 301 + static inline void edma_writeq(struct fsl_edma_engine *edma, 302 + u64 val, void __iomem *addr) 303 + { 304 + if (edma->big_endian) { 305 + iowrite32be(val & 0xFFFFFFFF, addr); 306 + iowrite32be(val >> 32, addr + 4); 307 + } else { 308 + iowrite32(val & 0xFFFFFFFF, addr); 309 + iowrite32(val >> 32, addr + 4); 310 + } 420 311 } 421 312 422 313 static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+18 -1
drivers/dma/fsl-edma-main.c
··· 360 360 .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4, 361 361 .chreg_space_sz = 0x8000, 362 362 .chreg_off = 0x10000, 363 + .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), 364 + .mux_skip = 0x8000, 365 + .setup_irq = fsl_edma3_irq_init, 366 + }; 367 + 368 + static struct fsl_edma_drvdata imx95_data5 = { 369 + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 | 370 + FSL_EDMA_DRV_TCD64, 371 + .chreg_space_sz = 0x8000, 372 + .chreg_off = 0x10000, 373 + .mux_off = 0x200, 374 + .mux_skip = sizeof(u32), 363 375 .setup_irq = fsl_edma3_irq_init, 364 376 }; 365 377 ··· 383 371 { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data}, 384 372 { .compatible = "fsl,imx93-edma3", .data = &imx93_data3}, 385 373 { .compatible = "fsl,imx93-edma4", .data = &imx93_data4}, 374 + { .compatible = "fsl,imx95-edma5", .data = &imx95_data5}, 386 375 { /* sentinel */ } 387 376 }; 388 377 MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); ··· 524 511 return ret; 525 512 } 526 513 514 + if (drvdata->flags & FSL_EDMA_DRV_TCD64) 515 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 516 + 527 517 INIT_LIST_HEAD(&fsl_edma->dma_dev.channels); 528 518 for (i = 0; i < fsl_edma->n_chans; i++) { 529 519 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i]; ··· 549 533 offsetof(struct fsl_edma3_ch_reg, tcd) : 0; 550 534 fsl_chan->tcd = fsl_edma->membase 551 535 + i * drvdata->chreg_space_sz + drvdata->chreg_off + len; 536 + fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip; 552 537 553 538 fsl_chan->pdev = pdev; 554 539 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); 555 540 556 - edma_write_tcdreg(fsl_chan, 0, csr); 541 + edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr); 557 542 fsl_edma_chan_mux(fsl_chan, 0, false); 558 543 } 559 544
+1 -1
drivers/dma/idxd/bus.c
··· 72 72 return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0); 73 73 } 74 74 75 - struct bus_type dsa_bus_type = { 75 + const struct bus_type dsa_bus_type = { 76 76 .name = "dsa", 77 77 .match = idxd_config_bus_match, 78 78 .probe = idxd_config_bus_probe,
+2 -2
drivers/dma/idxd/cdev.c
··· 152 152 mutex_unlock(&wq->wq_lock); 153 153 } 154 154 155 - static struct device_type idxd_cdev_file_type = { 155 + static const struct device_type idxd_cdev_file_type = { 156 156 .name = "idxd_file", 157 157 .release = idxd_file_dev_release, 158 158 .groups = cdev_file_attribute_groups, ··· 169 169 kfree(idxd_cdev); 170 170 } 171 171 172 - static struct device_type idxd_cdev_device_type = { 172 + static const struct device_type idxd_cdev_device_type = { 173 173 .name = "idxd_cdev", 174 174 .release = idxd_cdev_dev_release, 175 175 };
+7 -7
drivers/dma/idxd/idxd.h
··· 282 282 struct idxd_driver_data { 283 283 const char *name_prefix; 284 284 enum idxd_type type; 285 - struct device_type *dev_type; 285 + const struct device_type *dev_type; 286 286 int compl_size; 287 287 int align; 288 288 int evl_cr_off; ··· 515 515 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); 516 516 } 517 517 518 - extern struct bus_type dsa_bus_type; 518 + extern const struct bus_type dsa_bus_type; 519 519 520 520 extern bool support_enqcmd; 521 521 extern struct ida idxd_ida; 522 - extern struct device_type dsa_device_type; 523 - extern struct device_type iax_device_type; 524 - extern struct device_type idxd_wq_device_type; 525 - extern struct device_type idxd_engine_device_type; 526 - extern struct device_type idxd_group_device_type; 522 + extern const struct device_type dsa_device_type; 523 + extern const struct device_type iax_device_type; 524 + extern const struct device_type idxd_wq_device_type; 525 + extern const struct device_type idxd_engine_device_type; 526 + extern const struct device_type idxd_group_device_type; 527 527 528 528 static inline bool is_dsa_dev(struct idxd_dev *idxd_dev) 529 529 {
+5 -5
drivers/dma/idxd/sysfs.c
··· 91 91 kfree(engine); 92 92 } 93 93 94 - struct device_type idxd_engine_device_type = { 94 + const struct device_type idxd_engine_device_type = { 95 95 .name = "engine", 96 96 .release = idxd_conf_engine_release, 97 97 .groups = idxd_engine_attribute_groups, ··· 577 577 kfree(group); 578 578 } 579 579 580 - struct device_type idxd_group_device_type = { 580 + const struct device_type idxd_group_device_type = { 581 581 .name = "group", 582 582 .release = idxd_conf_group_release, 583 583 .groups = idxd_group_attribute_groups, ··· 1369 1369 kfree(wq); 1370 1370 } 1371 1371 1372 - struct device_type idxd_wq_device_type = { 1372 + const struct device_type idxd_wq_device_type = { 1373 1373 .name = "wq", 1374 1374 .release = idxd_conf_wq_release, 1375 1375 .groups = idxd_wq_attribute_groups, ··· 1798 1798 kfree(idxd); 1799 1799 } 1800 1800 1801 - struct device_type dsa_device_type = { 1801 + const struct device_type dsa_device_type = { 1802 1802 .name = "dsa", 1803 1803 .release = idxd_conf_device_release, 1804 1804 .groups = idxd_attribute_groups, 1805 1805 }; 1806 1806 1807 - struct device_type iax_device_type = { 1807 + const struct device_type iax_device_type = { 1808 1808 .name = "iax", 1809 1809 .release = idxd_conf_device_release, 1810 1810 .groups = idxd_attribute_groups,
+1 -1
drivers/dma/mcf-edma-main.c
··· 202 202 vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev); 203 203 mcf_chan->tcd = mcf_edma->membase + EDMA_TCD 204 204 + i * sizeof(struct fsl_edma_hw_tcd); 205 - iowrite32(0x0, &mcf_chan->tcd->csr); 205 + edma_write_tcdreg(mcf_chan, cpu_to_le32(0), csr); 206 206 } 207 207 208 208 iowrite32(~0, regs->inth);
+1 -1
drivers/dma/of-dma.c
··· 29 29 * to the DMA data stored is retuned. A NULL pointer is returned if no match is 30 30 * found. 31 31 */ 32 - static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) 32 + static struct of_dma *of_dma_find_controller(const struct of_phandle_args *dma_spec) 33 33 { 34 34 struct of_dma *ofdma; 35 35
+1
drivers/dma/pl330.c
··· 2588 2588 2589 2589 desc->status = PREP; 2590 2590 desc->txd.callback = NULL; 2591 + desc->txd.callback_result = NULL; 2591 2592 } 2592 2593 2593 2594 spin_unlock_irqrestore(lock, flags);
+73
drivers/dma/ti/k3-psil-j721s2.c
··· 57 57 }, \ 58 58 } 59 59 60 + #define PSIL_CSI2RX(x) \ 61 + { \ 62 + .thread_id = x, \ 63 + .ep_config = { \ 64 + .ep_type = PSIL_EP_NATIVE, \ 65 + }, \ 66 + } 67 + 60 68 /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ 61 69 static struct psil_ep j721s2_src_ep_map[] = { 62 70 /* PDMA_MCASP - McASP0-4 */ ··· 122 114 PSIL_PDMA_XY_PKT(0x4707), 123 115 PSIL_PDMA_XY_PKT(0x4708), 124 116 PSIL_PDMA_XY_PKT(0x4709), 117 + /* CSI2RX */ 118 + PSIL_CSI2RX(0x4940), 119 + PSIL_CSI2RX(0x4941), 120 + PSIL_CSI2RX(0x4942), 121 + PSIL_CSI2RX(0x4943), 122 + PSIL_CSI2RX(0x4944), 123 + PSIL_CSI2RX(0x4945), 124 + PSIL_CSI2RX(0x4946), 125 + PSIL_CSI2RX(0x4947), 126 + PSIL_CSI2RX(0x4948), 127 + PSIL_CSI2RX(0x4949), 128 + PSIL_CSI2RX(0x494a), 129 + PSIL_CSI2RX(0x494b), 130 + PSIL_CSI2RX(0x494c), 131 + PSIL_CSI2RX(0x494d), 132 + PSIL_CSI2RX(0x494e), 133 + PSIL_CSI2RX(0x494f), 134 + PSIL_CSI2RX(0x4950), 135 + PSIL_CSI2RX(0x4951), 136 + PSIL_CSI2RX(0x4952), 137 + PSIL_CSI2RX(0x4953), 138 + PSIL_CSI2RX(0x4954), 139 + PSIL_CSI2RX(0x4955), 140 + PSIL_CSI2RX(0x4956), 141 + PSIL_CSI2RX(0x4957), 142 + PSIL_CSI2RX(0x4958), 143 + PSIL_CSI2RX(0x4959), 144 + PSIL_CSI2RX(0x495a), 145 + PSIL_CSI2RX(0x495b), 146 + PSIL_CSI2RX(0x495c), 147 + PSIL_CSI2RX(0x495d), 148 + PSIL_CSI2RX(0x495e), 149 + PSIL_CSI2RX(0x495f), 150 + PSIL_CSI2RX(0x4960), 151 + PSIL_CSI2RX(0x4961), 152 + PSIL_CSI2RX(0x4962), 153 + PSIL_CSI2RX(0x4963), 154 + PSIL_CSI2RX(0x4964), 155 + PSIL_CSI2RX(0x4965), 156 + PSIL_CSI2RX(0x4966), 157 + PSIL_CSI2RX(0x4967), 158 + PSIL_CSI2RX(0x4968), 159 + PSIL_CSI2RX(0x4969), 160 + PSIL_CSI2RX(0x496a), 161 + PSIL_CSI2RX(0x496b), 162 + PSIL_CSI2RX(0x496c), 163 + PSIL_CSI2RX(0x496d), 164 + PSIL_CSI2RX(0x496e), 165 + PSIL_CSI2RX(0x496f), 166 + PSIL_CSI2RX(0x4970), 167 + PSIL_CSI2RX(0x4971), 168 + PSIL_CSI2RX(0x4972), 169 + PSIL_CSI2RX(0x4973), 170 + PSIL_CSI2RX(0x4974), 171 + PSIL_CSI2RX(0x4975), 172 + PSIL_CSI2RX(0x4976), 173 + PSIL_CSI2RX(0x4977), 174 + PSIL_CSI2RX(0x4978), 175 + 
PSIL_CSI2RX(0x4979), 176 + PSIL_CSI2RX(0x497a), 177 + PSIL_CSI2RX(0x497b), 178 + PSIL_CSI2RX(0x497c), 179 + PSIL_CSI2RX(0x497d), 180 + PSIL_CSI2RX(0x497e), 181 + PSIL_CSI2RX(0x497f), 125 182 /* MAIN SA2UL */ 126 183 PSIL_SA2UL(0x4a40, 0), 127 184 PSIL_SA2UL(0x4a41, 0),
+223 -91
drivers/dma/ti/k3-udma-glue.c
··· 111 111 return 0; 112 112 } 113 113 114 + static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id, 115 + bool tx_chn) 116 + { 117 + if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) 118 + return -EINVAL; 119 + 120 + if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) 121 + return -EINVAL; 122 + 123 + /* get psil endpoint config */ 124 + common->ep_config = psil_get_ep_config(thread_id); 125 + if (IS_ERR(common->ep_config)) { 126 + dev_err(common->dev, 127 + "No configuration for psi-l thread 0x%04x\n", 128 + thread_id); 129 + return PTR_ERR(common->ep_config); 130 + } 131 + 132 + common->epib = common->ep_config->needs_epib; 133 + common->psdata_size = common->ep_config->psd_size; 134 + 135 + if (tx_chn) 136 + common->dst_thread = thread_id; 137 + else 138 + common->src_thread = thread_id; 139 + 140 + return 0; 141 + } 142 + 114 143 static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, 115 144 const char *name, struct k3_udma_glue_common *common, 116 145 bool tx_chn) ··· 182 153 common->atype_asel = dma_spec.args[1]; 183 154 } 184 155 185 - if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { 186 - ret = -EINVAL; 187 - goto out_put_spec; 188 - } 189 - 190 - if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { 191 - ret = -EINVAL; 192 - goto out_put_spec; 193 - } 194 - 195 - /* get psil endpoint config */ 196 - common->ep_config = psil_get_ep_config(thread_id); 197 - if (IS_ERR(common->ep_config)) { 198 - dev_err(common->dev, 199 - "No configuration for psi-l thread 0x%04x\n", 200 - thread_id); 201 - ret = PTR_ERR(common->ep_config); 202 - goto out_put_spec; 203 - } 204 - 205 - common->epib = common->ep_config->needs_epib; 206 - common->psdata_size = common->ep_config->psd_size; 207 - 208 - if (tx_chn) 209 - common->dst_thread = thread_id; 210 - else 211 - common->src_thread = thread_id; 156 + ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn); 212 157 213 158 
out_put_spec: 214 159 of_node_put(dma_spec.np); 215 160 return ret; 216 - }; 161 + } 162 + 163 + static int 164 + of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common, 165 + bool tx_chn, u32 thread_id) 166 + { 167 + int ret = 0; 168 + 169 + if (unlikely(!udmax_np)) 170 + return -EINVAL; 171 + 172 + ret = of_k3_udma_glue_parse(udmax_np, common); 173 + if (ret) 174 + goto out_put_spec; 175 + 176 + ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn); 177 + 178 + out_put_spec: 179 + of_node_put(udmax_np); 180 + return ret; 181 + } 217 182 218 183 static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 219 184 { ··· 274 251 return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); 275 252 } 276 253 277 - struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, 278 - const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) 254 + static int 255 + k3_udma_glue_request_tx_chn_common(struct device *dev, 256 + struct k3_udma_glue_tx_channel *tx_chn, 257 + struct k3_udma_glue_tx_channel_cfg *cfg) 279 258 { 280 - struct k3_udma_glue_tx_channel *tx_chn; 281 259 int ret; 282 - 283 - tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); 284 - if (!tx_chn) 285 - return ERR_PTR(-ENOMEM); 286 - 287 - tx_chn->common.dev = dev; 288 - tx_chn->common.swdata_size = cfg->swdata_size; 289 - tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; 290 - tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; 291 - tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; 292 - tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; 293 - 294 - /* parse of udmap channel */ 295 - ret = of_k3_udma_glue_parse_chn(dev->of_node, name, 296 - &tx_chn->common, true); 297 - if (ret) 298 - goto err; 299 260 300 261 tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, 301 262 tx_chn->common.psdata_size, ··· 296 289 if (IS_ERR(tx_chn->udma_tchanx)) { 297 290 ret = PTR_ERR(tx_chn->udma_tchanx); 298 291 dev_err(dev, 
"UDMAX tchanx get err %d\n", ret); 299 - goto err; 292 + return ret; 300 293 } 301 294 tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); 302 295 ··· 309 302 dev_err(dev, "Channel Device registration failed %d\n", ret); 310 303 put_device(&tx_chn->common.chan_dev); 311 304 tx_chn->common.chan_dev.parent = NULL; 312 - goto err; 305 + return ret; 313 306 } 314 307 315 308 if (xudma_is_pktdma(tx_chn->common.udmax)) { ··· 333 326 &tx_chn->ringtxcq); 334 327 if (ret) { 335 328 dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret); 336 - goto err; 329 + return ret; 337 330 } 338 331 339 332 /* Set the dma_dev for the rings to be configured */ ··· 349 342 ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); 350 343 if (ret) { 351 344 dev_err(dev, "Failed to cfg ringtx %d\n", ret); 352 - goto err; 345 + return ret; 353 346 } 354 347 355 348 ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); 356 349 if (ret) { 357 350 dev_err(dev, "Failed to cfg ringtx %d\n", ret); 358 - goto err; 351 + return ret; 359 352 } 360 353 361 354 /* request and cfg psi-l */ ··· 366 359 ret = k3_udma_glue_cfg_tx_chn(tx_chn); 367 360 if (ret) { 368 361 dev_err(dev, "Failed to cfg tchan %d\n", ret); 369 - goto err; 362 + return ret; 370 363 } 371 364 372 365 k3_udma_glue_dump_tx_chn(tx_chn); 366 + 367 + return 0; 368 + } 369 + 370 + struct k3_udma_glue_tx_channel * 371 + k3_udma_glue_request_tx_chn(struct device *dev, const char *name, 372 + struct k3_udma_glue_tx_channel_cfg *cfg) 373 + { 374 + struct k3_udma_glue_tx_channel *tx_chn; 375 + int ret; 376 + 377 + tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); 378 + if (!tx_chn) 379 + return ERR_PTR(-ENOMEM); 380 + 381 + tx_chn->common.dev = dev; 382 + tx_chn->common.swdata_size = cfg->swdata_size; 383 + tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; 384 + tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; 385 + tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; 386 + tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; 387 + 388 + 
/* parse of udmap channel */ 389 + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, 390 + &tx_chn->common, true); 391 + if (ret) 392 + goto err; 393 + 394 + ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg); 395 + if (ret) 396 + goto err; 373 397 374 398 return tx_chn; 375 399 ··· 409 371 return ERR_PTR(ret); 410 372 } 411 373 EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); 374 + 375 + struct k3_udma_glue_tx_channel * 376 + k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev, 377 + struct k3_udma_glue_tx_channel_cfg *cfg, 378 + struct device_node *udmax_np, u32 thread_id) 379 + { 380 + struct k3_udma_glue_tx_channel *tx_chn; 381 + int ret; 382 + 383 + tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); 384 + if (!tx_chn) 385 + return ERR_PTR(-ENOMEM); 386 + 387 + tx_chn->common.dev = dev; 388 + tx_chn->common.swdata_size = cfg->swdata_size; 389 + tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; 390 + tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; 391 + tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; 392 + tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; 393 + 394 + ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id); 395 + if (ret) 396 + goto err; 397 + 398 + ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg); 399 + if (ret) 400 + goto err; 401 + 402 + return tx_chn; 403 + 404 + err: 405 + k3_udma_glue_release_tx_chn(tx_chn); 406 + return ERR_PTR(ret); 407 + } 408 + EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id); 412 409 413 410 void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) 414 411 { ··· 1073 1000 return ERR_PTR(ret); 1074 1001 } 1075 1002 1003 + static int 1004 + k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn, 1005 + struct k3_udma_glue_rx_channel_cfg *cfg, 1006 + struct device *dev) 1007 + { 1008 + int ret, i; 1009 + 1010 + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, 1011 + rx_chn->common.psdata_size, 1012 + 
rx_chn->common.swdata_size); 1013 + 1014 + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, 1015 + sizeof(*rx_chn->flows), GFP_KERNEL); 1016 + if (!rx_chn->flows) 1017 + return -ENOMEM; 1018 + 1019 + rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; 1020 + rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); 1021 + dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x", 1022 + rx_chn->common.src_thread, rx_chn->flow_id_base); 1023 + ret = device_register(&rx_chn->common.chan_dev); 1024 + if (ret) { 1025 + dev_err(dev, "Channel Device registration failed %d\n", ret); 1026 + put_device(&rx_chn->common.chan_dev); 1027 + rx_chn->common.chan_dev.parent = NULL; 1028 + return ret; 1029 + } 1030 + 1031 + if (xudma_is_pktdma(rx_chn->common.udmax)) { 1032 + /* prepare the channel device as coherent */ 1033 + rx_chn->common.chan_dev.dma_coherent = true; 1034 + dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, 1035 + DMA_BIT_MASK(48)); 1036 + } 1037 + 1038 + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); 1039 + if (ret) 1040 + return ret; 1041 + 1042 + for (i = 0; i < rx_chn->flow_num; i++) 1043 + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; 1044 + 1045 + k3_udma_glue_dump_rx_chn(rx_chn); 1046 + 1047 + return 0; 1048 + } 1049 + 1076 1050 static struct k3_udma_glue_rx_channel * 1077 1051 k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, 1078 1052 struct k3_udma_glue_rx_channel_cfg *cfg) 1079 1053 { 1080 1054 struct k3_udma_glue_rx_channel *rx_chn; 1081 - int ret, i; 1055 + int ret; 1082 1056 1083 1057 if (cfg->flow_id_num <= 0 || 1084 1058 cfg->flow_id_use_rxchan_id || ··· 1156 1036 if (ret) 1157 1037 goto err; 1158 1038 1159 - rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, 1160 - rx_chn->common.psdata_size, 1161 - rx_chn->common.swdata_size); 1162 - 1163 - rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, 1164 - sizeof(*rx_chn->flows), GFP_KERNEL); 1165 - if 
(!rx_chn->flows) { 1166 - ret = -ENOMEM; 1167 - goto err; 1168 - } 1169 - 1170 - rx_chn->common.chan_dev.class = &k3_udma_glue_devclass; 1171 - rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax); 1172 - dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x", 1173 - rx_chn->common.src_thread); 1174 - ret = device_register(&rx_chn->common.chan_dev); 1175 - if (ret) { 1176 - dev_err(dev, "Channel Device registration failed %d\n", ret); 1177 - put_device(&rx_chn->common.chan_dev); 1178 - rx_chn->common.chan_dev.parent = NULL; 1179 - goto err; 1180 - } 1181 - 1182 - if (xudma_is_pktdma(rx_chn->common.udmax)) { 1183 - /* prepare the channel device as coherent */ 1184 - rx_chn->common.chan_dev.dma_coherent = true; 1185 - dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev, 1186 - DMA_BIT_MASK(48)); 1187 - } 1188 - 1189 - ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); 1039 + ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev); 1190 1040 if (ret) 1191 1041 goto err; 1192 - 1193 - for (i = 0; i < rx_chn->flow_num; i++) 1194 - rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; 1195 - 1196 - k3_udma_glue_dump_rx_chn(rx_chn); 1197 1042 1198 1043 return rx_chn; 1199 1044 ··· 1166 1081 k3_udma_glue_release_rx_chn(rx_chn); 1167 1082 return ERR_PTR(ret); 1168 1083 } 1084 + 1085 + struct k3_udma_glue_rx_channel * 1086 + k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev, 1087 + struct k3_udma_glue_rx_channel_cfg *cfg, 1088 + struct device_node *udmax_np, u32 thread_id) 1089 + { 1090 + struct k3_udma_glue_rx_channel *rx_chn; 1091 + int ret; 1092 + 1093 + if (cfg->flow_id_num <= 0 || 1094 + cfg->flow_id_use_rxchan_id || 1095 + cfg->def_flow_cfg || 1096 + cfg->flow_id_base < 0) 1097 + return ERR_PTR(-EINVAL); 1098 + 1099 + /* 1100 + * Remote RX channel is under control of Remote CPU core, so 1101 + * Linux can only request and manipulate by dedicated RX flows 1102 + */ 1103 + 1104 + rx_chn = devm_kzalloc(dev, 
sizeof(*rx_chn), GFP_KERNEL); 1105 + if (!rx_chn) 1106 + return ERR_PTR(-ENOMEM); 1107 + 1108 + rx_chn->common.dev = dev; 1109 + rx_chn->common.swdata_size = cfg->swdata_size; 1110 + rx_chn->remote = true; 1111 + rx_chn->udma_rchan_id = -1; 1112 + rx_chn->flow_num = cfg->flow_id_num; 1113 + rx_chn->flow_id_base = cfg->flow_id_base; 1114 + rx_chn->psil_paired = false; 1115 + 1116 + ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id); 1117 + if (ret) 1118 + goto err; 1119 + 1120 + ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev); 1121 + if (ret) 1122 + goto err; 1123 + 1124 + return rx_chn; 1125 + 1126 + err: 1127 + k3_udma_glue_release_rx_chn(rx_chn); 1128 + return ERR_PTR(ret); 1129 + } 1130 + EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id); 1169 1131 1170 1132 struct k3_udma_glue_rx_channel * 1171 1133 k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
+6
drivers/dma/xilinx/xilinx_dma.c
··· 112 112 113 113 /* Register Direct Mode Registers */ 114 114 #define XILINX_DMA_REG_VSIZE 0x0000 115 + #define XILINX_DMA_VSIZE_MASK GENMASK(12, 0) 115 116 #define XILINX_DMA_REG_HSIZE 0x0004 117 + #define XILINX_DMA_HSIZE_MASK GENMASK(15, 0) 116 118 117 119 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 118 120 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 ··· 2050 2048 return NULL; 2051 2049 2052 2050 if (!xt->numf || !xt->sgl[0].size) 2051 + return NULL; 2052 + 2053 + if (xt->numf & ~XILINX_DMA_VSIZE_MASK || 2054 + xt->sgl[0].size & ~XILINX_DMA_HSIZE_MASK) 2053 2055 return NULL; 2054 2056 2055 2057 if (xt->frame_size != 1)
+10
include/linux/dma/k3-udma-glue.h
··· 26 26 struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, 27 27 const char *name, struct k3_udma_glue_tx_channel_cfg *cfg); 28 28 29 + struct k3_udma_glue_tx_channel * 30 + k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev, 31 + struct k3_udma_glue_tx_channel_cfg *cfg, 32 + struct device_node *udmax_np, u32 thread_id); 33 + 29 34 void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn); 30 35 int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, 31 36 struct cppi5_host_desc_t *desc_tx, ··· 113 108 struct device *dev, 114 109 const char *name, 115 110 struct k3_udma_glue_rx_channel_cfg *cfg); 111 + 112 + struct k3_udma_glue_rx_channel * 113 + k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev, 114 + struct k3_udma_glue_rx_channel_cfg *cfg, 115 + struct device_node *udmax_np, u32 thread_id); 116 116 117 117 void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn); 118 118 int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);