Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

- Add support in the dmaengine core to do device-node checks for DT
  devices, and update a bunch of drivers to use it instead of their
  open-coded checks (a usage sketch follows this list)

- New drivers and driver support for new hardware, namely:
    - MediaTek UART APDMA
    - Freescale i.mx7ulp edma2
    - Synopsys eDMA IP core version 0
    - Allwinner H6 DMA

- Updates to axi-dma and support for interleaved cyclic transfers

- Greg's removal of debugfs return-value checks across drivers

- Updates to stm32-dma, hsu, dw, pl330, tegra drivers
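
The device-node matching in the first item above replaces per-driver
of_node checks with a new fourth parameter to __dma_request_channel()
(see the dmaengine.c and dma-jz4780.c diffs below). A minimal caller
sketch, assuming a hypothetical filter function and filter data; only
the __dma_request_channel() signature is taken from this series:

	#include <linux/dmaengine.h>
	#include <linux/of_dma.h>

	static bool example_filter_fn(struct dma_chan *chan, void *param); /* hypothetical */

	static struct dma_chan *example_request_chan(struct of_dma *ofdma, void *fparam)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/*
		 * The new last argument restricts the search to the DMA
		 * controller whose device node matches, so the filter no
		 * longer needs to compare of_node pointers itself.
		 */
		return __dma_request_channel(&mask, example_filter_fn, fparam,
					     ofdma->of_node);
	}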

* tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
dmaengine: Revert "dmaengine: fsl-edma: add i.mx7ulp edma2 version support"
dmaengine: at_xdmac: check for non-empty xfers_list before invoking callback
Documentation: dmaengine: clean up description of dmatest usage
dmaengine: tegra210-adma: remove PM_CLK dependency
dmaengine: fsl-edma: add i.mx7ulp edma2 version support
dt-bindings: dma: fsl-edma: add new i.mx7ulp-edma
dmaengine: fsl-edma-common: version check for v2 instead
dmaengine: fsl-edma-common: move dmamux register to another single function
dmaengine: fsl-edma: add drvdata for fsl-edma
dmaengine: Revert "dmaengine: fsl-edma: support little endian for edma driver"
dmaengine: rcar-dmac: Reject zero-length slave DMA requests
dmaengine: dw: Enable iDMA 32-bit on Intel Elkhart Lake
dmaengine: dw-edma: fix semicolon.cocci warnings
dmaengine: sh: usb-dmac: Use [] to denote a flexible array member
dmaengine: dmatest: timeout value of -1 should specify infinite wait
dmaengine: dw: Distinguish ->remove() between DW and iDMA 32-bit
dmaengine: fsl-edma: support little endian for edma driver
dmaengine: hsu: Revert "set HSU_CH_MTSR to memory width"
dmagengine: pl330: add code to get reset property
dt-bindings: pl330: document the optional resets property
...

+3667 -794
-33
Documentation/devicetree/bindings/dma/8250_mtk_dma.txt
···
-* Mediatek UART APDMA Controller
-
-Required properties:
-- compatible should contain:
-  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
-  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above
-
-- reg: The base address of the APDMA register bank.
-
-- interrupts: A single interrupt specifier.
-
-- clocks : Must contain an entry for each entry in clock-names.
-  See ../clocks/clock-bindings.txt for details.
-- clock-names: The APDMA clock for register accesses
-
-Examples:
-
-apdma: dma-controller@11000380 {
-	compatible = "mediatek,mt2712-uart-dma";
-	reg = <0 0x11000380 0 0x400>;
-	interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 65 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 66 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 67 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 68 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 69 IRQ_TYPE_LEVEL_LOW>,
-		     <GIC_SPI 70 IRQ_TYPE_LEVEL_LOW>;
-	clocks = <&pericfg CLK_PERI_AP_DMA>;
-	clock-names = "apdma";
-	#dma-cells = <1>;
-};
-
+3
Documentation/devicetree/bindings/dma/arm-pl330.txt
···
 - dma-channels: contains the total number of DMA channels supported by the DMAC
 - dma-requests: contains the total number of DMA requests supported by the DMAC
 - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP
+- resets: contains an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: must contain at least "dma", and optional is "dma-ocp".

 Example:

+39 -5
Documentation/devicetree/bindings/dma/fsl-edma.txt
···
 Required properties:
 - compatible :
	- "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
+	- "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
 - reg : Specifies base physical address(s) and size of the eDMA registers.
	The 1st region is eDMA control register's address and size.
	The 2nd and the 3rd regions are programmable channel multiplexing
	control register's address and size.
 - interrupts : A list of interrupt-specifiers, one for each entry in
-	interrupt-names.
-- interrupt-names : Should contain:
-	"edma-tx" - the transmission interrupt
-	"edma-err" - the error interrupt
+	interrupt-names on vf610 similar SoC. But for i.mx7ulp per channel
+	per transmission interrupt, total 16 channel interrupt and 1
+	error interrupt(located in the last), no interrupt-names list on
+	i.mx7ulp for clean on dts.
 - #dma-cells : Must be <2>.
	The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1).
	Specific request source can only be multiplexed by specific channels
···
 - clock-names : A list of channel group clock names. Should contain:
	"dmamux0" - clock name of mux0 group
	"dmamux1" - clock name of mux1 group
+	Note: No dmamux0 on i.mx7ulp, but another 'dma' clk added on i.mx7ulp.
 - clocks : A list of phandle and clock-specifier pairs, one for each entry in
	clock-names.

···
 - big-endian: If present registers and hardware scatter/gather descriptors
	of the eDMA are implemented in big endian mode, otherwise in little
	mode.
+- interrupt-names : Should contain the below on vf610 similar SoC but not used
+  on i.mx7ulp similar SoC:
+	"edma-tx" - the transmission interrupt
+	"edma-err" - the error interrupt


 Examples:
···
	clock-names = "dmamux0", "dmamux1";
	clocks = <&clks VF610_CLK_DMAMUX0>,
		 <&clks VF610_CLK_DMAMUX1>;
-};
+}; /* vf610 */

+edma1: dma-controller@40080000 {
+	#dma-cells = <2>;
+	compatible = "fsl,imx7ulp-edma";
+	reg = <0x40080000 0x2000>,
+	      <0x40210000 0x1000>;
+	dma-channels = <32>;
+	interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+		     /* last is eDMA2-ERR interrupt */
+		     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+	clock-names = "dma", "dmamux0";
+	clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
+		 <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
+}; /* i.mx7ulp */

 * DMA clients
 DMA client drivers that uses the DMA function must use the format described
+54
Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt
···
+* Mediatek UART APDMA Controller
+
+Required properties:
+- compatible should contain:
+  * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA
+  * "mediatek,mt6577-uart-dma" for MT6577 and all of the above
+
+- reg: The base address of the APDMA register bank.
+
+- interrupts: A single interrupt specifier.
+  One interrupt per dma-requests, or 8 if no dma-requests property is present
+
+- dma-requests: The number of DMA channels
+
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: The APDMA clock for register accesses
+
+- mediatek,dma-33bits: Present if the DMA requires support
+
+Examples:
+
+apdma: dma-controller@11000400 {
+	compatible = "mediatek,mt2712-uart-dma";
+	reg = <0 0x11000400 0 0x80>,
+	      <0 0x11000480 0 0x80>,
+	      <0 0x11000500 0 0x80>,
+	      <0 0x11000580 0 0x80>,
+	      <0 0x11000600 0 0x80>,
+	      <0 0x11000680 0 0x80>,
+	      <0 0x11000700 0 0x80>,
+	      <0 0x11000780 0 0x80>,
+	      <0 0x11000800 0 0x80>,
+	      <0 0x11000880 0 0x80>,
+	      <0 0x11000900 0 0x80>,
+	      <0 0x11000980 0 0x80>;
+	interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
+		     <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
+	dma-requests = <12>;
+	clocks = <&pericfg CLK_PERI_AP_DMA>;
+	clock-names = "apdma";
+	mediatek,dma-33bits;
+	#dma-cells = <1>;
+};
+7 -2
Documentation/devicetree/bindings/dma/sun6i-dma.txt
···
 };

 ------------------------------------------------------------------------------
-For A64 DMA controller:
+For A64 and H6 DMA controller:

 Required properties:
-- compatible: "allwinner,sun50i-a64-dma"
+- compatible: Must be one of
+	  "allwinner,sun50i-a64-dma"
+	  "allwinner,sun50i-h6-dma"
 - dma-channels: Number of DMA channels supported by the controller.
	Refer to Documentation/devicetree/bindings/dma/dma.txt
+- clocks: In addition to parent AHB clock, it should also contain mbus
+  clock (H6 only)
+- clock-names: Should contain "bus" and "mbus" (H6 only)
 - all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells

 Optional properties:
+13 -8
Documentation/driver-api/dmaengine/dmatest.rst
···

 dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1

-Example of multi-channel test usage:
+Example of multi-channel test usage (new in the 5.0 kernel)::
+
 % modprobe dmatest
 % echo 2000 > /sys/module/dmatest/parameters/timeout
 % echo 1 > /sys/module/dmatest/parameters/iterations
···
 % echo dma0chan2 > /sys/module/dmatest/parameters/channel
 % echo 1 > /sys/module/dmatest/parameters/run

-Note: the channel parameter should always be the last parameter set prior to
-running the test (setting run=1), this is because upon setting the channel
-parameter, that specific channel is requested using the dmaengine and a thread
-is created with the existing parameters. This thread is set as pending
-and will be executed once run is set to 1. Any parameters set after the thread
-is created are not applied.
+.. note::
+  For all tests, starting in the 5.0 kernel, either single- or multi-channel,
+  the channel parameter(s) must be set after all other parameters. It is at
+  that time that the existing parameter values are acquired for use by the
+  thread(s). All other parameters are shared. Therefore, if changes are made
+  to any of the other parameters, and an additional channel specified, the
+  (shared) parameters used for all threads will use the new values.
+  After the channels are specified, each thread is set as pending. All threads
+  begin execution when the run parameter is set to 1.

 .. hint::
-  available channel list could be extracted by running the following command::
+  A list of available channels can be found by running the following command::

   % ls -1 /sys/class/dma/

···
 Channels can be freed by setting run to 0.

 Example::
+
 % echo dma0chan1 > /sys/module/dmatest/parameters/channel
 dmatest: Added 1 threads using dma0chan1
 % cat /sys/class/dma/dma0chan1/in_use
+7
MAINTAINERS
···
 S:	Supported
 F:	drivers/mtd/nand/raw/denali*

+DESIGNWARE EDMA CORE IP DRIVER
+M:	Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+L:	dmaengine@vger.kernel.org
+S:	Maintained
+F:	drivers/dma/dw-edma/
+F:	include/linux/dma/edma.h
+
 DESIGNWARE USB2 DRD IP DRIVER
 M:	Minas Harutyunyan <hminas@synopsys.com>
 L:	linux-usb@vger.kernel.org
+4 -1
drivers/dma/Kconfig
···
	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
+	select REGMAP_MMIO
	help
	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
	  controller is often used in Analog Device's reference designs for FPGA
···

 config TEGRA210_ADMA
	tristate "NVIDIA Tegra210 ADMA support"
-	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
+	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
···
 source "drivers/dma/qcom/Kconfig"

 source "drivers/dma/dw/Kconfig"
+
+source "drivers/dma/dw-edma/Kconfig"

 source "drivers/dma/hsu/Kconfig"

+1
drivers/dma/Makefile
···
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
 obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_DW_EDMA) += dw-edma/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
+2 -3
drivers/dma/amba-pl08x.c
···
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
	/* Expose a simple debugfs interface to view all clocks */
-	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
-			S_IFREG | S_IRUGO, NULL, pl08x,
-			&pl08x_debugfs_fops);
+	debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
+			    NULL, pl08x, &pl08x_debugfs_fops);
 }

 #else
+7 -4
drivers/dma/at_xdmac.c
···
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;

-	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
-	txd = &desc->tx_dma_desc;
+	if (!list_empty(&atchan->xfers_list)) {
+		desc = list_first_entry(&atchan->xfers_list,
+					struct at_xdmac_desc, xfer_node);
+		txd = &desc->tx_dma_desc;

-	if (txd->flags & DMA_PREP_INTERRUPT)
-		dmaengine_desc_get_callback_invoke(txd, NULL);
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);
+	}
 }

 static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
+3 -10
drivers/dma/bcm-sba-raid.c
···
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
-	struct dentry *stats;
 };

 /* ====== Command helper routines ===== */
···

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
-	if (IS_ERR_OR_NULL(sba->root)) {
-		dev_err(sba->dev, "failed to create debugfs root entry\n");
-		sba->root = NULL;
-		goto skip_debugfs;
-	}

	/* Create debugfs stats entry */
-	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
-						 sba_debugfs_stats_show);
-	if (IS_ERR_OR_NULL(sba->stats))
-		dev_err(sba->dev, "failed to create debugfs stats file\n");
+	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
+				    sba_debugfs_stats_show);
+
 skip_debugfs:

	/* Register DMA device with Linux async framework */
+2 -4
drivers/dma/coh901318.c
···

	dma_dentry = debugfs_create_dir("dma", NULL);

-	(void) debugfs_create_file("status",
-			S_IFREG | S_IRUGO,
-			dma_dentry, NULL,
-			&coh901318_debugfs_status_operations);
+	debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL,
+			    &coh901318_debugfs_status_operations);
	return 0;
 }

+191 -10
drivers/dma/dma-axi-dmac.c
··· 2 2 /* 3 3 * Driver for the Analog Devices AXI-DMAC core 4 4 * 5 - * Copyright 2013-2015 Analog Devices Inc. 5 + * Copyright 2013-2019 Analog Devices Inc. 6 6 * Author: Lars-Peter Clausen <lars@metafoo.de> 7 7 */ 8 8 ··· 18 18 #include <linux/of.h> 19 19 #include <linux/of_dma.h> 20 20 #include <linux/platform_device.h> 21 + #include <linux/regmap.h> 21 22 #include <linux/slab.h> 23 + #include <linux/fpga/adi-axi-common.h> 22 24 23 25 #include <dt-bindings/dma/axi-dmac.h> 24 26 ··· 64 62 #define AXI_DMAC_REG_STATUS 0x430 65 63 #define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434 66 64 #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438 65 + #define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c 66 + #define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450 67 67 68 68 #define AXI_DMAC_CTRL_ENABLE BIT(0) 69 69 #define AXI_DMAC_CTRL_PAUSE BIT(1) ··· 74 70 #define AXI_DMAC_IRQ_EOT BIT(1) 75 71 76 72 #define AXI_DMAC_FLAG_CYCLIC BIT(0) 73 + #define AXI_DMAC_FLAG_LAST BIT(1) 74 + #define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2) 75 + 76 + #define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31) 77 77 78 78 /* The maximum ID allocated by the hardware is 31 */ 79 79 #define AXI_DMAC_SG_UNUSED 32U ··· 90 82 unsigned int dest_stride; 91 83 unsigned int src_stride; 92 84 unsigned int id; 85 + unsigned int partial_len; 93 86 bool schedule_when_free; 94 87 }; 95 88 96 89 struct axi_dmac_desc { 97 90 struct virt_dma_desc vdesc; 98 91 bool cyclic; 92 + bool have_partial_xfer; 99 93 100 94 unsigned int num_submitted; 101 95 unsigned int num_completed; ··· 118 108 unsigned int dest_type; 119 109 120 110 unsigned int max_length; 121 - unsigned int align_mask; 111 + unsigned int address_align_mask; 112 + unsigned int length_align_mask; 122 113 114 + bool hw_partial_xfer; 123 115 bool hw_cyclic; 124 116 bool hw_2d; 125 117 }; ··· 179 167 { 180 168 if (len == 0) 181 169 return false; 182 - if ((len & chan->align_mask) != 0) /* Not aligned */ 170 + if ((len & chan->length_align_mask) != 0) /* Not aligned */ 183 171 return false; 184 172 return true; 185 173 } 186 174 187 175 static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr) 188 176 { 189 - if ((addr & chan->align_mask) != 0) /* Not aligned */ 177 + if ((addr & chan->address_align_mask) != 0) /* Not aligned */ 190 178 return false; 191 179 return true; 192 180 } ··· 222 210 } 223 211 224 212 desc->num_submitted++; 225 - if (desc->num_submitted == desc->num_sgs) { 213 + if (desc->num_submitted == desc->num_sgs || 214 + desc->have_partial_xfer) { 226 215 if (desc->cyclic) 227 216 desc->num_submitted = 0; /* Start again */ 228 217 else 229 218 chan->next_desc = NULL; 219 + flags |= AXI_DMAC_FLAG_LAST; 230 220 } else { 231 221 chan->next_desc = desc; 232 222 } ··· 254 240 desc->num_sgs == 1) 255 241 flags |= AXI_DMAC_FLAG_CYCLIC; 256 242 243 + if (chan->hw_partial_xfer) 244 + flags |= AXI_DMAC_FLAG_PARTIAL_REPORT; 245 + 257 246 axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); 258 247 axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1); 259 248 axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags); ··· 269 252 struct axi_dmac_desc, vdesc.node); 270 253 } 271 254 255 + static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan, 256 + struct axi_dmac_sg *sg) 257 + { 258 + if (chan->hw_2d) 259 + return sg->x_len * sg->y_len; 260 + else 261 + return sg->x_len; 262 + } 263 + 264 + static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan) 265 + { 266 + struct axi_dmac *dmac = chan_to_axi_dmac(chan); 267 + struct axi_dmac_desc *desc; 268 + struct 
axi_dmac_sg *sg; 269 + u32 xfer_done, len, id, i; 270 + bool found_sg; 271 + 272 + do { 273 + len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN); 274 + id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID); 275 + 276 + found_sg = false; 277 + list_for_each_entry(desc, &chan->active_descs, vdesc.node) { 278 + for (i = 0; i < desc->num_sgs; i++) { 279 + sg = &desc->sg[i]; 280 + if (sg->id == AXI_DMAC_SG_UNUSED) 281 + continue; 282 + if (sg->id == id) { 283 + desc->have_partial_xfer = true; 284 + sg->partial_len = len; 285 + found_sg = true; 286 + break; 287 + } 288 + } 289 + if (found_sg) 290 + break; 291 + } 292 + 293 + if (found_sg) { 294 + dev_dbg(dmac->dma_dev.dev, 295 + "Found partial segment id=%u, len=%u\n", 296 + id, len); 297 + } else { 298 + dev_warn(dmac->dma_dev.dev, 299 + "Not found partial segment id=%u, len=%u\n", 300 + id, len); 301 + } 302 + 303 + /* Check if we have any more partial transfers */ 304 + xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); 305 + xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE); 306 + 307 + } while (!xfer_done); 308 + } 309 + 310 + static void axi_dmac_compute_residue(struct axi_dmac_chan *chan, 311 + struct axi_dmac_desc *active) 312 + { 313 + struct dmaengine_result *rslt = &active->vdesc.tx_result; 314 + unsigned int start = active->num_completed - 1; 315 + struct axi_dmac_sg *sg; 316 + unsigned int i, total; 317 + 318 + rslt->result = DMA_TRANS_NOERROR; 319 + rslt->residue = 0; 320 + 321 + /* 322 + * We get here if the last completed segment is partial, which 323 + * means we can compute the residue from that segment onwards 324 + */ 325 + for (i = start; i < active->num_sgs; i++) { 326 + sg = &active->sg[i]; 327 + total = axi_dmac_total_sg_bytes(chan, sg); 328 + rslt->residue += (total - sg->partial_len); 329 + } 330 + } 331 + 272 332 static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, 273 333 unsigned int completed_transfers) 274 334 { ··· 356 262 active = axi_dmac_active_desc(chan); 357 263 if (!active) 358 264 return false; 265 + 266 + if (chan->hw_partial_xfer && 267 + (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE)) 268 + axi_dmac_dequeue_partial_xfers(chan); 359 269 360 270 do { 361 271 sg = &active->sg[active->num_completed]; ··· 374 276 start_next = true; 375 277 } 376 278 279 + if (sg->partial_len) 280 + axi_dmac_compute_residue(chan, active); 281 + 377 282 if (active->cyclic) 378 283 vchan_cyclic_callback(&active->vdesc); 379 284 380 - if (active->num_completed == active->num_sgs) { 285 + if (active->num_completed == active->num_sgs || 286 + sg->partial_len) { 381 287 if (active->cyclic) { 382 288 active->num_completed = 0; /* wrap around */ 383 289 } else { ··· 493 391 num_segments = DIV_ROUND_UP(period_len, chan->max_length); 494 392 segment_size = DIV_ROUND_UP(period_len, num_segments); 495 393 /* Take care of alignment */ 496 - segment_size = ((segment_size - 1) | chan->align_mask) + 1; 394 + segment_size = ((segment_size - 1) | chan->length_align_mask) + 1; 497 395 498 396 for (i = 0; i < num_periods; i++) { 499 397 len = period_len; ··· 663 561 desc->sg[0].y_len = 1; 664 562 } 665 563 564 + if (flags & DMA_CYCLIC) 565 + desc->cyclic = true; 566 + 666 567 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); 667 568 } 668 569 ··· 678 573 { 679 574 kfree(container_of(vdesc, struct axi_dmac_desc, vdesc)); 680 575 } 576 + 577 + static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg) 578 + { 579 + switch (reg) { 580 + case AXI_DMAC_REG_IRQ_MASK: 581 + case 
AXI_DMAC_REG_IRQ_SOURCE: 582 + case AXI_DMAC_REG_IRQ_PENDING: 583 + case AXI_DMAC_REG_CTRL: 584 + case AXI_DMAC_REG_TRANSFER_ID: 585 + case AXI_DMAC_REG_START_TRANSFER: 586 + case AXI_DMAC_REG_FLAGS: 587 + case AXI_DMAC_REG_DEST_ADDRESS: 588 + case AXI_DMAC_REG_SRC_ADDRESS: 589 + case AXI_DMAC_REG_X_LENGTH: 590 + case AXI_DMAC_REG_Y_LENGTH: 591 + case AXI_DMAC_REG_DEST_STRIDE: 592 + case AXI_DMAC_REG_SRC_STRIDE: 593 + case AXI_DMAC_REG_TRANSFER_DONE: 594 + case AXI_DMAC_REG_ACTIVE_TRANSFER_ID: 595 + case AXI_DMAC_REG_STATUS: 596 + case AXI_DMAC_REG_CURRENT_SRC_ADDR: 597 + case AXI_DMAC_REG_CURRENT_DEST_ADDR: 598 + case AXI_DMAC_REG_PARTIAL_XFER_LEN: 599 + case AXI_DMAC_REG_PARTIAL_XFER_ID: 600 + return true; 601 + default: 602 + return false; 603 + } 604 + } 605 + 606 + static const struct regmap_config axi_dmac_regmap_config = { 607 + .reg_bits = 32, 608 + .val_bits = 32, 609 + .reg_stride = 4, 610 + .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID, 611 + .readable_reg = axi_dmac_regmap_rdwr, 612 + .writeable_reg = axi_dmac_regmap_rdwr, 613 + }; 681 614 682 615 /* 683 616 * The configuration stored in the devicetree matches the configuration ··· 760 617 return ret; 761 618 chan->dest_width = val / 8; 762 619 763 - chan->align_mask = max(chan->dest_width, chan->src_width) - 1; 620 + chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1; 764 621 765 622 if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) 766 623 chan->direction = DMA_MEM_TO_MEM; ··· 774 631 return 0; 775 632 } 776 633 777 - static void axi_dmac_detect_caps(struct axi_dmac *dmac) 634 + static int axi_dmac_detect_caps(struct axi_dmac *dmac) 778 635 { 779 636 struct axi_dmac_chan *chan = &dmac->chan; 637 + unsigned int version; 638 + 639 + version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); 780 640 781 641 axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); 782 642 if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC) ··· 793 647 chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); 794 648 if (chan->max_length != UINT_MAX) 795 649 chan->max_length++; 650 + 651 + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff); 652 + if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 && 653 + chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) { 654 + dev_err(dmac->dma_dev.dev, 655 + "Destination memory-mapped interface not supported."); 656 + return -ENODEV; 657 + } 658 + 659 + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff); 660 + if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 && 661 + chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) { 662 + dev_err(dmac->dma_dev.dev, 663 + "Source memory-mapped interface not supported."); 664 + return -ENODEV; 665 + } 666 + 667 + if (version >= ADI_AXI_PCORE_VER(4, 2, 'a')) 668 + chan->hw_partial_xfer = true; 669 + 670 + if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) { 671 + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00); 672 + chan->length_align_mask = 673 + axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); 674 + } else { 675 + chan->length_align_mask = chan->address_align_mask; 676 + } 677 + 678 + return 0; 796 679 } 797 680 798 681 static int axi_dmac_probe(struct platform_device *pdev) ··· 897 722 if (ret < 0) 898 723 return ret; 899 724 900 - axi_dmac_detect_caps(dmac); 725 + ret = axi_dmac_detect_caps(dmac); 726 + if (ret) 727 + goto err_clk_disable; 728 + 729 + dma_dev->copy_align = (dmac->chan.address_align_mask + 1); 901 730 902 731 axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); 903 732 ··· 920 741 goto 
err_unregister_of; 921 742 922 743 platform_set_drvdata(pdev, dmac); 744 + 745 + devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); 923 746 924 747 return 0; 925 748
+2 -5
drivers/dma/dma-jz4780.c
···
 };

 struct jz4780_dma_filter_data {
-	struct device_node *of_node;
	uint32_t transfer_type;
	int channel;
 };
···
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

-	if (jzdma->dma_device.dev->of_node != data->of_node)
-		return false;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
···
	if (dma_spec->args_count != 2)
		return NULL;

-	data.of_node = ofdma->of_node;
	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];
···
		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
-		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
+					     ofdma->of_node);
	}
 }

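
With the of_node comparison gone, what remains in a filter is purely
channel-local state. A hedged sketch of the reduced shape (hypothetical,
not the actual jz4780 code, whose real filter also latches the transfer
type on its private channel struct; chan->chan_id stands in for the
driver's own channel index):

	/* Hypothetical post-conversion filter: the core has already skipped
	 * every controller whose device node does not match the one passed
	 * to __dma_request_channel(), so only the channel index is checked. */
	static bool example_reduced_filter(struct dma_chan *chan, void *param)
	{
		struct jz4780_dma_filter_data *data = param;

		if (data->channel > -1 && data->channel != chan->chan_id)
			return false;

		return true;
	}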
+10 -4
drivers/dma/dmaengine.c
···
 /* --- sysfs implementation --- */

 /**
- * dev_to_dma_chan - convert a device pointer to the its sysfs container object
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
  * @dev - device node
  *
  * Must be called under dma_list_mutex
···
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
  * @fn_param: opaque parameter to pass to dma_filter_fn
+ * @np: device node to look for DMA channels
  *
  * Returns pointer to appropriate DMA channel on success or NULL.
  */
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
-				       dma_filter_fn fn, void *fn_param)
+				       dma_filter_fn fn, void *fn_param,
+				       struct device_node *np)
 {
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
···
	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+		/* Finds a DMA controller with matching device node */
+		if (np && device->dev->of_node && np != device->dev->of_node)
+			continue;
+
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;
···
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
-		/* Valid channel found or requester need to be deferred */
+		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}
···
	if (!mask)
		return ERR_PTR(-ENODEV);

-	chan = __dma_request_channel(mask, NULL, NULL);
+	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
+3 -3
drivers/dma/dmatest.c
···
 static int timeout = 3000;
 module_param(timeout, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
-		 "Pass -1 for infinite timeout");
+		 "Pass 0xFFFFFFFF (4294967295) for maximum timeout");

 static bool noverify;
 module_param(noverify, bool, S_IRUGO | S_IWUSR);
···
  * @iterations: iterations before stopping test
  * @xor_sources: number of xor source buffers
  * @pq_sources: number of p+q source buffers
- * @timeout: transfer timeout in msec, -1 for infinite timeout
+ * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
  */
 struct dmatest_params {
	unsigned int buf_size;
···
	unsigned int iterations;
	unsigned int xor_sources;
	unsigned int pq_sources;
-	int timeout;
+	unsigned int timeout;
	bool noverify;
	bool norandom;
	int alignment;
+19
drivers/dma/dw-edma/Kconfig
···
+# SPDX-License-Identifier: GPL-2.0
+
+config DW_EDMA
+	tristate "Synopsys DesignWare eDMA controller driver"
+	depends on PCI && PCI_MSI
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Synopsys DesignWare eDMA controller, normally
+	  implemented on endpoints SoCs.
+
+config DW_EDMA_PCIE
+	tristate "Synopsys DesignWare eDMA PCIe driver"
+	depends on PCI && PCI_MSI
+	select DW_EDMA
+	help
+	  Provides a glue-logic between the Synopsys DesignWare
+	  eDMA controller and an endpoint PCIe device. This also serves
+	  as a reference design to whom desires to use this IP.
+7
drivers/dma/dw-edma/Makefile
···
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DW_EDMA)		+= dw-edma.o
+dw-edma-$(CONFIG_DEBUG_FS)	:= dw-edma-v0-debugfs.o
+dw-edma-objs			:= dw-edma-core.o \
+					dw-edma-v0-core.o $(dw-edma-y)
+obj-$(CONFIG_DW_EDMA_PCIE)	+= dw-edma-pcie.o
+937
drivers/dma/dw-edma/dw-edma-core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA core driver 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #include <linux/module.h> 10 + #include <linux/device.h> 11 + #include <linux/kernel.h> 12 + #include <linux/pm_runtime.h> 13 + #include <linux/dmaengine.h> 14 + #include <linux/err.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/dma/edma.h> 17 + #include <linux/pci.h> 18 + 19 + #include "dw-edma-core.h" 20 + #include "dw-edma-v0-core.h" 21 + #include "../dmaengine.h" 22 + #include "../virt-dma.h" 23 + 24 + static inline 25 + struct device *dchan2dev(struct dma_chan *dchan) 26 + { 27 + return &dchan->dev->device; 28 + } 29 + 30 + static inline 31 + struct device *chan2dev(struct dw_edma_chan *chan) 32 + { 33 + return &chan->vc.chan.dev->device; 34 + } 35 + 36 + static inline 37 + struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) 38 + { 39 + return container_of(vd, struct dw_edma_desc, vd); 40 + } 41 + 42 + static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) 43 + { 44 + struct dw_edma_burst *burst; 45 + 46 + burst = kzalloc(sizeof(*burst), GFP_NOWAIT); 47 + if (unlikely(!burst)) 48 + return NULL; 49 + 50 + INIT_LIST_HEAD(&burst->list); 51 + if (chunk->burst) { 52 + /* Create and add new element into the linked list */ 53 + chunk->bursts_alloc++; 54 + list_add_tail(&burst->list, &chunk->burst->list); 55 + } else { 56 + /* List head */ 57 + chunk->bursts_alloc = 0; 58 + chunk->burst = burst; 59 + } 60 + 61 + return burst; 62 + } 63 + 64 + static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) 65 + { 66 + struct dw_edma_chan *chan = desc->chan; 67 + struct dw_edma *dw = chan->chip->dw; 68 + struct dw_edma_chunk *chunk; 69 + 70 + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); 71 + if (unlikely(!chunk)) 72 + return NULL; 73 + 74 + INIT_LIST_HEAD(&chunk->list); 75 + chunk->chan = chan; 76 + /* Toggling change bit (CB) in each chunk, this is a mechanism to 77 + * inform the eDMA HW block that this is a new linked list ready 78 + * to be consumed. 
79 + * - Odd chunks originate CB equal to 0 80 + * - Even chunks originate CB equal to 1 81 + */ 82 + chunk->cb = !(desc->chunks_alloc % 2); 83 + chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off; 84 + chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off; 85 + 86 + if (desc->chunk) { 87 + /* Create and add new element into the linked list */ 88 + desc->chunks_alloc++; 89 + list_add_tail(&chunk->list, &desc->chunk->list); 90 + if (!dw_edma_alloc_burst(chunk)) { 91 + kfree(chunk); 92 + return NULL; 93 + } 94 + } else { 95 + /* List head */ 96 + chunk->burst = NULL; 97 + desc->chunks_alloc = 0; 98 + desc->chunk = chunk; 99 + } 100 + 101 + return chunk; 102 + } 103 + 104 + static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) 105 + { 106 + struct dw_edma_desc *desc; 107 + 108 + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); 109 + if (unlikely(!desc)) 110 + return NULL; 111 + 112 + desc->chan = chan; 113 + if (!dw_edma_alloc_chunk(desc)) { 114 + kfree(desc); 115 + return NULL; 116 + } 117 + 118 + return desc; 119 + } 120 + 121 + static void dw_edma_free_burst(struct dw_edma_chunk *chunk) 122 + { 123 + struct dw_edma_burst *child, *_next; 124 + 125 + /* Remove all the list elements */ 126 + list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { 127 + list_del(&child->list); 128 + kfree(child); 129 + chunk->bursts_alloc--; 130 + } 131 + 132 + /* Remove the list head */ 133 + kfree(child); 134 + chunk->burst = NULL; 135 + } 136 + 137 + static void dw_edma_free_chunk(struct dw_edma_desc *desc) 138 + { 139 + struct dw_edma_chunk *child, *_next; 140 + 141 + if (!desc->chunk) 142 + return; 143 + 144 + /* Remove all the list elements */ 145 + list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { 146 + dw_edma_free_burst(child); 147 + list_del(&child->list); 148 + kfree(child); 149 + desc->chunks_alloc--; 150 + } 151 + 152 + /* Remove the list head */ 153 + kfree(child); 154 + desc->chunk = NULL; 155 + } 156 + 157 + static void dw_edma_free_desc(struct dw_edma_desc *desc) 158 + { 159 + dw_edma_free_chunk(desc); 160 + kfree(desc); 161 + } 162 + 163 + static void vchan_free_desc(struct virt_dma_desc *vdesc) 164 + { 165 + dw_edma_free_desc(vd2dw_edma_desc(vdesc)); 166 + } 167 + 168 + static void dw_edma_start_transfer(struct dw_edma_chan *chan) 169 + { 170 + struct dw_edma_chunk *child; 171 + struct dw_edma_desc *desc; 172 + struct virt_dma_desc *vd; 173 + 174 + vd = vchan_next_desc(&chan->vc); 175 + if (!vd) 176 + return; 177 + 178 + desc = vd2dw_edma_desc(vd); 179 + if (!desc) 180 + return; 181 + 182 + child = list_first_entry_or_null(&desc->chunk->list, 183 + struct dw_edma_chunk, list); 184 + if (!child) 185 + return; 186 + 187 + dw_edma_v0_core_start(child, !desc->xfer_sz); 188 + desc->xfer_sz += child->ll_region.sz; 189 + dw_edma_free_burst(child); 190 + list_del(&child->list); 191 + kfree(child); 192 + desc->chunks_alloc--; 193 + } 194 + 195 + static int dw_edma_device_config(struct dma_chan *dchan, 196 + struct dma_slave_config *config) 197 + { 198 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 199 + 200 + memcpy(&chan->config, config, sizeof(*config)); 201 + chan->configured = true; 202 + 203 + return 0; 204 + } 205 + 206 + static int dw_edma_device_pause(struct dma_chan *dchan) 207 + { 208 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 209 + int err = 0; 210 + 211 + if (!chan->configured) 212 + err = -EPERM; 213 + else if (chan->status != EDMA_ST_BUSY) 214 + err = -EPERM; 215 + else if (chan->request != EDMA_REQ_NONE) 
216 + err = -EPERM; 217 + else 218 + chan->request = EDMA_REQ_PAUSE; 219 + 220 + return err; 221 + } 222 + 223 + static int dw_edma_device_resume(struct dma_chan *dchan) 224 + { 225 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 226 + int err = 0; 227 + 228 + if (!chan->configured) { 229 + err = -EPERM; 230 + } else if (chan->status != EDMA_ST_PAUSE) { 231 + err = -EPERM; 232 + } else if (chan->request != EDMA_REQ_NONE) { 233 + err = -EPERM; 234 + } else { 235 + chan->status = EDMA_ST_BUSY; 236 + dw_edma_start_transfer(chan); 237 + } 238 + 239 + return err; 240 + } 241 + 242 + static int dw_edma_device_terminate_all(struct dma_chan *dchan) 243 + { 244 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 245 + int err = 0; 246 + LIST_HEAD(head); 247 + 248 + if (!chan->configured) { 249 + /* Do nothing */ 250 + } else if (chan->status == EDMA_ST_PAUSE) { 251 + chan->status = EDMA_ST_IDLE; 252 + chan->configured = false; 253 + } else if (chan->status == EDMA_ST_IDLE) { 254 + chan->configured = false; 255 + } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) { 256 + /* 257 + * The channel is in a false BUSY state, probably didn't 258 + * receive or lost an interrupt 259 + */ 260 + chan->status = EDMA_ST_IDLE; 261 + chan->configured = false; 262 + } else if (chan->request > EDMA_REQ_PAUSE) { 263 + err = -EPERM; 264 + } else { 265 + chan->request = EDMA_REQ_STOP; 266 + } 267 + 268 + return err; 269 + } 270 + 271 + static void dw_edma_device_issue_pending(struct dma_chan *dchan) 272 + { 273 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 274 + unsigned long flags; 275 + 276 + spin_lock_irqsave(&chan->vc.lock, flags); 277 + if (chan->configured && chan->request == EDMA_REQ_NONE && 278 + chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) { 279 + chan->status = EDMA_ST_BUSY; 280 + dw_edma_start_transfer(chan); 281 + } 282 + spin_unlock_irqrestore(&chan->vc.lock, flags); 283 + } 284 + 285 + static enum dma_status 286 + dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, 287 + struct dma_tx_state *txstate) 288 + { 289 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 290 + struct dw_edma_desc *desc; 291 + struct virt_dma_desc *vd; 292 + unsigned long flags; 293 + enum dma_status ret; 294 + u32 residue = 0; 295 + 296 + ret = dma_cookie_status(dchan, cookie, txstate); 297 + if (ret == DMA_COMPLETE) 298 + return ret; 299 + 300 + if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) 301 + ret = DMA_PAUSED; 302 + 303 + if (!txstate) 304 + goto ret_residue; 305 + 306 + spin_lock_irqsave(&chan->vc.lock, flags); 307 + vd = vchan_find_desc(&chan->vc, cookie); 308 + if (vd) { 309 + desc = vd2dw_edma_desc(vd); 310 + if (desc) 311 + residue = desc->alloc_sz - desc->xfer_sz; 312 + } 313 + spin_unlock_irqrestore(&chan->vc.lock, flags); 314 + 315 + ret_residue: 316 + dma_set_residue(txstate, residue); 317 + 318 + return ret; 319 + } 320 + 321 + static struct dma_async_tx_descriptor * 322 + dw_edma_device_transfer(struct dw_edma_transfer *xfer) 323 + { 324 + struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); 325 + enum dma_transfer_direction direction = xfer->direction; 326 + phys_addr_t src_addr, dst_addr; 327 + struct scatterlist *sg = NULL; 328 + struct dw_edma_chunk *chunk; 329 + struct dw_edma_burst *burst; 330 + struct dw_edma_desc *desc; 331 + u32 cnt; 332 + int i; 333 + 334 + if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) || 335 + (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)) 336 + return 
NULL; 337 + 338 + if (xfer->cyclic) { 339 + if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) 340 + return NULL; 341 + } else { 342 + if (xfer->xfer.sg.len < 1) 343 + return NULL; 344 + } 345 + 346 + if (!chan->configured) 347 + return NULL; 348 + 349 + desc = dw_edma_alloc_desc(chan); 350 + if (unlikely(!desc)) 351 + goto err_alloc; 352 + 353 + chunk = dw_edma_alloc_chunk(desc); 354 + if (unlikely(!chunk)) 355 + goto err_alloc; 356 + 357 + src_addr = chan->config.src_addr; 358 + dst_addr = chan->config.dst_addr; 359 + 360 + if (xfer->cyclic) { 361 + cnt = xfer->xfer.cyclic.cnt; 362 + } else { 363 + cnt = xfer->xfer.sg.len; 364 + sg = xfer->xfer.sg.sgl; 365 + } 366 + 367 + for (i = 0; i < cnt; i++) { 368 + if (!xfer->cyclic && !sg) 369 + break; 370 + 371 + if (chunk->bursts_alloc == chan->ll_max) { 372 + chunk = dw_edma_alloc_chunk(desc); 373 + if (unlikely(!chunk)) 374 + goto err_alloc; 375 + } 376 + 377 + burst = dw_edma_alloc_burst(chunk); 378 + if (unlikely(!burst)) 379 + goto err_alloc; 380 + 381 + if (xfer->cyclic) 382 + burst->sz = xfer->xfer.cyclic.len; 383 + else 384 + burst->sz = sg_dma_len(sg); 385 + 386 + chunk->ll_region.sz += burst->sz; 387 + desc->alloc_sz += burst->sz; 388 + 389 + if (direction == DMA_DEV_TO_MEM) { 390 + burst->sar = src_addr; 391 + if (xfer->cyclic) { 392 + burst->dar = xfer->xfer.cyclic.paddr; 393 + } else { 394 + burst->dar = sg_dma_address(sg); 395 + /* Unlike the typical assumption by other 396 + * drivers/IPs the peripheral memory isn't 397 + * a FIFO memory, in this case, it's a 398 + * linear memory and that why the source 399 + * and destination addresses are increased 400 + * by the same portion (data length) 401 + */ 402 + src_addr += sg_dma_len(sg); 403 + } 404 + } else { 405 + burst->dar = dst_addr; 406 + if (xfer->cyclic) { 407 + burst->sar = xfer->xfer.cyclic.paddr; 408 + } else { 409 + burst->sar = sg_dma_address(sg); 410 + /* Unlike the typical assumption by other 411 + * drivers/IPs the peripheral memory isn't 412 + * a FIFO memory, in this case, it's a 413 + * linear memory and that why the source 414 + * and destination addresses are increased 415 + * by the same portion (data length) 416 + */ 417 + dst_addr += sg_dma_len(sg); 418 + } 419 + } 420 + 421 + if (!xfer->cyclic) 422 + sg = sg_next(sg); 423 + } 424 + 425 + return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); 426 + 427 + err_alloc: 428 + if (desc) 429 + dw_edma_free_desc(desc); 430 + 431 + return NULL; 432 + } 433 + 434 + static struct dma_async_tx_descriptor * 435 + dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, 436 + unsigned int len, 437 + enum dma_transfer_direction direction, 438 + unsigned long flags, void *context) 439 + { 440 + struct dw_edma_transfer xfer; 441 + 442 + xfer.dchan = dchan; 443 + xfer.direction = direction; 444 + xfer.xfer.sg.sgl = sgl; 445 + xfer.xfer.sg.len = len; 446 + xfer.flags = flags; 447 + xfer.cyclic = false; 448 + 449 + return dw_edma_device_transfer(&xfer); 450 + } 451 + 452 + static struct dma_async_tx_descriptor * 453 + dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, 454 + size_t len, size_t count, 455 + enum dma_transfer_direction direction, 456 + unsigned long flags) 457 + { 458 + struct dw_edma_transfer xfer; 459 + 460 + xfer.dchan = dchan; 461 + xfer.direction = direction; 462 + xfer.xfer.cyclic.paddr = paddr; 463 + xfer.xfer.cyclic.len = len; 464 + xfer.xfer.cyclic.cnt = count; 465 + xfer.flags = flags; 466 + xfer.cyclic = true; 467 + 468 + return 
dw_edma_device_transfer(&xfer); 469 + } 470 + 471 + static void dw_edma_done_interrupt(struct dw_edma_chan *chan) 472 + { 473 + struct dw_edma_desc *desc; 474 + struct virt_dma_desc *vd; 475 + unsigned long flags; 476 + 477 + dw_edma_v0_core_clear_done_int(chan); 478 + 479 + spin_lock_irqsave(&chan->vc.lock, flags); 480 + vd = vchan_next_desc(&chan->vc); 481 + if (vd) { 482 + switch (chan->request) { 483 + case EDMA_REQ_NONE: 484 + desc = vd2dw_edma_desc(vd); 485 + if (desc->chunks_alloc) { 486 + chan->status = EDMA_ST_BUSY; 487 + dw_edma_start_transfer(chan); 488 + } else { 489 + list_del(&vd->node); 490 + vchan_cookie_complete(vd); 491 + chan->status = EDMA_ST_IDLE; 492 + } 493 + break; 494 + 495 + case EDMA_REQ_STOP: 496 + list_del(&vd->node); 497 + vchan_cookie_complete(vd); 498 + chan->request = EDMA_REQ_NONE; 499 + chan->status = EDMA_ST_IDLE; 500 + break; 501 + 502 + case EDMA_REQ_PAUSE: 503 + chan->request = EDMA_REQ_NONE; 504 + chan->status = EDMA_ST_PAUSE; 505 + break; 506 + 507 + default: 508 + break; 509 + } 510 + } 511 + spin_unlock_irqrestore(&chan->vc.lock, flags); 512 + } 513 + 514 + static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) 515 + { 516 + struct virt_dma_desc *vd; 517 + unsigned long flags; 518 + 519 + dw_edma_v0_core_clear_abort_int(chan); 520 + 521 + spin_lock_irqsave(&chan->vc.lock, flags); 522 + vd = vchan_next_desc(&chan->vc); 523 + if (vd) { 524 + list_del(&vd->node); 525 + vchan_cookie_complete(vd); 526 + } 527 + spin_unlock_irqrestore(&chan->vc.lock, flags); 528 + chan->request = EDMA_REQ_NONE; 529 + chan->status = EDMA_ST_IDLE; 530 + } 531 + 532 + static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write) 533 + { 534 + struct dw_edma_irq *dw_irq = data; 535 + struct dw_edma *dw = dw_irq->dw; 536 + unsigned long total, pos, val; 537 + unsigned long off; 538 + u32 mask; 539 + 540 + if (write) { 541 + total = dw->wr_ch_cnt; 542 + off = 0; 543 + mask = dw_irq->wr_mask; 544 + } else { 545 + total = dw->rd_ch_cnt; 546 + off = dw->wr_ch_cnt; 547 + mask = dw_irq->rd_mask; 548 + } 549 + 550 + val = dw_edma_v0_core_status_done_int(dw, write ? 551 + EDMA_DIR_WRITE : 552 + EDMA_DIR_READ); 553 + val &= mask; 554 + for_each_set_bit(pos, &val, total) { 555 + struct dw_edma_chan *chan = &dw->chan[pos + off]; 556 + 557 + dw_edma_done_interrupt(chan); 558 + } 559 + 560 + val = dw_edma_v0_core_status_abort_int(dw, write ? 
561 + EDMA_DIR_WRITE : 562 + EDMA_DIR_READ); 563 + val &= mask; 564 + for_each_set_bit(pos, &val, total) { 565 + struct dw_edma_chan *chan = &dw->chan[pos + off]; 566 + 567 + dw_edma_abort_interrupt(chan); 568 + } 569 + 570 + return IRQ_HANDLED; 571 + } 572 + 573 + static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data) 574 + { 575 + return dw_edma_interrupt(irq, data, true); 576 + } 577 + 578 + static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data) 579 + { 580 + return dw_edma_interrupt(irq, data, false); 581 + } 582 + 583 + static irqreturn_t dw_edma_interrupt_common(int irq, void *data) 584 + { 585 + dw_edma_interrupt(irq, data, true); 586 + dw_edma_interrupt(irq, data, false); 587 + 588 + return IRQ_HANDLED; 589 + } 590 + 591 + static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) 592 + { 593 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 594 + 595 + if (chan->status != EDMA_ST_IDLE) 596 + return -EBUSY; 597 + 598 + pm_runtime_get(chan->chip->dev); 599 + 600 + return 0; 601 + } 602 + 603 + static void dw_edma_free_chan_resources(struct dma_chan *dchan) 604 + { 605 + unsigned long timeout = jiffies + msecs_to_jiffies(5000); 606 + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); 607 + int ret; 608 + 609 + while (time_before(jiffies, timeout)) { 610 + ret = dw_edma_device_terminate_all(dchan); 611 + if (!ret) 612 + break; 613 + 614 + if (time_after_eq(jiffies, timeout)) 615 + return; 616 + 617 + cpu_relax(); 618 + } 619 + 620 + pm_runtime_put(chan->chip->dev); 621 + } 622 + 623 + static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, 624 + u32 wr_alloc, u32 rd_alloc) 625 + { 626 + struct dw_edma_region *dt_region; 627 + struct device *dev = chip->dev; 628 + struct dw_edma *dw = chip->dw; 629 + struct dw_edma_chan *chan; 630 + size_t ll_chunk, dt_chunk; 631 + struct dw_edma_irq *irq; 632 + struct dma_device *dma; 633 + u32 i, j, cnt, ch_cnt; 634 + u32 alloc, off_alloc; 635 + int err = 0; 636 + u32 pos; 637 + 638 + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; 639 + ll_chunk = dw->ll_region.sz; 640 + dt_chunk = dw->dt_region.sz; 641 + 642 + /* Calculate linked list chunk for each channel */ 643 + ll_chunk /= roundup_pow_of_two(ch_cnt); 644 + 645 + /* Calculate linked list chunk for each channel */ 646 + dt_chunk /= roundup_pow_of_two(ch_cnt); 647 + 648 + if (write) { 649 + i = 0; 650 + cnt = dw->wr_ch_cnt; 651 + dma = &dw->wr_edma; 652 + alloc = wr_alloc; 653 + off_alloc = 0; 654 + } else { 655 + i = dw->wr_ch_cnt; 656 + cnt = dw->rd_ch_cnt; 657 + dma = &dw->rd_edma; 658 + alloc = rd_alloc; 659 + off_alloc = wr_alloc; 660 + } 661 + 662 + INIT_LIST_HEAD(&dma->channels); 663 + for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) { 664 + chan = &dw->chan[i]; 665 + 666 + dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL); 667 + if (!dt_region) 668 + return -ENOMEM; 669 + 670 + chan->vc.chan.private = dt_region; 671 + 672 + chan->chip = chip; 673 + chan->id = j; 674 + chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ; 675 + chan->configured = false; 676 + chan->request = EDMA_REQ_NONE; 677 + chan->status = EDMA_ST_IDLE; 678 + 679 + chan->ll_off = (ll_chunk * i); 680 + chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1; 681 + 682 + chan->dt_off = (dt_chunk * i); 683 + 684 + dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n", 685 + write ? 
"write" : "read", j, 686 + chan->ll_off, chan->ll_max); 687 + 688 + if (dw->nr_irqs == 1) 689 + pos = 0; 690 + else 691 + pos = off_alloc + (j % alloc); 692 + 693 + irq = &dw->irq[pos]; 694 + 695 + if (write) 696 + irq->wr_mask |= BIT(j); 697 + else 698 + irq->rd_mask |= BIT(j); 699 + 700 + irq->dw = dw; 701 + memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); 702 + 703 + dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", 704 + write ? "write" : "read", j, 705 + chan->msi.address_hi, chan->msi.address_lo, 706 + chan->msi.data); 707 + 708 + chan->vc.desc_free = vchan_free_desc; 709 + vchan_init(&chan->vc, dma); 710 + 711 + dt_region->paddr = dw->dt_region.paddr + chan->dt_off; 712 + dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off; 713 + dt_region->sz = dt_chunk; 714 + 715 + dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n", 716 + write ? "write" : "read", j, chan->dt_off); 717 + 718 + dw_edma_v0_core_device_config(chan); 719 + } 720 + 721 + /* Set DMA channel capabilities */ 722 + dma_cap_zero(dma->cap_mask); 723 + dma_cap_set(DMA_SLAVE, dma->cap_mask); 724 + dma_cap_set(DMA_CYCLIC, dma->cap_mask); 725 + dma_cap_set(DMA_PRIVATE, dma->cap_mask); 726 + dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); 727 + dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 728 + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 729 + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; 730 + dma->chancnt = cnt; 731 + 732 + /* Set DMA channel callbacks */ 733 + dma->dev = chip->dev; 734 + dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; 735 + dma->device_free_chan_resources = dw_edma_free_chan_resources; 736 + dma->device_config = dw_edma_device_config; 737 + dma->device_pause = dw_edma_device_pause; 738 + dma->device_resume = dw_edma_device_resume; 739 + dma->device_terminate_all = dw_edma_device_terminate_all; 740 + dma->device_issue_pending = dw_edma_device_issue_pending; 741 + dma->device_tx_status = dw_edma_device_tx_status; 742 + dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; 743 + dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; 744 + 745 + dma_set_max_seg_size(dma->dev, U32_MAX); 746 + 747 + /* Register DMA device */ 748 + err = dma_async_device_register(dma); 749 + 750 + return err; 751 + } 752 + 753 + static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) 754 + { 755 + if (*nr_irqs && *alloc < cnt) { 756 + (*alloc)++; 757 + (*nr_irqs)--; 758 + } 759 + } 760 + 761 + static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) 762 + { 763 + while (*mask * alloc < cnt) 764 + (*mask)++; 765 + } 766 + 767 + static int dw_edma_irq_request(struct dw_edma_chip *chip, 768 + u32 *wr_alloc, u32 *rd_alloc) 769 + { 770 + struct device *dev = chip->dev; 771 + struct dw_edma *dw = chip->dw; 772 + u32 wr_mask = 1; 773 + u32 rd_mask = 1; 774 + int i, err = 0; 775 + u32 ch_cnt; 776 + 777 + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; 778 + 779 + if (dw->nr_irqs < 1) 780 + return -EINVAL; 781 + 782 + if (dw->nr_irqs == 1) { 783 + /* Common IRQ shared among all channels */ 784 + err = request_irq(pci_irq_vector(to_pci_dev(dev), 0), 785 + dw_edma_interrupt_common, 786 + IRQF_SHARED, dw->name, &dw->irq[0]); 787 + if (err) { 788 + dw->nr_irqs = 0; 789 + return err; 790 + } 791 + 792 + get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0), 793 + &dw->irq[0].msi); 794 + } else { 795 + /* Distribute IRQs equally among all channels */ 796 + int tmp = dw->nr_irqs; 797 + 798 + while (tmp && (*wr_alloc 
+ *rd_alloc) < ch_cnt) { 799 + dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); 800 + dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); 801 + } 802 + 803 + dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); 804 + dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); 805 + 806 + for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { 807 + err = request_irq(pci_irq_vector(to_pci_dev(dev), i), 808 + i < *wr_alloc ? 809 + dw_edma_interrupt_write : 810 + dw_edma_interrupt_read, 811 + IRQF_SHARED, dw->name, 812 + &dw->irq[i]); 813 + if (err) { 814 + dw->nr_irqs = i; 815 + return err; 816 + } 817 + 818 + get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i), 819 + &dw->irq[i].msi); 820 + } 821 + 822 + dw->nr_irqs = i; 823 + } 824 + 825 + return err; 826 + } 827 + 828 + int dw_edma_probe(struct dw_edma_chip *chip) 829 + { 830 + struct device *dev = chip->dev; 831 + struct dw_edma *dw = chip->dw; 832 + u32 wr_alloc = 0; 833 + u32 rd_alloc = 0; 834 + int i, err; 835 + 836 + raw_spin_lock_init(&dw->lock); 837 + 838 + /* Find out how many write channels are supported by hardware */ 839 + dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE); 840 + if (!dw->wr_ch_cnt) 841 + return -EINVAL; 842 + 843 + /* Find out how many read channels are supported by hardware */ 844 + dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ); 845 + if (!dw->rd_ch_cnt) 846 + return -EINVAL; 847 + 848 + dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", 849 + dw->wr_ch_cnt, dw->rd_ch_cnt); 850 + 851 + /* Allocate channels */ 852 + dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, 853 + sizeof(*dw->chan), GFP_KERNEL); 854 + if (!dw->chan) 855 + return -ENOMEM; 856 + 857 + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id); 858 + 859 + /* Disable eDMA, only to establish the ideal initial conditions */ 860 + dw_edma_v0_core_off(dw); 861 + 862 + /* Request IRQs */ 863 + err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc); 864 + if (err) 865 + return err; 866 + 867 + /* Setup write channels */ 868 + err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc); 869 + if (err) 870 + goto err_irq_free; 871 + 872 + /* Setup read channels */ 873 + err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc); 874 + if (err) 875 + goto err_irq_free; 876 + 877 + /* Power management */ 878 + pm_runtime_enable(dev); 879 + 880 + /* Turn debugfs on */ 881 + dw_edma_v0_core_debugfs_on(chip); 882 + 883 + return 0; 884 + 885 + err_irq_free: 886 + for (i = (dw->nr_irqs - 1); i >= 0; i--) 887 + free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); 888 + 889 + dw->nr_irqs = 0; 890 + 891 + return err; 892 + } 893 + EXPORT_SYMBOL_GPL(dw_edma_probe); 894 + 895 + int dw_edma_remove(struct dw_edma_chip *chip) 896 + { 897 + struct dw_edma_chan *chan, *_chan; 898 + struct device *dev = chip->dev; 899 + struct dw_edma *dw = chip->dw; 900 + int i; 901 + 902 + /* Disable eDMA */ 903 + dw_edma_v0_core_off(dw); 904 + 905 + /* Free irqs */ 906 + for (i = (dw->nr_irqs - 1); i >= 0; i--) 907 + free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); 908 + 909 + /* Power management */ 910 + pm_runtime_disable(dev); 911 + 912 + list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, 913 + vc.chan.device_node) { 914 + list_del(&chan->vc.chan.device_node); 915 + tasklet_kill(&chan->vc.task); 916 + } 917 + 918 + list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, 919 + vc.chan.device_node) { 920 + list_del(&chan->vc.chan.device_node); 921 + tasklet_kill(&chan->vc.task); 922 + } 923 + 924 + /* 
Deregister eDMA device */ 925 + dma_async_device_unregister(&dw->wr_edma); 926 + dma_async_device_unregister(&dw->rd_edma); 927 + 928 + /* Turn debugfs off */ 929 + dw_edma_v0_core_debugfs_off(); 930 + 931 + return 0; 932 + } 933 + EXPORT_SYMBOL_GPL(dw_edma_remove); 934 + 935 + MODULE_LICENSE("GPL v2"); 936 + MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); 937 + MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
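The IRQ distribution above rewards a worked example. With the vectors granted by pci_alloc_irq_vectors() and the channel counts read from hardware, the loop in dw_edma_irq_request() hands out one vector per direction per pass until either vectors or channels run out; dw_edma_channel_setup() then maps channel j onto vector (j % alloc) for its direction. A standalone model of the arithmetic (userspace C, not driver code), taking 4 vectors and 8 write + 8 read channels:

#include <stdio.h>

/* Mirrors dw_edma_dec_irq_alloc() from the core above. */
static void dec_irq_alloc(int *nr_irqs, unsigned int *alloc, unsigned int cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

int main(void)
{
	unsigned int wr_alloc = 0, rd_alloc = 0, wr_ch = 8, rd_ch = 8;
	int tmp = 4;	/* vectors granted by pci_alloc_irq_vectors() */

	while (tmp && (wr_alloc + rd_alloc) < (wr_ch + rd_ch)) {
		dec_irq_alloc(&tmp, &wr_alloc, wr_ch);
		dec_irq_alloc(&tmp, &rd_alloc, rd_ch);
	}
	/* Prints "wr_alloc=2 rd_alloc=2": two vectors per direction,
	 * so write channel j is served by write vector j % 2. */
	printf("wr_alloc=%u rd_alloc=%u\n", wr_alloc, rd_alloc);
	return 0;
}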
+165
drivers/dma/dw-edma/dw-edma-core.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA core driver 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #ifndef _DW_EDMA_CORE_H 10 + #define _DW_EDMA_CORE_H 11 + 12 + #include <linux/msi.h> 13 + #include <linux/dma/edma.h> 14 + 15 + #include "../virt-dma.h" 16 + 17 + #define EDMA_LL_SZ 24 18 + 19 + enum dw_edma_dir { 20 + EDMA_DIR_WRITE = 0, 21 + EDMA_DIR_READ 22 + }; 23 + 24 + enum dw_edma_mode { 25 + EDMA_MODE_LEGACY = 0, 26 + EDMA_MODE_UNROLL 27 + }; 28 + 29 + enum dw_edma_request { 30 + EDMA_REQ_NONE = 0, 31 + EDMA_REQ_STOP, 32 + EDMA_REQ_PAUSE 33 + }; 34 + 35 + enum dw_edma_status { 36 + EDMA_ST_IDLE = 0, 37 + EDMA_ST_PAUSE, 38 + EDMA_ST_BUSY 39 + }; 40 + 41 + struct dw_edma_chan; 42 + struct dw_edma_chunk; 43 + 44 + struct dw_edma_burst { 45 + struct list_head list; 46 + u64 sar; 47 + u64 dar; 48 + u32 sz; 49 + }; 50 + 51 + struct dw_edma_region { 52 + phys_addr_t paddr; 53 + dma_addr_t vaddr; 54 + size_t sz; 55 + }; 56 + 57 + struct dw_edma_chunk { 58 + struct list_head list; 59 + struct dw_edma_chan *chan; 60 + struct dw_edma_burst *burst; 61 + 62 + u32 bursts_alloc; 63 + 64 + u8 cb; 65 + struct dw_edma_region ll_region; /* Linked list */ 66 + }; 67 + 68 + struct dw_edma_desc { 69 + struct virt_dma_desc vd; 70 + struct dw_edma_chan *chan; 71 + struct dw_edma_chunk *chunk; 72 + 73 + u32 chunks_alloc; 74 + 75 + u32 alloc_sz; 76 + u32 xfer_sz; 77 + }; 78 + 79 + struct dw_edma_chan { 80 + struct virt_dma_chan vc; 81 + struct dw_edma_chip *chip; 82 + int id; 83 + enum dw_edma_dir dir; 84 + 85 + off_t ll_off; 86 + u32 ll_max; 87 + 88 + off_t dt_off; 89 + 90 + struct msi_msg msi; 91 + 92 + enum dw_edma_request request; 93 + enum dw_edma_status status; 94 + u8 configured; 95 + 96 + struct dma_slave_config config; 97 + }; 98 + 99 + struct dw_edma_irq { 100 + struct msi_msg msi; 101 + u32 wr_mask; 102 + u32 rd_mask; 103 + struct dw_edma *dw; 104 + }; 105 + 106 + struct dw_edma { 107 + char name[20]; 108 + 109 + struct dma_device wr_edma; 110 + u16 wr_ch_cnt; 111 + 112 + struct dma_device rd_edma; 113 + u16 rd_ch_cnt; 114 + 115 + struct dw_edma_region rg_region; /* Registers */ 116 + struct dw_edma_region ll_region; /* Linked list */ 117 + struct dw_edma_region dt_region; /* Data */ 118 + 119 + struct dw_edma_irq *irq; 120 + int nr_irqs; 121 + 122 + u32 version; 123 + enum dw_edma_mode mode; 124 + 125 + struct dw_edma_chan *chan; 126 + const struct dw_edma_core_ops *ops; 127 + 128 + raw_spinlock_t lock; /* Only for legacy */ 129 + }; 130 + 131 + struct dw_edma_sg { 132 + struct scatterlist *sgl; 133 + unsigned int len; 134 + }; 135 + 136 + struct dw_edma_cyclic { 137 + dma_addr_t paddr; 138 + size_t len; 139 + size_t cnt; 140 + }; 141 + 142 + struct dw_edma_transfer { 143 + struct dma_chan *dchan; 144 + union dw_edma_xfer { 145 + struct dw_edma_sg sg; 146 + struct dw_edma_cyclic cyclic; 147 + } xfer; 148 + enum dma_transfer_direction direction; 149 + unsigned long flags; 150 + bool cyclic; 151 + }; 152 + 153 + static inline 154 + struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc) 155 + { 156 + return container_of(vc, struct dw_edma_chan, vc); 157 + } 158 + 159 + static inline 160 + struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan) 161 + { 162 + return vc2dw_edma_chan(to_virt_chan(dchan)); 163 + } 164 + 165 + #endif /* _DW_EDMA_CORE_H */
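The two inline helpers at the bottom of this header encode the embedding chain dma_chan -> virt_dma_chan -> dw_edma_chan, so a callback handed a generic channel pointer recovers the driver-private state with no lookup table. A minimal userspace model of the same container_of() arithmetic (struct names shortened here; these are not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vchan { int task; };

struct edma_chan {
	struct vchan vc;	/* embedded, as dw_edma_chan embeds virt_dma_chan */
	int id;
};

int main(void)
{
	struct edma_chan ch = { .id = 3 };
	struct vchan *vc = &ch.vc;	/* what the framework passes around */

	/* Recover the containing structure; prints "id=3". */
	printf("id=%d\n", container_of(vc, struct edma_chan, vc)->id);
	return 0;
}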
+229
drivers/dma/dw-edma/dw-edma-pcie.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA PCIe driver 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/module.h> 11 + #include <linux/pci.h> 12 + #include <linux/device.h> 13 + #include <linux/dma/edma.h> 14 + #include <linux/pci-epf.h> 15 + #include <linux/msi.h> 16 + 17 + #include "dw-edma-core.h" 18 + 19 + struct dw_edma_pcie_data { 20 + /* eDMA registers location */ 21 + enum pci_barno rg_bar; 22 + off_t rg_off; 23 + size_t rg_sz; 24 + /* eDMA memory linked list location */ 25 + enum pci_barno ll_bar; 26 + off_t ll_off; 27 + size_t ll_sz; 28 + /* eDMA memory data location */ 29 + enum pci_barno dt_bar; 30 + off_t dt_off; 31 + size_t dt_sz; 32 + /* Other */ 33 + u32 version; 34 + enum dw_edma_mode mode; 35 + u8 irqs; 36 + }; 37 + 38 + static const struct dw_edma_pcie_data snps_edda_data = { 39 + /* eDMA registers location */ 40 + .rg_bar = BAR_0, 41 + .rg_off = 0x00001000, /* 4 Kbytes */ 42 + .rg_sz = 0x00002000, /* 8 Kbytes */ 43 + /* eDMA memory linked list location */ 44 + .ll_bar = BAR_2, 45 + .ll_off = 0x00000000, /* 0 Kbytes */ 46 + .ll_sz = 0x00800000, /* 8 Mbytes */ 47 + /* eDMA memory data location */ 48 + .dt_bar = BAR_2, 49 + .dt_off = 0x00800000, /* 8 Mbytes */ 50 + .dt_sz = 0x03800000, /* 56 Mbytes */ 51 + /* Other */ 52 + .version = 0, 53 + .mode = EDMA_MODE_UNROLL, 54 + .irqs = 1, 55 + }; 56 + 57 + static int dw_edma_pcie_probe(struct pci_dev *pdev, 58 + const struct pci_device_id *pid) 59 + { 60 + const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; 61 + struct device *dev = &pdev->dev; 62 + struct dw_edma_chip *chip; 63 + int err, nr_irqs; 64 + struct dw_edma *dw; 65 + 66 + /* Enable PCI device */ 67 + err = pcim_enable_device(pdev); 68 + if (err) { 69 + pci_err(pdev, "enabling device failed\n"); 70 + return err; 71 + } 72 + 73 + /* Mapping PCI BAR regions */ 74 + err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) | 75 + BIT(pdata->ll_bar) | 76 + BIT(pdata->dt_bar), 77 + pci_name(pdev)); 78 + if (err) { 79 + pci_err(pdev, "eDMA BAR I/O remapping failed\n"); 80 + return err; 81 + } 82 + 83 + pci_set_master(pdev); 84 + 85 + /* DMA configuration */ 86 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 87 + if (!err) { 88 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 89 + if (err) { 90 + pci_err(pdev, "consistent DMA mask 64 set failed\n"); 91 + return err; 92 + } 93 + } else { 94 + pci_err(pdev, "DMA mask 64 set failed\n"); 95 + 96 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 97 + if (err) { 98 + pci_err(pdev, "DMA mask 32 set failed\n"); 99 + return err; 100 + } 101 + 102 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 103 + if (err) { 104 + pci_err(pdev, "consistent DMA mask 32 set failed\n"); 105 + return err; 106 + } 107 + } 108 + 109 + /* Data structure allocation */ 110 + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); 111 + if (!chip) 112 + return -ENOMEM; 113 + 114 + dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); 115 + if (!dw) 116 + return -ENOMEM; 117 + 118 + /* IRQs allocation */ 119 + nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs, 120 + PCI_IRQ_MSI | PCI_IRQ_MSIX); 121 + if (nr_irqs < 1) { 122 + pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", 123 + nr_irqs); 124 + return -EPERM; 125 + } 126 + 127 + /* Data structure initialization */ 128 + chip->dw = dw; 129 + chip->dev = dev; 130 + chip->id = 
pdev->devfn; 131 + chip->irq = pdev->irq; 132 + 133 + dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; 134 + dw->rg_region.vaddr += pdata->rg_off; 135 + dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; 136 + dw->rg_region.paddr += pdata->rg_off; 137 + dw->rg_region.sz = pdata->rg_sz; 138 + 139 + dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; 140 + dw->ll_region.vaddr += pdata->ll_off; 141 + dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; 142 + dw->ll_region.paddr += pdata->ll_off; 143 + dw->ll_region.sz = pdata->ll_sz; 144 + 145 + dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; 146 + dw->dt_region.vaddr += pdata->dt_off; 147 + dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; 148 + dw->dt_region.paddr += pdata->dt_off; 149 + dw->dt_region.sz = pdata->dt_sz; 150 + 151 + dw->version = pdata->version; 152 + dw->mode = pdata->mode; 153 + dw->nr_irqs = nr_irqs; 154 + 155 + /* Debug info */ 156 + pci_dbg(pdev, "Version:\t%u\n", dw->version); 157 + 158 + pci_dbg(pdev, "Mode:\t%s\n", 159 + dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); 160 + 161 + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 162 + pdata->rg_bar, pdata->rg_off, pdata->rg_sz, 163 + &dw->rg_region.vaddr, &dw->rg_region.paddr); 164 + 165 + pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 166 + pdata->ll_bar, pdata->ll_off, pdata->ll_sz, 167 + &dw->ll_region.vaddr, &dw->ll_region.paddr); 168 + 169 + pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", 170 + pdata->dt_bar, pdata->dt_off, pdata->dt_sz, 171 + &dw->dt_region.vaddr, &dw->dt_region.paddr); 172 + 173 + pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); 174 + 175 + /* Validating if PCI interrupts were enabled */ 176 + if (!pci_dev_msi_enabled(pdev)) { 177 + pci_err(pdev, "enable interrupt failed\n"); 178 + return -EPERM; 179 + } 180 + 181 + dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL); 182 + if (!dw->irq) 183 + return -ENOMEM; 184 + 185 + /* Starting eDMA driver */ 186 + err = dw_edma_probe(chip); 187 + if (err) { 188 + pci_err(pdev, "eDMA probe failed\n"); 189 + return err; 190 + } 191 + 192 + /* Saving data structure reference */ 193 + pci_set_drvdata(pdev, chip); 194 + 195 + return 0; 196 + } 197 + 198 + static void dw_edma_pcie_remove(struct pci_dev *pdev) 199 + { 200 + struct dw_edma_chip *chip = pci_get_drvdata(pdev); 201 + int err; 202 + 203 + /* Stopping eDMA driver */ 204 + err = dw_edma_remove(chip); 205 + if (err) 206 + pci_warn(pdev, "can't remove device properly: %d\n", err); 207 + 208 + /* Freeing IRQs */ 209 + pci_free_irq_vectors(pdev); 210 + } 211 + 212 + static const struct pci_device_id dw_edma_pcie_id_table[] = { 213 + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, 214 + { } 215 + }; 216 + MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); 217 + 218 + static struct pci_driver dw_edma_pcie_driver = { 219 + .name = "dw-edma-pcie", 220 + .id_table = dw_edma_pcie_id_table, 221 + .probe = dw_edma_pcie_probe, 222 + .remove = dw_edma_pcie_remove, 223 + }; 224 + 225 + module_pci_driver(dw_edma_pcie_driver); 226 + 227 + MODULE_LICENSE("GPL v2"); 228 + MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); 229 + MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
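One note on the mask setup in dw_edma_pcie_probe(): the 64-bit-then-32-bit fallback is spelled out with four pci_set_dma_mask()/pci_set_consistent_dma_mask() calls. A sketch of the same policy with the combined helper, assuming the same error handling is wanted:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		pci_err(pdev, "DMA mask 64 set failed\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pci_err(pdev, "DMA mask 32 set failed\n");
			return err;
		}
	}

Either form leaves the device limited to 32-bit addressing only when the platform cannot do better.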
+354
drivers/dma/dw-edma/dw-edma-v0-core.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA v0 core 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #include <linux/bitfield.h> 10 + 11 + #include "dw-edma-core.h" 12 + #include "dw-edma-v0-core.h" 13 + #include "dw-edma-v0-regs.h" 14 + #include "dw-edma-v0-debugfs.h" 15 + 16 + enum dw_edma_control { 17 + DW_EDMA_V0_CB = BIT(0), 18 + DW_EDMA_V0_TCB = BIT(1), 19 + DW_EDMA_V0_LLP = BIT(2), 20 + DW_EDMA_V0_LIE = BIT(3), 21 + DW_EDMA_V0_RIE = BIT(4), 22 + DW_EDMA_V0_CCS = BIT(8), 23 + DW_EDMA_V0_LLE = BIT(9), 24 + }; 25 + 26 + static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) 27 + { 28 + return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr; 29 + } 30 + 31 + #define SET(dw, name, value) \ 32 + writel(value, &(__dw_regs(dw)->name)) 33 + 34 + #define GET(dw, name) \ 35 + readl(&(__dw_regs(dw)->name)) 36 + 37 + #define SET_RW(dw, dir, name, value) \ 38 + do { \ 39 + if ((dir) == EDMA_DIR_WRITE) \ 40 + SET(dw, wr_##name, value); \ 41 + else \ 42 + SET(dw, rd_##name, value); \ 43 + } while (0) 44 + 45 + #define GET_RW(dw, dir, name) \ 46 + ((dir) == EDMA_DIR_WRITE \ 47 + ? GET(dw, wr_##name) \ 48 + : GET(dw, rd_##name)) 49 + 50 + #define SET_BOTH(dw, name, value) \ 51 + do { \ 52 + SET(dw, wr_##name, value); \ 53 + SET(dw, rd_##name, value); \ 54 + } while (0) 55 + 56 + static inline struct dw_edma_v0_ch_regs __iomem * 57 + __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) 58 + { 59 + if (dw->mode == EDMA_MODE_LEGACY) 60 + return &(__dw_regs(dw)->type.legacy.ch); 61 + 62 + if (dir == EDMA_DIR_WRITE) 63 + return &__dw_regs(dw)->type.unroll.ch[ch].wr; 64 + 65 + return &__dw_regs(dw)->type.unroll.ch[ch].rd; 66 + } 67 + 68 + static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, 69 + u32 value, void __iomem *addr) 70 + { 71 + if (dw->mode == EDMA_MODE_LEGACY) { 72 + u32 viewport_sel; 73 + unsigned long flags; 74 + 75 + raw_spin_lock_irqsave(&dw->lock, flags); 76 + 77 + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); 78 + if (dir == EDMA_DIR_READ) 79 + viewport_sel |= BIT(31); 80 + 81 + writel(viewport_sel, 82 + &(__dw_regs(dw)->type.legacy.viewport_sel)); 83 + writel(value, addr); 84 + 85 + raw_spin_unlock_irqrestore(&dw->lock, flags); 86 + } else { 87 + writel(value, addr); 88 + } 89 + } 90 + 91 + static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, 92 + const void __iomem *addr) 93 + { 94 + u32 value; 95 + 96 + if (dw->mode == EDMA_MODE_LEGACY) { 97 + u32 viewport_sel; 98 + unsigned long flags; 99 + 100 + raw_spin_lock_irqsave(&dw->lock, flags); 101 + 102 + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); 103 + if (dir == EDMA_DIR_READ) 104 + viewport_sel |= BIT(31); 105 + 106 + writel(viewport_sel, 107 + &(__dw_regs(dw)->type.legacy.viewport_sel)); 108 + value = readl(addr); 109 + 110 + raw_spin_unlock_irqrestore(&dw->lock, flags); 111 + } else { 112 + value = readl(addr); 113 + } 114 + 115 + return value; 116 + } 117 + 118 + #define SET_CH(dw, dir, ch, name, value) \ 119 + writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) 120 + 121 + #define GET_CH(dw, dir, ch, name) \ 122 + readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) 123 + 124 + #define SET_LL(ll, value) \ 125 + writel(value, ll) 126 + 127 + /* eDMA management callbacks */ 128 + void dw_edma_v0_core_off(struct dw_edma *dw) 129 + { 130 + SET_BOTH(dw, int_mask, 
EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); 131 + SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); 132 + SET_BOTH(dw, engine_en, 0); 133 + } 134 + 135 + u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) 136 + { 137 + u32 num_ch; 138 + 139 + if (dir == EDMA_DIR_WRITE) 140 + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl)); 141 + else 142 + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl)); 143 + 144 + if (num_ch > EDMA_V0_MAX_NR_CH) 145 + num_ch = EDMA_V0_MAX_NR_CH; 146 + 147 + return (u16)num_ch; 148 + } 149 + 150 + enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) 151 + { 152 + struct dw_edma *dw = chan->chip->dw; 153 + u32 tmp; 154 + 155 + tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, 156 + GET_CH(dw, chan->dir, chan->id, ch_control1)); 157 + 158 + if (tmp == 1) 159 + return DMA_IN_PROGRESS; 160 + else if (tmp == 3) 161 + return DMA_COMPLETE; 162 + else 163 + return DMA_ERROR; 164 + } 165 + 166 + void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) 167 + { 168 + struct dw_edma *dw = chan->chip->dw; 169 + 170 + SET_RW(dw, chan->dir, int_clear, 171 + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); 172 + } 173 + 174 + void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) 175 + { 176 + struct dw_edma *dw = chan->chip->dw; 177 + 178 + SET_RW(dw, chan->dir, int_clear, 179 + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); 180 + } 181 + 182 + u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) 183 + { 184 + return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status)); 185 + } 186 + 187 + u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) 188 + { 189 + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status)); 190 + } 191 + 192 + static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) 193 + { 194 + struct dw_edma_burst *child; 195 + struct dw_edma_v0_lli *lli; 196 + struct dw_edma_v0_llp *llp; 197 + u32 control = 0, i = 0; 198 + u64 sar, dar, addr; 199 + int j; 200 + 201 + lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; 202 + 203 + if (chunk->cb) 204 + control = DW_EDMA_V0_CB; 205 + 206 + j = chunk->bursts_alloc; 207 + list_for_each_entry(child, &chunk->burst->list, list) { 208 + j--; 209 + if (!j) 210 + control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); 211 + 212 + /* Channel control */ 213 + SET_LL(&lli[i].control, control); 214 + /* Transfer size */ 215 + SET_LL(&lli[i].transfer_size, child->sz); 216 + /* SAR - low, high */ 217 + sar = cpu_to_le64(child->sar); 218 + SET_LL(&lli[i].sar_low, lower_32_bits(sar)); 219 + SET_LL(&lli[i].sar_high, upper_32_bits(sar)); 220 + /* DAR - low, high */ 221 + dar = cpu_to_le64(child->dar); 222 + SET_LL(&lli[i].dar_low, lower_32_bits(dar)); 223 + SET_LL(&lli[i].dar_high, upper_32_bits(dar)); 224 + i++; 225 + } 226 + 227 + llp = (struct dw_edma_v0_llp *)&lli[i]; 228 + control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; 229 + if (!chunk->cb) 230 + control |= DW_EDMA_V0_CB; 231 + 232 + /* Channel control */ 233 + SET_LL(&llp->control, control); 234 + /* Linked list - low, high */ 235 + addr = cpu_to_le64(chunk->ll_region.paddr); 236 + SET_LL(&llp->llp_low, lower_32_bits(addr)); 237 + SET_LL(&llp->llp_high, upper_32_bits(addr)); 238 + } 239 + 240 + void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) 241 + { 242 + struct dw_edma_chan *chan = chunk->chan; 243 + struct dw_edma *dw = chan->chip->dw; 244 + u32 tmp; 245 + u64 llp; 246 + 247 + 
dw_edma_v0_core_write_chunk(chunk); 248 + 249 + if (first) { 250 + /* Enable engine */ 251 + SET_RW(dw, chan->dir, engine_en, BIT(0)); 252 + /* Interrupt unmask - done, abort */ 253 + tmp = GET_RW(dw, chan->dir, int_mask); 254 + tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); 255 + tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); 256 + SET_RW(dw, chan->dir, int_mask, tmp); 257 + /* Linked list error */ 258 + tmp = GET_RW(dw, chan->dir, linked_list_err_en); 259 + tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); 260 + SET_RW(dw, chan->dir, linked_list_err_en, tmp); 261 + /* Channel control */ 262 + SET_CH(dw, chan->dir, chan->id, ch_control1, 263 + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); 264 + /* Linked list - low, high */ 265 + llp = cpu_to_le64(chunk->ll_region.paddr); 266 + SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); 267 + SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); 268 + } 269 + /* Doorbell */ 270 + SET_RW(dw, chan->dir, doorbell, 271 + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); 272 + } 273 + 274 + int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) 275 + { 276 + struct dw_edma *dw = chan->chip->dw; 277 + u32 tmp = 0; 278 + 279 + /* MSI done addr - low, high */ 280 + SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo); 281 + SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi); 282 + /* MSI abort addr - low, high */ 283 + SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo); 284 + SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi); 285 + /* MSI data - low, high */ 286 + switch (chan->id) { 287 + case 0: 288 + case 1: 289 + tmp = GET_RW(dw, chan->dir, ch01_imwr_data); 290 + break; 291 + 292 + case 2: 293 + case 3: 294 + tmp = GET_RW(dw, chan->dir, ch23_imwr_data); 295 + break; 296 + 297 + case 4: 298 + case 5: 299 + tmp = GET_RW(dw, chan->dir, ch45_imwr_data); 300 + break; 301 + 302 + case 6: 303 + case 7: 304 + tmp = GET_RW(dw, chan->dir, ch67_imwr_data); 305 + break; 306 + } 307 + 308 + if (chan->id & BIT(0)) { 309 + /* Channel odd {1, 3, 5, 7} */ 310 + tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; 311 + tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, 312 + chan->msi.data); 313 + } else { 314 + /* Channel even {0, 2, 4, 6} */ 315 + tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; 316 + tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, 317 + chan->msi.data); 318 + } 319 + 320 + switch (chan->id) { 321 + case 0: 322 + case 1: 323 + SET_RW(dw, chan->dir, ch01_imwr_data, tmp); 324 + break; 325 + 326 + case 2: 327 + case 3: 328 + SET_RW(dw, chan->dir, ch23_imwr_data, tmp); 329 + break; 330 + 331 + case 4: 332 + case 5: 333 + SET_RW(dw, chan->dir, ch45_imwr_data, tmp); 334 + break; 335 + 336 + case 6: 337 + case 7: 338 + SET_RW(dw, chan->dir, ch67_imwr_data, tmp); 339 + break; 340 + } 341 + 342 + return 0; 343 + } 344 + 345 + /* eDMA debugfs callbacks */ 346 + void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) 347 + { 348 + dw_edma_v0_debugfs_on(chip); 349 + } 350 + 351 + void dw_edma_v0_core_debugfs_off(void) 352 + { 353 + dw_edma_v0_debugfs_off(); 354 + }
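A quick size check on the linked list that dw_edma_v0_core_write_chunk() lays out: each dw_edma_v0_lli is six u32 fields, i.e. 24 bytes, matching EDMA_LL_SZ in dw-edma-core.h, and the terminating dw_edma_v0_llp (four u32 fields) fits inside the same slot. A standalone model of the capacity math, using the 8-Mbyte linked-list region from snps_edda_data:

#include <stdio.h>

int main(void)
{
	unsigned long ll_region_sz = 8UL << 20;	/* 8 Mbytes (snps_edda_data) */
	unsigned long elem_sz = 24;		/* 6 u32 fields = EDMA_LL_SZ */

	/* Prints "349525 elements": the whole-region upper bound, before
	 * the per-channel split done in dw_edma_channel_setup(). */
	printf("%lu elements\n", ll_region_sz / elem_sz);
	return 0;
}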
+28
drivers/dma/dw-edma/dw-edma-v0-core.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA v0 core 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #ifndef _DW_EDMA_V0_CORE_H 10 + #define _DW_EDMA_V0_CORE_H 11 + 12 + #include <linux/dma/edma.h> 13 + 14 + /* eDMA management callbacks */ 15 + void dw_edma_v0_core_off(struct dw_edma *chan); 16 + u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir); 17 + enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan); 18 + void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan); 19 + void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan); 20 + u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir); 21 + u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir); 22 + void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); 23 + int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); 24 + /* eDMA debug fs callbacks */ 25 + void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); 26 + void dw_edma_v0_core_debugfs_off(void); 27 + 28 + #endif /* _DW_EDMA_V0_CORE_H */
+310
drivers/dma/dw-edma/dw-edma-v0-debugfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA v0 core 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #include <linux/debugfs.h> 10 + #include <linux/bitfield.h> 11 + 12 + #include "dw-edma-v0-debugfs.h" 13 + #include "dw-edma-v0-regs.h" 14 + #include "dw-edma-core.h" 15 + 16 + #define REGS_ADDR(name) \ 17 + ((dma_addr_t *)&regs->name) 18 + #define REGISTER(name) \ 19 + { #name, REGS_ADDR(name) } 20 + 21 + #define WR_REGISTER(name) \ 22 + { #name, REGS_ADDR(wr_##name) } 23 + #define RD_REGISTER(name) \ 24 + { #name, REGS_ADDR(rd_##name) } 25 + 26 + #define WR_REGISTER_LEGACY(name) \ 27 + { #name, REGS_ADDR(type.legacy.wr_##name) } 28 + #define RD_REGISTER_LEGACY(name) \ 29 + { #name, REGS_ADDR(type.legacy.rd_##name) } 30 + 31 + #define WR_REGISTER_UNROLL(name) \ 32 + { #name, REGS_ADDR(type.unroll.wr_##name) } 33 + #define RD_REGISTER_UNROLL(name) \ 34 + { #name, REGS_ADDR(type.unroll.rd_##name) } 35 + 36 + #define WRITE_STR "write" 37 + #define READ_STR "read" 38 + #define CHANNEL_STR "channel" 39 + #define REGISTERS_STR "registers" 40 + 41 + static struct dentry *base_dir; 42 + static struct dw_edma *dw; 43 + static struct dw_edma_v0_regs *regs; 44 + 45 + static struct { 46 + void *start; 47 + void *end; 48 + } lim[2][EDMA_V0_MAX_NR_CH]; 49 + 50 + struct debugfs_entries { 51 + char name[24]; 52 + dma_addr_t *reg; 53 + }; 54 + 55 + static int dw_edma_debugfs_u32_get(void *data, u64 *val) 56 + { 57 + if (dw->mode == EDMA_MODE_LEGACY && 58 + data >= (void *)&regs->type.legacy.ch) { 59 + void *ptr = (void *)&regs->type.legacy.ch; 60 + u32 viewport_sel = 0; 61 + unsigned long flags; 62 + u16 ch; 63 + 64 + for (ch = 0; ch < dw->wr_ch_cnt; ch++) 65 + if (lim[0][ch].start >= data && data < lim[0][ch].end) { 66 + ptr += (data - lim[0][ch].start); 67 + goto legacy_sel_wr; 68 + } 69 + 70 + for (ch = 0; ch < dw->rd_ch_cnt; ch++) 71 + if (lim[1][ch].start >= data && data < lim[1][ch].end) { 72 + ptr += (data - lim[1][ch].start); 73 + goto legacy_sel_rd; 74 + } 75 + 76 + return 0; 77 + legacy_sel_rd: 78 + viewport_sel = BIT(31); 79 + legacy_sel_wr: 80 + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); 81 + 82 + raw_spin_lock_irqsave(&dw->lock, flags); 83 + 84 + writel(viewport_sel, &regs->type.legacy.viewport_sel); 85 + *val = readl(ptr); 86 + 87 + raw_spin_unlock_irqrestore(&dw->lock, flags); 88 + } else { 89 + *val = readl(data); 90 + } 91 + 92 + return 0; 93 + } 94 + DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); 95 + 96 + static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], 97 + int nr_entries, struct dentry *dir) 98 + { 99 + int i; 100 + 101 + for (i = 0; i < nr_entries; i++) { 102 + if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, 103 + entries[i].reg, &fops_x32)) 104 + break; 105 + } 106 + } 107 + 108 + static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, 109 + struct dentry *dir) 110 + { 111 + int nr_entries; 112 + const struct debugfs_entries debugfs_regs[] = { 113 + REGISTER(ch_control1), 114 + REGISTER(ch_control2), 115 + REGISTER(transfer_size), 116 + REGISTER(sar_low), 117 + REGISTER(sar_high), 118 + REGISTER(dar_low), 119 + REGISTER(dar_high), 120 + REGISTER(llp_low), 121 + REGISTER(llp_high), 122 + }; 123 + 124 + nr_entries = ARRAY_SIZE(debugfs_regs); 125 + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); 126 + } 127 + 128 + static void 
dw_edma_debugfs_regs_wr(struct dentry *dir) 129 + { 130 + const struct debugfs_entries debugfs_regs[] = { 131 + /* eDMA global registers */ 132 + WR_REGISTER(engine_en), 133 + WR_REGISTER(doorbell), 134 + WR_REGISTER(ch_arb_weight_low), 135 + WR_REGISTER(ch_arb_weight_high), 136 + /* eDMA interrupts registers */ 137 + WR_REGISTER(int_status), 138 + WR_REGISTER(int_mask), 139 + WR_REGISTER(int_clear), 140 + WR_REGISTER(err_status), 141 + WR_REGISTER(done_imwr_low), 142 + WR_REGISTER(done_imwr_high), 143 + WR_REGISTER(abort_imwr_low), 144 + WR_REGISTER(abort_imwr_high), 145 + WR_REGISTER(ch01_imwr_data), 146 + WR_REGISTER(ch23_imwr_data), 147 + WR_REGISTER(ch45_imwr_data), 148 + WR_REGISTER(ch67_imwr_data), 149 + WR_REGISTER(linked_list_err_en), 150 + }; 151 + const struct debugfs_entries debugfs_unroll_regs[] = { 152 + /* eDMA channel context grouping */ 153 + WR_REGISTER_UNROLL(engine_chgroup), 154 + WR_REGISTER_UNROLL(engine_hshake_cnt_low), 155 + WR_REGISTER_UNROLL(engine_hshake_cnt_high), 156 + WR_REGISTER_UNROLL(ch0_pwr_en), 157 + WR_REGISTER_UNROLL(ch1_pwr_en), 158 + WR_REGISTER_UNROLL(ch2_pwr_en), 159 + WR_REGISTER_UNROLL(ch3_pwr_en), 160 + WR_REGISTER_UNROLL(ch4_pwr_en), 161 + WR_REGISTER_UNROLL(ch5_pwr_en), 162 + WR_REGISTER_UNROLL(ch6_pwr_en), 163 + WR_REGISTER_UNROLL(ch7_pwr_en), 164 + }; 165 + struct dentry *regs_dir, *ch_dir; 166 + int nr_entries, i; 167 + char name[16]; 168 + 169 + regs_dir = debugfs_create_dir(WRITE_STR, dir); 170 + if (!regs_dir) 171 + return; 172 + 173 + nr_entries = ARRAY_SIZE(debugfs_regs); 174 + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); 175 + 176 + if (dw->mode == EDMA_MODE_UNROLL) { 177 + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); 178 + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, 179 + regs_dir); 180 + } 181 + 182 + for (i = 0; i < dw->wr_ch_cnt; i++) { 183 + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); 184 + 185 + ch_dir = debugfs_create_dir(name, regs_dir); 186 + if (!ch_dir) 187 + return; 188 + 189 + dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir); 190 + 191 + lim[0][i].start = &regs->type.unroll.ch[i].wr; 192 + lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0]; 193 + } 194 + } 195 + 196 + static void dw_edma_debugfs_regs_rd(struct dentry *dir) 197 + { 198 + const struct debugfs_entries debugfs_regs[] = { 199 + /* eDMA global registers */ 200 + RD_REGISTER(engine_en), 201 + RD_REGISTER(doorbell), 202 + RD_REGISTER(ch_arb_weight_low), 203 + RD_REGISTER(ch_arb_weight_high), 204 + /* eDMA interrupts registers */ 205 + RD_REGISTER(int_status), 206 + RD_REGISTER(int_mask), 207 + RD_REGISTER(int_clear), 208 + RD_REGISTER(err_status_low), 209 + RD_REGISTER(err_status_high), 210 + RD_REGISTER(linked_list_err_en), 211 + RD_REGISTER(done_imwr_low), 212 + RD_REGISTER(done_imwr_high), 213 + RD_REGISTER(abort_imwr_low), 214 + RD_REGISTER(abort_imwr_high), 215 + RD_REGISTER(ch01_imwr_data), 216 + RD_REGISTER(ch23_imwr_data), 217 + RD_REGISTER(ch45_imwr_data), 218 + RD_REGISTER(ch67_imwr_data), 219 + }; 220 + const struct debugfs_entries debugfs_unroll_regs[] = { 221 + /* eDMA channel context grouping */ 222 + RD_REGISTER_UNROLL(engine_chgroup), 223 + RD_REGISTER_UNROLL(engine_hshake_cnt_low), 224 + RD_REGISTER_UNROLL(engine_hshake_cnt_high), 225 + RD_REGISTER_UNROLL(ch0_pwr_en), 226 + RD_REGISTER_UNROLL(ch1_pwr_en), 227 + RD_REGISTER_UNROLL(ch2_pwr_en), 228 + RD_REGISTER_UNROLL(ch3_pwr_en), 229 + RD_REGISTER_UNROLL(ch4_pwr_en), 230 + RD_REGISTER_UNROLL(ch5_pwr_en), 231 + 
RD_REGISTER_UNROLL(ch6_pwr_en), 232 + RD_REGISTER_UNROLL(ch7_pwr_en), 233 + }; 234 + struct dentry *regs_dir, *ch_dir; 235 + int nr_entries, i; 236 + char name[16]; 237 + 238 + regs_dir = debugfs_create_dir(READ_STR, dir); 239 + if (!regs_dir) 240 + return; 241 + 242 + nr_entries = ARRAY_SIZE(debugfs_regs); 243 + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); 244 + 245 + if (dw->mode == EDMA_MODE_UNROLL) { 246 + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); 247 + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, 248 + regs_dir); 249 + } 250 + 251 + for (i = 0; i < dw->rd_ch_cnt; i++) { 252 + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); 253 + 254 + ch_dir = debugfs_create_dir(name, regs_dir); 255 + if (!ch_dir) 256 + return; 257 + 258 + dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir); 259 + 260 + lim[1][i].start = &regs->type.unroll.ch[i].rd; 261 + lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0]; 262 + } 263 + } 264 + 265 + static void dw_edma_debugfs_regs(void) 266 + { 267 + const struct debugfs_entries debugfs_regs[] = { 268 + REGISTER(ctrl_data_arb_prior), 269 + REGISTER(ctrl), 270 + }; 271 + struct dentry *regs_dir; 272 + int nr_entries; 273 + 274 + regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir); 275 + if (!regs_dir) 276 + return; 277 + 278 + nr_entries = ARRAY_SIZE(debugfs_regs); 279 + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); 280 + 281 + dw_edma_debugfs_regs_wr(regs_dir); 282 + dw_edma_debugfs_regs_rd(regs_dir); 283 + } 284 + 285 + void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) 286 + { 287 + dw = chip->dw; 288 + if (!dw) 289 + return; 290 + 291 + regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr; 292 + if (!regs) 293 + return; 294 + 295 + base_dir = debugfs_create_dir(dw->name, 0); 296 + if (!base_dir) 297 + return; 298 + 299 + debugfs_create_u32("version", 0444, base_dir, &dw->version); 300 + debugfs_create_u32("mode", 0444, base_dir, &dw->mode); 301 + debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt); 302 + debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt); 303 + 304 + dw_edma_debugfs_regs(); 305 + } 306 + 307 + void dw_edma_v0_debugfs_off(void) 308 + { 309 + debugfs_remove_recursive(base_dir); 310 + }
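Putting dw_edma_v0_debugfs_on() together with the wr/rd helpers, the resulting tree (sketched from the code above, entry lists abbreviated) looks like:

	dw-edma-core:0/
		version
		mode
		wr_ch_cnt
		rd_ch_cnt
		registers/
			ctrl_data_arb_prior
			ctrl
			write/
				engine_en ... linked_list_err_en
				channel:0/ ... channel:<wr_ch_cnt-1>/
			read/
				engine_en ... ch67_imwr_data
				channel:0/ ... channel:<rd_ch_cnt-1>/

Each leaf is a read-only x32 attribute backed by dw_edma_debugfs_u32_get(), which detours through the legacy viewport when the IP is not in unroll mode.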
+27
drivers/dma/dw-edma/dw-edma-v0-debugfs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA v0 core 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #ifndef _DW_EDMA_V0_DEBUG_FS_H 10 + #define _DW_EDMA_V0_DEBUG_FS_H 11 + 12 + #include <linux/dma/edma.h> 13 + 14 + #ifdef CONFIG_DEBUG_FS 15 + void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip); 16 + void dw_edma_v0_debugfs_off(void); 17 + #else 18 + static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) 19 + { 20 + } 21 + 22 + static inline void dw_edma_v0_debugfs_off(void) 23 + { 24 + } 25 + #endif /* CONFIG_DEBUG_FS */ 26 + 27 + #endif /* _DW_EDMA_V0_DEBUG_FS_H */
+158
drivers/dma/dw-edma/dw-edma-v0-regs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA v0 core 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #ifndef _DW_EDMA_V0_REGS_H 10 + #define _DW_EDMA_V0_REGS_H 11 + 12 + #include <linux/dmaengine.h> 13 + 14 + #define EDMA_V0_MAX_NR_CH 8 15 + #define EDMA_V0_VIEWPORT_MASK GENMASK(2, 0) 16 + #define EDMA_V0_DONE_INT_MASK GENMASK(7, 0) 17 + #define EDMA_V0_ABORT_INT_MASK GENMASK(23, 16) 18 + #define EDMA_V0_WRITE_CH_COUNT_MASK GENMASK(3, 0) 19 + #define EDMA_V0_READ_CH_COUNT_MASK GENMASK(19, 16) 20 + #define EDMA_V0_CH_STATUS_MASK GENMASK(6, 5) 21 + #define EDMA_V0_DOORBELL_CH_MASK GENMASK(2, 0) 22 + #define EDMA_V0_LINKED_LIST_ERR_MASK GENMASK(7, 0) 23 + 24 + #define EDMA_V0_CH_ODD_MSI_DATA_MASK GENMASK(31, 16) 25 + #define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0) 26 + 27 + struct dw_edma_v0_ch_regs { 28 + u32 ch_control1; /* 0x000 */ 29 + u32 ch_control2; /* 0x004 */ 30 + u32 transfer_size; /* 0x008 */ 31 + u32 sar_low; /* 0x00c */ 32 + u32 sar_high; /* 0x010 */ 33 + u32 dar_low; /* 0x014 */ 34 + u32 dar_high; /* 0x018 */ 35 + u32 llp_low; /* 0x01c */ 36 + u32 llp_high; /* 0x020 */ 37 + }; 38 + 39 + struct dw_edma_v0_ch { 40 + struct dw_edma_v0_ch_regs wr; /* 0x200 */ 41 + u32 padding_1[55]; /* [0x224..0x2fc] */ 42 + struct dw_edma_v0_ch_regs rd; /* 0x300 */ 43 + u32 padding_2[55]; /* [0x224..0x2fc] */ 44 + }; 45 + 46 + struct dw_edma_v0_unroll { 47 + u32 padding_1; /* 0x0f8 */ 48 + u32 wr_engine_chgroup; /* 0x100 */ 49 + u32 rd_engine_chgroup; /* 0x104 */ 50 + u32 wr_engine_hshake_cnt_low; /* 0x108 */ 51 + u32 wr_engine_hshake_cnt_high; /* 0x10c */ 52 + u32 padding_2[2]; /* [0x110..0x114] */ 53 + u32 rd_engine_hshake_cnt_low; /* 0x118 */ 54 + u32 rd_engine_hshake_cnt_high; /* 0x11c */ 55 + u32 padding_3[2]; /* [0x120..0x124] */ 56 + u32 wr_ch0_pwr_en; /* 0x128 */ 57 + u32 wr_ch1_pwr_en; /* 0x12c */ 58 + u32 wr_ch2_pwr_en; /* 0x130 */ 59 + u32 wr_ch3_pwr_en; /* 0x134 */ 60 + u32 wr_ch4_pwr_en; /* 0x138 */ 61 + u32 wr_ch5_pwr_en; /* 0x13c */ 62 + u32 wr_ch6_pwr_en; /* 0x140 */ 63 + u32 wr_ch7_pwr_en; /* 0x144 */ 64 + u32 padding_4[8]; /* [0x148..0x164] */ 65 + u32 rd_ch0_pwr_en; /* 0x168 */ 66 + u32 rd_ch1_pwr_en; /* 0x16c */ 67 + u32 rd_ch2_pwr_en; /* 0x170 */ 68 + u32 rd_ch3_pwr_en; /* 0x174 */ 69 + u32 rd_ch4_pwr_en; /* 0x178 */ 70 + u32 rd_ch5_pwr_en; /* 0x18c */ 71 + u32 rd_ch6_pwr_en; /* 0x180 */ 72 + u32 rd_ch7_pwr_en; /* 0x184 */ 73 + u32 padding_5[30]; /* [0x188..0x1fc] */ 74 + struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */ 75 + }; 76 + 77 + struct dw_edma_v0_legacy { 78 + u32 viewport_sel; /* 0x0f8 */ 79 + struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */ 80 + }; 81 + 82 + struct dw_edma_v0_regs { 83 + /* eDMA global registers */ 84 + u32 ctrl_data_arb_prior; /* 0x000 */ 85 + u32 padding_1; /* 0x004 */ 86 + u32 ctrl; /* 0x008 */ 87 + u32 wr_engine_en; /* 0x00c */ 88 + u32 wr_doorbell; /* 0x010 */ 89 + u32 padding_2; /* 0x014 */ 90 + u32 wr_ch_arb_weight_low; /* 0x018 */ 91 + u32 wr_ch_arb_weight_high; /* 0x01c */ 92 + u32 padding_3[3]; /* [0x020..0x028] */ 93 + u32 rd_engine_en; /* 0x02c */ 94 + u32 rd_doorbell; /* 0x030 */ 95 + u32 padding_4; /* 0x034 */ 96 + u32 rd_ch_arb_weight_low; /* 0x038 */ 97 + u32 rd_ch_arb_weight_high; /* 0x03c */ 98 + u32 padding_5[3]; /* [0x040..0x048] */ 99 + /* eDMA interrupts registers */ 100 + u32 wr_int_status; /* 0x04c */ 101 + u32 padding_6; /* 0x050 */ 102 + u32 wr_int_mask; /* 
0x054 */ 103 + u32 wr_int_clear; /* 0x058 */ 104 + u32 wr_err_status; /* 0x05c */ 105 + u32 wr_done_imwr_low; /* 0x060 */ 106 + u32 wr_done_imwr_high; /* 0x064 */ 107 + u32 wr_abort_imwr_low; /* 0x068 */ 108 + u32 wr_abort_imwr_high; /* 0x06c */ 109 + u32 wr_ch01_imwr_data; /* 0x070 */ 110 + u32 wr_ch23_imwr_data; /* 0x074 */ 111 + u32 wr_ch45_imwr_data; /* 0x078 */ 112 + u32 wr_ch67_imwr_data; /* 0x07c */ 113 + u32 padding_7[4]; /* [0x080..0x08c] */ 114 + u32 wr_linked_list_err_en; /* 0x090 */ 115 + u32 padding_8[3]; /* [0x094..0x09c] */ 116 + u32 rd_int_status; /* 0x0a0 */ 117 + u32 padding_9; /* 0x0a4 */ 118 + u32 rd_int_mask; /* 0x0a8 */ 119 + u32 rd_int_clear; /* 0x0ac */ 120 + u32 padding_10; /* 0x0b0 */ 121 + u32 rd_err_status_low; /* 0x0b4 */ 122 + u32 rd_err_status_high; /* 0x0b8 */ 123 + u32 padding_11[2]; /* [0x0bc..0x0c0] */ 124 + u32 rd_linked_list_err_en; /* 0x0c4 */ 125 + u32 padding_12; /* 0x0c8 */ 126 + u32 rd_done_imwr_low; /* 0x0cc */ 127 + u32 rd_done_imwr_high; /* 0x0d0 */ 128 + u32 rd_abort_imwr_low; /* 0x0d4 */ 129 + u32 rd_abort_imwr_high; /* 0x0d8 */ 130 + u32 rd_ch01_imwr_data; /* 0x0dc */ 131 + u32 rd_ch23_imwr_data; /* 0x0e0 */ 132 + u32 rd_ch45_imwr_data; /* 0x0e4 */ 133 + u32 rd_ch67_imwr_data; /* 0x0e8 */ 134 + u32 padding_13[4]; /* [0x0ec..0x0f8] */ 135 + /* eDMA channel context grouping */ 136 + union dw_edma_v0_type { 137 + struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */ 138 + struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */ 139 + } type; 140 + }; 141 + 142 + struct dw_edma_v0_lli { 143 + u32 control; 144 + u32 transfer_size; 145 + u32 sar_low; 146 + u32 sar_high; 147 + u32 dar_low; 148 + u32 dar_high; 149 + }; 150 + 151 + struct dw_edma_v0_llp { 152 + u32 control; 153 + u32 reserved; 154 + u32 llp_low; 155 + u32 llp_high; 156 + }; 157 + 158 + #endif /* _DW_EDMA_V0_REGS_H */
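The GENMASK() fields at the top of this header decode the ctrl register, and a worked example helps confirm the bit maths. EDMA_V0_WRITE_CH_COUNT_MASK is GENMASK(3, 0) = 0xf and EDMA_V0_READ_CH_COUNT_MASK is GENMASK(19, 16) = 0xf0000, so ctrl = 0x00080008 reports 8 write and 8 read channels. A standalone model (assumes 64-bit unsigned long; the kernel's bitfield.h does the real work):

#include <stdio.h>

#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))
/* Lowest set bit of mask = (mask & ~(mask << 1)); divide to shift down. */
#define FIELD_GET(mask, reg) \
	(((reg) & (mask)) / ((mask) & ~((mask) << 1)))

int main(void)
{
	unsigned long ctrl = 0x00080008;

	/* Prints "wr=8 rd=8". */
	printf("wr=%lu rd=%lu\n",
	       FIELD_GET(GENMASK(3, 0), ctrl),
	       FIELD_GET(GENMASK(19, 16), ctrl));
	return 0;
}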
+25 -8
drivers/dma/dw/pci.c
··· 15 15 struct dw_dma_pci_data { 16 16 const struct dw_dma_platform_data *pdata; 17 17 int (*probe)(struct dw_dma_chip *chip); 18 + int (*remove)(struct dw_dma_chip *chip); 19 + struct dw_dma_chip *chip; 18 20 }; 19 21 20 22 static const struct dw_dma_pci_data dw_pci_data = { 21 23 .probe = dw_dma_probe, 24 + .remove = dw_dma_remove, 22 25 }; 23 26 24 27 static const struct dw_dma_platform_data idma32_pdata = { ··· 37 34 static const struct dw_dma_pci_data idma32_pci_data = { 38 35 .pdata = &idma32_pdata, 39 36 .probe = idma32_dma_probe, 37 + .remove = idma32_dma_remove, 40 38 }; 41 39 42 40 static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) 43 41 { 44 - const struct dw_dma_pci_data *data = (void *)pid->driver_data; 42 + const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data; 43 + struct dw_dma_pci_data *data; 45 44 struct dw_dma_chip *chip; 46 45 int ret; 47 46 ··· 68 63 if (ret) 69 64 return ret; 70 65 66 + data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL); 67 + if (!data) 68 + return -ENOMEM; 69 + 71 70 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); 72 71 if (!chip) 73 72 return -ENOMEM; ··· 82 73 chip->irq = pdev->irq; 83 74 chip->pdata = data->pdata; 84 75 76 + data->chip = chip; 77 + 85 78 ret = data->probe(chip); 86 79 if (ret) 87 80 return ret; 88 81 89 - pci_set_drvdata(pdev, chip); 82 + pci_set_drvdata(pdev, data); 90 83 91 84 return 0; 92 85 } 93 86 94 87 static void dw_pci_remove(struct pci_dev *pdev) 95 88 { 96 - struct dw_dma_chip *chip = pci_get_drvdata(pdev); 89 + struct dw_dma_pci_data *data = pci_get_drvdata(pdev); 90 + struct dw_dma_chip *chip = data->chip; 97 91 int ret; 98 92 99 - ret = dw_dma_remove(chip); 93 + ret = data->remove(chip); 100 94 if (ret) 101 95 dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); 102 96 } ··· 108 96 109 97 static int dw_pci_suspend_late(struct device *dev) 110 98 { 111 - struct pci_dev *pci = to_pci_dev(dev); 112 - struct dw_dma_chip *chip = pci_get_drvdata(pci); 99 + struct dw_dma_pci_data *data = dev_get_drvdata(dev); 100 + struct dw_dma_chip *chip = data->chip; 113 101 114 102 return do_dw_dma_disable(chip); 115 103 }; 116 104 117 105 static int dw_pci_resume_early(struct device *dev) 118 106 { 119 - struct pci_dev *pci = to_pci_dev(dev); 120 - struct dw_dma_chip *chip = pci_get_drvdata(pci); 107 + struct dw_dma_pci_data *data = dev_get_drvdata(dev); 108 + struct dw_dma_chip *chip = data->chip; 121 109 122 110 return do_dw_dma_enable(chip); 123 111 }; ··· 142 130 /* Braswell */ 143 131 { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, 144 132 { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, 133 + 134 + /* Elkhart Lake iDMA 32-bit (OSE DMA) */ 135 + { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data }, 136 + { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data }, 137 + { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data }, 145 138 146 139 /* Haswell */ 147 140 { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
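The devm_kmemdup() in dw_pci_probe() is the key move in this hunk: pci_device_id::driver_data points at a const table entry shared by every matching device, so a per-device chip pointer cannot be stored there directly. Copying the template into device-managed memory gives each PCI function its own dw_dma_pci_data, which then carries both the ->remove() callback and the chip through drvdata to dw_pci_remove() and the PM hooks, replacing the earlier assumption that drvdata always holds a bare dw_dma_chip. That indirection is what lets the Elkhart Lake iDMA 32-bit IDs route to idma32_dma_remove() while the existing entries keep dw_dma_remove().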
+39 -28
drivers/dma/fsl-edma-common.c
··· 47 47 struct edma_regs *regs = &fsl_chan->edma->regs; 48 48 u32 ch = fsl_chan->vchan.chan.chan_id; 49 49 50 - if (fsl_chan->edma->version == v1) { 50 + if (fsl_chan->edma->drvdata->version == v1) { 51 51 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei); 52 52 edma_writeb(fsl_chan->edma, ch, regs->serq); 53 53 } else { ··· 64 64 struct edma_regs *regs = &fsl_chan->edma->regs; 65 65 u32 ch = fsl_chan->vchan.chan.chan_id; 66 66 67 - if (fsl_chan->edma->version == v1) { 67 + if (fsl_chan->edma->drvdata->version == v1) { 68 68 edma_writeb(fsl_chan->edma, ch, regs->cerq); 69 69 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei); 70 70 } else { ··· 77 77 } 78 78 EXPORT_SYMBOL_GPL(fsl_edma_disable_request); 79 79 80 + static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr, 81 + u32 off, u32 slot, bool enable) 82 + { 83 + u8 val8; 84 + 85 + if (enable) 86 + val8 = EDMAMUX_CHCFG_ENBL | slot; 87 + else 88 + val8 = EDMAMUX_CHCFG_DIS; 89 + 90 + iowrite8(val8, addr + off); 91 + } 92 + 80 93 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, 81 94 unsigned int slot, bool enable) 82 95 { 83 96 u32 ch = fsl_chan->vchan.chan.chan_id; 84 97 void __iomem *muxaddr; 85 98 unsigned int chans_per_mux, ch_off; 99 + u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; 86 100 87 - chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; 101 + chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; 88 102 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; 89 103 muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; 90 104 slot = EDMAMUX_CHCFG_SOURCE(slot); 91 105 92 - if (enable) 93 - iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); 94 - else 95 - iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); 106 + mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable); 96 107 } 97 108 EXPORT_SYMBOL_GPL(fsl_edma_chan_mux); 98 109 ··· 658 647 edma->regs.erql = edma->membase + EDMA_ERQ; 659 648 edma->regs.eeil = edma->membase + EDMA_EEI; 660 649 661 - edma->regs.serq = edma->membase + ((edma->version == v1) ? 662 - EDMA_SERQ : EDMA64_SERQ); 663 - edma->regs.cerq = edma->membase + ((edma->version == v1) ? 664 - EDMA_CERQ : EDMA64_CERQ); 665 - edma->regs.seei = edma->membase + ((edma->version == v1) ? 666 - EDMA_SEEI : EDMA64_SEEI); 667 - edma->regs.ceei = edma->membase + ((edma->version == v1) ? 668 - EDMA_CEEI : EDMA64_CEEI); 669 - edma->regs.cint = edma->membase + ((edma->version == v1) ? 670 - EDMA_CINT : EDMA64_CINT); 671 - edma->regs.cerr = edma->membase + ((edma->version == v1) ? 672 - EDMA_CERR : EDMA64_CERR); 673 - edma->regs.ssrt = edma->membase + ((edma->version == v1) ? 674 - EDMA_SSRT : EDMA64_SSRT); 675 - edma->regs.cdne = edma->membase + ((edma->version == v1) ? 676 - EDMA_CDNE : EDMA64_CDNE); 677 - edma->regs.intl = edma->membase + ((edma->version == v1) ? 678 - EDMA_INTR : EDMA64_INTL); 679 - edma->regs.errl = edma->membase + ((edma->version == v1) ? 680 - EDMA_ERR : EDMA64_ERRL); 650 + edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ? 651 + EDMA64_SERQ : EDMA_SERQ); 652 + edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ? 653 + EDMA64_CERQ : EDMA_CERQ); 654 + edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ? 655 + EDMA64_SEEI : EDMA_SEEI); 656 + edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ? 657 + EDMA64_CEEI : EDMA_CEEI); 658 + edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ? 
659 + EDMA64_CINT : EDMA_CINT); 660 + edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ? 661 + EDMA64_CERR : EDMA_CERR); 662 + edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ? 663 + EDMA64_SSRT : EDMA_SSRT); 664 + edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ? 665 + EDMA64_CDNE : EDMA_CDNE); 666 + edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ? 667 + EDMA64_INTL : EDMA_INTR); 668 + edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ? 669 + EDMA64_ERRL : EDMA_ERR); 681 670 682 - if (edma->version == v2) { 671 + if (edma->drvdata->version == v2) { 683 672 edma->regs.erqh = edma->membase + EDMA64_ERQH; 684 673 edma->regs.eeih = edma->membase + EDMA64_EEIH; 685 674 edma->regs.errh = edma->membase + EDMA64_ERRH;
+9 -1
drivers/dma/fsl-edma-common.h
··· 7 7 #define _FSL_EDMA_COMMON_H_ 8 8 9 9 #include <linux/dma-direction.h> 10 + #include <linux/platform_device.h> 10 11 #include "virt-dma.h" 11 12 12 13 #define EDMA_CR_EDBG BIT(1) ··· 141 140 v2, /* 64ch Coldfire */ 142 141 }; 143 142 143 + struct fsl_edma_drvdata { 144 + enum edma_version version; 145 + u32 dmamuxs; 146 + int (*setup_irq)(struct platform_device *pdev, 147 + struct fsl_edma_engine *fsl_edma); 148 + }; 149 + 144 150 struct fsl_edma_engine { 145 151 struct dma_device dma_dev; 146 152 void __iomem *membase; 147 153 void __iomem *muxbase[DMAMUX_NR]; 148 154 struct clk *muxclk[DMAMUX_NR]; 149 155 struct mutex fsl_edma_mutex; 156 + const struct fsl_edma_drvdata *drvdata; 150 157 u32 n_chans; 151 158 int txirq; 152 159 int errirq; 153 160 bool big_endian; 154 - enum edma_version version; 155 161 struct edma_regs regs; 156 162 struct fsl_edma_chan chans[]; 157 163 };
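With fsl_edma_drvdata in place, supporting another eDMA integration is mostly one more table entry plus an IRQ-setup hook. A hedged sketch of what such an addition could look like; the names soc_data, fsl_edma2_irq_init, and the compatible string are illustrative placeholders, not part of the hunks shown:

static struct fsl_edma_drvdata soc_data = {
	.version = v1,		/* assumption: reuses the v1 register map */
	.dmamuxs = 1,		/* assumption: a single DMAMUX instance */
	.setup_irq = fsl_edma2_irq_init,	/* hypothetical per-SoC IRQ wiring */
};

/* Then extend the match table in fsl-edma.c: */
static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data },
	{ .compatible = "vendor,soc-edma", .data = &soc_data },	/* hypothetical */
	{ /* sentinel */ }
};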
+30 -13
drivers/dma/fsl-edma.c
··· 92 92 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; 93 93 struct dma_chan *chan, *_chan; 94 94 struct fsl_edma_chan *fsl_chan; 95 - unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; 95 + u32 dmamux_nr = fsl_edma->drvdata->dmamuxs; 96 + unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr; 96 97 97 98 if (dma_spec->args_count != 2) 98 99 return NULL; ··· 181 180 clk_disable_unprepare(fsl_edma->muxclk[i]); 182 181 } 183 182 183 + static struct fsl_edma_drvdata vf610_data = { 184 + .version = v1, 185 + .dmamuxs = DMAMUX_NR, 186 + .setup_irq = fsl_edma_irq_init, 187 + }; 188 + 189 + static const struct of_device_id fsl_edma_dt_ids[] = { 190 + { .compatible = "fsl,vf610-edma", .data = &vf610_data}, 191 + { /* sentinel */ } 192 + }; 193 + MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); 194 + 184 195 static int fsl_edma_probe(struct platform_device *pdev) 185 196 { 197 + const struct of_device_id *of_id = 198 + of_match_device(fsl_edma_dt_ids, &pdev->dev); 186 199 struct device_node *np = pdev->dev.of_node; 187 200 struct fsl_edma_engine *fsl_edma; 201 + const struct fsl_edma_drvdata *drvdata = NULL; 188 202 struct fsl_edma_chan *fsl_chan; 189 203 struct edma_regs *regs; 190 204 struct resource *res; 191 205 int len, chans; 192 206 int ret, i; 207 + 208 + if (of_id) 209 + drvdata = of_id->data; 210 + if (!drvdata) { 211 + dev_err(&pdev->dev, "unable to find driver data\n"); 212 + return -EINVAL; 213 + } 193 214 194 215 ret = of_property_read_u32(np, "dma-channels", &chans); 195 216 if (ret) { ··· 224 201 if (!fsl_edma) 225 202 return -ENOMEM; 226 203 227 - fsl_edma->version = v1; 204 + fsl_edma->drvdata = drvdata; 228 205 fsl_edma->n_chans = chans; 229 206 mutex_init(&fsl_edma->fsl_edma_mutex); 230 207 ··· 236 213 fsl_edma_setup_regs(fsl_edma); 237 214 regs = &fsl_edma->regs; 238 215 239 - for (i = 0; i < DMAMUX_NR; i++) { 216 + for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { 240 217 char clkname[32]; 241 218 242 219 res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); ··· 282 259 } 283 260 284 261 edma_writel(fsl_edma, ~0, regs->intl); 285 - ret = fsl_edma_irq_init(pdev, fsl_edma); 262 + ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); 286 263 if (ret) 287 264 return ret; 288 265 ··· 314 291 if (ret) { 315 292 dev_err(&pdev->dev, 316 293 "Can't register Freescale eDMA engine. (%d)\n", ret); 317 - fsl_disable_clocks(fsl_edma, DMAMUX_NR); 294 + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); 318 295 return ret; 319 296 } 320 297 ··· 323 300 dev_err(&pdev->dev, 324 301 "Can't register Freescale eDMA of_dma. (%d)\n", ret); 325 302 dma_async_device_unregister(&fsl_edma->dma_dev); 326 - fsl_disable_clocks(fsl_edma, DMAMUX_NR); 303 + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); 327 304 return ret; 328 305 } 329 306 ··· 342 319 fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); 343 320 of_dma_controller_free(np); 344 321 dma_async_device_unregister(&fsl_edma->dma_dev); 345 - fsl_disable_clocks(fsl_edma, DMAMUX_NR); 322 + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); 346 323 347 324 return 0; 348 325 } ··· 400 377 .suspend_late = fsl_edma_suspend_late, 401 378 .resume_early = fsl_edma_resume_early, 402 379 }; 403 - 404 - static const struct of_device_id fsl_edma_dt_ids[] = { 405 - { .compatible = "fsl,vf610-edma", }, 406 - { /* sentinel */ } 407 - }; 408 - MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); 409 380 410 381 static struct platform_driver fsl_edma_driver = { 411 382 .driver = {
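Note that fsl_edma_dt_ids moves above fsl_edma_probe() in this hunk: probe now reads the fsl_edma_drvdata out of of_match_device(fsl_edma_dt_ids, &pdev->dev)->data, so the table (and vf610_data) must be visible at that point. The explicit "unable to find driver data" check keeps a binding without .data from dereferencing a NULL drvdata later in the DMAMUX and IRQ paths.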
+10 -8
drivers/dma/fsl-qdma.c
··· 113 113 /* Field definition for Descriptor offset */ 114 114 #define QDMA_CCDF_STATUS 20 115 115 #define QDMA_CCDF_OFFSET 20 116 + #define QDMA_SDDF_CMD(x) (((u64)(x)) << 32) 116 117 117 118 /* Field definition for safe loop count*/ 118 119 #define FSL_QDMA_HALT_COUNT 1500 ··· 342 341 static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, 343 342 dma_addr_t dst, dma_addr_t src, u32 len) 344 343 { 344 + u32 cmd; 345 345 struct fsl_qdma_format *sdf, *ddf; 346 346 struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; 347 347 ··· 371 369 /* This entry is the last entry. */ 372 370 qdma_csgf_set_f(csgf_dest, len); 373 371 /* Descriptor Buffer */ 374 - sdf->data = 375 - cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << 376 - FSL_QDMA_CMD_RWTTYPE_OFFSET); 377 - ddf->data = 378 - cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << 379 - FSL_QDMA_CMD_RWTTYPE_OFFSET); 380 - ddf->data |= 381 - cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); 372 + cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << 373 + FSL_QDMA_CMD_RWTTYPE_OFFSET); 374 + sdf->data = QDMA_SDDF_CMD(cmd); 375 + 376 + cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << 377 + FSL_QDMA_CMD_RWTTYPE_OFFSET); 378 + cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); 379 + ddf->data = QDMA_SDDF_CMD(cmd); 382 380 } 383 381 384 382 /*
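The new QDMA_SDDF_CMD() macro makes the descriptor layout explicit: the 32-bit command word occupies bits 63:32 of the source/destination descriptor data field, instead of being assembled with 64-bit ORs as before. A standalone model of the packing (the sample value is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define QDMA_SDDF_CMD(x)	(((uint64_t)(x)) << 32)

int main(void)
{
	uint32_t cmd = 0x12345678;	/* arbitrary sample command word */

	/* Prints "data=0x1234567800000000": command in the high half,
	 * low half left clear. */
	printf("data=0x%016llx\n",
	       (unsigned long long)QDMA_SDDF_CMD(cmd));
	return 0;
}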
+2 -2
drivers/dma/hsu/hsu.c
··· 61 61 62 62 if (hsuc->direction == DMA_MEM_TO_DEV) { 63 63 bsr = config->dst_maxburst; 64 - mtsr = config->src_addr_width; 64 + mtsr = config->dst_addr_width; 65 65 } else if (hsuc->direction == DMA_DEV_TO_MEM) { 66 66 bsr = config->src_maxburst; 67 - mtsr = config->dst_addr_width; 67 + mtsr = config->src_addr_width; 68 68 } 69 69 70 70 hsu_chan_disable(hsuc);
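The hsu fix pairs each direction with its own side of the slave config: for MEM_TO_DEV the device-side width is dst_addr_width, for DEV_TO_MEM it is src_addr_width. From a client's perspective, a TX configuration would look roughly like this sketch (chan and uart_fifo_phys are assumed to exist in the caller):

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= uart_fifo_phys,	/* assumption: device FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 16,
	};

	ret = dmaengine_slave_config(chan, &cfg);

With the fix, mtsr is taken from dst_addr_width here, where it previously read the unrelated src_addr_width.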
+2 -7
drivers/dma/imx-sdma.c
··· 1934 1934 static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) 1935 1935 { 1936 1936 struct sdma_channel *sdmac = to_sdma_chan(chan); 1937 - struct sdma_engine *sdma = sdmac->sdma; 1938 1937 struct imx_dma_data *data = fn_param; 1939 1938 1940 1939 if (!imx_dma_is_general_purpose(chan)) 1941 - return false; 1942 - 1943 - /* return false if it's not the right device */ 1944 - if (sdma->dev->of_node != data->of_node) 1945 1940 return false; 1946 1941 1947 1942 sdmac->data = *data; ··· 1966 1971 * be set to sdmac->event_id1. 1967 1972 */ 1968 1973 data.dma_request2 = 0; 1969 - data.of_node = ofdma->of_node; 1970 1974 1971 - return dma_request_channel(mask, sdma_filter_fn, &data); 1975 + return __dma_request_channel(&mask, sdma_filter_fn, &data, 1976 + ofdma->of_node); 1972 1977 } 1973 1978 1974 1979 static int sdma_probe(struct platform_device *pdev)
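This imx-sdma hunk rides on the dmaengine-core change from this series: __dma_request_channel() now takes a struct device_node * and the core compares it against each candidate's DT node, so the open-coded of_node check in sdma_filter_fn() (and the data.of_node plumbing) can go. The filter shrinks to the device-class test plus per-channel setup, and sdma_xlate() passes ofdma->of_node straight through to the core.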
+8 -3
drivers/dma/mcf-edma.c
··· 164 164 free_irq(irq, mcf_edma); 165 165 } 166 166 167 + static struct fsl_edma_drvdata mcf_data = { 168 + .version = v2, 169 + .setup_irq = mcf_edma_irq_init, 170 + }; 171 + 167 172 static int mcf_edma_probe(struct platform_device *pdev) 168 173 { 169 174 struct mcf_edma_platform_data *pdata; ··· 192 187 193 188 mcf_edma->n_chans = chans; 194 189 195 - /* Set up version for ColdFire edma */ 196 - mcf_edma->version = v2; 190 + /* Set up drvdata for ColdFire edma */ 191 + mcf_edma->drvdata = &mcf_data; 197 192 mcf_edma->big_endian = 1; 198 193 199 194 if (!mcf_edma->n_chans) { ··· 228 223 iowrite32(~0, regs->inth); 229 224 iowrite32(~0, regs->intl); 230 225 231 - ret = mcf_edma_irq_init(pdev, mcf_edma); 226 + ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma); 232 227 if (ret) 233 228 return ret; 234 229
+11
drivers/dma/mediatek/Kconfig
··· 25 25 26 26 This controller provides the channels which is dedicated to 27 27 memory-to-memory transfer to offload from CPU. 28 + 29 + config MTK_UART_APDMA 30 + tristate "MediaTek SoCs APDMA support for UART" 31 + depends on OF && SERIAL_8250_MT6577 32 + select DMA_ENGINE 33 + select DMA_VIRTUAL_CHANNELS 34 + help 35 + Support for the UART DMA engine found on MediaTek MTK SoCs. 36 + When SERIAL_8250_MT6577 is enabled, and if you want to use DMA, 37 + you can enable the config. The DMA engine can only be used 38 + with MediaTek SoCs.
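For anyone wiring this up, a sketch of the relevant config fragment, assuming an MT6577-family kernel where the 8250 UART driver is already selected:

	CONFIG_SERIAL_8250_MT6577=y
	CONFIG_MTK_UART_APDMA=m

The depends-on ties the DMA engine to its only consumer, so the option stays hidden until the MediaTek 8250 driver is enabled.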
+1
drivers/dma/mediatek/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 + obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o 2 3 obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o 3 4 obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o
+666
drivers/dma/mediatek/mtk-uart-apdma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * MediaTek UART APDMA driver. 4 + * 5 + * Copyright (c) 2019 MediaTek Inc. 6 + * Author: Long Cheng <long.cheng@mediatek.com> 7 + */ 8 + 9 + #include <linux/clk.h> 10 + #include <linux/dmaengine.h> 11 + #include <linux/dma-mapping.h> 12 + #include <linux/err.h> 13 + #include <linux/init.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/iopoll.h> 16 + #include <linux/kernel.h> 17 + #include <linux/list.h> 18 + #include <linux/module.h> 19 + #include <linux/of_device.h> 20 + #include <linux/of_dma.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/pm_runtime.h> 23 + #include <linux/slab.h> 24 + #include <linux/spinlock.h> 25 + 26 + #include "../virt-dma.h" 27 + 28 + /* The default number of virtual channel */ 29 + #define MTK_UART_APDMA_NR_VCHANS 8 30 + 31 + #define VFF_EN_B BIT(0) 32 + #define VFF_STOP_B BIT(0) 33 + #define VFF_FLUSH_B BIT(0) 34 + #define VFF_4G_EN_B BIT(0) 35 + /* rx valid size >= vff thre */ 36 + #define VFF_RX_INT_EN_B (BIT(0) | BIT(1)) 37 + /* tx left size >= vff thre */ 38 + #define VFF_TX_INT_EN_B BIT(0) 39 + #define VFF_WARM_RST_B BIT(0) 40 + #define VFF_RX_INT_CLR_B (BIT(0) | BIT(1)) 41 + #define VFF_TX_INT_CLR_B 0 42 + #define VFF_STOP_CLR_B 0 43 + #define VFF_EN_CLR_B 0 44 + #define VFF_INT_EN_CLR_B 0 45 + #define VFF_4G_SUPPORT_CLR_B 0 46 + 47 + /* 48 + * interrupt trigger level for tx 49 + * if threshold is n, no polling is required to start tx. 50 + * otherwise need polling VFF_FLUSH. 51 + */ 52 + #define VFF_TX_THRE(n) (n) 53 + /* interrupt trigger level for rx */ 54 + #define VFF_RX_THRE(n) ((n) * 3 / 4) 55 + 56 + #define VFF_RING_SIZE 0xffff 57 + /* invert this bit when wrap ring head again */ 58 + #define VFF_RING_WRAP 0x10000 59 + 60 + #define VFF_INT_FLAG 0x00 61 + #define VFF_INT_EN 0x04 62 + #define VFF_EN 0x08 63 + #define VFF_RST 0x0c 64 + #define VFF_STOP 0x10 65 + #define VFF_FLUSH 0x14 66 + #define VFF_ADDR 0x1c 67 + #define VFF_LEN 0x24 68 + #define VFF_THRE 0x28 69 + #define VFF_WPT 0x2c 70 + #define VFF_RPT 0x30 71 + /* TX: the buffer size HW can read. RX: the buffer size SW can read. */ 72 + #define VFF_VALID_SIZE 0x3c 73 + /* TX: the buffer size SW can write. RX: the buffer size HW can write. 
*/ 74 + #define VFF_LEFT_SIZE 0x40 75 + #define VFF_DEBUG_STATUS 0x50 76 + #define VFF_4G_SUPPORT 0x54 77 + 78 + struct mtk_uart_apdmadev { 79 + struct dma_device ddev; 80 + struct clk *clk; 81 + bool support_33bits; 82 + unsigned int dma_requests; 83 + }; 84 + 85 + struct mtk_uart_apdma_desc { 86 + struct virt_dma_desc vd; 87 + 88 + dma_addr_t addr; 89 + unsigned int avail_len; 90 + }; 91 + 92 + struct mtk_chan { 93 + struct virt_dma_chan vc; 94 + struct dma_slave_config cfg; 95 + struct mtk_uart_apdma_desc *desc; 96 + enum dma_transfer_direction dir; 97 + 98 + void __iomem *base; 99 + unsigned int irq; 100 + 101 + unsigned int rx_status; 102 + }; 103 + 104 + static inline struct mtk_uart_apdmadev * 105 + to_mtk_uart_apdma_dev(struct dma_device *d) 106 + { 107 + return container_of(d, struct mtk_uart_apdmadev, ddev); 108 + } 109 + 110 + static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c) 111 + { 112 + return container_of(c, struct mtk_chan, vc.chan); 113 + } 114 + 115 + static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc 116 + (struct dma_async_tx_descriptor *t) 117 + { 118 + return container_of(t, struct mtk_uart_apdma_desc, vd.tx); 119 + } 120 + 121 + static void mtk_uart_apdma_write(struct mtk_chan *c, 122 + unsigned int reg, unsigned int val) 123 + { 124 + writel(val, c->base + reg); 125 + } 126 + 127 + static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg) 128 + { 129 + return readl(c->base + reg); 130 + } 131 + 132 + static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd) 133 + { 134 + struct dma_chan *chan = vd->tx.chan; 135 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 136 + 137 + kfree(c->desc); 138 + } 139 + 140 + static void mtk_uart_apdma_start_tx(struct mtk_chan *c) 141 + { 142 + struct mtk_uart_apdmadev *mtkd = 143 + to_mtk_uart_apdma_dev(c->vc.chan.device); 144 + struct mtk_uart_apdma_desc *d = c->desc; 145 + unsigned int wpt, vff_sz; 146 + 147 + vff_sz = c->cfg.dst_port_window_size; 148 + if (!mtk_uart_apdma_read(c, VFF_LEN)) { 149 + mtk_uart_apdma_write(c, VFF_ADDR, d->addr); 150 + mtk_uart_apdma_write(c, VFF_LEN, vff_sz); 151 + mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz)); 152 + mtk_uart_apdma_write(c, VFF_WPT, 0); 153 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); 154 + 155 + if (mtkd->support_33bits) 156 + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); 157 + } 158 + 159 + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); 160 + if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) 161 + dev_err(c->vc.chan.device->dev, "Enable TX fail\n"); 162 + 163 + if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) { 164 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); 165 + return; 166 + } 167 + 168 + wpt = mtk_uart_apdma_read(c, VFF_WPT); 169 + 170 + wpt += c->desc->avail_len; 171 + if ((wpt & VFF_RING_SIZE) == vff_sz) 172 + wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP; 173 + 174 + /* Let DMA start moving data */ 175 + mtk_uart_apdma_write(c, VFF_WPT, wpt); 176 + 177 + /* HW auto set to 0 when left size >= threshold */ 178 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); 179 + if (!mtk_uart_apdma_read(c, VFF_FLUSH)) 180 + mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); 181 + } 182 + 183 + static void mtk_uart_apdma_start_rx(struct mtk_chan *c) 184 + { 185 + struct mtk_uart_apdmadev *mtkd = 186 + to_mtk_uart_apdma_dev(c->vc.chan.device); 187 + struct mtk_uart_apdma_desc *d = c->desc; 188 + unsigned int vff_sz; 189 + 190 + vff_sz = c->cfg.src_port_window_size; 191 + if 
(!mtk_uart_apdma_read(c, VFF_LEN)) { 192 + mtk_uart_apdma_write(c, VFF_ADDR, d->addr); 193 + mtk_uart_apdma_write(c, VFF_LEN, vff_sz); 194 + mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz)); 195 + mtk_uart_apdma_write(c, VFF_RPT, 0); 196 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); 197 + 198 + if (mtkd->support_33bits) 199 + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); 200 + } 201 + 202 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); 203 + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); 204 + if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) 205 + dev_err(c->vc.chan.device->dev, "Enable RX fail\n"); 206 + } 207 + 208 + static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) 209 + { 210 + struct mtk_uart_apdma_desc *d = c->desc; 211 + 212 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); 213 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); 214 + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); 215 + 216 + list_del(&d->vd.node); 217 + vchan_cookie_complete(&d->vd); 218 + } 219 + 220 + static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) 221 + { 222 + struct mtk_uart_apdma_desc *d = c->desc; 223 + unsigned int len, wg, rg; 224 + int cnt; 225 + 226 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); 227 + 228 + if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE)) 229 + return; 230 + 231 + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); 232 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); 233 + 234 + len = c->cfg.src_port_window_size; 235 + rg = mtk_uart_apdma_read(c, VFF_RPT); 236 + wg = mtk_uart_apdma_read(c, VFF_WPT); 237 + cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE); 238 + 239 + /* 240 + * The buffer is ring buffer. If wrap bit different, 241 + * represents the start of the next cycle for WPT 242 + */ 243 + if ((rg ^ wg) & VFF_RING_WRAP) 244 + cnt += len; 245 + 246 + c->rx_status = d->avail_len - cnt; 247 + mtk_uart_apdma_write(c, VFF_RPT, wg); 248 + 249 + list_del(&d->vd.node); 250 + vchan_cookie_complete(&d->vd); 251 + } 252 + 253 + static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id) 254 + { 255 + struct dma_chan *chan = (struct dma_chan *)dev_id; 256 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 257 + unsigned long flags; 258 + 259 + spin_lock_irqsave(&c->vc.lock, flags); 260 + if (c->dir == DMA_DEV_TO_MEM) 261 + mtk_uart_apdma_rx_handler(c); 262 + else if (c->dir == DMA_MEM_TO_DEV) 263 + mtk_uart_apdma_tx_handler(c); 264 + spin_unlock_irqrestore(&c->vc.lock, flags); 265 + 266 + return IRQ_HANDLED; 267 + } 268 + 269 + static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) 270 + { 271 + struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); 272 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 273 + unsigned int status; 274 + int ret; 275 + 276 + ret = pm_runtime_get_sync(mtkd->ddev.dev); 277 + if (ret < 0) { 278 + pm_runtime_put_noidle(chan->device->dev); 279 + return ret; 280 + } 281 + 282 + mtk_uart_apdma_write(c, VFF_ADDR, 0); 283 + mtk_uart_apdma_write(c, VFF_THRE, 0); 284 + mtk_uart_apdma_write(c, VFF_LEN, 0); 285 + mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B); 286 + 287 + ret = readx_poll_timeout(readl, c->base + VFF_EN, 288 + status, !status, 10, 100); 289 + if (ret) 290 + return ret; 291 + 292 + ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, 293 + IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); 294 + if (ret < 0) { 295 + dev_err(chan->device->dev, "Can't request dma IRQ\n"); 296 + return -EINVAL; 297 + } 298 + 299 + if (mtkd->support_33bits) 300 + 
mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); 301 + 302 + return ret; 303 + } 304 + 305 + static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan) 306 + { 307 + struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); 308 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 309 + 310 + free_irq(c->irq, chan); 311 + 312 + tasklet_kill(&c->vc.task); 313 + 314 + vchan_free_chan_resources(&c->vc); 315 + 316 + pm_runtime_put_sync(mtkd->ddev.dev); 317 + } 318 + 319 + static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan, 320 + dma_cookie_t cookie, 321 + struct dma_tx_state *txstate) 322 + { 323 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 324 + enum dma_status ret; 325 + 326 + ret = dma_cookie_status(chan, cookie, txstate); 327 + if (!txstate) 328 + return ret; 329 + 330 + dma_set_residue(txstate, c->rx_status); 331 + 332 + return ret; 333 + } 334 + 335 + /* 336 + * dmaengine_prep_slave_single will call the function. and sglen is 1. 337 + * 8250 uart using one ring buffer, and deal with one sg. 338 + */ 339 + static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg 340 + (struct dma_chan *chan, struct scatterlist *sgl, 341 + unsigned int sglen, enum dma_transfer_direction dir, 342 + unsigned long tx_flags, void *context) 343 + { 344 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 345 + struct mtk_uart_apdma_desc *d; 346 + 347 + if (!is_slave_direction(dir) || sglen != 1) 348 + return NULL; 349 + 350 + /* Now allocate and setup the descriptor */ 351 + d = kzalloc(sizeof(*d), GFP_ATOMIC); 352 + if (!d) 353 + return NULL; 354 + 355 + d->avail_len = sg_dma_len(sgl); 356 + d->addr = sg_dma_address(sgl); 357 + c->dir = dir; 358 + 359 + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); 360 + } 361 + 362 + static void mtk_uart_apdma_issue_pending(struct dma_chan *chan) 363 + { 364 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 365 + struct virt_dma_desc *vd; 366 + unsigned long flags; 367 + 368 + spin_lock_irqsave(&c->vc.lock, flags); 369 + if (vchan_issue_pending(&c->vc)) { 370 + vd = vchan_next_desc(&c->vc); 371 + c->desc = to_mtk_uart_apdma_desc(&vd->tx); 372 + 373 + if (c->dir == DMA_DEV_TO_MEM) 374 + mtk_uart_apdma_start_rx(c); 375 + else if (c->dir == DMA_MEM_TO_DEV) 376 + mtk_uart_apdma_start_tx(c); 377 + } 378 + 379 + spin_unlock_irqrestore(&c->vc.lock, flags); 380 + } 381 + 382 + static int mtk_uart_apdma_slave_config(struct dma_chan *chan, 383 + struct dma_slave_config *config) 384 + { 385 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 386 + 387 + memcpy(&c->cfg, config, sizeof(*config)); 388 + 389 + return 0; 390 + } 391 + 392 + static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) 393 + { 394 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 395 + unsigned long flags; 396 + unsigned int status; 397 + LIST_HEAD(head); 398 + int ret; 399 + 400 + mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); 401 + 402 + ret = readx_poll_timeout(readl, c->base + VFF_FLUSH, 403 + status, status != VFF_FLUSH_B, 10, 100); 404 + if (ret) 405 + dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n", 406 + mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); 407 + 408 + /* 409 + * Stop need 3 steps. 410 + * 1. set stop to 1 411 + * 2. wait en to 0 412 + * 3. 
set stop as 0 413 + */ 414 + mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B); 415 + ret = readx_poll_timeout(readl, c->base + VFF_EN, 416 + status, !status, 10, 100); 417 + if (ret) 418 + dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n", 419 + mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); 420 + 421 + mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B); 422 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); 423 + 424 + if (c->dir == DMA_DEV_TO_MEM) 425 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); 426 + else if (c->dir == DMA_MEM_TO_DEV) 427 + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); 428 + 429 + synchronize_irq(c->irq); 430 + 431 + spin_lock_irqsave(&c->vc.lock, flags); 432 + vchan_get_all_descriptors(&c->vc, &head); 433 + vchan_dma_desc_free_list(&c->vc, &head); 434 + spin_unlock_irqrestore(&c->vc.lock, flags); 435 + 436 + return 0; 437 + } 438 + 439 + static int mtk_uart_apdma_device_pause(struct dma_chan *chan) 440 + { 441 + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 442 + unsigned long flags; 443 + 444 + spin_lock_irqsave(&c->vc.lock, flags); 445 + 446 + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); 447 + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); 448 + 449 + synchronize_irq(c->irq); 450 + 451 + spin_unlock_irqrestore(&c->vc.lock, flags); 452 + 453 + return 0; 454 + } 455 + 456 + static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) 457 + { 458 + while (!list_empty(&mtkd->ddev.channels)) { 459 + struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels, 460 + struct mtk_chan, vc.chan.device_node); 461 + 462 + list_del(&c->vc.chan.device_node); 463 + tasklet_kill(&c->vc.task); 464 + } 465 + } 466 + 467 + static const struct of_device_id mtk_uart_apdma_match[] = { 468 + { .compatible = "mediatek,mt6577-uart-dma", }, 469 + { /* sentinel */ }, 470 + }; 471 + MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); 472 + 473 + static int mtk_uart_apdma_probe(struct platform_device *pdev) 474 + { 475 + struct device_node *np = pdev->dev.of_node; 476 + struct mtk_uart_apdmadev *mtkd; 477 + int bit_mask = 32, rc; 478 + struct resource *res; 479 + struct mtk_chan *c; 480 + unsigned int i; 481 + 482 + mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL); 483 + if (!mtkd) 484 + return -ENOMEM; 485 + 486 + mtkd->clk = devm_clk_get(&pdev->dev, NULL); 487 + if (IS_ERR(mtkd->clk)) { 488 + dev_err(&pdev->dev, "No clock specified\n"); 489 + rc = PTR_ERR(mtkd->clk); 490 + return rc; 491 + } 492 + 493 + if (of_property_read_bool(np, "mediatek,dma-33bits")) 494 + mtkd->support_33bits = true; 495 + 496 + if (mtkd->support_33bits) 497 + bit_mask = 33; 498 + 499 + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); 500 + if (rc) 501 + return rc; 502 + 503 + dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask); 504 + mtkd->ddev.device_alloc_chan_resources = 505 + mtk_uart_apdma_alloc_chan_resources; 506 + mtkd->ddev.device_free_chan_resources = 507 + mtk_uart_apdma_free_chan_resources; 508 + mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status; 509 + mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending; 510 + mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg; 511 + mtkd->ddev.device_config = mtk_uart_apdma_slave_config; 512 + mtkd->ddev.device_pause = mtk_uart_apdma_device_pause; 513 + mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all; 514 + mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); 515 + mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); 516 + mtkd->ddev.directions = 
BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 517 + mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 518 + mtkd->ddev.dev = &pdev->dev; 519 + INIT_LIST_HEAD(&mtkd->ddev.channels); 520 + 521 + mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS; 522 + if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) { 523 + dev_info(&pdev->dev, 524 + "Using %u as missing dma-requests property\n", 525 + MTK_UART_APDMA_NR_VCHANS); 526 + } 527 + 528 + for (i = 0; i < mtkd->dma_requests; i++) { 529 + c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL); 530 + if (!c) { 531 + rc = -ENODEV; 532 + goto err_no_dma; 533 + } 534 + 535 + res = platform_get_resource(pdev, IORESOURCE_MEM, i); 536 + if (!res) { 537 + rc = -ENODEV; 538 + goto err_no_dma; 539 + } 540 + 541 + c->base = devm_ioremap_resource(&pdev->dev, res); 542 + if (IS_ERR(c->base)) { 543 + rc = PTR_ERR(c->base); 544 + goto err_no_dma; 545 + } 546 + c->vc.desc_free = mtk_uart_apdma_desc_free; 547 + vchan_init(&c->vc, &mtkd->ddev); 548 + 549 + rc = platform_get_irq(pdev, i); 550 + if (rc < 0) { 551 + dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i); 552 + goto err_no_dma; 553 + } 554 + c->irq = rc; 555 + } 556 + 557 + pm_runtime_enable(&pdev->dev); 558 + pm_runtime_set_active(&pdev->dev); 559 + 560 + rc = dma_async_device_register(&mtkd->ddev); 561 + if (rc) 562 + goto rpm_disable; 563 + 564 + platform_set_drvdata(pdev, mtkd); 565 + 566 + /* Device-tree DMA controller registration */ 567 + rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd); 568 + if (rc) 569 + goto dma_remove; 570 + 571 + return rc; 572 + 573 + dma_remove: 574 + dma_async_device_unregister(&mtkd->ddev); 575 + rpm_disable: 576 + pm_runtime_disable(&pdev->dev); 577 + err_no_dma: 578 + mtk_uart_apdma_free(mtkd); 579 + return rc; 580 + } 581 + 582 + static int mtk_uart_apdma_remove(struct platform_device *pdev) 583 + { 584 + struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev); 585 + 586 + of_dma_controller_free(pdev->dev.of_node); 587 + 588 + mtk_uart_apdma_free(mtkd); 589 + 590 + dma_async_device_unregister(&mtkd->ddev); 591 + 592 + pm_runtime_disable(&pdev->dev); 593 + 594 + return 0; 595 + } 596 + 597 + #ifdef CONFIG_PM_SLEEP 598 + static int mtk_uart_apdma_suspend(struct device *dev) 599 + { 600 + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); 601 + 602 + if (!pm_runtime_suspended(dev)) 603 + clk_disable_unprepare(mtkd->clk); 604 + 605 + return 0; 606 + } 607 + 608 + static int mtk_uart_apdma_resume(struct device *dev) 609 + { 610 + int ret; 611 + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); 612 + 613 + if (!pm_runtime_suspended(dev)) { 614 + ret = clk_prepare_enable(mtkd->clk); 615 + if (ret) 616 + return ret; 617 + } 618 + 619 + return 0; 620 + } 621 + #endif /* CONFIG_PM_SLEEP */ 622 + 623 + #ifdef CONFIG_PM 624 + static int mtk_uart_apdma_runtime_suspend(struct device *dev) 625 + { 626 + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); 627 + 628 + clk_disable_unprepare(mtkd->clk); 629 + 630 + return 0; 631 + } 632 + 633 + static int mtk_uart_apdma_runtime_resume(struct device *dev) 634 + { 635 + int ret; 636 + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); 637 + 638 + ret = clk_prepare_enable(mtkd->clk); 639 + if (ret) 640 + return ret; 641 + 642 + return 0; 643 + } 644 + #endif /* CONFIG_PM */ 645 + 646 + static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { 647 + SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume) 648 + SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend, 649 + 
mtk_uart_apdma_runtime_resume, NULL) 650 + }; 651 + 652 + static struct platform_driver mtk_uart_apdma_driver = { 653 + .probe = mtk_uart_apdma_probe, 654 + .remove = mtk_uart_apdma_remove, 655 + .driver = { 656 + .name = KBUILD_MODNAME, 657 + .pm = &mtk_uart_apdma_pm_ops, 658 + .of_match_table = of_match_ptr(mtk_uart_apdma_match), 659 + }, 660 + }; 661 + 662 + module_platform_driver(mtk_uart_apdma_driver); 663 + 664 + MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver"); 665 + MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>"); 666 + MODULE_LICENSE("GPL v2");
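The driver above accepts only single-segment slave transfers (it rejects sglen != 1) and sizes its VFF ring from the port-window fields of the slave config rather than from the transfer itself. A minimal sketch of the client side, assuming a hypothetical serial driver with its own dev, uart_tx_fifo address and tx_done callback:

    struct dma_slave_config cfg = { };
    struct dma_async_tx_descriptor *desc;
    struct dma_chan *chan;

    chan = dma_request_chan(dev, "tx");
    if (IS_ERR(chan))
            return PTR_ERR(chan);

    cfg.direction = DMA_MEM_TO_DEV;
    cfg.dst_addr = uart_tx_fifo;            /* assumed FIFO bus address */
    cfg.dst_port_window_size = 0x1000;      /* becomes VFF_LEN above */
    dmaengine_slave_config(chan, &cfg);

    /* one segment only: mtk_uart_apdma_prep_slave_sg() rejects sglen != 1 */
    desc = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT);
    if (!desc)
            return -EBUSY;
    desc->callback = tx_done;
    dmaengine_submit(desc);
    dma_async_issue_pending(chan);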
+2 -4
drivers/dma/mic_x100_dma.c
··· 717 717 if (mic_dma_dbg) { 718 718 mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev), 719 719 mic_dma_dbg); 720 - if (mic_dma_dev->dbg_dir) 721 - debugfs_create_file("mic_dma_reg", 0444, 722 - mic_dma_dev->dbg_dir, mic_dma_dev, 723 - &mic_dma_reg_fops); 720 + debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, 721 + mic_dma_dev, &mic_dma_reg_fops); 724 722 } 725 723 return 0; 726 724 }
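This hunk, like the pxa_dma and hidma ones further down, applies the debugfs rule from the merge summary: debugfs_create_dir() returns an ERR_PTR rather than NULL on failure, and every debugfs_create_*() call accepts an ERR_PTR parent as a harmless no-op, so checking intermediate results only adds dead code. The idiom collapses to unconditional chaining (names hypothetical):

    struct dentry *dir = debugfs_create_dir("mydrv", NULL);

    /* safe even if dir is an ERR_PTR: these degrade to no-ops */
    debugfs_create_file("regs", 0444, dir, priv, &regs_fops);
    debugfs_create_file("stats", 0444, dir, priv, &stats_fops);

    debugfs_remove_recursive(dir);  /* teardown tolerates ERR_PTR/NULL too */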
+2 -8
drivers/dma/mmp_tdma.c
··· 582 582 } 583 583 584 584 struct mmp_tdma_filter_param { 585 - struct device_node *of_node; 586 585 unsigned int chan_id; 587 586 }; 588 587 589 588 static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) 590 589 { 591 590 struct mmp_tdma_filter_param *param = fn_param; 592 - struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 593 - struct dma_device *pdma_device = tdmac->chan.device; 594 - 595 - if (pdma_device->dev->of_node != param->of_node) 596 - return false; 597 591 598 592 if (chan->chan_id != param->chan_id) 599 593 return false; ··· 605 611 if (dma_spec->args_count != 1) 606 612 return NULL; 607 613 608 - param.of_node = ofdma->of_node; 609 614 param.chan_id = dma_spec->args[0]; 610 615 611 616 if (param.chan_id >= TDMA_CHANNEL_NUM) 612 617 return NULL; 613 618 614 - return dma_request_channel(mask, mmp_tdma_filter_fn, &param); 619 + return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param, 620 + ofdma->of_node); 615 621 } 616 622 617 623 static const struct of_device_id mmp_tdma_dt_ids[] = {
+2 -6
drivers/dma/mxs-dma.c
··· 719 719 } 720 720 721 721 struct mxs_dma_filter_param { 722 - struct device_node *of_node; 723 722 unsigned int chan_id; 724 723 }; 725 724 ··· 728 729 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 729 730 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 730 731 int chan_irq; 731 - 732 - if (mxs_dma->dma_device.dev->of_node != param->of_node) 733 - return false; 734 732 735 733 if (chan->chan_id != param->chan_id) 736 734 return false; ··· 751 755 if (dma_spec->args_count != 1) 752 756 return NULL; 753 757 754 - param.of_node = ofdma->of_node; 755 758 param.chan_id = dma_spec->args[0]; 756 759 757 760 if (param.chan_id >= mxs_dma->nr_channels) 758 761 return NULL; 759 762 760 - return dma_request_channel(mask, mxs_dma_filter_fn, &param); 763 + return __dma_request_channel(&mask, mxs_dma_filter_fn, &param, 764 + ofdma->of_node); 761 765 } 762 766 763 767 static int __init mxs_dma_probe(struct platform_device *pdev)
+2 -2
drivers/dma/of-dma.c
··· 313 313 if (count != 1) 314 314 return NULL; 315 315 316 - return dma_request_channel(info->dma_cap, info->filter_fn, 317 - &dma_spec->args[0]); 316 + return __dma_request_channel(&info->dma_cap, info->filter_fn, 317 + &dma_spec->args[0], dma_spec->np); 318 318 } 319 319 EXPORT_SYMBOL_GPL(of_dma_simple_xlate); 320 320
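The mmp_tdma, mxs-dma and of-dma hunks are one pattern: __dma_request_channel() now takes the requesting device_node (see the dmaengine.h hunk below) and the core compares it against each candidate controller, so filter parameters no longer need to carry an of_node of their own. A translation callback under the new scheme looks roughly like this (the foo_* names are placeholders):

    static struct dma_chan *foo_dma_xlate(struct of_phandle_args *dma_spec,
                                          struct of_dma *ofdma)
    {
            struct foo_filter_param param = { .chan_id = dma_spec->args[0] };
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* the core now skips channels on other controllers for us */
            return __dma_request_channel(&mask, foo_filter_fn, &param,
                                         ofdma->of_node);
    }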
+40
drivers/dma/pl330.c
··· 25 25 #include <linux/err.h> 26 26 #include <linux/pm_runtime.h> 27 27 #include <linux/bug.h> 28 + #include <linux/reset.h> 28 29 29 30 #include "dmaengine.h" 30 31 #define PL330_MAX_CHAN 8 ··· 497 496 unsigned int num_peripherals; 498 497 struct dma_pl330_chan *peripherals; /* keep at end */ 499 498 int quirks; 499 + 500 + struct reset_control *rstc; 501 + struct reset_control *rstc_ocp; 500 502 }; 501 503 502 504 static struct pl330_of_quirks { ··· 3028 3024 3029 3025 amba_set_drvdata(adev, pl330); 3030 3026 3027 + pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma"); 3028 + if (IS_ERR(pl330->rstc)) { 3029 + if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER) 3030 + dev_err(&adev->dev, "Failed to get reset!\n"); 3031 + return PTR_ERR(pl330->rstc); 3032 + } else { 3033 + ret = reset_control_deassert(pl330->rstc); 3034 + if (ret) { 3035 + dev_err(&adev->dev, "Couldn't deassert the device from reset!\n"); 3036 + return ret; 3037 + } 3038 + } 3039 + 3040 + pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp"); 3041 + if (IS_ERR(pl330->rstc_ocp)) { 3042 + if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER) 3043 + dev_err(&adev->dev, "Failed to get OCP reset!\n"); 3044 + return PTR_ERR(pl330->rstc_ocp); 3045 + } else { 3046 + ret = reset_control_deassert(pl330->rstc_ocp); 3047 + if (ret) { 3048 + dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n"); 3049 + return ret; 3050 + } 3051 + } 3052 + 3031 3053 for (i = 0; i < AMBA_NR_IRQS; i++) { 3032 3054 irq = adev->irq[i]; 3033 3055 if (irq) { ··· 3194 3164 probe_err2: 3195 3165 pl330_del(pl330); 3196 3166 3167 + if (pl330->rstc_ocp) 3168 + reset_control_assert(pl330->rstc_ocp); 3169 + 3170 + if (pl330->rstc) 3171 + reset_control_assert(pl330->rstc); 3197 3172 return ret; 3198 3173 } 3199 3174 ··· 3237 3202 3238 3203 pl330_del(pl330); 3239 3204 3205 + if (pl330->rstc_ocp) 3206 + reset_control_assert(pl330->rstc_ocp); 3207 + 3208 + if (pl330->rstc) 3209 + reset_control_assert(pl330->rstc); 3240 3210 return 0; 3241 3211 } 3242 3212
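The reset handling leans on two reset-framework guarantees: devm_reset_control_get_optional() returns NULL, not an error, when the node has no matching "resets" entry, and reset_control_deassert()/reset_control_assert() treat a NULL control as a successful no-op. The same probe path therefore serves boards with and without the new property; stripped to its core the pattern is:

    rstc = devm_reset_control_get_optional(dev, "dma");
    if (IS_ERR(rstc))
            return PTR_ERR(rstc);           /* may be -EPROBE_DEFER */

    ret = reset_control_deassert(rstc);     /* NULL rstc: returns 0 */
    if (ret)
            return ret;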
+11 -45
drivers/dma/pxa_dma.c
··· 129 129 spinlock_t phy_lock; /* Phy association */ 130 130 #ifdef CONFIG_DEBUG_FS 131 131 struct dentry *dbgfs_root; 132 - struct dentry *dbgfs_state; 133 132 struct dentry **dbgfs_chan; 134 133 #endif 135 134 }; ··· 322 323 int ch, struct dentry *chandir) 323 324 { 324 325 char chan_name[11]; 325 - struct dentry *chan, *chan_state = NULL, *chan_descr = NULL; 326 - struct dentry *chan_reqs = NULL; 326 + struct dentry *chan; 327 327 void *dt; 328 328 329 329 scnprintf(chan_name, sizeof(chan_name), "%d", ch); 330 330 chan = debugfs_create_dir(chan_name, chandir); 331 331 dt = (void *)&pdev->phys[ch]; 332 332 333 - if (chan) 334 - chan_state = debugfs_create_file("state", 0400, chan, dt, 335 - &chan_state_fops); 336 - if (chan_state) 337 - chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, 338 - &descriptors_fops); 339 - if (chan_descr) 340 - chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, 341 - &requester_chan_fops); 342 - if (!chan_reqs) 343 - goto err_state; 333 + debugfs_create_file("state", 0400, chan, dt, &chan_state_fops); 334 + debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops); 335 + debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops); 344 336 345 337 return chan; 346 - 347 - err_state: 348 - debugfs_remove_recursive(chan); 349 - return NULL; 350 338 } 351 339 352 340 static void pxad_init_debugfs(struct pxad_device *pdev) ··· 341 355 int i; 342 356 struct dentry *chandir; 343 357 344 - pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); 345 - if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root) 346 - goto err_root; 347 - 348 - pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, 349 - pdev, &state_fops); 350 - if (!pdev->dbgfs_state) 351 - goto err_state; 352 - 353 358 pdev->dbgfs_chan = 354 - kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state), 359 + kmalloc_array(pdev->nr_chans, sizeof(struct dentry *), 355 360 GFP_KERNEL); 356 361 if (!pdev->dbgfs_chan) 357 - goto err_alloc; 362 + return; 363 + 364 + pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); 365 + 366 + debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops); 358 367 359 368 chandir = debugfs_create_dir("channels", pdev->dbgfs_root); 360 - if (!chandir) 361 - goto err_chandir; 362 369 363 - for (i = 0; i < pdev->nr_chans; i++) { 370 + for (i = 0; i < pdev->nr_chans; i++) 364 371 pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir); 365 - if (!pdev->dbgfs_chan[i]) 366 - goto err_chans; 367 - } 368 - 369 - return; 370 - err_chans: 371 - err_chandir: 372 - kfree(pdev->dbgfs_chan); 373 - err_alloc: 374 - err_state: 375 - debugfs_remove_recursive(pdev->dbgfs_root); 376 - err_root: 377 - pr_err("pxad: debugfs is not available\n"); 378 372 } 379 373 380 374 static void pxad_cleanup_debugfs(struct pxad_device *pdev)
+1 -4
drivers/dma/qcom/hidma.h
··· 93 93 * It is used by the DMA complete notification to 94 94 * locate the descriptor that initiated the transfer. 95 95 */ 96 - struct dentry *debugfs; 97 - struct dentry *stats; 98 96 struct hidma_dev *dmadev; 99 97 struct hidma_desc *running; 100 98 ··· 124 126 struct dma_device ddev; 125 127 126 128 struct dentry *debugfs; 127 - struct dentry *stats; 128 129 129 130 /* sysfs entry for the channel id */ 130 131 struct device_attribute *chid_attrs; ··· 155 158 irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause); 156 159 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, 157 160 u8 err_code); 158 - int hidma_debug_init(struct hidma_dev *dmadev); 161 + void hidma_debug_init(struct hidma_dev *dmadev); 159 162 void hidma_debug_uninit(struct hidma_dev *dmadev); 160 163 #endif
+7 -30
drivers/dma/qcom/hidma_dbg.c
··· 138 138 debugfs_remove_recursive(dmadev->debugfs); 139 139 } 140 140 141 - int hidma_debug_init(struct hidma_dev *dmadev) 141 + void hidma_debug_init(struct hidma_dev *dmadev) 142 142 { 143 - int rc = 0; 144 143 int chidx = 0; 145 144 struct list_head *position = NULL; 145 + struct dentry *dir; 146 146 147 147 dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); 148 - if (!dmadev->debugfs) { 149 - rc = -ENODEV; 150 - return rc; 151 - } 152 148 153 149 /* walk through the virtual channel list */ 154 150 list_for_each(position, &dmadev->ddev.channels) { ··· 153 157 chan = list_entry(position, struct hidma_chan, 154 158 chan.device_node); 155 159 sprintf(chan->dbg_name, "chan%d", chidx); 156 - chan->debugfs = debugfs_create_dir(chan->dbg_name, 160 + dir = debugfs_create_dir(chan->dbg_name, 157 161 dmadev->debugfs); 158 - if (!chan->debugfs) { 159 - rc = -ENOMEM; 160 - goto cleanup; 161 - } 162 - chan->stats = debugfs_create_file("stats", S_IRUGO, 163 - chan->debugfs, chan, 164 - &hidma_chan_fops); 165 - if (!chan->stats) { 166 - rc = -ENOMEM; 167 - goto cleanup; 168 - } 162 + debugfs_create_file("stats", S_IRUGO, dir, chan, 163 + &hidma_chan_fops); 169 164 chidx++; 170 165 } 171 166 172 - dmadev->stats = debugfs_create_file("stats", S_IRUGO, 173 - dmadev->debugfs, dmadev, 174 - &hidma_dma_fops); 175 - if (!dmadev->stats) { 176 - rc = -ENOMEM; 177 - goto cleanup; 178 - } 179 - 180 - return 0; 181 - cleanup: 182 - hidma_debug_uninit(dmadev); 183 - return rc; 167 + debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev, 168 + &hidma_dma_fops); 184 169 }
-6
drivers/dma/sh/Kconfig
··· 47 47 help 48 48 This driver supports the USB-DMA controller found in the Renesas 49 49 SoCs. 50 - 51 - config SUDMAC 52 - tristate "Renesas SUDMAC support" 53 - depends on SH_DMAE_BASE 54 - help 55 - Enable support for the Renesas SUDMAC controllers.
-1
drivers/dma/sh/Makefile
··· 15 15 16 16 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o 17 17 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o 18 - obj-$(CONFIG_SUDMAC) += sudmac.o
+4 -4
drivers/dma/sh/rcar-dmac.c
··· 1165 1165 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 1166 1166 1167 1167 /* Someone calling slave DMA on a generic channel? */ 1168 - if (rchan->mid_rid < 0 || !sg_len) { 1168 + if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { 1169 1169 dev_warn(chan->device->dev, 1170 1170 "%s: bad parameter: len=%d, id=%d\n", 1171 1171 __func__, sg_len, rchan->mid_rid); ··· 1654 1654 * Forcing it to call dma_request_channel() and iterate through all 1655 1655 * channels from all controllers is just pointless. 1656 1656 */ 1657 - if (chan->device->device_config != rcar_dmac_device_config || 1658 - dma_spec->np != chan->device->dev->of_node) 1657 + if (chan->device->device_config != rcar_dmac_device_config) 1659 1658 return false; 1660 1659 1661 1660 return !test_and_set_bit(dma_spec->args[0], dmac->modules); ··· 1674 1675 dma_cap_zero(mask); 1675 1676 dma_cap_set(DMA_SLAVE, mask); 1676 1677 1677 - chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec); 1678 + chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, 1679 + ofdma->of_node); 1678 1680 if (!chan) 1679 1681 return NULL; 1680 1682
-414
drivers/dma/sh/sudmac.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Renesas SUDMAC support 4 - * 5 - * Copyright (C) 2013 Renesas Solutions Corp. 6 - * 7 - * based on drivers/dma/sh/shdma.c: 8 - * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 9 - * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 10 - * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 11 - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 12 - */ 13 - 14 - #include <linux/dmaengine.h> 15 - #include <linux/err.h> 16 - #include <linux/init.h> 17 - #include <linux/interrupt.h> 18 - #include <linux/module.h> 19 - #include <linux/platform_device.h> 20 - #include <linux/slab.h> 21 - #include <linux/sudmac.h> 22 - 23 - struct sudmac_chan { 24 - struct shdma_chan shdma_chan; 25 - void __iomem *base; 26 - char dev_id[16]; /* unique name per DMAC of channel */ 27 - 28 - u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ 29 - u32 cfg; 30 - u32 dint_end_bit; 31 - }; 32 - 33 - struct sudmac_device { 34 - struct shdma_dev shdma_dev; 35 - struct sudmac_pdata *pdata; 36 - void __iomem *chan_reg; 37 - }; 38 - 39 - struct sudmac_regs { 40 - u32 base_addr; 41 - u32 base_byte_count; 42 - }; 43 - 44 - struct sudmac_desc { 45 - struct sudmac_regs hw; 46 - struct shdma_desc shdma_desc; 47 - }; 48 - 49 - #define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) 50 - #define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) 51 - #define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ 52 - struct sudmac_device, shdma_dev.dma_dev) 53 - 54 - /* SUDMAC register */ 55 - #define SUDMAC_CH0CFG 0x00 56 - #define SUDMAC_CH0BA 0x10 57 - #define SUDMAC_CH0BBC 0x18 58 - #define SUDMAC_CH0CA 0x20 59 - #define SUDMAC_CH0CBC 0x28 60 - #define SUDMAC_CH0DEN 0x30 61 - #define SUDMAC_DSTSCLR 0x38 62 - #define SUDMAC_DBUFCTRL 0x3C 63 - #define SUDMAC_DINTCTRL 0x40 64 - #define SUDMAC_DINTSTS 0x44 65 - #define SUDMAC_DINTSTSCLR 0x48 66 - #define SUDMAC_CH0SHCTRL 0x50 67 - 68 - /* Definitions for the sudmac_channel.config */ 69 - #define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ 70 - #define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ 71 - #define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ 72 - 73 - /* Definitions for the sudmac_channel.dint_end_bit */ 74 - #define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ 75 - #define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ 76 - 77 - #define SUDMAC_DRV_NAME "sudmac" 78 - 79 - static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) 80 - { 81 - iowrite32(data, sc->base + reg); 82 - } 83 - 84 - static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) 85 - { 86 - return ioread32(sc->base + reg); 87 - } 88 - 89 - static bool sudmac_is_busy(struct sudmac_chan *sc) 90 - { 91 - u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); 92 - 93 - if (den) 94 - return true; /* working */ 95 - 96 - return false; /* waiting */ 97 - } 98 - 99 - static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, 100 - struct shdma_desc *sdesc) 101 - { 102 - sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); 103 - sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); 104 - sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); 105 - } 106 - 107 - static void sudmac_start(struct sudmac_chan *sc) 108 - { 109 - u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); 110 - 111 - sudmac_writel(sc, dintctrl | 
sc->dint_end_bit, SUDMAC_DINTCTRL); 112 - sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); 113 - } 114 - 115 - static void sudmac_start_xfer(struct shdma_chan *schan, 116 - struct shdma_desc *sdesc) 117 - { 118 - struct sudmac_chan *sc = to_chan(schan); 119 - struct sudmac_desc *sd = to_desc(sdesc); 120 - 121 - sudmac_set_reg(sc, &sd->hw, sdesc); 122 - sudmac_start(sc); 123 - } 124 - 125 - static bool sudmac_channel_busy(struct shdma_chan *schan) 126 - { 127 - struct sudmac_chan *sc = to_chan(schan); 128 - 129 - return sudmac_is_busy(sc); 130 - } 131 - 132 - static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) 133 - { 134 - } 135 - 136 - static const struct sudmac_slave_config *sudmac_find_slave( 137 - struct sudmac_chan *sc, int slave_id) 138 - { 139 - struct sudmac_device *sdev = to_sdev(sc); 140 - struct sudmac_pdata *pdata = sdev->pdata; 141 - const struct sudmac_slave_config *cfg; 142 - int i; 143 - 144 - for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) 145 - if (cfg->slave_id == slave_id) 146 - return cfg; 147 - 148 - return NULL; 149 - } 150 - 151 - static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, 152 - dma_addr_t slave_addr, bool try) 153 - { 154 - struct sudmac_chan *sc = to_chan(schan); 155 - const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); 156 - 157 - if (!cfg) 158 - return -ENODEV; 159 - 160 - return 0; 161 - } 162 - 163 - static inline void sudmac_dma_halt(struct sudmac_chan *sc) 164 - { 165 - u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); 166 - 167 - sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); 168 - sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); 169 - sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); 170 - } 171 - 172 - static int sudmac_desc_setup(struct shdma_chan *schan, 173 - struct shdma_desc *sdesc, 174 - dma_addr_t src, dma_addr_t dst, size_t *len) 175 - { 176 - struct sudmac_chan *sc = to_chan(schan); 177 - struct sudmac_desc *sd = to_desc(sdesc); 178 - 179 - dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n", 180 - __func__, &src, &dst, *len); 181 - 182 - if (*len > schan->max_xfer_len) 183 - *len = schan->max_xfer_len; 184 - 185 - if (dst) 186 - sd->hw.base_addr = dst; 187 - else if (src) 188 - sd->hw.base_addr = src; 189 - sd->hw.base_byte_count = *len; 190 - 191 - return 0; 192 - } 193 - 194 - static void sudmac_halt(struct shdma_chan *schan) 195 - { 196 - struct sudmac_chan *sc = to_chan(schan); 197 - 198 - sudmac_dma_halt(sc); 199 - } 200 - 201 - static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) 202 - { 203 - struct sudmac_chan *sc = to_chan(schan); 204 - u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); 205 - 206 - if (!(dintsts & sc->dint_end_bit)) 207 - return false; 208 - 209 - /* DMA stop */ 210 - sudmac_dma_halt(sc); 211 - 212 - return true; 213 - } 214 - 215 - static size_t sudmac_get_partial(struct shdma_chan *schan, 216 - struct shdma_desc *sdesc) 217 - { 218 - struct sudmac_chan *sc = to_chan(schan); 219 - struct sudmac_desc *sd = to_desc(sdesc); 220 - u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); 221 - 222 - return sd->hw.base_byte_count - current_byte_count; 223 - } 224 - 225 - static bool sudmac_desc_completed(struct shdma_chan *schan, 226 - struct shdma_desc *sdesc) 227 - { 228 - struct sudmac_chan *sc = to_chan(schan); 229 - struct sudmac_desc *sd = to_desc(sdesc); 230 - u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); 231 - 232 - return sd->hw.base_addr + 
sd->hw.base_byte_count == current_addr; 233 - } 234 - 235 - static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, 236 - unsigned long flags) 237 - { 238 - struct shdma_dev *sdev = &su_dev->shdma_dev; 239 - struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); 240 - struct sudmac_chan *sc; 241 - struct shdma_chan *schan; 242 - int err; 243 - 244 - sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); 245 - if (!sc) 246 - return -ENOMEM; 247 - 248 - schan = &sc->shdma_chan; 249 - schan->max_xfer_len = 64 * 1024 * 1024 - 1; 250 - 251 - shdma_chan_probe(sdev, schan, id); 252 - 253 - sc->base = su_dev->chan_reg; 254 - 255 - /* get platform_data */ 256 - sc->offset = su_dev->pdata->channel->offset; 257 - if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) 258 - sc->cfg |= SUDMAC_SENDBUFM; 259 - if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) 260 - sc->cfg |= SUDMAC_RCVENDM; 261 - sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; 262 - 263 - if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) 264 - sc->dint_end_bit |= SUDMAC_CH0ENDE; 265 - if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) 266 - sc->dint_end_bit |= SUDMAC_CH1ENDE; 267 - 268 - /* set up channel irq */ 269 - if (pdev->id >= 0) 270 - snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", 271 - pdev->id, id); 272 - else 273 - snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); 274 - 275 - err = shdma_request_irq(schan, irq, flags, sc->dev_id); 276 - if (err) { 277 - dev_err(sdev->dma_dev.dev, 278 - "DMA channel %d request_irq failed %d\n", id, err); 279 - goto err_no_irq; 280 - } 281 - 282 - return 0; 283 - 284 - err_no_irq: 285 - /* remove from dmaengine device node */ 286 - shdma_chan_remove(schan); 287 - return err; 288 - } 289 - 290 - static void sudmac_chan_remove(struct sudmac_device *su_dev) 291 - { 292 - struct shdma_chan *schan; 293 - int i; 294 - 295 - shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { 296 - BUG_ON(!schan); 297 - 298 - shdma_chan_remove(schan); 299 - } 300 - } 301 - 302 - static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) 303 - { 304 - /* SUDMAC doesn't need the address */ 305 - return 0; 306 - } 307 - 308 - static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) 309 - { 310 - return &((struct sudmac_desc *)buf)[i].shdma_desc; 311 - } 312 - 313 - static const struct shdma_ops sudmac_shdma_ops = { 314 - .desc_completed = sudmac_desc_completed, 315 - .halt_channel = sudmac_halt, 316 - .channel_busy = sudmac_channel_busy, 317 - .slave_addr = sudmac_slave_addr, 318 - .desc_setup = sudmac_desc_setup, 319 - .set_slave = sudmac_set_slave, 320 - .setup_xfer = sudmac_setup_xfer, 321 - .start_xfer = sudmac_start_xfer, 322 - .embedded_desc = sudmac_embedded_desc, 323 - .chan_irq = sudmac_chan_irq, 324 - .get_partial = sudmac_get_partial, 325 - }; 326 - 327 - static int sudmac_probe(struct platform_device *pdev) 328 - { 329 - struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev); 330 - int err, i; 331 - struct sudmac_device *su_dev; 332 - struct dma_device *dma_dev; 333 - struct resource *chan, *irq_res; 334 - 335 - /* get platform data */ 336 - if (!pdata) 337 - return -ENODEV; 338 - 339 - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 340 - if (!irq_res) 341 - return -ENODEV; 342 - 343 - err = -ENOMEM; 344 - su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), 345 - GFP_KERNEL); 346 - if (!su_dev) 347 - return err; 348 - 349 - dma_dev = 
&su_dev->shdma_dev.dma_dev; 350 - 351 - chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 352 - su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); 353 - if (IS_ERR(su_dev->chan_reg)) 354 - return PTR_ERR(su_dev->chan_reg); 355 - 356 - dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); 357 - 358 - su_dev->shdma_dev.ops = &sudmac_shdma_ops; 359 - su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); 360 - err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); 361 - if (err < 0) 362 - return err; 363 - 364 - /* platform data */ 365 - su_dev->pdata = dev_get_platdata(&pdev->dev); 366 - 367 - platform_set_drvdata(pdev, su_dev); 368 - 369 - /* Create DMA Channel */ 370 - for (i = 0; i < pdata->channel_num; i++) { 371 - err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); 372 - if (err) 373 - goto chan_probe_err; 374 - } 375 - 376 - err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); 377 - if (err < 0) 378 - goto chan_probe_err; 379 - 380 - return err; 381 - 382 - chan_probe_err: 383 - sudmac_chan_remove(su_dev); 384 - 385 - shdma_cleanup(&su_dev->shdma_dev); 386 - 387 - return err; 388 - } 389 - 390 - static int sudmac_remove(struct platform_device *pdev) 391 - { 392 - struct sudmac_device *su_dev = platform_get_drvdata(pdev); 393 - struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; 394 - 395 - dma_async_device_unregister(dma_dev); 396 - sudmac_chan_remove(su_dev); 397 - shdma_cleanup(&su_dev->shdma_dev); 398 - 399 - return 0; 400 - } 401 - 402 - static struct platform_driver sudmac_driver = { 403 - .driver = { 404 - .name = SUDMAC_DRV_NAME, 405 - }, 406 - .probe = sudmac_probe, 407 - .remove = sudmac_remove, 408 - }; 409 - module_platform_driver(sudmac_driver); 410 - 411 - MODULE_AUTHOR("Yoshihiro Shimoda"); 412 - MODULE_DESCRIPTION("Renesas SUDMAC driver"); 413 - MODULE_LICENSE("GPL v2"); 414 - MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
+3 -5
drivers/dma/sh/usb-dmac.c
··· 57 57 u32 residue; 58 58 struct list_head node; 59 59 dma_cookie_t done_cookie; 60 - struct usb_dmac_sg sg[0]; 60 + struct usb_dmac_sg sg[]; 61 61 }; 62 62 63 63 #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) ··· 636 636 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); 637 637 struct of_phandle_args *dma_spec = arg; 638 638 639 - if (dma_spec->np != chan->device->dev->of_node) 640 - return false; 641 - 642 639 /* USB-DMAC should be used with fixed usb controller's FIFO */ 643 640 if (uchan->index != dma_spec->args[0]) 644 641 return false; ··· 656 659 dma_cap_zero(mask); 657 660 dma_cap_set(DMA_SLAVE, mask); 658 661 659 - chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec); 662 + chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec, 663 + ofdma->of_node); 660 664 if (!chan) 661 665 return NULL; 662 666
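Replacing sg[0] with sg[] is the standard C99 flexible-array-member spelling; unlike the old zero-length-array extension it lets the compiler catch misuse such as taking sizeof of the array. The allocation itself is unchanged here, but the usual companion idiom is struct_size() from <linux/overflow.h>, which computes header-plus-trailing-elements with overflow checking; a sketch:

    struct usb_dmac_desc *desc;

    /* header plus sg_len trailing struct usb_dmac_sg entries */
    desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
    if (!desc)
            return NULL;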
-1
drivers/dma/stm32-dma.c
··· 1365 1365 1366 1366 for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { 1367 1367 chan = &dmadev->chan[i]; 1368 - chan->irq = platform_get_irq(pdev, i); 1369 1368 ret = platform_get_irq(pdev, i); 1370 1369 if (ret < 0) { 1371 1370 if (ret != -EPROBE_DEFER)
+2 -4
drivers/dma/stm32-dmamux.c
··· 295 295 #ifdef CONFIG_PM 296 296 static int stm32_dmamux_runtime_suspend(struct device *dev) 297 297 { 298 - struct platform_device *pdev = 299 - container_of(dev, struct platform_device, dev); 298 + struct platform_device *pdev = to_platform_device(dev); 300 299 struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); 301 300 302 301 clk_disable_unprepare(stm32_dmamux->clk); ··· 305 306 306 307 static int stm32_dmamux_runtime_resume(struct device *dev) 307 308 { 308 - struct platform_device *pdev = 309 - container_of(dev, struct platform_device, dev); 309 + struct platform_device *pdev = to_platform_device(dev); 310 310 struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); 311 311 int ret; 312 312
+112 -35
drivers/dma/sun6i-dma.c
··· 64 64 #define DMA_CHAN_LLI_ADDR 0x08 65 65 66 66 #define DMA_CHAN_CUR_CFG 0x0c 67 - #define DMA_CHAN_MAX_DRQ 0x1f 68 - #define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ) 69 - #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) 70 - #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) 67 + #define DMA_CHAN_MAX_DRQ_A31 0x1f 68 + #define DMA_CHAN_MAX_DRQ_H6 0x3f 69 + #define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) 70 + #define DMA_CHAN_CFG_SRC_DRQ_H6(x) ((x) & DMA_CHAN_MAX_DRQ_H6) 71 + #define DMA_CHAN_CFG_SRC_MODE_A31(x) (((x) & 0x1) << 5) 72 + #define DMA_CHAN_CFG_SRC_MODE_H6(x) (((x) & 0x1) << 8) 71 73 #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) 72 74 #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) 73 75 #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) 74 76 75 - #define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) 76 - #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) 77 - #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) 77 + #define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) 78 + #define DMA_CHAN_CFG_DST_DRQ_H6(x) (DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16) 79 + #define DMA_CHAN_CFG_DST_MODE_A31(x) (DMA_CHAN_CFG_SRC_MODE_A31(x) << 16) 80 + #define DMA_CHAN_CFG_DST_MODE_H6(x) (DMA_CHAN_CFG_SRC_MODE_H6(x) << 16) 78 81 #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) 79 82 #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) 80 83 #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) ··· 97 94 #define LLI_LAST_ITEM 0xfffff800 98 95 #define NORMAL_WAIT 8 99 96 #define DRQ_SDRAM 1 97 + #define LINEAR_MODE 0 98 + #define IO_MODE 1 100 99 101 100 /* forward declaration */ 102 101 struct sun6i_dma_dev; ··· 126 121 */ 127 122 void (*clock_autogate_enable)(struct sun6i_dma_dev *); 128 123 void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); 124 + void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq); 125 + void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode); 129 126 u32 src_burst_lengths; 130 127 u32 dst_burst_lengths; 131 128 u32 src_addr_widths; 132 129 u32 dst_addr_widths; 130 + bool has_mbus_clk; 133 131 }; 134 132 135 133 /* ··· 186 178 struct dma_device slave; 187 179 void __iomem *base; 188 180 struct clk *clk; 181 + struct clk *clk_mbus; 189 182 int irq; 190 183 spinlock_t lock; 191 184 struct reset_control *rstc; ··· 312 303 { 313 304 *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) | 314 305 DMA_CHAN_CFG_DST_BURST_H3(dst_burst); 306 + } 307 + 308 + static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) 309 + { 310 + *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) | 311 + DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); 312 + } 313 + 314 + static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq) 315 + { 316 + *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) | 317 + DMA_CHAN_CFG_DST_DRQ_H6(dst_drq); 318 + } 319 + 320 + static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode) 321 + { 322 + *p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) | 323 + DMA_CHAN_CFG_DST_MODE_A31(dst_mode); 324 + } 325 + 326 + static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode) 327 + { 328 + *p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) | 329 + DMA_CHAN_CFG_DST_MODE_H6(dst_mode); 315 330 } 316 331 317 332 static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) ··· 661 628 662 629 burst = convert_burst(8); 663 630 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); 664 - v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 665 - 
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 666 - DMA_CHAN_CFG_DST_LINEAR_MODE | 667 - DMA_CHAN_CFG_SRC_LINEAR_MODE | 668 - DMA_CHAN_CFG_SRC_WIDTH(width) | 631 + v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) | 669 632 DMA_CHAN_CFG_DST_WIDTH(width); 670 633 671 634 sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); 635 + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM); 636 + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE); 672 637 673 638 sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); 674 639 ··· 718 687 if (dir == DMA_MEM_TO_DEV) { 719 688 v_lli->src = sg_dma_address(sg); 720 689 v_lli->dst = sconfig->dst_addr; 721 - v_lli->cfg = lli_cfg | 722 - DMA_CHAN_CFG_DST_IO_MODE | 723 - DMA_CHAN_CFG_SRC_LINEAR_MODE | 724 - DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 725 - DMA_CHAN_CFG_DST_DRQ(vchan->port); 690 + v_lli->cfg = lli_cfg; 691 + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 692 + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); 726 693 727 694 dev_dbg(chan2dev(chan), 728 695 "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", ··· 731 702 } else { 732 703 v_lli->src = sconfig->src_addr; 733 704 v_lli->dst = sg_dma_address(sg); 734 - v_lli->cfg = lli_cfg | 735 - DMA_CHAN_CFG_DST_LINEAR_MODE | 736 - DMA_CHAN_CFG_SRC_IO_MODE | 737 - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 738 - DMA_CHAN_CFG_SRC_DRQ(vchan->port); 705 + v_lli->cfg = lli_cfg; 706 + sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); 707 + sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); 739 708 740 709 dev_dbg(chan2dev(chan), 741 710 "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", ··· 799 772 if (dir == DMA_MEM_TO_DEV) { 800 773 v_lli->src = buf_addr + period_len * i; 801 774 v_lli->dst = sconfig->dst_addr; 802 - v_lli->cfg = lli_cfg | 803 - DMA_CHAN_CFG_DST_IO_MODE | 804 - DMA_CHAN_CFG_SRC_LINEAR_MODE | 805 - DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 806 - DMA_CHAN_CFG_DST_DRQ(vchan->port); 775 + v_lli->cfg = lli_cfg; 776 + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); 777 + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); 807 778 } else { 808 779 v_lli->src = sconfig->src_addr; 809 780 v_lli->dst = buf_addr + period_len * i; 810 - v_lli->cfg = lli_cfg | 811 - DMA_CHAN_CFG_DST_LINEAR_MODE | 812 - DMA_CHAN_CFG_SRC_IO_MODE | 813 - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 814 - DMA_CHAN_CFG_SRC_DRQ(vchan->port); 781 + v_lli->cfg = lli_cfg; 782 + sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); 783 + sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); 815 784 } 816 785 817 786 prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); ··· 1072 1049 .nr_max_requests = 30, 1073 1050 .nr_max_vchans = 53, 1074 1051 .set_burst_length = sun6i_set_burst_length_a31, 1052 + .set_drq = sun6i_set_drq_a31, 1053 + .set_mode = sun6i_set_mode_a31, 1075 1054 .src_burst_lengths = BIT(1) | BIT(8), 1076 1055 .dst_burst_lengths = BIT(1) | BIT(8), 1077 1056 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1095 1070 .nr_max_vchans = 37, 1096 1071 .clock_autogate_enable = sun6i_enable_clock_autogate_a23, 1097 1072 .set_burst_length = sun6i_set_burst_length_a31, 1073 + .set_drq = sun6i_set_drq_a31, 1074 + .set_mode = sun6i_set_mode_a31, 1098 1075 .src_burst_lengths = BIT(1) | BIT(8), 1099 1076 .dst_burst_lengths = BIT(1) | BIT(8), 1100 1077 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1113 1086 .nr_max_vchans = 39, 1114 1087 .clock_autogate_enable = sun6i_enable_clock_autogate_a23, 1115 1088 .set_burst_length = sun6i_set_burst_length_a31, 1089 + .set_drq = sun6i_set_drq_a31, 
1090 + .set_mode = sun6i_set_mode_a31, 1116 1091 .src_burst_lengths = BIT(1) | BIT(8), 1117 1092 .dst_burst_lengths = BIT(1) | BIT(8), 1118 1093 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1138 1109 .nr_max_vchans = 34, 1139 1110 .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1140 1111 .set_burst_length = sun6i_set_burst_length_h3, 1112 + .set_drq = sun6i_set_drq_a31, 1113 + .set_mode = sun6i_set_mode_a31, 1141 1114 .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1142 1115 .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1143 1116 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1159 1128 static struct sun6i_dma_config sun50i_a64_dma_cfg = { 1160 1129 .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1161 1130 .set_burst_length = sun6i_set_burst_length_h3, 1131 + .set_drq = sun6i_set_drq_a31, 1132 + .set_mode = sun6i_set_mode_a31, 1162 1133 .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1163 1134 .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1164 1135 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1174 1141 }; 1175 1142 1176 1143 /* 1144 + * The H6 binding uses the number of dma channels from the 1145 + * device tree node. 1146 + */ 1147 + static struct sun6i_dma_config sun50i_h6_dma_cfg = { 1148 + .clock_autogate_enable = sun6i_enable_clock_autogate_h3, 1149 + .set_burst_length = sun6i_set_burst_length_h3, 1150 + .set_drq = sun6i_set_drq_h6, 1151 + .set_mode = sun6i_set_mode_h6, 1152 + .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1153 + .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), 1154 + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1155 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1156 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1157 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1158 + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | 1159 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | 1160 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | 1161 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), 1162 + .has_mbus_clk = true, 1163 + }; 1164 + 1165 + /* 1177 1166 * The V3s have only 8 physical channels, a maximum DRQ port id of 23, 1178 1167 * and a total of 24 usable source and destination endpoints. 
1179 1168 */ ··· 1206 1151 .nr_max_vchans = 24, 1207 1152 .clock_autogate_enable = sun6i_enable_clock_autogate_a23, 1208 1153 .set_burst_length = sun6i_set_burst_length_a31, 1154 + .set_drq = sun6i_set_drq_a31, 1155 + .set_mode = sun6i_set_mode_a31, 1209 1156 .src_burst_lengths = BIT(1) | BIT(8), 1210 1157 .dst_burst_lengths = BIT(1) | BIT(8), 1211 1158 .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | ··· 1225 1168 { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, 1226 1169 { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, 1227 1170 { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, 1171 + { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, 1228 1172 { /* sentinel */ } 1229 1173 }; 1230 1174 MODULE_DEVICE_TABLE(of, sun6i_dma_match); ··· 1260 1202 if (IS_ERR(sdc->clk)) { 1261 1203 dev_err(&pdev->dev, "No clock specified\n"); 1262 1204 return PTR_ERR(sdc->clk); 1205 + } 1206 + 1207 + if (sdc->cfg->has_mbus_clk) { 1208 + sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus"); 1209 + if (IS_ERR(sdc->clk_mbus)) { 1210 + dev_err(&pdev->dev, "No mbus clock specified\n"); 1211 + return PTR_ERR(sdc->clk_mbus); 1212 + } 1263 1213 } 1264 1214 1265 1215 sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); ··· 1324 1258 ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); 1325 1259 if (ret && !sdc->max_request) { 1326 1260 dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", 1327 - DMA_CHAN_MAX_DRQ); 1328 - sdc->max_request = DMA_CHAN_MAX_DRQ; 1261 + DMA_CHAN_MAX_DRQ_A31); 1262 + sdc->max_request = DMA_CHAN_MAX_DRQ_A31; 1329 1263 } 1330 1264 1331 1265 /* ··· 1374 1308 goto err_reset_assert; 1375 1309 } 1376 1310 1311 + if (sdc->cfg->has_mbus_clk) { 1312 + ret = clk_prepare_enable(sdc->clk_mbus); 1313 + if (ret) { 1314 + dev_err(&pdev->dev, "Couldn't enable mbus clock\n"); 1315 + goto err_clk_disable; 1316 + } 1317 + } 1318 + 1377 1319 ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, 1378 1320 dev_name(&pdev->dev), sdc); 1379 1321 if (ret) { 1380 1322 dev_err(&pdev->dev, "Cannot request IRQ\n"); 1381 - goto err_clk_disable; 1323 + goto err_mbus_clk_disable; 1382 1324 } 1383 1325 1384 1326 ret = dma_async_device_register(&sdc->slave); ··· 1411 1337 dma_async_device_unregister(&sdc->slave); 1412 1338 err_irq_disable: 1413 1339 sun6i_kill_tasklet(sdc); 1340 + err_mbus_clk_disable: 1341 + clk_disable_unprepare(sdc->clk_mbus); 1414 1342 err_clk_disable: 1415 1343 clk_disable_unprepare(sdc->clk); 1416 1344 err_reset_assert: ··· 1431 1355 1432 1356 sun6i_kill_tasklet(sdc); 1433 1357 1358 + clk_disable_unprepare(sdc->clk_mbus); 1434 1359 clk_disable_unprepare(sdc->clk); 1435 1360 reset_control_assert(sdc->rstc); 1436 1361
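The sun6i rework exists because the H6 moved the channel-configuration fields: the DRQ numbers grow from 5 to 6 bits and the linear/IO mode selector moves from bit 5 (bit 21 on the destination side) to bit 8 (bit 24). Routing the encoding through per-SoC set_drq/set_mode callbacks keeps the prep paths layout-agnostic; using the driver's own static helpers purely for illustration, the same logical request encodes differently per part:

    u32 a31_cfg = 0, h6_cfg = 0;

    /* memory -> device on port 6, A31-style layout... */
    sun6i_set_drq_a31(&a31_cfg, DRQ_SDRAM, 6);          /* 5-bit DRQ fields */
    sun6i_set_mode_a31(&a31_cfg, LINEAR_MODE, IO_MODE); /* mode at bits 5/21 */

    /* ...and the same request on an H6 */
    sun6i_set_drq_h6(&h6_cfg, DRQ_SDRAM, 6);            /* 6-bit DRQ fields */
    sun6i_set_mode_h6(&h6_cfg, LINEAR_MODE, IO_MODE);   /* mode at bits 8/24 */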
+10 -2
drivers/dma/tegra20-apb-dma.c
··· 977 977 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 978 978 } 979 979 980 - if (flags & DMA_PREP_INTERRUPT) 980 + if (flags & DMA_PREP_INTERRUPT) { 981 981 csr |= TEGRA_APBDMA_CSR_IE_EOC; 982 + } else { 983 + WARN_ON_ONCE(1); 984 + return NULL; 985 + } 982 986 983 987 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 984 988 ··· 1124 1120 csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 1125 1121 } 1126 1122 1127 - if (flags & DMA_PREP_INTERRUPT) 1123 + if (flags & DMA_PREP_INTERRUPT) { 1128 1124 csr |= TEGRA_APBDMA_CSR_IE_EOC; 1125 + } else { 1126 + WARN_ON_ONCE(1); 1127 + return NULL; 1128 + } 1129 1129 1130 1130 apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 1131 1131
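This turns a silent misconfiguration into an explicit failure: the driver only sets TEGRA_APBDMA_CSR_IE_EOC when DMA_PREP_INTERRUPT is passed, and its completion handling is driven by that end-of-chain interrupt, so a descriptor prepared without the flag could never complete. Prep now WARNs once and returns NULL instead; clients simply pass the flag:

    desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!desc)      /* now also NULL when the flag was omitted */
            return -EIO;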
+2 -2
drivers/dma/virt-dma.c
··· 98 98 } 99 99 spin_unlock_irq(&vc->lock); 100 100 101 - dmaengine_desc_callback_invoke(&cb, NULL); 101 + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); 102 102 103 103 list_for_each_entry_safe(vd, _vd, &head, node) { 104 104 dmaengine_desc_get_callback(&vd->tx, &cb); ··· 106 106 list_del(&vd->node); 107 107 vchan_vdesc_fini(vd); 108 108 109 - dmaengine_desc_callback_invoke(&cb, NULL); 109 + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); 110 110 } 111 111 } 112 112
+4
drivers/dma/virt-dma.h
··· 14 14 15 15 struct virt_dma_desc { 16 16 struct dma_async_tx_descriptor tx; 17 + struct dmaengine_result tx_result; 17 18 /* protected by vc.lock */ 18 19 struct list_head node; 19 20 }; ··· 62 61 vd->tx.flags = tx_flags; 63 62 vd->tx.tx_submit = vchan_tx_submit; 64 63 vd->tx.desc_free = vchan_tx_desc_free; 64 + 65 + vd->tx_result.result = DMA_TRANS_NOERROR; 66 + vd->tx_result.residue = 0; 65 67 66 68 spin_lock_irqsave(&vc->lock, flags); 67 69 list_add_tail(&vd->node, &vc->desc_allocated);
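Together these two hunks give every virt-dma based driver per-descriptor error reporting: each descriptor embeds a dmaengine_result, preset at prep time to DMA_TRANS_NOERROR with zero residue, and the completion paths hand it to callbacks instead of NULL. A driver flags a failure before completing, and a client observes it through the callback_result hook; roughly (names hypothetical):

    /* driver side: record the outcome before vchan_cookie_complete() */
    vd->tx_result.result = DMA_TRANS_READ_FAILED;
    vd->tx_result.residue = bytes_not_transferred;

    /* client side: inspect it in the result-aware completion callback */
    static void foo_dma_done(void *arg, const struct dmaengine_result *res)
    {
            if (res->result != DMA_TRANS_NOERROR)
                    pr_warn("dma failed, residue %u\n", res->residue);
    }

    desc->callback_result = foo_dma_done;
    desc->callback_param = arg;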
+1 -3
drivers/dma/xilinx/xilinx_dma.c
··· 1095 1095 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) 1096 1096 { 1097 1097 struct xilinx_vdma_config *config = &chan->config; 1098 - struct xilinx_dma_tx_descriptor *desc, *tail_desc; 1098 + struct xilinx_dma_tx_descriptor *desc; 1099 1099 u32 reg, j; 1100 1100 struct xilinx_vdma_tx_segment *segment, *last = NULL; 1101 1101 int i = 0; ··· 1112 1112 1113 1113 desc = list_first_entry(&chan->pending_list, 1114 1114 struct xilinx_dma_tx_descriptor, node); 1115 - tail_desc = list_last_entry(&chan->pending_list, 1116 - struct xilinx_dma_tx_descriptor, node); 1117 1115 1118 1116 /* Configure the hardware using info in the config structure */ 1119 1117 if (chan->has_vflip) {
+1 -1
drivers/misc/pci_endpoint_test.c
··· 793 793 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, 794 794 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, 795 795 { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, 796 - { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, 796 + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) }, 797 797 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), 798 798 .driver_data = (kernel_ulong_t)&am654_data 799 799 },
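PCI_DEVICE_DATA() requires the ID to exist under the PCI_DEVICE_ID_<vendor>_<name> naming convention (hence the pci_ids.h addition below) and expands to a full match entry with wildcarded subsystem IDs; per its definition in include/linux/pci.h, the line above is equivalent to:

    { .vendor = PCI_VENDOR_ID_SYNOPSYS,
      .device = PCI_DEVICE_ID_SYNOPSYS_EDDA,
      .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
      .driver_data = (kernel_ulong_t)(NULL) },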
+1 -1
drivers/soc/tegra/fuse/fuse-tegra20.c
··· 99 99 dma_cap_zero(mask); 100 100 dma_cap_set(DMA_SLAVE, mask); 101 101 102 - fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL); 102 + fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL); 103 103 if (!fuse->apbdma.chan) 104 104 return -EPROBE_DEFER; 105 105
+47
include/linux/dma/edma.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 4 + * Synopsys DesignWare eDMA core driver 5 + * 6 + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 7 + */ 8 + 9 + #ifndef _DW_EDMA_H 10 + #define _DW_EDMA_H 11 + 12 + #include <linux/device.h> 13 + #include <linux/dmaengine.h> 14 + 15 + struct dw_edma; 16 + 17 + /** 18 + * struct dw_edma_chip - representation of DesignWare eDMA controller hardware 19 + * @dev: struct device of the eDMA controller 20 + * @id: instance ID 21 + * @irq: irq line 22 + * @dw: struct dw_edma that is filled by dw_edma_probe() 23 + */ 24 + struct dw_edma_chip { 25 + struct device *dev; 26 + int id; 27 + int irq; 28 + struct dw_edma *dw; 29 + }; 30 + 31 + /* Export to the platform drivers */ 32 + #if IS_ENABLED(CONFIG_DW_EDMA) 33 + int dw_edma_probe(struct dw_edma_chip *chip); 34 + int dw_edma_remove(struct dw_edma_chip *chip); 35 + #else 36 + static inline int dw_edma_probe(struct dw_edma_chip *chip) 37 + { 38 + return -ENODEV; 39 + } 40 + 41 + static inline int dw_edma_remove(struct dw_edma_chip *chip) 42 + { 43 + return 0; 44 + } 45 + #endif /* CONFIG_DW_EDMA */ 46 + 47 + #endif /* _DW_EDMA_H */
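This header is the whole contract between the eDMA core and its bus glue: the glue fills in a dw_edma_chip and calls dw_edma_probe(), and per the kernel-doc chip->dw is filled in during probe; the IS_ENABLED() stubs keep glue drivers buildable when CONFIG_DW_EDMA is off. A skeletal sketch, deliberately incomplete (the real PCIe glue in this series also describes register and linked-list regions before probing):

    static int foo_edma_setup(struct pci_dev *pdev)
    {
            struct dw_edma_chip *chip;

            chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            chip->dev = &pdev->dev;
            chip->id = pdev->devfn;
            chip->irq = pdev->irq;

            return dw_edma_probe(chip);
    }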
+8 -4
include/linux/dmaengine.h
··· 1302 1302 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); 1303 1303 void dma_issue_pending_all(void); 1304 1304 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1305 - dma_filter_fn fn, void *fn_param); 1305 + dma_filter_fn fn, void *fn_param, 1306 + struct device_node *np); 1306 1307 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); 1307 1308 1308 1309 struct dma_chan *dma_request_chan(struct device *dev, const char *name); ··· 1328 1327 { 1329 1328 } 1330 1329 static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1331 - dma_filter_fn fn, void *fn_param) 1330 + dma_filter_fn fn, 1331 + void *fn_param, 1332 + struct device_node *np) 1332 1333 { 1333 1334 return NULL; 1334 1335 } ··· 1402 1399 void dma_run_dependencies(struct dma_async_tx_descriptor *tx); 1403 1400 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); 1404 1401 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); 1405 - #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) 1402 + #define dma_request_channel(mask, x, y) \ 1403 + __dma_request_channel(&(mask), x, y, NULL) 1406 1404 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ 1407 1405 __dma_request_slave_channel_compat(&(mask), x, y, dev, name) 1408 1406 ··· 1421 1417 if (!fn || !fn_param) 1422 1418 return NULL; 1423 1419 1424 - return __dma_request_channel(mask, fn, fn_param); 1420 + return __dma_request_channel(mask, fn, fn_param, NULL); 1425 1421 } 1426 1422 #endif /* DMAENGINE_H */
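The extra struct device_node argument lets the dmaengine core match candidate channels against a specific DT node, replacing the open-coded checks this series removes from individual drivers. Legacy filter-based callers (like the tegra20 fuse change above) are unaffected, because the dma_request_channel() macro now forwards NULL, as in this snippet (my_filter_fn and my_filter_param are placeholders):

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* expands to __dma_request_channel(&mask, my_filter_fn, my_filter_param, NULL) */
    chan = dma_request_channel(mask, my_filter_fn, my_filter_param);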
+19
include/linux/fpga/adi-axi-common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Analog Devices AXI common registers & definitions 4 + * 5 + * Copyright 2019 Analog Devices Inc. 6 + * 7 + * https://wiki.analog.com/resources/fpga/docs/axi_ip 8 + * https://wiki.analog.com/resources/fpga/docs/hdl/regmap 9 + */ 10 + 11 + #ifndef ADI_AXI_COMMON_H_ 12 + #define ADI_AXI_COMMON_H_ 13 + 14 + #define ADI_AXI_REG_VERSION 0x0000 15 + 16 + #define ADI_AXI_PCORE_VER(major, minor, patch) \ 17 + (((major) << 16) | ((minor) << 8) | (patch)) 18 + 19 + #endif /* ADI_AXI_COMMON_H_ */
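ADI_AXI_PCORE_VER() packs major.minor.patch in the same layout the core reports through its VERSION register, so a read-back value can be compared directly against a required minimum. For example, with base and the 4.0.a floor chosen purely for illustration (ADI patch levels are conventionally letters):

    u32 version = readl(base + ADI_AXI_REG_VERSION);

    /* refuse to bind to cores older than 4.0.a */
    if (version < ADI_AXI_PCORE_VER(4, 0, 'a'))
            return -ENODEV;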
+1
include/linux/pci_ids.h
··· 2367 2367 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd 2368 2368 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce 2369 2369 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf 2370 + #define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda 2370 2371 2371 2372 #define PCI_VENDOR_ID_USR 0x16ec 2372 2373
-1
include/linux/platform_data/dma-imx.h
··· 52 52 int dma_request2; /* secondary DMA request line */ 53 53 enum sdma_peripheral_type peripheral_type; 54 54 int priority; 55 - struct device_node *of_node; 56 55 }; 57 56 58 57 static inline int imx_dma_is_ipu(struct dma_chan *chan)
-49
include/linux/sudmac.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Header for the SUDMAC driver 4 - * 5 - * Copyright (C) 2013 Renesas Solutions Corp. 6 - */ 7 - #ifndef SUDMAC_H 8 - #define SUDMAC_H 9 - 10 - #include <linux/dmaengine.h> 11 - #include <linux/shdma-base.h> 12 - #include <linux/types.h> 13 - 14 - /* Used by slave DMA clients to request DMA to/from a specific peripheral */ 15 - struct sudmac_slave { 16 - struct shdma_slave shdma_slave; /* Set by the platform */ 17 - }; 18 - 19 - /* 20 - * Supplied by platforms to specify, how a DMA channel has to be configured for 21 - * a certain peripheral 22 - */ 23 - struct sudmac_slave_config { 24 - int slave_id; 25 - }; 26 - 27 - struct sudmac_channel { 28 - unsigned long offset; 29 - unsigned long config; 30 - unsigned long wait; /* The configuable range is 0 to 3 */ 31 - unsigned long dint_end_bit; 32 - }; 33 - 34 - struct sudmac_pdata { 35 - const struct sudmac_slave_config *slave; 36 - int slave_num; 37 - const struct sudmac_channel *channel; 38 - int channel_num; 39 - }; 40 - 41 - /* Definitions for the sudmac_channel.config */ 42 - #define SUDMAC_TX_BUFFER_MODE BIT(0) 43 - #define SUDMAC_RX_END_MODE BIT(1) 44 - 45 - /* Definitions for the sudmac_channel.dint_end_bit */ 46 - #define SUDMAC_DMA_BIT_CH0 BIT(0) 47 - #define SUDMAC_DMA_BIT_CH1 BIT(1) 48 - 49 - #endif