Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
"New HW support:
- Freescale i.MX8ULP edma support in edma driver
- StarFive JH8100 DMA support in Synopsys axi-dmac driver

Updates:
- Tracing support for Freescale edma driver, updates to dpaa2 driver
- Remove unused QCom hidma DT support
- Support for i2c dma in imx-sdma
- Maintainers update for idxd and edma drivers"

* tag 'dmaengine-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (42 commits)
MAINTAINERS: Update role for IDXD driver
dmaengine: fsl-edma: use _Generic to handle difference type
dmaengine: fsl-edma: add trace event support
dmaengine: idxd: Avoid unnecessary destruction of file_ida
dmaengine: xilinx: xdma: fix module autoloading
dt-bindings: dma: fsl-edma: allow 'power-domains' property
dt-bindings: dma: fsl-edma: remove 'clocks' from required
dmaengine: fsl-dpaa2-qdma: Fix kernel-doc check warning
dmaengine: imx-sdma: Add i2c dma support
dmaengine: imx-sdma: utilize compiler to calculate ADDRS_ARRAY_SIZE_V<n>
dt-bindings: fsl-imx-sdma: Add I2C peripheral types ID
dt-bindings: fsl-dma: fsl-edma: clean up unused "fsl,imx8qm-adma" compatible string
dmaengine: fsl-edma: clean up unused "fsl,imx8qm-adma" compatible string
dt-bindings: dma: Drop unused QCom hidma binding
dmaengine: qcom: Drop hidma DT support
dmaengine: pl08x: Use kcalloc() instead of kzalloc()
dmaengine: fsl-dpaa2-qdma: Update DPDMAI interfaces to version 3
dmaengine: fsl-edma: fix miss mutex unlock at an error return path
dmaengine: pch_dma: remove unused function chan2parent
dmaengine: fsl-dpaa2-qdma: Add dpdmai_cmd_open
...

+620 -574
+96 -43
Documentation/devicetree/bindings/dma/fsl,edma.yaml
··· 21 21 - enum: 22 22 - fsl,vf610-edma 23 23 - fsl,imx7ulp-edma 24 - - fsl,imx8qm-adma 25 24 - fsl,imx8qm-edma 25 + - fsl,imx8ulp-edma 26 26 - fsl,imx93-edma3 27 27 - fsl,imx93-edma4 28 28 - fsl,imx95-edma5 ··· 43 43 maxItems: 64 44 44 45 45 "#dma-cells": 46 + description: | 47 + Specifies the number of cells needed to encode an DMA channel. 48 + 49 + Encode for cells number 2: 50 + cell 0: index of dma channel mux instance. 51 + cell 1: peripheral dma request id. 52 + 53 + Encode for cells number 3: 54 + cell 0: peripheral dma request id. 55 + cell 1: dma channel priority. 56 + cell 2: bitmask, defined at include/dt-bindings/dma/fsl-edma.h 46 57 enum: 47 58 - 2 48 59 - 3 ··· 64 53 65 54 clocks: 66 55 minItems: 1 67 - maxItems: 2 56 + maxItems: 33 68 57 69 58 clock-names: 70 59 minItems: 1 71 - maxItems: 2 60 + maxItems: 33 61 + 62 + power-domains: 63 + description: 64 + The number of power domains matches the number of channels, arranged 65 + in ascending order according to their associated DMA channels. 
66 + minItems: 1 67 + maxItems: 64 72 68 73 69 big-endian: 74 70 description: | ··· 88 70 - compatible 89 71 - reg 90 72 - interrupts 91 - - clocks 92 73 - dma-channels 93 74 94 75 allOf: ··· 97 80 compatible: 98 81 contains: 99 82 enum: 100 - - fsl,imx8qm-adma 101 83 - fsl,imx8qm-edma 102 84 - fsl,imx93-edma3 103 85 - fsl,imx93-edma4 ··· 124 108 properties: 125 109 clocks: 126 110 minItems: 2 111 + maxItems: 2 127 112 clock-names: 128 113 items: 129 114 - const: dmamux0 ··· 153 136 properties: 154 137 clock: 155 138 minItems: 2 139 + maxItems: 2 156 140 clock-names: 157 141 items: 158 142 - const: dma ··· 168 150 const: 2 169 151 dma-channels: 170 152 const: 32 153 + 154 + - if: 155 + properties: 156 + compatible: 157 + contains: 158 + const: fsl,imx8ulp-edma 159 + then: 160 + properties: 161 + clocks: 162 + minItems: 33 163 + clock-names: 164 + minItems: 33 165 + items: 166 + oneOf: 167 + - const: dma 168 + - pattern: "^ch(0[0-9]|[1-2][0-9]|3[01])$" 169 + 170 + interrupt-names: false 171 + interrupts: 172 + minItems: 32 173 + "#dma-cells": 174 + const: 3 175 + 176 + - if: 177 + properties: 178 + compatible: 179 + contains: 180 + enum: 181 + - fsl,vf610-edma 182 + - fsl,imx7ulp-edma 183 + - fsl,imx93-edma3 184 + - fsl,imx93-edma4 185 + - fsl,imx95-edma5 186 + - fsl,imx8ulp-edma 187 + - fsl,ls1028a-edma 188 + then: 189 + required: 190 + - clocks 191 + 192 + - if: 193 + properties: 194 + compatible: 195 + contains: 196 + enum: 197 + - fsl,imx8qm-adma 198 + - fsl,imx8qm-edma 199 + then: 200 + required: 201 + - power-domains 202 + else: 203 + properties: 204 + power-domains: false 171 205 172 206 unevaluatedProperties: false 173 207 ··· 276 206 277 207 - | 278 208 #include <dt-bindings/interrupt-controller/arm-gic.h> 279 - #include <dt-bindings/clock/imx93-clock.h> 209 + #include <dt-bindings/firmware/imx/rsrc.h> 280 210 281 - dma-controller@44000000 { 282 - compatible = "fsl,imx93-edma3"; 283 - reg = <0x44000000 0x200000>; 211 + dma-controller@5a9f0000 { 212 + 
compatible = "fsl,imx8qm-edma"; 213 + reg = <0x5a9f0000 0x90000>; 284 214 #dma-cells = <3>; 285 - dma-channels = <31>; 286 - interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>, 287 - <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, 288 - <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, 289 - <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, 290 - <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, 291 - <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, 292 - <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, 293 - <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, 294 - <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, 295 - <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, 296 - <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, 297 - <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>, 298 - <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>, 299 - <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, 300 - <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, 301 - <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, 302 - <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>, 303 - <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>, 304 - <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>, 305 - <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>, 306 - <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>, 307 - <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, 308 - <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>, 309 - <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, 310 - <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, 311 - <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, 312 - <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, 313 - <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, 314 - <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 315 - <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, 316 - <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; 317 - clocks = <&clk IMX93_CLK_EDMA1_GATE>; 318 - clock-names = "dma"; 215 + dma-channels = <8>; 216 + interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>, 217 + <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>, 218 + <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>, 219 + <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>, 220 + <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>, 221 + <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>, 222 + <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>, 223 + <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>; 224 + power-domains = <&pd IMX_SC_R_DMA_3_CH0>, 225 + <&pd IMX_SC_R_DMA_3_CH1>, 226 + <&pd IMX_SC_R_DMA_3_CH2>, 227 + <&pd IMX_SC_R_DMA_3_CH3>, 228 + <&pd 
IMX_SC_R_DMA_3_CH4>, 229 + <&pd IMX_SC_R_DMA_3_CH5>, 230 + <&pd IMX_SC_R_DMA_3_CH6>, 231 + <&pd IMX_SC_R_DMA_3_CH7>; 319 232 };
+1
Documentation/devicetree/bindings/dma/fsl,imx-sdma.yaml
··· 94 94 - SAI: 24 95 95 - Multi SAI: 25 96 96 - HDMI Audio: 26 97 + - I2C: 27 97 98 98 99 The third cell: transfer priority ID 99 100 enum:
-95
Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
··· 1 - Qualcomm Technologies HIDMA Management interface 2 - 3 - Qualcomm Technologies HIDMA is a high speed DMA device. It only supports 4 - memcpy and memset capabilities. It has been designed for virtualized 5 - environments. 6 - 7 - Each HIDMA HW instance consists of multiple DMA channels. These channels 8 - share the same bandwidth. The bandwidth utilization can be partitioned 9 - among channels based on the priority and weight assignments. 10 - 11 - There are only two priority levels and 15 weigh assignments possible. 12 - 13 - Other parameters here determine how much of the system bus this HIDMA 14 - instance can use like maximum read/write request and number of bytes to 15 - read/write in a single burst. 16 - 17 - Main node required properties: 18 - - compatible: "qcom,hidma-mgmt-1.0"; 19 - - reg: Address range for DMA device 20 - - dma-channels: Number of channels supported by this DMA controller. 21 - - max-write-burst-bytes: Maximum write burst in bytes that HIDMA can 22 - occupy the bus for in a single transaction. A memcpy requested is 23 - fragmented to multiples of this amount. This parameter is used while 24 - writing into destination memory. Setting this value incorrectly can 25 - starve other peripherals in the system. 26 - - max-read-burst-bytes: Maximum read burst in bytes that HIDMA can 27 - occupy the bus for in a single transaction. A memcpy request is 28 - fragmented to multiples of this amount. This parameter is used while 29 - reading the source memory. Setting this value incorrectly can starve 30 - other peripherals in the system. 31 - - max-write-transactions: This value is how many times a write burst is 32 - applied back to back while writing to the destination before yielding 33 - the bus. 34 - - max-read-transactions: This value is how many times a read burst is 35 - applied back to back while reading the source before yielding the bus. 36 - - channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC. 
37 - Once a reset is applied to the HW, HW starts a timer for reset operation 38 - to confirm. If reset is not completed within this time, HW reports reset 39 - failure. 40 - 41 - Sub-nodes: 42 - 43 - HIDMA has one or more DMA channels that are used to move data from one 44 - memory location to another. 45 - 46 - When the OS is not in control of the management interface (i.e. it's a guest), 47 - the channel nodes appear on their own, not under a management node. 48 - 49 - Required properties: 50 - - compatible: must contain "qcom,hidma-1.0" for initial HW or 51 - "qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW. 52 - - reg: Addresses for the transfer and event channel 53 - - interrupts: Should contain the event interrupt 54 - - desc-count: Number of asynchronous requests this channel can handle 55 - - iommus: required a iommu node 56 - 57 - Optional properties for MSI: 58 - - msi-parent : See the generic MSI binding described in 59 - devicetree/bindings/interrupt-controller/msi.txt for a description of the 60 - msi-parent property. 61 - 62 - Example: 63 - 64 - Hypervisor OS configuration: 65 - 66 - hidma-mgmt@f9984000 = { 67 - compatible = "qcom,hidma-mgmt-1.0"; 68 - reg = <0xf9984000 0x15000>; 69 - dma-channels = <6>; 70 - max-write-burst-bytes = <1024>; 71 - max-read-burst-bytes = <1024>; 72 - max-write-transactions = <31>; 73 - max-read-transactions = <31>; 74 - channel-reset-timeout-cycles = <0x500>; 75 - 76 - hidma_24: dma-controller@5c050000 { 77 - compatible = "qcom,hidma-1.0"; 78 - reg = <0 0x5c050000 0x0 0x1000>, 79 - <0 0x5c0b0000 0x0 0x1000>; 80 - interrupts = <0 389 0>; 81 - desc-count = <10>; 82 - iommus = <&system_mmu>; 83 - }; 84 - }; 85 - 86 - Guest OS configuration: 87 - 88 - hidma_24: dma-controller@5c050000 { 89 - compatible = "qcom,hidma-1.0"; 90 - reg = <0 0x5c050000 0x0 0x1000>, 91 - <0 0x5c0b0000 0x0 0x1000>; 92 - interrupts = <0 389 0>; 93 - desc-count = <10>; 94 - iommus = <&system_mmu>; 95 - };
+21 -21
Documentation/devicetree/bindings/dma/snps,dma-spear1340.yaml
··· 93 93 data-width: 94 94 $ref: /schemas/types.yaml#/definitions/uint32-array 95 95 description: Data bus width per each DMA master in bytes. 96 + minItems: 1 97 + maxItems: 4 96 98 items: 97 - maxItems: 4 98 - items: 99 - enum: [4, 8, 16, 32] 99 + enum: [4, 8, 16, 32] 100 100 101 101 data_width: 102 102 $ref: /schemas/types.yaml#/definitions/uint32-array ··· 106 106 deprecated. It' usage is discouraged in favor of data-width one. Moreover 107 107 the property incorrectly permits to define data-bus width of 8 and 16 108 108 bits, which is impossible in accordance with DW DMAC IP-core data book. 109 + minItems: 1 110 + maxItems: 4 109 111 items: 110 - maxItems: 4 111 - items: 112 - enum: 113 - - 0 # 8 bits 114 - - 1 # 16 bits 115 - - 2 # 32 bits 116 - - 3 # 64 bits 117 - - 4 # 128 bits 118 - - 5 # 256 bits 119 - default: 0 112 + enum: 113 + - 0 # 8 bits 114 + - 1 # 16 bits 115 + - 2 # 32 bits 116 + - 3 # 64 bits 117 + - 4 # 128 bits 118 + - 5 # 256 bits 119 + default: 0 120 120 121 121 multi-block: 122 122 $ref: /schemas/types.yaml#/definitions/uint32-array 123 123 description: | 124 124 LLP-based multi-block transfer supported by hardware per 125 125 each DMA channel. 126 + minItems: 1 127 + maxItems: 8 126 128 items: 127 - maxItems: 8 128 - items: 129 - enum: [0, 1] 130 - default: 1 129 + enum: [0, 1] 130 + default: 1 131 131 132 132 snps,max-burst-len: 133 133 $ref: /schemas/types.yaml#/definitions/uint32-array ··· 138 138 will be from 1 to max-burst-len words. It's an array property with one 139 139 cell per channel in the units determined by the value set in the 140 140 CTLx.SRC_TR_WIDTH/CTLx.DST_TR_WIDTH fields (data width). 141 + minItems: 1 142 + maxItems: 8 141 143 items: 142 - maxItems: 8 143 - items: 144 - enum: [4, 8, 16, 32, 64, 128, 256] 145 - default: 256 144 + enum: [4, 8, 16, 32, 64, 128, 256] 145 + default: 256 146 146 147 147 snps,dma-protection-control: 148 148 $ref: /schemas/types.yaml#/definitions/uint32
+1
Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
··· 21 21 - snps,axi-dma-1.01a 22 22 - intel,kmb-axi-dma 23 23 - starfive,jh7110-axi-dma 24 + - starfive,jh8100-axi-dma 24 25 25 26 reg: 26 27 minItems: 1
+1 -2
MAINTAINERS
··· 6181 6181 6182 6182 DESIGNWARE EDMA CORE IP DRIVER 6183 6183 M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> 6184 - R: Gustavo Pimentel <gustavo.pimentel@synopsys.com> 6185 6184 R: Serge Semin <fancer.lancer@gmail.com> 6186 6185 L: dmaengine@vger.kernel.org 6187 6186 S: Maintained ··· 11089 11090 11090 11091 INTEL IDXD DRIVER 11091 11092 M: Fenghua Yu <fenghua.yu@intel.com> 11092 - M: Dave Jiang <dave.jiang@intel.com> 11093 + R: Dave Jiang <dave.jiang@intel.com> 11093 11094 L: dmaengine@vger.kernel.org 11094 11095 S: Supported 11095 11096 F: drivers/dma/idxd/*
+4 -2
drivers/dma/Makefile
··· 31 31 obj-$(CONFIG_DW_DMAC_CORE) += dw/ 32 32 obj-$(CONFIG_DW_EDMA) += dw-edma/ 33 33 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o 34 + fsl-edma-trace-$(CONFIG_TRACING) := fsl-edma-trace.o 35 + CFLAGS_fsl-edma-trace.o := -I$(src) 34 36 obj-$(CONFIG_FSL_DMA) += fsldma.o 35 - fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o 37 + fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y} 36 38 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o 37 - mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o 39 + mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y} 38 40 obj-$(CONFIG_MCF_EDMA) += mcf-edma.o 39 41 obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o 40 42 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+2 -2
drivers/dma/amba-pl08x.c
··· 2855 2855 } 2856 2856 2857 2857 /* Initialize physical channels */ 2858 - pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), 2859 - GFP_KERNEL); 2858 + pl08x->phy_chans = kcalloc(vd->channels, sizeof(*pl08x->phy_chans), 2859 + GFP_KERNEL); 2860 2860 if (!pl08x->phy_chans) { 2861 2861 ret = -ENOMEM; 2862 2862 goto out_no_phychans;
+34 -44
drivers/dma/dma-axi-dmac.c
··· 1002 1002 return 0; 1003 1003 } 1004 1004 1005 + static void axi_dmac_tasklet_kill(void *task) 1006 + { 1007 + tasklet_kill(task); 1008 + } 1009 + 1010 + static void axi_dmac_free_dma_controller(void *of_node) 1011 + { 1012 + of_dma_controller_free(of_node); 1013 + } 1014 + 1005 1015 static int axi_dmac_probe(struct platform_device *pdev) 1006 1016 { 1007 1017 struct dma_device *dma_dev; ··· 1035 1025 if (IS_ERR(dmac->base)) 1036 1026 return PTR_ERR(dmac->base); 1037 1027 1038 - dmac->clk = devm_clk_get(&pdev->dev, NULL); 1028 + dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL); 1039 1029 if (IS_ERR(dmac->clk)) 1040 1030 return PTR_ERR(dmac->clk); 1041 - 1042 - ret = clk_prepare_enable(dmac->clk); 1043 - if (ret < 0) 1044 - return ret; 1045 1031 1046 1032 version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); 1047 1033 ··· 1047 1041 ret = axi_dmac_parse_dt(&pdev->dev, dmac); 1048 1042 1049 1043 if (ret < 0) 1050 - goto err_clk_disable; 1044 + return ret; 1051 1045 1052 1046 INIT_LIST_HEAD(&dmac->chan.active_descs); 1053 1047 ··· 1078 1072 1079 1073 ret = axi_dmac_detect_caps(dmac, version); 1080 1074 if (ret) 1081 - goto err_clk_disable; 1075 + return ret; 1082 1076 1083 1077 dma_dev->copy_align = (dmac->chan.address_align_mask + 1); 1084 1078 ··· 1094 1088 !AXI_DMAC_DST_COHERENT_GET(ret)) { 1095 1089 dev_err(dmac->dma_dev.dev, 1096 1090 "Coherent DMA not supported in hardware"); 1097 - ret = -EINVAL; 1098 - goto err_clk_disable; 1091 + return -EINVAL; 1099 1092 } 1100 1093 } 1101 1094 1102 - ret = dma_async_device_register(dma_dev); 1095 + ret = dmaenginem_async_device_register(dma_dev); 1103 1096 if (ret) 1104 - goto err_clk_disable; 1097 + return ret; 1098 + 1099 + /* 1100 + * Put the action in here so it get's done before unregistering the DMA 1101 + * device. 
1102 + */ 1103 + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill, 1104 + &dmac->chan.vchan.task); 1105 + if (ret) 1106 + return ret; 1105 1107 1106 1108 ret = of_dma_controller_register(pdev->dev.of_node, 1107 1109 of_dma_xlate_by_chan_id, dma_dev); 1108 1110 if (ret) 1109 - goto err_unregister_device; 1111 + return ret; 1110 1112 1111 - ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED, 1112 - dev_name(&pdev->dev), dmac); 1113 + ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller, 1114 + pdev->dev.of_node); 1113 1115 if (ret) 1114 - goto err_unregister_of; 1116 + return ret; 1115 1117 1116 - platform_set_drvdata(pdev, dmac); 1118 + ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler, 1119 + IRQF_SHARED, dev_name(&pdev->dev), dmac); 1120 + if (ret) 1121 + return ret; 1117 1122 1118 1123 regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, 1119 1124 &axi_dmac_regmap_config); 1120 - if (IS_ERR(regmap)) { 1121 - ret = PTR_ERR(regmap); 1122 - goto err_free_irq; 1123 - } 1124 1125 1125 - return 0; 1126 - 1127 - err_free_irq: 1128 - free_irq(dmac->irq, dmac); 1129 - err_unregister_of: 1130 - of_dma_controller_free(pdev->dev.of_node); 1131 - err_unregister_device: 1132 - dma_async_device_unregister(&dmac->dma_dev); 1133 - err_clk_disable: 1134 - clk_disable_unprepare(dmac->clk); 1135 - 1136 - return ret; 1137 - } 1138 - 1139 - static void axi_dmac_remove(struct platform_device *pdev) 1140 - { 1141 - struct axi_dmac *dmac = platform_get_drvdata(pdev); 1142 - 1143 - of_dma_controller_free(pdev->dev.of_node); 1144 - free_irq(dmac->irq, dmac); 1145 - tasklet_kill(&dmac->chan.vchan.task); 1146 - dma_async_device_unregister(&dmac->dma_dev); 1147 - clk_disable_unprepare(dmac->clk); 1126 + return PTR_ERR_OR_ZERO(regmap); 1148 1127 } 1149 1128 1150 1129 static const struct of_device_id axi_dmac_of_match_table[] = { ··· 1144 1153 .of_match_table = axi_dmac_of_match_table, 1145 1154 }, 1146 1155 .probe = 
axi_dmac_probe, 1147 - .remove_new = axi_dmac_remove, 1148 1156 }; 1149 1157 module_platform_driver(axi_dmac_driver); 1150 1158
+27 -11
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
··· 302 302 kfree(desc); 303 303 return NULL; 304 304 } 305 + desc->nr_hw_descs = num; 305 306 306 307 return desc; 307 308 } ··· 329 328 static void axi_desc_put(struct axi_dma_desc *desc) 330 329 { 331 330 struct axi_dma_chan *chan = desc->chan; 332 - int count = atomic_read(&chan->descs_allocated); 331 + int count = desc->nr_hw_descs; 333 332 struct axi_dma_hw_desc *hw_desc; 334 333 int descs_put; 335 334 ··· 1140 1139 /* Remove the completed descriptor from issued list before completing */ 1141 1140 list_del(&vd->node); 1142 1141 vchan_cookie_complete(vd); 1143 - 1144 - /* Submit queued descriptors after processing the completed ones */ 1145 - axi_chan_start_first_queued(chan); 1146 1142 } 1147 1143 1148 1144 out: ··· 1443 1445 return 0; 1444 1446 } 1445 1447 1448 + static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip) 1449 + { 1450 + int irq_count = platform_irq_count(pdev); 1451 + int ret; 1452 + 1453 + for (int i = 0; i < irq_count; i++) { 1454 + chip->irq[i] = platform_get_irq(pdev, i); 1455 + if (chip->irq[i] < 0) 1456 + return chip->irq[i]; 1457 + ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt, 1458 + IRQF_SHARED, KBUILD_MODNAME, chip); 1459 + if (ret < 0) 1460 + return ret; 1461 + } 1462 + 1463 + return 0; 1464 + } 1465 + 1446 1466 static int dw_probe(struct platform_device *pdev) 1447 1467 { 1448 1468 struct axi_dma_chip *chip; ··· 1486 1470 chip->dw = dw; 1487 1471 chip->dev = &pdev->dev; 1488 1472 chip->dw->hdata = hdata; 1489 - 1490 - chip->irq = platform_get_irq(pdev, 0); 1491 - if (chip->irq < 0) 1492 - return chip->irq; 1493 1473 1494 1474 chip->regs = devm_platform_ioremap_resource(pdev, 0); 1495 1475 if (IS_ERR(chip->regs)) ··· 1527 1515 if (!dw->chan) 1528 1516 return -ENOMEM; 1529 1517 1530 - ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt, 1531 - IRQF_SHARED, KBUILD_MODNAME, chip); 1518 + ret = axi_req_irqs(pdev, chip); 1532 1519 if (ret) 1533 1520 return ret; 1534 1521 ··· 
1640 1629 pm_runtime_disable(chip->dev); 1641 1630 axi_dma_suspend(chip); 1642 1631 1643 - devm_free_irq(chip->dev, chip->irq, chip); 1632 + for (i = 0; i < DMAC_MAX_CHANNELS; i++) 1633 + if (chip->irq[i] > 0) 1634 + devm_free_irq(chip->dev, chip->irq[i], chip); 1644 1635 1645 1636 of_dma_controller_free(chip->dev->of_node); 1646 1637 ··· 1666 1653 }, { 1667 1654 .compatible = "starfive,jh7110-axi-dma", 1668 1655 .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), 1656 + }, { 1657 + .compatible = "starfive,jh8100-axi-dma", 1658 + .data = (void *)AXI_DMA_FLAG_HAS_RESETS, 1669 1659 }, 1670 1660 {} 1671 1661 };
+2 -1
drivers/dma/dw-axi-dmac/dw-axi-dmac.h
··· 65 65 66 66 struct axi_dma_chip { 67 67 struct device *dev; 68 - int irq; 68 + int irq[DMAC_MAX_CHANNELS]; 69 69 void __iomem *regs; 70 70 void __iomem *apb_regs; 71 71 struct clk *core_clk; ··· 104 104 u32 completed_blocks; 105 105 u32 length; 106 106 u32 period_len; 107 + u32 nr_hw_descs; 107 108 }; 108 109 109 110 struct axi_dma_chan_config {
+7 -7
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
··· 362 362 363 363 for (i = 0; i < priv->num_pairs; i++) { 364 364 err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, 365 - i, &priv->rx_queue_attr[i]); 365 + i, 0, &priv->rx_queue_attr[i]); 366 366 if (err) { 367 367 dev_err(dev, "dpdmai_get_rx_queue() failed\n"); 368 368 goto exit; ··· 370 370 ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; 371 371 372 372 err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, 373 - i, &priv->tx_fqid[i]); 373 + i, 0, &priv->tx_queue_attr[i]); 374 374 if (err) { 375 375 dev_err(dev, "dpdmai_get_tx_queue() failed\n"); 376 376 goto exit; 377 377 } 378 - ppriv->req_fqid = priv->tx_fqid[i]; 379 - ppriv->prio = i; 378 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid; 379 + ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY; 380 380 ppriv->priv = priv; 381 381 ppriv++; 382 382 } ··· 542 542 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; 543 543 rx_queue_cfg.dest_cfg.priority = ppriv->prio; 544 544 err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, 545 - rx_queue_cfg.dest_cfg.priority, 545 + rx_queue_cfg.dest_cfg.priority, 0, 546 546 &rx_queue_cfg); 547 547 if (err) { 548 548 dev_err(dev, "dpdmai_set_rx_queue() failed\n"); ··· 642 642 for (i = 0; i < dpaa2_qdma->n_chans; i++) { 643 643 dpaa2_chan = &dpaa2_qdma->chans[i]; 644 644 dpaa2_chan->qdma = dpaa2_qdma; 645 - dpaa2_chan->fqid = priv->tx_fqid[i % num]; 645 + dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid; 646 646 dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc; 647 647 vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev); 648 648 spin_lock_init(&dpaa2_chan->queue_lock); ··· 802 802 dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); 803 803 dpaa2_dpdmai_dpio_unbind(priv); 804 804 dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); 805 - dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle); 805 + dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle); 806 806 } 807 807 808 808 static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+3 -2
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
··· 6 6 7 7 #define DPAA2_QDMA_STORE_SIZE 16 8 8 #define NUM_CH 8 9 + #define DPAA2_QDMA_DEFAULT_PRIORITY 0 9 10 10 11 struct dpaa2_qdma_sd_d { 11 12 u32 rsv:32; ··· 123 122 struct dpaa2_qdma_engine *dpaa2_qdma; 124 123 struct dpaa2_qdma_priv_per_prio *ppriv; 125 124 126 - struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; 127 - u32 tx_fqid[DPDMAI_PRIO_NUM]; 125 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_MAX_QUEUE_NUM]; 126 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_MAX_QUEUE_NUM]; 128 127 }; 129 128 130 129 struct dpaa2_qdma_priv_per_prio {
+43 -70
drivers/dma/fsl-dpaa2-qdma/dpdmai.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 // Copyright 2019 NXP 3 3 4 + #include <linux/bitfield.h> 4 5 #include <linux/module.h> 5 6 #include <linux/types.h> 6 7 #include <linux/io.h> 7 8 #include <linux/fsl/mc.h> 8 9 #include "dpdmai.h" 9 10 11 + #define DEST_TYPE_MASK 0xF 12 + 10 13 struct dpdmai_rsp_get_attributes { 11 14 __le32 id; 12 15 u8 num_of_priorities; 13 - u8 pad0[3]; 16 + u8 num_of_queues; 17 + u8 pad0[2]; 14 18 __le16 major; 15 19 __le16 minor; 16 20 }; 17 21 18 22 struct dpdmai_cmd_queue { 19 23 __le32 dest_id; 20 - u8 priority; 21 - u8 queue; 24 + u8 dest_priority; 25 + union { 26 + u8 queue; 27 + u8 pri; 28 + }; 22 29 u8 dest_type; 23 - u8 pad; 30 + u8 queue_idx; 24 31 __le64 user_ctx; 25 32 union { 26 33 __le32 options; 27 34 __le32 fqid; 28 35 }; 29 - }; 36 + } __packed; 30 37 31 38 struct dpdmai_rsp_get_tx_queue { 32 39 __le64 pad; 33 40 __le32 fqid; 34 41 }; 35 42 36 - #define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ 37 - ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) 43 + struct dpdmai_cmd_open { 44 + __le32 dpdmai_id; 45 + } __packed; 38 46 39 - /* cmd, param, offset, width, type, arg_name */ 40 - #define DPDMAI_CMD_CREATE(cmd, cfg) \ 41 - do { \ 42 - MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\ 43 - MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\ 44 - } while (0) 47 + struct dpdmai_cmd_destroy { 48 + __le32 dpdmai_id; 49 + } __packed; 45 50 46 51 static inline u64 mc_enc(int lsoffset, int width, u64 val) 47 52 { ··· 73 68 int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, 74 69 int dpdmai_id, u16 *token) 75 70 { 71 + struct dpdmai_cmd_open *cmd_params; 76 72 struct fsl_mc_command cmd = { 0 }; 77 - __le64 *cmd_dpdmai_id; 78 73 int err; 79 74 80 75 /* prepare command */ 81 76 cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, 82 77 cmd_flags, 0); 83 78 84 - cmd_dpdmai_id = cmd.params; 85 - *cmd_dpdmai_id = cpu_to_le32(dpdmai_id); 79 + cmd_params = (struct dpdmai_cmd_open *)&cmd.params; 80 
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); 86 81 87 82 /* send command to mc*/ 88 83 err = mc_send_command(mc_io, &cmd); ··· 121 116 EXPORT_SYMBOL_GPL(dpdmai_close); 122 117 123 118 /** 124 - * dpdmai_create() - Create the DPDMAI object 125 - * @mc_io: Pointer to MC portal's I/O object 126 - * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 127 - * @cfg: Configuration structure 128 - * @token: Returned token; use in subsequent API calls 129 - * 130 - * Create the DPDMAI object, allocate required resources and 131 - * perform required initialization. 132 - * 133 - * The object can be created either by declaring it in the 134 - * DPL file, or by calling this function. 135 - * 136 - * This function returns a unique authentication token, 137 - * associated with the specific object ID and the specific MC 138 - * portal; this token must be used in all subsequent calls to 139 - * this specific object. For objects that are created using the 140 - * DPL file, call dpdmai_open() function to get an authentication 141 - * token first. 142 - * 143 - * Return: '0' on Success; Error code otherwise. 144 - */ 145 - int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags, 146 - const struct dpdmai_cfg *cfg, u16 *token) 147 - { 148 - struct fsl_mc_command cmd = { 0 }; 149 - int err; 150 - 151 - /* prepare command */ 152 - cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, 153 - cmd_flags, 0); 154 - DPDMAI_CMD_CREATE(cmd, cfg); 155 - 156 - /* send command to mc*/ 157 - err = mc_send_command(mc_io, &cmd); 158 - if (err) 159 - return err; 160 - 161 - /* retrieve response parameters */ 162 - *token = mc_cmd_hdr_read_token(&cmd); 163 - 164 - return 0; 165 - } 166 - 167 - /** 168 119 * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. 
169 120 * @mc_io: Pointer to MC portal's I/O object 170 121 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 122 + * @dpdmai_id: The object id; it must be a valid id within the container that created this object; 171 123 * @token: Token of DPDMAI object 172 124 * 173 125 * Return: '0' on Success; error code otherwise. 174 126 */ 175 - int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) 127 + int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token) 176 128 { 129 + struct dpdmai_cmd_destroy *cmd_params; 177 130 struct fsl_mc_command cmd = { 0 }; 178 131 179 132 /* prepare command */ 180 133 cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, 181 134 cmd_flags, token); 135 + 136 + cmd_params = (struct dpdmai_cmd_destroy *)&cmd.params; 137 + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); 182 138 183 139 /* send command to mc*/ 184 140 return mc_send_command(mc_io, &cmd); ··· 240 274 attr->version.major = le16_to_cpu(rsp_params->major); 241 275 attr->version.minor = le16_to_cpu(rsp_params->minor); 242 276 attr->num_of_priorities = rsp_params->num_of_priorities; 277 + attr->num_of_queues = rsp_params->num_of_queues; 243 278 244 279 return 0; 245 280 } ··· 251 284 * @mc_io: Pointer to MC portal's I/O object 252 285 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 253 286 * @token: Token of DPDMAI object 287 + * @queue_idx: DMA queue index 254 288 * @priority: Select the queue relative to number of 255 289 * priorities configured at DPDMAI creation 256 290 * @cfg: Rx queue configuration 257 291 * 258 292 * Return: '0' on Success; Error code otherwise. 
259 293 */ 260 - int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, 294 + int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx, 261 295 u8 priority, const struct dpdmai_rx_queue_cfg *cfg) 262 296 { 263 297 struct dpdmai_cmd_queue *cmd_params; ··· 270 302 271 303 cmd_params = (struct dpdmai_cmd_queue *)cmd.params; 272 304 cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); 273 - cmd_params->priority = cfg->dest_cfg.priority; 274 - cmd_params->queue = priority; 305 + cmd_params->dest_priority = cfg->dest_cfg.priority; 306 + cmd_params->pri = priority; 275 307 cmd_params->dest_type = cfg->dest_cfg.dest_type; 276 308 cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); 277 309 cmd_params->options = cpu_to_le32(cfg->options); 310 + cmd_params->queue_idx = queue_idx; 278 311 279 312 /* send command to mc*/ 280 313 return mc_send_command(mc_io, &cmd); ··· 287 318 * @mc_io: Pointer to MC portal's I/O object 288 319 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 289 320 * @token: Token of DPDMAI object 321 + * @queue_idx: DMA Queue index 290 322 * @priority: Select the queue relative to number of 291 323 * priorities configured at DPDMAI creation 292 324 * @attr: Returned Rx queue attributes 293 325 * 294 326 * Return: '0' on Success; Error code otherwise. 
295 327 */ 296 - int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, 328 + int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx, 297 329 u8 priority, struct dpdmai_rx_queue_attr *attr) 298 330 { 299 331 struct dpdmai_cmd_queue *cmd_params; ··· 307 337 308 338 cmd_params = (struct dpdmai_cmd_queue *)cmd.params; 309 339 cmd_params->queue = priority; 340 + cmd_params->queue_idx = queue_idx; 310 341 311 342 /* send command to mc*/ 312 343 err = mc_send_command(mc_io, &cmd); ··· 316 345 317 346 /* retrieve response parameters */ 318 347 attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); 319 - attr->dest_cfg.priority = cmd_params->priority; 320 - attr->dest_cfg.dest_type = cmd_params->dest_type; 348 + attr->dest_cfg.priority = cmd_params->dest_priority; 349 + attr->dest_cfg.dest_type = FIELD_GET(DEST_TYPE_MASK, cmd_params->dest_type); 321 350 attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); 322 351 attr->fqid = le32_to_cpu(cmd_params->fqid); 323 352 ··· 330 359 * @mc_io: Pointer to MC portal's I/O object 331 360 * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' 332 361 * @token: Token of DPDMAI object 362 + * @queue_idx: DMA queue index 333 363 * @priority: Select the queue relative to number of 334 364 * priorities configured at DPDMAI creation 335 - * @fqid: Returned Tx queue 365 + * @attr: Returned DMA Tx queue attributes 336 366 * 337 367 * Return: '0' on Success; Error code otherwise. 
338 368 */ 339 369 int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, 340 - u16 token, u8 priority, u32 *fqid) 370 + u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr) 341 371 { 342 372 struct dpdmai_rsp_get_tx_queue *rsp_params; 343 373 struct dpdmai_cmd_queue *cmd_params; ··· 351 379 352 380 cmd_params = (struct dpdmai_cmd_queue *)cmd.params; 353 381 cmd_params->queue = priority; 382 + cmd_params->queue_idx = queue_idx; 354 383 355 384 /* send command to mc*/ 356 385 err = mc_send_command(mc_io, &cmd); ··· 361 388 /* retrieve response parameters */ 362 389 363 390 rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; 364 - *fqid = le32_to_cpu(rsp_params->fqid); 391 + attr->fqid = le32_to_cpu(rsp_params->fqid); 365 392 366 393 return 0; 367 394 }
+30 -31
drivers/dma/fsl-dpaa2-qdma/dpdmai.h
··· 5 5 #define __FSL_DPDMAI_H 6 6 7 7 /* DPDMAI Version */ 8 - #define DPDMAI_VER_MAJOR 2 9 - #define DPDMAI_VER_MINOR 2 8 + #define DPDMAI_VER_MAJOR 3 9 + #define DPDMAI_VER_MINOR 3 10 10 11 - #define DPDMAI_CMD_BASE_VERSION 0 11 + #define DPDMAI_CMD_BASE_VERSION 1 12 12 #define DPDMAI_CMD_ID_OFFSET 4 13 13 14 - #define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \ 15 - DPDMAI_CMD_BASE_VERSION) 14 + /* 15 + * Maximum number of Tx/Rx queues per DPDMAI object 16 + */ 17 + #define DPDMAI_MAX_QUEUE_NUM 8 18 + 19 + #define DPDMAI_CMDID_FORMAT_V(x, v) (((x) << DPDMAI_CMD_ID_OFFSET) | (v)) 20 + #define DPDMAI_CMDID_FORMAT(x) DPDMAI_CMDID_FORMAT_V(x, DPDMAI_CMD_BASE_VERSION) 16 21 17 22 /* Command IDs */ 18 23 #define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800) ··· 31 26 #define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005) 32 27 #define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006) 33 28 34 - #define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010) 35 - #define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011) 36 - #define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012) 37 - #define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013) 38 - #define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014) 39 - #define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015) 40 - #define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016) 41 - #define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017) 42 - 43 - #define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0) 44 - #define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1) 45 - #define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2) 29 + #define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A0, 2) 30 + #define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A1, 2) 31 + #define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A2, 2) 46 32 47 33 #define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ 48 34 #define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ ··· 45 49 * 
Contains initialization APIs and runtime control APIs for DPDMAI 46 50 */ 47 51 48 - /** 52 + /* 49 53 * Maximum number of Tx/Rx priorities per DPDMAI object 50 54 */ 51 55 #define DPDMAI_PRIO_NUM 2 52 56 53 57 /* DPDMAI queue modification options */ 54 58 55 - /** 59 + /* 56 60 * Select to modify the user's context associated with the queue 57 61 */ 58 62 #define DPDMAI_QUEUE_OPT_USER_CTX 0x1 59 63 60 - /** 64 + /* 61 65 * Select to modify the queue's destination 62 66 */ 63 67 #define DPDMAI_QUEUE_OPT_DEST 0x2 64 68 65 69 /** 66 70 * struct dpdmai_cfg - Structure representing DPDMAI configuration 71 + * @num_queues: Number of the DMA queues 67 72 * @priorities: Priorities for the DMA hardware processing; valid priorities are 68 73 * configured with values 1-8; the entry following last valid entry 69 74 * should be configured with 0 70 75 */ 71 76 struct dpdmai_cfg { 77 + u8 num_queues; 72 78 u8 priorities[DPDMAI_PRIO_NUM]; 73 79 }; 74 80 ··· 78 80 * struct dpdmai_attr - Structure representing DPDMAI attributes 79 81 * @id: DPDMAI object ID 80 82 * @version: DPDMAI version 83 + * @version.major: DPDMAI major version 84 + * @version.minor: DPDMAI minor version 81 85 * @num_of_priorities: number of priorities 86 + * @num_of_queues: number of the DMA queues 82 87 */ 83 88 struct dpdmai_attr { 84 89 int id; 85 - /** 86 - * struct version - DPDMAI version 87 - * @major: DPDMAI major version 88 - * @minor: DPDMAI minor version 89 - */ 90 90 struct { 91 91 u16 major; 92 92 u16 minor; 93 93 } version; 94 94 u8 num_of_priorities; 95 + u8 num_of_queues; 95 96 }; 96 97 97 98 /** ··· 155 158 u32 fqid; 156 159 }; 157 160 161 + struct dpdmai_tx_queue_attr { 162 + u32 fqid; 163 + }; 164 + 158 165 int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, 159 166 int dpdmai_id, u16 *token); 160 167 int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); 161 - int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); 162 - int dpdmai_create(struct fsl_mc_io 
*mc_io, u32 cmd_flags, 163 - const struct dpdmai_cfg *cfg, u16 *token); 168 + int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token); 164 169 int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); 165 170 int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); 166 171 int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); 167 172 int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, 168 173 u16 token, struct dpdmai_attr *attr); 169 174 int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, 170 - u8 priority, const struct dpdmai_rx_queue_cfg *cfg); 175 + u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg); 171 176 int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, 172 - u8 priority, struct dpdmai_rx_queue_attr *attr); 177 + u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr); 173 178 int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, 174 - u16 token, u8 priority, u32 *fqid); 179 + u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr); 175 180 176 181 #endif /* __FSL_DPDMAI_H */
+13 -12
drivers/dma/fsl-edma-common.c
··· 3 3 // Copyright (c) 2013-2014 Freescale Semiconductor, Inc 4 4 // Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it> 5 5 6 + #include <linux/cleanup.h> 7 + #include <linux/clk.h> 6 8 #include <linux/dmapool.h> 7 9 #include <linux/module.h> 8 10 #include <linux/slab.h> ··· 76 74 77 75 flags = fsl_edma_drvflags(fsl_chan); 78 76 val = edma_readl_chreg(fsl_chan, ch_sbr); 79 - /* Remote/local swapped wrongly on iMX8 QM Audio edma */ 80 - if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) { 81 - if (!fsl_chan->is_rxchan) 82 - val |= EDMA_V3_CH_SBR_RD; 83 - else 84 - val |= EDMA_V3_CH_SBR_WR; 85 - } else { 86 - if (fsl_chan->is_rxchan) 87 - val |= EDMA_V3_CH_SBR_RD; 88 - else 89 - val |= EDMA_V3_CH_SBR_WR; 90 - } 77 + if (fsl_chan->is_rxchan) 78 + val |= EDMA_V3_CH_SBR_RD; 79 + else 80 + val |= EDMA_V3_CH_SBR_WR; 91 81 92 82 if (fsl_chan->is_remote) 93 83 val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR); ··· 540 546 csr |= EDMA_TCD_CSR_START; 541 547 542 548 fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr); 549 + 550 + trace_edma_fill_tcd(fsl_chan, tcd); 543 551 } 544 552 545 553 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, ··· 806 810 { 807 811 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 808 812 813 + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) 814 + clk_prepare_enable(fsl_chan->clk); 815 + 809 816 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, 810 817 fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ? 811 818 sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd), ··· 837 838 fsl_chan->tcd_pool = NULL; 838 839 fsl_chan->is_sw = false; 839 840 fsl_chan->srcid = 0; 841 + if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK) 842 + clk_disable_unprepare(fsl_chan->clk); 840 843 } 841 844 842 845 void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+58 -52
drivers/dma/fsl-edma-common.h
··· 151 151 enum dma_status status; 152 152 enum fsl_edma_pm_state pm_state; 153 153 bool idle; 154 - u32 slave_id; 155 154 struct fsl_edma_engine *edma; 156 155 struct fsl_edma_desc *edesc; 157 156 struct dma_slave_config cfg; ··· 194 195 #define FSL_EDMA_DRV_HAS_PD BIT(5) 195 196 #define FSL_EDMA_DRV_HAS_CHCLK BIT(6) 196 197 #define FSL_EDMA_DRV_HAS_CHMUX BIT(7) 197 - /* imx8 QM audio edma remote local swapped */ 198 - #define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8) 199 198 /* control and status register is in tcd address space, edma3 reg layout */ 200 199 #define FSL_EDMA_DRV_SPLIT_REG BIT(9) 201 200 #define FSL_EDMA_DRV_BUS_8BYTE BIT(10) ··· 235 238 void __iomem *muxbase[DMAMUX_NR]; 236 239 struct clk *muxclk[DMAMUX_NR]; 237 240 struct clk *dmaclk; 238 - struct clk *chclk; 239 241 struct mutex fsl_edma_mutex; 240 242 const struct fsl_edma_drvdata *drvdata; 241 243 u32 n_chans; ··· 246 250 struct fsl_edma_chan chans[] __counted_by(n_chans); 247 251 }; 248 252 253 + static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan) 254 + { 255 + return fsl_chan->edma->drvdata->flags; 256 + } 257 + 249 258 #define edma_read_tcdreg_c(chan, _tcd, __name) \ 250 - (sizeof((_tcd)->__name) == sizeof(u64) ? \ 251 - edma_readq(chan->edma, &(_tcd)->__name) : \ 252 - ((sizeof((_tcd)->__name) == sizeof(u32)) ? \ 253 - edma_readl(chan->edma, &(_tcd)->__name) : \ 254 - edma_readw(chan->edma, &(_tcd)->__name) \ 255 - )) 259 + _Generic(((_tcd)->__name), \ 260 + __iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name), \ 261 + __iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name), \ 262 + __iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name) \ 263 + ) 256 264 257 265 #define edma_read_tcdreg(chan, __name) \ 258 266 ((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? 
\ ··· 264 264 edma_read_tcdreg_c(chan, ((struct fsl_edma_hw_tcd __iomem *)chan->tcd), __name) \ 265 265 ) 266 266 267 - #define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ 268 - do { \ 269 - switch (sizeof(_tcd->__name)) { \ 270 - case sizeof(u64): \ 271 - edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \ 272 - break; \ 273 - case sizeof(u32): \ 274 - edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \ 275 - break; \ 276 - case sizeof(u16): \ 277 - edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \ 278 - break; \ 279 - case sizeof(u8): \ 280 - edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \ 281 - break; \ 282 - } \ 283 - } while (0) 267 + #define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ 268 + _Generic((_tcd->__name), \ 269 + __iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \ 270 + __iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \ 271 + __iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \ 272 + __iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name) \ 273 + ) 284 274 285 275 #define edma_write_tcdreg(chan, val, __name) \ 286 276 do { \ ··· 311 321 (((struct fsl_edma_hw_tcd *)_tcd)->_field)) 312 322 313 323 #define fsl_edma_le_to_cpu(x) \ 314 - (sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \ 315 - (sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \ 316 - le16_to_cpu((__force __le16)(x)))) 324 + _Generic((x), \ 325 + __le64 : le64_to_cpu((x)), \ 326 + __le32 : le32_to_cpu((x)), \ 327 + __le16 : le16_to_cpu((x)) \ 328 + ) 317 329 318 330 #define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \ 319 331 (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? 
\ ··· 323 331 fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field)) 324 332 325 333 #define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \ 326 - do { \ 327 - switch (sizeof((_tcd)->_field)) { \ 328 - case sizeof(u64): \ 329 - *(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \ 330 - break; \ 331 - case sizeof(u32): \ 332 - *(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \ 333 - break; \ 334 - case sizeof(u16): \ 335 - *(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \ 336 - break; \ 337 - } \ 338 - } while (0) 334 + _Generic(((_tcd)->_field), \ 335 + __le64 : (_tcd)->_field = cpu_to_le64(_val), \ 336 + __le32 : (_tcd)->_field = cpu_to_le32(_val), \ 337 + __le16 : (_tcd)->_field = cpu_to_le16(_val) \ 338 + ) 339 339 340 340 #define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \ 341 341 do { \ ··· 336 352 else \ 337 353 fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \ 338 354 } while (0) 355 + 356 + /* Need after struct defination */ 357 + #include "fsl-edma-trace.h" 339 358 340 359 /* 341 360 * R/W functions for big- or little-endian registers: ··· 358 371 h = ioread32(addr + 4); 359 372 } 360 373 374 + trace_edma_readl(edma, addr, l); 375 + trace_edma_readl(edma, addr + 4, h); 376 + 361 377 return (h << 32) | l; 362 378 } 363 379 364 380 static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) 365 381 { 382 + u32 val; 383 + 366 384 if (edma->big_endian) 367 - return ioread32be(addr); 385 + val = ioread32be(addr); 368 386 else 369 - return ioread32(addr); 387 + val = ioread32(addr); 388 + 389 + trace_edma_readl(edma, addr, val); 390 + 391 + return val; 370 392 } 371 393 372 394 static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr) 373 395 { 396 + u16 val; 397 + 374 398 if (edma->big_endian) 375 - return ioread16be(addr); 399 + val = ioread16be(addr); 376 400 else 377 - return ioread16(addr); 401 + val = ioread16(addr); 402 + 403 + 
trace_edma_readw(edma, addr, val); 404 + 405 + return val; 378 406 } 379 407 380 408 static inline void edma_writeb(struct fsl_edma_engine *edma, ··· 400 398 iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); 401 399 else 402 400 iowrite8(val, addr); 401 + 402 + trace_edma_writeb(edma, addr, val); 403 403 } 404 404 405 405 static inline void edma_writew(struct fsl_edma_engine *edma, ··· 412 408 iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); 413 409 else 414 410 iowrite16(val, addr); 411 + 412 + trace_edma_writew(edma, addr, val); 415 413 } 416 414 417 415 static inline void edma_writel(struct fsl_edma_engine *edma, ··· 423 417 iowrite32be(val, addr); 424 418 else 425 419 iowrite32(val, addr); 420 + 421 + trace_edma_writel(edma, addr, val); 426 422 } 427 423 428 424 static inline void edma_writeq(struct fsl_edma_engine *edma, ··· 437 429 iowrite32(val & 0xFFFFFFFF, addr); 438 430 iowrite32(val >> 32, addr + 4); 439 431 } 432 + 433 + trace_edma_writel(edma, addr, val & 0xFFFFFFFF); 434 + trace_edma_writel(edma, addr + 4, val >> 32); 440 435 } 441 436 442 437 static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) 443 438 { 444 439 return container_of(chan, struct fsl_edma_chan, vchan.chan); 445 - } 446 - 447 - static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan) 448 - { 449 - return fsl_chan->edma->drvdata->flags; 450 440 } 451 441 452 442 static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+31 -19
drivers/dma/fsl-edma-main.c
··· 105 105 if (dma_spec->args_count != 2) 106 106 return NULL; 107 107 108 - mutex_lock(&fsl_edma->fsl_edma_mutex); 108 + guard(mutex)(&fsl_edma->fsl_edma_mutex); 109 + 109 110 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { 110 111 if (chan->client_count) 111 112 continue; ··· 115 114 if (chan) { 116 115 chan->device->privatecnt++; 117 116 fsl_chan = to_fsl_edma_chan(chan); 118 - fsl_chan->slave_id = dma_spec->args[1]; 119 - fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, 117 + fsl_chan->srcid = dma_spec->args[1]; 118 + 119 + if (!fsl_chan->srcid) { 120 + dev_err(&fsl_chan->pdev->dev, "Invalidate srcid %d\n", 121 + fsl_chan->srcid); 122 + return NULL; 123 + } 124 + 125 + fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, 120 126 true); 121 - mutex_unlock(&fsl_edma->fsl_edma_mutex); 122 127 return chan; 123 128 } 124 129 } 125 130 } 126 - mutex_unlock(&fsl_edma->fsl_edma_mutex); 127 131 return NULL; 128 132 } 129 133 ··· 348 342 .setup_irq = fsl_edma3_irq_init, 349 343 }; 350 344 351 - static struct fsl_edma_drvdata imx8qm_audio_data = { 352 - .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, 345 + static struct fsl_edma_drvdata imx8ulp_data = { 346 + .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK | 347 + FSL_EDMA_DRV_EDMA3, 353 348 .chreg_space_sz = 0x10000, 354 349 .chreg_off = 0x10000, 350 + .mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux), 351 + .mux_skip = 0x10000, 355 352 .setup_irq = fsl_edma3_irq_init, 356 353 }; 357 354 ··· 389 380 { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, 390 381 { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, 391 382 { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data}, 392 - { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data}, 383 + { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data}, 393 384 { .compatible = "fsl,imx93-edma3", .data = &imx93_data3}, 394 385 { .compatible = 
"fsl,imx93-edma4", .data = &imx93_data4}, 395 386 { .compatible = "fsl,imx95-edma5", .data = &imx95_data5}, ··· 443 434 struct fsl_edma_engine *fsl_edma; 444 435 const struct fsl_edma_drvdata *drvdata = NULL; 445 436 u32 chan_mask[2] = {0, 0}; 437 + char clk_name[36]; 446 438 struct edma_regs *regs; 447 439 int chans; 448 440 int ret, i; ··· 483 473 if (IS_ERR(fsl_edma->dmaclk)) { 484 474 dev_err(&pdev->dev, "Missing DMA block clock.\n"); 485 475 return PTR_ERR(fsl_edma->dmaclk); 486 - } 487 - } 488 - 489 - if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { 490 - fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp"); 491 - if (IS_ERR(fsl_edma->chclk)) { 492 - dev_err(&pdev->dev, "Missing MP block clock.\n"); 493 - return PTR_ERR(fsl_edma->chclk); 494 476 } 495 477 } 496 478 ··· 542 540 543 541 fsl_chan->edma = fsl_edma; 544 542 fsl_chan->pm_state = RUNNING; 545 - fsl_chan->slave_id = 0; 543 + fsl_chan->srcid = 0; 546 544 fsl_chan->idle = true; 547 545 fsl_chan->dma_dir = DMA_NONE; 548 546 fsl_chan->vchan.desc_free = fsl_edma_free_desc; ··· 553 551 + i * drvdata->chreg_space_sz + drvdata->chreg_off + len; 554 552 fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip; 555 553 554 + if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) { 555 + snprintf(clk_name, sizeof(clk_name), "ch%02d", i); 556 + fsl_chan->clk = devm_clk_get_enabled(&pdev->dev, 557 + (const char *)clk_name); 558 + 559 + if (IS_ERR(fsl_chan->clk)) 560 + return PTR_ERR(fsl_chan->clk); 561 + } 556 562 fsl_chan->pdev = pdev; 557 563 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); 558 564 559 565 edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr); 560 566 fsl_edma_chan_mux(fsl_chan, 0, false); 567 + if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) 568 + clk_disable_unprepare(fsl_chan->clk); 561 569 } 562 570 563 571 ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); ··· 694 682 continue; 695 683 fsl_chan->pm_state = RUNNING; 696 684 edma_write_tcdreg(fsl_chan, 0, csr); 697 
- if (fsl_chan->slave_id != 0) 698 - fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); 685 + if (fsl_chan->srcid != 0) 686 + fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true); 699 687 } 700 688 701 689 if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+4
drivers/dma/fsl-edma-trace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #define CREATE_TRACE_POINTS 4 + #include "fsl-edma-common.h"
+132
drivers/dma/fsl-edma-trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* 3 + * Copyright 2023 NXP. 4 + */ 5 + 6 + #undef TRACE_SYSTEM 7 + #define TRACE_SYSTEM fsl_edma 8 + 9 + #if !defined(__LINUX_FSL_EDMA_TRACE) || defined(TRACE_HEADER_MULTI_READ) 10 + #define __LINUX_FSL_EDMA_TRACE 11 + 12 + #include <linux/types.h> 13 + #include <linux/tracepoint.h> 14 + 15 + DECLARE_EVENT_CLASS(edma_log_io, 16 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 17 + TP_ARGS(edma, addr, value), 18 + TP_STRUCT__entry( 19 + __field(struct fsl_edma_engine *, edma) 20 + __field(void __iomem *, addr) 21 + __field(u32, value) 22 + ), 23 + TP_fast_assign( 24 + __entry->edma = edma; 25 + __entry->addr = addr; 26 + __entry->value = value; 27 + ), 28 + TP_printk("offset %08x: value %08x", 29 + (u32)(__entry->addr - __entry->edma->membase), __entry->value) 30 + ); 31 + 32 + DEFINE_EVENT(edma_log_io, edma_readl, 33 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 34 + TP_ARGS(edma, addr, value) 35 + ); 36 + 37 + DEFINE_EVENT(edma_log_io, edma_writel, 38 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 39 + TP_ARGS(edma, addr, value) 40 + ); 41 + 42 + DEFINE_EVENT(edma_log_io, edma_readw, 43 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 44 + TP_ARGS(edma, addr, value) 45 + ); 46 + 47 + DEFINE_EVENT(edma_log_io, edma_writew, 48 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 49 + TP_ARGS(edma, addr, value) 50 + ); 51 + 52 + DEFINE_EVENT(edma_log_io, edma_readb, 53 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 54 + TP_ARGS(edma, addr, value) 55 + ); 56 + 57 + DEFINE_EVENT(edma_log_io, edma_writeb, 58 + TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value), 59 + TP_ARGS(edma, addr, value) 60 + ); 61 + 62 + DECLARE_EVENT_CLASS(edma_log_tcd, 63 + TP_PROTO(struct fsl_edma_chan *chan, void *tcd), 64 + TP_ARGS(chan, tcd), 65 + TP_STRUCT__entry( 66 + 
__field(u64, saddr) 67 + __field(u16, soff) 68 + __field(u16, attr) 69 + __field(u32, nbytes) 70 + __field(u64, slast) 71 + __field(u64, daddr) 72 + __field(u16, doff) 73 + __field(u16, citer) 74 + __field(u64, dlast_sga) 75 + __field(u16, csr) 76 + __field(u16, biter) 77 + 78 + ), 79 + TP_fast_assign( 80 + __entry->saddr = fsl_edma_get_tcd_to_cpu(chan, tcd, saddr), 81 + __entry->soff = fsl_edma_get_tcd_to_cpu(chan, tcd, soff), 82 + __entry->attr = fsl_edma_get_tcd_to_cpu(chan, tcd, attr), 83 + __entry->nbytes = fsl_edma_get_tcd_to_cpu(chan, tcd, nbytes), 84 + __entry->slast = fsl_edma_get_tcd_to_cpu(chan, tcd, slast), 85 + __entry->daddr = fsl_edma_get_tcd_to_cpu(chan, tcd, daddr), 86 + __entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff), 87 + __entry->citer = fsl_edma_get_tcd_to_cpu(chan, tcd, citer), 88 + __entry->dlast_sga = fsl_edma_get_tcd_to_cpu(chan, tcd, dlast_sga), 89 + __entry->csr = fsl_edma_get_tcd_to_cpu(chan, tcd, csr), 90 + __entry->biter = fsl_edma_get_tcd_to_cpu(chan, tcd, biter); 91 + ), 92 + TP_printk("\n==== TCD =====\n" 93 + " saddr: 0x%016llx\n" 94 + " soff: 0x%04x\n" 95 + " attr: 0x%04x\n" 96 + " nbytes: 0x%08x\n" 97 + " slast: 0x%016llx\n" 98 + " daddr: 0x%016llx\n" 99 + " doff: 0x%04x\n" 100 + " citer: 0x%04x\n" 101 + " dlast: 0x%016llx\n" 102 + " csr: 0x%04x\n" 103 + " biter: 0x%04x\n", 104 + __entry->saddr, 105 + __entry->soff, 106 + __entry->attr, 107 + __entry->nbytes, 108 + __entry->slast, 109 + __entry->daddr, 110 + __entry->doff, 111 + __entry->citer, 112 + __entry->dlast_sga, 113 + __entry->csr, 114 + __entry->biter) 115 + ); 116 + 117 + DEFINE_EVENT(edma_log_tcd, edma_fill_tcd, 118 + TP_PROTO(struct fsl_edma_chan *chan, void *tcd), 119 + TP_ARGS(chan, tcd) 120 + ); 121 + 122 + #endif 123 + 124 + /* this part must be outside header guard */ 125 + 126 + #undef TRACE_INCLUDE_PATH 127 + #define TRACE_INCLUDE_PATH . 
128 + 129 + #undef TRACE_INCLUDE_FILE 130 + #define TRACE_INCLUDE_FILE fsl-edma-trace 131 + 132 + #include <trace/define_trace.h>
+3 -1
drivers/dma/idma64.c
··· 598 598 599 599 idma64->dma.dev = chip->sysdev; 600 600 601 - dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); 601 + ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); 602 + if (ret) 603 + return ret; 602 604 603 605 ret = dma_async_device_register(&idma64->dma); 604 606 if (ret)
+9 -9
drivers/dma/idxd/cdev.c
··· 577 577 struct idxd_cdev *idxd_cdev; 578 578 579 579 idxd_cdev = wq->idxd_cdev; 580 - ida_destroy(&file_ida); 581 580 wq->idxd_cdev = NULL; 582 581 cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); 583 582 put_device(cdev_dev(idxd_cdev)); ··· 591 592 592 593 if (idxd->state != IDXD_DEV_ENABLED) 593 594 return -ENXIO; 595 + 596 + mutex_lock(&wq->wq_lock); 597 + 598 + if (!idxd_wq_driver_name_match(wq, dev)) { 599 + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 600 + rc = -ENODEV; 601 + goto wq_err; 602 + } 594 603 595 604 /* 596 605 * User type WQ is enabled only when SVA is enabled for two reasons: ··· 615 608 dev_dbg(&idxd->pdev->dev, 616 609 "User type WQ cannot be enabled without SVA.\n"); 617 610 618 - return -EOPNOTSUPP; 619 - } 620 - 621 - mutex_lock(&wq->wq_lock); 622 - 623 - if (!idxd_wq_driver_name_match(wq, dev)) { 624 - idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 625 - rc = -ENODEV; 611 + rc = -EOPNOTSUPP; 626 612 goto wq_err; 627 613 } 628 614
+79 -18
drivers/dma/imx-sdma.c
··· 24 24 #include <linux/semaphore.h> 25 25 #include <linux/spinlock.h> 26 26 #include <linux/device.h> 27 + #include <linux/genalloc.h> 27 28 #include <linux/dma-mapping.h> 28 29 #include <linux/firmware.h> 29 30 #include <linux/slab.h> ··· 138 137 * 0: Source on AIPS 139 138 * 12 Destination Bit(DP) 1: Destination on SPBA 140 139 * 0: Destination on AIPS 141 - * 13-15 --------- MUST BE 0 140 + * 13 Source FIFO 1: Source is dual FIFO 141 + * 0: Source is single FIFO 142 + * 14 Destination FIFO 1: Destination is dual FIFO 143 + * 0: Destination is single FIFO 144 + * 15 --------- MUST BE 0 142 145 * 16-23 Higher WML HWML 143 146 * 24-27 N Total number of samples after 144 147 * which Pad adding/Swallowing ··· 173 168 #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10) 174 169 #define SDMA_WATERMARK_LEVEL_SP BIT(11) 175 170 #define SDMA_WATERMARK_LEVEL_DP BIT(12) 171 + #define SDMA_WATERMARK_LEVEL_SD BIT(13) 172 + #define SDMA_WATERMARK_LEVEL_DD BIT(14) 176 173 #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16) 177 174 #define SDMA_WATERMARK_LEVEL_LWE BIT(28) 178 175 #define SDMA_WATERMARK_LEVEL_HWE BIT(29) ··· 182 175 183 176 #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 184 177 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 178 + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 185 179 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 186 180 187 181 #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ ··· 240 232 s32 utra_addr; 241 233 s32 ram_code_start_addr; 242 234 /* End of v1 array */ 243 - s32 mcu_2_ssish_addr; 235 + union { s32 v1_end; s32 mcu_2_ssish_addr; }; 244 236 s32 ssish_2_mcu_addr; 245 237 s32 hdmi_dma_addr; 246 238 /* End of v2 array */ 247 - s32 zcanfd_2_mcu_addr; 239 + union { s32 v2_end; s32 zcanfd_2_mcu_addr; }; 248 240 s32 zqspi_2_mcu_addr; 249 241 s32 mcu_2_ecspi_addr; 250 242 s32 mcu_2_sai_addr; 251 243 s32 sai_2_mcu_addr; 252 244 s32 uart_2_mcu_rom_addr; 253 245 s32 uartsh_2_mcu_rom_addr; 246 + s32 i2c_2_mcu_addr; 247 + s32 mcu_2_i2c_addr; 254 248 /* End of v3 array */ 255 - 
s32 mcu_2_zqspi_addr; 249 + union { s32 v3_end; s32 mcu_2_zqspi_addr; }; 256 250 /* End of v4 array */ 251 + s32 v4_end[0]; 257 252 }; 258 253 259 254 /* ··· 542 531 /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/ 543 532 bool clk_ratio; 544 533 bool fw_loaded; 534 + struct gen_pool *iram_pool; 545 535 }; 546 536 547 537 static int sdma_config_write(struct dma_chan *chan, ··· 1084 1072 per_2_emi = sdma->script_addrs->sai_2_mcu_addr; 1085 1073 emi_2_per = sdma->script_addrs->mcu_2_sai_addr; 1086 1074 break; 1075 + case IMX_DMATYPE_I2C: 1076 + per_2_emi = sdma->script_addrs->i2c_2_mcu_addr; 1077 + emi_2_per = sdma->script_addrs->mcu_2_i2c_addr; 1078 + sdmac->is_ram_script = true; 1079 + break; 1087 1080 case IMX_DMATYPE_HDMI: 1088 1081 emi_2_per = sdma->script_addrs->hdmi_dma_addr; 1089 1082 sdmac->is_ram_script = true; ··· 1272 1255 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP; 1273 1256 1274 1257 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT; 1258 + 1259 + /* 1260 + * Limitation: The p2p script support dual fifos in maximum, 1261 + * So when fifo number is larger than 1, force enable dual 1262 + * fifos. 
1263 + */ 1264 + if (sdmac->n_fifos_src > 1) 1265 + sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD; 1266 + if (sdmac->n_fifos_dst > 1) 1267 + sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD; 1275 1268 } 1276 1269 1277 1270 static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac) ··· 1385 1358 { 1386 1359 int ret = -EBUSY; 1387 1360 1388 - sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, 1389 - GFP_NOWAIT); 1361 + if (sdma->iram_pool) 1362 + sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool, 1363 + sizeof(struct sdma_buffer_descriptor), 1364 + &sdma->bd0_phys); 1365 + else 1366 + sdma->bd0 = dma_alloc_coherent(sdma->dev, 1367 + sizeof(struct sdma_buffer_descriptor), 1368 + &sdma->bd0_phys, GFP_NOWAIT); 1390 1369 if (!sdma->bd0) { 1391 1370 ret = -ENOMEM; 1392 1371 goto out; ··· 1412 1379 static int sdma_alloc_bd(struct sdma_desc *desc) 1413 1380 { 1414 1381 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1382 + struct sdma_engine *sdma = desc->sdmac->sdma; 1415 1383 int ret = 0; 1416 1384 1417 - desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, 1418 - &desc->bd_phys, GFP_NOWAIT); 1385 + if (sdma->iram_pool) 1386 + desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys); 1387 + else 1388 + desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT); 1389 + 1419 1390 if (!desc->bd) { 1420 1391 ret = -ENOMEM; 1421 1392 goto out; ··· 1431 1394 static void sdma_free_bd(struct sdma_desc *desc) 1432 1395 { 1433 1396 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1397 + struct sdma_engine *sdma = desc->sdmac->sdma; 1434 1398 1435 - dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, 1436 - desc->bd_phys); 1399 + if (sdma->iram_pool) 1400 + gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size); 1401 + else 1402 + dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys); 1437 1403 } 1438 1404 1439 1405 static 
void sdma_desc_free(struct virt_dma_desc *vd) ··· 1683 1643 if (count & 3 || sg->dma_address & 3) 1684 1644 goto err_bd_out; 1685 1645 break; 1646 + case DMA_SLAVE_BUSWIDTH_3_BYTES: 1647 + bd->mode.command = 3; 1648 + break; 1686 1649 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1687 1650 bd->mode.command = 2; 1688 1651 if (count & 1 || sg->dma_address & 1) ··· 1923 1880 spin_unlock_irqrestore(&sdmac->vc.lock, flags); 1924 1881 } 1925 1882 1926 - #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 1927 - #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 1928 - #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45 1929 - #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46 1883 + #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \ 1884 + (offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32)) 1885 + 1886 + #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \ 1887 + (offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32)) 1888 + 1889 + #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \ 1890 + (offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32)) 1891 + 1892 + #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \ 1893 + (offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32)) 1930 1894 1931 1895 static void sdma_add_scripts(struct sdma_engine *sdma, 1932 1896 const struct sdma_script_start_addrs *addr) ··· 2118 2068 { 2119 2069 int i, ret; 2120 2070 dma_addr_t ccb_phys; 2071 + int ccbsize; 2121 2072 2122 2073 ret = clk_enable(sdma->clk_ipg); 2123 2074 if (ret) ··· 2134 2083 /* Be sure SDMA has not started yet */ 2135 2084 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 2136 2085 2137 - sdma->channel_control = dma_alloc_coherent(sdma->dev, 2138 - MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) + 2139 - sizeof(struct sdma_context_data), 2140 - &ccb_phys, GFP_KERNEL); 2086 + ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control) 2087 + + sizeof(struct sdma_context_data)); 2088 + 2089 + if (sdma->iram_pool) 2090 + sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys); 2091 + else 2092 
+ sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys, 2093 + GFP_KERNEL); 2141 2094 2142 2095 if (!sdma->channel_control) { 2143 2096 ret = -ENOMEM; ··· 2325 2270 */ 2326 2271 if (i) 2327 2272 vchan_init(&sdmac->vc, &sdma->dma_device); 2273 + } 2274 + 2275 + if (np) { 2276 + sdma->iram_pool = of_gen_pool_get(np, "iram", 0); 2277 + if (sdma->iram_pool) 2278 + dev_info(&pdev->dev, "alloc bd from iram.\n"); 2328 2279 } 2329 2280 2330 2281 ret = sdma_init(sdma);
+2 -2
drivers/dma/mcf-edma-main.c
··· 195 195 struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i]; 196 196 197 197 mcf_chan->edma = mcf_edma; 198 - mcf_chan->slave_id = i; 198 + mcf_chan->srcid = i; 199 199 mcf_chan->idle = true; 200 200 mcf_chan->dma_dir = DMA_NONE; 201 201 mcf_chan->vchan.desc_free = fsl_edma_free_desc; ··· 277 277 if (chan->device->dev->driver == &mcf_edma_driver.driver) { 278 278 struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan); 279 279 280 - return (mcf_chan->slave_id == (uintptr_t)param); 280 + return (mcf_chan->srcid == (uintptr_t)param); 281 281 } 282 282 283 283 return false;
-5
drivers/dma/pch_dma.c
··· 155 155 return &chan->dev->device; 156 156 } 157 157 158 - static inline struct device *chan2parent(struct dma_chan *chan) 159 - { 160 - return chan->dev->device.parent; 161 - } 162 - 163 158 static inline 164 159 struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) 165 160 {
-11
drivers/dma/qcom/hidma.c
··· 50 50 #include <linux/platform_device.h> 51 51 #include <linux/slab.h> 52 52 #include <linux/spinlock.h> 53 - #include <linux/of_dma.h> 54 53 #include <linux/property.h> 55 54 #include <linux/delay.h> 56 55 #include <linux/acpi.h> ··· 946 947 MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); 947 948 #endif 948 949 949 - static const struct of_device_id hidma_match[] = { 950 - {.compatible = "qcom,hidma-1.0",}, 951 - {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),}, 952 - {.compatible = "qcom,hidma-1.2", 953 - .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),}, 954 - {}, 955 - }; 956 - MODULE_DEVICE_TABLE(of, hidma_match); 957 - 958 950 static struct platform_driver hidma_driver = { 959 951 .probe = hidma_probe, 960 952 .remove_new = hidma_remove, 961 953 .shutdown = hidma_shutdown, 962 954 .driver = { 963 955 .name = "hidma", 964 - .of_match_table = hidma_match, 965 956 .acpi_match_table = ACPI_PTR(hidma_acpi_ids), 966 957 }, 967 958 };
+1 -108
drivers/dma/qcom/hidma_mgmt.c
··· 7 7 8 8 #include <linux/dmaengine.h> 9 9 #include <linux/acpi.h> 10 - #include <linux/of.h> 11 10 #include <linux/property.h> 12 - #include <linux/of_address.h> 13 - #include <linux/of_irq.h> 14 - #include <linux/of_platform.h> 15 - #include <linux/of_device.h> 16 11 #include <linux/platform_device.h> 17 12 #include <linux/module.h> 18 13 #include <linux/uaccess.h> ··· 322 327 MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids); 323 328 #endif 324 329 325 - static const struct of_device_id hidma_mgmt_match[] = { 326 - {.compatible = "qcom,hidma-mgmt-1.0",}, 327 - {}, 328 - }; 329 - MODULE_DEVICE_TABLE(of, hidma_mgmt_match); 330 - 331 330 static struct platform_driver hidma_mgmt_driver = { 332 331 .probe = hidma_mgmt_probe, 333 332 .driver = { 334 333 .name = "hidma-mgmt", 335 - .of_match_table = hidma_mgmt_match, 336 334 .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), 337 335 }, 338 336 }; 339 337 340 - #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) 341 - static int object_counter; 342 - 343 - static int __init hidma_mgmt_of_populate_channels(struct device_node *np) 344 - { 345 - struct platform_device *pdev_parent = of_find_device_by_node(np); 346 - struct platform_device_info pdevinfo; 347 - struct device_node *child; 348 - struct resource *res; 349 - int ret = 0; 350 - 351 - /* allocate a resource array */ 352 - res = kcalloc(3, sizeof(*res), GFP_KERNEL); 353 - if (!res) 354 - return -ENOMEM; 355 - 356 - for_each_available_child_of_node(np, child) { 357 - struct platform_device *new_pdev; 358 - 359 - ret = of_address_to_resource(child, 0, &res[0]); 360 - if (!ret) 361 - goto out; 362 - 363 - ret = of_address_to_resource(child, 1, &res[1]); 364 - if (!ret) 365 - goto out; 366 - 367 - ret = of_irq_to_resource(child, 0, &res[2]); 368 - if (ret <= 0) 369 - goto out; 370 - 371 - memset(&pdevinfo, 0, sizeof(pdevinfo)); 372 - pdevinfo.fwnode = &child->fwnode; 373 - pdevinfo.parent = pdev_parent ? 
&pdev_parent->dev : NULL; 374 - pdevinfo.name = child->name; 375 - pdevinfo.id = object_counter++; 376 - pdevinfo.res = res; 377 - pdevinfo.num_res = 3; 378 - pdevinfo.data = NULL; 379 - pdevinfo.size_data = 0; 380 - pdevinfo.dma_mask = DMA_BIT_MASK(64); 381 - new_pdev = platform_device_register_full(&pdevinfo); 382 - if (IS_ERR(new_pdev)) { 383 - ret = PTR_ERR(new_pdev); 384 - goto out; 385 - } 386 - new_pdev->dev.of_node = child; 387 - of_dma_configure(&new_pdev->dev, child, true); 388 - /* 389 - * It is assumed that calling of_msi_configure is safe on 390 - * platforms with or without MSI support. 391 - */ 392 - of_msi_configure(&new_pdev->dev, child); 393 - } 394 - 395 - kfree(res); 396 - 397 - return ret; 398 - 399 - out: 400 - of_node_put(child); 401 - kfree(res); 402 - 403 - return ret; 404 - } 405 - #endif 406 - 407 - static int __init hidma_mgmt_init(void) 408 - { 409 - #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) 410 - struct device_node *child; 411 - 412 - for_each_matching_node(child, hidma_mgmt_match) { 413 - /* device tree based firmware here */ 414 - hidma_mgmt_of_populate_channels(child); 415 - } 416 - #endif 417 - /* 418 - * We do not check for return value here, as it is assumed that 419 - * platform_driver_register must not fail. The reason for this is that 420 - * the (potential) hidma_mgmt_of_populate_channels calls above are not 421 - * cleaned up if it does fail, and to do this work is quite 422 - * complicated. In particular, various calls of of_address_to_resource, 423 - * of_irq_to_resource, platform_device_register_full, of_dma_configure, 424 - * and of_msi_configure which then call other functions and so on, must 425 - * be cleaned up - this is not a trivial exercise. 426 - * 427 - * Currently, this module is not intended to be unloaded, and there is 428 - * no module_exit function defined which does the needed cleanup. For 429 - * this reason, we have to assume success here. 
430 - */ 431 - platform_driver_register(&hidma_mgmt_driver); 432 - 433 - return 0; 434 - } 435 - module_init(hidma_mgmt_init); 338 + module_platform_driver(hidma_mgmt_driver); 436 339 MODULE_LICENSE("GPL v2");
+10
drivers/dma/virt-dma.h
··· 81 81 */ 82 82 static inline bool vchan_issue_pending(struct virt_dma_chan *vc) 83 83 { 84 + lockdep_assert_held(&vc->lock); 85 + 84 86 list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); 85 87 return !list_empty(&vc->desc_issued); 86 88 } ··· 97 95 { 98 96 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); 99 97 dma_cookie_t cookie; 98 + 99 + lockdep_assert_held(&vc->lock); 100 100 101 101 cookie = vd->tx.cookie; 102 102 dma_cookie_complete(&vd->tx); ··· 150 146 { 151 147 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); 152 148 149 + lockdep_assert_held(&vc->lock); 150 + 153 151 list_add_tail(&vd->node, &vc->desc_terminated); 154 152 155 153 if (vc->cyclic == vd) ··· 166 160 */ 167 161 static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) 168 162 { 163 + lockdep_assert_held(&vc->lock); 164 + 169 165 return list_first_entry_or_null(&vc->desc_issued, 170 166 struct virt_dma_desc, node); 171 167 } ··· 185 177 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, 186 178 struct list_head *head) 187 179 { 180 + lockdep_assert_held(&vc->lock); 181 + 188 182 list_splice_tail_init(&vc->desc_allocated, head); 189 183 list_splice_tail_init(&vc->desc_submitted, head); 190 184 list_splice_tail_init(&vc->desc_issued, head);
+1
drivers/dma/xilinx/xdma.c
··· 1307 1307 { "xdma", 0}, 1308 1308 { }, 1309 1309 }; 1310 + MODULE_DEVICE_TABLE(platform, xdma_id_table); 1310 1311 1311 1312 static struct platform_driver xdma_driver = { 1312 1313 .driver = {
+4 -6
drivers/dma/xilinx/xilinx_dpdma.c
··· 1043 1043 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) 1044 1044 { 1045 1045 struct xilinx_dpdma_tx_desc *active; 1046 - unsigned long flags; 1047 1046 1048 - spin_lock_irqsave(&chan->lock, flags); 1047 + spin_lock(&chan->lock); 1049 1048 1050 1049 xilinx_dpdma_debugfs_desc_done_irq(chan); 1051 1050 ··· 1056 1057 "chan%u: DONE IRQ with no active descriptor!\n", 1057 1058 chan->id); 1058 1059 1059 - spin_unlock_irqrestore(&chan->lock, flags); 1060 + spin_unlock(&chan->lock); 1060 1061 } 1061 1062 1062 1063 /** ··· 1071 1072 { 1072 1073 struct xilinx_dpdma_tx_desc *pending; 1073 1074 struct xilinx_dpdma_sw_desc *sw_desc; 1074 - unsigned long flags; 1075 1075 u32 desc_id; 1076 1076 1077 - spin_lock_irqsave(&chan->lock, flags); 1077 + spin_lock(&chan->lock); 1078 1078 1079 1079 pending = chan->desc.pending; 1080 1080 if (!chan->running || !pending) ··· 1106 1108 spin_unlock(&chan->vchan.lock); 1107 1109 1108 1110 out: 1109 - spin_unlock_irqrestore(&chan->lock, flags); 1111 + spin_unlock(&chan->lock); 1110 1112 } 1111 1113 1112 1114 /**
+1
include/linux/dma/imx-dma.h
··· 41 41 IMX_DMATYPE_SAI, /* SAI */ 42 42 IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */ 43 43 IMX_DMATYPE_HDMI, /* HDMI Audio */ 44 + IMX_DMATYPE_I2C, /* I2C */ 44 45 }; 45 46 46 47 enum imx_dma_prio {