Merge tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
"A bunch of driver fixes:

- DMA mask fix for the mmp pdma driver

- Xilinx regmap max_register fix and uninitialized addr_width fix

- device leak fixes for a bunch of drivers in the subsystem

- stm32 dmamux and TI crossbar driver fixes for device and OF node
  leaks, plus route allocation cleanup

- Tegra use-after-free fix

- memory leak fixes in the Qualcomm gpi and omap-dma drivers

- compatible string fix for the apple driver"
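[Editor's note] Most of the device-leak fixes in this series share one root cause: lookup helpers such as of_find_device_by_node() and bus_find_device_by_name() return their result with an elevated reference count, and every exit path, success included, must drop it with put_device(). A minimal sketch of the corrected pattern, using hypothetical my_dma/my_dma_get names rather than code from any one driver:

    #include <linux/err.h>
    #include <linux/of_platform.h>
    #include <linux/platform_device.h>

    struct my_dma;      /* hypothetical per-controller driver data */

    static struct my_dma *my_dma_get(struct device_node *np)
    {
            struct platform_device *pdev;
            struct my_dma *md;

            pdev = of_find_device_by_node(np);  /* takes a device reference */
            if (!pdev)
                    return ERR_PTR(-ENODEV);

            md = platform_get_drvdata(pdev);
            put_device(&pdev->dev);     /* drop the lookup reference on every path */

            if (!md)
                    return ERR_PTR(-EPROBE_DEFER);

            return md;
    }

The k3-udma and dmamux changes below are variations on this shape: some drop the reference as soon as the needed data has been read, others funnel every failure through a common err_put label.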

* tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
dmaengine: apple-admac: Add "apple,t8103-admac" compatible
dmaengine: omap-dma: fix dma_pool resource leak in error paths
dmaengine: qcom: gpi: Fix memory leak in gpi_peripheral_config()
dmaengine: sh: rz-dmac: Fix rz_dmac_terminate_all()
dmaengine: xilinx_dma: Fix uninitialized addr_width when "xlnx,addrwidth" property is missing
dmaengine: tegra-adma: Fix use-after-free
dmaengine: fsl-edma: Fix clk leak on alloc_chan_resources failure
dmaengine: mmp_pdma: Fix race condition in mmp_pdma_residue()
dmaengine: ti: k3-udma: fix device leak on udma lookup
dmaengine: ti: dma-crossbar: clean up dra7x route allocation error paths
dmaengine: ti: dma-crossbar: fix device leak on am335x route allocation
dmaengine: ti: dma-crossbar: fix device leak on dra7x route allocation
dmaengine: stm32: dmamux: clean up route allocation error labels
dmaengine: stm32: dmamux: fix OF node leak on route allocation failure
dmaengine: stm32: dmamux: fix device leak on route allocation
dmaengine: sh: rz-dmac: fix device leak on probe failure
dmaengine: lpc32xx-dmamux: fix device leak on route allocation
dmaengine: lpc18xx-dmamux: fix device leak on route allocation
dmaengine: idxd: fix device leaks on compat bind and unbind
dmaengine: dw: dmamux: fix OF node leak on route allocation failure
...

+168 -73
+1
drivers/dma/apple-admac.c
···
 }
 
 static const struct of_device_id admac_of_match[] = {
+        { .compatible = "apple,t8103-admac", },
         { .compatible = "apple,admac", },
         { }
 };
+7 -2
drivers/dma/at_hdmac.c
···
 static void atc_free_chan_resources(struct dma_chan *chan)
 {
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
+        struct at_dma_slave *atslave;
 
         BUG_ON(atc_chan_is_enabled(atchan));
···
         /*
          * Free atslave allocated in at_dma_xlate()
          */
-        kfree(chan->private);
-        chan->private = NULL;
+        atslave = chan->private;
+        if (atslave) {
+                put_device(atslave->dma_dev);
+                kfree(atslave);
+                chan->private = NULL;
+        }
 
         dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
+5 -1
drivers/dma/bcm-sba-raid.c
···
         /* Prealloc channel resource */
         ret = sba_prealloc_channel_resources(sba);
         if (ret)
-                goto fail_free_mchan;
+                goto fail_put_mbox;
 
         /* Check availability of debugfs */
         if (!debugfs_initialized())
···
 fail_free_resources:
         debugfs_remove_recursive(sba->root);
         sba_freeup_channel_resources(sba);
+fail_put_mbox:
+        put_device(sba->mbox_dev);
 fail_free_mchan:
         mbox_free_channel(sba->mchan);
         return ret;
···
         debugfs_remove_recursive(sba->root);
 
         sba_freeup_channel_resources(sba);
+
+        put_device(sba->mbox_dev);
 
         mbox_free_channel(sba->mchan);
 }
+10 -7
drivers/dma/cv1800b-dmamux.c
···
         struct llist_node *node;
         unsigned long flags;
         unsigned int chid, devid, cpuid;
-        int ret;
+        int ret = -EINVAL;
 
         if (dma_spec->args_count != DMAMUX_NCELLS) {
                 dev_err(&pdev->dev, "invalid number of dma mux args\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         devid = dma_spec->args[0];
···
         if (devid > MAX_DMA_MAPPING_ID) {
                 dev_err(&pdev->dev, "invalid device id: %u\n", devid);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         if (cpuid > MAX_DMA_CPU_ID) {
                 dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
         if (!dma_spec->np) {
                 dev_err(&pdev->dev, "can't get dma master\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         spin_lock_irqsave(&dmamux->lock, flags);
···
                         if (map->peripheral == devid && map->cpu == cpuid)
                                 goto found;
                 }
-
-                ret = -EINVAL;
                 goto failed;
         } else {
                 node = llist_del_first(&dmamux->free_maps);
···
         dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
                 chid, devid, cpuid);
 
+        put_device(&pdev->dev);
+
         return map;
 
 failed:
         spin_unlock_irqrestore(&dmamux->lock, flags);
         of_node_put(dma_spec->np);
         dev_err(&pdev->dev, "errno %d\n", ret);
+err_put_pdev:
+        put_device(&pdev->dev);
+
         return ERR_PTR(ret);
 }
+3 -1
drivers/dma/dw/rzn1-dmamux.c
···
         if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
                 ret = -EBUSY;
-                goto free_map;
+                goto put_dma_spec_np;
         }
 
         mask = BIT(map->req_idx);
···
 clear_bitmap:
         clear_bit(map->req_idx, dmamux->used_chans);
+put_dma_spec_np:
+        of_node_put(dma_spec->np);
 free_map:
         kfree(map);
 put_device:
+1
drivers/dma/fsl-edma-common.c
···
         free_irq(fsl_chan->txirq, fsl_chan);
 err_txirq:
         dma_pool_destroy(fsl_chan->tcd_pool);
+        clk_disable_unprepare(fsl_chan->clk);
 
         return ret;
 }
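[Editor's note] The added clk_disable_unprepare() restores the usual unwind discipline: each error label undoes, in reverse order, exactly the steps that succeeded before the failure. A self-contained sketch of that shape with hypothetical foo_* names (not the fsl-edma code itself):

    #include <linux/clk.h>
    #include <linux/dmapool.h>
    #include <linux/interrupt.h>

    struct foo_chan {   /* hypothetical channel state */
            struct device *dev;
            struct clk *clk;
            struct dma_pool *pool;
            int irq;
    };

    static irqreturn_t foo_irq(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int foo_alloc_chan_resources(struct foo_chan *chan)
    {
            int ret;

            ret = clk_prepare_enable(chan->clk);                    /* step 1 */
            if (ret)
                    return ret;

            chan->pool = dma_pool_create("foo", chan->dev, 64, 32, 0); /* step 2 */
            if (!chan->pool) {
                    ret = -ENOMEM;
                    goto err_clk;
            }

            ret = request_irq(chan->irq, foo_irq, 0, "foo", chan);  /* step 3 */
            if (ret)
                    goto err_pool;

            return 0;

    err_pool:
            dma_pool_destroy(chan->pool);           /* undo step 2 */
    err_clk:
            clk_disable_unprepare(chan->clk);       /* undo step 1 */
            return ret;
    }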
+19 -4
drivers/dma/idxd/compat.c
···
         int rc = -ENODEV;
 
         dev = bus_find_device_by_name(bus, NULL, buf);
-        if (dev && dev->driver) {
+        if (!dev)
+                return -ENODEV;
+
+        if (dev->driver) {
                 device_driver_detach(dev);
                 rc = count;
         }
+
+        put_device(dev);
 
         return rc;
 }
···
         struct idxd_dev *idxd_dev;
 
         dev = bus_find_device_by_name(bus, NULL, buf);
-        if (!dev || dev->driver || drv != &dsa_drv.drv)
+        if (!dev)
                 return -ENODEV;
+
+        if (dev->driver || drv != &dsa_drv.drv)
+                goto err_put_dev;
 
         idxd_dev = confdev_to_idxd_dev(dev);
         if (is_idxd_dev(idxd_dev)) {
···
                 alt_drv = driver_find("user", bus);
         }
         if (!alt_drv)
-                return -ENODEV;
+                goto err_put_dev;
 
         rc = device_driver_attach(alt_drv, dev);
         if (rc < 0)
-                return rc;
+                goto err_put_dev;
+
+        put_device(dev);
 
         return count;
+
+err_put_dev:
+        put_device(dev);
+
+        return rc;
 }
 static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
+14 -5
drivers/dma/lpc18xx-dmamux.c
···
         struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
         unsigned long flags;
         unsigned mux;
+        int ret = -EINVAL;
 
         if (dma_spec->args_count != 3) {
                 dev_err(&pdev->dev, "invalid number of dma mux args\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         mux = dma_spec->args[0];
         if (mux >= dmamux->dma_master_requests) {
                 dev_err(&pdev->dev, "invalid mux number: %d\n",
                         dma_spec->args[0]);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
                 dev_err(&pdev->dev, "invalid dma mux value: %d\n",
                         dma_spec->args[1]);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         /* The of_node_put() will be done in the core for the node */
         dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
         if (!dma_spec->np) {
                 dev_err(&pdev->dev, "can't get dma master\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         spin_lock_irqsave(&dmamux->lock, flags);
···
                 dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
                         mux, mux, dmamux->muxes[mux].value);
                 of_node_put(dma_spec->np);
-                return ERR_PTR(-EBUSY);
+                ret = -EBUSY;
+                goto err_put_pdev;
         }
 
         dmamux->muxes[mux].busy = true;
···
         dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
                 dmamux->muxes[mux].value, mux);
 
+        put_device(&pdev->dev);
+
         return &dmamux->muxes[mux];
+
+err_put_pdev:
+        put_device(&pdev->dev);
+
+        return ERR_PTR(ret);
 }
 
 static int lpc18xx_dmamux_probe(struct platform_device *pdev)
+14 -5
drivers/dma/lpc32xx-dmamux.c
···
         struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
         unsigned long flags;
         struct lpc32xx_dmamux *mux = NULL;
+        int ret = -EINVAL;
         int i;
 
         if (dma_spec->args_count != 3) {
                 dev_err(&pdev->dev, "invalid number of dma mux args\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
···
         if (!mux) {
                 dev_err(&pdev->dev, "invalid mux request number: %d\n",
                         dma_spec->args[0]);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         if (dma_spec->args[2] > 1) {
                 dev_err(&pdev->dev, "invalid dma mux value: %d\n",
                         dma_spec->args[1]);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         /* The of_node_put() will be done in the core for the node */
         dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
         if (!dma_spec->np) {
                 dev_err(&pdev->dev, "can't get dma master\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         spin_lock_irqsave(&dmamux->lock, flags);
···
                 dev_err(dev, "dma request signal %d busy, routed to %s\n",
                         mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
                 of_node_put(dma_spec->np);
-                return ERR_PTR(-EBUSY);
+                ret = -EBUSY;
+                goto err_put_pdev;
         }
 
         mux->busy = true;
···
         dev_dbg(dev, "dma request signal %d routed to %s\n",
                 mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
 
+        put_device(&pdev->dev);
+
         return mux;
+
+err_put_pdev:
+        put_device(&pdev->dev);
+
+        return ERR_PTR(ret);
 }
 
 static int lpc32xx_dmamux_probe(struct platform_device *pdev)
+14 -12
drivers/dma/mmp_pdma.c
···
  *
  * Controller Configuration:
  * @run_bits: Control bits in DCSR register for channel start/stop
- * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
- *            settings, or explicit mask like DMA_BIT_MASK(32/64)
+ * @dma_width: DMA addressing width in bits (32 or 64). Determines the
+ *             DMA mask capability of the controller hardware.
  */
 struct mmp_pdma_ops {
         /* Hardware Register Operations */
···
         /* Controller Configuration */
         u32 run_bits;
-        u64 dma_mask;
+        u32 dma_width;
 };
 
 struct mmp_pdma_device {
···
 {
         struct mmp_pdma_desc_sw *sw;
         struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+        unsigned long flags;
         u64 curr;
         u32 residue = 0;
         bool passed = false;
···
                 curr = pdev->ops->read_dst_addr(chan->phy);
         else
                 curr = pdev->ops->read_src_addr(chan->phy);
+
+        spin_lock_irqsave(&chan->desc_lock, flags);
 
         list_for_each_entry(sw, &chan->chain_running, node) {
                 u64 start, end;
···
                         continue;
 
                 if (sw->async_tx.cookie == cookie) {
+                        spin_unlock_irqrestore(&chan->desc_lock, flags);
                         return residue;
                 } else {
                         residue = 0;
                         passed = false;
                 }
         }
+
+        spin_unlock_irqrestore(&chan->desc_lock, flags);
 
         /* We should only get here in case of cyclic transactions */
         return residue;
···
         .get_desc_src_addr = get_desc_src_addr_32,
         .get_desc_dst_addr = get_desc_dst_addr_32,
         .run_bits = (DCSR_RUN),
-        .dma_mask = 0, /* let OF/platform set DMA mask */
+        .dma_width = 32,
 };
 
 static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
···
         .get_desc_src_addr = get_desc_src_addr_64,
         .get_desc_dst_addr = get_desc_dst_addr_64,
         .run_bits = (DCSR_RUN | DCSR_LPAEEN),
-        .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+        .dma_width = 64,
 };
 
 static const struct of_device_id mmp_pdma_dt_ids[] = {
···
         pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
         pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
-        /* Set DMA mask based on ops->dma_mask, or OF/platform */
-        if (pdev->ops->dma_mask)
-                dma_set_mask(pdev->dev, pdev->ops->dma_mask);
-        else if (pdev->dev->coherent_dma_mask)
-                dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
-        else
-                dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+        /* Set DMA mask based on controller hardware capabilities */
+        dma_set_mask_and_coherent(pdev->dev,
+                                  DMA_BIT_MASK(pdev->ops->dma_width));
 
         ret = dma_async_device_register(&pdev->device);
         if (ret) {
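[Editor's note] The residue half of this change fixes a time-of-check race: mmp_pdma_residue() walked chain_running while the completion path could be unlinking descriptors from it. The invariant, reduced to a hypothetical demo_* pair so both sides of the lock are visible:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_desc {
            struct list_head node;
            u32 len;
    };

    struct demo_chan {  /* stands in for chan->desc_lock + chain_running */
            spinlock_t desc_lock;
            struct list_head chain_running;
    };

    /* Reader: must hold desc_lock, or list_for_each_entry() can walk a
     * list that the writer below is concurrently modifying. */
    static u32 demo_residue(struct demo_chan *c)
    {
            struct demo_desc *d;
            unsigned long flags;
            u32 residue = 0;

            spin_lock_irqsave(&c->desc_lock, flags);
            list_for_each_entry(d, &c->chain_running, node)
                    residue += d->len;
            spin_unlock_irqrestore(&c->desc_lock, flags);

            return residue;
    }

    /* Writer (completion path): unlinks under the same lock. */
    static void demo_complete_one(struct demo_chan *c, struct demo_desc *d)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->desc_lock, flags);
            list_del(&d->node);
            spin_unlock_irqrestore(&c->desc_lock, flags);
    }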
+4 -2
drivers/dma/qcom/gpi.c
···
 gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
 {
         struct gchan *gchan = to_gchan(chan);
+        void *new_config;
 
         if (!config->peripheral_config)
                 return -EINVAL;
 
-        gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
-        if (!gchan->config)
+        new_config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
+        if (!new_config)
                 return -ENOMEM;
 
+        gchan->config = new_config;
         memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
 
         return 0;
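[Editor's note] The anti-pattern behind this leak is generic: krealloc() returns NULL on failure but leaves the original allocation intact, so assigning its result straight to the only pointer tracking the buffer both leaks the old memory and discards a still-usable buffer. A minimal sketch with a hypothetical struct ctx:

    #include <linux/slab.h>

    struct ctx {        /* hypothetical owner of a growable buffer */
            void *buf;
    };

    static int ctx_resize(struct ctx *ctx, size_t new_size)
    {
            /* Wrong: ctx->buf = krealloc(ctx->buf, new_size, GFP_KERNEL);
             * a failed krealloc() would overwrite the only pointer to the
             * still-allocated old buffer. */
            void *tmp = krealloc(ctx->buf, new_size, GFP_KERNEL);

            if (!tmp)
                    return -ENOMEM; /* old buffer remains valid and tracked */

            ctx->buf = tmp;
            return 0;
    }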
+16 -2
drivers/dma/sh/rz-dmac.c
···
 static int rz_dmac_terminate_all(struct dma_chan *chan)
 {
         struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+        struct rz_lmdesc *lmdesc = channel->lmdesc.base;
         unsigned long flags;
+        unsigned int i;
         LIST_HEAD(head);
 
         rz_dmac_disable_hw(channel);
         spin_lock_irqsave(&channel->vc.lock, flags);
+        for (i = 0; i < DMAC_NR_LMDESC; i++)
+                lmdesc[i].header = 0;
+
         list_splice_tail_init(&channel->ld_active, &channel->ld_free);
         list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
         vchan_get_all_descriptors(&channel->vc, &head);
···
         return 0;
 }
 
+static void rz_dmac_put_device(void *_dev)
+{
+        struct device *dev = _dev;
+
+        put_device(dev);
+}
+
 static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
 {
         struct device_node *np = dev->of_node;
···
                 dev_err(dev, "ICU device not found.\n");
                 return -ENODEV;
         }
+
+        ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
+        if (ret)
+                return ret;
 
         dmac_index = args.args[0];
         if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
···
         reset_control_assert(dmac->rstc);
         pm_runtime_put(&pdev->dev);
         pm_runtime_disable(&pdev->dev);
-
-        platform_device_put(dmac->icu.pdev);
 }
 
 static const struct of_device_id of_rz_dmac_match[] = {
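[Editor's note] Registering put_device() as a devres action ties the ICU device reference to the dmac device's own lifetime, which is why the explicit platform_device_put() in the remove path can be dropped. The general shape, with hypothetical example_* names:

    #include <linux/device.h>

    static void example_put_device(void *data)
    {
            put_device(data);
    }

    /*
     * Make `consumer`'s devres drop a reference on `target` at unbind time.
     * If registration fails, devm_add_action_or_reset() runs the action
     * immediately, so no path leaks the reference.
     */
    static int example_adopt_device_ref(struct device *consumer,
                                        struct device *target)
    {
            return devm_add_action_or_reset(consumer, example_put_device, target);
    }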
+19 -12
drivers/dma/stm32/stm32-dmamux.c
···
         struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
         struct stm32_dmamux *mux;
         u32 i, min, max;
-        int ret;
+        int ret = -EINVAL;
         unsigned long flags;
 
         if (dma_spec->args_count != 3) {
                 dev_err(&pdev->dev, "invalid number of dma mux args\n");
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         if (dma_spec->args[0] > dmamux->dmamux_requests) {
                 dev_err(&pdev->dev, "invalid mux request number: %d\n",
                         dma_spec->args[0]);
-                return ERR_PTR(-EINVAL);
+                goto err_put_pdev;
         }
 
         mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-        if (!mux)
-                return ERR_PTR(-ENOMEM);
+        if (!mux) {
+                ret = -ENOMEM;
+                goto err_put_pdev;
+        }
 
         spin_lock_irqsave(&dmamux->lock, flags);
         mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
···
                 spin_unlock_irqrestore(&dmamux->lock, flags);
                 dev_err(&pdev->dev, "Run out of free DMA requests\n");
                 ret = -ENOMEM;
-                goto error_chan_id;
+                goto err_free_mux;
         }
         set_bit(mux->chan_id, dmamux->dma_inuse);
         spin_unlock_irqrestore(&dmamux->lock, flags);
···
         dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
         if (!dma_spec->np) {
                 dev_err(&pdev->dev, "can't get dma master\n");
-                ret = -EINVAL;
-                goto error;
+                goto err_clear_inuse;
         }
 
         /* Set dma request */
···
         ret = pm_runtime_resume_and_get(&pdev->dev);
         if (ret < 0) {
                 spin_unlock_irqrestore(&dmamux->lock, flags);
-                goto error;
+                goto err_put_dma_spec_np;
         }
         spin_unlock_irqrestore(&dmamux->lock, flags);
···
         dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
                 mux->request, mux->master, mux->chan_id);
 
+        put_device(&pdev->dev);
+
         return mux;
 
-error:
+err_put_dma_spec_np:
+        of_node_put(dma_spec->np);
+err_clear_inuse:
         clear_bit(mux->chan_id, dmamux->dma_inuse);
-
-error_chan_id:
+err_free_mux:
         kfree(mux);
+err_put_pdev:
+        put_device(&pdev->dev);
+
         return ERR_PTR(ret);
 }
+9 -1
drivers/dma/tegra210-adma.c
···
                 return;
         }
 
-        kfree(tdc->desc);
+        vchan_terminate_vdesc(&tdc->desc->vd);
         tdc->desc = NULL;
+}
+
+static void tegra_adma_synchronize(struct dma_chan *dc)
+{
+        struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+        vchan_synchronize(&tdc->vc);
 }
 
 static void tegra_adma_start(struct tegra_adma_chan *tdc)
···
         tdma->dma_dev.device_config = tegra_adma_slave_config;
         tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
         tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
+        tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
         tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
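[Editor's note] Parking the in-flight descriptor with vchan_terminate_vdesc() instead of kfree() means something must reap it later; that is exactly what the new device_synchronize callback provides. From the consumer side the pairing looks roughly like this (hypothetical my_dev_stop; dmaengine_terminate_sync() is shorthand for the two calls):

    #include <linux/dmaengine.h>

    static void my_dev_stop(struct dma_chan *chan)
    {
            /* Stops the channel; an in-flight descriptor is parked, not freed. */
            dmaengine_terminate_async(chan);

            /* Waits until no completion callback can still be running and
             * lets the driver's device_synchronize (vchan_synchronize() in
             * tegra-adma) free the parked descriptors. */
            dmaengine_synchronize(chan);
    }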
+20 -15
drivers/dma/ti/dma-crossbar.c
···
 {
         struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
         struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
-        struct ti_am335x_xbar_map *map;
+        struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
 
         if (dma_spec->args_count != 3)
-                return ERR_PTR(-EINVAL);
+                goto out_put_pdev;
 
         if (dma_spec->args[2] >= xbar->xbar_events) {
                 dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
                         dma_spec->args[2]);
-                return ERR_PTR(-EINVAL);
+                goto out_put_pdev;
         }
 
         if (dma_spec->args[0] >= xbar->dma_requests) {
                 dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
                         dma_spec->args[0]);
-                return ERR_PTR(-EINVAL);
+                goto out_put_pdev;
         }
 
         /* The of_node_put() will be done in the core for the node */
         dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
         if (!dma_spec->np) {
                 dev_err(&pdev->dev, "Can't get DMA master\n");
-                return ERR_PTR(-EINVAL);
+                goto out_put_pdev;
         }
 
         map = kzalloc(sizeof(*map), GFP_KERNEL);
         if (!map) {
                 of_node_put(dma_spec->np);
-                return ERR_PTR(-ENOMEM);
+                map = ERR_PTR(-ENOMEM);
+                goto out_put_pdev;
         }
 
         map->dma_line = (u16)dma_spec->args[0];
···
                 map->mux_val, map->dma_line);
 
         ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
+
+out_put_pdev:
+        put_device(&pdev->dev);
 
         return map;
 }
···
 {
         struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
         struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
-        struct ti_dra7_xbar_map *map;
+        struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
 
         if (dma_spec->args[0] >= xbar->xbar_requests) {
                 dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
                         dma_spec->args[0]);
-                put_device(&pdev->dev);
-                return ERR_PTR(-EINVAL);
+                goto out_put_pdev;
         }
 
         /* The of_node_put() will be done in the core for the node */
         dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
         if (!dma_spec->np) {
                 dev_err(&pdev->dev, "Can't get DMA master\n");
-                put_device(&pdev->dev);
-                return ERR_PTR(-EINVAL);
+                goto out_put_pdev;
         }
 
         map = kzalloc(sizeof(*map), GFP_KERNEL);
         if (!map) {
                 of_node_put(dma_spec->np);
-                put_device(&pdev->dev);
-                return ERR_PTR(-ENOMEM);
+                map = ERR_PTR(-ENOMEM);
+                goto out_put_pdev;
         }
 
         mutex_lock(&xbar->mutex);
···
                 dev_err(&pdev->dev, "Run out of free DMA requests\n");
                 kfree(map);
                 of_node_put(dma_spec->np);
-                put_device(&pdev->dev);
-                return ERR_PTR(-ENOMEM);
+                map = ERR_PTR(-ENOMEM);
+                goto out_put_pdev;
         }
         set_bit(map->xbar_out, xbar->dma_inuse);
         mutex_unlock(&xbar->mutex);
···
                 map->xbar_in, map->xbar_out);
 
         ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
+
+out_put_pdev:
+        put_device(&pdev->dev);
 
         return map;
 }
+1 -1
drivers/dma/ti/k3-udma-private.c
···
         }
 
         ud = platform_get_drvdata(pdev);
+        put_device(&pdev->dev);
         if (!ud) {
                 pr_debug("UDMA has not been probed\n");
-                put_device(&pdev->dev);
                 return ERR_PTR(-EPROBE_DEFER);
         }
+4
drivers/dma/ti/omap-dma.c
···
         if (rc) {
                 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                         rc);
+                if (od->ll123_supported)
+                        dma_pool_destroy(od->desc_pool);
                 omap_dma_free(od);
                 return rc;
         }
···
         if (rc) {
                 pr_warn("OMAP-DMA: failed to register DMA controller\n");
                 dma_async_device_unregister(&od->ddev);
+                if (od->ll123_supported)
+                        dma_pool_destroy(od->desc_pool);
                 omap_dma_free(od);
         }
 }
+1
drivers/dma/xilinx/xdma-regs.h
···
 
 /* The length of register space exposed to host */
 #define XDMA_REG_SPACE_LEN	65536
+#define XDMA_MAX_REG_OFFSET	(XDMA_REG_SPACE_LEN - 4)
 
 /*
  * maximum number of DMA channels for each direction:
+1 -1
drivers/dma/xilinx/xdma.c
···
         .reg_bits = 32,
         .val_bits = 32,
         .reg_stride = 4,
-        .max_register = XDMA_REG_SPACE_LEN,
+        .max_register = XDMA_MAX_REG_OFFSET,
 };
 
 /**
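[Editor's note] The one-word fix encodes a regmap subtlety: .max_register is the byte offset of the last valid register, not the size of the register window, so a 64 KiB space of 32-bit registers ends at offset 65536 - 4. A hedged illustration (example_regmap_config is not taken from the driver):

    #include <linux/regmap.h>

    static const struct regmap_config example_regmap_config = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
            /* Last addressable register, i.e. length minus one stride. */
            .max_register = 65536 - 4,
    };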
+5 -2
drivers/dma/xilinx/xilinx_dma.c
···
 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
 #define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2
 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1
+#define XILINX_DMA_DFAULT_ADDRWIDTH	0x20
 
 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
···
         struct device_node *node = pdev->dev.of_node;
         struct xilinx_dma_device *xdev;
         struct device_node *child, *np = pdev->dev.of_node;
-        u32 num_frames, addr_width, len_width;
+        u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
         int i, err;
 
         /* Allocate and initialize the DMA engine structure */
···
         err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
         if (err < 0)
-                dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+                dev_warn(xdev->dev,
+                         "missing xlnx,addrwidth property, using default value %d\n",
+                         XILINX_DMA_DFAULT_ADDRWIDTH);
 
         if (addr_width > 32)
                 xdev->ext_addr = true;