Merge tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
"A bunch of driver fixes for:

- dma mask fix for mmp pdma driver

- Xilinx regmap max register and uninitialized addr_width fixes

- device leak fixes for a bunch of drivers in the subsystem

- stm32 dmamux and TI crossbar driver fixes for device & OF node leaks
  and route allocation cleanup

- Tegra use-after-free fix

- memory leak fixes in the Qualcomm GPI and omap-dma drivers

- compatible string fix for the apple-admac driver"

* tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
dmaengine: apple-admac: Add "apple,t8103-admac" compatible
dmaengine: omap-dma: fix dma_pool resource leak in error paths
dmaengine: qcom: gpi: Fix memory leak in gpi_peripheral_config()
dmaengine: sh: rz-dmac: Fix rz_dmac_terminate_all()
dmaengine: xilinx_dma: Fix uninitialized addr_width when "xlnx,addrwidth" property is missing
dmaengine: tegra-adma: Fix use-after-free
dmaengine: fsl-edma: Fix clk leak on alloc_chan_resources failure
dmaengine: mmp_pdma: Fix race condition in mmp_pdma_residue()
dmaengine: ti: k3-udma: fix device leak on udma lookup
dmaengine: ti: dma-crossbar: clean up dra7x route allocation error paths
dmaengine: ti: dma-crossbar: fix device leak on am335x route allocation
dmaengine: ti: dma-crossbar: fix device leak on dra7x route allocation
dmaengine: stm32: dmamux: clean up route allocation error labels
dmaengine: stm32: dmamux: fix OF node leak on route allocation failure
dmaengine: stm32: dmamux: fix device leak on route allocation
dmaengine: sh: rz-dmac: fix device leak on probe failure
dmaengine: lpc32xx-dmamux: fix device leak on route allocation
dmaengine: lpc18xx-dmamux: fix device leak on route allocation
dmaengine: idxd: fix device leaks on compat bind and unbind
dmaengine: dw: dmamux: fix OF node leak on route allocation failure
...

+168 -73
+1
drivers/dma/apple-admac.c
···
 }
 
 static const struct of_device_id admac_of_match[] = {
+	{ .compatible = "apple,t8103-admac", },
 	{ .compatible = "apple,admac", },
 	{ }
 };
+7 -2
drivers/dma/at_hdmac.c
···
 static void atc_free_chan_resources(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_dma_slave *atslave;
 
 	BUG_ON(atc_chan_is_enabled(atchan));
···
 	/*
 	 * Free atslave allocated in at_dma_xlate()
 	 */
-	kfree(chan->private);
-	chan->private = NULL;
+	atslave = chan->private;
+	if (atslave) {
+		put_device(atslave->dma_dev);
+		kfree(atslave);
+		chan->private = NULL;
+	}
 
 	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
+5 -1
drivers/dma/bcm-sba-raid.c
···
 	/* Prealloc channel resource */
 	ret = sba_prealloc_channel_resources(sba);
 	if (ret)
-		goto fail_free_mchan;
+		goto fail_put_mbox;
 
 	/* Check availability of debugfs */
 	if (!debugfs_initialized())
···
 fail_free_resources:
 	debugfs_remove_recursive(sba->root);
 	sba_freeup_channel_resources(sba);
+fail_put_mbox:
+	put_device(sba->mbox_dev);
 fail_free_mchan:
 	mbox_free_channel(sba->mchan);
 	return ret;
···
 	debugfs_remove_recursive(sba->root);
 
 	sba_freeup_channel_resources(sba);
+
+	put_device(sba->mbox_dev);
 
 	mbox_free_channel(sba->mchan);
 }
+10 -7
drivers/dma/cv1800b-dmamux.c
···
 	struct llist_node *node;
 	unsigned long flags;
 	unsigned int chid, devid, cpuid;
-	int ret;
+	int ret = -EINVAL;
 
 	if (dma_spec->args_count != DMAMUX_NCELLS) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	devid = dma_spec->args[0];
···
 
 	if (devid > MAX_DMA_MAPPING_ID) {
 		dev_err(&pdev->dev, "invalid device id: %u\n", devid);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	if (cpuid > MAX_DMA_CPU_ID) {
 		dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	spin_lock_irqsave(&dmamux->lock, flags);
···
 			if (map->peripheral == devid && map->cpu == cpuid)
 				goto found;
 		}
-
-		ret = -EINVAL;
 		goto failed;
 	} else {
 		node = llist_del_first(&dmamux->free_maps);
···
 	dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
 		chid, devid, cpuid);
 
+	put_device(&pdev->dev);
+
 	return map;
 
 failed:
 	spin_unlock_irqrestore(&dmamux->lock, flags);
 	of_node_put(dma_spec->np);
 	dev_err(&pdev->dev, "errno %d\n", ret);
+err_put_pdev:
+	put_device(&pdev->dev);
+
 	return ERR_PTR(ret);
 }
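Most of the device-leak fixes in this series share one root cause: of_find_device_by_node() (used by the dmamux xlate callbacks) and bus_find_device_by_name() return a device with its refcount elevated, and that reference must be dropped with put_device() on every exit path, success included. A minimal sketch of the corrected shape, with made-up names (example_route_allocate and struct example_route are illustrative, not from any driver above):

#include <linux/err.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_route {		/* hypothetical per-route state */
	u32 mux_val;
};

static void *example_route_allocate(struct of_phandle_args *dma_spec,
				    struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct example_route *route;
	int ret = -EINVAL;

	if (!pdev)
		return ERR_PTR(-ENODEV);

	/* was: return ERR_PTR(-EINVAL); -- which leaked the pdev reference */
	if (dma_spec->args_count != 3)
		goto err_put_pdev;

	route = kzalloc(sizeof(*route), GFP_KERNEL);
	if (!route) {
		ret = -ENOMEM;
		goto err_put_pdev;
	}
	route->mux_val = dma_spec->args[0];

	put_device(&pdev->dev);	/* drop the lookup reference on success too */
	return route;

err_put_pdev:
	put_device(&pdev->dev);
	return ERR_PTR(ret);
}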
+3 -1
drivers/dma/dw/rzn1-dmamux.c
···
 
 	if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
 		ret = -EBUSY;
-		goto free_map;
+		goto put_dma_spec_np;
 	}
 
 	mask = BIT(map->req_idx);
···
 
 clear_bitmap:
 	clear_bit(map->req_idx, dmamux->used_chans);
+put_dma_spec_np:
+	of_node_put(dma_spec->np);
 free_map:
 	kfree(map);
 put_device:
+1
drivers/dma/fsl-edma-common.c
···
 	free_irq(fsl_chan->txirq, fsl_chan);
 err_txirq:
 	dma_pool_destroy(fsl_chan->tcd_pool);
+	clk_disable_unprepare(fsl_chan->clk);
 
 	return ret;
 }
+19 -4
drivers/dma/idxd/compat.c
···
 	int rc = -ENODEV;
 
 	dev = bus_find_device_by_name(bus, NULL, buf);
-	if (dev && dev->driver) {
+	if (!dev)
+		return -ENODEV;
+
+	if (dev->driver) {
 		device_driver_detach(dev);
 		rc = count;
 	}
+
+	put_device(dev);
 
 	return rc;
 }
···
 	struct idxd_dev *idxd_dev;
 
 	dev = bus_find_device_by_name(bus, NULL, buf);
-	if (!dev || dev->driver || drv != &dsa_drv.drv)
+	if (!dev)
 		return -ENODEV;
+
+	if (dev->driver || drv != &dsa_drv.drv)
+		goto err_put_dev;
 
 	idxd_dev = confdev_to_idxd_dev(dev);
 	if (is_idxd_dev(idxd_dev)) {
···
 		alt_drv = driver_find("user", bus);
 	}
 	if (!alt_drv)
-		return -ENODEV;
+		goto err_put_dev;
 
 	rc = device_driver_attach(alt_drv, dev);
 	if (rc < 0)
-		return rc;
+		goto err_put_dev;
+
+	put_device(dev);
 
 	return count;
+
+err_put_dev:
+	put_device(dev);
+
+	return rc;
 }
 static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
+14 -5
drivers/dma/lpc18xx-dmamux.c
···
 	struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
 	unsigned long flags;
 	unsigned mux;
+	int ret = -EINVAL;
 
 	if (dma_spec->args_count != 3) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	mux = dma_spec->args[0];
 	if (mux >= dmamux->dma_master_requests) {
 		dev_err(&pdev->dev, "invalid mux number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
 		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
 			dma_spec->args[1]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	spin_lock_irqsave(&dmamux->lock, flags);
···
 		dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
 			mux, mux, dmamux->muxes[mux].value);
 		of_node_put(dma_spec->np);
-		return ERR_PTR(-EBUSY);
+		ret = -EBUSY;
+		goto err_put_pdev;
 	}
 
 	dmamux->muxes[mux].busy = true;
···
 	dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
 		dmamux->muxes[mux].value, mux);
 
+	put_device(&pdev->dev);
+
 	return &dmamux->muxes[mux];
+
+err_put_pdev:
+	put_device(&pdev->dev);
+
+	return ERR_PTR(ret);
 }
 
 static int lpc18xx_dmamux_probe(struct platform_device *pdev)
+14 -5
drivers/dma/lpc32xx-dmamux.c
···
 	struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
 	unsigned long flags;
 	struct lpc32xx_dmamux *mux = NULL;
+	int ret = -EINVAL;
 	int i;
 
 	if (dma_spec->args_count != 3) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
···
 	if (!mux) {
 		dev_err(&pdev->dev, "invalid mux request number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	if (dma_spec->args[2] > 1) {
 		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
 			dma_spec->args[1]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	spin_lock_irqsave(&dmamux->lock, flags);
···
 		dev_err(dev, "dma request signal %d busy, routed to %s\n",
 			mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
 		of_node_put(dma_spec->np);
-		return ERR_PTR(-EBUSY);
+		ret = -EBUSY;
+		goto err_put_pdev;
 	}
 
 	mux->busy = true;
···
 	dev_dbg(dev, "dma request signal %d routed to %s\n",
 		mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
 
+	put_device(&pdev->dev);
+
 	return mux;
+
+err_put_pdev:
+	put_device(&pdev->dev);
+
+	return ERR_PTR(ret);
 }
 
 static int lpc32xx_dmamux_probe(struct platform_device *pdev)
+14 -12
drivers/dma/mmp_pdma.c
···
  *
  * Controller Configuration:
  * @run_bits: Control bits in DCSR register for channel start/stop
- * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
- *            settings, or explicit mask like DMA_BIT_MASK(32/64)
+ * @dma_width: DMA addressing width in bits (32 or 64). Determines the
+ *             DMA mask capability of the controller hardware.
  */
 struct mmp_pdma_ops {
 	/* Hardware Register Operations */
···
 
 	/* Controller Configuration */
 	u32 run_bits;
-	u64 dma_mask;
+	u32 dma_width;
 };
 
 struct mmp_pdma_device {
···
 {
 	struct mmp_pdma_desc_sw *sw;
 	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+	unsigned long flags;
 	u64 curr;
 	u32 residue = 0;
 	bool passed = false;
···
 		curr = pdev->ops->read_dst_addr(chan->phy);
 	else
 		curr = pdev->ops->read_src_addr(chan->phy);
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	list_for_each_entry(sw, &chan->chain_running, node) {
 		u64 start, end;
···
 			continue;
 
 		if (sw->async_tx.cookie == cookie) {
+			spin_unlock_irqrestore(&chan->desc_lock, flags);
 			return residue;
 		} else {
 			residue = 0;
 			passed = false;
 		}
 	}
+
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
 	/* We should only get here in case of cyclic transactions */
 	return residue;
···
 	.get_desc_src_addr = get_desc_src_addr_32,
 	.get_desc_dst_addr = get_desc_dst_addr_32,
 	.run_bits = (DCSR_RUN),
-	.dma_mask = 0,	/* let OF/platform set DMA mask */
+	.dma_width = 32,
 };
 
 static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
···
 	.get_desc_src_addr = get_desc_src_addr_64,
 	.get_desc_dst_addr = get_desc_dst_addr_64,
 	.run_bits = (DCSR_RUN | DCSR_LPAEEN),
-	.dma_mask = DMA_BIT_MASK(64),	/* force 64-bit DMA addr capability */
+	.dma_width = 64,
 };
 
 static const struct of_device_id mmp_pdma_dt_ids[] = {
···
 	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
 	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 
-	/* Set DMA mask based on ops->dma_mask, or OF/platform */
-	if (pdev->ops->dma_mask)
-		dma_set_mask(pdev->dev, pdev->ops->dma_mask);
-	else if (pdev->dev->coherent_dma_mask)
-		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
-	else
-		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+	/* Set DMA mask based on controller hardware capabilities */
+	dma_set_mask_and_coherent(pdev->dev,
+				  DMA_BIT_MASK(pdev->ops->dma_width));
 
 	ret = dma_async_device_register(&pdev->device);
 	if (ret) {
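The residue fix above also illustrates a general locking rule: mmp_pdma_residue() walked chan->chain_running while the completion path could concurrently unlink descriptors from it, so the walk must hold the same lock the writers take, with interrupts saved because the writer runs in tasklet context. A generic sketch, assuming made-up structure names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_chan {
	spinlock_t lock;		/* taken by the completion tasklet */
	struct list_head running;	/* descriptors, unlinked on completion */
};

struct example_desc {
	struct list_head node;
	u32 len;
};

static u32 example_residue(struct example_chan *chan)
{
	struct example_desc *d;
	unsigned long flags;
	u32 residue = 0;

	/* irqsave variant: the writer runs in tasklet/interrupt context */
	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry(d, &chan->running, node)
		residue += d->len;
	spin_unlock_irqrestore(&chan->lock, flags);

	return residue;
}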
+4 -2
drivers/dma/qcom/gpi.c
···
 gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
 {
 	struct gchan *gchan = to_gchan(chan);
+	void *new_config;
 
 	if (!config->peripheral_config)
 		return -EINVAL;
 
-	gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
-	if (!gchan->config)
+	new_config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
+	if (!new_config)
 		return -ENOMEM;
 
+	gchan->config = new_config;
 	memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
 
 	return 0;
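The gpi change is the canonical krealloc() idiom: on failure krealloc() returns NULL but leaves the old allocation live, so assigning the result straight to the only pointer holding that allocation both leaks the buffer and loses it. A minimal sketch of wrong vs. right, with a hypothetical struct:

#include <linux/slab.h>

struct example_obj {
	void *buf;		/* the only reference to the allocation */
	size_t size;
};

static int example_resize(struct example_obj *obj, size_t new_size)
{
	void *tmp;

	/*
	 * Wrong: obj->buf = krealloc(obj->buf, new_size, GFP_KERNEL);
	 * On failure krealloc() returns NULL and keeps the old buffer
	 * allocated, so the only reference to it would be overwritten.
	 */
	tmp = krealloc(obj->buf, new_size, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* obj->buf is still valid and owned */

	obj->buf = tmp;
	obj->size = new_size;
	return 0;
}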
+16 -2
drivers/dma/sh/rz-dmac.c
···
 static int rz_dmac_terminate_all(struct dma_chan *chan)
 {
 	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
 	unsigned long flags;
+	unsigned int i;
 	LIST_HEAD(head);
 
 	rz_dmac_disable_hw(channel);
 	spin_lock_irqsave(&channel->vc.lock, flags);
+	for (i = 0; i < DMAC_NR_LMDESC; i++)
+		lmdesc[i].header = 0;
+
 	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
 	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
 	vchan_get_all_descriptors(&channel->vc, &head);
···
 	return 0;
 }
 
+static void rz_dmac_put_device(void *_dev)
+{
+	struct device *dev = _dev;
+
+	put_device(dev);
+}
+
 static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
 {
 	struct device_node *np = dev->of_node;
···
 		dev_err(dev, "ICU device not found.\n");
 		return -ENODEV;
 	}
+
+	ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
+	if (ret)
+		return ret;
 
 	dmac_index = args.args[0];
 	if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
···
 	reset_control_assert(dmac->rstc);
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-
-	platform_device_put(dmac->icu.pdev);
 }
 
 static const struct of_device_id of_rz_dmac_match[] = {
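Note the devres trick used here: devm_add_action_or_reset() registers put_device() as a managed cleanup action, so the ICU reference is dropped automatically both on probe failure and on driver removal (the _or_reset part runs the action immediately if registration fails), which is what lets the explicit platform_device_put() in the remove path go away. A hedged sketch of the pattern with illustrative names:

#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void example_put_device(void *data)
{
	put_device(data);
}

/* Tie a looked-up device's reference to @dev's devres, so it is dropped
 * on probe failure and on driver removal without explicit cleanup code. */
static int example_keep_helper_ref(struct device *dev, struct device_node *np)
{
	struct platform_device *helper = of_find_device_by_node(np);

	if (!helper)
		return -ENODEV;

	/* On registration failure this calls example_put_device() itself */
	return devm_add_action_or_reset(dev, example_put_device, &helper->dev);
}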
+19 -12
drivers/dma/stm32/stm32-dmamux.c
···
 	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
 	struct stm32_dmamux *mux;
 	u32 i, min, max;
-	int ret;
+	int ret = -EINVAL;
 	unsigned long flags;
 
 	if (dma_spec->args_count != 3) {
 		dev_err(&pdev->dev, "invalid number of dma mux args\n");
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	if (dma_spec->args[0] > dmamux->dmamux_requests) {
 		dev_err(&pdev->dev, "invalid mux request number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto err_put_pdev;
 	}
 
 	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
-	if (!mux)
-		return ERR_PTR(-ENOMEM);
+	if (!mux) {
+		ret = -ENOMEM;
+		goto err_put_pdev;
+	}
 
 	spin_lock_irqsave(&dmamux->lock, flags);
 	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
···
 		spin_unlock_irqrestore(&dmamux->lock, flags);
 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
 		ret = -ENOMEM;
-		goto error_chan_id;
+		goto err_free_mux;
 	}
 	set_bit(mux->chan_id, dmamux->dma_inuse);
 	spin_unlock_irqrestore(&dmamux->lock, flags);
···
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "can't get dma master\n");
-		ret = -EINVAL;
-		goto error;
+		goto err_clear_inuse;
 	}
 
 	/* Set dma request */
···
 	ret = pm_runtime_resume_and_get(&pdev->dev);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&dmamux->lock, flags);
-		goto error;
+		goto err_put_dma_spec_np;
 	}
 	spin_unlock_irqrestore(&dmamux->lock, flags);
···
 	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
 		mux->request, mux->master, mux->chan_id);
 
+	put_device(&pdev->dev);
+
 	return mux;
 
-error:
+err_put_dma_spec_np:
+	of_node_put(dma_spec->np);
+err_clear_inuse:
 	clear_bit(mux->chan_id, dmamux->dma_inuse);
-
-error_chan_id:
+err_free_mux:
 	kfree(mux);
+err_put_pdev:
+	put_device(&pdev->dev);
+
 	return ERR_PTR(ret);
 }
+9 -1
drivers/dma/tegra210-adma.c
···
 		return;
 	}
 
-	kfree(tdc->desc);
+	vchan_terminate_vdesc(&tdc->desc->vd);
 	tdc->desc = NULL;
+}
+
+static void tegra_adma_synchronize(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+	vchan_synchronize(&tdc->vc);
 }
 
 static void tegra_adma_start(struct tegra_adma_chan *tdc)
···
 	tdma->dma_dev.device_config = tegra_adma_slave_config;
 	tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
 	tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
+	tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
 	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
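For a virt-dma based driver, the in-flight descriptor handed to the hardware may still be referenced by the core's completion machinery, so terminate_all must not kfree() it directly; it should be handed back via vchan_terminate_vdesc(), and a device_synchronize hook lets clients wait out any pending callbacks. A rough sketch of that contract with made-up example_* names (the hardware-stop step is elided):

#include "virt-dma.h"	/* private drivers/dma/ header providing the vchan helpers */

struct example_desc {
	struct virt_dma_desc vd;
};

struct example_chan {
	struct virt_dma_chan vc;
	struct example_desc *desc;	/* descriptor currently on the hardware */
};

static inline struct example_chan *to_example_chan(struct dma_chan *dc)
{
	return container_of(dc, struct example_chan, vc.chan);
}

static int example_terminate_all(struct dma_chan *dc)
{
	struct example_chan *ec = to_example_chan(dc);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ec->vc.lock, flags);
	/* ... stop the hardware channel here (driver specific) ... */
	if (ec->desc) {
		/* hand the in-flight desc back to the core; never kfree() it */
		vchan_terminate_vdesc(&ec->desc->vd);
		ec->desc = NULL;
	}
	vchan_get_all_descriptors(&ec->vc, &head);
	spin_unlock_irqrestore(&ec->vc.lock, flags);
	vchan_dma_desc_free_list(&ec->vc, &head);

	return 0;
}

static void example_synchronize(struct dma_chan *dc)
{
	/* waits for the terminated vdesc and any pending callbacks */
	vchan_synchronize(&to_example_chan(dc)->vc);
}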
+20 -15
drivers/dma/ti/dma-crossbar.c
···
 {
 	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
 	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
-	struct ti_am335x_xbar_map *map;
+	struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
 
 	if (dma_spec->args_count != 3)
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 
 	if (dma_spec->args[2] >= xbar->xbar_events) {
 		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
 			dma_spec->args[2]);
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}
 
 	if (dma_spec->args[0] >= xbar->dma_requests) {
 		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
 			dma_spec->args[0]);
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}
 
 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "Can't get DMA master\n");
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map) {
 		of_node_put(dma_spec->np);
-		return ERR_PTR(-ENOMEM);
+		map = ERR_PTR(-ENOMEM);
+		goto out_put_pdev;
 	}
 
 	map->dma_line = (u16)dma_spec->args[0];
···
 		map->mux_val, map->dma_line);
 
 	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
+
+out_put_pdev:
+	put_device(&pdev->dev);
 
 	return map;
 }
···
 {
 	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
 	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
-	struct ti_dra7_xbar_map *map;
+	struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
 
 	if (dma_spec->args[0] >= xbar->xbar_requests) {
 		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
 			dma_spec->args[0]);
-		put_device(&pdev->dev);
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}
 
 	/* The of_node_put() will be done in the core for the node */
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "Can't get DMA master\n");
-		put_device(&pdev->dev);
-		return ERR_PTR(-EINVAL);
+		goto out_put_pdev;
 	}
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map) {
 		of_node_put(dma_spec->np);
-		put_device(&pdev->dev);
-		return ERR_PTR(-ENOMEM);
+		map = ERR_PTR(-ENOMEM);
+		goto out_put_pdev;
 	}
 
 	mutex_lock(&xbar->mutex);
···
 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
 		kfree(map);
 		of_node_put(dma_spec->np);
-		put_device(&pdev->dev);
-		return ERR_PTR(-ENOMEM);
+		map = ERR_PTR(-ENOMEM);
+		goto out_put_pdev;
 	}
 	set_bit(map->xbar_out, xbar->dma_inuse);
 	mutex_unlock(&xbar->mutex);
···
 		map->xbar_in, map->xbar_out);
 
 	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
+
+out_put_pdev:
+	put_device(&pdev->dev);
 
 	return map;
 }
+1 -1
drivers/dma/ti/k3-udma-private.c
···
 	}
 
 	ud = platform_get_drvdata(pdev);
+	put_device(&pdev->dev);
 	if (!ud) {
 		pr_debug("UDMA has not been probed\n");
-		put_device(&pdev->dev);
 		return ERR_PTR(-EPROBE_DEFER);
 	}
+4
drivers/dma/ti/omap-dma.c
···
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
 			rc);
+		if (od->ll123_supported)
+			dma_pool_destroy(od->desc_pool);
 		omap_dma_free(od);
 		return rc;
 	}
···
 	if (rc) {
 		pr_warn("OMAP-DMA: failed to register DMA controller\n");
 		dma_async_device_unregister(&od->ddev);
+		if (od->ll123_supported)
+			dma_pool_destroy(od->desc_pool);
 		omap_dma_free(od);
 	}
 }
+1
drivers/dma/xilinx/xdma-regs.h
···
 
 /* The length of register space exposed to host */
 #define XDMA_REG_SPACE_LEN	65536
+#define XDMA_MAX_REG_OFFSET	(XDMA_REG_SPACE_LEN - 4)
 
 /*
  * maximum number of DMA channels for each direction:
+1 -1
drivers/dma/xilinx/xdma.c
···
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
-	.max_register = XDMA_REG_SPACE_LEN,
+	.max_register = XDMA_MAX_REG_OFFSET,
 };
 
 /**
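Worth remembering about the xdma fix: regmap_config.max_register is the offset of the last valid register, not the length of the register window, so a 64 KiB space of 4-byte registers tops out at len - 4. A minimal sketch with made-up names:

#include <linux/regmap.h>

#define EXAMPLE_REG_SPACE_LEN	65536	/* bytes of register space */
/* Offset of the last 4-byte register, not the size of the window */
#define EXAMPLE_MAX_REG_OFFSET	(EXAMPLE_REG_SPACE_LEN - 4)

static const struct regmap_config example_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = EXAMPLE_MAX_REG_OFFSET,
};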
+5 -2
drivers/dma/xilinx/xilinx_dma.c
···
 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
 #define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2
 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1
+#define XILINX_DMA_DFAULT_ADDRWIDTH		0x20
 
 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
···
 	struct device_node *node = pdev->dev.of_node;
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
-	u32 num_frames, addr_width, len_width;
+	u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
···
 
 	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
 	if (err < 0)
-		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+		dev_warn(xdev->dev,
+			 "missing xlnx,addrwidth property, using default value %d\n",
+			 XILINX_DMA_DFAULT_ADDRWIDTH);
 
 	if (addr_width > 32)
 		xdev->ext_addr = true;
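The xilinx_dma bug comes from an easy-to-miss property of the DT accessors: of_property_read_u32() leaves the output argument untouched when the property is absent, so any variable that is only conditionally written must carry its default before the call. A short sketch with an assumed property name:

#include <linux/of.h>
#include <linux/printk.h>

/* "vendor,width" is an assumed example property, defaulting to 32 bits */
static u32 example_read_width(struct device_node *node)
{
	u32 width = 32;	/* must be pre-set: the call won't touch it on error */

	if (of_property_read_u32(node, "vendor,width", &width))
		pr_debug("vendor,width missing, using default %u\n", width);

	return width;
}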