Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:

- Big pile of __counted_by attribute annotations to several structures
for bounds checking of flexible arrays at run-time

- Another big pile of platform remove callback returning void changes

- Device tree device_get_match_data() usage and dropping
of_match_device() calls

- Minor driver updates to pxa, idxd, fsl, hisi etc drivers

* tag 'dmaengine-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (106 commits)
dmaengine: stm32-mdma: correct desc prep when channel running
dmaengine: dw-axi-dmac: Add support DMAX_NUM_CHANNELS > 16
dmaengine: xilinx: xilinx_dma: Fix kernel doc about xilinx_dma_remove()
dmaengine: mmp_tdma: drop unused variable 'of_id'
MAINTAINERS: Add entries for NXP(Freescale) eDMA drivers
dmaengine: xilinx: xdma: Support cyclic transfers
dmaengine: xilinx: xdma: Prepare the introduction of cyclic transfers
dmaengine: Drop unnecessary of_match_device() calls
dmaengine: Use device_get_match_data()
dmaengine: pxa_dma: Annotate struct pxad_desc_sw with __counted_by
dmaengine: pxa_dma: Remove an erroneous BUG_ON() in pxad_free_desc()
dmaengine: xilinx: xdma: Use resource_size() in xdma_probe()
dmaengine: fsl-dpaa2-qdma: Remove redundant initialization owner in dpaa2_qdma_driver
dmaengine: Remove unused declaration dma_chan_cleanup()
dmaengine: mmp: fix Wvoid-pointer-to-enum-cast warning
dmaengine: qcom: fix Wvoid-pointer-to-enum-cast warning
dmaengine: fsl-edma: Remove redundant dev_err() for platform_get_irq()
dmaengine: ep93xx_dma: Annotate struct ep93xx_dma_engine with __counted_by
dmaengine: idxd: add wq driver name support for accel-config user tool
dmaengine: fsl-edma: Annotate struct struct fsl_edma_engine with __counted_by
...

+577 -425
+6
Documentation/ABI/stable/sysfs-driver-dma-idxd
··· 270 270 correlates to the operations allowed. It's visible only 271 271 on platforms that support the capability. 272 272 273 + What: /sys/bus/dsa/devices/wq<m>.<n>/driver_name 274 + Date: Sept 8, 2023 275 + KernelVersion: 6.7.0 276 + Contact: dmaengine@vger.kernel.org 277 + Description: Name of driver to be bounded to the wq. 278 + 273 279 What: /sys/bus/dsa/devices/engine<m>.<n>/group_id 274 280 Date: Oct 25, 2019 275 281 KernelVersion: 5.6.0
+2
Documentation/devicetree/bindings/dma/qcom,gpi.yaml
··· 69 69 dma-channel-mask: 70 70 maxItems: 1 71 71 72 + dma-coherent: true 73 + 72 74 required: 73 75 - compatible 74 76 - reg
+8
MAINTAINERS
··· 8338 8338 F: drivers/spi/spi-fsl-dspi.c 8339 8339 F: include/linux/spi/spi-fsl-dspi.h 8340 8340 8341 + FREESCALE eDMA DRIVER 8342 + M: Frank Li <Frank.Li@nxp.com> 8343 + L: imx@lists.linux.dev 8344 + L: dmaengine@vger.kernel.org 8345 + S: Maintained 8346 + F: Documentation/devicetree/bindings/dma/fsl,edma.yaml 8347 + F: drivers/dma/fsl-edma*.* 8348 + 8341 8349 FREESCALE ENETC ETHERNET DRIVERS 8342 8350 M: Claudiu Manoil <claudiu.manoil@nxp.com> 8343 8351 M: Vladimir Oltean <vladimir.oltean@nxp.com>
+1 -1
drivers/dma/Kconfig
··· 362 362 363 363 config K3_DMA 364 364 tristate "Hisilicon K3 DMA support" 365 - depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST 365 + depends on ARCH_HISI || COMPILE_TEST 366 366 select DMA_ENGINE 367 367 select DMA_VIRTUAL_CHANNELS 368 368 help
+2 -4
drivers/dma/altera-msgdma.c
··· 923 923 * 924 924 * Return: Always '0' 925 925 */ 926 - static int msgdma_remove(struct platform_device *pdev) 926 + static void msgdma_remove(struct platform_device *pdev) 927 927 { 928 928 struct msgdma_device *mdev = platform_get_drvdata(pdev); 929 929 ··· 933 933 msgdma_dev_remove(mdev); 934 934 935 935 dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n"); 936 - 937 - return 0; 938 936 } 939 937 940 938 #ifdef CONFIG_OF ··· 950 952 .of_match_table = of_match_ptr(msgdma_match), 951 953 }, 952 954 .probe = msgdma_probe, 953 - .remove = msgdma_remove, 955 + .remove_new = msgdma_remove, 954 956 }; 955 957 956 958 module_platform_driver(msgdma_driver);
+3 -5
drivers/dma/apple-admac.c
··· 128 128 int irq; 129 129 int irq_index; 130 130 int nchannels; 131 - struct admac_chan channels[]; 131 + struct admac_chan channels[] __counted_by(nchannels); 132 132 }; 133 133 134 134 struct admac_tx { ··· 925 925 return err; 926 926 } 927 927 928 - static int admac_remove(struct platform_device *pdev) 928 + static void admac_remove(struct platform_device *pdev) 929 929 { 930 930 struct admac_data *ad = platform_get_drvdata(pdev); 931 931 ··· 933 933 dma_async_device_unregister(&ad->dma); 934 934 free_irq(ad->irq, ad); 935 935 reset_control_rearm(ad->rstc); 936 - 937 - return 0; 938 936 } 939 937 940 938 static const struct of_device_id admac_of_match[] = { ··· 947 949 .of_match_table = admac_of_match, 948 950 }, 949 951 .probe = admac_probe, 950 - .remove = admac_remove, 952 + .remove_new = admac_remove, 951 953 }; 952 954 module_platform_driver(apple_admac_driver); 953 955
+3 -5
drivers/dma/at_hdmac.c
··· 239 239 bool memset_buffer; 240 240 dma_addr_t memset_paddr; 241 241 int *memset_vaddr; 242 - struct atdma_sg sg[]; 242 + struct atdma_sg sg[] __counted_by(sglen); 243 243 }; 244 244 245 245 /*-- Channels --------------------------------------------------------*/ ··· 2100 2100 return err; 2101 2101 } 2102 2102 2103 - static int at_dma_remove(struct platform_device *pdev) 2103 + static void at_dma_remove(struct platform_device *pdev) 2104 2104 { 2105 2105 struct at_dma *atdma = platform_get_drvdata(pdev); 2106 2106 struct dma_chan *chan, *_chan; ··· 2122 2122 } 2123 2123 2124 2124 clk_disable_unprepare(atdma->clk); 2125 - 2126 - return 0; 2127 2125 } 2128 2126 2129 2127 static void at_dma_shutdown(struct platform_device *pdev) ··· 2240 2242 }; 2241 2243 2242 2244 static struct platform_driver at_dma_driver = { 2243 - .remove = at_dma_remove, 2245 + .remove_new = at_dma_remove, 2244 2246 .shutdown = at_dma_shutdown, 2245 2247 .id_table = atdma_devtypes, 2246 2248 .driver = {
+2 -4
drivers/dma/at_xdmac.c
··· 2431 2431 return ret; 2432 2432 } 2433 2433 2434 - static int at_xdmac_remove(struct platform_device *pdev) 2434 + static void at_xdmac_remove(struct platform_device *pdev) 2435 2435 { 2436 2436 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); 2437 2437 int i; ··· 2452 2452 tasklet_kill(&atchan->tasklet); 2453 2453 at_xdmac_free_chan_resources(&atchan->chan); 2454 2454 } 2455 - 2456 - return 0; 2457 2455 } 2458 2456 2459 2457 static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = { ··· 2476 2478 2477 2479 static struct platform_driver at_xdmac_driver = { 2478 2480 .probe = at_xdmac_probe, 2479 - .remove = at_xdmac_remove, 2481 + .remove_new = at_xdmac_remove, 2480 2482 .driver = { 2481 2483 .name = "at_xdmac", 2482 2484 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
+2 -4
drivers/dma/bcm-sba-raid.c
··· 1734 1734 return ret; 1735 1735 } 1736 1736 1737 - static int sba_remove(struct platform_device *pdev) 1737 + static void sba_remove(struct platform_device *pdev) 1738 1738 { 1739 1739 struct sba_device *sba = platform_get_drvdata(pdev); 1740 1740 ··· 1745 1745 sba_freeup_channel_resources(sba); 1746 1746 1747 1747 mbox_free_channel(sba->mchan); 1748 - 1749 - return 0; 1750 1748 } 1751 1749 1752 1750 static const struct of_device_id sba_of_match[] = { ··· 1756 1758 1757 1759 static struct platform_driver sba_driver = { 1758 1760 .probe = sba_probe, 1759 - .remove = sba_remove, 1761 + .remove_new = sba_remove, 1760 1762 .driver = { 1761 1763 .name = "bcm-sba-raid", 1762 1764 .of_match_table = sba_of_match,
+2 -4
drivers/dma/bcm2835-dma.c
··· 1019 1019 return rc; 1020 1020 } 1021 1021 1022 - static int bcm2835_dma_remove(struct platform_device *pdev) 1022 + static void bcm2835_dma_remove(struct platform_device *pdev) 1023 1023 { 1024 1024 struct bcm2835_dmadev *od = platform_get_drvdata(pdev); 1025 1025 1026 1026 dma_async_device_unregister(&od->ddev); 1027 1027 bcm2835_dma_free(od); 1028 - 1029 - return 0; 1030 1028 } 1031 1029 1032 1030 static struct platform_driver bcm2835_dma_driver = { 1033 1031 .probe = bcm2835_dma_probe, 1034 - .remove = bcm2835_dma_remove, 1032 + .remove_new = bcm2835_dma_remove, 1035 1033 .driver = { 1036 1034 .name = "bcm2835-dma", 1037 1035 .of_match_table = of_match_ptr(bcm2835_dma_of_match),
+2 -4
drivers/dma/bestcomm/bestcomm.c
··· 455 455 } 456 456 457 457 458 - static int mpc52xx_bcom_remove(struct platform_device *op) 458 + static void mpc52xx_bcom_remove(struct platform_device *op) 459 459 { 460 460 /* Clean up the engine */ 461 461 bcom_engine_cleanup(); ··· 473 473 /* Release memory */ 474 474 kfree(bcom_eng); 475 475 bcom_eng = NULL; 476 - 477 - return 0; 478 476 } 479 477 480 478 static const struct of_device_id mpc52xx_bcom_of_match[] = { ··· 486 488 487 489 static struct platform_driver mpc52xx_bcom_of_platform_driver = { 488 490 .probe = mpc52xx_bcom_probe, 489 - .remove = mpc52xx_bcom_remove, 491 + .remove_new = mpc52xx_bcom_remove, 490 492 .driver = { 491 493 .name = DRIVER_NAME, 492 494 .of_match_table = mpc52xx_bcom_of_match,
+4 -7
drivers/dma/dma-axi-dmac.c
··· 117 117 unsigned int num_submitted; 118 118 unsigned int num_completed; 119 119 unsigned int num_sgs; 120 - struct axi_dmac_sg sg[]; 120 + struct axi_dmac_sg sg[] __counted_by(num_sgs); 121 121 }; 122 122 123 123 struct axi_dmac_chan { ··· 484 484 desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT); 485 485 if (!desc) 486 486 return NULL; 487 + desc->num_sgs = num_sgs; 487 488 488 489 for (i = 0; i < num_sgs; i++) 489 490 desc->sg[i].id = AXI_DMAC_SG_UNUSED; 490 - 491 - desc->num_sgs = num_sgs; 492 491 493 492 return desc; 494 493 } ··· 1028 1029 return ret; 1029 1030 } 1030 1031 1031 - static int axi_dmac_remove(struct platform_device *pdev) 1032 + static void axi_dmac_remove(struct platform_device *pdev) 1032 1033 { 1033 1034 struct axi_dmac *dmac = platform_get_drvdata(pdev); 1034 1035 ··· 1037 1038 tasklet_kill(&dmac->chan.vchan.task); 1038 1039 dma_async_device_unregister(&dmac->dma_dev); 1039 1040 clk_disable_unprepare(dmac->clk); 1040 - 1041 - return 0; 1042 1041 } 1043 1042 1044 1043 static const struct of_device_id axi_dmac_of_match_table[] = { ··· 1051 1054 .of_match_table = axi_dmac_of_match_table, 1052 1055 }, 1053 1056 .probe = axi_dmac_probe, 1054 - .remove = axi_dmac_remove, 1057 + .remove_new = axi_dmac_remove, 1055 1058 }; 1056 1059 module_platform_driver(axi_dmac_driver); 1057 1060
+2 -4
drivers/dma/dma-jz4780.c
··· 1008 1008 return ret; 1009 1009 } 1010 1010 1011 - static int jz4780_dma_remove(struct platform_device *pdev) 1011 + static void jz4780_dma_remove(struct platform_device *pdev) 1012 1012 { 1013 1013 struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev); 1014 1014 int i; ··· 1020 1020 1021 1021 for (i = 0; i < jzdma->soc_data->nb_channels; i++) 1022 1022 tasklet_kill(&jzdma->chan[i].vchan.task); 1023 - 1024 - return 0; 1025 1023 } 1026 1024 1027 1025 static const struct jz4780_dma_soc_data jz4740_dma_soc_data = { ··· 1122 1124 1123 1125 static struct platform_driver jz4780_dma_driver = { 1124 1126 .probe = jz4780_dma_probe, 1125 - .remove = jz4780_dma_remove, 1127 + .remove_new = jz4780_dma_remove, 1126 1128 .driver = { 1127 1129 .name = "jz4780-dma", 1128 1130 .of_match_table = jz4780_dma_dt_match,
+114 -42
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
··· 63 63 } 64 64 65 65 static inline void 66 + axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val) 67 + { 68 + iowrite64(val, chip->regs + reg); 69 + } 70 + 71 + static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg) 72 + { 73 + return ioread64(chip->regs + reg); 74 + } 75 + 76 + static inline void 66 77 axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val) 67 78 { 68 79 iowrite32(val, chan->chan_regs + reg); ··· 193 182 194 183 static inline void axi_chan_disable(struct axi_dma_chan *chan) 195 184 { 196 - u32 val; 185 + u64 val; 197 186 198 - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 199 - val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); 200 - if (chan->chip->dw->hdata->reg_map_8_channels) 201 - val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; 202 - else 203 - val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; 204 - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); 187 + if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { 188 + val = axi_dma_ioread64(chan->chip, DMAC_CHEN); 189 + if (chan->id >= DMAC_CHAN_16) { 190 + val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16) 191 + << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT)); 192 + val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16) 193 + << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT); 194 + } else { 195 + val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); 196 + val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; 197 + } 198 + axi_dma_iowrite64(chan->chip, DMAC_CHEN, val); 199 + } else { 200 + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 201 + val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); 202 + if (chan->chip->dw->hdata->reg_map_8_channels) 203 + val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; 204 + else 205 + val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; 206 + axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val); 207 + } 205 208 } 206 209 207 210 static inline void axi_chan_enable(struct axi_dma_chan *chan) 208 211 { 209 - u32 val; 212 + u64 val; 210 213 211 - val = 
axi_dma_ioread32(chan->chip, DMAC_CHEN); 212 - if (chan->chip->dw->hdata->reg_map_8_channels) 213 - val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | 214 - BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; 215 - else 216 - val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | 214 + if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { 215 + val = axi_dma_ioread64(chan->chip, DMAC_CHEN); 216 + if (chan->id >= DMAC_CHAN_16) { 217 + val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16) 218 + << (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) | 219 + (u64)(BIT(chan->id) >> DMAC_CHAN_16) 220 + << (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT); 221 + } else { 222 + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | 217 223 BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; 218 - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); 224 + } 225 + axi_dma_iowrite64(chan->chip, DMAC_CHEN, val); 226 + } else { 227 + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 228 + if (chan->chip->dw->hdata->reg_map_8_channels) { 229 + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | 230 + BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; 231 + } else { 232 + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | 233 + BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT; 234 + } 235 + axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val); 236 + } 219 237 } 220 238 221 239 static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan) 222 240 { 223 - u32 val; 241 + u64 val; 224 242 225 - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 243 + if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) 244 + val = axi_dma_ioread64(chan->chip, DMAC_CHEN); 245 + else 246 + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 226 247 227 - return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT)); 248 + if (chan->id >= DMAC_CHAN_16) 249 + return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT)); 250 + else 251 + return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT)); 228 252 } 229 253 230 254 static void axi_dma_hw_init(struct axi_dma_chip *chip) ··· 1221 1175 struct 
axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); 1222 1176 unsigned long flags; 1223 1177 unsigned int timeout = 20; /* timeout iterations */ 1224 - u32 val; 1178 + u64 val; 1225 1179 1226 1180 spin_lock_irqsave(&chan->vc.lock, flags); 1227 1181 1228 - if (chan->chip->dw->hdata->reg_map_8_channels) { 1229 - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 1230 - val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | 1231 - BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; 1232 - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); 1182 + if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { 1183 + val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG); 1184 + if (chan->id >= DMAC_CHAN_16) { 1185 + val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16) 1186 + << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) | 1187 + (u64)(BIT(chan->id) >> DMAC_CHAN_16) 1188 + << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT); 1189 + } else { 1190 + val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | 1191 + BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; 1192 + } 1193 + axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val); 1233 1194 } else { 1234 - val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG); 1235 - val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | 1195 + if (chan->chip->dw->hdata->reg_map_8_channels) { 1196 + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 1197 + val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | 1198 + BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; 1199 + axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val); 1200 + } else { 1201 + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG); 1202 + val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT | 1236 1203 BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT; 1237 - axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); 1204 + axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val); 1205 + } 1238 1206 } 1239 1207 1240 1208 do { ··· 1270 1210 /* Called in chan locked context */ 1271 1211 static inline void axi_chan_resume(struct axi_dma_chan *chan) 1272 1212 { 1273 - u32 val; 1213 + 
u64 val; 1274 1214 1275 - if (chan->chip->dw->hdata->reg_map_8_channels) { 1276 - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 1277 - val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); 1278 - val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); 1279 - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); 1215 + if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) { 1216 + val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG); 1217 + if (chan->id >= DMAC_CHAN_16) { 1218 + val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16) 1219 + << (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT)); 1220 + val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16) 1221 + << (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT)); 1222 + } else { 1223 + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); 1224 + val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); 1225 + } 1226 + axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val); 1280 1227 } else { 1281 - val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG); 1282 - val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); 1283 - val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); 1284 - axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val); 1228 + if (chan->chip->dw->hdata->reg_map_8_channels) { 1229 + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); 1230 + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); 1231 + val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); 1232 + axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val); 1233 + } else { 1234 + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG); 1235 + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT); 1236 + val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT); 1237 + axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val); 1238 + } 1285 1239 } 1286 1240 1287 1241 chan->is_paused = false; ··· 1609 1535 return ret; 1610 1536 } 1611 1537 1612 - static int dw_remove(struct platform_device *pdev) 1538 + static void dw_remove(struct platform_device *pdev) 1613 1539 { 1614 1540 struct axi_dma_chip *chip = platform_get_drvdata(pdev); 1615 1541 
struct dw_axi_dma *dw = chip->dw; ··· 1638 1564 list_del(&chan->vc.chan.device_node); 1639 1565 tasklet_kill(&chan->vc.task); 1640 1566 } 1641 - 1642 - return 0; 1643 1567 } 1644 1568 1645 1569 static const struct dev_pm_ops dw_axi_dma_pm_ops = { ··· 1660 1588 1661 1589 static struct platform_driver dw_driver = { 1662 1590 .probe = dw_probe, 1663 - .remove = dw_remove, 1591 + .remove_new = dw_remove, 1664 1592 .driver = { 1665 1593 .name = KBUILD_MODNAME, 1666 1594 .of_match_table = dw_dma_of_id_table,
+5 -1
drivers/dma/dw-axi-dmac/dw-axi-dmac.h
··· 18 18 19 19 #include "../virt-dma.h" 20 20 21 - #define DMAC_MAX_CHANNELS 16 21 + #define DMAC_MAX_CHANNELS 32 22 22 #define DMAC_MAX_MASTERS 2 23 23 #define DMAC_MAX_BLK_SIZE 0x200000 24 24 ··· 221 221 222 222 /* DMAC_CHEN2 */ 223 223 #define DMAC_CHAN_EN2_WE_SHIFT 16 224 + 225 + /* DMAC CHAN BLOCKS */ 226 + #define DMAC_CHAN_BLOCK_SHIFT 32 227 + #define DMAC_CHAN_16 16 224 228 225 229 /* DMAC_CHSUSP */ 226 230 #define DMAC_CHAN_SUSP2_SHIFT 0
+2 -4
drivers/dma/dw/platform.c
··· 93 93 return err; 94 94 } 95 95 96 - static int dw_remove(struct platform_device *pdev) 96 + static void dw_remove(struct platform_device *pdev) 97 97 { 98 98 struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev); 99 99 struct dw_dma_chip *chip = data->chip; ··· 109 109 110 110 pm_runtime_disable(&pdev->dev); 111 111 clk_disable_unprepare(chip->clk); 112 - 113 - return 0; 114 112 } 115 113 116 114 static void dw_shutdown(struct platform_device *pdev) ··· 191 193 192 194 static struct platform_driver dw_driver = { 193 195 .probe = dw_probe, 194 - .remove = dw_remove, 196 + .remove_new = dw_remove, 195 197 .shutdown = dw_shutdown, 196 198 .driver = { 197 199 .name = DRV_NAME,
+1 -1
drivers/dma/ep93xx_dma.c
··· 213 213 #define INTERRUPT_NEXT_BUFFER 2 214 214 215 215 size_t num_channels; 216 - struct ep93xx_dma_chan channels[]; 216 + struct ep93xx_dma_chan channels[] __counted_by(num_channels); 217 217 }; 218 218 219 219 static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
-1
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
··· 814 814 static struct fsl_mc_driver dpaa2_qdma_driver = { 815 815 .driver = { 816 816 .name = "dpaa2-qdma", 817 - .owner = THIS_MODULE, 818 817 }, 819 818 .probe = dpaa2_qdma_probe, 820 819 .remove = dpaa2_qdma_remove,
+1 -1
drivers/dma/fsl-edma-common.h
··· 225 225 bool big_endian; 226 226 struct edma_regs regs; 227 227 u64 chan_masked; 228 - struct fsl_edma_chan chans[]; 228 + struct fsl_edma_chan chans[] __counted_by(n_chans); 229 229 }; 230 230 231 231 #define edma_read_tcdreg(chan, __name) \
+5 -14
drivers/dma/fsl-edma-main.c
··· 13 13 #include <linux/interrupt.h> 14 14 #include <linux/clk.h> 15 15 #include <linux/of.h> 16 - #include <linux/of_device.h> 17 - #include <linux/of_address.h> 18 - #include <linux/of_irq.h> 19 16 #include <linux/of_dma.h> 20 17 #include <linux/dma-mapping.h> 21 18 #include <linux/pm_runtime.h> 22 19 #include <linux/pm_domain.h> 20 + #include <linux/property.h> 23 21 24 22 #include "fsl-edma-common.h" 25 23 ··· 230 232 231 233 /* request channel irq */ 232 234 fsl_chan->txirq = platform_get_irq(pdev, i); 233 - if (fsl_chan->txirq < 0) { 234 - dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i); 235 + if (fsl_chan->txirq < 0) 235 236 return -EINVAL; 236 - } 237 237 238 238 ret = devm_request_irq(&pdev->dev, fsl_chan->txirq, 239 239 fsl_edma3_tx_handler, IRQF_SHARED, ··· 414 418 415 419 static int fsl_edma_probe(struct platform_device *pdev) 416 420 { 417 - const struct of_device_id *of_id = 418 - of_match_device(fsl_edma_dt_ids, &pdev->dev); 419 421 struct device_node *np = pdev->dev.of_node; 420 422 struct fsl_edma_engine *fsl_edma; 421 423 const struct fsl_edma_drvdata *drvdata = NULL; ··· 422 428 int chans; 423 429 int ret, i; 424 430 425 - if (of_id) 426 - drvdata = of_id->data; 431 + drvdata = device_get_match_data(&pdev->dev); 427 432 if (!drvdata) { 428 433 dev_err(&pdev->dev, "unable to find driver data\n"); 429 434 return -EINVAL; ··· 610 617 return 0; 611 618 } 612 619 613 - static int fsl_edma_remove(struct platform_device *pdev) 620 + static void fsl_edma_remove(struct platform_device *pdev) 614 621 { 615 622 struct device_node *np = pdev->dev.of_node; 616 623 struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev); ··· 620 627 of_dma_controller_free(np); 621 628 dma_async_device_unregister(&fsl_edma->dma_dev); 622 629 fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); 623 - 624 - return 0; 625 630 } 626 631 627 632 static int fsl_edma_suspend_late(struct device *dev) ··· 683 692 .pm = &fsl_edma_pm_ops, 684 693 }, 685 694 .probe = 
fsl_edma_probe, 686 - .remove = fsl_edma_remove, 695 + .remove_new = fsl_edma_remove, 687 696 }; 688 697 689 698 static int __init fsl_edma_init(void)
+2 -3
drivers/dma/fsl-qdma.c
··· 1266 1266 } 1267 1267 } 1268 1268 1269 - static int fsl_qdma_remove(struct platform_device *pdev) 1269 + static void fsl_qdma_remove(struct platform_device *pdev) 1270 1270 { 1271 1271 int i; 1272 1272 struct fsl_qdma_queue *status; ··· 1283 1283 dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) * 1284 1284 status->n_cq, status->cq, status->bus_addr); 1285 1285 } 1286 - return 0; 1287 1286 } 1288 1287 1289 1288 static const struct of_device_id fsl_qdma_dt_ids[] = { ··· 1297 1298 .of_match_table = fsl_qdma_dt_ids, 1298 1299 }, 1299 1300 .probe = fsl_qdma_probe, 1300 - .remove = fsl_qdma_remove, 1301 + .remove_new = fsl_qdma_remove, 1301 1302 }; 1302 1303 1303 1304 module_platform_driver(fsl_qdma_driver);
+2 -4
drivers/dma/fsl_raid.c
··· 857 857 chan->oub_phys_addr); 858 858 } 859 859 860 - static int fsl_re_remove(struct platform_device *ofdev) 860 + static void fsl_re_remove(struct platform_device *ofdev) 861 861 { 862 862 struct fsl_re_drv_private *re_priv; 863 863 struct device *dev; ··· 872 872 873 873 /* Unregister the driver */ 874 874 dma_async_device_unregister(&re_priv->dma_dev); 875 - 876 - return 0; 877 875 } 878 876 879 877 static const struct of_device_id fsl_re_ids[] = { ··· 886 888 .of_match_table = fsl_re_ids, 887 889 }, 888 890 .probe = fsl_re_probe, 889 - .remove = fsl_re_remove, 891 + .remove_new = fsl_re_remove, 890 892 }; 891 893 892 894 module_platform_driver(fsl_re_driver);
+2 -4
drivers/dma/fsldma.c
··· 1306 1306 return err; 1307 1307 } 1308 1308 1309 - static int fsldma_of_remove(struct platform_device *op) 1309 + static void fsldma_of_remove(struct platform_device *op) 1310 1310 { 1311 1311 struct fsldma_device *fdev; 1312 1312 unsigned int i; ··· 1324 1324 1325 1325 iounmap(fdev->regs); 1326 1326 kfree(fdev); 1327 - 1328 - return 0; 1329 1327 } 1330 1328 1331 1329 #ifdef CONFIG_PM ··· 1404 1406 #endif 1405 1407 }, 1406 1408 .probe = fsldma_of_probe, 1407 - .remove = fsldma_of_remove, 1409 + .remove_new = fsldma_of_remove, 1408 1410 }; 1409 1411 1410 1412 /*----------------------------------------------------------------------------*/
+1 -1
drivers/dma/hisi_dma.c
··· 163 163 u32 chan_depth; 164 164 enum hisi_dma_reg_layout reg_layout; 165 165 void __iomem *queue_base; /* queue region start of register */ 166 - struct hisi_dma_chan chan[]; 166 + struct hisi_dma_chan chan[] __counted_by(chan_num); 167 167 }; 168 168 169 169 #ifdef CONFIG_DEBUG_FS
+2 -4
drivers/dma/idma64.c
··· 660 660 return 0; 661 661 } 662 662 663 - static int idma64_platform_remove(struct platform_device *pdev) 663 + static void idma64_platform_remove(struct platform_device *pdev) 664 664 { 665 665 struct idma64_chip *chip = platform_get_drvdata(pdev); 666 666 667 667 idma64_remove(chip); 668 - 669 - return 0; 670 668 } 671 669 672 670 static int __maybe_unused idma64_pm_suspend(struct device *dev) ··· 689 691 690 692 static struct platform_driver idma64_platform_driver = { 691 693 .probe = idma64_platform_probe, 692 - .remove = idma64_platform_remove, 694 + .remove_new = idma64_platform_remove, 693 695 .driver = { 694 696 .name = LPSS_IDMA64_DRIVER_NAME, 695 697 .pm = &idma64_dev_pm_ops,
+3 -3
drivers/dma/idxd/Makefile
··· 1 1 ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD 2 2 3 + obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o 4 + idxd_bus-y := bus.o 5 + 3 6 obj-$(CONFIG_INTEL_IDXD) += idxd.o 4 7 idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o 5 8 6 9 idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o 7 - 8 - obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o 9 - idxd_bus-y := bus.o 10 10 11 11 obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o 12 12 idxd_compat-y := compat.o
+7
drivers/dma/idxd/cdev.c
··· 509 509 510 510 static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) 511 511 { 512 + struct device *dev = &idxd_dev->conf_dev; 512 513 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 513 514 struct idxd_device *idxd = wq->idxd; 514 515 int rc; ··· 536 535 } 537 536 538 537 mutex_lock(&wq->wq_lock); 538 + 539 + if (!idxd_wq_driver_name_match(wq, dev)) { 540 + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 541 + rc = -ENODEV; 542 + goto wq_err; 543 + } 539 544 540 545 wq->wq = create_workqueue(dev_name(wq_confdev(wq))); 541 546 if (!wq->wq) {
+6
drivers/dma/idxd/dma.c
··· 306 306 return -ENXIO; 307 307 308 308 mutex_lock(&wq->wq_lock); 309 + if (!idxd_wq_driver_name_match(wq, dev)) { 310 + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 311 + rc = -ENODEV; 312 + goto err; 313 + } 314 + 309 315 wq->type = IDXD_WQT_KERNEL; 310 316 311 317 rc = drv_enable_wq(wq);
+9
drivers/dma/idxd/idxd.h
··· 159 159 int minor; 160 160 }; 161 161 162 + #define DRIVER_NAME_SIZE 128 163 + 162 164 #define IDXD_ALLOCATED_BATCH_SIZE 128U 163 165 #define WQ_NAME_SIZE 1024 164 166 #define WQ_TYPE_SIZE 10 ··· 229 227 /* Lock to protect upasid_xa access. */ 230 228 struct mutex uc_lock; 231 229 struct xarray upasid_xa; 230 + 231 + char driver_name[DRIVER_NAME_SIZE + 1]; 232 232 }; 233 233 234 234 struct idxd_engine { ··· 648 644 wqcfg->max_batch_shift = 0; 649 645 else 650 646 wqcfg->max_batch_shift = max_batch_shift; 647 + } 648 + 649 + static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) 650 + { 651 + return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0); 651 652 } 652 653 653 654 int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
+2 -2
drivers/dma/idxd/irq.c
··· 434 434 val |= IDXD_INTC_ERR; 435 435 436 436 for (i = 0; i < 4; i++) 437 - dev_warn(dev, "err[%d]: %#16.16llx\n", 438 - i, idxd->sw_err.bits[i]); 437 + dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n", 438 + i, idxd->sw_err.bits[i]); 439 439 err = true; 440 440 } 441 441
+34
drivers/dma/idxd/sysfs.c
··· 1259 1259 static struct device_attribute dev_attr_wq_op_config = 1260 1260 __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store); 1261 1261 1262 + static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf) 1263 + { 1264 + struct idxd_wq *wq = confdev_to_wq(dev); 1265 + 1266 + return sysfs_emit(buf, "%s\n", wq->driver_name); 1267 + } 1268 + 1269 + static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr, 1270 + const char *buf, size_t count) 1271 + { 1272 + struct idxd_wq *wq = confdev_to_wq(dev); 1273 + char *input, *pos; 1274 + 1275 + if (wq->state != IDXD_WQ_DISABLED) 1276 + return -EPERM; 1277 + 1278 + if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0) 1279 + return -EINVAL; 1280 + 1281 + input = kstrndup(buf, count, GFP_KERNEL); 1282 + if (!input) 1283 + return -ENOMEM; 1284 + 1285 + pos = strim(input); 1286 + memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); 1287 + sprintf(wq->driver_name, "%s", pos); 1288 + kfree(input); 1289 + return count; 1290 + } 1291 + 1292 + static struct device_attribute dev_attr_wq_driver_name = 1293 + __ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store); 1294 + 1262 1295 static struct attribute *idxd_wq_attributes[] = { 1263 1296 &dev_attr_wq_clients.attr, 1264 1297 &dev_attr_wq_state.attr, ··· 1311 1278 &dev_attr_wq_occupancy.attr, 1312 1279 &dev_attr_wq_enqcmds_retries.attr, 1313 1280 &dev_attr_wq_op_config.attr, 1281 + &dev_attr_wq_driver_name.attr, 1314 1282 NULL, 1315 1283 }; 1316 1284
+2 -4
drivers/dma/img-mdc-dma.c
··· 1017 1017 return ret; 1018 1018 } 1019 1019 1020 - static int mdc_dma_remove(struct platform_device *pdev) 1020 + static void mdc_dma_remove(struct platform_device *pdev) 1021 1021 { 1022 1022 struct mdc_dma *mdma = platform_get_drvdata(pdev); 1023 1023 struct mdc_chan *mchan, *next; ··· 1037 1037 pm_runtime_disable(&pdev->dev); 1038 1038 if (!pm_runtime_status_suspended(&pdev->dev)) 1039 1039 img_mdc_runtime_suspend(&pdev->dev); 1040 - 1041 - return 0; 1042 1040 } 1043 1041 1044 1042 #ifdef CONFIG_PM_SLEEP ··· 1076 1078 .of_match_table = of_match_ptr(mdc_dma_of_match), 1077 1079 }, 1078 1080 .probe = mdc_dma_probe, 1079 - .remove = mdc_dma_remove, 1081 + .remove_new = mdc_dma_remove, 1080 1082 }; 1081 1083 module_platform_driver(mdc_dma_driver); 1082 1084
+2 -4
drivers/dma/imx-dma.c
··· 1216 1216 } 1217 1217 } 1218 1218 1219 - static int imxdma_remove(struct platform_device *pdev) 1219 + static void imxdma_remove(struct platform_device *pdev) 1220 1220 { 1221 1221 struct imxdma_engine *imxdma = platform_get_drvdata(pdev); 1222 1222 ··· 1229 1229 1230 1230 clk_disable_unprepare(imxdma->dma_ipg); 1231 1231 clk_disable_unprepare(imxdma->dma_ahb); 1232 - 1233 - return 0; 1234 1232 } 1235 1233 1236 1234 static struct platform_driver imxdma_driver = { ··· 1236 1238 .name = "imx-dma", 1237 1239 .of_match_table = imx_dma_of_dev_id, 1238 1240 }, 1239 - .remove = imxdma_remove, 1241 + .remove_new = imxdma_remove, 1240 1242 }; 1241 1243 1242 1244 static int __init imxdma_module_init(void)
+2 -3
drivers/dma/imx-sdma.c
··· 2358 2358 return ret; 2359 2359 } 2360 2360 2361 - static int sdma_remove(struct platform_device *pdev) 2361 + static void sdma_remove(struct platform_device *pdev) 2362 2362 { 2363 2363 struct sdma_engine *sdma = platform_get_drvdata(pdev); 2364 2364 int i; ··· 2377 2377 } 2378 2378 2379 2379 platform_set_drvdata(pdev, NULL); 2380 - return 0; 2381 2380 } 2382 2381 2383 2382 static struct platform_driver sdma_driver = { ··· 2384 2385 .name = "imx-sdma", 2385 2386 .of_match_table = sdma_dt_ids, 2386 2387 }, 2387 - .remove = sdma_remove, 2388 + .remove_new = sdma_remove, 2388 2389 .probe = sdma_probe, 2389 2390 }; 2390 2391
+12 -18
drivers/dma/k3dma.c
··· 15 15 #include <linux/platform_device.h> 16 16 #include <linux/slab.h> 17 17 #include <linux/spinlock.h> 18 - #include <linux/of_device.h> 19 18 #include <linux/of.h> 20 19 #include <linux/clk.h> 21 20 #include <linux/of_dma.h> ··· 838 839 { 839 840 const struct k3dma_soc_data *soc_data; 840 841 struct k3_dma_dev *d; 841 - const struct of_device_id *of_id; 842 842 int i, ret, irq = 0; 843 843 844 844 d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL); ··· 852 854 if (IS_ERR(d->base)) 853 855 return PTR_ERR(d->base); 854 856 855 - of_id = of_match_device(k3_pdma_dt_ids, &op->dev); 856 - if (of_id) { 857 - of_property_read_u32((&op->dev)->of_node, 858 - "dma-channels", &d->dma_channels); 859 - of_property_read_u32((&op->dev)->of_node, 860 - "dma-requests", &d->dma_requests); 861 - ret = of_property_read_u32((&op->dev)->of_node, 862 - "dma-channel-mask", &d->dma_channel_mask); 863 - if (ret) { 864 - dev_warn(&op->dev, 865 - "dma-channel-mask doesn't exist, considering all as available.\n"); 866 - d->dma_channel_mask = (u32)~0UL; 867 - } 857 + of_property_read_u32((&op->dev)->of_node, 858 + "dma-channels", &d->dma_channels); 859 + of_property_read_u32((&op->dev)->of_node, 860 + "dma-requests", &d->dma_requests); 861 + ret = of_property_read_u32((&op->dev)->of_node, 862 + "dma-channel-mask", &d->dma_channel_mask); 863 + if (ret) { 864 + dev_warn(&op->dev, 865 + "dma-channel-mask doesn't exist, considering all as available.\n"); 866 + d->dma_channel_mask = (u32)~0UL; 868 867 } 869 868 870 869 if (!(soc_data->flags & K3_FLAG_NOCLK)) { ··· 969 974 return ret; 970 975 } 971 976 972 - static int k3_dma_remove(struct platform_device *op) 977 + static void k3_dma_remove(struct platform_device *op) 973 978 { 974 979 struct k3_dma_chan *c, *cn; 975 980 struct k3_dma_dev *d = platform_get_drvdata(op); ··· 985 990 } 986 991 tasklet_kill(&d->task); 987 992 clk_disable_unprepare(d->clk); 988 - return 0; 989 993 } 990 994 991 995 #ifdef CONFIG_PM_SLEEP ··· 1028 1034 
.of_match_table = k3_pdma_dt_ids, 1029 1035 }, 1030 1036 .probe = k3_dma_probe, 1031 - .remove = k3_dma_remove, 1037 + .remove_new = k3_dma_remove, 1032 1038 }; 1033 1039 1034 1040 module_platform_driver(k3_pdma_driver);
+2 -4
drivers/dma/mcf-edma-main.c
··· 255 255 return 0; 256 256 } 257 257 258 - static int mcf_edma_remove(struct platform_device *pdev) 258 + static void mcf_edma_remove(struct platform_device *pdev) 259 259 { 260 260 struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev); 261 261 262 262 mcf_edma_irq_free(pdev, mcf_edma); 263 263 fsl_edma_cleanup_vchan(&mcf_edma->dma_dev); 264 264 dma_async_device_unregister(&mcf_edma->dma_dev); 265 - 266 - return 0; 267 265 } 268 266 269 267 static struct platform_driver mcf_edma_driver = { ··· 269 271 .name = "mcf-edma", 270 272 }, 271 273 .probe = mcf_edma_probe, 272 - .remove = mcf_edma_remove, 274 + .remove_new = mcf_edma_remove, 273 275 }; 274 276 275 277 bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
+2 -4
drivers/dma/mediatek/mtk-cqdma.c
··· 885 885 return err; 886 886 } 887 887 888 - static int mtk_cqdma_remove(struct platform_device *pdev) 888 + static void mtk_cqdma_remove(struct platform_device *pdev) 889 889 { 890 890 struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev); 891 891 struct mtk_cqdma_vchan *vc; ··· 918 918 919 919 dma_async_device_unregister(&cqdma->ddev); 920 920 of_dma_controller_free(pdev->dev.of_node); 921 - 922 - return 0; 923 921 } 924 922 925 923 static struct platform_driver mtk_cqdma_driver = { 926 924 .probe = mtk_cqdma_probe, 927 - .remove = mtk_cqdma_remove, 925 + .remove_new = mtk_cqdma_remove, 928 926 .driver = { 929 927 .name = KBUILD_MODNAME, 930 928 .of_match_table = mtk_cqdma_match,
+2 -4
drivers/dma/mediatek/mtk-hsdma.c
··· 1009 1009 return err; 1010 1010 } 1011 1011 1012 - static int mtk_hsdma_remove(struct platform_device *pdev) 1012 + static void mtk_hsdma_remove(struct platform_device *pdev) 1013 1013 { 1014 1014 struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev); 1015 1015 struct mtk_hsdma_vchan *vc; ··· 1034 1034 1035 1035 dma_async_device_unregister(&hsdma->ddev); 1036 1036 of_dma_controller_free(pdev->dev.of_node); 1037 - 1038 - return 0; 1039 1037 } 1040 1038 1041 1039 static struct platform_driver mtk_hsdma_driver = { 1042 1040 .probe = mtk_hsdma_probe, 1043 - .remove = mtk_hsdma_remove, 1041 + .remove_new = mtk_hsdma_remove, 1044 1042 .driver = { 1045 1043 .name = KBUILD_MODNAME, 1046 1044 .of_match_table = mtk_hsdma_match,
+2 -4
drivers/dma/mediatek/mtk-uart-apdma.c
··· 572 572 return rc; 573 573 } 574 574 575 - static int mtk_uart_apdma_remove(struct platform_device *pdev) 575 + static void mtk_uart_apdma_remove(struct platform_device *pdev) 576 576 { 577 577 struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev); 578 578 ··· 583 583 dma_async_device_unregister(&mtkd->ddev); 584 584 585 585 pm_runtime_disable(&pdev->dev); 586 - 587 - return 0; 588 586 } 589 587 590 588 #ifdef CONFIG_PM_SLEEP ··· 637 639 638 640 static struct platform_driver mtk_uart_apdma_driver = { 639 641 .probe = mtk_uart_apdma_probe, 640 - .remove = mtk_uart_apdma_remove, 642 + .remove_new = mtk_uart_apdma_remove, 641 643 .driver = { 642 644 .name = KBUILD_MODNAME, 643 645 .pm = &mtk_uart_apdma_pm_ops,
+3 -7
drivers/dma/mmp_pdma.c
··· 15 15 #include <linux/device.h> 16 16 #include <linux/platform_data/mmp_dma.h> 17 17 #include <linux/dmapool.h> 18 - #include <linux/of_device.h> 19 18 #include <linux/of_dma.h> 20 19 #include <linux/of.h> 21 20 ··· 931 932 } 932 933 } 933 934 934 - static int mmp_pdma_remove(struct platform_device *op) 935 + static void mmp_pdma_remove(struct platform_device *op) 935 936 { 936 937 struct mmp_pdma_device *pdev = platform_get_drvdata(op); 937 938 struct mmp_pdma_phy *phy; ··· 957 958 } 958 959 959 960 dma_async_device_unregister(&pdev->device); 960 - return 0; 961 961 } 962 962 963 963 static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) ··· 1018 1020 static int mmp_pdma_probe(struct platform_device *op) 1019 1021 { 1020 1022 struct mmp_pdma_device *pdev; 1021 - const struct of_device_id *of_id; 1022 1023 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); 1023 1024 int i, ret, irq = 0; 1024 1025 int dma_channels = 0, irq_num = 0; ··· 1037 1040 if (IS_ERR(pdev->base)) 1038 1041 return PTR_ERR(pdev->base); 1039 1042 1040 - of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); 1041 - if (of_id) { 1043 + if (pdev->dev->of_node) { 1042 1044 /* Parse new and deprecated dma-channels properties */ 1043 1045 if (of_property_read_u32(pdev->dev->of_node, "dma-channels", 1044 1046 &dma_channels)) ··· 1137 1141 }, 1138 1142 .id_table = mmp_pdma_id_table, 1139 1143 .probe = mmp_pdma_probe, 1140 - .remove = mmp_pdma_remove, 1144 + .remove_new = mmp_pdma_remove, 1141 1145 }; 1142 1146 1143 1147 module_platform_driver(mmp_pdma_driver);
+9 -26
drivers/dma/mmp_tdma.c
··· 14 14 #include <linux/slab.h> 15 15 #include <linux/dmaengine.h> 16 16 #include <linux/platform_device.h> 17 + #include <linux/property.h> 17 18 #include <linux/device.h> 18 19 #include <linux/genalloc.h> 19 - #include <linux/of_device.h> 20 20 #include <linux/of_dma.h> 21 21 22 22 #include "dmaengine.h" ··· 552 552 mmp_tdma_enable_chan(tdmac); 553 553 } 554 554 555 - static int mmp_tdma_remove(struct platform_device *pdev) 555 + static void mmp_tdma_remove(struct platform_device *pdev) 556 556 { 557 557 if (pdev->dev.of_node) 558 558 of_dma_controller_free(pdev->dev.of_node); 559 - 560 - return 0; 561 559 } 562 560 563 561 static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, ··· 635 637 static int mmp_tdma_probe(struct platform_device *pdev) 636 638 { 637 639 enum mmp_tdma_type type; 638 - const struct of_device_id *of_id; 639 640 struct mmp_tdma_device *tdev; 640 641 int i, ret; 641 642 int irq = 0, irq_num = 0; 642 643 int chan_num = TDMA_CHANNEL_NUM; 643 644 struct gen_pool *pool = NULL; 644 645 645 - of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 646 - if (of_id) 647 - type = (enum mmp_tdma_type) of_id->data; 648 - else 649 - type = platform_get_device_id(pdev)->driver_data; 646 + type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev); 650 647 651 648 /* always have couple channels */ 652 649 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); ··· 719 726 return ret; 720 727 } 721 728 722 - if (pdev->dev.of_node) { 723 - ret = of_dma_controller_register(pdev->dev.of_node, 724 - mmp_tdma_xlate, tdev); 725 - if (ret) { 726 - dev_err(tdev->device.dev, 727 - "failed to register controller\n"); 728 - return ret; 729 - } 729 + ret = of_dma_controller_register(pdev->dev.of_node, 730 + mmp_tdma_xlate, tdev); 731 + if (ret) { 732 + dev_err(tdev->device.dev, "failed to register controller\n"); 733 + return ret; 730 734 } 731 735 732 736 dev_info(tdev->device.dev, "initialized\n"); 733 737 return 0; 734 738 } 735 739 736 - static const 
struct platform_device_id mmp_tdma_id_table[] = { 737 - { "mmp-adma", MMP_AUD_TDMA }, 738 - { "pxa910-squ", PXA910_SQU }, 739 - { }, 740 - }; 741 - 742 740 static struct platform_driver mmp_tdma_driver = { 743 741 .driver = { 744 742 .name = "mmp-tdma", 745 743 .of_match_table = mmp_tdma_dt_ids, 746 744 }, 747 - .id_table = mmp_tdma_id_table, 748 745 .probe = mmp_tdma_probe, 749 - .remove = mmp_tdma_remove, 746 + .remove_new = mmp_tdma_remove, 750 747 }; 751 748 752 749 module_platform_driver(mmp_tdma_driver);
+4 -7
drivers/dma/moxart-dma.c
··· 124 124 unsigned int dma_cycles; 125 125 struct virt_dma_desc vd; 126 126 uint8_t es; 127 - struct moxart_sg sg[]; 127 + struct moxart_sg sg[] __counted_by(sglen); 128 128 }; 129 129 130 130 struct moxart_chan { ··· 309 309 d = kzalloc(struct_size(d, sg, sg_len), GFP_ATOMIC); 310 310 if (!d) 311 311 return NULL; 312 + d->sglen = sg_len; 312 313 313 314 d->dma_dir = dir; 314 315 d->dev_addr = dev_addr; ··· 319 318 d->sg[i].addr = sg_dma_address(sgent); 320 319 d->sg[i].len = sg_dma_len(sgent); 321 320 } 322 - 323 - d->sglen = sg_len; 324 321 325 322 ch->error = 0; 326 323 ··· 629 630 return 0; 630 631 } 631 632 632 - static int moxart_remove(struct platform_device *pdev) 633 + static void moxart_remove(struct platform_device *pdev) 633 634 { 634 635 struct moxart_dmadev *m = platform_get_drvdata(pdev); 635 636 ··· 639 640 640 641 if (pdev->dev.of_node) 641 642 of_dma_controller_free(pdev->dev.of_node); 642 - 643 - return 0; 644 643 } 645 644 646 645 static const struct of_device_id moxart_dma_match[] = { ··· 649 652 650 653 static struct platform_driver moxart_driver = { 651 654 .probe = moxart_probe, 652 - .remove = moxart_remove, 655 + .remove_new = moxart_remove, 653 656 .driver = { 654 657 .name = "moxart-dma-engine", 655 658 .of_match_table = moxart_dma_match,
+2 -4
drivers/dma/mpc512x_dma.c
··· 1084 1084 return retval; 1085 1085 } 1086 1086 1087 - static int mpc_dma_remove(struct platform_device *op) 1087 + static void mpc_dma_remove(struct platform_device *op) 1088 1088 { 1089 1089 struct device *dev = &op->dev; 1090 1090 struct mpc_dma *mdma = dev_get_drvdata(dev); ··· 1099 1099 free_irq(mdma->irq, mdma); 1100 1100 irq_dispose_mapping(mdma->irq); 1101 1101 tasklet_kill(&mdma->tasklet); 1102 - 1103 - return 0; 1104 1102 } 1105 1103 1106 1104 static const struct of_device_id mpc_dma_match[] = { ··· 1110 1112 1111 1113 static struct platform_driver mpc_dma_driver = { 1112 1114 .probe = mpc_dma_probe, 1113 - .remove = mpc_dma_remove, 1115 + .remove_new = mpc_dma_remove, 1114 1116 .driver = { 1115 1117 .name = DRV_NAME, 1116 1118 .of_match_table = mpc_dma_match,
+3 -8
drivers/dma/mv_xor.c
··· 10 10 #include <linux/dma-mapping.h> 11 11 #include <linux/spinlock.h> 12 12 #include <linux/interrupt.h> 13 - #include <linux/of_device.h> 14 13 #include <linux/platform_device.h> 14 + #include <linux/property.h> 15 15 #include <linux/memory.h> 16 16 #include <linux/clk.h> 17 17 #include <linux/of.h> ··· 1328 1328 * setting up. In non-dt case it can only be the legacy one. 1329 1329 */ 1330 1330 xordev->xor_type = XOR_ORION; 1331 - if (pdev->dev.of_node) { 1332 - const struct of_device_id *of_id = 1333 - of_match_device(mv_xor_dt_ids, 1334 - &pdev->dev); 1335 - 1336 - xordev->xor_type = (uintptr_t)of_id->data; 1337 - } 1331 + if (pdev->dev.of_node) 1332 + xordev->xor_type = (uintptr_t)device_get_match_data(&pdev->dev); 1338 1333 1339 1334 /* 1340 1335 * (Re-)program MBUS remapping windows if we are asked to.
+2 -4
drivers/dma/mv_xor_v2.c
··· 855 855 return ret; 856 856 } 857 857 858 - static int mv_xor_v2_remove(struct platform_device *pdev) 858 + static void mv_xor_v2_remove(struct platform_device *pdev) 859 859 { 860 860 struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev); 861 861 ··· 870 870 platform_msi_domain_free_irqs(&pdev->dev); 871 871 872 872 tasklet_kill(&xor_dev->irq_tasklet); 873 - 874 - return 0; 875 873 } 876 874 877 875 #ifdef CONFIG_OF ··· 884 886 .probe = mv_xor_v2_probe, 885 887 .suspend = mv_xor_v2_suspend, 886 888 .resume = mv_xor_v2_resume, 887 - .remove = mv_xor_v2_remove, 889 + .remove_new = mv_xor_v2_remove, 888 890 .driver = { 889 891 .name = "mv_xor_v2", 890 892 .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
+2 -4
drivers/dma/nbpfaxi.c
··· 1454 1454 return ret; 1455 1455 } 1456 1456 1457 - static int nbpf_remove(struct platform_device *pdev) 1457 + static void nbpf_remove(struct platform_device *pdev) 1458 1458 { 1459 1459 struct nbpf_device *nbpf = platform_get_drvdata(pdev); 1460 1460 int i; ··· 1472 1472 of_dma_controller_free(pdev->dev.of_node); 1473 1473 dma_async_device_unregister(&nbpf->dma_dev); 1474 1474 clk_disable_unprepare(nbpf->clk); 1475 - 1476 - return 0; 1477 1475 } 1478 1476 1479 1477 static const struct platform_device_id nbpf_ids[] = { ··· 1515 1517 }, 1516 1518 .id_table = nbpf_ids, 1517 1519 .probe = nbpf_probe, 1518 - .remove = nbpf_remove, 1520 + .remove_new = nbpf_remove, 1519 1521 }; 1520 1522 1521 1523 module_platform_driver(nbpf_driver);
+2 -4
drivers/dma/owl-dma.c
··· 1231 1231 return ret; 1232 1232 } 1233 1233 1234 - static int owl_dma_remove(struct platform_device *pdev) 1234 + static void owl_dma_remove(struct platform_device *pdev) 1235 1235 { 1236 1236 struct owl_dma *od = platform_get_drvdata(pdev); 1237 1237 ··· 1248 1248 1249 1249 clk_disable_unprepare(od->clk); 1250 1250 dma_pool_destroy(od->lli_pool); 1251 - 1252 - return 0; 1253 1251 } 1254 1252 1255 1253 static struct platform_driver owl_dma_driver = { 1256 1254 .probe = owl_dma_probe, 1257 - .remove = owl_dma_remove, 1255 + .remove_new = owl_dma_remove, 1258 1256 .driver = { 1259 1257 .name = "dma-owl", 1260 1258 .of_match_table = of_match_ptr(owl_dma_match),
+2 -3
drivers/dma/ppc4xx/adma.c
··· 4230 4230 /** 4231 4231 * ppc440spe_adma_remove - remove the asynch device 4232 4232 */ 4233 - static int ppc440spe_adma_remove(struct platform_device *ofdev) 4233 + static void ppc440spe_adma_remove(struct platform_device *ofdev) 4234 4234 { 4235 4235 struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev); 4236 4236 struct device_node *np = ofdev->dev.of_node; ··· 4278 4278 of_address_to_resource(np, 0, &res); 4279 4279 release_mem_region(res.start, resource_size(&res)); 4280 4280 kfree(adev); 4281 - return 0; 4282 4281 } 4283 4282 4284 4283 /* ··· 4549 4550 4550 4551 static struct platform_driver ppc440spe_adma_driver = { 4551 4552 .probe = ppc440spe_adma_probe, 4552 - .remove = ppc440spe_adma_remove, 4553 + .remove_new = ppc440spe_adma_remove, 4553 4554 .driver = { 4554 4555 .name = "PPC440SP(E)-ADMA", 4555 4556 .of_match_table = ppc440spe_adma_of_match,
+12 -14
drivers/dma/pxa_dma.c
··· 15 15 #include <linux/device.h> 16 16 #include <linux/platform_data/mmp_dma.h> 17 17 #include <linux/dmapool.h> 18 - #include <linux/of_device.h> 19 - #include <linux/of_dma.h> 20 18 #include <linux/of.h> 19 + #include <linux/of_dma.h> 21 20 #include <linux/wait.h> 22 21 #include <linux/dma/pxa-dma.h> 23 22 ··· 90 91 bool cyclic; 91 92 struct dma_pool *desc_pool; /* Channel's used allocator */ 92 93 93 - struct pxad_desc_hw *hw_desc[]; /* DMA coherent descriptors */ 94 + struct pxad_desc_hw *hw_desc[] __counted_by(nb_desc); 95 + /* DMA coherent descriptors */ 94 96 }; 95 97 96 98 struct pxad_phy { ··· 722 722 dma_addr_t dma; 723 723 struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd); 724 724 725 - BUG_ON(sw_desc->nb_desc == 0); 726 725 for (i = sw_desc->nb_desc - 1; i >= 0; i--) { 727 726 if (i > 0) 728 727 dma = sw_desc->hw_desc[i - 1]->ddadr; ··· 739 740 { 740 741 struct pxad_desc_sw *sw_desc; 741 742 dma_addr_t dma; 743 + void *desc; 742 744 int i; 743 745 744 746 sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc), ··· 749 749 sw_desc->desc_pool = chan->desc_pool; 750 750 751 751 for (i = 0; i < nb_hw_desc; i++) { 752 - sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool, 753 - GFP_NOWAIT, &dma); 754 - if (!sw_desc->hw_desc[i]) { 752 + desc = dma_pool_alloc(sw_desc->desc_pool, GFP_NOWAIT, &dma); 753 + if (!desc) { 755 754 dev_err(&chan->vc.chan.dev->device, 756 755 "%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n", 757 756 __func__, i, sw_desc->desc_pool); 758 757 goto err; 759 758 } 760 759 760 + sw_desc->nb_desc++; 761 + sw_desc->hw_desc[i] = desc; 762 + 761 763 if (i == 0) 762 764 sw_desc->first = dma; 763 765 else 764 766 sw_desc->hw_desc[i - 1]->ddadr = dma; 765 - sw_desc->nb_desc++; 766 767 } 767 768 768 769 return sw_desc; ··· 1222 1221 } 1223 1222 } 1224 1223 1225 - static int pxad_remove(struct platform_device *op) 1224 + static void pxad_remove(struct platform_device *op) 1226 1225 { 1227 1226 struct pxad_device *pdev = 
platform_get_drvdata(op); 1228 1227 1229 1228 pxad_cleanup_debugfs(pdev); 1230 1229 pxad_free_channels(&pdev->slave); 1231 - return 0; 1232 1230 } 1233 1231 1234 1232 static int pxad_init_phys(struct platform_device *op, ··· 1343 1343 static int pxad_probe(struct platform_device *op) 1344 1344 { 1345 1345 struct pxad_device *pdev; 1346 - const struct of_device_id *of_id; 1347 1346 const struct dma_slave_map *slave_map = NULL; 1348 1347 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); 1349 1348 int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0; ··· 1360 1361 if (IS_ERR(pdev->base)) 1361 1362 return PTR_ERR(pdev->base); 1362 1363 1363 - of_id = of_match_device(pxad_dt_ids, &op->dev); 1364 - if (of_id) { 1364 + if (op->dev.of_node) { 1365 1365 /* Parse new and deprecated dma-channels properties */ 1366 1366 if (of_property_read_u32(op->dev.of_node, "dma-channels", 1367 1367 &dma_channels)) ··· 1442 1444 }, 1443 1445 .id_table = pxad_id_table, 1444 1446 .probe = pxad_probe, 1445 - .remove = pxad_remove, 1447 + .remove_new = pxad_remove, 1446 1448 }; 1447 1449 1448 1450 static bool pxad_filter_fn(struct dma_chan *chan, void *param)
+3 -5
drivers/dma/qcom/bam_dma.c
··· 74 74 struct list_head desc_node; 75 75 enum dma_transfer_direction dir; 76 76 size_t length; 77 - struct bam_desc_hw desc[]; 77 + struct bam_desc_hw desc[] __counted_by(num_desc); 78 78 }; 79 79 80 80 enum bam_reg { ··· 1386 1386 return ret; 1387 1387 } 1388 1388 1389 - static int bam_dma_remove(struct platform_device *pdev) 1389 + static void bam_dma_remove(struct platform_device *pdev) 1390 1390 { 1391 1391 struct bam_device *bdev = platform_get_drvdata(pdev); 1392 1392 u32 i; ··· 1416 1416 tasklet_kill(&bdev->task); 1417 1417 1418 1418 clk_disable_unprepare(bdev->bamclk); 1419 - 1420 - return 0; 1421 1419 } 1422 1420 1423 1421 static int __maybe_unused bam_dma_runtime_suspend(struct device *dev) ··· 1473 1475 1474 1476 static struct platform_driver bam_dma_driver = { 1475 1477 .probe = bam_dma_probe, 1476 - .remove = bam_dma_remove, 1478 + .remove_new = bam_dma_remove, 1477 1479 .driver = { 1478 1480 .name = "bam-dma-engine", 1479 1481 .pm = &bam_dma_pm_ops,
+3 -5
drivers/dma/qcom/hidma.c
··· 745 745 { 746 746 enum hidma_cap cap; 747 747 748 - cap = (enum hidma_cap) device_get_match_data(dev); 748 + cap = (uintptr_t) device_get_match_data(dev); 749 749 return cap ? ((cap & test_cap) > 0) : 0; 750 750 } 751 751 ··· 915 915 916 916 } 917 917 918 - static int hidma_remove(struct platform_device *pdev) 918 + static void hidma_remove(struct platform_device *pdev) 919 919 { 920 920 struct hidma_dev *dmadev = platform_get_drvdata(pdev); 921 921 ··· 935 935 dev_info(&pdev->dev, "HI-DMA engine removed\n"); 936 936 pm_runtime_put_sync_suspend(&pdev->dev); 937 937 pm_runtime_disable(&pdev->dev); 938 - 939 - return 0; 940 938 } 941 939 942 940 #if IS_ENABLED(CONFIG_ACPI) ··· 958 960 959 961 static struct platform_driver hidma_driver = { 960 962 .probe = hidma_probe, 961 - .remove = hidma_remove, 963 + .remove_new = hidma_remove, 962 964 .shutdown = hidma_shutdown, 963 965 .driver = { 964 966 .name = "hidma",
+2 -4
drivers/dma/qcom/qcom_adm.c
··· 904 904 return ret; 905 905 } 906 906 907 - static int adm_dma_remove(struct platform_device *pdev) 907 + static void adm_dma_remove(struct platform_device *pdev) 908 908 { 909 909 struct adm_device *adev = platform_get_drvdata(pdev); 910 910 struct adm_chan *achan; ··· 927 927 928 928 clk_disable_unprepare(adev->core_clk); 929 929 clk_disable_unprepare(adev->iface_clk); 930 - 931 - return 0; 932 930 } 933 931 934 932 static const struct of_device_id adm_of_match[] = { ··· 937 939 938 940 static struct platform_driver adm_dma_driver = { 939 941 .probe = adm_dma_probe, 940 - .remove = adm_dma_remove, 942 + .remove_new = adm_dma_remove, 941 943 .driver = { 942 944 .name = "adm-dma-engine", 943 945 .of_match_table = adm_of_match,
+5 -7
drivers/dma/sa11x0-dma.c
··· 78 78 bool cyclic; 79 79 80 80 unsigned sglen; 81 - struct sa11x0_dma_sg sg[]; 81 + struct sa11x0_dma_sg sg[] __counted_by(sglen); 82 82 }; 83 83 84 84 struct sa11x0_dma_phy; ··· 558 558 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); 559 559 return NULL; 560 560 } 561 + txd->sglen = j; 561 562 562 563 j = 0; 563 564 for_each_sg(sg, sgent, sglen, i) { ··· 594 593 595 594 txd->ddar = c->ddar; 596 595 txd->size = size; 597 - txd->sglen = j; 598 596 599 597 dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n", 600 598 &c->vc, &txd->vd, txd->size, txd->sglen); ··· 628 628 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); 629 629 return NULL; 630 630 } 631 + txd->sglen = sglen; 631 632 632 633 for (i = k = 0; i < size / period; i++) { 633 634 size_t tlen, len = period; ··· 654 653 655 654 txd->ddar = c->ddar; 656 655 txd->size = size; 657 - txd->sglen = sglen; 658 656 txd->cyclic = 1; 659 657 txd->period = sgperiod; 660 658 ··· 984 984 return ret; 985 985 } 986 986 987 - static int sa11x0_dma_remove(struct platform_device *pdev) 987 + static void sa11x0_dma_remove(struct platform_device *pdev) 988 988 { 989 989 struct sa11x0_dma_dev *d = platform_get_drvdata(pdev); 990 990 unsigned pch; ··· 997 997 tasklet_kill(&d->task); 998 998 iounmap(d->base); 999 999 kfree(d); 1000 - 1001 - return 0; 1002 1000 } 1003 1001 1004 1002 static __maybe_unused int sa11x0_dma_suspend(struct device *dev) ··· 1079 1081 .pm = &sa11x0_dma_pm_ops, 1080 1082 }, 1081 1083 .probe = sa11x0_dma_probe, 1082 - .remove = sa11x0_dma_remove, 1084 + .remove_new = sa11x0_dma_remove, 1083 1085 }; 1084 1086 1085 1087 static int __init sa11x0_dma_init(void)
+2 -4
drivers/dma/sf-pdma/sf-pdma.c
··· 566 566 return 0; 567 567 } 568 568 569 - static int sf_pdma_remove(struct platform_device *pdev) 569 + static void sf_pdma_remove(struct platform_device *pdev) 570 570 { 571 571 struct sf_pdma *pdma = platform_get_drvdata(pdev); 572 572 struct sf_pdma_chan *ch; ··· 584 584 } 585 585 586 586 dma_async_device_unregister(&pdma->dma_dev); 587 - 588 - return 0; 589 587 } 590 588 591 589 static const struct of_device_id sf_pdma_dt_ids[] = { ··· 595 597 596 598 static struct platform_driver sf_pdma_driver = { 597 599 .probe = sf_pdma_probe, 598 - .remove = sf_pdma_remove, 600 + .remove_new = sf_pdma_remove, 599 601 .driver = { 600 602 .name = "sf-pdma", 601 603 .of_match_table = sf_pdma_dt_ids,
+1 -1
drivers/dma/sf-pdma/sf-pdma.h
··· 113 113 void __iomem *membase; 114 114 void __iomem *mappedbase; 115 115 u32 n_chans; 116 - struct sf_pdma_chan chans[]; 116 + struct sf_pdma_chan chans[] __counted_by(n_chans); 117 117 }; 118 118 119 119 #endif /* _SF_PDMA_H */
+2 -4
drivers/dma/sh/rcar-dmac.c
··· 1990 1990 return ret; 1991 1991 } 1992 1992 1993 - static int rcar_dmac_remove(struct platform_device *pdev) 1993 + static void rcar_dmac_remove(struct platform_device *pdev) 1994 1994 { 1995 1995 struct rcar_dmac *dmac = platform_get_drvdata(pdev); 1996 1996 ··· 1998 1998 dma_async_device_unregister(&dmac->engine); 1999 1999 2000 2000 pm_runtime_disable(&pdev->dev); 2001 - 2002 - return 0; 2003 2001 } 2004 2002 2005 2003 static void rcar_dmac_shutdown(struct platform_device *pdev) ··· 2039 2041 .of_match_table = rcar_dmac_of_ids, 2040 2042 }, 2041 2043 .probe = rcar_dmac_probe, 2042 - .remove = rcar_dmac_remove, 2044 + .remove_new = rcar_dmac_remove, 2043 2045 .shutdown = rcar_dmac_shutdown, 2044 2046 }; 2045 2047
+2 -4
drivers/dma/sh/rz-dmac.c
··· 969 969 return ret; 970 970 } 971 971 972 - static int rz_dmac_remove(struct platform_device *pdev) 972 + static void rz_dmac_remove(struct platform_device *pdev) 973 973 { 974 974 struct rz_dmac *dmac = platform_get_drvdata(pdev); 975 975 unsigned int i; ··· 987 987 reset_control_assert(dmac->rstc); 988 988 pm_runtime_put(&pdev->dev); 989 989 pm_runtime_disable(&pdev->dev); 990 - 991 - return 0; 992 990 } 993 991 994 992 static const struct of_device_id of_rz_dmac_match[] = { ··· 1001 1003 .of_match_table = of_rz_dmac_match, 1002 1004 }, 1003 1005 .probe = rz_dmac_probe, 1004 - .remove = rz_dmac_remove, 1006 + .remove_new = rz_dmac_remove, 1005 1007 }; 1006 1008 1007 1009 module_platform_driver(rz_dmac_driver);
+2 -4
drivers/dma/sh/shdmac.c
··· 882 882 return err; 883 883 } 884 884 885 - static int sh_dmae_remove(struct platform_device *pdev) 885 + static void sh_dmae_remove(struct platform_device *pdev) 886 886 { 887 887 struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 888 888 struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; ··· 899 899 shdma_cleanup(&shdev->shdma_dev); 900 900 901 901 synchronize_rcu(); 902 - 903 - return 0; 904 902 } 905 903 906 904 static struct platform_driver sh_dmae_driver = { ··· 906 908 .pm = &sh_dmae_pm, 907 909 .name = SH_DMAE_DRV_NAME, 908 910 }, 909 - .remove = sh_dmae_remove, 911 + .remove_new = sh_dmae_remove, 910 912 }; 911 913 912 914 static int __init sh_dmae_init(void)
+3 -5
drivers/dma/sh/usb-dmac.c
··· 57 57 u32 residue; 58 58 struct list_head node; 59 59 dma_cookie_t done_cookie; 60 - struct usb_dmac_sg sg[]; 60 + struct usb_dmac_sg sg[] __counted_by(sg_allocated_len); 61 61 }; 62 62 63 63 #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) ··· 866 866 devm_free_irq(dmac->dev, uchan->irq, uchan); 867 867 } 868 868 869 - static int usb_dmac_remove(struct platform_device *pdev) 869 + static void usb_dmac_remove(struct platform_device *pdev) 870 870 { 871 871 struct usb_dmac *dmac = platform_get_drvdata(pdev); 872 872 int i; ··· 877 877 dma_async_device_unregister(&dmac->engine); 878 878 879 879 pm_runtime_disable(&pdev->dev); 880 - 881 - return 0; 882 880 } 883 881 884 882 static void usb_dmac_shutdown(struct platform_device *pdev) ··· 899 901 .of_match_table = usb_dmac_of_ids, 900 902 }, 901 903 .probe = usb_dmac_probe, 902 - .remove = usb_dmac_remove, 904 + .remove_new = usb_dmac_remove, 903 905 .shutdown = usb_dmac_shutdown, 904 906 }; 905 907
+14 -8
drivers/dma/sprd-dma.c
··· 212 212 struct clk *ashb_clk; 213 213 int irq; 214 214 u32 total_chns; 215 - struct sprd_dma_chn channels[]; 215 + struct sprd_dma_chn channels[] __counted_by(total_chns); 216 216 }; 217 217 218 218 static void sprd_dma_free_desc(struct virt_dma_desc *vd); ··· 572 572 schan->cur_desc = NULL; 573 573 } 574 574 575 - static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc, 576 - enum sprd_dma_int_type int_type, 575 + static bool sprd_dma_check_trans_done(enum sprd_dma_int_type int_type, 577 576 enum sprd_dma_req_mode req_mode) 578 577 { 579 578 if (int_type == SPRD_DMA_NO_INT) ··· 618 619 vchan_cyclic_callback(&sdesc->vd); 619 620 } else { 620 621 /* Check if the dma request descriptor is done. */ 621 - trans_done = sprd_dma_check_trans_done(sdesc, int_type, 622 - req_type); 622 + trans_done = sprd_dma_check_trans_done(int_type, req_type); 623 623 if (trans_done == true) { 624 624 vchan_cookie_complete(&sdesc->vd); 625 625 schan->cur_desc = NULL; ··· 1115 1117 u32 chn_count; 1116 1118 int ret, i; 1117 1119 1120 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 1121 + if (ret) { 1122 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1123 + if (ret) { 1124 + dev_err(&pdev->dev, "unable to set coherent mask to 32\n"); 1125 + return ret; 1126 + } 1127 + } 1128 + 1118 1129 /* Parse new and deprecated dma-channels properties */ 1119 1130 ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count); 1120 1131 if (ret) ··· 1239 1232 return ret; 1240 1233 } 1241 1234 1242 - static int sprd_dma_remove(struct platform_device *pdev) 1235 + static void sprd_dma_remove(struct platform_device *pdev) 1243 1236 { 1244 1237 struct sprd_dma_dev *sdev = platform_get_drvdata(pdev); 1245 1238 struct sprd_dma_chn *c, *cn; ··· 1262 1255 1263 1256 pm_runtime_put_noidle(&pdev->dev); 1264 1257 pm_runtime_disable(&pdev->dev); 1265 - return 0; 1266 1258 } 1267 1259 1268 1260 static const struct of_device_id sprd_dma_match[] = { ··· 1298 1292 
1299 1293 static struct platform_driver sprd_dma_driver = { 1300 1294 .probe = sprd_dma_probe, 1301 - .remove = sprd_dma_remove, 1295 + .remove_new = sprd_dma_remove, 1302 1296 .driver = { 1303 1297 .name = "sprd-dma", 1304 1298 .of_match_table = sprd_dma_match,
+5 -13
drivers/dma/st_fdma.c
··· 10 10 11 11 #include <linux/init.h> 12 12 #include <linux/module.h> 13 - #include <linux/of_device.h> 13 + #include <linux/of.h> 14 14 #include <linux/of_dma.h> 15 15 #include <linux/platform_device.h> 16 + #include <linux/property.h> 16 17 #include <linux/interrupt.h> 17 18 #include <linux/remoteproc.h> 18 19 #include <linux/slab.h> ··· 740 739 static int st_fdma_probe(struct platform_device *pdev) 741 740 { 742 741 struct st_fdma_dev *fdev; 743 - const struct of_device_id *match; 744 742 struct device_node *np = pdev->dev.of_node; 745 743 const struct st_fdma_driverdata *drvdata; 746 744 int ret, i; 747 745 748 - match = of_match_device((st_fdma_match), &pdev->dev); 749 - if (!match || !match->data) { 750 - dev_err(&pdev->dev, "No device match found\n"); 751 - return -ENODEV; 752 - } 753 - 754 - drvdata = match->data; 746 + drvdata = device_get_match_data(&pdev->dev); 755 747 756 748 fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL); 757 749 if (!fdev) ··· 843 849 return ret; 844 850 } 845 851 846 - static int st_fdma_remove(struct platform_device *pdev) 852 + static void st_fdma_remove(struct platform_device *pdev) 847 853 { 848 854 struct st_fdma_dev *fdev = platform_get_drvdata(pdev); 849 855 850 856 devm_free_irq(&pdev->dev, fdev->irq, fdev); 851 857 st_slim_rproc_put(fdev->slim_rproc); 852 858 of_dma_controller_free(pdev->dev.of_node); 853 - 854 - return 0; 855 859 } 856 860 857 861 static struct platform_driver st_fdma_platform_driver = { ··· 858 866 .of_match_table = st_fdma_match, 859 867 }, 860 868 .probe = st_fdma_probe, 861 - .remove = st_fdma_remove, 869 + .remove_new = st_fdma_remove, 862 870 }; 863 871 module_platform_driver(st_fdma_platform_driver); 864 872
+1 -1
drivers/dma/st_fdma.h
··· 97 97 struct st_fdma_chan *fchan; 98 98 bool iscyclic; 99 99 unsigned int n_nodes; 100 - struct st_fdma_sw_node node[]; 100 + struct st_fdma_sw_node node[] __counted_by(n_nodes); 101 101 }; 102 102 103 103 enum st_fdma_type {
+4 -15
drivers/dma/stm32-dma.c
··· 21 21 #include <linux/list.h> 22 22 #include <linux/module.h> 23 23 #include <linux/of.h> 24 - #include <linux/of_device.h> 25 24 #include <linux/of_dma.h> 26 25 #include <linux/platform_device.h> 27 26 #include <linux/pm_runtime.h> ··· 190 191 struct virt_dma_desc vdesc; 191 192 bool cyclic; 192 193 u32 num_sgs; 193 - struct stm32_dma_sg_req sg_req[]; 194 + struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs); 194 195 }; 195 196 196 197 /** ··· 1104 1105 desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT); 1105 1106 if (!desc) 1106 1107 return NULL; 1108 + desc->num_sgs = sg_len; 1107 1109 1108 1110 /* Set peripheral flow controller */ 1109 1111 if (chan->dma_sconfig.device_fc) ··· 1143 1143 desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg); 1144 1144 desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items; 1145 1145 } 1146 - 1147 - desc->num_sgs = sg_len; 1148 1146 desc->cyclic = false; 1149 1147 1150 1148 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ··· 1216 1218 desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT); 1217 1219 if (!desc) 1218 1220 return NULL; 1221 + desc->num_sgs = num_periods; 1219 1222 1220 1223 for (i = 0; i < num_periods; i++) { 1221 1224 desc->sg_req[i].len = period_len; ··· 1233 1234 if (!chan->trig_mdma) 1234 1235 buf_addr += period_len; 1235 1236 } 1236 - 1237 - desc->num_sgs = num_periods; 1238 1237 desc->cyclic = true; 1239 1238 1240 1239 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ··· 1253 1256 desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT); 1254 1257 if (!desc) 1255 1258 return NULL; 1259 + desc->num_sgs = num_sgs; 1256 1260 1257 1261 threshold = chan->threshold; 1258 1262 ··· 1283 1285 desc->sg_req[i].chan_reg.dma_sndtr = xfer_count; 1284 1286 desc->sg_req[i].len = xfer_count; 1285 1287 } 1286 - 1287 - desc->num_sgs = num_sgs; 1288 1288 desc->cyclic = false; 1289 1289 1290 1290 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ··· 1563 1567 struct 
stm32_dma_chan *chan; 1564 1568 struct stm32_dma_device *dmadev; 1565 1569 struct dma_device *dd; 1566 - const struct of_device_id *match; 1567 1570 struct resource *res; 1568 1571 struct reset_control *rst; 1569 1572 int i, ret; 1570 - 1571 - match = of_match_device(stm32_dma_of_match, &pdev->dev); 1572 - if (!match) { 1573 - dev_err(&pdev->dev, "Error: No device match found\n"); 1574 - return -ENODEV; 1575 - } 1576 1573 1577 1574 dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); 1578 1575 if (!dmadev)
+6 -7
drivers/dma/stm32-mdma.c
··· 224 224 u32 ccr; 225 225 bool cyclic; 226 226 u32 count; 227 - struct stm32_mdma_desc_node node[]; 227 + struct stm32_mdma_desc_node node[] __counted_by(count); 228 228 }; 229 229 230 230 struct stm32_mdma_dma_config { ··· 256 256 u32 nr_ahb_addr_masks; 257 257 u32 chan_reserved; 258 258 struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; 259 - u32 ahb_addr_masks[]; 259 + u32 ahb_addr_masks[] __counted_by(nr_ahb_addr_masks); 260 260 }; 261 261 262 262 static struct stm32_mdma_device *stm32_mdma_get_dev( ··· 321 321 desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT); 322 322 if (!desc) 323 323 return NULL; 324 + desc->count = count; 324 325 325 326 for (i = 0; i < count; i++) { 326 327 desc->node[i].hwdesc = ··· 330 329 if (!desc->node[i].hwdesc) 331 330 goto err; 332 331 } 333 - 334 - desc->count = count; 335 332 336 333 return desc; 337 334 ··· 488 489 src_maxburst = chan->dma_config.src_maxburst; 489 490 dst_maxburst = chan->dma_config.dst_maxburst; 490 491 491 - ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); 492 + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN; 492 493 ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); 493 494 ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); 494 495 ··· 964 965 if (!desc) 965 966 return NULL; 966 967 967 - ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); 968 + ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN; 968 969 ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); 969 970 ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); 970 971 cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); ··· 1626 1627 GFP_KERNEL); 1627 1628 if (!dmadev) 1628 1629 return -ENOMEM; 1630 + dmadev->nr_ahb_addr_masks = count; 1629 1631 1630 1632 dmadev->nr_channels = nr_channels; 1631 1633 dmadev->nr_requests = nr_requests; 1632 1634 device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", 1633 1635 
dmadev->ahb_addr_masks, 1634 1636 count); 1635 - dmadev->nr_ahb_addr_masks = count; 1636 1637 1637 1638 dmadev->base = devm_platform_ioremap_resource(pdev, 0); 1638 1639 if (IS_ERR(dmadev->base))
+2 -4
drivers/dma/sun4i-dma.c
··· 1271 1271 return ret; 1272 1272 } 1273 1273 1274 - static int sun4i_dma_remove(struct platform_device *pdev) 1274 + static void sun4i_dma_remove(struct platform_device *pdev) 1275 1275 { 1276 1276 struct sun4i_dma_dev *priv = platform_get_drvdata(pdev); 1277 1277 ··· 1282 1282 dma_async_device_unregister(&priv->slave); 1283 1283 1284 1284 clk_disable_unprepare(priv->clk); 1285 - 1286 - return 0; 1287 1285 } 1288 1286 1289 1287 static const struct of_device_id sun4i_dma_match[] = { ··· 1292 1294 1293 1295 static struct platform_driver sun4i_dma_driver = { 1294 1296 .probe = sun4i_dma_probe, 1295 - .remove = sun4i_dma_remove, 1297 + .remove_new = sun4i_dma_remove, 1296 1298 .driver = { 1297 1299 .name = "sun4i-dma", 1298 1300 .of_match_table = sun4i_dma_match,
+2 -4
drivers/dma/sun6i-dma.c
··· 1470 1470 return ret; 1471 1471 } 1472 1472 1473 - static int sun6i_dma_remove(struct platform_device *pdev) 1473 + static void sun6i_dma_remove(struct platform_device *pdev) 1474 1474 { 1475 1475 struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev); 1476 1476 ··· 1484 1484 reset_control_assert(sdc->rstc); 1485 1485 1486 1486 sun6i_dma_free(sdc); 1487 - 1488 - return 0; 1489 1487 } 1490 1488 1491 1489 static struct platform_driver sun6i_dma_driver = { 1492 1490 .probe = sun6i_dma_probe, 1493 - .remove = sun6i_dma_remove, 1491 + .remove_new = sun6i_dma_remove, 1494 1492 .driver = { 1495 1493 .name = "sun6i-dma", 1496 1494 .of_match_table = sun6i_dma_match,
+3 -5
drivers/dma/tegra186-gpc-dma.c
··· 221 221 unsigned int sg_count; 222 222 struct virt_dma_desc vd; 223 223 struct tegra_dma_channel *tdc; 224 - struct tegra_dma_sg_req sg_req[]; 224 + struct tegra_dma_sg_req sg_req[] __counted_by(sg_count); 225 225 }; 226 226 227 227 /* ··· 1473 1473 return 0; 1474 1474 } 1475 1475 1476 - static int tegra_dma_remove(struct platform_device *pdev) 1476 + static void tegra_dma_remove(struct platform_device *pdev) 1477 1477 { 1478 1478 struct tegra_dma *tdma = platform_get_drvdata(pdev); 1479 1479 1480 1480 of_dma_controller_free(pdev->dev.of_node); 1481 1481 dma_async_device_unregister(&tdma->dma_dev); 1482 - 1483 - return 0; 1484 1482 } 1485 1483 1486 1484 static int __maybe_unused tegra_dma_pm_suspend(struct device *dev) ··· 1531 1533 .of_match_table = tegra_dma_of_match, 1532 1534 }, 1533 1535 .probe = tegra_dma_probe, 1534 - .remove = tegra_dma_remove, 1536 + .remove_new = tegra_dma_remove, 1535 1537 }; 1536 1538 1537 1539 module_platform_driver(tegra_dma_driver);
+2 -4
drivers/dma/tegra20-apb-dma.c
··· 1581 1581 return ret; 1582 1582 } 1583 1583 1584 - static int tegra_dma_remove(struct platform_device *pdev) 1584 + static void tegra_dma_remove(struct platform_device *pdev) 1585 1585 { 1586 1586 struct tegra_dma *tdma = platform_get_drvdata(pdev); 1587 1587 ··· 1589 1589 dma_async_device_unregister(&tdma->dma_dev); 1590 1590 pm_runtime_disable(&pdev->dev); 1591 1591 clk_unprepare(tdma->dma_clk); 1592 - 1593 - return 0; 1594 1592 } 1595 1593 1596 1594 static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev) ··· 1675 1677 .of_match_table = tegra_dma_of_match, 1676 1678 }, 1677 1679 .probe = tegra_dma_probe, 1678 - .remove = tegra_dma_remove, 1680 + .remove_new = tegra_dma_remove, 1679 1681 }; 1680 1682 1681 1683 module_platform_driver(tegra_dmac_driver);
+3 -5
drivers/dma/tegra210-adma.c
··· 162 162 const struct tegra_adma_chip_data *cdata; 163 163 164 164 /* Last member of the structure */ 165 - struct tegra_adma_chan channels[]; 165 + struct tegra_adma_chan channels[] __counted_by(nr_channels); 166 166 }; 167 167 168 168 static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val) ··· 949 949 return ret; 950 950 } 951 951 952 - static int tegra_adma_remove(struct platform_device *pdev) 952 + static void tegra_adma_remove(struct platform_device *pdev) 953 953 { 954 954 struct tegra_adma *tdma = platform_get_drvdata(pdev); 955 955 int i; ··· 961 961 irq_dispose_mapping(tdma->channels[i].irq); 962 962 963 963 pm_runtime_disable(&pdev->dev); 964 - 965 - return 0; 966 964 } 967 965 968 966 static const struct dev_pm_ops tegra_adma_dev_pm_ops = { ··· 977 979 .of_match_table = tegra_adma_of_match, 978 980 }, 979 981 .probe = tegra_adma_probe, 980 - .remove = tegra_adma_remove, 982 + .remove_new = tegra_adma_remove, 981 983 }; 982 984 983 985 module_platform_driver(tegra_admac_driver);
+2 -3
drivers/dma/ti/cppi41.c
··· 1156 1156 return ret; 1157 1157 } 1158 1158 1159 - static int cppi41_dma_remove(struct platform_device *pdev) 1159 + static void cppi41_dma_remove(struct platform_device *pdev) 1160 1160 { 1161 1161 struct cppi41_dd *cdd = platform_get_drvdata(pdev); 1162 1162 int error; ··· 1173 1173 pm_runtime_dont_use_autosuspend(&pdev->dev); 1174 1174 pm_runtime_put_sync(&pdev->dev); 1175 1175 pm_runtime_disable(&pdev->dev); 1176 - return 0; 1177 1176 } 1178 1177 1179 1178 static int __maybe_unused cppi41_suspend(struct device *dev) ··· 1243 1244 1244 1245 static struct platform_driver cpp41_dma_driver = { 1245 1246 .probe = cppi41_dma_probe, 1246 - .remove = cppi41_dma_remove, 1247 + .remove_new = cppi41_dma_remove, 1247 1248 .driver = { 1248 1249 .name = "cppi41-dma-engine", 1249 1250 .pm = &cppi41_pm_ops,
+5 -7
drivers/dma/ti/edma.c
··· 202 202 u32 residue; 203 203 u32 residue_stat; 204 204 205 - struct edma_pset pset[]; 205 + struct edma_pset pset[] __counted_by(pset_nr); 206 206 }; 207 207 208 208 struct edma_cc; ··· 2401 2401 if (irq < 0 && node) 2402 2402 irq = irq_of_parse_and_map(node, 0); 2403 2403 2404 - if (irq >= 0) { 2404 + if (irq > 0) { 2405 2405 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", 2406 2406 dev_name(dev)); 2407 2407 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, ··· 2417 2417 if (irq < 0 && node) 2418 2418 irq = irq_of_parse_and_map(node, 2); 2419 2419 2420 - if (irq >= 0) { 2420 + if (irq > 0) { 2421 2421 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", 2422 2422 dev_name(dev)); 2423 2423 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, ··· 2550 2550 } 2551 2551 } 2552 2552 2553 - static int edma_remove(struct platform_device *pdev) 2553 + static void edma_remove(struct platform_device *pdev) 2554 2554 { 2555 2555 struct device *dev = &pdev->dev; 2556 2556 struct edma_cc *ecc = dev_get_drvdata(dev); ··· 2568 2568 edma_free_slot(ecc, ecc->dummy_slot); 2569 2569 pm_runtime_put_sync(dev); 2570 2570 pm_runtime_disable(dev); 2571 - 2572 - return 0; 2573 2571 } 2574 2572 2575 2573 #ifdef CONFIG_PM_SLEEP ··· 2626 2628 2627 2629 static struct platform_driver edma_driver = { 2628 2630 .probe = edma_probe, 2629 - .remove = edma_remove, 2631 + .remove_new = edma_remove, 2630 2632 .driver = { 2631 2633 .name = "edma", 2632 2634 .pm = &edma_pm_ops,
+4 -7
drivers/dma/ti/omap-dma.c
··· 124 124 uint32_t csdp; /* CSDP value */ 125 125 126 126 unsigned sglen; 127 - struct omap_sg sg[]; 127 + struct omap_sg sg[] __counted_by(sglen); 128 128 }; 129 129 130 130 enum { ··· 1005 1005 d = kzalloc(struct_size(d, sg, sglen), GFP_ATOMIC); 1006 1006 if (!d) 1007 1007 return NULL; 1008 + d->sglen = sglen; 1008 1009 1009 1010 d->dir = dir; 1010 1011 d->dev_addr = dev_addr; ··· 1120 1119 omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1)); 1121 1120 } 1122 1121 } 1123 - 1124 - d->sglen = sglen; 1125 1122 1126 1123 /* Release the dma_pool entries if one allocation failed */ 1127 1124 if (ll_failed) { ··· 1843 1844 return rc; 1844 1845 } 1845 1846 1846 - static int omap_dma_remove(struct platform_device *pdev) 1847 + static void omap_dma_remove(struct platform_device *pdev) 1847 1848 { 1848 1849 struct omap_dmadev *od = platform_get_drvdata(pdev); 1849 1850 int irq; ··· 1868 1869 dma_pool_destroy(od->desc_pool); 1869 1870 1870 1871 omap_dma_free(od); 1871 - 1872 - return 0; 1873 1872 } 1874 1873 1875 1874 static const struct omap_dma_config omap2420_data = { ··· 1915 1918 1916 1919 static struct platform_driver omap_dma_driver = { 1917 1920 .probe = omap_dma_probe, 1918 - .remove = omap_dma_remove, 1921 + .remove_new = omap_dma_remove, 1919 1922 .driver = { 1920 1923 .name = "omap-dma-engine", 1921 1924 .of_match_table = omap_dma_match,
+2 -3
drivers/dma/timb_dma.c
··· 740 740 741 741 } 742 742 743 - static int td_remove(struct platform_device *pdev) 743 + static void td_remove(struct platform_device *pdev) 744 744 { 745 745 struct timb_dma *td = platform_get_drvdata(pdev); 746 746 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 754 754 release_mem_region(iomem->start, resource_size(iomem)); 755 755 756 756 dev_dbg(&pdev->dev, "Removed...\n"); 757 - return 0; 758 757 } 759 758 760 759 static struct platform_driver td_driver = { ··· 761 762 .name = DRIVER_NAME, 762 763 }, 763 764 .probe = td_probe, 764 - .remove = td_remove, 765 + .remove_new = td_remove, 765 766 }; 766 767 767 768 module_platform_driver(td_driver);
+4 -6
drivers/dma/txx9dmac.c
··· 1151 1151 return 0; 1152 1152 } 1153 1153 1154 - static int txx9dmac_chan_remove(struct platform_device *pdev) 1154 + static void txx9dmac_chan_remove(struct platform_device *pdev) 1155 1155 { 1156 1156 struct txx9dmac_chan *dc = platform_get_drvdata(pdev); 1157 1157 ··· 1162 1162 tasklet_kill(&dc->tasklet); 1163 1163 } 1164 1164 dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; 1165 - return 0; 1166 1165 } 1167 1166 1168 1167 static int __init txx9dmac_probe(struct platform_device *pdev) ··· 1214 1215 return 0; 1215 1216 } 1216 1217 1217 - static int txx9dmac_remove(struct platform_device *pdev) 1218 + static void txx9dmac_remove(struct platform_device *pdev) 1218 1219 { 1219 1220 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1220 1221 ··· 1223 1224 devm_free_irq(&pdev->dev, ddev->irq, ddev); 1224 1225 tasklet_kill(&ddev->tasklet); 1225 1226 } 1226 - return 0; 1227 1227 } 1228 1228 1229 1229 static void txx9dmac_shutdown(struct platform_device *pdev) ··· 1260 1262 }; 1261 1263 1262 1264 static struct platform_driver txx9dmac_chan_driver = { 1263 - .remove = txx9dmac_chan_remove, 1265 + .remove_new = txx9dmac_chan_remove, 1264 1266 .driver = { 1265 1267 .name = "txx9dmac-chan", 1266 1268 }, 1267 1269 }; 1268 1270 1269 1271 static struct platform_driver txx9dmac_driver = { 1270 - .remove = txx9dmac_remove, 1272 + .remove_new = txx9dmac_remove, 1271 1273 .shutdown = txx9dmac_shutdown, 1272 1274 .driver = { 1273 1275 .name = "txx9dmac",
+4 -4
drivers/dma/uniphier-xdmac.c
··· 80 80 unsigned int nr_node; 81 81 unsigned int cur_node; 82 82 enum dma_transfer_direction dir; 83 - struct uniphier_xdmac_desc_node nodes[]; 83 + struct uniphier_xdmac_desc_node nodes[] __counted_by(nr_node); 84 84 }; 85 85 86 86 struct uniphier_xdmac_chan { ··· 97 97 struct dma_device ddev; 98 98 void __iomem *reg_base; 99 99 int nr_chans; 100 - struct uniphier_xdmac_chan channels[]; 100 + struct uniphier_xdmac_chan channels[] __counted_by(nr_chans); 101 101 }; 102 102 103 103 static struct uniphier_xdmac_chan * ··· 295 295 xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT); 296 296 if (!xd) 297 297 return NULL; 298 + xd->nr_node = nr; 298 299 299 300 for (i = 0; i < nr; i++) { 300 301 burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE); ··· 310 309 } 311 310 312 311 xd->dir = DMA_MEM_TO_MEM; 313 - xd->nr_node = nr; 314 312 xd->cur_node = 0; 315 313 316 314 return vchan_tx_prep(vc, &xd->vd, flags); ··· 351 351 xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT); 352 352 if (!xd) 353 353 return NULL; 354 + xd->nr_node = sg_len; 354 355 355 356 for_each_sg(sgl, sg, sg_len, i) { 356 357 xd->nodes[i].src = (direction == DMA_DEV_TO_MEM) ··· 386 385 } 387 386 388 387 xd->dir = direction; 389 - xd->nr_node = sg_len; 390 388 xd->cur_node = 0; 391 389 392 390 return vchan_tx_prep(vc, &xd->vd, flags);
+2 -4
drivers/dma/xgene-dma.c
··· 1776 1776 return ret; 1777 1777 } 1778 1778 1779 - static int xgene_dma_remove(struct platform_device *pdev) 1779 + static void xgene_dma_remove(struct platform_device *pdev) 1780 1780 { 1781 1781 struct xgene_dma *pdma = platform_get_drvdata(pdev); 1782 1782 struct xgene_dma_chan *chan; ··· 1797 1797 1798 1798 if (!IS_ERR(pdma->clk)) 1799 1799 clk_disable_unprepare(pdma->clk); 1800 - 1801 - return 0; 1802 1800 } 1803 1801 1804 1802 #ifdef CONFIG_ACPI ··· 1815 1817 1816 1818 static struct platform_driver xgene_dma_driver = { 1817 1819 .probe = xgene_dma_probe, 1818 - .remove = xgene_dma_remove, 1820 + .remove_new = xgene_dma_remove, 1819 1821 .driver = { 1820 1822 .name = "X-Gene-DMA", 1821 1823 .of_match_table = xgene_dma_of_match_ptr,
+2
drivers/dma/xilinx/xdma-regs.h
··· 44 44 FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag))) 45 45 #define XDMA_DESC_CONTROL_LAST \ 46 46 XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED) 47 + #define XDMA_DESC_CONTROL_CYCLIC \ 48 + XDMA_DESC_CONTROL(1, XDMA_DESC_COMPLETED) 47 49 48 50 /* 49 51 * Descriptor for a single contiguous memory block transfer.
+174 -13
drivers/dma/xilinx/xdma.c
··· 83 83 * @dblk_num: Number of hardware descriptor blocks 84 84 * @desc_num: Number of hardware descriptors 85 85 * @completed_desc_num: Completed hardware descriptors 86 + * @cyclic: Cyclic transfer vs. scatter-gather 87 + * @periods: Number of periods in the cyclic transfer 88 + * @period_size: Size of a period in bytes in cyclic transfers 86 89 */ 87 90 struct xdma_desc { 88 91 struct virt_dma_desc vdesc; ··· 96 93 u32 dblk_num; 97 94 u32 desc_num; 98 95 u32 completed_desc_num; 96 + bool cyclic; 97 + u32 periods; 98 + u32 period_size; 99 99 }; 100 100 101 101 #define XDMA_DEV_STATUS_REG_DMA BIT(0) ··· 143 137 } 144 138 145 139 /** 146 - * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer 140 + * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer 147 141 * @sw_desc: Tx descriptor pointer 148 142 */ 149 - static void xdma_link_desc_blocks(struct xdma_desc *sw_desc) 143 + static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc) 150 144 { 151 145 struct xdma_desc_block *block; 152 146 u32 last_blk_desc, desc_control; ··· 178 172 block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1]; 179 173 desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE; 180 174 desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST); 175 + } 176 + 177 + /** 178 + * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer 179 + * @sw_desc: Tx descriptor pointer 180 + */ 181 + static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc) 182 + { 183 + struct xdma_desc_block *block; 184 + struct xdma_hw_desc *desc; 185 + int i; 186 + 187 + block = sw_desc->desc_blocks; 188 + for (i = 0; i < sw_desc->desc_num - 1; i++) { 189 + desc = block->virt_addr + i * XDMA_DESC_SIZE; 190 + desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE)); 191 + } 192 + desc = block->virt_addr + i * XDMA_DESC_SIZE; 193 + desc->next_desc = cpu_to_le64(block->dma_addr); 181 194 } 182 195 183 196 static inline struct xdma_chan 
*to_xdma_chan(struct dma_chan *chan) ··· 256 231 * xdma_alloc_desc - Allocate descriptor 257 232 * @chan: DMA channel pointer 258 233 * @desc_num: Number of hardware descriptors 234 + * @cyclic: Whether this is a cyclic transfer 259 235 */ 260 236 static struct xdma_desc * 261 - xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num) 237 + xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic) 262 238 { 263 239 struct xdma_desc *sw_desc; 264 240 struct xdma_hw_desc *desc; 265 241 dma_addr_t dma_addr; 266 242 u32 dblk_num; 243 + u32 control; 267 244 void *addr; 268 245 int i, j; 269 246 ··· 275 248 276 249 sw_desc->chan = chan; 277 250 sw_desc->desc_num = desc_num; 251 + sw_desc->cyclic = cyclic; 278 252 dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT); 279 253 sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks), 280 254 GFP_NOWAIT); 281 255 if (!sw_desc->desc_blocks) 282 256 goto failed; 257 + 258 + if (cyclic) 259 + control = XDMA_DESC_CONTROL_CYCLIC; 260 + else 261 + control = XDMA_DESC_CONTROL(1, 0); 283 262 284 263 sw_desc->dblk_num = dblk_num; 285 264 for (i = 0; i < sw_desc->dblk_num; i++) { ··· 296 263 sw_desc->desc_blocks[i].virt_addr = addr; 297 264 sw_desc->desc_blocks[i].dma_addr = dma_addr; 298 265 for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++) 299 - desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0)); 266 + desc[j].control = cpu_to_le32(control); 300 267 } 301 268 302 - xdma_link_desc_blocks(sw_desc); 269 + if (cyclic) 270 + xdma_link_cyclic_desc_blocks(sw_desc); 271 + else 272 + xdma_link_sg_desc_blocks(sw_desc); 303 273 304 274 return sw_desc; 305 275 ··· 502 466 for_each_sg(sgl, sg, sg_len, i) 503 467 desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX); 504 468 505 - sw_desc = xdma_alloc_desc(xdma_chan, desc_num); 469 + sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false); 506 470 if (!sw_desc) 507 471 return NULL; 508 472 sw_desc->dir = dir; ··· 543 507 addr += len; 544 508 rest -= len; 545 509 
} while (rest); 510 + } 511 + 512 + tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); 513 + if (!tx_desc) 514 + goto failed; 515 + 516 + return tx_desc; 517 + 518 + failed: 519 + xdma_free_desc(&sw_desc->vdesc); 520 + 521 + return NULL; 522 + } 523 + 524 + /** 525 + * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions 526 + * @chan: DMA channel pointer 527 + * @address: Device DMA address to access 528 + * @size: Total length to transfer 529 + * @period_size: Period size to use for each transfer 530 + * @dir: Transfer direction 531 + * @flags: Transfer ack flags 532 + */ 533 + static struct dma_async_tx_descriptor * 534 + xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address, 535 + size_t size, size_t period_size, 536 + enum dma_transfer_direction dir, 537 + unsigned long flags) 538 + { 539 + struct xdma_chan *xdma_chan = to_xdma_chan(chan); 540 + struct xdma_device *xdev = xdma_chan->xdev_hdl; 541 + unsigned int periods = size / period_size; 542 + struct dma_async_tx_descriptor *tx_desc; 543 + struct xdma_desc_block *dblk; 544 + struct xdma_hw_desc *desc; 545 + struct xdma_desc *sw_desc; 546 + unsigned int i; 547 + 548 + /* 549 + * Simplify the whole logic by preventing an abnormally high number of 550 + * periods and periods size. 
551 + */ 552 + if (period_size > XDMA_DESC_BLEN_MAX) { 553 + xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX); 554 + return NULL; 555 + } 556 + 557 + if (periods > XDMA_DESC_ADJACENT) { 558 + xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT); 559 + return NULL; 560 + } 561 + 562 + sw_desc = xdma_alloc_desc(xdma_chan, periods, true); 563 + if (!sw_desc) 564 + return NULL; 565 + 566 + sw_desc->periods = periods; 567 + sw_desc->period_size = period_size; 568 + sw_desc->dir = dir; 569 + 570 + dblk = sw_desc->desc_blocks; 571 + desc = dblk->virt_addr; 572 + 573 + /* fill hardware descriptor */ 574 + for (i = 0; i < periods; i++) { 575 + desc->bytes = cpu_to_le32(period_size); 576 + if (dir == DMA_MEM_TO_DEV) { 577 + desc->src_addr = cpu_to_le64(address + i * period_size); 578 + desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr); 579 + } else { 580 + desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr); 581 + desc->dst_addr = cpu_to_le64(address + i * period_size); 582 + } 583 + 584 + desc++; 546 585 } 547 586 548 587 tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags); ··· 688 577 return 0; 689 578 } 690 579 580 + static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 581 + struct dma_tx_state *state) 582 + { 583 + struct xdma_chan *xdma_chan = to_xdma_chan(chan); 584 + struct xdma_desc *desc = NULL; 585 + struct virt_dma_desc *vd; 586 + enum dma_status ret; 587 + unsigned long flags; 588 + unsigned int period_idx; 589 + u32 residue = 0; 590 + 591 + ret = dma_cookie_status(chan, cookie, state); 592 + if (ret == DMA_COMPLETE) 593 + return ret; 594 + 595 + spin_lock_irqsave(&xdma_chan->vchan.lock, flags); 596 + 597 + vd = vchan_find_desc(&xdma_chan->vchan, cookie); 598 + if (vd) 599 + desc = to_xdma_desc(vd); 600 + if (!desc || !desc->cyclic) { 601 + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); 602 + return ret; 603 + } 604 + 605 + period_idx = desc->completed_desc_num % 
desc->periods; 606 + residue = (desc->periods - period_idx) * desc->period_size; 607 + 608 + spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags); 609 + 610 + dma_set_residue(state, residue); 611 + 612 + return ret; 613 + } 614 + 691 615 /** 692 616 * xdma_channel_isr - XDMA channel interrupt handler 693 617 * @irq: IRQ number ··· 736 590 struct virt_dma_desc *vd; 737 591 struct xdma_desc *desc; 738 592 int ret; 593 + u32 st; 739 594 740 595 spin_lock(&xchan->vchan.lock); 741 596 ··· 755 608 goto out; 756 609 757 610 desc->completed_desc_num += complete_desc_num; 611 + 612 + if (desc->cyclic) { 613 + ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, 614 + &st); 615 + if (ret) 616 + goto out; 617 + 618 + regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st); 619 + 620 + vchan_cyclic_callback(vd); 621 + goto out; 622 + } 623 + 758 624 /* 759 625 * if all data blocks are transferred, remove and complete the request 760 626 */ ··· 781 621 complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) 782 622 goto out; 783 623 784 - /* transfer the rest of data */ 624 + /* transfer the rest of data (SG only) */ 785 625 xdma_xfer_start(xchan); 786 626 787 627 out: ··· 1001 841 * xdma_remove - Driver remove function 1002 842 * @pdev: Pointer to the platform_device structure 1003 843 */ 1004 - static int xdma_remove(struct platform_device *pdev) 844 + static void xdma_remove(struct platform_device *pdev) 1005 845 { 1006 846 struct xdma_device *xdev = platform_get_drvdata(pdev); 1007 847 ··· 1010 850 1011 851 if (xdev->status & XDMA_DEV_STATUS_REG_DMA) 1012 852 dma_async_device_unregister(&xdev->dma_dev); 1013 - 1014 - return 0; 1015 853 } 1016 854 1017 855 /** ··· 1043 885 goto failed; 1044 886 } 1045 887 xdev->irq_start = res->start; 1046 - xdev->irq_num = res->end - res->start + 1; 888 + xdev->irq_num = resource_size(res); 1047 889 1048 890 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1049 891 if (!res) { ··· 1079 921 1080 922 
dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask); 1081 923 dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask); 924 + dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask); 1082 925 1083 926 xdev->dma_dev.dev = &pdev->dev; 927 + xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 1084 928 xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources; 1085 929 xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources; 1086 - xdev->dma_dev.device_tx_status = dma_cookie_status; 930 + xdev->dma_dev.device_tx_status = xdma_tx_status; 1087 931 xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg; 1088 932 xdev->dma_dev.device_config = xdma_device_config; 1089 933 xdev->dma_dev.device_issue_pending = xdma_issue_pending; 1090 934 xdev->dma_dev.filter.map = pdata->device_map; 1091 935 xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt; 1092 936 xdev->dma_dev.filter.fn = xdma_filter_fn; 937 + xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic; 1093 938 1094 939 ret = dma_async_device_register(&xdev->dma_dev); 1095 940 if (ret) { ··· 1127 966 }, 1128 967 .id_table = xdma_id_table, 1129 968 .probe = xdma_probe, 1130 - .remove = xdma_remove, 969 + .remove_new = xdma_remove, 1131 970 }; 1132 971 1133 972 module_platform_driver(xdma_driver);
+2 -6
drivers/dma/xilinx/xilinx_dma.c
··· 3242 3242 /** 3243 3243 * xilinx_dma_remove - Driver remove function 3244 3244 * @pdev: Pointer to the platform_device structure 3245 - * 3246 - * Return: Always '0' 3247 3245 */ 3248 - static int xilinx_dma_remove(struct platform_device *pdev) 3246 + static void xilinx_dma_remove(struct platform_device *pdev) 3249 3247 { 3250 3248 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); 3251 3249 int i; ··· 3257 3259 xilinx_dma_chan_remove(xdev->chan[i]); 3258 3260 3259 3261 xdma_disable_allclks(xdev); 3260 - 3261 - return 0; 3262 3262 } 3263 3263 3264 3264 static struct platform_driver xilinx_vdma_driver = { ··· 3265 3269 .of_match_table = xilinx_dma_of_ids, 3266 3270 }, 3267 3271 .probe = xilinx_dma_probe, 3268 - .remove = xilinx_dma_remove, 3272 + .remove_new = xilinx_dma_remove, 3269 3273 }; 3270 3274 3271 3275 module_platform_driver(xilinx_vdma_driver);
+2 -4
drivers/dma/xilinx/xilinx_dpdma.c
··· 1736 1736 return ret; 1737 1737 } 1738 1738 1739 - static int xilinx_dpdma_remove(struct platform_device *pdev) 1739 + static void xilinx_dpdma_remove(struct platform_device *pdev) 1740 1740 { 1741 1741 struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev); 1742 1742 unsigned int i; ··· 1751 1751 1752 1752 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) 1753 1753 xilinx_dpdma_chan_remove(xdev->chan[i]); 1754 - 1755 - return 0; 1756 1754 } 1757 1755 1758 1756 static const struct of_device_id xilinx_dpdma_of_match[] = { ··· 1761 1763 1762 1764 static struct platform_driver xilinx_dpdma_driver = { 1763 1765 .probe = xilinx_dpdma_probe, 1764 - .remove = xilinx_dpdma_remove, 1766 + .remove_new = xilinx_dpdma_remove, 1765 1767 .driver = { 1766 1768 .name = "xilinx-zynqmp-dpdma", 1767 1769 .of_match_table = xilinx_dpdma_of_match,
+2 -4
drivers/dma/xilinx/zynqmp_dma.c
··· 1147 1147 * 1148 1148 * Return: Always '0' 1149 1149 */ 1150 - static int zynqmp_dma_remove(struct platform_device *pdev) 1150 + static void zynqmp_dma_remove(struct platform_device *pdev) 1151 1151 { 1152 1152 struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev); 1153 1153 ··· 1158 1158 pm_runtime_disable(zdev->dev); 1159 1159 if (!pm_runtime_enabled(zdev->dev)) 1160 1160 zynqmp_dma_runtime_suspend(zdev->dev); 1161 - 1162 - return 0; 1163 1161 } 1164 1162 1165 1163 static const struct of_device_id zynqmp_dma_of_match[] = { ··· 1173 1175 .pm = &zynqmp_dma_dev_pm_ops, 1174 1176 }, 1175 1177 .probe = zynqmp_dma_probe, 1176 - .remove = zynqmp_dma_remove, 1178 + .remove_new = zynqmp_dma_remove, 1177 1179 }; 1178 1180 1179 1181 module_platform_driver(zynqmp_dma_driver);
-2
include/linux/dmaengine.h
··· 517 517 return dev_name(&chan->dev->device); 518 518 } 519 519 520 - void dma_chan_cleanup(struct kref *kref); 521 - 522 520 /** 523 521 * typedef dma_filter_fn - callback filter for dma_request_channel 524 522 * @chan: channel to be reviewed
+1
include/uapi/linux/idxd.h
··· 31 31 IDXD_SCMD_WQ_IRQ_ERR = 0x80100000, 32 32 IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000, 33 33 IDXD_SCMD_DEV_EVL_ERR = 0x80120000, 34 + IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000, 34 35 }; 35 36 36 37 #define IDXD_SCMD_SOFTERR_MASK 0x80000000