Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine-fix-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
"A bunch of driver fixes, notably:

- More idxd fixes for driver unregister, error handling and bus
assignment

- HAS_IOMEM depends fix for few drivers

- lock fix in pl330 driver

- xilinx drivers fixes for initialize registers, missing dependencies
and limiting descriptor IDs

- mediatek descriptor management fixes"

* tag 'dmaengine-fix-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
dmaengine: mediatek: use GFP_NOWAIT instead of GFP_ATOMIC in prep_dma
dmaengine: mediatek: do not issue a new desc if one is still current
dmaengine: mediatek: free the proper desc in desc_free handler
dmaengine: ipu: fix doc warning in ipu_irq.c
dmaengine: rcar-dmac: Fix PM reference leak in rcar_dmac_probe()
dmaengine: idxd: Fix missing error code in idxd_cdev_open()
dmaengine: stedma40: add missing iounmap() on error in d40_probe()
dmaengine: SF_PDMA depends on HAS_IOMEM
dmaengine: QCOM_HIDMA_MGMT depends on HAS_IOMEM
dmaengine: ALTERA_MSGDMA depends on HAS_IOMEM
dmaengine: idxd: Add missing cleanup for early error out in probe call
dmaengine: xilinx: dpdma: Limit descriptor IDs to 16 bits
dmaengine: xilinx: dpdma: Add missing dependencies to Kconfig
dmaengine: stm32-mdma: fix PM reference leak in stm32_mdma_alloc_chan_resourc()
dmaengine: zynqmp_dma: Fix PM reference leak in zynqmp_dma_alloc_chan_resourc()
dmaengine: xilinx: dpdma: initialize registers before request_irq
dmaengine: pl330: fix wrong usage of spinlock flags in dma_cyclc
dmaengine: fsl-dpaa2-qdma: Fix error return code in two functions
dmaengine: idxd: add missing dsa driver unregister
dmaengine: idxd: add engine 'struct device' missing bus type assignment

+122 -26
+2
drivers/dma/Kconfig
··· 59 59 #devices 60 60 config ALTERA_MSGDMA 61 61 tristate "Altera / Intel mSGDMA Engine" 62 + depends on HAS_IOMEM 62 63 select DMA_ENGINE 63 64 help 64 65 Enable support for Altera / Intel mSGDMA controller. ··· 702 701 703 702 config XILINX_ZYNQMP_DPDMA 704 703 tristate "Xilinx DPDMA Engine" 704 + depends on HAS_IOMEM && OF 705 705 select DMA_ENGINE 706 706 select DMA_VIRTUAL_CHANNELS 707 707 help
+3
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
··· 332 332 } 333 333 334 334 if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) { 335 + err = -EINVAL; 335 336 dev_err(dev, "DPDMAI major version mismatch\n" 336 337 "Found %u.%u, supported version is %u.%u\n", 337 338 priv->dpdmai_attr.version.major, ··· 342 341 } 343 342 344 343 if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) { 344 + err = -EINVAL; 345 345 dev_err(dev, "DPDMAI minor version mismatch\n" 346 346 "Found %u.%u, supported version is %u.%u\n", 347 347 priv->dpdmai_attr.version.major, ··· 477 475 ppriv->store = 478 476 dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev); 479 477 if (!ppriv->store) { 478 + err = -ENOMEM; 480 479 dev_err(dev, "dpaa2_io_store_create() failed\n"); 481 480 goto err_store; 482 481 }
+1
drivers/dma/idxd/cdev.c
··· 110 110 pasid = iommu_sva_get_pasid(sva); 111 111 if (pasid == IOMMU_PASID_INVALID) { 112 112 iommu_sva_unbind_device(sva); 113 + rc = -EINVAL; 113 114 goto failed; 114 115 } 115 116
+60 -3
drivers/dma/idxd/init.c
··· 168 168 return rc; 169 169 } 170 170 171 + static void idxd_cleanup_interrupts(struct idxd_device *idxd) 172 + { 173 + struct pci_dev *pdev = idxd->pdev; 174 + struct idxd_irq_entry *irq_entry; 175 + int i, msixcnt; 176 + 177 + msixcnt = pci_msix_vec_count(pdev); 178 + if (msixcnt <= 0) 179 + return; 180 + 181 + irq_entry = &idxd->irq_entries[0]; 182 + free_irq(irq_entry->vector, irq_entry); 183 + 184 + for (i = 1; i < msixcnt; i++) { 185 + 186 + irq_entry = &idxd->irq_entries[i]; 187 + if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) 188 + idxd_device_release_int_handle(idxd, idxd->int_handles[i], 189 + IDXD_IRQ_MSIX); 190 + free_irq(irq_entry->vector, irq_entry); 191 + } 192 + 193 + idxd_mask_error_interrupts(idxd); 194 + pci_free_irq_vectors(pdev); 195 + } 196 + 171 197 static int idxd_setup_wqs(struct idxd_device *idxd) 172 198 { 173 199 struct device *dev = &idxd->pdev->dev; ··· 268 242 engine->idxd = idxd; 269 243 device_initialize(&engine->conf_dev); 270 244 engine->conf_dev.parent = &idxd->conf_dev; 245 + engine->conf_dev.bus = &dsa_bus_type; 271 246 engine->conf_dev.type = &idxd_engine_device_type; 272 247 rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id); 273 248 if (rc < 0) { ··· 328 301 while (--i >= 0) 329 302 put_device(&idxd->groups[i]->conf_dev); 330 303 return rc; 304 + } 305 + 306 + static void idxd_cleanup_internals(struct idxd_device *idxd) 307 + { 308 + int i; 309 + 310 + for (i = 0; i < idxd->max_groups; i++) 311 + put_device(&idxd->groups[i]->conf_dev); 312 + for (i = 0; i < idxd->max_engines; i++) 313 + put_device(&idxd->engines[i]->conf_dev); 314 + for (i = 0; i < idxd->max_wqs; i++) 315 + put_device(&idxd->wqs[i]->conf_dev); 316 + destroy_workqueue(idxd->wq); 331 317 } 332 318 333 319 static int idxd_setup_internals(struct idxd_device *idxd) ··· 571 531 dev_dbg(dev, "Loading RO device config\n"); 572 532 rc = idxd_device_load_config(idxd); 573 533 if (rc < 0) 574 - goto err; 534 + goto err_config; 575 535 } 576 536 577 537 rc = idxd_setup_interrupts(idxd); 578 538 if (rc) 579 - goto err; 539 + goto err_config; 580 540 581 541 dev_dbg(dev, "IDXD interrupt setup complete.\n"); 582 542 ··· 589 549 dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); 590 550 return 0; 591 551 552 + err_config: 553 + idxd_cleanup_internals(idxd); 592 554 err: 593 555 if (device_pasid_enabled(idxd)) 594 556 idxd_disable_system_pasid(idxd); 595 557 iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 596 558 return rc; 559 + } 560 + 561 + static void idxd_cleanup(struct idxd_device *idxd) 562 + { 563 + struct device *dev = &idxd->pdev->dev; 564 + 565 + perfmon_pmu_remove(idxd); 566 + idxd_cleanup_interrupts(idxd); 567 + idxd_cleanup_internals(idxd); 568 + if (device_pasid_enabled(idxd)) 569 + idxd_disable_system_pasid(idxd); 570 + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); 597 571 } 598 572 599 573 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ··· 662 608 rc = idxd_register_devices(idxd); 663 609 if (rc) { 664 610 dev_err(dev, "IDXD sysfs setup failed\n"); 665 - goto err; 611 + goto err_dev_register; 666 612 } 667 613 668 614 idxd->state = IDXD_DEV_CONF_READY; ··· 672 618 673 619 return 0; 674 620 621 + err_dev_register: 622 + idxd_cleanup(idxd); 675 623 err: 676 624 pci_iounmap(pdev, idxd->reg_base); 677 625 err_iomap: ··· 843 787 844 788 static void __exit idxd_exit_module(void) 845 789 { 790 + idxd_unregister_driver(); 846 791 pci_unregister_driver(&idxd_pci_driver); 847 792 idxd_cdev_remove(); 848 793 idxd_unregister_bus_type();
+1 -1
drivers/dma/ipu/ipu_irq.c
··· 230 230 } 231 231 232 232 /** 233 - * ipu_irq_map() - map an IPU interrupt source to an IRQ number 233 + * ipu_irq_unmap() - unmap an IPU interrupt source 234 234 * @source: interrupt source bit position (see ipu_irq_map()) 235 235 * @return: 0 or negative error code 236 236 */
+14 -13
drivers/dma/mediatek/mtk-uart-apdma.c
··· 131 131 132 132 static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd) 133 133 { 134 - struct dma_chan *chan = vd->tx.chan; 135 - struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); 136 - 137 - kfree(c->desc); 134 + kfree(container_of(vd, struct mtk_uart_apdma_desc, vd)); 138 135 } 139 136 140 137 static void mtk_uart_apdma_start_tx(struct mtk_chan *c) ··· 204 207 205 208 static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) 206 209 { 207 - struct mtk_uart_apdma_desc *d = c->desc; 208 - 209 210 mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); 210 211 mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); 211 212 mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); 212 - 213 - list_del(&d->vd.node); 214 - vchan_cookie_complete(&d->vd); 215 213 } 216 214 217 215 static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) ··· 237 245 238 246 c->rx_status = d->avail_len - cnt; 239 247 mtk_uart_apdma_write(c, VFF_RPT, wg); 248 + } 240 249 241 - list_del(&d->vd.node); 242 - vchan_cookie_complete(&d->vd); 250 + static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c) 251 + { 252 + struct mtk_uart_apdma_desc *d = c->desc; 253 + 254 + if (d) { 255 + list_del(&d->vd.node); 256 + vchan_cookie_complete(&d->vd); 257 + c->desc = NULL; 258 + } 243 259 } 244 260 245 261 static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id) ··· 261 261 mtk_uart_apdma_rx_handler(c); 262 262 else if (c->dir == DMA_MEM_TO_DEV) 263 263 mtk_uart_apdma_tx_handler(c); 264 + mtk_uart_apdma_chan_complete_handler(c); 264 265 spin_unlock_irqrestore(&c->vc.lock, flags); 265 266 266 267 return IRQ_HANDLED; ··· 349 348 return NULL; 350 349 351 350 /* Now allocate and setup the descriptor */ 352 - d = kzalloc(sizeof(*d), GFP_ATOMIC); 351 + d = kzalloc(sizeof(*d), GFP_NOWAIT); 353 352 if (!d) 354 353 return NULL; 355 354 ··· 367 366 unsigned long flags; 368 367 369 368 spin_lock_irqsave(&c->vc.lock, flags); 370 - if (vchan_issue_pending(&c->vc)) { 369 + if (vchan_issue_pending(&c->vc) && !c->desc) { 371 370 vd = vchan_next_desc(&c->vc); 372 371 c->desc = to_mtk_uart_apdma_desc(&vd->tx); 373 372
+4 -2
drivers/dma/pl330.c
··· 2694 2694 for (i = 0; i < len / period_len; i++) { 2695 2695 desc = pl330_get_desc(pch); 2696 2696 if (!desc) { 2697 + unsigned long iflags; 2698 + 2697 2699 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", 2698 2700 __func__, __LINE__); 2699 2701 2700 2702 if (!first) 2701 2703 return NULL; 2702 2704 2703 - spin_lock_irqsave(&pl330->pool_lock, flags); 2705 + spin_lock_irqsave(&pl330->pool_lock, iflags); 2704 2706 2705 2707 while (!list_empty(&first->node)) { 2706 2708 desc = list_entry(first->node.next, ··· 2712 2710 2713 2711 list_move_tail(&first->node, &pl330->desc_pool); 2714 2712 2715 - spin_unlock_irqrestore(&pl330->pool_lock, flags); 2713 + spin_unlock_irqrestore(&pl330->pool_lock, iflags); 2716 2714 2717 2715 return NULL; 2718 2716 }
+1
drivers/dma/qcom/Kconfig
··· 33 33 34 34 config QCOM_HIDMA_MGMT 35 35 tristate "Qualcomm Technologies HIDMA Management support" 36 + depends on HAS_IOMEM 36 37 select DMA_ENGINE 37 38 help 38 39 Enable support for the Qualcomm Technologies HIDMA Management.
+1
drivers/dma/sf-pdma/Kconfig
··· 1 1 config SF_PDMA 2 2 tristate "Sifive PDMA controller driver" 3 + depends on HAS_IOMEM 3 4 select DMA_ENGINE 4 5 select DMA_VIRTUAL_CHANNELS 5 6 help
+1 -1
drivers/dma/sh/rcar-dmac.c
··· 1913 1913 1914 1914 /* Enable runtime PM and initialize the device. */ 1915 1915 pm_runtime_enable(&pdev->dev); 1916 - ret = pm_runtime_get_sync(&pdev->dev); 1916 + ret = pm_runtime_resume_and_get(&pdev->dev); 1917 1917 if (ret < 0) { 1918 1918 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 1919 1919 return ret;
+3
drivers/dma/ste_dma40.c
··· 3675 3675 3676 3676 kfree(base->lcla_pool.base_unaligned); 3677 3677 3678 + if (base->lcpa_base) 3679 + iounmap(base->lcpa_base); 3680 + 3678 3681 if (base->phy_lcpa) 3679 3682 release_mem_region(base->phy_lcpa, 3680 3683 base->lcpa_size);
+2 -2
drivers/dma/stm32-mdma.c
··· 1452 1452 return -ENOMEM; 1453 1453 } 1454 1454 1455 - ret = pm_runtime_get_sync(dmadev->ddev.dev); 1455 + ret = pm_runtime_resume_and_get(dmadev->ddev.dev); 1456 1456 if (ret < 0) 1457 1457 return ret; 1458 1458 ··· 1718 1718 u32 ccr, id; 1719 1719 int ret; 1720 1720 1721 - ret = pm_runtime_get_sync(dev); 1721 + ret = pm_runtime_resume_and_get(dev); 1722 1722 if (ret < 0) 1723 1723 return ret; 1724 1724
+28 -3
drivers/dma/xilinx/xilinx_dpdma.c
··· 113 113 #define XILINX_DPDMA_CH_VDO 0x020 114 114 #define XILINX_DPDMA_CH_PYLD_SZ 0x024 115 115 #define XILINX_DPDMA_CH_DESC_ID 0x028 116 + #define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0) 116 117 117 118 /* DPDMA descriptor fields */ 118 119 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5 ··· 867 866 * will be used, but it should be enough. 868 867 */ 869 868 list_for_each_entry(sw_desc, &desc->descriptors, node) 870 - sw_desc->hw.desc_id = desc->vdesc.tx.cookie; 869 + sw_desc->hw.desc_id = desc->vdesc.tx.cookie 870 + & XILINX_DPDMA_CH_DESC_ID_MASK; 871 871 872 872 sw_desc = list_first_entry(&desc->descriptors, 873 873 struct xilinx_dpdma_sw_desc, node); ··· 1088 1086 if (!chan->running || !pending) 1089 1087 goto out; 1090 1088 1091 - desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID); 1089 + desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID) 1090 + & XILINX_DPDMA_CH_DESC_ID_MASK; 1092 1091 1093 1092 /* If the retrigger raced with vsync, retry at the next frame. */ 1094 1093 sw_desc = list_first_entry(&pending->descriptors, ··· 1462 1459 */ 1463 1460 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev) 1464 1461 { 1465 - dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL); 1462 + dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL); 1466 1463 dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL); 1467 1464 } 1468 1465 ··· 1599 1596 return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan); 1600 1597 } 1601 1598 1599 + static void dpdma_hw_init(struct xilinx_dpdma_device *xdev) 1600 + { 1601 + unsigned int i; 1602 + void __iomem *reg; 1603 + 1604 + /* Disable all interrupts */ 1605 + xilinx_dpdma_disable_irq(xdev); 1606 + 1607 + /* Stop all channels */ 1608 + for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) { 1609 + reg = xdev->reg + XILINX_DPDMA_CH_BASE 1610 + + XILINX_DPDMA_CH_OFFSET * i; 1611 + dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE); 1612 + } 1613 + 1614 + /* Clear the interrupt status registers */ 1615 + dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL); 1616 + dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL); 1617 + } 1618 + 1602 1619 static int xilinx_dpdma_probe(struct platform_device *pdev) 1603 1620 { 1604 1621 struct xilinx_dpdma_device *xdev; ··· 1644 1621 xdev->reg = devm_platform_ioremap_resource(pdev, 0); 1645 1622 if (IS_ERR(xdev->reg)) 1646 1623 return PTR_ERR(xdev->reg); 1624 + 1625 + dpdma_hw_init(xdev); 1647 1626 1648 1627 xdev->irq = platform_get_irq(pdev, 0); 1649 1628 if (xdev->irq < 0) {
+1 -1
drivers/dma/xilinx/zynqmp_dma.c
··· 468 468 struct zynqmp_dma_desc_sw *desc; 469 469 int i, ret; 470 470 471 - ret = pm_runtime_get_sync(chan->dev); 471 + ret = pm_runtime_resume_and_get(chan->dev); 472 472 if (ret < 0) 473 473 return ret; 474 474