Merge tag 'dmaengine-fix-5.0-rc6' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
- Fix in at_xdmac for wrongful channel state
- Fix for imx driver for wrong callback invocation
- Fix to bcm driver for interrupt race & transaction abort
- Fix in dmatest to abort in case of mapping error

* tag 'dmaengine-fix-5.0-rc6' of git://git.infradead.org/users/vkoul/slave-dma:
dmaengine: dmatest: Abort test in case of mapping error
dmaengine: bcm2835: Fix abort of transactions
dmaengine: bcm2835: Fix interrupt race on RT
dmaengine: imx-dma: fix wrong callback invoke
dmaengine: at_xdmac: Fix wrongfull report of a channel as in use

Changed files
+53 -76
drivers
+10 -9
drivers/dma/at_xdmac.c
··· 203 203 u32 save_cim; 204 204 u32 save_cnda; 205 205 u32 save_cndc; 206 + u32 irq_status; 206 207 unsigned long status; 207 208 struct tasklet_struct tasklet; 208 209 struct dma_slave_config sconfig; ··· 1581 1580 struct at_xdmac_desc *desc; 1582 1581 u32 error_mask; 1583 1582 1584 - dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", 1585 - __func__, atchan->status); 1583 + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", 1584 + __func__, atchan->irq_status); 1586 1585 1587 1586 error_mask = AT_XDMAC_CIS_RBEIS 1588 1587 | AT_XDMAC_CIS_WBEIS ··· 1590 1589 1591 1590 if (at_xdmac_chan_is_cyclic(atchan)) { 1592 1591 at_xdmac_handle_cyclic(atchan); 1593 - } else if ((atchan->status & AT_XDMAC_CIS_LIS) 1594 - || (atchan->status & error_mask)) { 1592 + } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) 1593 + || (atchan->irq_status & error_mask)) { 1595 1594 struct dma_async_tx_descriptor *txd; 1596 1595 1597 - if (atchan->status & AT_XDMAC_CIS_RBEIS) 1596 + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) 1598 1597 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); 1599 - if (atchan->status & AT_XDMAC_CIS_WBEIS) 1598 + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) 1600 1599 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); 1601 - if (atchan->status & AT_XDMAC_CIS_ROIS) 1600 + if (atchan->irq_status & AT_XDMAC_CIS_ROIS) 1602 1601 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); 1603 1602 1604 1603 spin_lock(&atchan->lock); ··· 1653 1652 atchan = &atxdmac->chan[i]; 1654 1653 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); 1655 1654 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); 1656 - atchan->status = chan_status & chan_imr; 1655 + atchan->irq_status = chan_status & chan_imr; 1657 1656 dev_vdbg(atxdmac->dma.dev, 1658 1657 "%s: chan%d: imr=0x%x, status=0x%x\n", 1659 1658 __func__, i, chan_imr, chan_status); ··· 1667 1666 at_xdmac_chan_read(atchan, AT_XDMAC_CDA), 1668 1667 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); 1669 1668 
1670 - if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) 1669 + if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) 1671 1670 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); 1672 1671 1673 1672 tasklet_schedule(&atchan->tasklet);
+25 -45
drivers/dma/bcm2835-dma.c
··· 406 406 } 407 407 } 408 408 409 - static int bcm2835_dma_abort(void __iomem *chan_base) 409 + static int bcm2835_dma_abort(struct bcm2835_chan *c) 410 410 { 411 - unsigned long cs; 411 + void __iomem *chan_base = c->chan_base; 412 412 long int timeout = 10000; 413 413 414 - cs = readl(chan_base + BCM2835_DMA_CS); 415 - if (!(cs & BCM2835_DMA_ACTIVE)) 414 + /* 415 + * A zero control block address means the channel is idle. 416 + * (The ACTIVE flag in the CS register is not a reliable indicator.) 417 + */ 418 + if (!readl(chan_base + BCM2835_DMA_ADDR)) 416 419 return 0; 417 420 418 421 /* Write 0 to the active bit - Pause the DMA */ 419 422 writel(0, chan_base + BCM2835_DMA_CS); 420 423 421 424 /* Wait for any current AXI transfer to complete */ 422 - while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { 425 + while ((readl(chan_base + BCM2835_DMA_CS) & 426 + BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) 423 427 cpu_relax(); 424 - cs = readl(chan_base + BCM2835_DMA_CS); 425 - } 426 428 427 - /* We'll un-pause when we set of our next DMA */ 429 + /* Peripheral might be stuck and fail to signal AXI write responses */ 428 430 if (!timeout) 429 - return -ETIMEDOUT; 431 + dev_err(c->vc.chan.device->dev, 432 + "failed to complete outstanding writes\n"); 430 433 431 - if (!(cs & BCM2835_DMA_ACTIVE)) 432 - return 0; 433 - 434 - /* Terminate the control block chain */ 435 - writel(0, chan_base + BCM2835_DMA_NEXTCB); 436 - 437 - /* Abort the whole DMA */ 438 - writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, 439 - chan_base + BCM2835_DMA_CS); 440 - 434 + writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); 441 435 return 0; 442 436 } 443 437 ··· 470 476 471 477 spin_lock_irqsave(&c->vc.lock, flags); 472 478 473 - /* Acknowledge interrupt */ 474 - writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); 479 + /* 480 + * Clear the INT flag to receive further interrupts. 
Keep the channel 481 + * active in case the descriptor is cyclic or in case the client has 482 + * already terminated the descriptor and issued a new one. (May happen 483 + * if this IRQ handler is threaded.) If the channel is finished, it 484 + * will remain idle despite the ACTIVE flag being set. 485 + */ 486 + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, 487 + c->chan_base + BCM2835_DMA_CS); 475 488 476 489 d = c->desc; 477 490 ··· 486 485 if (d->cyclic) { 487 486 /* call the cyclic callback */ 488 487 vchan_cyclic_callback(&d->vd); 489 - 490 - /* Keep the DMA engine running */ 491 - writel(BCM2835_DMA_ACTIVE, 492 - c->chan_base + BCM2835_DMA_CS); 493 - } else { 488 + } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { 494 489 vchan_cookie_complete(&c->desc->vd); 495 490 bcm2835_dma_start_desc(c); 496 491 } ··· 776 779 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 777 780 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); 778 781 unsigned long flags; 779 - int timeout = 10000; 780 782 LIST_HEAD(head); 781 783 782 784 spin_lock_irqsave(&c->vc.lock, flags); ··· 785 789 list_del_init(&c->node); 786 790 spin_unlock(&d->lock); 787 791 788 - /* 789 - * Stop DMA activity: we assume the callback will not be called 790 - * after bcm_dma_abort() returns (even if it does, it will see 791 - * c->desc is NULL and exit.) 792 - */ 792 + /* stop DMA activity */ 793 793 if (c->desc) { 794 794 vchan_terminate_vdesc(&c->desc->vd); 795 795 c->desc = NULL; 796 - bcm2835_dma_abort(c->chan_base); 797 - 798 - /* Wait for stopping */ 799 - while (--timeout) { 800 - if (!(readl(c->chan_base + BCM2835_DMA_CS) & 801 - BCM2835_DMA_ACTIVE)) 802 - break; 803 - 804 - cpu_relax(); 805 - } 806 - 807 - if (!timeout) 808 - dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); 796 + bcm2835_dma_abort(c); 809 797 } 810 798 811 799 vchan_get_all_descriptors(&c->vc, &head);
+14 -18
drivers/dma/dmatest.c
··· 711 711 srcs[i] = um->addr[i] + src_off; 712 712 ret = dma_mapping_error(dev->dev, um->addr[i]); 713 713 if (ret) { 714 - dmaengine_unmap_put(um); 715 714 result("src mapping error", total_tests, 716 715 src_off, dst_off, len, ret); 717 - failed_tests++; 718 - continue; 716 + goto error_unmap_continue; 719 717 } 720 718 um->to_cnt++; 721 719 } ··· 728 730 DMA_BIDIRECTIONAL); 729 731 ret = dma_mapping_error(dev->dev, dsts[i]); 730 732 if (ret) { 731 - dmaengine_unmap_put(um); 732 733 result("dst mapping error", total_tests, 733 734 src_off, dst_off, len, ret); 734 - failed_tests++; 735 - continue; 735 + goto error_unmap_continue; 736 736 } 737 737 um->bidi_cnt++; 738 738 } ··· 758 762 } 759 763 760 764 if (!tx) { 761 - dmaengine_unmap_put(um); 762 765 result("prep error", total_tests, src_off, 763 766 dst_off, len, ret); 764 767 msleep(100); 765 - failed_tests++; 766 - continue; 768 + goto error_unmap_continue; 767 769 } 768 770 769 771 done->done = false; ··· 770 776 cookie = tx->tx_submit(tx); 771 777 772 778 if (dma_submit_error(cookie)) { 773 - dmaengine_unmap_put(um); 774 779 result("submit error", total_tests, src_off, 775 780 dst_off, len, ret); 776 781 msleep(100); 777 - failed_tests++; 778 - continue; 782 + goto error_unmap_continue; 779 783 } 780 784 dma_async_issue_pending(chan); 781 785 ··· 782 790 783 791 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 784 792 785 - dmaengine_unmap_put(um); 786 - 787 793 if (!done->done) { 788 794 result("test timed out", total_tests, src_off, dst_off, 789 795 len, 0); 790 - failed_tests++; 791 - continue; 796 + goto error_unmap_continue; 792 797 } else if (status != DMA_COMPLETE) { 793 798 result(status == DMA_ERROR ? 
794 799 "completion error status" : 795 800 "completion busy status", total_tests, src_off, 796 801 dst_off, len, ret); 797 - failed_tests++; 798 - continue; 802 + goto error_unmap_continue; 799 803 } 804 + 805 + dmaengine_unmap_put(um); 800 806 801 807 if (params->noverify) { 802 808 verbose_result("test passed", total_tests, src_off, ··· 836 846 verbose_result("test passed", total_tests, src_off, 837 847 dst_off, len, 0); 838 848 } 849 + 850 + continue; 851 + 852 + error_unmap_continue: 853 + dmaengine_unmap_put(um); 854 + failed_tests++; 839 855 } 840 856 ktime = ktime_sub(ktime_get(), ktime); 841 857 ktime = ktime_sub(ktime, comparetime);
+4 -4
drivers/dma/imx-dma.c
··· 618 618 { 619 619 struct imxdma_channel *imxdmac = (void *)data; 620 620 struct imxdma_engine *imxdma = imxdmac->imxdma; 621 - struct imxdma_desc *desc; 621 + struct imxdma_desc *desc, *next_desc; 622 622 unsigned long flags; 623 623 624 624 spin_lock_irqsave(&imxdma->lock, flags); ··· 648 648 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); 649 649 650 650 if (!list_empty(&imxdmac->ld_queue)) { 651 - desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, 652 - node); 651 + next_desc = list_first_entry(&imxdmac->ld_queue, 652 + struct imxdma_desc, node); 653 653 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); 654 - if (imxdma_xfer_desc(desc) < 0) 654 + if (imxdma_xfer_desc(next_desc) < 0) 655 655 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", 656 656 __func__, imxdmac->channel); 657 657 }