Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: xilinx: xdma: Fix synchronization issue

The current xdma_synchronize method does not properly wait for the last
transfer to be done. Due to limitations of the XDMA engine, it is not
possible to stop a transfer in the middle of a descriptor. Said
otherwise, if a stop is requested at the end of descriptor "N" and the OS
is fast enough, the DMA controller will effectively stop immediately.
However, if the OS is slightly too slow to request the stop and the DMA
engine starts descriptor "N+1", the N+1 transfer will be performed until
its end. This means that after a terminate_all, the last descriptor must
remain valid and the synchronization must wait for this last descriptor to
be terminated.

Fixes: 855c2e1d1842 ("dmaengine: xilinx: xdma: Rework xdma_terminate_all()")
Fixes: f5c392d106e7 ("dmaengine: xilinx: xdma: Add terminate_all/synchronize callbacks")
Cc: stable@vger.kernel.org
Suggested-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Louis Chauvet <louis.chauvet@bootlin.com>
Link: https://lore.kernel.org/r/20240327-digigram-xdma-fixes-v1-2-45f4a52c0283@bootlin.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Louis Chauvet and committed by
Vinod Koul
6a40fb82 5b9706bf

+21 -8
+3
drivers/dma/xilinx/xdma-regs.h
···
117  117       CHAN_CTRL_IE_WRITE_ERROR | \
118  118       CHAN_CTRL_IE_DESC_ERROR)
119  119
     120   +   /* bits of the channel status register */
     121   +   #define XDMA_CHAN_STATUS_BUSY  BIT(0)
     122   +
120  123   #define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
121  124
122  125   #define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+18 -8
drivers/dma/xilinx/xdma.c
···
 71   71       enum dma_transfer_direction dir;
 72   72       struct dma_slave_config cfg;
 73   73       u32 irq;
      74   +   struct completion last_interrupt;
      75   +   bool stop_requested;
 74   76   };
 75   77
 76   78   /**
···
378  376           return ret;
379  377
380  378       xchan->busy = true;
     379   +   xchan->stop_requested = false;
     380   +   reinit_completion(&xchan->last_interrupt);
381  381
382  382       return 0;
383  383   }
···
391  387   static int xdma_xfer_stop(struct xdma_chan *xchan)
392  388   {
393  389       int ret;
394        -   u32 val;
395  390       struct xdma_device *xdev = xchan->xdev_hdl;
396  391
397  392       /* clear run stop bit to prevent any further auto-triggering */
···
398  395                    CHAN_CTRL_RUN_STOP);
399  396       if (ret)
400  397           return ret;
401        -
402        -   /* Clear the channel status register */
403        -   ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
404        -   if (ret)
405        -       return ret;
406        -
407        -   return 0;
     398   +   return ret;
408  399   }
···
471  474       xchan->xdev_hdl = xdev;
472  475       xchan->base = base + i * XDMA_CHAN_STRIDE;
473  476       xchan->dir = dir;
     477   +   xchan->stop_requested = false;
     478   +   init_completion(&xchan->last_interrupt);
474  479
475  480       ret = xdma_channel_init(xchan);
476  481       if (ret)
···
520  521       spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
521  522
522  523       xdma_chan->busy = false;
     524   +   xdma_chan->stop_requested = true;
523  525       vd = vchan_next_desc(&xdma_chan->vchan);
524  526       if (vd) {
525  527           list_del(&vd->node);
···
542  542   static void xdma_synchronize(struct dma_chan *chan)
543  543   {
544  544       struct xdma_chan *xdma_chan = to_xdma_chan(chan);
     545   +   struct xdma_device *xdev = xdma_chan->xdev_hdl;
     546   +   int st = 0;
     547   +
     548   +   /* If the engine continues running, wait for the last interrupt */
     549   +   regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
     550   +   if (st & XDMA_CHAN_STATUS_BUSY)
     551   +       wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
545  552
546  553       vchan_synchronize(&xdma_chan->vchan);
547  554   }
···
882  875       int ret;
883  876       u32 st;
884  877       bool repeat_tx;
     878   +
     879   +   if (xchan->stop_requested)
     880   +       complete(&xchan->last_interrupt);
885  881
886  882       spin_lock(&xchan->vchan.lock);
887  883