Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma: pl330: Fix cyclic transfers

Allocate a descriptor for each period of a cyclic transfer, not just the first.
Also since the callback needs to be called for each finished period make sure to
initialize the callback and callback_param fields of each descriptor in a cyclic
transfer.

Cc: stable@vger.kernel.org
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by Lars-Peter Clausen; committed by Vinod Koul.
Commit fc514460 (parent 27abb2ff).

+67 -26
drivers/dma/pl330.c
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2505,6 +2505,10 @@
 	/* Assign cookies to all nodes */
 	while (!list_empty(&last->node)) {
 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+		if (pch->cyclic) {
+			desc->txd.callback = last->txd.callback;
+			desc->txd.callback_param = last->txd.callback_param;
+		}
 
 		dma_cookie_assign(&desc->txd);
 
@@ -2692,45 +2696,82 @@
 		size_t period_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context)
 {
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc = NULL, *first = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	unsigned int i;
 	dma_addr_t dst;
 	dma_addr_t src;
 
-	desc = pl330_get_desc(pch);
-	if (!desc) {
-		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-			__func__, __LINE__);
+	if (len % period_len != 0)
 		return NULL;
-	}
 
-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		desc->rqcfg.src_inc = 1;
-		desc->rqcfg.dst_inc = 0;
-		desc->req.rqtype = MEMTODEV;
-		src = dma_addr;
-		dst = pch->fifo_addr;
-		break;
-	case DMA_DEV_TO_MEM:
-		desc->rqcfg.src_inc = 0;
-		desc->rqcfg.dst_inc = 1;
-		desc->req.rqtype = DEVTOMEM;
-		src = pch->fifo_addr;
-		dst = dma_addr;
-		break;
-	default:
+	if (!is_slave_direction(direction)) {
 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
 			__func__, __LINE__);
 		return NULL;
 	}
 
-	desc->rqcfg.brst_size = pch->burst_sz;
-	desc->rqcfg.brst_len = 1;
+	for (i = 0; i < len / period_len; i++) {
+		desc = pl330_get_desc(pch);
+		if (!desc) {
+			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+				__func__, __LINE__);
+
+			if (!first)
+				return NULL;
+
+			spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+			while (!list_empty(&first->node)) {
+				desc = list_entry(first->node.next,
+						struct dma_pl330_desc, node);
+				list_move_tail(&desc->node, &pdmac->desc_pool);
+			}
+
+			list_move_tail(&first->node, &pdmac->desc_pool);
+
+			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+			return NULL;
+		}
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->rqcfg.src_inc = 1;
+			desc->rqcfg.dst_inc = 0;
+			desc->req.rqtype = MEMTODEV;
+			src = dma_addr;
+			dst = pch->fifo_addr;
+			break;
+		case DMA_DEV_TO_MEM:
+			desc->rqcfg.src_inc = 0;
+			desc->rqcfg.dst_inc = 1;
+			desc->req.rqtype = DEVTOMEM;
+			src = pch->fifo_addr;
+			dst = dma_addr;
+			break;
+		default:
+			break;
+		}
+
+		desc->rqcfg.brst_size = pch->burst_sz;
+		desc->rqcfg.brst_len = 1;
+		fill_px(&desc->px, dst, src, period_len);
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->node);
+
+		dma_addr += period_len;
+	}
+
+	if (!desc)
+		return NULL;
 
 	pch->cyclic = true;
-
-	fill_px(&desc->px, dst, src, period_len);
+	desc->txd.flags = flags;
 
 	return &desc->txd;
 }