mmc: tmio-mmc: Improve DMA stability on sh-mobile

On some SDHI tmio implementations the DMA-complete and command-completion
interrupts can arrive in reversed order, which leads to malfunction. This
patch postpones DMA activation until the MMC command-completion IRQ fires.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Chris Ball <cjb@laptop.org>

Authored by Guennadi Liakhovetski; committed by Chris Ball (commits 51fc7b2c, 4f665cb6)

+34 -29
drivers/mmc/host/tmio_mmc.c
···
 485  485 		unsigned int count;
 486  486 		unsigned long flags;
 487  487 
 488      -		if (!data) {
      488 +		if (host->chan_tx || host->chan_rx) {
      489 +			pr_err("PIO IRQ in DMA mode!\n");
      490 +			return;
      491 +		} else if (!data) {
 489  492 			pr_debug("Spurious PIO IRQ\n");
 490  493 			return;
 491  494 		}
···
 651  648 	if (host->data->flags & MMC_DATA_READ) {
 652  649 		if (!host->chan_rx)
 653  650 			enable_mmc_irqs(host, TMIO_MASK_READOP);
      651 +		else
      652 +			tasklet_schedule(&host->dma_issue);
 654  653 	} else {
 655  654 		if (!host->chan_tx)
 656  655 			enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
···
 784  779 #endif
 785  780 }
 786  781 
 787      - static void tmio_dma_complete(void *arg)
 788      - {
 789      - 	struct tmio_mmc_host *host = arg;
 790      - 
 791      - 	dev_dbg(&host->pdev->dev, "Command completed\n");
 792      - 
 793      - 	if (!host->data)
 794      - 		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
 795      - 	else
 796      - 		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 797      - }
 798      - 
 799  782 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 800  783 {
 801  784 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
···
 810  817 		goto pio;
 811  818 	}
 812  819 
      820 +	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
      821 +
 813  822 	/* The only sg element can be unaligned, use our bounce buffer then */
 814  823 	if (!aligned) {
 815  824 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
···
 822  827 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 823  828 	if (ret > 0)
 824  829 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
 825      - 			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
      830 +			DMA_FROM_DEVICE, DMA_CTRL_ACK);
 826  831 
 827      - 	if (desc) {
 828      - 		desc->callback = tmio_dma_complete;
 829      - 		desc->callback_param = host;
      832 +	if (desc)
 830  833 		cookie = dmaengine_submit(desc);
 831      - 		dma_async_issue_pending(chan);
 832      - 	}
      834 +
 833  835 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 834  836 		__func__, host->sg_len, ret, cookie, host->mrq);
 835  837 
···
 878  886 		goto pio;
 879  887 	}
 880  888 
      889 +	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
      890 +
 881  891 	/* The only sg element can be unaligned, use our bounce buffer then */
 882  892 	if (!aligned) {
 883  893 		unsigned long flags;
···
 894  900 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 895  901 	if (ret > 0)
 896  902 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
 897      - 			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
      903 +			DMA_TO_DEVICE, DMA_CTRL_ACK);
 898  904 
 899      - 	if (desc) {
 900      - 		desc->callback = tmio_dma_complete;
 901      - 		desc->callback_param = host;
      905 +	if (desc)
 902  906 		cookie = dmaengine_submit(desc);
 903      - 	}
      907 +
 904  908 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 905  909 		__func__, host->sg_len, ret, cookie, host->mrq);
 906  910 
···
 939  947 static void tmio_issue_tasklet_fn(unsigned long priv)
 940  948 {
 941  949 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
 942      - 	struct dma_chan *chan = host->chan_tx;
      950 +	struct dma_chan *chan = NULL;
 943  951 
 944      - 	dma_async_issue_pending(chan);
      952 +	spin_lock_irq(&host->lock);
      953 +
      954 +	if (host && host->data) {
      955 +		if (host->data->flags & MMC_DATA_READ)
      956 +			chan = host->chan_rx;
      957 +		else
      958 +			chan = host->chan_tx;
      959 +	}
      960 +
      961 +	spin_unlock_irq(&host->lock);
      962 +
      963 +	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
      964 +
      965 +	if (chan)
      966 +		dma_async_issue_pending(chan);
 945  967 }
 946  968 
 947  969 static void tmio_tasklet_fn(unsigned long arg)
 948  970 {
 949  971 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
 950      - 	unsigned long flags;
 951  972 
 952      - 	spin_lock_irqsave(&host->lock, flags);
      973 +	spin_lock_irq(&host->lock);
 953  974 
 954  975 	if (!host->data)
 955  976 		goto out;
···
 978  973 
 979  974 	tmio_mmc_do_data_irq(host);
 980  975 out:
 981      - 	spin_unlock_irqrestore(&host->lock, flags);
      976 +	spin_unlock_irq(&host->lock);
 982  977 }
 983  978 
 984  979 /* It might be necessary to make filter MFD specific */