mmc: tmio-mmc: Improve DMA stability on sh-mobile

On some SDHI tmio implementations the DMA and command-completion interrupts
can arrive in the opposite order, which leads to malfunctions. This patch
postpones DMA activation until the MMC command-completion IRQ has been handled.
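
Condensed from the diff below: descriptors are still prepared and submitted in
tmio_mmc_start_dma_rx()/_tx(), but the transfer is only kicked off later from
the dma_issue tasklet, which now serves both directions and is scheduled once
the command-completion interrupt has been seen. Roughly:

/* Runs from the dma_issue tasklet, scheduled by the command-completion IRQ */
static void tmio_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	/* Pick the channel that matches the direction of the current request */
	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	/* Only now unmask DATAEND and start the previously submitted descriptor */
	enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}

The removed tmio_dma_complete() callback used to re-enable TMIO_STAT_DATAEND
from the DMA completion path; moving that into the tasklet, together with
masking RXRDY/TXRQ while a DMA channel owns the transfer, keeps the data-end
interrupt from being unmasked before the command has completed.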

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Chris Ball <cjb@laptop.org>

+34 -29
drivers/mmc/host/tmio_mmc.c
···
 	unsigned int count;
 	unsigned long flags;
 
-	if (!data) {
+	if (host->chan_tx || host->chan_rx) {
+		pr_err("PIO IRQ in DMA mode!\n");
+		return;
+	} else if (!data) {
 		pr_debug("Spurious PIO IRQ\n");
 		return;
 	}
···
 	if (host->data->flags & MMC_DATA_READ) {
 		if (!host->chan_rx)
 			enable_mmc_irqs(host, TMIO_MASK_READOP);
+		else
+			tasklet_schedule(&host->dma_issue);
 	} else {
 		if (!host->chan_tx)
 			enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
···
 #endif
 }
 
-static void tmio_dma_complete(void *arg)
-{
-	struct tmio_mmc_host *host = arg;
-
-	dev_dbg(&host->pdev->dev, "Command completed\n");
-
-	if (!host->data)
-		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
-	else
-		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
-}
-
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
···
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
···
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_FROM_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-		dma_async_issue_pending(chan);
-	}
+
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
···
 		goto pio;
 	}
 
+	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
+
 	/* The only sg element can be unaligned, use our bounce buffer then */
 	if (!aligned) {
 		unsigned long flags;
···
 	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0)
 		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+			DMA_TO_DEVICE, DMA_CTRL_ACK);
 
-	if (desc) {
-		desc->callback = tmio_dma_complete;
-		desc->callback_param = host;
+	if (desc)
 		cookie = dmaengine_submit(desc);
-	}
+
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
 		__func__, host->sg_len, ret, cookie, host->mrq);
···
 static void tmio_issue_tasklet_fn(unsigned long priv)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
-	struct dma_chan *chan = host->chan_tx;
+	struct dma_chan *chan = NULL;
 
-	dma_async_issue_pending(chan);
+	spin_lock_irq(&host->lock);
+
+	if (host && host->data) {
+		if (host->data->flags & MMC_DATA_READ)
+			chan = host->chan_rx;
+		else
+			chan = host->chan_tx;
+	}
+
+	spin_unlock_irq(&host->lock);
+
+	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+
+	if (chan)
+		dma_async_issue_pending(chan);
 }
 
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-	unsigned long flags;
 
-	spin_lock_irqsave(&host->lock, flags);
+	spin_lock_irq(&host->lock);
 
 	if (!host->data)
 		goto out;
···
 
 	tmio_mmc_do_data_irq(host);
 out:
-	spin_unlock_irqrestore(&host->lock, flags);
+	spin_unlock_irq(&host->lock);
 }
 
 /* It might be necessary to make filter MFD specific */