MIPS: Lantiq: Lock DMA register accesses for SMP

The DMA controller's channel and port configuration is changed by
selecting the port or channel in one register and then updating the
configuration through other registers. This has to be done as one
atomic operation. Previously only the local interrupts were
deactivated, which is sufficient on single-CPU systems. On SMP systems
stronger locking is needed; use spinlocks instead.

On more recent SoCs (at least xrx200 and later) there are two memory
regions for changing the configuration; there one region could be used
per CPU so the CPUs would not have to synchronize with each other any
more.
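
To make the race concrete, here is a minimal sketch of the
select-then-configure sequence this patch serializes. The register
helpers and macros (ltq_dma_w32, ltq_dma_w32_mask, LTQ_DMA_CS,
LTQ_DMA_CCTRL, DMA_CHAN_ON) are the ones from dma.c; the wrapper
function itself is illustrative only:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(ltq_dma_lock);

	/* Illustrative sketch: LTQ_DMA_CS selects which channel the
	 * following register writes apply to. Without the lock, another
	 * CPU could rewrite LTQ_DMA_CS between the two writes and the
	 * configuration would land on the wrong channel. Disabling only
	 * local interrupts cannot prevent that on SMP.
	 */
	static void example_chan_on(struct ltq_dma_channel *ch)
	{
		unsigned long flags;

		spin_lock_irqsave(&ltq_dma_lock, flags);
		ltq_dma_w32(ch->nr, LTQ_DMA_CS);		/* select channel */
		ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);	/* configure it */
		spin_unlock_irqrestore(&ltq_dma_lock, flags);
	}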

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Cc: john@phrozen.org
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14912/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by Hauke Mehrtens, committed by Ralf Baechle
98e58b01 b3d91db3

+20 -18
arch/mips/lantiq/xway/dma.c
···
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
···
 						ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
···
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
···
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
···
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
···
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
···
 			    &ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
···
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
···
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
···
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
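
A subtlety in the diff above: ltq_dma_open() and ltq_dma_close()
previously called ltq_dma_enable_irq()/ltq_dma_disable_irq() from
inside their critical sections. After the conversion those helpers take
ltq_dma_lock themselves, and Linux spinlocks are not recursive, so the
patch open-codes the LTQ_DMA_IRNEN update instead. A hypothetical
sketch (not part of the patch) of the self-deadlock the open-coding
avoids:

	/* Hypothetical, broken variant of ltq_dma_open(): it already
	 * holds ltq_dma_lock when it calls ltq_dma_enable_irq(), which
	 * tries to take the same non-recursive lock, so this CPU would
	 * spin on itself forever.
	 */
	static void broken_dma_open(struct ltq_dma_channel *ch)
	{
		unsigned long flag;

		spin_lock_irqsave(&ltq_dma_lock, flag);
		ltq_dma_w32(ch->nr, LTQ_DMA_CS);
		ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
		ltq_dma_enable_irq(ch);	/* deadlock: ltq_dma_lock already held */
		spin_unlock_irqrestore(&ltq_dma_lock, flag);
	}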