[ARM SMP] Use event instructions for spinlocks

ARMv6K CPUs have SEV (send event) and WFE (wait for event) instructions,
which allow the CPU clock to be suspended until another CPU issues a
SEV, rather than spinning on the lock and wasting power. Make use of
these instructions.
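
For illustration, a minimal standalone sketch of the technique, assuming
an ARMv6K (or later) target and GCC-style inline assembly; the type and
function names are made up for this sketch, and it is not the kernel
source itself:

/*
 * Sketch of an ldrex/strex spinlock using WFE/SEV, modelled on the
 * code this patch touches.
 */
typedef struct { volatile unsigned int lock; } sketch_spinlock_t;

static inline void sketch_spin_lock(sketch_spinlock_t *l)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* load-exclusive the lock word */
"	teq	%0, #0\n"		/* already held? */
"	wfene\n"			/* if held, sleep until a SEV */
"	strexeq	%0, %2, [%1]\n"		/* free: try to claim it */
"	teqeq	%0, #0\n"		/* did the strex succeed? */
"	bne	1b"			/* held, or strex failed: retry */
	: "=&r" (tmp)
	: "r" (&l->lock), "r" (1)
	: "cc", "memory");
}

static inline void sketch_spin_unlock(sketch_spinlock_t *l)
{
	__asm__ __volatile__(
"	str	%1, [%0]\n"		/* release the lock */
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB: order the store first */
"	sev"				/* wake CPUs waiting in WFE */
	:
	: "r" (&l->lock), "r" (0)
	: "cc", "memory");
}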

Note that WFE does not wait if an event has been sent since the last WFE
cleared the event status, so although the test-then-wait sequence may
look racy, the event register ensures that a SEV arriving between the
lock test and the WFE is not lost.
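
A hypothetical interleaving makes the point concrete:

/*
 * Why the window between the lock test and the WFE is safe:
 *
 *   CPU0 (owner)                      CPU1 (waiter)
 *   ------------                      -------------
 *                                     ldrex/teq  -> sees lock held
 *   str  (releases the lock)
 *   dsb; sev  (sets CPU1's event
 *              register)
 *                                     wfene      -> event register is
 *                                                   already set, so WFE
 *                                                   returns immediately
 *                                     retries, acquires the lock
 *
 * The SEV latches into the waiter's event register even before the
 * waiter reaches its WFE, so the wakeup cannot be lost.
 */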

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored and committed by Russell King (00b4c907 df2f5e72)

+24 -2
include/asm-arm/spinlock.h
···
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
···
 	smp_mb();

 	__asm__ __volatile__(
-"	str	%1, [%0]"
+"	str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev"
+#endif
 	:
 	: "r" (&lock->lock), "r" (0)
 	: "cc");
···
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfene\n"
+#endif
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
···
 	smp_mb();

 	__asm__ __volatile__(
-	"str	%1, [%0]"
+	"str	%1, [%0]\n"
+#ifdef CONFIG_CPU_32v6K
+"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
+"	sev\n"
+#endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
···
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
+#ifdef CONFIG_CPU_32v6K
+"	wfemi\n"
+#endif
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
···
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
+#ifdef CONFIG_CPU_32v6K
+"\n	cmp	%0, #0\n"
+"	mcreq	p15, 0, %0, c7, c10, 4\n"
+"	seveq"
+#endif
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
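
For the read-side hunks above, a corresponding sketch under the same
assumptions (ARMv6K+, GCC inline assembly, hypothetical names). A waiting
reader sleeps in WFE while the lock is write-locked, and the read unlock
only issues SEV when the count returns to zero, since that is the only
transition a waiting writer cares about; signalling on every decrement
would just cause spurious wakeups.

/*
 * Read-side counterpart, matching the last two hunks: the lock word
 * counts readers, and a writer sets the sign bit, so "mi" (minus)
 * means "write-locked". Not the kernel source.
 */
typedef struct { volatile unsigned int lock; } sketch_rwlock_t;

static inline void sketch_read_lock(sketch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* load the reader count */
"	adds	%0, %0, #1\n"		/* one more reader; N set if write-locked */
"	strexpl	%1, %0, [%2]\n"		/* store only if count stayed >= 0 */
"	wfemi\n"			/* write-locked: sleep until SEV */
"	rsbpls	%0, %1, #0\n"		/* fold in the strex result */
"	bmi	1b"			/* retry on strex failure or write lock */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}

static inline void sketch_read_unlock(sketch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"		/* one reader fewer */
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b\n"			/* retry until the strex sticks */
"	cmp	%0, #0\n"		/* last reader out? */
"	mcreq	p15, 0, %0, c7, c10, 4\n" /* DSB */
"	seveq"				/* then wake a waiting writer */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc", "memory");
}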