Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Fix microMIPS LL/SC immediate offsets

In the microMIPS encoding some memory access instructions have their
immediate offset reduced to 12 bits only. That does not match the GCC
`R' constraint we use in some places to satisfy the requirement,
resulting in build failures like this:

{standard input}: Assembler messages:
{standard input}:720: Error: macro used $at after ".set noat"
{standard input}:720: Warning: macro instruction expanded into multiple instructions

Fix the problem by defining a macro, `GCC_OFF12_ASM', that expands to
the right constraint depending on whether microMIPS or standard MIPS
code is produced. Also apply the fix to where `m' is used as in the
worst case this change does nothing, e.g. where the pointer was already
in a register such as a function argument and no further offset was
requested, and in the best case it avoids an extraneous sequence of up
to two instructions to load the high 20 bits of the address in the LL/SC
loop. This reduces the risk of lock contention, which increases with the
number of instructions in the critical section between LL and SC.

Strictly speaking we could just bulk-replace `R' with `ZC' as the latter
constraint adjusts automatically depending on the ISA selected.
However it was only introduced with GCC 4.9 and we keep supporting older
compilers for the standard MIPS configuration, hence the slightly more
complicated approach I chose.

The choice of a zero-argument function-like rather than an object-like
macro was made so that it does not look like a function call taking the
C expression used for the constraint as an argument. This is so as not
to confuse the reader or formatting checkers like `checkpatch.pl' and
follows previous practice.

Signed-off-by: Maciej W. Rozycki <macro@codesourcery.com>
Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8482/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Maciej W. Rozycki and committed by
Ralf Baechle
b0984c43 aec711d5

+126 -91
+24 -15
arch/mips/include/asm/atomic.h
··· 17 17 #include <linux/irqflags.h> 18 18 #include <linux/types.h> 19 19 #include <asm/barrier.h> 20 + #include <asm/compiler.h> 20 21 #include <asm/cpu-features.h> 21 22 #include <asm/cmpxchg.h> 22 23 #include <asm/war.h> ··· 54 53 " sc %0, %1 \n" \ 55 54 " beqzl %0, 1b \n" \ 56 55 " .set mips0 \n" \ 57 - : "=&r" (temp), "+m" (v->counter) \ 56 + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 58 57 : "Ir" (i)); \ 59 58 } else if (kernel_uses_llsc) { \ 60 59 int temp; \ ··· 66 65 " " #asm_op " %0, %2 \n" \ 67 66 " sc %0, %1 \n" \ 68 67 " .set mips0 \n" \ 69 - : "=&r" (temp), "+m" (v->counter) \ 68 + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 70 69 : "Ir" (i)); \ 71 70 } while (unlikely(!temp)); \ 72 71 } else { \ ··· 96 95 " beqzl %0, 1b \n" \ 97 96 " " #asm_op " %0, %1, %3 \n" \ 98 97 " .set mips0 \n" \ 99 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 98 + : "=&r" (result), "=&r" (temp), \ 99 + "+" GCC_OFF12_ASM() (v->counter) \ 100 100 : "Ir" (i)); \ 101 101 } else if (kernel_uses_llsc) { \ 102 102 int temp; \ ··· 109 107 " " #asm_op " %0, %1, %3 \n" \ 110 108 " sc %0, %2 \n" \ 111 109 " .set mips0 \n" \ 112 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 110 + : "=&r" (result), "=&r" (temp), \ 111 + "+" GCC_OFF12_ASM() (v->counter) \ 113 112 : "Ir" (i)); \ 114 113 } while (unlikely(!result)); \ 115 114 \ ··· 170 167 " .set reorder \n" 171 168 "1: \n" 172 169 " .set mips0 \n" 173 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 174 - : "Ir" (i), "m" (v->counter) 170 + : "=&r" (result), "=&r" (temp), 171 + "+" GCC_OFF12_ASM() (v->counter) 172 + : "Ir" (i), GCC_OFF12_ASM() (v->counter) 175 173 : "memory"); 176 174 } else if (kernel_uses_llsc) { 177 175 int temp; ··· 189 185 " .set reorder \n" 190 186 "1: \n" 191 187 " .set mips0 \n" 192 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 188 + : "=&r" (result), "=&r" (temp), 189 + "+" GCC_OFF12_ASM() (v->counter) 193 190 : "Ir" (i)); 194 191 } else { 195 192 unsigned long flags; ··· 
333 328 " scd %0, %1 \n" \ 334 329 " beqzl %0, 1b \n" \ 335 330 " .set mips0 \n" \ 336 - : "=&r" (temp), "+m" (v->counter) \ 331 + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 337 332 : "Ir" (i)); \ 338 333 } else if (kernel_uses_llsc) { \ 339 334 long temp; \ ··· 345 340 " " #asm_op " %0, %2 \n" \ 346 341 " scd %0, %1 \n" \ 347 342 " .set mips0 \n" \ 348 - : "=&r" (temp), "+m" (v->counter) \ 343 + : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 349 344 : "Ir" (i)); \ 350 345 } while (unlikely(!temp)); \ 351 346 } else { \ ··· 375 370 " beqzl %0, 1b \n" \ 376 371 " " #asm_op " %0, %1, %3 \n" \ 377 372 " .set mips0 \n" \ 378 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 373 + : "=&r" (result), "=&r" (temp), \ 374 + "+" GCC_OFF12_ASM() (v->counter) \ 379 375 : "Ir" (i)); \ 380 376 } else if (kernel_uses_llsc) { \ 381 377 long temp; \ ··· 388 382 " " #asm_op " %0, %1, %3 \n" \ 389 383 " scd %0, %2 \n" \ 390 384 " .set mips0 \n" \ 391 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ 392 - : "Ir" (i), "m" (v->counter) \ 385 + : "=&r" (result), "=&r" (temp), \ 386 + "=" GCC_OFF12_ASM() (v->counter) \ 387 + : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ 393 388 : "memory"); \ 394 389 } while (unlikely(!result)); \ 395 390 \ ··· 450 443 " .set reorder \n" 451 444 "1: \n" 452 445 " .set mips0 \n" 453 - : "=&r" (result), "=&r" (temp), "=m" (v->counter) 454 - : "Ir" (i), "m" (v->counter) 446 + : "=&r" (result), "=&r" (temp), 447 + "=" GCC_OFF12_ASM() (v->counter) 448 + : "Ir" (i), GCC_OFF12_ASM() (v->counter) 455 449 : "memory"); 456 450 } else if (kernel_uses_llsc) { 457 451 long temp; ··· 469 461 " .set reorder \n" 470 462 "1: \n" 471 463 " .set mips0 \n" 472 - : "=&r" (result), "=&r" (temp), "+m" (v->counter) 464 + : "=&r" (result), "=&r" (temp), 465 + "+" GCC_OFF12_ASM() (v->counter) 473 466 : "Ir" (i)); 474 467 } else { 475 468 unsigned long flags;
+18 -17
arch/mips/include/asm/bitops.h
··· 17 17 #include <linux/types.h> 18 18 #include <asm/barrier.h> 19 19 #include <asm/byteorder.h> /* sigh ... */ 20 + #include <asm/compiler.h> 20 21 #include <asm/cpu-features.h> 21 22 #include <asm/sgidefs.h> 22 23 #include <asm/war.h> ··· 79 78 " " __SC "%0, %1 \n" 80 79 " beqzl %0, 1b \n" 81 80 " .set mips0 \n" 82 - : "=&r" (temp), "=m" (*m) 83 - : "ir" (1UL << bit), "m" (*m)); 81 + : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) 82 + : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); 84 83 #ifdef CONFIG_CPU_MIPSR2 85 84 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 86 85 do { ··· 88 87 " " __LL "%0, %1 # set_bit \n" 89 88 " " __INS "%0, %3, %2, 1 \n" 90 89 " " __SC "%0, %1 \n" 91 - : "=&r" (temp), "+m" (*m) 90 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 92 91 : "ir" (bit), "r" (~0)); 93 92 } while (unlikely(!temp)); 94 93 #endif /* CONFIG_CPU_MIPSR2 */ ··· 100 99 " or %0, %2 \n" 101 100 " " __SC "%0, %1 \n" 102 101 " .set mips0 \n" 103 - : "=&r" (temp), "+m" (*m) 102 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 104 103 : "ir" (1UL << bit)); 105 104 } while (unlikely(!temp)); 106 105 } else ··· 131 130 " " __SC "%0, %1 \n" 132 131 " beqzl %0, 1b \n" 133 132 " .set mips0 \n" 134 - : "=&r" (temp), "+m" (*m) 133 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 135 134 : "ir" (~(1UL << bit))); 136 135 #ifdef CONFIG_CPU_MIPSR2 137 136 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { ··· 140 139 " " __LL "%0, %1 # clear_bit \n" 141 140 " " __INS "%0, $0, %2, 1 \n" 142 141 " " __SC "%0, %1 \n" 143 - : "=&r" (temp), "+m" (*m) 142 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 144 143 : "ir" (bit)); 145 144 } while (unlikely(!temp)); 146 145 #endif /* CONFIG_CPU_MIPSR2 */ ··· 152 151 " and %0, %2 \n" 153 152 " " __SC "%0, %1 \n" 154 153 " .set mips0 \n" 155 - : "=&r" (temp), "+m" (*m) 154 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 156 155 : "ir" (~(1UL << bit))); 157 156 } while (unlikely(!temp)); 158 157 } else ··· 197 196 " " __SC "%0, %1 \n" 198 197 " beqzl %0, 1b \n" 
199 198 " .set mips0 \n" 200 - : "=&r" (temp), "+m" (*m) 199 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 201 200 : "ir" (1UL << bit)); 202 201 } else if (kernel_uses_llsc) { 203 202 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); ··· 210 209 " xor %0, %2 \n" 211 210 " " __SC "%0, %1 \n" 212 211 " .set mips0 \n" 213 - : "=&r" (temp), "+m" (*m) 212 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 214 213 : "ir" (1UL << bit)); 215 214 } while (unlikely(!temp)); 216 215 } else ··· 245 244 " beqzl %2, 1b \n" 246 245 " and %2, %0, %3 \n" 247 246 " .set mips0 \n" 248 - : "=&r" (temp), "+m" (*m), "=&r" (res) 247 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 249 248 : "r" (1UL << bit) 250 249 : "memory"); 251 250 } else if (kernel_uses_llsc) { ··· 259 258 " or %2, %0, %3 \n" 260 259 " " __SC "%2, %1 \n" 261 260 " .set mips0 \n" 262 - : "=&r" (temp), "+m" (*m), "=&r" (res) 261 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 263 262 : "r" (1UL << bit) 264 263 : "memory"); 265 264 } while (unlikely(!res)); ··· 313 312 " or %2, %0, %3 \n" 314 313 " " __SC "%2, %1 \n" 315 314 " .set mips0 \n" 316 - : "=&r" (temp), "+m" (*m), "=&r" (res) 315 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 317 316 : "r" (1UL << bit) 318 317 : "memory"); 319 318 } while (unlikely(!res)); ··· 355 354 " beqzl %2, 1b \n" 356 355 " and %2, %0, %3 \n" 357 356 " .set mips0 \n" 358 - : "=&r" (temp), "+m" (*m), "=&r" (res) 357 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 359 358 : "r" (1UL << bit) 360 359 : "memory"); 361 360 #ifdef CONFIG_CPU_MIPSR2 ··· 369 368 " " __EXT "%2, %0, %3, 1 \n" 370 369 " " __INS "%0, $0, %3, 1 \n" 371 370 " " __SC "%0, %1 \n" 372 - : "=&r" (temp), "+m" (*m), "=&r" (res) 371 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 373 372 : "ir" (bit) 374 373 : "memory"); 375 374 } while (unlikely(!temp)); ··· 386 385 " xor %2, %3 \n" 387 386 " " __SC "%2, %1 \n" 388 387 " .set mips0 \n" 389 - : "=&r" (temp), "+m" (*m), "=&r" (res) 
388 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 390 389 : "r" (1UL << bit) 391 390 : "memory"); 392 391 } while (unlikely(!res)); ··· 428 427 " beqzl %2, 1b \n" 429 428 " and %2, %0, %3 \n" 430 429 " .set mips0 \n" 431 - : "=&r" (temp), "+m" (*m), "=&r" (res) 430 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 432 431 : "r" (1UL << bit) 433 432 : "memory"); 434 433 } else if (kernel_uses_llsc) { ··· 442 441 " xor %2, %0, %3 \n" 443 442 " " __SC "\t%2, %1 \n" 444 443 " .set mips0 \n" 445 - : "=&r" (temp), "+m" (*m), "=&r" (res) 444 + : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 446 445 : "r" (1UL << bit) 447 446 : "memory"); 448 447 } while (unlikely(!res));
+15 -12
arch/mips/include/asm/cmpxchg.h
··· 10 10 11 11 #include <linux/bug.h> 12 12 #include <linux/irqflags.h> 13 + #include <asm/compiler.h> 13 14 #include <asm/war.h> 14 15 15 16 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) ··· 31 30 " sc %2, %1 \n" 32 31 " beqzl %2, 1b \n" 33 32 " .set mips0 \n" 34 - : "=&r" (retval), "=m" (*m), "=&r" (dummy) 35 - : "R" (*m), "Jr" (val) 33 + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 34 + : GCC_OFF12_ASM() (*m), "Jr" (val) 36 35 : "memory"); 37 36 } else if (kernel_uses_llsc) { 38 37 unsigned long dummy; ··· 46 45 " .set arch=r4000 \n" 47 46 " sc %2, %1 \n" 48 47 " .set mips0 \n" 49 - : "=&r" (retval), "=m" (*m), "=&r" (dummy) 50 - : "R" (*m), "Jr" (val) 48 + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 49 + "=&r" (dummy) 50 + : GCC_OFF12_ASM() (*m), "Jr" (val) 51 51 : "memory"); 52 52 } while (unlikely(!dummy)); 53 53 } else { ··· 82 80 " scd %2, %1 \n" 83 81 " beqzl %2, 1b \n" 84 82 " .set mips0 \n" 85 - : "=&r" (retval), "=m" (*m), "=&r" (dummy) 86 - : "R" (*m), "Jr" (val) 83 + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 84 + : GCC_OFF12_ASM() (*m), "Jr" (val) 87 85 : "memory"); 88 86 } else if (kernel_uses_llsc) { 89 87 unsigned long dummy; ··· 95 93 " move %2, %z4 \n" 96 94 " scd %2, %1 \n" 97 95 " .set mips0 \n" 98 - : "=&r" (retval), "=m" (*m), "=&r" (dummy) 99 - : "R" (*m), "Jr" (val) 96 + : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 97 + "=&r" (dummy) 98 + : GCC_OFF12_ASM() (*m), "Jr" (val) 100 99 : "memory"); 101 100 } while (unlikely(!dummy)); 102 101 } else { ··· 158 155 " beqzl $1, 1b \n" \ 159 156 "2: \n" \ 160 157 " .set pop \n" \ 161 - : "=&r" (__ret), "=R" (*m) \ 162 - : "R" (*m), "Jr" (old), "Jr" (new) \ 158 + : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 159 + : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 163 160 : "memory"); \ 164 161 } else if (kernel_uses_llsc) { \ 165 162 __asm__ __volatile__( \ ··· 175 172 " beqz $1, 1b \n" \ 176 173 " .set pop \n" \ 177 174 "2: \n" \ 178 - : "=&r" 
(__ret), "=R" (*m) \ 179 - : "R" (*m), "Jr" (old), "Jr" (new) \ 175 + : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 176 + : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 180 177 : "memory"); \ 181 178 } else { \ 182 179 unsigned long __flags; \
+8
arch/mips/include/asm/compiler.h
··· 16 16 #define GCC_REG_ACCUM "accum" 17 17 #endif 18 18 19 + #ifndef CONFIG_CPU_MICROMIPS 20 + #define GCC_OFF12_ASM() "R" 21 + #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) 22 + #define GCC_OFF12_ASM() "ZC" 23 + #else 24 + #error "microMIPS compilation unsupported with GCC older than 4.9" 25 + #endif 26 + 19 27 #endif /* _ASM_COMPILER_H */
+4 -2
arch/mips/include/asm/edac.h
··· 1 1 #ifndef ASM_EDAC_H 2 2 #define ASM_EDAC_H 3 3 4 + #include <asm/compiler.h> 5 + 4 6 /* ECC atomic, DMA, SMP and interrupt safe scrub function */ 5 7 6 8 static inline void atomic_scrub(void *va, u32 size) ··· 26 24 " sc %0, %1 \n" 27 25 " beqz %0, 1b \n" 28 26 " .set mips0 \n" 29 - : "=&r" (temp), "=m" (*virt_addr) 30 - : "m" (*virt_addr)); 27 + : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) 28 + : GCC_OFF12_ASM() (*virt_addr)); 31 29 32 30 virt_addr++; 33 31 }
+15 -8
arch/mips/include/asm/futex.h
··· 14 14 #include <linux/uaccess.h> 15 15 #include <asm/asm-eva.h> 16 16 #include <asm/barrier.h> 17 + #include <asm/compiler.h> 17 18 #include <asm/errno.h> 18 19 #include <asm/war.h> 19 20 ··· 43 42 " "__UA_ADDR "\t1b, 4b \n" \ 44 43 " "__UA_ADDR "\t2b, 4b \n" \ 45 44 " .previous \n" \ 46 - : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ 47 - : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ 45 + : "=r" (ret), "=&r" (oldval), \ 46 + "=" GCC_OFF12_ASM() (*uaddr) \ 47 + : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 48 + "i" (-EFAULT) \ 48 49 : "memory"); \ 49 50 } else if (cpu_has_llsc) { \ 50 51 __asm__ __volatile__( \ ··· 71 68 " "__UA_ADDR "\t1b, 4b \n" \ 72 69 " "__UA_ADDR "\t2b, 4b \n" \ 73 70 " .previous \n" \ 74 - : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ 75 - : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ 71 + : "=r" (ret), "=&r" (oldval), \ 72 + "=" GCC_OFF12_ASM() (*uaddr) \ 73 + : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 74 + "i" (-EFAULT) \ 76 75 : "memory"); \ 77 76 } else \ 78 77 ret = -ENOSYS; \ ··· 171 166 " "__UA_ADDR "\t1b, 4b \n" 172 167 " "__UA_ADDR "\t2b, 4b \n" 173 168 " .previous \n" 174 - : "+r" (ret), "=&r" (val), "=R" (*uaddr) 175 - : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 169 + : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 170 + : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 171 + "i" (-EFAULT) 176 172 : "memory"); 177 173 } else if (cpu_has_llsc) { 178 174 __asm__ __volatile__( ··· 199 193 " "__UA_ADDR "\t1b, 4b \n" 200 194 " "__UA_ADDR "\t2b, 4b \n" 201 195 " .previous \n" 202 - : "+r" (ret), "=&r" (val), "=R" (*uaddr) 203 - : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 196 + : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 197 + : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 198 + "i" (-EFAULT) 204 199 : "memory"); 205 200 } else 206 201 return -ENOSYS;
+13 -12
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
··· 49 49 50 50 #include <linux/types.h> 51 51 52 + #include <asm/compiler.h> 52 53 #include <asm/war.h> 53 54 54 55 #ifndef R10000_LLSC_WAR ··· 85 84 " "__beqz"%0, 1b \n" 86 85 " nop \n" 87 86 " .set pop \n" 88 - : "=&r" (temp), "=m" (*addr) 89 - : "ir" (~mask), "ir" (value), "m" (*addr)); 87 + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 88 + : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); 90 89 } 91 90 92 91 /* ··· 106 105 " "__beqz"%0, 1b \n" 107 106 " nop \n" 108 107 " .set pop \n" 109 - : "=&r" (temp), "=m" (*addr) 110 - : "ir" (mask), "m" (*addr)); 108 + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 109 + : "ir" (mask), GCC_OFF12_ASM() (*addr)); 111 110 } 112 111 113 112 /* ··· 127 126 " "__beqz"%0, 1b \n" 128 127 " nop \n" 129 128 " .set pop \n" 130 - : "=&r" (temp), "=m" (*addr) 131 - : "ir" (~mask), "m" (*addr)); 129 + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 130 + : "ir" (~mask), GCC_OFF12_ASM() (*addr)); 132 131 } 133 132 134 133 /* ··· 148 147 " "__beqz"%0, 1b \n" 149 148 " nop \n" 150 149 " .set pop \n" 151 - : "=&r" (temp), "=m" (*addr) 152 - : "ir" (mask), "m" (*addr)); 150 + : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 151 + : "ir" (mask), GCC_OFF12_ASM() (*addr)); 153 152 } 154 153 155 154 /* ··· 220 219 " .set arch=r4000 \n" \ 221 220 "1: ll %0, %1 #custom_read_reg32 \n" \ 222 221 " .set pop \n" \ 223 - : "=r" (tmp), "=m" (*address) \ 224 - : "m" (*address)) 222 + : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 223 + : GCC_OFF12_ASM() (*address)) 225 224 226 225 #define custom_write_reg32(address, tmp) \ 227 226 __asm__ __volatile__( \ ··· 231 230 " "__beqz"%0, 1b \n" \ 232 231 " nop \n" \ 233 232 " .set pop \n" \ 234 - : "=&r" (tmp), "=m" (*address) \ 235 - : "0" (tmp), "m" (*address)) 233 + : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 234 + : "0" (tmp), GCC_OFF12_ASM() (*address)) 236 235 237 236 #endif /* __ASM_REGOPS_H__ */
+3 -1
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
··· 76 76 77 77 #include <linux/prefetch.h> 78 78 79 + #include <asm/compiler.h> 80 + 79 81 #include <asm/octeon/cvmx-fpa.h> 80 82 /** 81 83 * By default we disable the max depth support. Most programs ··· 275 273 " lbu %[ticket], %[now_serving]\n" 276 274 "4:\n" 277 275 ".set pop\n" : 278 - [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 276 + [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 279 277 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 280 278 [my_ticket] "=r"(my_ticket) 281 279 );
+26 -24
arch/mips/include/asm/spinlock.h
··· 12 12 #include <linux/compiler.h> 13 13 14 14 #include <asm/barrier.h> 15 + #include <asm/compiler.h> 15 16 #include <asm/war.h> 16 17 17 18 /* ··· 89 88 " subu %[ticket], %[ticket], 1 \n" 90 89 " .previous \n" 91 90 " .set pop \n" 92 - : [ticket_ptr] "+m" (lock->lock), 91 + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 93 92 [serving_now_ptr] "+m" (lock->h.serving_now), 94 93 [ticket] "=&r" (tmp), 95 94 [my_ticket] "=&r" (my_ticket) ··· 122 121 " subu %[ticket], %[ticket], 1 \n" 123 122 " .previous \n" 124 123 " .set pop \n" 125 - : [ticket_ptr] "+m" (lock->lock), 124 + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 126 125 [serving_now_ptr] "+m" (lock->h.serving_now), 127 126 [ticket] "=&r" (tmp), 128 127 [my_ticket] "=&r" (my_ticket) ··· 164 163 " li %[ticket], 0 \n" 165 164 " .previous \n" 166 165 " .set pop \n" 167 - : [ticket_ptr] "+m" (lock->lock), 166 + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 168 167 [ticket] "=&r" (tmp), 169 168 [my_ticket] "=&r" (tmp2), 170 169 [now_serving] "=&r" (tmp3) ··· 188 187 " li %[ticket], 0 \n" 189 188 " .previous \n" 190 189 " .set pop \n" 191 - : [ticket_ptr] "+m" (lock->lock), 190 + : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 192 191 [ticket] "=&r" (tmp), 193 192 [my_ticket] "=&r" (tmp2), 194 193 [now_serving] "=&r" (tmp3) ··· 235 234 " beqzl %1, 1b \n" 236 235 " nop \n" 237 236 " .set reorder \n" 238 - : "=m" (rw->lock), "=&r" (tmp) 239 - : "m" (rw->lock) 237 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 238 + : GCC_OFF12_ASM() (rw->lock) 240 239 : "memory"); 241 240 } else { 242 241 do { ··· 245 244 " bltz %1, 1b \n" 246 245 " addu %1, 1 \n" 247 246 "2: sc %1, %0 \n" 248 - : "=m" (rw->lock), "=&r" (tmp) 249 - : "m" (rw->lock) 247 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 248 + : GCC_OFF12_ASM() (rw->lock) 250 249 : "memory"); 251 250 } while (unlikely(!tmp)); 252 251 } ··· 269 268 " sub %1, 1 \n" 270 269 " sc %1, %0 \n" 271 270 " beqzl %1, 1b \n" 272 - : "=m" (rw->lock), "=&r" (tmp) 273 - : 
"m" (rw->lock) 271 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 272 + : GCC_OFF12_ASM() (rw->lock) 274 273 : "memory"); 275 274 } else { 276 275 do { ··· 278 277 "1: ll %1, %2 # arch_read_unlock \n" 279 278 " sub %1, 1 \n" 280 279 " sc %1, %0 \n" 281 - : "=m" (rw->lock), "=&r" (tmp) 282 - : "m" (rw->lock) 280 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 281 + : GCC_OFF12_ASM() (rw->lock) 283 282 : "memory"); 284 283 } while (unlikely(!tmp)); 285 284 } ··· 299 298 " beqzl %1, 1b \n" 300 299 " nop \n" 301 300 " .set reorder \n" 302 - : "=m" (rw->lock), "=&r" (tmp) 303 - : "m" (rw->lock) 301 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 302 + : GCC_OFF12_ASM() (rw->lock) 304 303 : "memory"); 305 304 } else { 306 305 do { ··· 309 308 " bnez %1, 1b \n" 310 309 " lui %1, 0x8000 \n" 311 310 "2: sc %1, %0 \n" 312 - : "=m" (rw->lock), "=&r" (tmp) 313 - : "m" (rw->lock) 311 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 312 + : GCC_OFF12_ASM() (rw->lock) 314 313 : "memory"); 315 314 } while (unlikely(!tmp)); 316 315 } ··· 349 348 __WEAK_LLSC_MB 350 349 " li %2, 1 \n" 351 350 "2: \n" 352 - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 353 - : "m" (rw->lock) 351 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 352 + : GCC_OFF12_ASM() (rw->lock) 354 353 : "memory"); 355 354 } else { 356 355 __asm__ __volatile__( ··· 366 365 __WEAK_LLSC_MB 367 366 " li %2, 1 \n" 368 367 "2: \n" 369 - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 370 - : "m" (rw->lock) 368 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 369 + : GCC_OFF12_ASM() (rw->lock) 371 370 : "memory"); 372 371 } 373 372 ··· 393 392 " li %2, 1 \n" 394 393 " .set reorder \n" 395 394 "2: \n" 396 - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) 397 - : "m" (rw->lock) 395 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 396 + : GCC_OFF12_ASM() (rw->lock) 398 397 : "memory"); 399 398 } else { 400 399 do { ··· 406 405 " sc %1, %0 \n" 407 406 " li %2, 1 \n" 408 407 "2: \n" 409 - : "=m" 
(rw->lock), "=&r" (tmp), "=&r" (ret) 410 - : "m" (rw->lock) 408 + : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), 409 + "=&r" (ret) 410 + : GCC_OFF12_ASM() (rw->lock) 411 411 : "memory"); 412 412 } while (unlikely(!tmp)); 413 413