Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/asm: Remove UPD_CONSTR after GCC 4.9 removal

UPD_CONSTR was previously a preprocessor define used to work around an old
GCC 4.9 inline asm bug with "m<>" constraints. Now that GCC 4.9 support has
been removed, fold the "<>" constraint modifier directly into the inline asm
constraint strings and drop the define.

Fixes: 6563139d90ad ("powerpc: remove GCC version check for UPD_CONSTR")
Suggested-by: Nathan Chancellor <nathan@kernel.org>
Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210914161712.2463458-1-ndesaulniers@google.com

authored by

Nick Desaulniers and committed by
Michael Ellerman
2a24d80f 7eff9bc0

+11 -13
-2
arch/powerpc/include/asm/asm-const.h
··· 12 12 # define ASM_CONST(x) __ASM_CONST(x) 13 13 #endif 14 14 15 - #define UPD_CONSTR "<>" 16 - 17 15 #endif /* _ASM_POWERPC_ASM_CONST_H */
+4 -4
arch/powerpc/include/asm/atomic.h
··· 27 27 { 28 28 int t; 29 29 30 - __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter)); 30 + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); 31 31 32 32 return t; 33 33 } 34 34 35 35 static __inline__ void arch_atomic_set(atomic_t *v, int i) 36 36 { 37 - __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); 37 + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); 38 38 } 39 39 40 40 #define ATOMIC_OP(op, asm_op) \ ··· 320 320 { 321 321 s64 t; 322 322 323 - __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter)); 323 + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter)); 324 324 325 325 return t; 326 326 } 327 327 328 328 static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i) 329 329 { 330 - __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); 330 + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i)); 331 331 } 332 332 333 333 #define ATOMIC64_OP(op, asm_op) \
+2 -2
arch/powerpc/include/asm/io.h
··· 122 122 { \ 123 123 u##size ret; \ 124 124 __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\ 125 - : "=r" (ret) : "m"UPD_CONSTR (*addr) : "memory"); \ 125 + : "=r" (ret) : "m<>" (*addr) : "memory"); \ 126 126 return ret; \ 127 127 } 128 128 ··· 130 130 static inline void name(volatile u##size __iomem *addr, u##size val) \ 131 131 { \ 132 132 __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ 133 - : "=m"UPD_CONSTR (*addr) : "r" (val) : "memory"); \ 133 + : "=m<>" (*addr) : "r" (val) : "memory"); \ 134 134 mmiowb_set_pending(); \ 135 135 } 136 136
+3 -3
arch/powerpc/include/asm/uaccess.h
··· 86 86 "1: " op "%U1%X1 %0,%1 # put_user\n" \ 87 87 EX_TABLE(1b, %l2) \ 88 88 : \ 89 - : "r" (x), "m"UPD_CONSTR (*addr) \ 89 + : "r" (x), "m<>" (*addr) \ 90 90 : \ 91 91 : label) 92 92 ··· 143 143 "1: "op"%U1%X1 %0, %1 # get_user\n" \ 144 144 EX_TABLE(1b, %l2) \ 145 145 : "=r" (x) \ 146 - : "m"UPD_CONSTR (*addr) \ 146 + : "m<>" (*addr) \ 147 147 : \ 148 148 : label) 149 149 ··· 200 200 ".previous\n" \ 201 201 EX_TABLE(1b, 3b) \ 202 202 : "=r" (err), "=r" (x) \ 203 - : "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err)) 203 + : "m<>" (*addr), "i" (-EFAULT), "0" (err)) 204 204 205 205 #ifdef __powerpc64__ 206 206 #define __get_user_asm2(x, addr, err) \
+2 -2
arch/powerpc/kvm/powerpc.c
··· 1094 1094 1095 1095 preempt_disable(); 1096 1096 enable_kernel_fp(); 1097 - asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m"UPD_CONSTR (fprd) : "m"UPD_CONSTR (fprs) 1097 + asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs) 1098 1098 : "fr0"); 1099 1099 preempt_enable(); 1100 1100 return fprd; ··· 1106 1106 1107 1107 preempt_disable(); 1108 1108 enable_kernel_fp(); 1109 - asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m"UPD_CONSTR (fprs) : "m"UPD_CONSTR (fprd) 1109 + asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd) 1110 1110 : "fr0"); 1111 1111 preempt_enable(); 1112 1112 return fprs;