Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm: 'Simplify' GEN_*_RMWcc() macros

Currently the GEN_*_RMWcc() macros include a return statement, which
pretty much mandates we directly wrap them in an (inline) function.

Macros with return statements are tricky and, as per the above, limit
use, so remove the return statement and make them
statement-expressions. This allows them to be used more widely.

Also, shuffle the arguments a bit. Place the @cc argument as 3rd, this
makes it consistent between UNARY and BINARY, but more importantly, it
makes the @arg0 argument last.

Since the @arg0 argument is now last, we can do CPP trickery and make
it an optional argument, simplifying the users; 17 out of 18
occurrences do not need this argument.

Finally, change to asm symbolic names, instead of the numeric ordering
of operands, which allows us to get rid of __BINARY_RMWcc_ARG and get
cleaner code overall.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: JBeulich@suse.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bp@alien8.de
Cc: hpa@linux.intel.com
Link: https://lkml.kernel.org/r/20181003130957.108960094@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Peter Zijlstra and committed by
Ingo Molnar
288e4521 756b1df4

+64 -53
+4 -4
arch/x86/include/asm/atomic.h
··· 82 82 */ 83 83 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) 84 84 { 85 - GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); 85 + return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i); 86 86 } 87 87 #define arch_atomic_sub_and_test arch_atomic_sub_and_test 88 88 ··· 122 122 */ 123 123 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) 124 124 { 125 - GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); 125 + return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e); 126 126 } 127 127 #define arch_atomic_dec_and_test arch_atomic_dec_and_test 128 128 ··· 136 136 */ 137 137 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) 138 138 { 139 - GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); 139 + return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e); 140 140 } 141 141 #define arch_atomic_inc_and_test arch_atomic_inc_and_test 142 142 ··· 151 151 */ 152 152 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) 153 153 { 154 - GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); 154 + return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i); 155 155 } 156 156 #define arch_atomic_add_negative arch_atomic_add_negative 157 157
+4 -4
arch/x86/include/asm/atomic64_64.h
··· 73 73 */ 74 74 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) 75 75 { 76 - GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); 76 + return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i); 77 77 } 78 78 #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test 79 79 ··· 115 115 */ 116 116 static inline bool arch_atomic64_dec_and_test(atomic64_t *v) 117 117 { 118 - GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); 118 + return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e); 119 119 } 120 120 #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test 121 121 ··· 129 129 */ 130 130 static inline bool arch_atomic64_inc_and_test(atomic64_t *v) 131 131 { 132 - GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); 132 + return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e); 133 133 } 134 134 #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test 135 135 ··· 144 144 */ 145 145 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) 146 146 { 147 - GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); 147 + return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i); 148 148 } 149 149 #define arch_atomic64_add_negative arch_atomic64_add_negative 150 150
+3 -6
arch/x86/include/asm/bitops.h
··· 217 217 */ 218 218 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 219 219 { 220 - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), 221 - *addr, "Ir", nr, "%0", c); 220 + return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr); 222 221 } 223 222 224 223 /** ··· 263 264 */ 264 265 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 265 266 { 266 - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), 267 - *addr, "Ir", nr, "%0", c); 267 + return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr); 268 268 } 269 269 270 270 /** ··· 316 318 */ 317 319 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 318 320 { 319 - GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), 320 - *addr, "Ir", nr, "%0", c); 321 + return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr); 321 322 } 322 323 323 324 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+4 -4
arch/x86/include/asm/local.h
··· 53 53 */ 54 54 static inline bool local_sub_and_test(long i, local_t *l) 55 55 { 56 - GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e); 56 + return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i); 57 57 } 58 58 59 59 /** ··· 66 66 */ 67 67 static inline bool local_dec_and_test(local_t *l) 68 68 { 69 - GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e); 69 + return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e); 70 70 } 71 71 72 72 /** ··· 79 79 */ 80 80 static inline bool local_inc_and_test(local_t *l) 81 81 { 82 - GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e); 82 + return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e); 83 83 } 84 84 85 85 /** ··· 93 93 */ 94 94 static inline bool local_add_negative(long i, local_t *l) 95 95 { 96 - GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s); 96 + return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i); 97 97 } 98 98 99 99 /**
+1 -1
arch/x86/include/asm/preempt.h
··· 88 88 */ 89 89 static __always_inline bool __preempt_count_dec_and_test(void) 90 90 { 91 - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); 91 + return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var])); 92 92 } 93 93 94 94 /*
+7 -6
arch/x86/include/asm/refcount.h
··· 79 79 static __always_inline __must_check 80 80 bool refcount_sub_and_test(unsigned int i, refcount_t *r) 81 81 { 82 - GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", 83 - "REFCOUNT_CHECK_LT_ZERO counter=\"%0\"", 84 - r->refs.counter, "er", i, "%0", e, "cx"); 82 + 83 + return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", 84 + "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"", 85 + r->refs.counter, e, "er", i, "cx"); 85 86 } 86 87 87 88 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) 88 89 { 89 - GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", 90 - "REFCOUNT_CHECK_LT_ZERO counter=\"%0\"", 91 - r->refs.counter, "%0", e, "cx"); 90 + return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", 91 + "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"", 92 + r->refs.counter, e, "cx"); 92 93 } 93 94 94 95 static __always_inline __must_check
+41 -28
arch/x86/include/asm/rmwcc.h
··· 2 2 #ifndef _ASM_X86_RMWcc 3 3 #define _ASM_X86_RMWcc 4 4 5 + /* This counts to 12. Any more, it will return 13th argument. */ 6 + #define __RMWcc_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n 7 + #define RMWcc_ARGS(X...) __RMWcc_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) 8 + 9 + #define __RMWcc_CONCAT(a, b) a ## b 10 + #define RMWcc_CONCAT(a, b) __RMWcc_CONCAT(a, b) 11 + 5 12 #define __CLOBBERS_MEM(clb...) "memory", ## clb 6 13 7 14 #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO) 8 15 9 16 /* Use asm goto */ 10 17 11 - #define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ 12 - do { \ 18 + #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 19 + ({ \ 20 + bool c = false; \ 13 21 asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ 14 - : : [counter] "m" (var), ## __VA_ARGS__ \ 22 + : : [var] "m" (_var), ## __VA_ARGS__ \ 15 23 : clobbers : cc_label); \ 16 - return 0; \ 17 - cc_label: \ 18 - return 1; \ 19 - } while (0) 20 - 21 - #define __BINARY_RMWcc_ARG " %1, " 22 - 24 + if (0) { \ 25 + cc_label: c = true; \ 26 + } \ 27 + c; \ 28 + }) 23 29 24 30 #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ 25 31 26 32 /* Use flags output or a set instruction */ 27 33 28 - #define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \ 29 - do { \ 34 + #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) 
\ 35 + ({ \ 30 36 bool c; \ 31 37 asm volatile (fullop CC_SET(cc) \ 32 - : [counter] "+m" (var), CC_OUT(cc) (c) \ 38 + : [var] "+m" (_var), CC_OUT(cc) (c) \ 33 39 : __VA_ARGS__ : clobbers); \ 34 - return c; \ 35 - } while (0) 36 - 37 - #define __BINARY_RMWcc_ARG " %2, " 40 + c; \ 41 + }) 38 42 39 43 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */ 40 44 41 - #define GEN_UNARY_RMWcc(op, var, arg0, cc) \ 45 + #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ 42 46 __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM()) 43 47 44 - #define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\ 45 - __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \ 48 + #define GEN_UNARY_RMWcc_3(op, var, cc) \ 49 + GEN_UNARY_RMWcc_4(op, var, cc, "%[var]") 50 + 51 + #define GEN_UNARY_RMWcc(X...) RMWcc_CONCAT(GEN_UNARY_RMWcc_, RMWcc_ARGS(X))(X) 52 + 53 + #define GEN_BINARY_RMWcc_6(op, var, cc, vcon, _val, arg0) \ 54 + __GEN_RMWcc(op " %[val], " arg0, var, cc, \ 55 + __CLOBBERS_MEM(), [val] vcon (_val)) 56 + 57 + #define GEN_BINARY_RMWcc_5(op, var, cc, vcon, val) \ 58 + GEN_BINARY_RMWcc_6(op, var, cc, vcon, val, "%[var]") 59 + 60 + #define GEN_BINARY_RMWcc(X...) RMWcc_CONCAT(GEN_BINARY_RMWcc_, RMWcc_ARGS(X))(X) 61 + 62 + #define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, cc, clobbers...) \ 63 + __GEN_RMWcc(op " %[var]\n\t" suffix, var, cc, \ 46 64 __CLOBBERS_MEM(clobbers)) 47 65 48 - #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \ 49 - __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \ 50 - __CLOBBERS_MEM(), vcon (val)) 51 - 52 - #define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc, \ 53 - clobbers...) 
\ 54 - __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \ 55 - __CLOBBERS_MEM(clobbers), vcon (val)) 66 + #define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, cc, vcon, _val, clobbers...)\ 67 + __GEN_RMWcc(op " %[val], %[var]\n\t" suffix, var, cc, \ 68 + __CLOBBERS_MEM(clobbers), [val] vcon (_val)) 56 69 57 70 #endif /* _ASM_X86_RMWcc */