Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm: Remove code depending on __GCC_ASM_FLAG_OUTPUTS__

The minimum supported GCC version is 8.1, which supports flag output operands
and always defines the __GCC_ASM_FLAG_OUTPUTS__ macro.

Remove code depending on __GCC_ASM_FLAG_OUTPUTS__ and use the "=@ccCOND" flag
output operand directly.

Use the equivalent "=@ccz" instead of "=@cce" flag output operand for the
CMPXCHG8B and CMPXCHG16B instructions. These instructions set a single flag
bit - the Zero flag - and "=@ccz" is used to distinguish this use of the
condition code from comparison instructions, where a set Zero flag indeed
means that the compared values are equal.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20250905121723.GCaLrU04lP2A50PT-B@fat_crate.local

authored by

Uros Bizjak and committed by
Borislav Petkov (AMD)
c6c973db 13bdfb53

+35 -104
+1 -1
arch/x86/boot/bitops.h
··· 27 27 bool v; 28 28 const u32 *p = addr; 29 29 30 - asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr)); 30 + asm("btl %2,%1" : "=@ccc" (v) : "m" (*p), "Ir" (nr)); 31 31 return v; 32 32 } 33 33
+4 -4
arch/x86/boot/boot.h
··· 155 155 static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len) 156 156 { 157 157 bool diff; 158 - asm volatile("fs repe cmpsb" CC_SET(nz) 159 - : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); 158 + asm volatile("fs repe cmpsb" 159 + : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 160 160 return diff; 161 161 } 162 162 static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len) 163 163 { 164 164 bool diff; 165 - asm volatile("gs repe cmpsb" CC_SET(nz) 166 - : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); 165 + asm volatile("gs repe cmpsb" 166 + : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 167 167 return diff; 168 168 } 169 169
+2 -2
arch/x86/boot/string.c
··· 32 32 int memcmp(const void *s1, const void *s2, size_t len) 33 33 { 34 34 bool diff; 35 - asm("repe cmpsb" CC_SET(nz) 36 - : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); 35 + asm("repe cmpsb" 36 + : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 37 37 return diff; 38 38 } 39 39
+2 -4
arch/x86/include/asm/archrandom.h
··· 23 23 unsigned int retry = RDRAND_RETRY_LOOPS; 24 24 do { 25 25 asm volatile("rdrand %[out]" 26 - CC_SET(c) 27 - : CC_OUT(c) (ok), [out] "=r" (*v)); 26 + : "=@ccc" (ok), [out] "=r" (*v)); 28 27 if (ok) 29 28 return true; 30 29 } while (--retry); ··· 34 35 { 35 36 bool ok; 36 37 asm volatile("rdseed %[out]" 37 - CC_SET(c) 38 - : CC_OUT(c) (ok), [out] "=r" (*v)); 38 + : "=@ccc" (ok), [out] "=r" (*v)); 39 39 return ok; 40 40 } 41 41
-12
arch/x86/include/asm/asm.h
··· 122 122 } 123 123 #endif 124 124 125 - /* 126 - * Macros to generate condition code outputs from inline assembly, 127 - * The output operand must be type "bool". 128 - */ 129 - #ifdef __GCC_ASM_FLAG_OUTPUTS__ 130 - # define CC_SET(c) "\n\t/* output condition code " #c "*/\n" 131 - # define CC_OUT(c) "=@cc" #c 132 - #else 133 - # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n" 134 - # define CC_OUT(c) [_cc_ ## c] "=qm" 135 - #endif 136 - 137 125 #ifdef __KERNEL__ 138 126 139 127 # include <asm/extable_fixup_types.h>
+6 -12
arch/x86/include/asm/bitops.h
··· 99 99 { 100 100 bool negative; 101 101 asm_inline volatile(LOCK_PREFIX "xorb %2,%1" 102 - CC_SET(s) 103 - : CC_OUT(s) (negative), WBYTE_ADDR(addr) 102 + : "=@ccs" (negative), WBYTE_ADDR(addr) 104 103 : "iq" ((char)mask) : "memory"); 105 104 return negative; 106 105 } ··· 148 149 bool oldbit; 149 150 150 151 asm(__ASM_SIZE(bts) " %2,%1" 151 - CC_SET(c) 152 - : CC_OUT(c) (oldbit) 152 + : "=@ccc" (oldbit) 153 153 : ADDR, "Ir" (nr) : "memory"); 154 154 return oldbit; 155 155 } ··· 173 175 bool oldbit; 174 176 175 177 asm volatile(__ASM_SIZE(btr) " %2,%1" 176 - CC_SET(c) 177 - : CC_OUT(c) (oldbit) 178 + : "=@ccc" (oldbit) 178 179 : ADDR, "Ir" (nr) : "memory"); 179 180 return oldbit; 180 181 } ··· 184 187 bool oldbit; 185 188 186 189 asm volatile(__ASM_SIZE(btc) " %2,%1" 187 - CC_SET(c) 188 - : CC_OUT(c) (oldbit) 190 + : "=@ccc" (oldbit) 189 191 : ADDR, "Ir" (nr) : "memory"); 190 192 191 193 return oldbit; ··· 207 211 bool oldbit; 208 212 209 213 asm volatile("testb %2,%1" 210 - CC_SET(nz) 211 - : CC_OUT(nz) (oldbit) 214 + : "=@ccnz" (oldbit) 212 215 : "m" (((unsigned char *)addr)[nr >> 3]), 213 216 "i" (1 << (nr & 7)) 214 217 :"memory"); ··· 220 225 bool oldbit; 221 226 222 227 asm volatile(__ASM_SIZE(bt) " %2,%1" 223 - CC_SET(c) 224 - : CC_OUT(c) (oldbit) 228 + : "=@ccc" (oldbit) 225 229 : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); 226 230 227 231 return oldbit;
+4 -8
arch/x86/include/asm/cmpxchg.h
··· 166 166 { \ 167 167 volatile u8 *__ptr = (volatile u8 *)(_ptr); \ 168 168 asm_inline volatile(lock "cmpxchgb %[new], %[ptr]" \ 169 - CC_SET(z) \ 170 - : CC_OUT(z) (success), \ 169 + : "=@ccz" (success), \ 171 170 [ptr] "+m" (*__ptr), \ 172 171 [old] "+a" (__old) \ 173 172 : [new] "q" (__new) \ ··· 177 178 { \ 178 179 volatile u16 *__ptr = (volatile u16 *)(_ptr); \ 179 180 asm_inline volatile(lock "cmpxchgw %[new], %[ptr]" \ 180 - CC_SET(z) \ 181 - : CC_OUT(z) (success), \ 181 + : "=@ccz" (success), \ 182 182 [ptr] "+m" (*__ptr), \ 183 183 [old] "+a" (__old) \ 184 184 : [new] "r" (__new) \ ··· 188 190 { \ 189 191 volatile u32 *__ptr = (volatile u32 *)(_ptr); \ 190 192 asm_inline volatile(lock "cmpxchgl %[new], %[ptr]" \ 191 - CC_SET(z) \ 192 - : CC_OUT(z) (success), \ 193 + : "=@ccz" (success), \ 193 194 [ptr] "+m" (*__ptr), \ 194 195 [old] "+a" (__old) \ 195 196 : [new] "r" (__new) \ ··· 199 202 { \ 200 203 volatile u64 *__ptr = (volatile u64 *)(_ptr); \ 201 204 asm_inline volatile(lock "cmpxchgq %[new], %[ptr]" \ 202 - CC_SET(z) \ 203 - : CC_OUT(z) (success), \ 205 + : "=@ccz" (success), \ 204 206 [ptr] "+m" (*__ptr), \ 205 207 [old] "+a" (__old) \ 206 208 : [new] "r" (__new) \
+2 -4
arch/x86/include/asm/cmpxchg_32.h
··· 46 46 bool ret; \ 47 47 \ 48 48 asm_inline volatile(_lock "cmpxchg8b %[ptr]" \ 49 - CC_SET(e) \ 50 - : CC_OUT(e) (ret), \ 49 + : "=@ccz" (ret), \ 51 50 [ptr] "+m" (*(_ptr)), \ 52 51 "+a" (o.low), "+d" (o.high) \ 53 52 : "b" (n.low), "c" (n.high) \ ··· 124 125 ALTERNATIVE(_lock_loc \ 125 126 "call cmpxchg8b_emu", \ 126 127 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \ 127 - CC_SET(e) \ 128 - : ALT_OUTPUT_SP(CC_OUT(e) (ret), \ 128 + : ALT_OUTPUT_SP("=@ccz" (ret), \ 129 129 "+a" (o.low), "+d" (o.high)) \ 130 130 : "b" (n.low), "c" (n.high), \ 131 131 [ptr] "S" (_ptr) \
+1 -2
arch/x86/include/asm/cmpxchg_64.h
··· 66 66 bool ret; \ 67 67 \ 68 68 asm_inline volatile(_lock "cmpxchg16b %[ptr]" \ 69 - CC_SET(e) \ 70 - : CC_OUT(e) (ret), \ 69 + : "=@ccz" (ret), \ 71 70 [ptr] "+m" (*(_ptr)), \ 72 71 "+a" (o.low), "+d" (o.high) \ 73 72 : "b" (n.low), "c" (n.high) \
+4 -8
arch/x86/include/asm/percpu.h
··· 309 309 \ 310 310 asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \ 311 311 __percpu_arg([var]) \ 312 - CC_SET(z) \ 313 - : CC_OUT(z) (success), \ 312 + : "=@ccz" (success), \ 314 313 [oval] "+a" (pco_old__), \ 315 314 [var] "+m" (__my_cpu_var(_var)) \ 316 315 : [nval] __pcpu_reg_##size(, pco_new__) \ ··· 366 367 asm_inline qual ( \ 367 368 ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ 368 369 "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ 369 - CC_SET(z) \ 370 - : ALT_OUTPUT_SP(CC_OUT(z) (success), \ 370 + : ALT_OUTPUT_SP("=@ccz" (success), \ 371 371 [var] "+m" (__my_cpu_var(_var)), \ 372 372 "+a" (old__.low), "+d" (old__.high)) \ 373 373 : "b" (new__.low), "c" (new__.high), \ ··· 434 436 asm_inline qual ( \ 435 437 ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ 436 438 "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ 437 - CC_SET(z) \ 438 - : ALT_OUTPUT_SP(CC_OUT(z) (success), \ 439 + : ALT_OUTPUT_SP("=@ccz" (success), \ 439 440 [var] "+m" (__my_cpu_var(_var)), \ 440 441 "+a" (old__.low), "+d" (old__.high)) \ 441 442 : "b" (new__.low), "c" (new__.high), \ ··· 582 585 bool oldbit; \ 583 586 \ 584 587 asm volatile("btl %[nr], " __percpu_arg([var]) \ 585 - CC_SET(c) \ 586 - : CC_OUT(c) (oldbit) \ 588 + : "=@ccc" (oldbit) \ 587 589 : [var] "m" (__my_cpu_var(_var)), \ 588 590 [nr] "rI" (_nr)); \ 589 591 oldbit; \
+2 -24
arch/x86/include/asm/rmwcc.h
··· 6 6 7 7 #define __CLOBBERS_MEM(clb...) "memory", ## clb 8 8 9 - #ifndef __GCC_ASM_FLAG_OUTPUTS__ 10 - 11 - /* Use asm goto */ 12 - 13 - #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 14 - ({ \ 15 - bool c = false; \ 16 - asm goto (fullop "; j" #cc " %l[cc_label]" \ 17 - : : [var] "m" (_var), ## __VA_ARGS__ \ 18 - : clobbers : cc_label); \ 19 - if (0) { \ 20 - cc_label: c = true; \ 21 - } \ 22 - c; \ 23 - }) 24 - 25 - #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */ 26 - 27 - /* Use flags output or a set instruction */ 28 - 29 9 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 30 10 ({ \ 31 11 bool c; \ 32 - asm_inline volatile (fullop CC_SET(cc) \ 33 - : [var] "+m" (_var), CC_OUT(cc) (c) \ 12 + asm_inline volatile (fullop \ 13 + : [var] "+m" (_var), "=@cc" #cc (c) \ 34 14 : __VA_ARGS__ : clobbers); \ 35 15 c; \ 36 16 }) 37 - 38 - #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */ 39 17 40 18 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ 41 19 __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
+1 -2
arch/x86/include/asm/sev.h
··· 491 491 492 492 /* "pvalidate" mnemonic support in binutils 2.36 and newer */ 493 493 asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t" 494 - CC_SET(c) 495 - : CC_OUT(c) (no_rmpupdate), "=a"(rc) 494 + : "=@ccc"(no_rmpupdate), "=a"(rc) 496 495 : "a"(vaddr), "c"(rmp_psize), "d"(validate) 497 496 : "memory", "cc"); 498 497
+1 -2
arch/x86/include/asm/signal.h
··· 83 83 static inline int __gen_sigismember(sigset_t *set, int _sig) 84 84 { 85 85 bool ret; 86 - asm("btl %2,%1" CC_SET(c) 87 - : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); 86 + asm("btl %2,%1" : "=@ccc"(ret) : "m"(*set), "Ir"(_sig-1)); 88 87 return ret; 89 88 } 90 89
+1 -2
arch/x86/include/asm/special_insns.h
··· 284 284 * See movdir64b()'s comment on operand specification. 285 285 */ 286 286 asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90" 287 - CC_SET(z) 288 - : CC_OUT(z) (zf), "+m" (*__dst) 287 + : "=@ccz" (zf), "+m" (*__dst) 289 288 : "m" (*__src), "a" (__dst), "d" (__src)); 290 289 291 290 /* Submission failure is indicated via EFLAGS.ZF=1 */
+3 -4
arch/x86/include/asm/uaccess.h
··· 378 378 asm_goto_output("\n" \ 379 379 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 380 380 _ASM_EXTABLE_UA(1b, %l[label]) \ 381 - : CC_OUT(z) (success), \ 381 + : "=@ccz" (success), \ 382 382 [ptr] "+m" (*_ptr), \ 383 383 [old] "+a" (__old) \ 384 384 : [new] ltype (__new) \ ··· 397 397 asm_goto_output("\n" \ 398 398 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ 399 399 _ASM_EXTABLE_UA(1b, %l[label]) \ 400 - : CC_OUT(z) (success), \ 400 + : "=@ccz" (success), \ 401 401 "+A" (__old), \ 402 402 [ptr] "+m" (*_ptr) \ 403 403 : "b" ((u32)__new), \ ··· 417 417 __typeof__(*(_ptr)) __new = (_new); \ 418 418 asm volatile("\n" \ 419 419 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 420 - CC_SET(z) \ 421 420 "2:\n" \ 422 421 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \ 423 422 %[errout]) \ 424 - : CC_OUT(z) (success), \ 423 + : "=@ccz" (success), \ 425 424 [errout] "+r" (__err), \ 426 425 [ptr] "+m" (*_ptr), \ 427 426 [old] "+a" (__old) \
-12
tools/arch/x86/include/asm/asm.h
··· 108 108 109 109 #endif 110 110 111 - /* 112 - * Macros to generate condition code outputs from inline assembly, 113 - * The output operand must be type "bool". 114 - */ 115 - #ifdef __GCC_ASM_FLAG_OUTPUTS__ 116 - # define CC_SET(c) "\n\t/* output condition code " #c "*/\n" 117 - # define CC_OUT(c) "=@cc" #c 118 - #else 119 - # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n" 120 - # define CC_OUT(c) [_cc_ ## c] "=qm" 121 - #endif 122 - 123 111 #ifdef __KERNEL__ 124 112 125 113 /* Exception table entry */
+1 -1
tools/perf/bench/find-bit-bench.c
··· 37 37 accumulator++; 38 38 } 39 39 40 - #if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__) 40 + #if defined(__i386__) || defined(__x86_64__) 41 41 static bool asm_test_bit(long nr, const unsigned long *addr) 42 42 { 43 43 bool oldbit;