Merge tag 'x86_cleanups_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Borislav Petkov:

- Simplify inline asm flag output operands now that the minimum
  compiler version supports the =@ccCOND syntax (a short sketch of the
  new form follows this list)

- Remove a bunch of AS_* Kconfig symbols which detect assembler support
for various instruction mnemonics now that the minimum assembler
version supports them all

- The usual cleanups all over the place
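
As an illustration of the first item (a minimal sketch, not code from the
tree): with compiler flag-output operands, a condition code is read
directly into a bool through an "=@cc<cond>" output constraint instead of
going through the old CC_SET()/CC_OUT() helper macros. The bit_is_set()
helper below is hypothetical; it mirrors the btl usage in the boot code
touched by this diff.

    #include <stdbool.h>

    /* Capture CF from BT via the "=@ccc" flag-output constraint. */
    static inline bool bit_is_set(const unsigned int *addr, int nr)
    {
            bool carry;

            /* %1 is the bit array in memory, %2 is the bit number. */
            asm("btl %2,%1" : "=@ccc" (carry) : "m" (*addr), "Ir" (nr));
            return carry;
    }

The same substitution turns CC_SET(z)/CC_OUT(z) pairs into "=@ccz",
CC_SET(nz) into "=@ccnz", and so on throughout the diff below.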

* tag 'x86_cleanups_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/asm: Remove code depending on __GCC_ASM_FLAG_OUTPUTS__
x86/sgx: Use ENCLS mnemonic in <kernel/cpu/sgx/encls.h>
x86/mtrr: Remove license boilerplate text with bad FSF address
x86/asm: Use RDPKRU and WRPKRU mnemonics in <asm/special_insns.h>
x86/idle: Use MONITORX and MWAITX mnemonics in <asm/mwait.h>
x86/entry/fred: Push __KERNEL_CS directly
x86/kconfig: Remove CONFIG_AS_AVX512
crypto: x86 - Remove CONFIG_AS_VPCLMULQDQ
crypto: x86 - Remove CONFIG_AS_VAES
crypto: x86 - Remove CONFIG_AS_GFNI
x86/kconfig: Drop unused and needless config X86_64_SMP

+55 -239
-4
arch/x86/Kconfig
···
         def_bool y
         depends on INTEL_IOMMU && ACPI

-config X86_64_SMP
-        def_bool y
-        depends on X86_64 && SMP
-
 config ARCH_SUPPORTS_UPROBES
         def_bool y
-20
arch/x86/Kconfig.assembler
···
 # SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.

-config AS_AVX512
-        def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5)
-        help
-          Supported by binutils >= 2.25 and LLVM integrated assembler
-
-config AS_GFNI
-        def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2)
-        help
-          Supported by binutils >= 2.30 and LLVM integrated assembler
-
-config AS_VAES
-        def_bool $(as-instr,vaesenc %ymm0$(comma)%ymm1$(comma)%ymm2)
-        help
-          Supported by binutils >= 2.30 and LLVM integrated assembler
-
-config AS_VPCLMULQDQ
-        def_bool $(as-instr,vpclmulqdq \$0x10$(comma)%ymm0$(comma)%ymm1$(comma)%ymm2)
-        help
-          Supported by binutils >= 2.30 and LLVM integrated assembler
-
 config AS_WRUSS
         def_bool $(as-instr64,wrussq %rax$(comma)(%rbx))
         help
+1 -1
arch/x86/boot/bitops.h
···
         bool v;
         const u32 *p = addr;

-        asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr));
+        asm("btl %2,%1" : "=@ccc" (v) : "m" (*p), "Ir" (nr));
         return v;
 }
+4 -4
arch/x86/boot/boot.h
···
 static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
 {
         bool diff;
-        asm volatile("fs repe cmpsb" CC_SET(nz)
-                     : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
+        asm volatile("fs repe cmpsb"
+                     : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len));
         return diff;
 }
 static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
 {
         bool diff;
-        asm volatile("gs repe cmpsb" CC_SET(nz)
-                     : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
+        asm volatile("gs repe cmpsb"
+                     : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len));
         return diff;
 }
+2 -2
arch/x86/boot/string.c
···
 int memcmp(const void *s1, const void *s2, size_t len)
 {
         bool diff;
-        asm("repe cmpsb" CC_SET(nz)
-            : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
+        asm("repe cmpsb"
+            : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len));
         return diff;
 }
+1 -1
arch/x86/crypto/Kconfig
···

 config CRYPTO_ARIA_GFNI_AVX512_X86_64
         tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)"
-        depends on 64BIT && AS_GFNI
+        depends on 64BIT
         select CRYPTO_SKCIPHER
         select CRYPTO_ALGAPI
         select CRYPTO_ARIA
+2 -4
arch/x86/crypto/Makefile
···
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
 aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \
                                aes-gcm-aesni-x86_64.o \
-                               aes-xts-avx-x86_64.o
-ifeq ($(CONFIG_AS_VAES)$(CONFIG_AS_VPCLMULQDQ),yy)
-aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o
-endif
+                               aes-xts-avx-x86_64.o \
+                               aes-gcm-avx10-x86_64.o

 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
-2
arch/x86/crypto/aes-ctr-avx-x86_64.S
···
         _aes_ctr_crypt 1
 SYM_FUNC_END(aes_xctr_crypt_aesni_avx)

-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 .set VL, 32
 .set USE_AVX512, 0
 SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2)
···
 SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512)
         _aes_ctr_crypt 1
 SYM_FUNC_END(aes_xctr_crypt_vaes_avx512)
-#endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ
-2
arch/x86/crypto/aes-xts-avx-x86_64.S
··· 886 886 _aes_xts_crypt 0 887 887 SYM_FUNC_END(aes_xts_decrypt_aesni_avx) 888 888 889 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 890 889 .set VL, 32 891 890 .set USE_AVX512, 0 892 891 SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2) ··· 903 904 SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512) 904 905 _aes_xts_crypt 0 905 906 SYM_FUNC_END(aes_xts_decrypt_vaes_avx512) 906 - #endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
+3 -19
arch/x86/crypto/aesni-intel_glue.c
···
 }}

 DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500);
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600);
 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800);
-#endif

 /* The common part of the x86_64 AES-GCM key struct */
 struct aes_gcm_key {
···
 #define FLAG_RFC4106 BIT(0)
 #define FLAG_ENC BIT(1)
 #define FLAG_AVX BIT(2)
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
-# define FLAG_AVX10_256 BIT(3)
-# define FLAG_AVX10_512 BIT(4)
-#else
-/*
- * This should cause all calls to the AVX10 assembly functions to be
- * optimized out, avoiding the need to ifdef each call individually.
- */
-# define FLAG_AVX10_256 0
-# define FLAG_AVX10_512 0
-#endif
+#define FLAG_AVX10_256 BIT(3)
+#define FLAG_AVX10_512 BIT(4)

 static inline struct aes_gcm_key *
 aes_gcm_key_get(struct crypto_aead *tfm, int flags)
···
                 "generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx",
                 AES_GCM_KEY_AESNI_SIZE, 500);

-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
 /* aes_gcm_algs_vaes_avx10_256 */
 DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256,
                 "generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256",
···
 DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512,
                 "generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512",
                 AES_GCM_KEY_AVX10_SIZE, 800);
-#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */

 static int __init register_avx_algs(void)
 {
···
  * Similarly, the assembler support was added at about the same time.
  * For simplicity, just always check for VAES and VPCLMULQDQ together.
  */
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
         if (!boot_cpu_has(X86_FEATURE_AVX2) ||
             !boot_cpu_has(X86_FEATURE_VAES) ||
             !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) ||
···
                                 ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512));
         if (err)
                 return err;
-#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
+
         return 0;
 }
···
 {
         unregister_skciphers(skcipher_algs_aesni_avx);
         unregister_aeads(aes_gcm_algs_aesni_avx);
-#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ)
         unregister_skciphers(skcipher_algs_vaes_avx2);
         unregister_skciphers(skcipher_algs_vaes_avx512);
         unregister_aeads(aes_gcm_algs_vaes_avx10_256);
         unregister_aeads(aes_gcm_algs_vaes_avx10_512);
-#endif
 }
 #else /* CONFIG_X86_64 */
 static struct aead_alg aes_gcm_algs_aesni[0];
-10
arch/x86/crypto/aria-aesni-avx-asm_64.S
···
         vpshufb t1, t0, t2; \
         vpxor t2, x7, x7;

-#ifdef CONFIG_AS_GFNI
 #define aria_sbox_8way_gfni(x0, x1, x2, x3, \
                             x4, x5, x6, x7, \
                             t0, t1, t2, t3, \
···
         vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \
         vgf2p8affineinvqb $0, t2, x3, x3; \
         vgf2p8affineinvqb $0, t2, x7, x7
-
-#endif /* CONFIG_AS_GFNI */

 #define aria_sbox_8way(x0, x1, x2, x3, \
                        x4, x5, x6, x7, \
···
                              y4, y5, y6, y7, \
                              mem_tmp, 8);

-#ifdef CONFIG_AS_GFNI
 #define aria_fe_gfni(x0, x1, x2, x3, \
                      x4, x5, x6, x7, \
                      y0, y1, y2, y3, \
···
                              y4, y5, y6, y7, \
                              mem_tmp, 8);

-#endif /* CONFIG_AS_GFNI */
-
 /* NB: section is mergeable, all elements must be aligned 16-byte blocks */
 .section .rodata.cst16, "aM", @progbits, 16
 .align 16
···
 .Ltf_hi__x2__and__fwd_aff:
         .octa 0x3F893781E95FE1576CDA64D2BA0CB204

-#ifdef CONFIG_AS_GFNI
 /* AES affine: */
 #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0)
 .Ltf_aff_bitmatrix:
···
                     BV8(0, 0, 0, 0, 0, 1, 0, 0),
                     BV8(0, 0, 0, 0, 0, 0, 1, 0),
                     BV8(0, 0, 0, 0, 0, 0, 0, 1))
-#endif /* CONFIG_AS_GFNI */

 /* 4-bit mask */
 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
···
         RET;
 SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way)

-#ifdef CONFIG_AS_GFNI
 SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way)
         /* input:
          *      %r9: rk
···
         FRAME_END
         RET;
 SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way)
-#endif /* CONFIG_AS_GFNI */
+1 -9
arch/x86/crypto/aria-aesni-avx2-asm_64.S
···
         vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \
         vpxor t0, x7, x7;

-#ifdef CONFIG_AS_GFNI
 #define aria_sbox_8way_gfni(x0, x1, x2, x3, \
                             x4, x5, x6, x7, \
                             t0, t1, t2, t3, \
···
         vgf2p8affineinvqb $0, t2, x3, x3; \
         vgf2p8affineinvqb $0, t2, x7, x7

-#endif /* CONFIG_AS_GFNI */
 #define aria_sbox_8way(x0, x1, x2, x3, \
                        x4, x5, x6, x7, \
                        t0, t1, t2, t3, \
···
         aria_load_state_8way(y0, y1, y2, y3, \
                              y4, y5, y6, y7, \
                              mem_tmp, 8);
-#ifdef CONFIG_AS_GFNI
+
 #define aria_fe_gfni(x0, x1, x2, x3, \
                      x4, x5, x6, x7, \
                      y0, y1, y2, y3, \
···
         aria_load_state_8way(y0, y1, y2, y3, \
                              y4, y5, y6, y7, \
                              mem_tmp, 8);
-#endif /* CONFIG_AS_GFNI */

 .section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32
 .align 32
···
 .Ltf_hi__x2__and__fwd_aff:
         .octa 0x3F893781E95FE1576CDA64D2BA0CB204

-#ifdef CONFIG_AS_GFNI
 .section .rodata.cst8, "aM", @progbits, 8
 .align 8
 /* AES affine: */
···
                     BV8(0, 0, 0, 0, 0, 1, 0, 0),
                     BV8(0, 0, 0, 0, 0, 0, 1, 0),
                     BV8(0, 0, 0, 0, 0, 0, 0, 1))
-
-#endif /* CONFIG_AS_GFNI */

 /* 4-bit mask */
 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
···
         RET;
 SYM_FUNC_END(aria_aesni_avx2_ctr_crypt_32way)

-#ifdef CONFIG_AS_GFNI
 SYM_FUNC_START_LOCAL(__aria_aesni_avx2_gfni_crypt_32way)
         /* input:
          *      %r9: rk
···
         FRAME_END
         RET;
 SYM_FUNC_END(aria_aesni_avx2_gfni_ctr_crypt_32way)
-#endif /* CONFIG_AS_GFNI */
+1 -3
arch/x86/crypto/aria_aesni_avx2_glue.c
···
                                             const u8 *src,
                                             u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_ctr_crypt_32way);
-#ifdef CONFIG_AS_GFNI
 asmlinkage void aria_aesni_avx2_gfni_encrypt_32way(const void *ctx, u8 *dst,
                                                    const u8 *src);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_encrypt_32way);
···
                                                  const u8 *src,
                                                  u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_ctr_crypt_32way);
-#endif /* CONFIG_AS_GFNI */

 static struct aria_avx_ops aria_ops;
···
                 return -ENODEV;
         }

-        if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
+        if (boot_cpu_has(X86_FEATURE_GFNI)) {
                 aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
                 aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
                 aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
+1 -3
arch/x86/crypto/aria_aesni_avx_glue.c
···
                                            const u8 *src,
                                            u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx_ctr_crypt_16way);
-#ifdef CONFIG_AS_GFNI
 asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst,
                                                   const u8 *src);
 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_encrypt_16way);
···
                                                 const u8 *src,
                                                 u8 *keystream, u8 *iv);
 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_ctr_crypt_16way);
-#endif /* CONFIG_AS_GFNI */

 static struct aria_avx_ops aria_ops;
···
                 return -ENODEV;
         }

-        if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) {
+        if (boot_cpu_has(X86_FEATURE_GFNI)) {
                 aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way;
                 aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way;
                 aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
+1 -2
arch/x86/entry/entry_64_fred.S
···
         push %rdi                       /* fred_ss handed in by the caller */
         push %rbp
         pushf
-        mov $__KERNEL_CS, %rax
-        push %rax
+        push $__KERNEL_CS

         /*
          * Unlike the IDT event delivery, FRED _always_ pushes an error code
+2 -4
arch/x86/include/asm/archrandom.h
···
         unsigned int retry = RDRAND_RETRY_LOOPS;
         do {
                 asm volatile("rdrand %[out]"
-                             CC_SET(c)
-                             : CC_OUT(c) (ok), [out] "=r" (*v));
+                             : "=@ccc" (ok), [out] "=r" (*v));
                 if (ok)
                         return true;
         } while (--retry);
···
 {
         bool ok;
         asm volatile("rdseed %[out]"
-                     CC_SET(c)
-                     : CC_OUT(c) (ok), [out] "=r" (*v));
+                     : "=@ccc" (ok), [out] "=r" (*v));
         return ok;
 }
-12
arch/x86/include/asm/asm.h
···
 }
 #endif

-/*
- * Macros to generate condition code outputs from inline assembly,
- * The output operand must be type "bool".
- */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
-# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
-# define CC_OUT(c) "=@cc" #c
-#else
-# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
-# define CC_OUT(c) [_cc_ ## c] "=qm"
-#endif
-
 #ifdef __KERNEL__

 # include <asm/extable_fixup_types.h>
+6 -12
arch/x86/include/asm/bitops.h
···
 {
         bool negative;
         asm_inline volatile(LOCK_PREFIX "xorb %2,%1"
-                CC_SET(s)
-                : CC_OUT(s) (negative), WBYTE_ADDR(addr)
+                : "=@ccs" (negative), WBYTE_ADDR(addr)
                 : "iq" ((char)mask) : "memory");
         return negative;
 }
···
         bool oldbit;

         asm(__ASM_SIZE(bts) " %2,%1"
-            CC_SET(c)
-            : CC_OUT(c) (oldbit)
+            : "=@ccc" (oldbit)
             : ADDR, "Ir" (nr) : "memory");
         return oldbit;
 }
···
         bool oldbit;

         asm volatile(__ASM_SIZE(btr) " %2,%1"
-                     CC_SET(c)
-                     : CC_OUT(c) (oldbit)
+                     : "=@ccc" (oldbit)
                      : ADDR, "Ir" (nr) : "memory");
         return oldbit;
 }
···
         bool oldbit;

         asm volatile(__ASM_SIZE(btc) " %2,%1"
-                     CC_SET(c)
-                     : CC_OUT(c) (oldbit)
+                     : "=@ccc" (oldbit)
                      : ADDR, "Ir" (nr) : "memory");

         return oldbit;
···
         bool oldbit;

         asm volatile("testb %2,%1"
-                     CC_SET(nz)
-                     : CC_OUT(nz) (oldbit)
+                     : "=@ccnz" (oldbit)
                      : "m" (((unsigned char *)addr)[nr >> 3]),
                        "i" (1 << (nr & 7))
                      :"memory");
···
         bool oldbit;

         asm volatile(__ASM_SIZE(bt) " %2,%1"
-                     CC_SET(c)
-                     : CC_OUT(c) (oldbit)
+                     : "=@ccc" (oldbit)
                      : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

         return oldbit;
+4 -8
arch/x86/include/asm/cmpxchg.h
···
 {                                                               \
         volatile u8 *__ptr = (volatile u8 *)(_ptr);             \
         asm_inline volatile(lock "cmpxchgb %[new], %[ptr]"      \
-                     CC_SET(z)                                  \
-                     : CC_OUT(z) (success),                     \
+                     : "=@ccz" (success),                       \
                        [ptr] "+m" (*__ptr),                     \
                        [old] "+a" (__old)                       \
                      : [new] "q" (__new)                        \
···
 {                                                               \
         volatile u16 *__ptr = (volatile u16 *)(_ptr);           \
         asm_inline volatile(lock "cmpxchgw %[new], %[ptr]"      \
-                     CC_SET(z)                                  \
-                     : CC_OUT(z) (success),                     \
+                     : "=@ccz" (success),                       \
                        [ptr] "+m" (*__ptr),                     \
                        [old] "+a" (__old)                       \
                      : [new] "r" (__new)                        \
···
 {                                                               \
         volatile u32 *__ptr = (volatile u32 *)(_ptr);           \
         asm_inline volatile(lock "cmpxchgl %[new], %[ptr]"      \
-                     CC_SET(z)                                  \
-                     : CC_OUT(z) (success),                     \
+                     : "=@ccz" (success),                       \
                        [ptr] "+m" (*__ptr),                     \
                        [old] "+a" (__old)                       \
                      : [new] "r" (__new)                        \
···
 {                                                               \
         volatile u64 *__ptr = (volatile u64 *)(_ptr);           \
         asm_inline volatile(lock "cmpxchgq %[new], %[ptr]"      \
-                     CC_SET(z)                                  \
-                     : CC_OUT(z) (success),                     \
+                     : "=@ccz" (success),                       \
                        [ptr] "+m" (*__ptr),                     \
                        [old] "+a" (__old)                       \
                      : [new] "r" (__new)                        \
+2 -4
arch/x86/include/asm/cmpxchg_32.h
···
         bool ret;                                               \
                                                                 \
         asm_inline volatile(_lock "cmpxchg8b %[ptr]"            \
-                            CC_SET(e)                           \
-                            : CC_OUT(e) (ret),                  \
+                            : "=@ccz" (ret),                    \
                               [ptr] "+m" (*(_ptr)),             \
                               "+a" (o.low), "+d" (o.high)       \
                             : "b" (n.low), "c" (n.high)         \
···
         ALTERNATIVE(_lock_loc                                   \
                     "call cmpxchg8b_emu",                       \
                     _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
-        CC_SET(e)                                               \
-        : ALT_OUTPUT_SP(CC_OUT(e) (ret),                        \
+        : ALT_OUTPUT_SP("=@ccz" (ret),                          \
                         "+a" (o.low), "+d" (o.high))            \
         : "b" (n.low), "c" (n.high),                            \
           [ptr] "S" (_ptr)                                      \
+1 -2
arch/x86/include/asm/cmpxchg_64.h
···
         bool ret;                                               \
                                                                 \
         asm_inline volatile(_lock "cmpxchg16b %[ptr]"           \
-                            CC_SET(e)                           \
-                            : CC_OUT(e) (ret),                  \
+                            : "=@ccz" (ret),                    \
                               [ptr] "+m" (*(_ptr)),             \
                               "+a" (o.low), "+d" (o.high)       \
                             : "b" (n.low), "c" (n.high)         \
+1 -14
arch/x86/include/asm/mtrr.h
···
+/* SPDX-License-Identifier: LGPL-2.0+ */
 /*  Generic MTRR (Memory Type Range Register) ioctls.

     Copyright (C) 1997-1999  Richard Gooch
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Library General Public
-    License as published by the Free Software Foundation; either
-    version 2 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Library General Public License for more details.
-
-    You should have received a copy of the GNU Library General Public
-    License along with this library; if not, write to the Free
-    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

     Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
     The postal address is:
+2 -6
arch/x86/include/asm/mwait.h
···

 static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
 {
-        /* "monitorx %eax, %ecx, %edx" */
-        asm volatile(".byte 0x0f, 0x01, 0xfa"
-                     :: "a" (eax), "c" (ecx), "d"(edx));
+        asm volatile("monitorx" :: "a" (eax), "c" (ecx), "d"(edx));
 }

 static __always_inline void __mwait(u32 eax, u32 ecx)
···
 {
         /* No need for TSA buffer clearing on AMD */

-        /* "mwaitx %eax, %ebx, %ecx" */
-        asm volatile(".byte 0x0f, 0x01, 0xfb"
-                     :: "a" (eax), "b" (ebx), "c" (ecx));
+        asm volatile("mwaitx" :: "a" (eax), "b" (ebx), "c" (ecx));
 }

 /*
+4 -8
arch/x86/include/asm/percpu.h
···
                                                                 \
         asm qual (__pcpu_op_##size("cmpxchg") "%[nval], "       \
                   __percpu_arg([var])                           \
-                  CC_SET(z)                                     \
-                  : CC_OUT(z) (success),                        \
+                  : "=@ccz" (success),                          \
                     [oval] "+a" (pco_old__),                    \
                     [var] "+m" (__my_cpu_var(_var))             \
                   : [nval] __pcpu_reg_##size(, pco_new__)       \
···
         asm_inline qual (                                       \
                 ALTERNATIVE("call this_cpu_cmpxchg8b_emu",      \
                             "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
-                CC_SET(z)                                       \
-                : ALT_OUTPUT_SP(CC_OUT(z) (success),            \
+                : ALT_OUTPUT_SP("=@ccz" (success),              \
                                 [var] "+m" (__my_cpu_var(_var)), \
                                 "+a" (old__.low), "+d" (old__.high)) \
                 : "b" (new__.low), "c" (new__.high),            \
···
         asm_inline qual (                                       \
                 ALTERNATIVE("call this_cpu_cmpxchg16b_emu",     \
                             "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
-                CC_SET(z)                                       \
-                : ALT_OUTPUT_SP(CC_OUT(z) (success),            \
+                : ALT_OUTPUT_SP("=@ccz" (success),              \
                                 [var] "+m" (__my_cpu_var(_var)), \
                                 "+a" (old__.low), "+d" (old__.high)) \
                 : "b" (new__.low), "c" (new__.high),            \
···
         bool oldbit;                                            \
                                                                 \
         asm volatile("btl %[nr], " __percpu_arg([var])          \
-                     CC_SET(c)                                  \
-                     : CC_OUT(c) (oldbit)                       \
+                     : "=@ccc" (oldbit)                         \
                      : [var] "m" (__my_cpu_var(_var)),          \
                        [nr] "rI" (_nr));                        \
         oldbit;                                                 \
+2 -24
arch/x86/include/asm/rmwcc.h
···

 #define __CLOBBERS_MEM(clb...)  "memory", ## clb

-#ifndef __GCC_ASM_FLAG_OUTPUTS__
-
-/* Use asm goto */
-
-#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...)            \
-({                                                              \
-        bool c = false;                                         \
-        asm goto (fullop "; j" #cc " %l[cc_label]"              \
-                        : : [var] "m" (_var), ## __VA_ARGS__    \
-                        : clobbers : cc_label);                 \
-        if (0) {                                                \
-cc_label:       c = true;                                       \
-        }                                                       \
-        c;                                                      \
-})
-
-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */
-
-/* Use flags output or a set instruction */
-
 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...)            \
 ({                                                              \
         bool c;                                                 \
-        asm_inline volatile (fullop CC_SET(cc)                  \
-                        : [var] "+m" (_var), CC_OUT(cc) (c)     \
+        asm_inline volatile (fullop                             \
+                        : [var] "+m" (_var), "=@cc" #cc (c)     \
                         : __VA_ARGS__ : clobbers);              \
         c;                                                      \
 })
-
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */

 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0)                    \
         __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
+1 -2
arch/x86/include/asm/sev.h
···

         /* "pvalidate" mnemonic support in binutils 2.36 and newer */
         asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
-                     CC_SET(c)
-                     : CC_OUT(c) (no_rmpupdate), "=a"(rc)
+                     : "=@ccc"(no_rmpupdate), "=a"(rc)
                      : "a"(vaddr), "c"(rmp_psize), "d"(validate)
                      : "memory", "cc");
+1 -2
arch/x86/include/asm/signal.h
···
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
         bool ret;
-        asm("btl %2,%1" CC_SET(c)
-            : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
+        asm("btl %2,%1" : "=@ccc"(ret) : "m"(*set), "Ir"(_sig-1));
         return ret;
 }
+3 -7
arch/x86/include/asm/special_insns.h
···
          * "rdpkru" instruction.  Places PKRU contents in to EAX,
          * clears EDX and requires that ecx=0.
          */
-        asm volatile(".byte 0x0f,0x01,0xee\n\t"
-                     : "=a" (pkru), "=d" (edx)
-                     : "c" (ecx));
+        asm volatile("rdpkru" : "=a" (pkru), "=d" (edx) : "c" (ecx));
         return pkru;
 }
···
          * "wrpkru" instruction.  Loads contents in EAX to PKRU,
          * requires that ecx = edx = 0.
          */
-        asm volatile(".byte 0x0f,0x01,0xef\n\t"
-                     : : "a" (pkru), "c"(ecx), "d"(edx));
+        asm volatile("wrpkru" : : "a" (pkru), "c"(ecx), "d"(edx));
 }

 #else
···
          * See movdir64b()'s comment on operand specification.
          */
         asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90"
-                     CC_SET(z)
-                     : CC_OUT(z) (zf), "+m" (*__dst)
+                     : "=@ccz" (zf), "+m" (*__dst)
                      : "m" (*__src), "a" (__dst), "d" (__src));

         /* Submission failure is indicated via EFLAGS.ZF=1 */
+3 -4
arch/x86/include/asm/uaccess.h
··· 378 378 asm_goto_output("\n" \ 379 379 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 380 380 _ASM_EXTABLE_UA(1b, %l[label]) \ 381 - : CC_OUT(z) (success), \ 381 + : "=@ccz" (success), \ 382 382 [ptr] "+m" (*_ptr), \ 383 383 [old] "+a" (__old) \ 384 384 : [new] ltype (__new) \ ··· 397 397 asm_goto_output("\n" \ 398 398 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ 399 399 _ASM_EXTABLE_UA(1b, %l[label]) \ 400 - : CC_OUT(z) (success), \ 400 + : "=@ccz" (success), \ 401 401 "+A" (__old), \ 402 402 [ptr] "+m" (*_ptr) \ 403 403 : "b" ((u32)__new), \ ··· 417 417 __typeof__(*(_ptr)) __new = (_new); \ 418 418 asm volatile("\n" \ 419 419 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 420 - CC_SET(z) \ 421 420 "2:\n" \ 422 421 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \ 423 422 %[errout]) \ 424 - : CC_OUT(z) (success), \ 423 + : "=@ccz" (success), \ 425 424 [errout] "+r" (__err), \ 426 425 [ptr] "+m" (*_ptr), \ 427 426 [old] "+a" (__old) \
+1 -14
arch/x86/kernel/cpu/mtrr/cleanup.c
··· 1 + // SPDX-License-Identifier: LGPL-2.0+ 1 2 /* 2 3 * MTRR (Memory Type Range Register) cleanup 3 4 * 4 5 * Copyright (C) 2009 Yinghai Lu 5 - * 6 - * This library is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU Library General Public 8 - * License as published by the Free Software Foundation; either 9 - * version 2 of the License, or (at your option) any later version. 10 - * 11 - * This library is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 - * Library General Public License for more details. 15 - * 16 - * You should have received a copy of the GNU Library General Public 17 - * License along with this library; if not, write to the Free 18 - * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 6 */ 20 7 #include <linux/init.h> 21 8 #include <linux/pci.h>
+1 -14
arch/x86/kernel/cpu/mtrr/mtrr.c
··· 1 + // SPDX-License-Identifier: LGPL-2.0+ 1 2 /* Generic MTRR (Memory Type Range Register) driver. 2 3 3 4 Copyright (C) 1997-2000 Richard Gooch 4 5 Copyright (c) 2002 Patrick Mochel 5 - 6 - This library is free software; you can redistribute it and/or 7 - modify it under the terms of the GNU Library General Public 8 - License as published by the Free Software Foundation; either 9 - version 2 of the License, or (at your option) any later version. 10 - 11 - This library is distributed in the hope that it will be useful, 12 - but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 - Library General Public License for more details. 15 - 16 - You should have received a copy of the GNU Library General Public 17 - License along with this library; if not, write to the Free 18 - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 6 20 7 Richard Gooch may be reached by email at rgooch@atnf.csiro.au 21 8 The postal address is:
+3 -3
arch/x86/kernel/cpu/sgx/encls.h
···
 ({                                                              \
         int ret;                                                \
         asm volatile(                                           \
-        "1: .byte 0x0f, 0x01, 0xcf;\n\t"                        \
+        "1: encls\n"                                            \
         "2:\n"                                                  \
         _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX)            \
         : "=a"(ret)                                             \
···
 ({                                                              \
         int ret;                                                \
         asm volatile(                                           \
-        "1: .byte 0x0f, 0x01, 0xcf;\n\t"                        \
-        "   xor %%eax,%%eax;\n"                                 \
+        "1: encls\n\t"                                          \
+        "xor %%eax,%%eax\n"                                     \
         "2:\n"                                                  \
         _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX)            \
         : "=a"(ret), "=b"(rbx_out)                              \
-12
tools/arch/x86/include/asm/asm.h
···

 #endif

-/*
- * Macros to generate condition code outputs from inline assembly,
- * The output operand must be type "bool".
- */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
-# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
-# define CC_OUT(c) "=@cc" #c
-#else
-# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
-# define CC_OUT(c) [_cc_ ## c] "=qm"
-#endif
-
 #ifdef __KERNEL__

 /* Exception table entry */
+1 -1
tools/perf/bench/find-bit-bench.c
···
                 accumulator++;
         }

-#if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__)
+#if defined(__i386__) || defined(__x86_64__)
 static bool asm_test_bit(long nr, const unsigned long *addr)
 {
         bool oldbit;