Merge tag 'x86_cleanups_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Borislav Petkov:

- Simplify inline asm flag output operands now that the minimum
  compiler version supports the =@ccCOND syntax (a short before/after
  sketch follows this list)

- Remove a bunch of AS_* Kconfig symbols which detect assembler support
for various instruction mnemonics now that the minimum assembler
version supports them all

- The usual cleanups all over the place
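
For readers who have not used the constraint before, here is a minimal
before/after sketch of the conversion (illustrative only, not lifted
verbatim from the series; 'word', 'bit' and 'carry' are made-up names):

    /* Old style, via the CC_SET()/CC_OUT() helpers from <asm/asm.h>: */
    bool carry;
    asm volatile("btl %2,%1" CC_SET(c)
                 : CC_OUT(c) (carry)
                 : "m" (*word), "Ir" (bit));

    /* New style, using the compiler's flag output constraints directly: */
    asm volatile("btl %2,%1"
                 : "=@ccc" (carry)   /* CF (carry flag) as a bool output */
                 : "m" (*word), "Ir" (bit));

The "=@cc<cond>" form exists for every condition code (c, z, nz, s, ...),
which is what lets CC_SET()/CC_OUT() be deleted outright.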

* tag 'x86_cleanups_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/asm: Remove code depending on __GCC_ASM_FLAG_OUTPUTS__
x86/sgx: Use ENCLS mnemonic in <kernel/cpu/sgx/encls.h>
x86/mtrr: Remove license boilerplate text with bad FSF address
x86/asm: Use RDPKRU and WRPKRU mnemonics in <asm/special_insns.h>
x86/idle: Use MONITORX and MWAITX mnemonics in <asm/mwait.h>
x86/entry/fred: Push __KERNEL_CS directly
x86/kconfig: Remove CONFIG_AS_AVX512
crypto: x86 - Remove CONFIG_AS_VPCLMULQDQ
crypto: x86 - Remove CONFIG_AS_VAES
crypto: x86 - Remove CONFIG_AS_GFNI
x86/kconfig: Drop unused and needless config X86_64_SMP

+55 -239
-4
arch/x86/Kconfig
··· 412 def_bool y 413 depends on INTEL_IOMMU && ACPI 414 415 - config X86_64_SMP 416 - def_bool y 417 - depends on X86_64 && SMP 418 - 419 config ARCH_SUPPORTS_UPROBES 420 def_bool y 421
··· 412 def_bool y 413 depends on INTEL_IOMMU && ACPI 414 415 config ARCH_SUPPORTS_UPROBES 416 def_bool y 417
-20
arch/x86/Kconfig.assembler
··· 1 # SPDX-License-Identifier: GPL-2.0 2 # Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 3 4 - config AS_AVX512 5 - def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5) 6 - help 7 - Supported by binutils >= 2.25 and LLVM integrated assembler 8 - 9 - config AS_GFNI 10 - def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2) 11 - help 12 - Supported by binutils >= 2.30 and LLVM integrated assembler 13 - 14 - config AS_VAES 15 - def_bool $(as-instr,vaesenc %ymm0$(comma)%ymm1$(comma)%ymm2) 16 - help 17 - Supported by binutils >= 2.30 and LLVM integrated assembler 18 - 19 - config AS_VPCLMULQDQ 20 - def_bool $(as-instr,vpclmulqdq \$0x10$(comma)%ymm0$(comma)%ymm1$(comma)%ymm2) 21 - help 22 - Supported by binutils >= 2.30 and LLVM integrated assembler 23 - 24 config AS_WRUSS 25 def_bool $(as-instr64,wrussq %rax$(comma)(%rbx)) 26 help
··· 1 # SPDX-License-Identifier: GPL-2.0 2 # Copyright (C) 2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 3 4 config AS_WRUSS 5 def_bool $(as-instr64,wrussq %rax$(comma)(%rbx)) 6 help
+1 -1
arch/x86/boot/bitops.h
··· 27 bool v; 28 const u32 *p = addr; 29 30 - asm("btl %2,%1" CC_SET(c) : CC_OUT(c) (v) : "m" (*p), "Ir" (nr)); 31 return v; 32 } 33
··· 27 bool v; 28 const u32 *p = addr; 29 30 + asm("btl %2,%1" : "=@ccc" (v) : "m" (*p), "Ir" (nr)); 31 return v; 32 } 33
+4 -4
arch/x86/boot/boot.h
··· 155 static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len) 156 { 157 bool diff; 158 - asm volatile("fs repe cmpsb" CC_SET(nz) 159 - : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); 160 return diff; 161 } 162 static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len) 163 { 164 bool diff; 165 - asm volatile("gs repe cmpsb" CC_SET(nz) 166 - : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); 167 return diff; 168 } 169
··· 155 static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len) 156 { 157 bool diff; 158 + asm volatile("fs repe cmpsb" 159 + : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 160 return diff; 161 } 162 static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len) 163 { 164 bool diff; 165 + asm volatile("gs repe cmpsb" 166 + : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 167 return diff; 168 } 169
+2 -2
arch/x86/boot/string.c
··· 32 int memcmp(const void *s1, const void *s2, size_t len) 33 { 34 bool diff; 35 - asm("repe cmpsb" CC_SET(nz) 36 - : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len)); 37 return diff; 38 } 39
··· 32 int memcmp(const void *s1, const void *s2, size_t len) 33 { 34 bool diff; 35 + asm("repe cmpsb" 36 + : "=@ccnz" (diff), "+D" (s1), "+S" (s2), "+c" (len)); 37 return diff; 38 } 39
+1 -1
arch/x86/crypto/Kconfig
··· 306 307 config CRYPTO_ARIA_GFNI_AVX512_X86_64 308 tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)" 309 - depends on 64BIT && AS_GFNI 310 select CRYPTO_SKCIPHER 311 select CRYPTO_ALGAPI 312 select CRYPTO_ARIA
··· 306 307 config CRYPTO_ARIA_GFNI_AVX512_X86_64 308 tristate "Ciphers: ARIA with modes: ECB, CTR (AVX512/GFNI)" 309 + depends on 64BIT 310 select CRYPTO_SKCIPHER 311 select CRYPTO_ALGAPI 312 select CRYPTO_ARIA
+2 -4
arch/x86/crypto/Makefile
··· 46 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o 47 aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \ 48 aes-gcm-aesni-x86_64.o \ 49 - aes-xts-avx-x86_64.o 50 - ifeq ($(CONFIG_AS_VAES)$(CONFIG_AS_VPCLMULQDQ),yy) 51 - aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o 52 - endif 53 54 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o 55 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
··· 46 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o 47 aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \ 48 aes-gcm-aesni-x86_64.o \ 49 + aes-xts-avx-x86_64.o \ 50 + aes-gcm-avx10-x86_64.o 51 52 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o 53 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
-2
arch/x86/crypto/aes-ctr-avx-x86_64.S
··· 552 _aes_ctr_crypt 1 553 SYM_FUNC_END(aes_xctr_crypt_aesni_avx) 554 555 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 556 .set VL, 32 557 .set USE_AVX512, 0 558 SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2) ··· 569 SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512) 570 _aes_ctr_crypt 1 571 SYM_FUNC_END(aes_xctr_crypt_vaes_avx512) 572 - #endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ
··· 552 _aes_ctr_crypt 1 553 SYM_FUNC_END(aes_xctr_crypt_aesni_avx) 554 555 .set VL, 32 556 .set USE_AVX512, 0 557 SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2) ··· 570 SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx512) 571 _aes_ctr_crypt 1 572 SYM_FUNC_END(aes_xctr_crypt_vaes_avx512)
-2
arch/x86/crypto/aes-xts-avx-x86_64.S
··· 886 _aes_xts_crypt 0 887 SYM_FUNC_END(aes_xts_decrypt_aesni_avx) 888 889 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 890 .set VL, 32 891 .set USE_AVX512, 0 892 SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2) ··· 903 SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512) 904 _aes_xts_crypt 0 905 SYM_FUNC_END(aes_xts_decrypt_vaes_avx512) 906 - #endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
··· 886 _aes_xts_crypt 0 887 SYM_FUNC_END(aes_xts_decrypt_aesni_avx) 888 889 .set VL, 32 890 .set USE_AVX512, 0 891 SYM_TYPED_FUNC_START(aes_xts_encrypt_vaes_avx2) ··· 904 SYM_TYPED_FUNC_START(aes_xts_decrypt_vaes_avx512) 905 _aes_xts_crypt 0 906 SYM_FUNC_END(aes_xts_decrypt_vaes_avx512)
+3 -19
arch/x86/crypto/aesni-intel_glue.c
··· 828 }} 829 830 DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500); 831 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 832 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600); 833 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800); 834 - #endif 835 836 /* The common part of the x86_64 AES-GCM key struct */ 837 struct aes_gcm_key { ··· 910 #define FLAG_RFC4106 BIT(0) 911 #define FLAG_ENC BIT(1) 912 #define FLAG_AVX BIT(2) 913 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 914 - # define FLAG_AVX10_256 BIT(3) 915 - # define FLAG_AVX10_512 BIT(4) 916 - #else 917 - /* 918 - * This should cause all calls to the AVX10 assembly functions to be 919 - * optimized out, avoiding the need to ifdef each call individually. 920 - */ 921 - # define FLAG_AVX10_256 0 922 - # define FLAG_AVX10_512 0 923 - #endif 924 925 static inline struct aes_gcm_key * 926 aes_gcm_key_get(struct crypto_aead *tfm, int flags) ··· 1508 "generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx", 1509 AES_GCM_KEY_AESNI_SIZE, 500); 1510 1511 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 1512 /* aes_gcm_algs_vaes_avx10_256 */ 1513 DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256, 1514 "generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256", ··· 1517 DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512, 1518 "generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512", 1519 AES_GCM_KEY_AVX10_SIZE, 800); 1520 - #endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */ 1521 1522 static int __init register_avx_algs(void) 1523 { ··· 1538 * Similarly, the assembler support was added at about the same time. 1539 * For simplicity, just always check for VAES and VPCLMULQDQ together. 1540 */ 1541 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 1542 if (!boot_cpu_has(X86_FEATURE_AVX2) || 1543 !boot_cpu_has(X86_FEATURE_VAES) || 1544 !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) || ··· 1578 ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512)); 1579 if (err) 1580 return err; 1581 - #endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */ 1582 return 0; 1583 } 1584 ··· 1593 { 1594 unregister_skciphers(skcipher_algs_aesni_avx); 1595 unregister_aeads(aes_gcm_algs_aesni_avx); 1596 - #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) 1597 unregister_skciphers(skcipher_algs_vaes_avx2); 1598 unregister_skciphers(skcipher_algs_vaes_avx512); 1599 unregister_aeads(aes_gcm_algs_vaes_avx10_256); 1600 unregister_aeads(aes_gcm_algs_vaes_avx10_512); 1601 - #endif 1602 } 1603 #else /* CONFIG_X86_64 */ 1604 static struct aead_alg aes_gcm_algs_aesni[0];
··· 828 }} 829 830 DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500); 831 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600); 832 DEFINE_AVX_SKCIPHER_ALGS(vaes_avx512, "vaes-avx512", 800); 833 834 /* The common part of the x86_64 AES-GCM key struct */ 835 struct aes_gcm_key { ··· 912 #define FLAG_RFC4106 BIT(0) 913 #define FLAG_ENC BIT(1) 914 #define FLAG_AVX BIT(2) 915 + #define FLAG_AVX10_256 BIT(3) 916 + #define FLAG_AVX10_512 BIT(4) 917 918 static inline struct aes_gcm_key * 919 aes_gcm_key_get(struct crypto_aead *tfm, int flags) ··· 1519 "generic-gcm-aesni-avx", "rfc4106-gcm-aesni-avx", 1520 AES_GCM_KEY_AESNI_SIZE, 500); 1521 1522 /* aes_gcm_algs_vaes_avx10_256 */ 1523 DEFINE_GCM_ALGS(vaes_avx10_256, FLAG_AVX10_256, 1524 "generic-gcm-vaes-avx10_256", "rfc4106-gcm-vaes-avx10_256", ··· 1529 DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512, 1530 "generic-gcm-vaes-avx10_512", "rfc4106-gcm-vaes-avx10_512", 1531 AES_GCM_KEY_AVX10_SIZE, 800); 1532 1533 static int __init register_avx_algs(void) 1534 { ··· 1551 * Similarly, the assembler support was added at about the same time. 1552 * For simplicity, just always check for VAES and VPCLMULQDQ together. 1553 */ 1554 if (!boot_cpu_has(X86_FEATURE_AVX2) || 1555 !boot_cpu_has(X86_FEATURE_VAES) || 1556 !boot_cpu_has(X86_FEATURE_VPCLMULQDQ) || ··· 1592 ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512)); 1593 if (err) 1594 return err; 1595 + 1596 return 0; 1597 } 1598 ··· 1607 { 1608 unregister_skciphers(skcipher_algs_aesni_avx); 1609 unregister_aeads(aes_gcm_algs_aesni_avx); 1610 unregister_skciphers(skcipher_algs_vaes_avx2); 1611 unregister_skciphers(skcipher_algs_vaes_avx512); 1612 unregister_aeads(aes_gcm_algs_vaes_avx10_256); 1613 unregister_aeads(aes_gcm_algs_vaes_avx10_512); 1614 } 1615 #else /* CONFIG_X86_64 */ 1616 static struct aead_alg aes_gcm_algs_aesni[0];
-10
arch/x86/crypto/aria-aesni-avx-asm_64.S
··· 295 vpshufb t1, t0, t2; \ 296 vpxor t2, x7, x7; 297 298 - #ifdef CONFIG_AS_GFNI 299 #define aria_sbox_8way_gfni(x0, x1, x2, x3, \ 300 x4, x5, x6, x7, \ 301 t0, t1, t2, t3, \ ··· 316 vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \ 317 vgf2p8affineinvqb $0, t2, x3, x3; \ 318 vgf2p8affineinvqb $0, t2, x7, x7 319 - 320 - #endif /* CONFIG_AS_GFNI */ 321 322 #define aria_sbox_8way(x0, x1, x2, x3, \ 323 x4, x5, x6, x7, \ ··· 558 y4, y5, y6, y7, \ 559 mem_tmp, 8); 560 561 - #ifdef CONFIG_AS_GFNI 562 #define aria_fe_gfni(x0, x1, x2, x3, \ 563 x4, x5, x6, x7, \ 564 y0, y1, y2, y3, \ ··· 715 y4, y5, y6, y7, \ 716 mem_tmp, 8); 717 718 - #endif /* CONFIG_AS_GFNI */ 719 - 720 /* NB: section is mergeable, all elements must be aligned 16-byte blocks */ 721 .section .rodata.cst16, "aM", @progbits, 16 722 .align 16 ··· 766 .Ltf_hi__x2__and__fwd_aff: 767 .octa 0x3F893781E95FE1576CDA64D2BA0CB204 768 769 - #ifdef CONFIG_AS_GFNI 770 /* AES affine: */ 771 #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) 772 .Ltf_aff_bitmatrix: ··· 864 BV8(0, 0, 0, 0, 0, 1, 0, 0), 865 BV8(0, 0, 0, 0, 0, 0, 1, 0), 866 BV8(0, 0, 0, 0, 0, 0, 0, 1)) 867 - #endif /* CONFIG_AS_GFNI */ 868 869 /* 4-bit mask */ 870 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 ··· 1132 RET; 1133 SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way) 1134 1135 - #ifdef CONFIG_AS_GFNI 1136 SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way) 1137 /* input: 1138 * %r9: rk ··· 1350 FRAME_END 1351 RET; 1352 SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way) 1353 - #endif /* CONFIG_AS_GFNI */
··· 295 vpshufb t1, t0, t2; \ 296 vpxor t2, x7, x7; 297 298 #define aria_sbox_8way_gfni(x0, x1, x2, x3, \ 299 x4, x5, x6, x7, \ 300 t0, t1, t2, t3, \ ··· 317 vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \ 318 vgf2p8affineinvqb $0, t2, x3, x3; \ 319 vgf2p8affineinvqb $0, t2, x7, x7 320 321 #define aria_sbox_8way(x0, x1, x2, x3, \ 322 x4, x5, x6, x7, \ ··· 561 y4, y5, y6, y7, \ 562 mem_tmp, 8); 563 564 #define aria_fe_gfni(x0, x1, x2, x3, \ 565 x4, x5, x6, x7, \ 566 y0, y1, y2, y3, \ ··· 719 y4, y5, y6, y7, \ 720 mem_tmp, 8); 721 722 /* NB: section is mergeable, all elements must be aligned 16-byte blocks */ 723 .section .rodata.cst16, "aM", @progbits, 16 724 .align 16 ··· 772 .Ltf_hi__x2__and__fwd_aff: 773 .octa 0x3F893781E95FE1576CDA64D2BA0CB204 774 775 /* AES affine: */ 776 #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) 777 .Ltf_aff_bitmatrix: ··· 871 BV8(0, 0, 0, 0, 0, 1, 0, 0), 872 BV8(0, 0, 0, 0, 0, 0, 1, 0), 873 BV8(0, 0, 0, 0, 0, 0, 0, 1)) 874 875 /* 4-bit mask */ 876 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 ··· 1140 RET; 1141 SYM_FUNC_END(aria_aesni_avx_ctr_crypt_16way) 1142 1143 SYM_FUNC_START_LOCAL(__aria_aesni_avx_gfni_crypt_16way) 1144 /* input: 1145 * %r9: rk ··· 1359 FRAME_END 1360 RET; 1361 SYM_FUNC_END(aria_aesni_avx_gfni_ctr_crypt_16way)
+1 -9
arch/x86/crypto/aria-aesni-avx2-asm_64.S
··· 302 vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \ 303 vpxor t0, x7, x7; 304 305 - #ifdef CONFIG_AS_GFNI 306 #define aria_sbox_8way_gfni(x0, x1, x2, x3, \ 307 x4, x5, x6, x7, \ 308 t0, t1, t2, t3, \ ··· 324 vgf2p8affineinvqb $0, t2, x3, x3; \ 325 vgf2p8affineinvqb $0, t2, x7, x7 326 327 - #endif /* CONFIG_AS_GFNI */ 328 #define aria_sbox_8way(x0, x1, x2, x3, \ 329 x4, x5, x6, x7, \ 330 t0, t1, t2, t3, \ ··· 596 aria_load_state_8way(y0, y1, y2, y3, \ 597 y4, y5, y6, y7, \ 598 mem_tmp, 8); 599 - #ifdef CONFIG_AS_GFNI 600 #define aria_fe_gfni(x0, x1, x2, x3, \ 601 x4, x5, x6, x7, \ 602 y0, y1, y2, y3, \ ··· 750 aria_load_state_8way(y0, y1, y2, y3, \ 751 y4, y5, y6, y7, \ 752 mem_tmp, 8); 753 - #endif /* CONFIG_AS_GFNI */ 754 755 .section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32 756 .align 32 ··· 803 .Ltf_hi__x2__and__fwd_aff: 804 .octa 0x3F893781E95FE1576CDA64D2BA0CB204 805 806 - #ifdef CONFIG_AS_GFNI 807 .section .rodata.cst8, "aM", @progbits, 8 808 .align 8 809 /* AES affine: */ ··· 863 BV8(0, 0, 0, 0, 0, 1, 0, 0), 864 BV8(0, 0, 0, 0, 0, 0, 1, 0), 865 BV8(0, 0, 0, 0, 0, 0, 0, 1)) 866 - 867 - #endif /* CONFIG_AS_GFNI */ 868 869 /* 4-bit mask */ 870 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 ··· 1213 RET; 1214 SYM_FUNC_END(aria_aesni_avx2_ctr_crypt_32way) 1215 1216 - #ifdef CONFIG_AS_GFNI 1217 SYM_FUNC_START_LOCAL(__aria_aesni_avx2_gfni_crypt_32way) 1218 /* input: 1219 * %r9: rk ··· 1431 FRAME_END 1432 RET; 1433 SYM_FUNC_END(aria_aesni_avx2_gfni_ctr_crypt_32way) 1434 - #endif /* CONFIG_AS_GFNI */
··· 302 vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \ 303 vpxor t0, x7, x7; 304 305 #define aria_sbox_8way_gfni(x0, x1, x2, x3, \ 306 x4, x5, x6, x7, \ 307 t0, t1, t2, t3, \ ··· 325 vgf2p8affineinvqb $0, t2, x3, x3; \ 326 vgf2p8affineinvqb $0, t2, x7, x7 327 328 #define aria_sbox_8way(x0, x1, x2, x3, \ 329 x4, x5, x6, x7, \ 330 t0, t1, t2, t3, \ ··· 598 aria_load_state_8way(y0, y1, y2, y3, \ 599 y4, y5, y6, y7, \ 600 mem_tmp, 8); 601 + 602 #define aria_fe_gfni(x0, x1, x2, x3, \ 603 x4, x5, x6, x7, \ 604 y0, y1, y2, y3, \ ··· 752 aria_load_state_8way(y0, y1, y2, y3, \ 753 y4, y5, y6, y7, \ 754 mem_tmp, 8); 755 756 .section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32 757 .align 32 ··· 806 .Ltf_hi__x2__and__fwd_aff: 807 .octa 0x3F893781E95FE1576CDA64D2BA0CB204 808 809 .section .rodata.cst8, "aM", @progbits, 8 810 .align 8 811 /* AES affine: */ ··· 867 BV8(0, 0, 0, 0, 0, 1, 0, 0), 868 BV8(0, 0, 0, 0, 0, 0, 1, 0), 869 BV8(0, 0, 0, 0, 0, 0, 0, 1)) 870 871 /* 4-bit mask */ 872 .section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4 ··· 1219 RET; 1220 SYM_FUNC_END(aria_aesni_avx2_ctr_crypt_32way) 1221 1222 SYM_FUNC_START_LOCAL(__aria_aesni_avx2_gfni_crypt_32way) 1223 /* input: 1224 * %r9: rk ··· 1438 FRAME_END 1439 RET; 1440 SYM_FUNC_END(aria_aesni_avx2_gfni_ctr_crypt_32way)
+1 -3
arch/x86/crypto/aria_aesni_avx2_glue.c
··· 26 const u8 *src, 27 u8 *keystream, u8 *iv); 28 EXPORT_SYMBOL_GPL(aria_aesni_avx2_ctr_crypt_32way); 29 - #ifdef CONFIG_AS_GFNI 30 asmlinkage void aria_aesni_avx2_gfni_encrypt_32way(const void *ctx, u8 *dst, 31 const u8 *src); 32 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_encrypt_32way); ··· 36 const u8 *src, 37 u8 *keystream, u8 *iv); 38 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_ctr_crypt_32way); 39 - #endif /* CONFIG_AS_GFNI */ 40 41 static struct aria_avx_ops aria_ops; 42 ··· 211 return -ENODEV; 212 } 213 214 - if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) { 215 aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way; 216 aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way; 217 aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
··· 26 const u8 *src, 27 u8 *keystream, u8 *iv); 28 EXPORT_SYMBOL_GPL(aria_aesni_avx2_ctr_crypt_32way); 29 asmlinkage void aria_aesni_avx2_gfni_encrypt_32way(const void *ctx, u8 *dst, 30 const u8 *src); 31 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_encrypt_32way); ··· 37 const u8 *src, 38 u8 *keystream, u8 *iv); 39 EXPORT_SYMBOL_GPL(aria_aesni_avx2_gfni_ctr_crypt_32way); 40 41 static struct aria_avx_ops aria_ops; 42 ··· 213 return -ENODEV; 214 } 215 216 + if (boot_cpu_has(X86_FEATURE_GFNI)) { 217 aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way; 218 aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way; 219 aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
+1 -3
arch/x86/crypto/aria_aesni_avx_glue.c
··· 26 const u8 *src, 27 u8 *keystream, u8 *iv); 28 EXPORT_SYMBOL_GPL(aria_aesni_avx_ctr_crypt_16way); 29 - #ifdef CONFIG_AS_GFNI 30 asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst, 31 const u8 *src); 32 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_encrypt_16way); ··· 36 const u8 *src, 37 u8 *keystream, u8 *iv); 38 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_ctr_crypt_16way); 39 - #endif /* CONFIG_AS_GFNI */ 40 41 static struct aria_avx_ops aria_ops; 42 ··· 197 return -ENODEV; 198 } 199 200 - if (boot_cpu_has(X86_FEATURE_GFNI) && IS_ENABLED(CONFIG_AS_GFNI)) { 201 aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way; 202 aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way; 203 aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
··· 26 const u8 *src, 27 u8 *keystream, u8 *iv); 28 EXPORT_SYMBOL_GPL(aria_aesni_avx_ctr_crypt_16way); 29 asmlinkage void aria_aesni_avx_gfni_encrypt_16way(const void *ctx, u8 *dst, 30 const u8 *src); 31 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_encrypt_16way); ··· 37 const u8 *src, 38 u8 *keystream, u8 *iv); 39 EXPORT_SYMBOL_GPL(aria_aesni_avx_gfni_ctr_crypt_16way); 40 41 static struct aria_avx_ops aria_ops; 42 ··· 199 return -ENODEV; 200 } 201 202 + if (boot_cpu_has(X86_FEATURE_GFNI)) { 203 aria_ops.aria_encrypt_16way = aria_aesni_avx_gfni_encrypt_16way; 204 aria_ops.aria_decrypt_16way = aria_aesni_avx_gfni_decrypt_16way; 205 aria_ops.aria_ctr_crypt_16way = aria_aesni_avx_gfni_ctr_crypt_16way;
+1 -2
arch/x86/entry/entry_64_fred.S
··· 97 push %rdi /* fred_ss handed in by the caller */ 98 push %rbp 99 pushf 100 - mov $__KERNEL_CS, %rax 101 - push %rax 102 103 /* 104 * Unlike the IDT event delivery, FRED _always_ pushes an error code
··· 97 push %rdi /* fred_ss handed in by the caller */ 98 push %rbp 99 pushf 100 + push $__KERNEL_CS 101 102 /* 103 * Unlike the IDT event delivery, FRED _always_ pushes an error code
+2 -4
arch/x86/include/asm/archrandom.h
··· 23 unsigned int retry = RDRAND_RETRY_LOOPS; 24 do { 25 asm volatile("rdrand %[out]" 26 - CC_SET(c) 27 - : CC_OUT(c) (ok), [out] "=r" (*v)); 28 if (ok) 29 return true; 30 } while (--retry); ··· 34 { 35 bool ok; 36 asm volatile("rdseed %[out]" 37 - CC_SET(c) 38 - : CC_OUT(c) (ok), [out] "=r" (*v)); 39 return ok; 40 } 41
··· 23 unsigned int retry = RDRAND_RETRY_LOOPS; 24 do { 25 asm volatile("rdrand %[out]" 26 + : "=@ccc" (ok), [out] "=r" (*v)); 27 if (ok) 28 return true; 29 } while (--retry); ··· 35 { 36 bool ok; 37 asm volatile("rdseed %[out]" 38 + : "=@ccc" (ok), [out] "=r" (*v)); 39 return ok; 40 } 41
-12
arch/x86/include/asm/asm.h
··· 122 } 123 #endif 124 125 - /* 126 - * Macros to generate condition code outputs from inline assembly, 127 - * The output operand must be type "bool". 128 - */ 129 - #ifdef __GCC_ASM_FLAG_OUTPUTS__ 130 - # define CC_SET(c) "\n\t/* output condition code " #c "*/\n" 131 - # define CC_OUT(c) "=@cc" #c 132 - #else 133 - # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n" 134 - # define CC_OUT(c) [_cc_ ## c] "=qm" 135 - #endif 136 - 137 #ifdef __KERNEL__ 138 139 # include <asm/extable_fixup_types.h>
··· 122 } 123 #endif 124 125 #ifdef __KERNEL__ 126 127 # include <asm/extable_fixup_types.h>
+6 -12
arch/x86/include/asm/bitops.h
··· 99 { 100 bool negative; 101 asm_inline volatile(LOCK_PREFIX "xorb %2,%1" 102 - CC_SET(s) 103 - : CC_OUT(s) (negative), WBYTE_ADDR(addr) 104 : "iq" ((char)mask) : "memory"); 105 return negative; 106 } ··· 148 bool oldbit; 149 150 asm(__ASM_SIZE(bts) " %2,%1" 151 - CC_SET(c) 152 - : CC_OUT(c) (oldbit) 153 : ADDR, "Ir" (nr) : "memory"); 154 return oldbit; 155 } ··· 173 bool oldbit; 174 175 asm volatile(__ASM_SIZE(btr) " %2,%1" 176 - CC_SET(c) 177 - : CC_OUT(c) (oldbit) 178 : ADDR, "Ir" (nr) : "memory"); 179 return oldbit; 180 } ··· 184 bool oldbit; 185 186 asm volatile(__ASM_SIZE(btc) " %2,%1" 187 - CC_SET(c) 188 - : CC_OUT(c) (oldbit) 189 : ADDR, "Ir" (nr) : "memory"); 190 191 return oldbit; ··· 207 bool oldbit; 208 209 asm volatile("testb %2,%1" 210 - CC_SET(nz) 211 - : CC_OUT(nz) (oldbit) 212 : "m" (((unsigned char *)addr)[nr >> 3]), 213 "i" (1 << (nr & 7)) 214 :"memory"); ··· 220 bool oldbit; 221 222 asm volatile(__ASM_SIZE(bt) " %2,%1" 223 - CC_SET(c) 224 - : CC_OUT(c) (oldbit) 225 : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); 226 227 return oldbit;
··· 99 { 100 bool negative; 101 asm_inline volatile(LOCK_PREFIX "xorb %2,%1" 102 + : "=@ccs" (negative), WBYTE_ADDR(addr) 103 : "iq" ((char)mask) : "memory"); 104 return negative; 105 } ··· 149 bool oldbit; 150 151 asm(__ASM_SIZE(bts) " %2,%1" 152 + : "=@ccc" (oldbit) 153 : ADDR, "Ir" (nr) : "memory"); 154 return oldbit; 155 } ··· 175 bool oldbit; 176 177 asm volatile(__ASM_SIZE(btr) " %2,%1" 178 + : "=@ccc" (oldbit) 179 : ADDR, "Ir" (nr) : "memory"); 180 return oldbit; 181 } ··· 187 bool oldbit; 188 189 asm volatile(__ASM_SIZE(btc) " %2,%1" 190 + : "=@ccc" (oldbit) 191 : ADDR, "Ir" (nr) : "memory"); 192 193 return oldbit; ··· 211 bool oldbit; 212 213 asm volatile("testb %2,%1" 214 + : "=@ccnz" (oldbit) 215 : "m" (((unsigned char *)addr)[nr >> 3]), 216 "i" (1 << (nr & 7)) 217 :"memory"); ··· 225 bool oldbit; 226 227 asm volatile(__ASM_SIZE(bt) " %2,%1" 228 + : "=@ccc" (oldbit) 229 : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory"); 230 231 return oldbit;
+4 -8
arch/x86/include/asm/cmpxchg.h
··· 166 { \ 167 volatile u8 *__ptr = (volatile u8 *)(_ptr); \ 168 asm_inline volatile(lock "cmpxchgb %[new], %[ptr]" \ 169 - CC_SET(z) \ 170 - : CC_OUT(z) (success), \ 171 [ptr] "+m" (*__ptr), \ 172 [old] "+a" (__old) \ 173 : [new] "q" (__new) \ ··· 177 { \ 178 volatile u16 *__ptr = (volatile u16 *)(_ptr); \ 179 asm_inline volatile(lock "cmpxchgw %[new], %[ptr]" \ 180 - CC_SET(z) \ 181 - : CC_OUT(z) (success), \ 182 [ptr] "+m" (*__ptr), \ 183 [old] "+a" (__old) \ 184 : [new] "r" (__new) \ ··· 188 { \ 189 volatile u32 *__ptr = (volatile u32 *)(_ptr); \ 190 asm_inline volatile(lock "cmpxchgl %[new], %[ptr]" \ 191 - CC_SET(z) \ 192 - : CC_OUT(z) (success), \ 193 [ptr] "+m" (*__ptr), \ 194 [old] "+a" (__old) \ 195 : [new] "r" (__new) \ ··· 199 { \ 200 volatile u64 *__ptr = (volatile u64 *)(_ptr); \ 201 asm_inline volatile(lock "cmpxchgq %[new], %[ptr]" \ 202 - CC_SET(z) \ 203 - : CC_OUT(z) (success), \ 204 [ptr] "+m" (*__ptr), \ 205 [old] "+a" (__old) \ 206 : [new] "r" (__new) \
··· 166 { \ 167 volatile u8 *__ptr = (volatile u8 *)(_ptr); \ 168 asm_inline volatile(lock "cmpxchgb %[new], %[ptr]" \ 169 + : "=@ccz" (success), \ 170 [ptr] "+m" (*__ptr), \ 171 [old] "+a" (__old) \ 172 : [new] "q" (__new) \ ··· 178 { \ 179 volatile u16 *__ptr = (volatile u16 *)(_ptr); \ 180 asm_inline volatile(lock "cmpxchgw %[new], %[ptr]" \ 181 + : "=@ccz" (success), \ 182 [ptr] "+m" (*__ptr), \ 183 [old] "+a" (__old) \ 184 : [new] "r" (__new) \ ··· 190 { \ 191 volatile u32 *__ptr = (volatile u32 *)(_ptr); \ 192 asm_inline volatile(lock "cmpxchgl %[new], %[ptr]" \ 193 + : "=@ccz" (success), \ 194 [ptr] "+m" (*__ptr), \ 195 [old] "+a" (__old) \ 196 : [new] "r" (__new) \ ··· 202 { \ 203 volatile u64 *__ptr = (volatile u64 *)(_ptr); \ 204 asm_inline volatile(lock "cmpxchgq %[new], %[ptr]" \ 205 + : "=@ccz" (success), \ 206 [ptr] "+m" (*__ptr), \ 207 [old] "+a" (__old) \ 208 : [new] "r" (__new) \
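
None of this changes how the success flag is consumed: the ZF-derived
bool still feeds the usual try_cmpxchg() retry pattern, e.g. (sketch
only; 'counter' is a made-up unsigned int pointer):

    unsigned int old = READ_ONCE(*counter);

    do {
        /* on failure, try_cmpxchg() refreshes 'old' with the current value */
    } while (!try_cmpxchg(counter, &old, old + 1));
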
+2 -4
arch/x86/include/asm/cmpxchg_32.h
··· 46 bool ret; \ 47 \ 48 asm_inline volatile(_lock "cmpxchg8b %[ptr]" \ 49 - CC_SET(e) \ 50 - : CC_OUT(e) (ret), \ 51 [ptr] "+m" (*(_ptr)), \ 52 "+a" (o.low), "+d" (o.high) \ 53 : "b" (n.low), "c" (n.high) \ ··· 124 ALTERNATIVE(_lock_loc \ 125 "call cmpxchg8b_emu", \ 126 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \ 127 - CC_SET(e) \ 128 - : ALT_OUTPUT_SP(CC_OUT(e) (ret), \ 129 "+a" (o.low), "+d" (o.high)) \ 130 : "b" (n.low), "c" (n.high), \ 131 [ptr] "S" (_ptr) \
··· 46 bool ret; \ 47 \ 48 asm_inline volatile(_lock "cmpxchg8b %[ptr]" \ 49 + : "=@ccz" (ret), \ 50 [ptr] "+m" (*(_ptr)), \ 51 "+a" (o.low), "+d" (o.high) \ 52 : "b" (n.low), "c" (n.high) \ ··· 125 ALTERNATIVE(_lock_loc \ 126 "call cmpxchg8b_emu", \ 127 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \ 128 + : ALT_OUTPUT_SP("=@ccz" (ret), \ 129 "+a" (o.low), "+d" (o.high)) \ 130 : "b" (n.low), "c" (n.high), \ 131 [ptr] "S" (_ptr) \
+1 -2
arch/x86/include/asm/cmpxchg_64.h
··· 66 bool ret; \ 67 \ 68 asm_inline volatile(_lock "cmpxchg16b %[ptr]" \ 69 - CC_SET(e) \ 70 - : CC_OUT(e) (ret), \ 71 [ptr] "+m" (*(_ptr)), \ 72 "+a" (o.low), "+d" (o.high) \ 73 : "b" (n.low), "c" (n.high) \
··· 66 bool ret; \ 67 \ 68 asm_inline volatile(_lock "cmpxchg16b %[ptr]" \ 69 + : "=@ccz" (ret), \ 70 [ptr] "+m" (*(_ptr)), \ 71 "+a" (o.low), "+d" (o.high) \ 72 : "b" (n.low), "c" (n.high) \
+1 -14
arch/x86/include/asm/mtrr.h
··· 1 /* Generic MTRR (Memory Type Range Register) ioctls. 2 3 Copyright (C) 1997-1999 Richard Gooch 4 - 5 - This library is free software; you can redistribute it and/or 6 - modify it under the terms of the GNU Library General Public 7 - License as published by the Free Software Foundation; either 8 - version 2 of the License, or (at your option) any later version. 9 - 10 - This library is distributed in the hope that it will be useful, 11 - but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 13 - Library General Public License for more details. 14 - 15 - You should have received a copy of the GNU Library General Public 16 - License along with this library; if not, write to the Free 17 - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 19 Richard Gooch may be reached by email at rgooch@atnf.csiro.au 20 The postal address is:
··· 1 + /* SPDX-License-Identifier: LGPL-2.0+ */ 2 /* Generic MTRR (Memory Type Range Register) ioctls. 3 4 Copyright (C) 1997-1999 Richard Gooch 5 6 Richard Gooch may be reached by email at rgooch@atnf.csiro.au 7 The postal address is:
+2 -6
arch/x86/include/asm/mwait.h
··· 36 37 static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx) 38 { 39 - /* "monitorx %eax, %ecx, %edx" */ 40 - asm volatile(".byte 0x0f, 0x01, 0xfa" 41 - :: "a" (eax), "c" (ecx), "d"(edx)); 42 } 43 44 static __always_inline void __mwait(u32 eax, u32 ecx) ··· 78 { 79 /* No need for TSA buffer clearing on AMD */ 80 81 - /* "mwaitx %eax, %ebx, %ecx" */ 82 - asm volatile(".byte 0x0f, 0x01, 0xfb" 83 - :: "a" (eax), "b" (ebx), "c" (ecx)); 84 } 85 86 /*
··· 36 37 static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx) 38 { 39 + asm volatile("monitorx" :: "a" (eax), "c" (ecx), "d"(edx)); 40 } 41 42 static __always_inline void __mwait(u32 eax, u32 ecx) ··· 80 { 81 /* No need for TSA buffer clearing on AMD */ 82 83 + asm volatile("mwaitx" :: "a" (eax), "b" (ebx), "c" (ecx)); 84 } 85 86 /*
+4 -8
arch/x86/include/asm/percpu.h
··· 309 \ 310 asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \ 311 __percpu_arg([var]) \ 312 - CC_SET(z) \ 313 - : CC_OUT(z) (success), \ 314 [oval] "+a" (pco_old__), \ 315 [var] "+m" (__my_cpu_var(_var)) \ 316 : [nval] __pcpu_reg_##size(, pco_new__) \ ··· 366 asm_inline qual ( \ 367 ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ 368 "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ 369 - CC_SET(z) \ 370 - : ALT_OUTPUT_SP(CC_OUT(z) (success), \ 371 [var] "+m" (__my_cpu_var(_var)), \ 372 "+a" (old__.low), "+d" (old__.high)) \ 373 : "b" (new__.low), "c" (new__.high), \ ··· 434 asm_inline qual ( \ 435 ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ 436 "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ 437 - CC_SET(z) \ 438 - : ALT_OUTPUT_SP(CC_OUT(z) (success), \ 439 [var] "+m" (__my_cpu_var(_var)), \ 440 "+a" (old__.low), "+d" (old__.high)) \ 441 : "b" (new__.low), "c" (new__.high), \ ··· 582 bool oldbit; \ 583 \ 584 asm volatile("btl %[nr], " __percpu_arg([var]) \ 585 - CC_SET(c) \ 586 - : CC_OUT(c) (oldbit) \ 587 : [var] "m" (__my_cpu_var(_var)), \ 588 [nr] "rI" (_nr)); \ 589 oldbit; \
··· 309 \ 310 asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \ 311 __percpu_arg([var]) \ 312 + : "=@ccz" (success), \ 313 [oval] "+a" (pco_old__), \ 314 [var] "+m" (__my_cpu_var(_var)) \ 315 : [nval] __pcpu_reg_##size(, pco_new__) \ ··· 367 asm_inline qual ( \ 368 ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ 369 "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ 370 + : ALT_OUTPUT_SP("=@ccz" (success), \ 371 [var] "+m" (__my_cpu_var(_var)), \ 372 "+a" (old__.low), "+d" (old__.high)) \ 373 : "b" (new__.low), "c" (new__.high), \ ··· 436 asm_inline qual ( \ 437 ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ 438 "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ 439 + : ALT_OUTPUT_SP("=@ccz" (success), \ 440 [var] "+m" (__my_cpu_var(_var)), \ 441 "+a" (old__.low), "+d" (old__.high)) \ 442 : "b" (new__.low), "c" (new__.high), \ ··· 585 bool oldbit; \ 586 \ 587 asm volatile("btl %[nr], " __percpu_arg([var]) \ 588 + : "=@ccc" (oldbit) \ 589 : [var] "m" (__my_cpu_var(_var)), \ 590 [nr] "rI" (_nr)); \ 591 oldbit; \
+2 -24
arch/x86/include/asm/rmwcc.h
··· 6 7 #define __CLOBBERS_MEM(clb...) "memory", ## clb 8 9 - #ifndef __GCC_ASM_FLAG_OUTPUTS__ 10 - 11 - /* Use asm goto */ 12 - 13 - #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 14 - ({ \ 15 - bool c = false; \ 16 - asm goto (fullop "; j" #cc " %l[cc_label]" \ 17 - : : [var] "m" (_var), ## __VA_ARGS__ \ 18 - : clobbers : cc_label); \ 19 - if (0) { \ 20 - cc_label: c = true; \ 21 - } \ 22 - c; \ 23 - }) 24 - 25 - #else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */ 26 - 27 - /* Use flags output or a set instruction */ 28 - 29 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 30 ({ \ 31 bool c; \ 32 - asm_inline volatile (fullop CC_SET(cc) \ 33 - : [var] "+m" (_var), CC_OUT(cc) (c) \ 34 : __VA_ARGS__ : clobbers); \ 35 c; \ 36 }) 37 - 38 - #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */ 39 40 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ 41 __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
··· 6 7 #define __CLOBBERS_MEM(clb...) "memory", ## clb 8 9 #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ 10 ({ \ 11 bool c; \ 12 + asm_inline volatile (fullop \ 13 + : [var] "+m" (_var), "=@cc" #cc (c) \ 14 : __VA_ARGS__ : clobbers); \ 15 c; \ 16 }) 17 18 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \ 19 __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
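
Since the requested condition code is now pasted straight into the
constraint string ("=@cc" #cc), callers simply name the flag they want
tested. A hypothetical user (sketch only; the real callers live in
headers such as <asm/atomic.h>) would look like:

    /* Decrement *p and report whether the result hit zero (tests ZF). */
    static __always_inline bool my_dec_and_test(unsigned int *p)
    {
        return GEN_UNARY_RMWcc("decl", *p, e, "%[var]");
    }

    /*
     * which roughly expands to:
     *
     *     asm_inline volatile("decl %[var]"
     *                         : [var] "+m" (*p), "=@cce" (c)
     *                         : : "memory");
     */
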
+1 -2
arch/x86/include/asm/sev.h
··· 491 492 /* "pvalidate" mnemonic support in binutils 2.36 and newer */ 493 asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t" 494 - CC_SET(c) 495 - : CC_OUT(c) (no_rmpupdate), "=a"(rc) 496 : "a"(vaddr), "c"(rmp_psize), "d"(validate) 497 : "memory", "cc"); 498
··· 491 492 /* "pvalidate" mnemonic support in binutils 2.36 and newer */ 493 asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t" 494 + : "=@ccc"(no_rmpupdate), "=a"(rc) 495 : "a"(vaddr), "c"(rmp_psize), "d"(validate) 496 : "memory", "cc"); 497
+1 -2
arch/x86/include/asm/signal.h
··· 83 static inline int __gen_sigismember(sigset_t *set, int _sig) 84 { 85 bool ret; 86 - asm("btl %2,%1" CC_SET(c) 87 - : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1)); 88 return ret; 89 } 90
··· 83 static inline int __gen_sigismember(sigset_t *set, int _sig) 84 { 85 bool ret; 86 + asm("btl %2,%1" : "=@ccc"(ret) : "m"(*set), "Ir"(_sig-1)); 87 return ret; 88 } 89
+3 -7
arch/x86/include/asm/special_insns.h
··· 75 * "rdpkru" instruction. Places PKRU contents in to EAX, 76 * clears EDX and requires that ecx=0. 77 */ 78 - asm volatile(".byte 0x0f,0x01,0xee\n\t" 79 - : "=a" (pkru), "=d" (edx) 80 - : "c" (ecx)); 81 return pkru; 82 } 83 ··· 87 * "wrpkru" instruction. Loads contents in EAX to PKRU, 88 * requires that ecx = edx = 0. 89 */ 90 - asm volatile(".byte 0x0f,0x01,0xef\n\t" 91 - : : "a" (pkru), "c"(ecx), "d"(edx)); 92 } 93 94 #else ··· 284 * See movdir64b()'s comment on operand specification. 285 */ 286 asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90" 287 - CC_SET(z) 288 - : CC_OUT(z) (zf), "+m" (*__dst) 289 : "m" (*__src), "a" (__dst), "d" (__src)); 290 291 /* Submission failure is indicated via EFLAGS.ZF=1 */
··· 75 * "rdpkru" instruction. Places PKRU contents in to EAX, 76 * clears EDX and requires that ecx=0. 77 */ 78 + asm volatile("rdpkru" : "=a" (pkru), "=d" (edx) : "c" (ecx)); 79 return pkru; 80 } 81 ··· 89 * "wrpkru" instruction. Loads contents in EAX to PKRU, 90 * requires that ecx = edx = 0. 91 */ 92 + asm volatile("wrpkru" : : "a" (pkru), "c"(ecx), "d"(edx)); 93 } 94 95 #else ··· 287 * See movdir64b()'s comment on operand specification. 288 */ 289 asm volatile(".byte 0xf3, 0x0f, 0x38, 0xf8, 0x02, 0x66, 0x90" 290 + : "=@ccz" (zf), "+m" (*__dst) 291 : "m" (*__src), "a" (__dst), "d" (__src)); 292 293 /* Submission failure is indicated via EFLAGS.ZF=1 */
+3 -4
arch/x86/include/asm/uaccess.h
··· 378 asm_goto_output("\n" \ 379 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 380 _ASM_EXTABLE_UA(1b, %l[label]) \ 381 - : CC_OUT(z) (success), \ 382 [ptr] "+m" (*_ptr), \ 383 [old] "+a" (__old) \ 384 : [new] ltype (__new) \ ··· 397 asm_goto_output("\n" \ 398 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ 399 _ASM_EXTABLE_UA(1b, %l[label]) \ 400 - : CC_OUT(z) (success), \ 401 "+A" (__old), \ 402 [ptr] "+m" (*_ptr) \ 403 : "b" ((u32)__new), \ ··· 417 __typeof__(*(_ptr)) __new = (_new); \ 418 asm volatile("\n" \ 419 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 420 - CC_SET(z) \ 421 "2:\n" \ 422 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \ 423 %[errout]) \ 424 - : CC_OUT(z) (success), \ 425 [errout] "+r" (__err), \ 426 [ptr] "+m" (*_ptr), \ 427 [old] "+a" (__old) \
··· 378 asm_goto_output("\n" \ 379 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 380 _ASM_EXTABLE_UA(1b, %l[label]) \ 381 + : "=@ccz" (success), \ 382 [ptr] "+m" (*_ptr), \ 383 [old] "+a" (__old) \ 384 : [new] ltype (__new) \ ··· 397 asm_goto_output("\n" \ 398 "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ 399 _ASM_EXTABLE_UA(1b, %l[label]) \ 400 + : "=@ccz" (success), \ 401 "+A" (__old), \ 402 [ptr] "+m" (*_ptr) \ 403 : "b" ((u32)__new), \ ··· 417 __typeof__(*(_ptr)) __new = (_new); \ 418 asm volatile("\n" \ 419 "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ 420 "2:\n" \ 421 _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \ 422 %[errout]) \ 423 + : "=@ccz" (success), \ 424 [errout] "+r" (__err), \ 425 [ptr] "+m" (*_ptr), \ 426 [old] "+a" (__old) \
+1 -14
arch/x86/kernel/cpu/mtrr/cleanup.c
··· 1 /* 2 * MTRR (Memory Type Range Register) cleanup 3 * 4 * Copyright (C) 2009 Yinghai Lu 5 - * 6 - * This library is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU Library General Public 8 - * License as published by the Free Software Foundation; either 9 - * version 2 of the License, or (at your option) any later version. 10 - * 11 - * This library is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 - * Library General Public License for more details. 15 - * 16 - * You should have received a copy of the GNU Library General Public 17 - * License along with this library; if not, write to the Free 18 - * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 #include <linux/init.h> 21 #include <linux/pci.h>
··· 1 + // SPDX-License-Identifier: LGPL-2.0+ 2 /* 3 * MTRR (Memory Type Range Register) cleanup 4 * 5 * Copyright (C) 2009 Yinghai Lu 6 */ 7 #include <linux/init.h> 8 #include <linux/pci.h>
+1 -14
arch/x86/kernel/cpu/mtrr/mtrr.c
··· 1 /* Generic MTRR (Memory Type Range Register) driver. 2 3 Copyright (C) 1997-2000 Richard Gooch 4 Copyright (c) 2002 Patrick Mochel 5 - 6 - This library is free software; you can redistribute it and/or 7 - modify it under the terms of the GNU Library General Public 8 - License as published by the Free Software Foundation; either 9 - version 2 of the License, or (at your option) any later version. 10 - 11 - This library is distributed in the hope that it will be useful, 12 - but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 - Library General Public License for more details. 15 - 16 - You should have received a copy of the GNU Library General Public 17 - License along with this library; if not, write to the Free 18 - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 20 Richard Gooch may be reached by email at rgooch@atnf.csiro.au 21 The postal address is:
··· 1 + // SPDX-License-Identifier: LGPL-2.0+ 2 /* Generic MTRR (Memory Type Range Register) driver. 3 4 Copyright (C) 1997-2000 Richard Gooch 5 Copyright (c) 2002 Patrick Mochel 6 7 Richard Gooch may be reached by email at rgooch@atnf.csiro.au 8 The postal address is:
+3 -3
arch/x86/kernel/cpu/sgx/encls.h
··· 68 ({ \ 69 int ret; \ 70 asm volatile( \ 71 - "1: .byte 0x0f, 0x01, 0xcf;\n\t" \ 72 "2:\n" \ 73 _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \ 74 : "=a"(ret) \ ··· 111 ({ \ 112 int ret; \ 113 asm volatile( \ 114 - "1: .byte 0x0f, 0x01, 0xcf;\n\t" \ 115 - " xor %%eax,%%eax;\n" \ 116 "2:\n" \ 117 _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \ 118 : "=a"(ret), "=b"(rbx_out) \
··· 68 ({ \ 69 int ret; \ 70 asm volatile( \ 71 + "1: encls\n" \ 72 "2:\n" \ 73 _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \ 74 : "=a"(ret) \ ··· 111 ({ \ 112 int ret; \ 113 asm volatile( \ 114 + "1: encls\n\t" \ 115 + "xor %%eax,%%eax\n" \ 116 "2:\n" \ 117 _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \ 118 : "=a"(ret), "=b"(rbx_out) \
-12
tools/arch/x86/include/asm/asm.h
··· 108 109 #endif 110 111 - /* 112 - * Macros to generate condition code outputs from inline assembly, 113 - * The output operand must be type "bool". 114 - */ 115 - #ifdef __GCC_ASM_FLAG_OUTPUTS__ 116 - # define CC_SET(c) "\n\t/* output condition code " #c "*/\n" 117 - # define CC_OUT(c) "=@cc" #c 118 - #else 119 - # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n" 120 - # define CC_OUT(c) [_cc_ ## c] "=qm" 121 - #endif 122 - 123 #ifdef __KERNEL__ 124 125 /* Exception table entry */
··· 108 109 #endif 110 111 #ifdef __KERNEL__ 112 113 /* Exception table entry */
+1 -1
tools/perf/bench/find-bit-bench.c
··· 37 accumulator++; 38 } 39 40 - #if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__) 41 static bool asm_test_bit(long nr, const unsigned long *addr) 42 { 43 bool oldbit;
··· 37 accumulator++; 38 } 39 40 + #if defined(__i386__) || defined(__x86_64__) 41 static bool asm_test_bit(long nr, const unsigned long *addr) 42 { 43 bool oldbit;