
x86/asm: Cleanup prefetch primitives

This is based on a patch originally by hpa.

With the current improvements to the alternatives, we can simply use %P1
as a mem8 operand constraint and rely on the toolchain to generate the
proper instruction sizes. For example, on 32-bit, where we use an empty
old instruction, we get:

apply_alternatives: feat: 6*32+8, old: (c104648b, len: 4), repl: (c195566c, len: 4)
c104648b: alt_insn: 90 90 90 90
c195566c: rpl_insn: 0f 0d 4b 5c

...

apply_alternatives: feat: 6*32+8, old: (c18e09b4, len: 3), repl: (c1955948, len: 3)
c18e09b4: alt_insn: 90 90 90
c1955948: rpl_insn: 0f 0d 08

...

apply_alternatives: feat: 6*32+8, old: (c1190cf9, len: 7), repl: (c1955a79, len: 7)
c1190cf9: alt_insn: 90 90 90 90 90 90 90
c1955a79: rpl_insn: 0f 0d 0d a0 d4 85 c1

all with the proper padding applied, depending on the size of the
replacement instruction the compiler generates.
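
To make the mechanism concrete, here is a minimal standalone sketch, not
kernel code, of the operand style the patch switches to: the target is
passed as an "m" operand and the template references it with the P
modifier, just as the new prefetch()/prefetchw() helpers do via %P1
inside alternative_input() (the helper name prefetch_nta() below is
invented for the example; builds with gcc or clang on x86). The differing
replacement lengths in the dump above (3, 4 and 7 bytes) are simply the
different addressing modes the assembler ended up encoding for the
operand.

/*
 * Standalone illustration: the compiler picks the addressing mode for
 * the "m" operand and the assembler sizes the prefetch accordingly.
 */
static inline void prefetch_nta(const void *x)
{
	/* operand 0 here; inside alternative_input() it is operand 1, hence %P1 */
	asm volatile("prefetchnta %P0"
		     : /* no outputs */
		     : "m" (*(const char *)x));
}

int main(void)
{
	static char table[256];
	char local[64];

	prefetch_nta(table);		/* address known at link time */
	prefetch_nta(local + 16);	/* base register plus displacement */
	return 0;
}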

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: H. Peter Anvin <hpa@linux.intel.com>

+13 -10
+1 -1
arch/x86/include/asm/apic.h
···
  91   91   {
  92   92           volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
  93   93   
  94        -       alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
       94   +       alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
  95   95                          ASM_OUTPUT2("=r" (v), "=m" (*addr)),
  96   96                          ASM_OUTPUT2("0" (v), "m" (*addr)));
  97   97   }
+7 -9
arch/x86/include/asm/processor.h
···
 761  761   #define ARCH_HAS_SPINLOCK_PREFETCH
 762  762   
 763  763   #ifdef CONFIG_X86_32
 764        -  # define BASE_PREFETCH          ASM_NOP4
      764   +  # define BASE_PREFETCH          ""
 765  765   # define ARCH_HAS_PREFETCH
 766  766   #else
 767        -  # define BASE_PREFETCH          "prefetcht0 (%1)"
      767   +  # define BASE_PREFETCH          "prefetcht0 %P1"
 768  768   #endif
 769  769   
 770  770   /*
···
 775  775    */
 776  776   static inline void prefetch(const void *x)
 777  777   {
 778        -          alternative_input(BASE_PREFETCH,
 779        -                            "prefetchnta (%1)",
      778   +          alternative_input(BASE_PREFETCH, "prefetchnta %P1",
 780  779                                X86_FEATURE_XMM,
 781        -                            "r" (x));
      780   +                            "m" (*(const char *)x));
 782  781   }
 783  782   
 784  783   /*
···
 787  788    */
 788  789   static inline void prefetchw(const void *x)
 789  790   {
 790        -          alternative_input(BASE_PREFETCH,
 791        -                            "prefetchw (%1)",
 792        -                            X86_FEATURE_3DNOW,
 793        -                            "r" (x));
      791   +          alternative_input(BASE_PREFETCH, "prefetchw %P1",
      792   +                            X86_FEATURE_3DNOWPREFETCH,
      793   +                            "m" (*(const char *)x));
 794  794   }
 795  795   
 796  796   static inline void spin_lock_prefetch(const void *x)
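
For contrast, a rough before/after sketch of the operand change made to
prefetch()/prefetchw() above, again as a standalone illustration rather
than the kernel macros (prefetch_old()/prefetch_new() are invented
names): the old form pins the address in a register and hard-codes the
"(%1)" addressing mode in the template, so the compiler often has to
materialize the address with an extra lea or mov first, while the new
form hands the compiler's own address expression straight to the
assembler.

/* old style: "r" operand, addressing mode fixed as (%reg) in the template */
static inline void prefetch_old(const void *x)
{
	asm volatile("prefetchnta (%0)" : : "r" (x));
}

/*
 * new style: "m" operand, addressing mode and hence encoding length
 * chosen by the toolchain, which the alternatives padding now handles
 */
static inline void prefetch_new(const void *x)
{
	asm volatile("prefetchnta %P0" : : "m" (*(const char *)x));
}

int main(void)
{
	char buf[64];

	prefetch_old(buf);
	prefetch_new(buf + 32);
	return 0;
}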
+5
arch/x86/kernel/cpu/amd.c
···
 711  711           set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 712  712   
 713  713           rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
      714   +
      715   +       /* 3DNow or LM implies PREFETCHW */
      716   +       if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
      717   +               if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
      718   +                       set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
 714  719   }
 715  720   
 716  721   #ifdef CONFIG_X86_32