Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:

- Prevent an out-of-bounds access in mtrr_write()

- Break a circular dependency in the new hyperv IPI acceleration code

- Address the build breakage related to inline functions by enforcing
gnu_inline and explicitly bringing native_save_fl() out of line,
which also adds a set of _ASM_ARG macros which provide 32/64-bit
safety.

- Initialize the shadow CR4 per cpu variable before using it.

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mtrr: Don't copy out-of-bounds data in mtrr_write
x86/hyper-v: Fix the circular dependency in IPI enlightenment
x86/paravirt: Make native_save_fl() extern inline
x86/asm: Add _ASM_ARG* constants for argument registers to <asm/asm.h>
compiler-gcc.h: Add __attribute__((gnu_inline)) to all inline declarations
x86/mm/32: Initialize the CR4 shadow before __flush_tlb_all()

Changed files
+129 -11
arch
include
+5
arch/x86/hyperv/hv_apic.c
··· 114 114 ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; 115 115 nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); 116 116 } 117 + if (nr_bank < 0) 118 + goto ipi_mask_ex_done; 117 119 if (!nr_bank) 118 120 ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; 119 121 ··· 160 158 161 159 for_each_cpu(cur_cpu, mask) { 162 160 vcpu = hv_cpu_number_to_vp_number(cur_cpu); 161 + if (vcpu == VP_INVAL) 162 + goto ipi_mask_done; 163 + 163 164 /* 164 165 * This particular version of the IPI hypercall can 165 166 * only target upto 64 CPUs.
+4 -1
arch/x86/hyperv/hv_init.c
··· 265 265 { 266 266 u64 guest_id, required_msrs; 267 267 union hv_x64_msr_hypercall_contents hypercall_msr; 268 - int cpuhp; 268 + int cpuhp, i; 269 269 270 270 if (x86_hyper_type != X86_HYPER_MS_HYPERV) 271 271 return; ··· 292 292 GFP_KERNEL); 293 293 if (!hv_vp_index) 294 294 return; 295 + 296 + for (i = 0; i < num_possible_cpus(); i++) 297 + hv_vp_index[i] = VP_INVAL; 295 298 296 299 hv_vp_assist_page = kcalloc(num_possible_cpus(), 297 300 sizeof(*hv_vp_assist_page), GFP_KERNEL);
+59
arch/x86/include/asm/asm.h
··· 46 46 #define _ASM_SI __ASM_REG(si) 47 47 #define _ASM_DI __ASM_REG(di) 48 48 49 + #ifndef __x86_64__ 50 + /* 32 bit */ 51 + 52 + #define _ASM_ARG1 _ASM_AX 53 + #define _ASM_ARG2 _ASM_DX 54 + #define _ASM_ARG3 _ASM_CX 55 + 56 + #define _ASM_ARG1L eax 57 + #define _ASM_ARG2L edx 58 + #define _ASM_ARG3L ecx 59 + 60 + #define _ASM_ARG1W ax 61 + #define _ASM_ARG2W dx 62 + #define _ASM_ARG3W cx 63 + 64 + #define _ASM_ARG1B al 65 + #define _ASM_ARG2B dl 66 + #define _ASM_ARG3B cl 67 + 68 + #else 69 + /* 64 bit */ 70 + 71 + #define _ASM_ARG1 _ASM_DI 72 + #define _ASM_ARG2 _ASM_SI 73 + #define _ASM_ARG3 _ASM_DX 74 + #define _ASM_ARG4 _ASM_CX 75 + #define _ASM_ARG5 r8 76 + #define _ASM_ARG6 r9 77 + 78 + #define _ASM_ARG1Q rdi 79 + #define _ASM_ARG2Q rsi 80 + #define _ASM_ARG3Q rdx 81 + #define _ASM_ARG4Q rcx 82 + #define _ASM_ARG5Q r8 83 + #define _ASM_ARG6Q r9 84 + 85 + #define _ASM_ARG1L edi 86 + #define _ASM_ARG2L esi 87 + #define _ASM_ARG3L edx 88 + #define _ASM_ARG4L ecx 89 + #define _ASM_ARG5L r8d 90 + #define _ASM_ARG6L r9d 91 + 92 + #define _ASM_ARG1W di 93 + #define _ASM_ARG2W si 94 + #define _ASM_ARG3W dx 95 + #define _ASM_ARG4W cx 96 + #define _ASM_ARG5W r8w 97 + #define _ASM_ARG6W r9w 98 + 99 + #define _ASM_ARG1B dil 100 + #define _ASM_ARG2B sil 101 + #define _ASM_ARG3B dl 102 + #define _ASM_ARG4B cl 103 + #define _ASM_ARG5B r8b 104 + #define _ASM_ARG6B r9b 105 + 106 + #endif 107 + 49 108 /* 50 109 * Macros to generate condition code outputs from inline assembly, 51 110 * The output operand must be type "bool".
+1 -1
arch/x86/include/asm/irqflags.h
··· 13 13 * Interrupt control: 14 14 */ 15 15 16 - static inline unsigned long native_save_fl(void) 16 + extern inline unsigned long native_save_fl(void) 17 17 { 18 18 unsigned long flags; 19 19
+4 -1
arch/x86/include/asm/mshyperv.h
··· 9 9 #include <asm/hyperv-tlfs.h> 10 10 #include <asm/nospec-branch.h> 11 11 12 + #define VP_INVAL U32_MAX 13 + 12 14 struct ms_hyperv_info { 13 15 u32 features; 14 16 u32 misc_features; ··· 21 19 }; 22 20 23 21 extern struct ms_hyperv_info ms_hyperv; 24 - 25 22 26 23 /* 27 24 * Generate the guest ID. ··· 282 281 */ 283 282 for_each_cpu(cpu, cpus) { 284 283 vcpu = hv_cpu_number_to_vp_number(cpu); 284 + if (vcpu == VP_INVAL) 285 + return -1; 285 286 vcpu_bank = vcpu / 64; 286 287 vcpu_offset = vcpu % 64; 287 288 __set_bit(vcpu_offset, (unsigned long *)
+1
arch/x86/kernel/Makefile
··· 61 61 obj-y += tsc.o tsc_msr.o io_delay.o rtc.o 62 62 obj-y += pci-iommu_table.o 63 63 obj-y += resource.o 64 + obj-y += irqflags.o 64 65 65 66 obj-y += process.o 66 67 obj-y += fpu/
+2 -1
arch/x86/kernel/cpu/mtrr/if.c
··· 106 106 107 107 memset(line, 0, LINE_SIZE); 108 108 109 - length = strncpy_from_user(line, buf, LINE_SIZE - 1); 109 + len = min_t(size_t, len, LINE_SIZE - 1); 110 + length = strncpy_from_user(line, buf, len); 110 111 if (length < 0) 111 112 return length; 112 113
+26
arch/x86/kernel/irqflags.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <asm/asm.h> 4 + #include <asm/export.h> 5 + #include <linux/linkage.h> 6 + 7 + /* 8 + * unsigned long native_save_fl(void) 9 + */ 10 + ENTRY(native_save_fl) 11 + pushf 12 + pop %_ASM_AX 13 + ret 14 + ENDPROC(native_save_fl) 15 + EXPORT_SYMBOL(native_save_fl) 16 + 17 + /* 18 + * void native_restore_fl(unsigned long flags) 19 + * %eax/%rdi: flags 20 + */ 21 + ENTRY(native_restore_fl) 22 + push %_ASM_ARG1 23 + popf 24 + ret 25 + ENDPROC(native_restore_fl) 26 + EXPORT_SYMBOL(native_restore_fl)
+5
arch/x86/kernel/smpboot.c
··· 221 221 #ifdef CONFIG_X86_32 222 222 /* switch away from the initial page table */ 223 223 load_cr3(swapper_pg_dir); 224 + /* 225 + * Initialize the CR4 shadow before doing anything that could 226 + * try to read it. 227 + */ 228 + cr4_init_shadow(); 224 229 __flush_tlb_all(); 225 230 #endif 226 231 load_current_idt();
+22 -7
include/linux/compiler-gcc.h
··· 66 66 #endif 67 67 68 68 /* 69 + * Feature detection for gnu_inline (gnu89 extern inline semantics). Either 70 + * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics, 71 + * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not 72 + * defined so the gnu89 semantics are the default. 73 + */ 74 + #ifdef __GNUC_STDC_INLINE__ 75 + # define __gnu_inline __attribute__((gnu_inline)) 76 + #else 77 + # define __gnu_inline 78 + #endif 79 + 80 + /* 69 81 * Force always-inline if the user requests it so via the .config, 70 82 * or if gcc is too old. 71 83 * GCC does not warn about unused static inline functions for 72 84 * -Wunused-function. This turns out to avoid the need for complex #ifdef 73 85 * directives. Suppress the warning in clang as well by using "unused" 74 86 * function attribute, which is redundant but not harmful for gcc. 87 + * Prefer gnu_inline, so that extern inline functions do not emit an 88 + * externally visible function. This makes extern inline behave as per gnu89 89 + * semantics rather than c99. This prevents multiple symbol definition errors 90 + * of extern inline functions at link time. 91 + * A lot of inline functions can cause havoc with function tracing. 
75 92 */ 76 93 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ 77 94 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) 78 - #define inline inline __attribute__((always_inline,unused)) notrace 79 - #define __inline__ __inline__ __attribute__((always_inline,unused)) notrace 80 - #define __inline __inline __attribute__((always_inline,unused)) notrace 95 + #define inline \ 96 + inline __attribute__((always_inline, unused)) notrace __gnu_inline 81 97 #else 82 - /* A lot of inline functions can cause havoc with function tracing */ 83 - #define inline inline __attribute__((unused)) notrace 84 - #define __inline__ __inline__ __attribute__((unused)) notrace 85 - #define __inline __inline __attribute__((unused)) notrace 98 + #define inline inline __attribute__((unused)) notrace __gnu_inline 86 99 #endif 87 100 101 + #define __inline__ inline 102 + #define __inline inline 88 103 #define __always_inline inline __attribute__((always_inline)) 89 104 #define noinline __attribute__((noinline)) 90 105