Merge tag 'x86_urgent_for_v6.9_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Fix CPU feature dependencies of GFNI, VAES, and VPCLMULQDQ

- Print the correct error code when FRED reports a bad event type

- Add a FRED-specific INT80 handler without the special dances that
need to happen in the current one

- Enable the using-the-default-return-thunk-but-you-should-not warning
only on configs which actually enable those special return thunks

- Check the proper feature flags when selecting BHI retpoline
mitigation

* tag 'x86_urgent_for_v6.9_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpufeatures: Fix dependencies for GFNI, VAES, and VPCLMULQDQ
x86/fred: Fix incorrect error code printout in fred_bad_type()
x86/fred: Fix INT80 emulation for FRED
x86/retpolines: Enable the default thunk warning only on relevant configs
x86/bugs: Fix BHI retpoline check

+87 -12
+65
arch/x86/entry/common.c
··· 255 instrumentation_end(); 256 syscall_exit_to_user_mode(regs); 257 } 258 #else /* CONFIG_IA32_EMULATION */ 259 260 /* Handles int $0x80 on a 32bit kernel */
··· 255 instrumentation_end(); 256 syscall_exit_to_user_mode(regs); 257 } 258 + 259 + #ifdef CONFIG_X86_FRED 260 + /* 261 + * A FRED-specific INT80 handler is warranted for the following reasons: 262 + * 263 + * 1) As INT instructions and hardware interrupts are separate event 264 + * types, FRED does not preclude the use of vector 0x80 for external 265 + * interrupts. As a result, the FRED setup code does not reserve 266 + * vector 0x80 and calling int80_is_external() is not merely 267 + * suboptimal but actively incorrect: it could cause a system call 268 + * to be incorrectly ignored. 269 + * 270 + * 2) It is called only for handling vector 0x80 of event type 271 + * EVENT_TYPE_SWINT and will never be called to handle any external 272 + * interrupt (event type EVENT_TYPE_EXTINT). 273 + * 274 + * 3) FRED has separate entry flows depending on if the event came from 275 + * user space or kernel space, and because the kernel does not use 276 + * INT insns, the FRED kernel entry handler fred_entry_from_kernel() 277 + * falls through to fred_bad_type() if the event type is 278 + * EVENT_TYPE_SWINT, i.e., INT insns. So if the kernel is handling 279 + * an INT insn, it can only be from a user level. 280 + * 281 + * 4) int80_emulation() does a CLEAR_BRANCH_HISTORY. While FRED will 282 + * likely take a different approach if it is ever needed: it 283 + * probably belongs in either fred_intx()/fred_other() or 284 + * asm_fred_entrypoint_user(), depending on if this ought to be done 285 + * for all entries from userspace or only system 286 + * calls. 287 + * 288 + * 5) INT $0x80 is the fast path for 32-bit system calls under FRED. 289 + */ 290 + DEFINE_FREDENTRY_RAW(int80_emulation) 291 + { 292 + int nr; 293 + 294 + enter_from_user_mode(regs); 295 + 296 + instrumentation_begin(); 297 + add_random_kstack_offset(); 298 + 299 + /* 300 + * FRED pushed 0 into regs::orig_ax and regs::ax contains the 301 + * syscall number. 
302 + * 303 + * User tracing code (ptrace or signal handlers) might assume 304 + * that the regs::orig_ax contains a 32-bit number on invoking 305 + * a 32-bit syscall. 306 + * 307 + * Establish the syscall convention by saving the 32bit truncated 308 + * syscall number in regs::orig_ax and by invalidating regs::ax. 309 + */ 310 + regs->orig_ax = regs->ax & GENMASK(31, 0); 311 + regs->ax = -ENOSYS; 312 + 313 + nr = syscall_32_enter(regs); 314 + 315 + local_irq_enable(); 316 + nr = syscall_enter_from_user_mode_work(regs, nr); 317 + do_syscall_32_irqs_on(regs, nr); 318 + 319 + instrumentation_end(); 320 + syscall_exit_to_user_mode(regs); 321 + } 322 + #endif 323 #else /* CONFIG_IA32_EMULATION */ 324 325 /* Handles int $0x80 on a 32bit kernel */
+5 -5
arch/x86/entry/entry_fred.c
··· 28 if (regs->fred_cs.sl > 0) { 29 pr_emerg("PANIC: invalid or fatal FRED event; event type %u " 30 "vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n", 31 - regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax, 32 fred_event_data(regs), regs->cs, regs->ip); 33 - die("invalid or fatal FRED event", regs, regs->orig_ax); 34 panic("invalid or fatal FRED event"); 35 } else { 36 unsigned long flags = oops_begin(); ··· 38 39 pr_alert("BUG: invalid or fatal FRED event; event type %u " 40 "vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n", 41 - regs->fred_ss.type, regs->fred_ss.vector, regs->orig_ax, 42 fred_event_data(regs), regs->cs, regs->ip); 43 44 - if (__die("Invalid or fatal FRED event", regs, regs->orig_ax)) 45 sig = 0; 46 47 oops_end(flags, regs, sig); ··· 66 /* INT80 */ 67 case IA32_SYSCALL_VECTOR: 68 if (ia32_enabled()) 69 - return int80_emulation(regs); 70 fallthrough; 71 #endif 72
··· 28 if (regs->fred_cs.sl > 0) { 29 pr_emerg("PANIC: invalid or fatal FRED event; event type %u " 30 "vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n", 31 + regs->fred_ss.type, regs->fred_ss.vector, error_code, 32 fred_event_data(regs), regs->cs, regs->ip); 33 + die("invalid or fatal FRED event", regs, error_code); 34 panic("invalid or fatal FRED event"); 35 } else { 36 unsigned long flags = oops_begin(); ··· 38 39 pr_alert("BUG: invalid or fatal FRED event; event type %u " 40 "vector %u error 0x%lx aux 0x%lx at %04x:%016lx\n", 41 + regs->fred_ss.type, regs->fred_ss.vector, error_code, 42 fred_event_data(regs), regs->cs, regs->ip); 43 44 + if (__die("Invalid or fatal FRED event", regs, error_code)) 45 sig = 0; 46 47 oops_end(flags, regs, sig); ··· 66 /* INT80 */ 67 case IA32_SYSCALL_VECTOR: 68 if (ia32_enabled()) 69 + return fred_int80_emulation(regs); 70 fallthrough; 71 #endif 72
+7 -4
arch/x86/kernel/cpu/bugs.c
··· 1652 return; 1653 1654 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ 1655 - if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { 1656 spec_ctrl_disable_kernel_rrsba(); 1657 if (rrsba_disabled) 1658 return; ··· 2805 { 2806 if (!boot_cpu_has_bug(X86_BUG_BHI)) 2807 return "; BHI: Not affected"; 2808 - else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) 2809 return "; BHI: BHI_DIS_S"; 2810 - else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) 2811 return "; BHI: SW loop, KVM: SW loop"; 2812 - else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled) 2813 return "; BHI: Retpoline"; 2814 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT)) 2815 return "; BHI: Vulnerable, KVM: SW loop";
··· 1652 return; 1653 1654 /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ 1655 + if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 1656 + !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { 1657 spec_ctrl_disable_kernel_rrsba(); 1658 if (rrsba_disabled) 1659 return; ··· 2804 { 2805 if (!boot_cpu_has_bug(X86_BUG_BHI)) 2806 return "; BHI: Not affected"; 2807 + else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) 2808 return "; BHI: BHI_DIS_S"; 2809 + else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) 2810 return "; BHI: SW loop, KVM: SW loop"; 2811 + else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && 2812 + !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && 2813 + rrsba_disabled) 2814 return "; BHI: Retpoline"; 2815 else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT)) 2816 return "; BHI: Vulnerable, KVM: SW loop";
+3 -3
arch/x86/kernel/cpu/cpuid-deps.c
··· 44 { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, 45 { X86_FEATURE_AES, X86_FEATURE_XMM2 }, 46 { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, 47 { X86_FEATURE_FMA, X86_FEATURE_AVX }, 48 { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, 49 { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, 50 { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, ··· 59 { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, 60 { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, 61 { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, 62 - { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL }, 63 - { X86_FEATURE_VAES, X86_FEATURE_AVX512VL }, 64 - { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL }, 65 { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, 66 { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, 67 { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
··· 44 { X86_FEATURE_F16C, X86_FEATURE_XMM2, }, 45 { X86_FEATURE_AES, X86_FEATURE_XMM2 }, 46 { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, 47 + { X86_FEATURE_GFNI, X86_FEATURE_XMM2 }, 48 { X86_FEATURE_FMA, X86_FEATURE_AVX }, 49 + { X86_FEATURE_VAES, X86_FEATURE_AVX }, 50 + { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX }, 51 { X86_FEATURE_AVX2, X86_FEATURE_AVX, }, 52 { X86_FEATURE_AVX512F, X86_FEATURE_AVX, }, 53 { X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F }, ··· 56 { X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F }, 57 { X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F }, 58 { X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL }, 59 { X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL }, 60 { X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL }, 61 { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
+7
arch/x86/lib/retpoline.S
··· 382 SYM_CODE_START(__x86_return_thunk) 383 UNWIND_HINT_FUNC 384 ANNOTATE_NOENDBR 385 ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \ 386 "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS 387 int3 388 SYM_CODE_END(__x86_return_thunk) 389 EXPORT_SYMBOL(__x86_return_thunk)
··· 382 SYM_CODE_START(__x86_return_thunk) 383 UNWIND_HINT_FUNC 384 ANNOTATE_NOENDBR 385 + #if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \ 386 + defined(CONFIG_MITIGATION_SRSO) || \ 387 + defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) 388 ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \ 389 "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS 390 + #else 391 + ANNOTATE_UNRET_SAFE 392 + ret 393 + #endif 394 int3 395 SYM_CODE_END(__x86_return_thunk) 396 EXPORT_SYMBOL(__x86_return_thunk)