Merge tag 'x86_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Update the 'mitigations=' kernel param documentation

- Check the IBPB feature flag before enabling IBPB in firmware calls
because cloud vendors' imagination when it comes to creating guest
configurations is unlimited

- Unexport sev_es_ghcb_hv_call() before 5.19 is released, now that HyperV
doesn't need it anymore

- Remove dead CONFIG_* items

* tag 'x86_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
docs/kernel-parameters: Update descriptions for "mitigations=" param with retbleed
x86/bugs: Do not enable IBPB at firmware entry when IBPB is not available
Revert "x86/sev: Expose sev_es_ghcb_hv_call() for use by HyperV"
x86/configs: Update configs in x86_debug.config

 Documentation/admin-guide/kernel-parameters.txt |  2 ++
 arch/x86/include/asm/sev.h                      |  7 +------
 arch/x86/kernel/cpu/bugs.c                      |  1 +
 arch/x86/kernel/sev-shared.c                    | 25 +++++++++----------------
 arch/x86/kernel/sev.c                           | 17 ++++++++---------
 kernel/configs/x86_debug.config                 |  3 +--
 6 files changed, 22 insertions(+), 33 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3176,6 +3176,7 @@
                                no_entry_flush         [PPC]
                                no_uaccess_flush       [PPC]
                                mmio_stale_data=off    [X86]
+                               retbleed=off           [X86]
 
                         Exceptions:
                                This does not have any effect on
@@ -3198,6 +3199,7 @@
                                mds=full,nosmt         [X86]
                                tsx_async_abort=full,nosmt [X86]
                                mmio_stale_data=full,nosmt [X86]
+                               retbleed=auto,nosmt    [X86]
 
         mminit_loglevel=
                         [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
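
In practice the two new lines just document the existing aggregation: a
kernel booted with, say, the (illustrative) command line

    mitigations=auto,nosmt

is documented to behave as if retbleed=auto,nosmt had also been passed on
affected x86 machines, just as mitigations=off now lists retbleed=off among
the mitigations it turns off.
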
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -72,7 +72,6 @@
 
 struct real_mode_header;
 enum stack_type;
-struct ghcb;
 
 /* Early IDT entry points for #VC handler */
 extern void vc_no_ghcb(void);
@@ -156,11 +155,7 @@
 	__sev_es_nmi_complete();
 }
 extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
-extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
-					  bool set_ghcb_msr,
-					  struct es_em_ctxt *ctxt,
-					  u64 exit_code, u64 exit_info_1,
-					  u64 exit_info_2);
+
 static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
 {
 	int rc;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1520,6 +1520,7 @@
 	 * enable IBRS around firmware calls.
 	 */
 	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+	    boot_cpu_has(X86_FEATURE_IBPB) &&
 	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
 	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
 
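
As an illustration of the fixed logic, here is a minimal, self-contained
userspace C model of the guard (cpu_has(), the flag constants and
maybe_enable_ibpb_fw() are stand-ins for the kernel's boot_cpu_has() /
boot_cpu_has_bug() machinery, not actual kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    enum { FEATURE_IBPB, BUG_RETBLEED, FEATURE_USE_IBPB_FW, NR_FLAGS };

    static bool cpu_caps[NR_FLAGS];

    static bool cpu_has(int flag) { return cpu_caps[flag]; }

    /* Decide whether to issue IBPB around firmware calls. */
    static void maybe_enable_ibpb_fw(bool amd_or_hygon)
    {
            /*
             * Without the IBPB feature check, a guest whose CPU is
             * affected by RETBleed but does not advertise IBPB would
             * later try to write the prediction-command MSR and fault.
             */
            if (cpu_has(BUG_RETBLEED) && cpu_has(FEATURE_IBPB) && amd_or_hygon)
                    cpu_caps[FEATURE_USE_IBPB_FW] = true;
    }

    int main(void)
    {
            cpu_caps[BUG_RETBLEED] = true;  /* affected CPU ...    */
            cpu_caps[FEATURE_IBPB] = false; /* ... that lacks IBPB */

            maybe_enable_ibpb_fw(true);
            printf("IBPB at firmware entry: %s\n",
                   cpu_has(FEATURE_USE_IBPB_FW) ? "enabled" : "left off");
            return 0;
    }

With the guard in place, the odd guest configuration above simply leaves the
firmware-entry IBPB off instead of faulting on the MSR write.
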
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -219,9 +219,10 @@
 	return ES_VMM_ERROR;
 }
 
-enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
-				   struct es_em_ctxt *ctxt, u64 exit_code,
-				   u64 exit_info_1, u64 exit_info_2)
+static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+					  struct es_em_ctxt *ctxt,
+					  u64 exit_code, u64 exit_info_1,
+					  u64 exit_info_2)
 {
 	/* Fill in protocol and format specifiers */
 	ghcb->protocol_version = ghcb_version;
@@ -232,14 +233,7 @@
 	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
 	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
 
-	/*
-	 * Hyper-V unenlightened guests use a paravisor for communicating and
-	 * GHCB pages are being allocated and set up by that paravisor. Linux
-	 * should not change the GHCB page's physical address.
-	 */
-	if (set_ghcb_msr)
-		sev_es_wr_ghcb_msr(__pa(ghcb));
-
+	sev_es_wr_ghcb_msr(__pa(ghcb));
 	VMGEXIT();
 
 	return verify_exception_info(ghcb, ctxt);
@@ -795,7 +789,7 @@
 	 */
 	sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
 	ghcb_set_sw_scratch(ghcb, sw_scratch);
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO,
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
				   exit_info_1, exit_info_2);
 	if (ret != ES_OK)
 		return ret;
@@ -837,8 +831,7 @@
 
 	ghcb_set_rax(ghcb, rax);
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt,
-				  SVM_EXIT_IOIO, exit_info_1, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
 	if (ret != ES_OK)
 		return ret;
 
@@ -894,7 +887,7 @@
 		/* xgetbv will cause #GP - use reset value for xcr0 */
 		ghcb_set_xcr0(ghcb, 1);
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
 	if (ret != ES_OK)
 		return ret;
 
@@ -919,7 +912,7 @@
 	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
 	enum es_result ret;
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
 	if (ret != ES_OK)
 		return ret;
 
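
With the Hyper-V special case reverted, the helper's GHCB round trip is
unconditional again. Compressed to its essentials (a sketch assembled from
the lines above, with the protocol/format setup abbreviated, not a verbatim
copy of the function):

    /* Fill in protocol/format specifiers and the requested service ... */
    ghcb_set_sw_exit_code(ghcb, exit_code);
    ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
    ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

    sev_es_wr_ghcb_msr(__pa(ghcb));   /* always publish the GHCB's PA */
    VMGEXIT();                        /* ... then exit to the hypervisor */

    return verify_exception_info(ghcb, ctxt);   /* check the HV's answer */

Since this file is the only remaining user, the function can become static
and its declaration can be dropped from sev.h, as the hunks above and below
do.
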
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -786,7 +786,7 @@
 	ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
 
 	/* This will advance the shared buffer data points to. */
-	ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
 
 	/*
 	 * Page State Change VMGEXIT can pass error code through
@@ -1212,8 +1212,7 @@
 		ghcb_set_rdx(ghcb, regs->dx);
 	}
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
-				  exit_info_1, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
 
 	if ((ret == ES_OK) && (!exit_info_1)) {
 		regs->ax = ghcb->save.rax;
@@ -1452,7 +1451,7 @@
 
 	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
 
-	return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
+	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
 }
 
 /*
@@ -1628,7 +1627,7 @@
 
 	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
 	ghcb_set_rax(ghcb, val);
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
 	if (ret != ES_OK)
 		return ret;
 
@@ -1658,7 +1657,7 @@
 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
 				       struct es_em_ctxt *ctxt)
 {
-	return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
+	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
 }
 
 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
@@ -1667,7 +1666,7 @@
 
 	ghcb_set_rcx(ghcb, ctxt->regs->cx);
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
 	if (ret != ES_OK)
 		return ret;
 
@@ -1708,7 +1707,7 @@
 	if (x86_platform.hyper.sev_es_hcall_prepare)
 		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
 	if (ret != ES_OK)
 		return ret;
 
@@ -2197,7 +2196,7 @@
 		ghcb_set_rbx(ghcb, input->data_npages);
 	}
 
-	ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
+	ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
 	if (ret)
 		goto e_put;
 
diff --git a/kernel/configs/x86_debug.config b/kernel/configs/x86_debug.config
--- a/kernel/configs/x86_debug.config
+++ b/kernel/configs/x86_debug.config
@@ -7,12 +7,11 @@
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_SLUB_DEBUG_ON=y
-CONFIG_KMEMCHECK=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
 CONFIG_GCOV_KERNEL=y
 CONFIG_LOCKDEP=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_SCHEDSTATS=y
-CONFIG_VMLINUX_VALIDATION=y
+CONFIG_NOINSTR_VALIDATION=y
 CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
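
For reference, fragments under kernel/configs/ like this one are meant to be
merged on top of a base configuration via kbuild's fragment rule, e.g.
(assuming an x86-64 tree; the base config is the user's choice):

    make x86_64_defconfig x86_debug.config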