x86/sev: Expose sev_es_ghcb_hv_call() for use by HyperV

Hyper-V needs to issue the GHCB HV call in order to read/write MSRs in
Isolation VMs. For that, expose sev_es_ghcb_hv_call().

The Hyper-V Isolation VMs are unenlightened guests which run a
paravisor at VMPL0 to communicate with the hypervisor on their behalf.
The GHCB pages are allocated and set up by that paravisor. Linux gets
the GHCB page's physical address from the paravisor via
MSR_AMD64_SEV_ES_GHCB and must not change it.
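
A minimal sketch of how such a guest would pick up the paravisor-provided
GHCB page instead of allocating one itself (the hv_map_paravisor_ghcb()
helper, the hv_ghcb_va variable and the error handling are hypothetical
illustrations; MSR_AMD64_SEV_ES_GHCB, rdmsrl() and memremap() are existing
kernel interfaces):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

static void *hv_ghcb_va;	/* hypothetical mapping of the paravisor's GHCB */

static int hv_map_paravisor_ghcb(void)
{
	u64 ghcb_gpa;

	/* The paravisor publishes the GHCB GPA here; never write this MSR. */
	rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);

	hv_ghcb_va = memremap(ghcb_gpa, PAGE_SIZE, MEMREMAP_WB);

	return hv_ghcb_va ? 0 : -ENOMEM;
}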

Add a @set_ghcb_msr parameter to sev_es_ghcb_hv_call() to control
whether the function should write the GHCB page's address to
MSR_AMD64_SEV_ES_GHCB before issuing the call, and expose that function
for use by Hyper-V.
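
For illustration, a minimal sketch of how a Hyper-V caller could then
read an MSR through the paravisor-owned GHCB page with
@set_ghcb_msr == false. The helper name and error handling are
hypothetical; sev_es_ghcb_hv_call(), the ghcb_set_rcx() accessor and
SVM_EXIT_MSR are the existing interfaces:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/sev.h>
#include <asm/svm.h>

/*
 * Hypothetical helper: read an MSR via the GHCB protocol. @set_ghcb_msr is
 * false so MSR_AMD64_SEV_ES_GHCB, which the paravisor owns, is left alone.
 */
static int hv_ghcb_rdmsr_sketch(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				unsigned int msr, u64 *value)
{
	ghcb_set_rcx(ghcb, msr);

	/* exit_info_1 == 0 selects RDMSR, 1 would select WRMSR */
	if (sev_es_ghcb_hv_call(ghcb, false, ctxt, SVM_EXIT_MSR, 0, 0) != ES_OK)
		return -EIO;

	*value = lower_32_bits(ghcb->save.rax) |
		 ((u64)lower_32_bits(ghcb->save.rdx) << 32);

	return 0;
}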

[ bp: - Massage commit message
- add a struct ghcb forward declaration to fix randconfig builds. ]

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20211025122116.264793-6-ltykernel@gmail.com

Authored by Tianyu Lan, committed by Borislav Petkov (007faec0, ce47d0c0)

 3 files changed, 29 insertions(+), 15 deletions(-)
arch/x86/include/asm/sev.h | 6 ++++++

···
 
 struct real_mode_header;
 enum stack_type;
+struct ghcb;
 
 /* Early IDT entry points for #VC handler */
 extern void vc_no_ghcb(void);
···
                __sev_es_nmi_complete();
 }
 extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
+extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+                                          bool set_ghcb_msr,
+                                          struct es_em_ctxt *ctxt,
+                                          u64 exit_code, u64 exit_info_1,
+                                          u64 exit_info_2);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
arch/x86/kernel/sev-shared.c | 25 ++++++++++++++++---------

···
        return ES_VMM_ERROR;
 }
 
-static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
-                                          struct es_em_ctxt *ctxt,
-                                          u64 exit_code, u64 exit_info_1,
-                                          u64 exit_info_2)
+enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
+                                   struct es_em_ctxt *ctxt, u64 exit_code,
+                                   u64 exit_info_1, u64 exit_info_2)
 {
        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = GHCB_PROTOCOL_MAX;
···
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
 
-       sev_es_wr_ghcb_msr(__pa(ghcb));
+       /*
+        * Hyper-V unenlightened guests use a paravisor for communicating and
+        * GHCB pages are being allocated and set up by that paravisor. Linux
+        * should not change the GHCB page's physical address.
+        */
+       if (set_ghcb_msr)
+               sev_es_wr_ghcb_msr(__pa(ghcb));
+
        VMGEXIT();
 
        return verify_exception_info(ghcb, ctxt);
···
                 */
                sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
                ghcb_set_sw_scratch(ghcb, sw_scratch);
-               ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
+               ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO,
                                          exit_info_1, exit_info_2);
                if (ret != ES_OK)
                        return ret;
···
 
                ghcb_set_rax(ghcb, rax);
 
-               ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
+               ret = sev_es_ghcb_hv_call(ghcb, true, ctxt,
+                                         SVM_EXIT_IOIO, exit_info_1, 0);
                if (ret != ES_OK)
                        return ret;
···
                /* xgetbv will cause #GP - use reset value for xcr0 */
                ghcb_set_xcr0(ghcb, 1);
 
-       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0);
        if (ret != ES_OK)
                return ret;
···
        bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
        enum es_result ret;
 
-       ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
        if (ret != ES_OK)
                return ret;
arch/x86/kernel/sev.c | 13 +++++++------

···
                ghcb_set_rdx(ghcb, regs->dx);
        }
 
-       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
+                                 exit_info_1, 0);
 
        if ((ret == ES_OK) && (!exit_info_1)) {
                regs->ax = ghcb->save.rax;
···
 
        ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
 
-       return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
+       return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
 }
 
 static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
···
 
        /* Using a value of 0 for ExitInfo1 means RAX holds the value */
        ghcb_set_rax(ghcb, val);
-       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
        if (ret != ES_OK)
                return ret;
···
 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
                                        struct es_em_ctxt *ctxt)
 {
-       return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
+       return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
 }
 
 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
···
 
        ghcb_set_rcx(ghcb, ctxt->regs->cx);
 
-       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
        if (ret != ES_OK)
                return ret;
···
        if (x86_platform.hyper.sev_es_hcall_prepare)
                x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
 
-       ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
+       ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
        if (ret != ES_OK)
                return ret;