x86/sev: Expose sev_es_ghcb_hv_call() for use by HyperV

Hyper-V needs to issue the GHCB HV call in order to read/write MSRs in
Isolation VMs. For that, expose sev_es_ghcb_hv_call().

The Hyper-V Isolation VMs are unenlightened guests and run a paravisor
at VMPL0 for communicating with the hypervisor. GHCB pages are allocated and set up
by that paravisor. Linux gets the GHCB page's physical address via
MSR_AMD64_SEV_ES_GHCB from the paravisor and should not change it.

Add a @set_ghcb_msr parameter to sev_es_ghcb_hv_call() to control
whether the function should set the GHCB's address prior to the call or
not and export that function for use by Hyper-V.

[ bp: - Massage commit message
- add a struct ghcb forward declaration to fix randconfig builds. ]

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20211025122116.264793-6-ltykernel@gmail.com

authored by Tianyu Lan and committed by Borislav Petkov 007faec0 ce47d0c0

+29 -15
+6
arch/x86/include/asm/sev.h
··· 53 54 struct real_mode_header; 55 enum stack_type; 56 57 /* Early IDT entry points for #VC handler */ 58 extern void vc_no_ghcb(void); ··· 82 __sev_es_nmi_complete(); 83 } 84 extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd); 85 #else 86 static inline void sev_es_ist_enter(struct pt_regs *regs) { } 87 static inline void sev_es_ist_exit(void) { }
··· 53 54 struct real_mode_header; 55 enum stack_type; 56 + struct ghcb; 57 58 /* Early IDT entry points for #VC handler */ 59 extern void vc_no_ghcb(void); ··· 81 __sev_es_nmi_complete(); 82 } 83 extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd); 84 + extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, 85 + bool set_ghcb_msr, 86 + struct es_em_ctxt *ctxt, 87 + u64 exit_code, u64 exit_info_1, 88 + u64 exit_info_2); 89 #else 90 static inline void sev_es_ist_enter(struct pt_regs *regs) { } 91 static inline void sev_es_ist_exit(void) { }
+16 -9
arch/x86/kernel/sev-shared.c
··· 125 return ES_VMM_ERROR; 126 } 127 128 - static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, 129 - struct es_em_ctxt *ctxt, 130 - u64 exit_code, u64 exit_info_1, 131 - u64 exit_info_2) 132 { 133 /* Fill in protocol and format specifiers */ 134 ghcb->protocol_version = GHCB_PROTOCOL_MAX; ··· 137 ghcb_set_sw_exit_info_1(ghcb, exit_info_1); 138 ghcb_set_sw_exit_info_2(ghcb, exit_info_2); 139 140 - sev_es_wr_ghcb_msr(__pa(ghcb)); 141 VMGEXIT(); 142 143 return verify_exception_info(ghcb, ctxt); ··· 424 */ 425 sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer); 426 ghcb_set_sw_scratch(ghcb, sw_scratch); 427 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, 428 exit_info_1, exit_info_2); 429 if (ret != ES_OK) 430 return ret; ··· 466 467 ghcb_set_rax(ghcb, rax); 468 469 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0); 470 if (ret != ES_OK) 471 return ret; 472 ··· 498 /* xgetbv will cause #GP - use reset value for xcr0 */ 499 ghcb_set_xcr0(ghcb, 1); 500 501 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0); 502 if (ret != ES_OK) 503 return ret; 504 ··· 523 bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); 524 enum es_result ret; 525 526 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0); 527 if (ret != ES_OK) 528 return ret; 529
··· 125 return ES_VMM_ERROR; 126 } 127 128 + enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr, 129 + struct es_em_ctxt *ctxt, u64 exit_code, 130 + u64 exit_info_1, u64 exit_info_2) 131 { 132 /* Fill in protocol and format specifiers */ 133 ghcb->protocol_version = GHCB_PROTOCOL_MAX; ··· 138 ghcb_set_sw_exit_info_1(ghcb, exit_info_1); 139 ghcb_set_sw_exit_info_2(ghcb, exit_info_2); 140 141 + /* 142 + * Hyper-V unenlightened guests use a paravisor for communicating and 143 + * GHCB pages are being allocated and set up by that paravisor. Linux 144 + * should not change the GHCB page's physical address. 145 + */ 146 + if (set_ghcb_msr) 147 + sev_es_wr_ghcb_msr(__pa(ghcb)); 148 + 149 VMGEXIT(); 150 151 return verify_exception_info(ghcb, ctxt); ··· 418 */ 419 sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer); 420 ghcb_set_sw_scratch(ghcb, sw_scratch); 421 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO, 422 exit_info_1, exit_info_2); 423 if (ret != ES_OK) 424 return ret; ··· 460 461 ghcb_set_rax(ghcb, rax); 462 463 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, 464 + SVM_EXIT_IOIO, exit_info_1, 0); 465 if (ret != ES_OK) 466 return ret; 467 ··· 491 /* xgetbv will cause #GP - use reset value for xcr0 */ 492 ghcb_set_xcr0(ghcb, 1); 493 494 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0); 495 if (ret != ES_OK) 496 return ret; 497 ··· 516 bool rdtscp = (exit_code == SVM_EXIT_RDTSCP); 517 enum es_result ret; 518 519 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0); 520 if (ret != ES_OK) 521 return ret; 522
+7 -6
arch/x86/kernel/sev.c
··· 648 ghcb_set_rdx(ghcb, regs->dx); 649 } 650 651 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0); 652 653 if ((ret == ES_OK) && (!exit_info_1)) { 654 regs->ax = ghcb->save.rax; ··· 868 869 ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); 870 871 - return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2); 872 } 873 874 static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb, ··· 1118 1119 /* Using a value of 0 for ExitInfo1 means RAX holds the value */ 1120 ghcb_set_rax(ghcb, val); 1121 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); 1122 if (ret != ES_OK) 1123 return ret; 1124 ··· 1148 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, 1149 struct es_em_ctxt *ctxt) 1150 { 1151 - return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0); 1152 } 1153 1154 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) ··· 1157 1158 ghcb_set_rcx(ghcb, ctxt->regs->cx); 1159 1160 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0); 1161 if (ret != ES_OK) 1162 return ret; 1163 ··· 1198 if (x86_platform.hyper.sev_es_hcall_prepare) 1199 x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); 1200 1201 - ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0); 1202 if (ret != ES_OK) 1203 return ret; 1204
··· 648 ghcb_set_rdx(ghcb, regs->dx); 649 } 650 651 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR, 652 + exit_info_1, 0); 653 654 if ((ret == ES_OK) && (!exit_info_1)) { 655 regs->ax = ghcb->save.rax; ··· 867 868 ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer)); 869 870 + return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2); 871 } 872 873 static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb, ··· 1117 1118 /* Using a value of 0 for ExitInfo1 means RAX holds the value */ 1119 ghcb_set_rax(ghcb, val); 1120 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0); 1121 if (ret != ES_OK) 1122 return ret; 1123 ··· 1147 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb, 1148 struct es_em_ctxt *ctxt) 1149 { 1150 + return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0); 1151 } 1152 1153 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt) ··· 1156 1157 ghcb_set_rcx(ghcb, ctxt->regs->cx); 1158 1159 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0); 1160 if (ret != ES_OK) 1161 return ret; 1162 ··· 1197 if (x86_platform.hyper.sev_es_hcall_prepare) 1198 x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs); 1199 1200 + ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0); 1201 if (ret != ES_OK) 1202 return ret; 1203