Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: SVM: Provide an updated VMRUN invocation for SEV-ES guests

The run sequence is different for an SEV-ES guest compared to a legacy or
even an SEV guest. The guest vCPU register state of an SEV-ES guest will
be restored on VMRUN and saved on VMEXIT. There is no need to restore the
guest registers directly and through VMLOAD before VMRUN and no need to
save the guest registers directly and through VMSAVE on VMEXIT.

Update the svm_vcpu_run() function to skip register state saving and
restoring, and provide an alternative function for running an SEV-ES guest
in vmenter.S.

Additionally, certain host state is restored across an SEV-ES VMRUN. As
a result, certain register states are not required to be restored upon
VMEXIT (e.g. FS, GS, etc.), so only do that if the guest is not an SEV-ES
guest.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <fb1c66d32f2194e171b95fc1a8affd6d326e10c1.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Tom Lendacky and committed by
Paolo Bonzini
16809ecd 86137773

+77 -9
+16 -9
arch/x86/kvm/svm/svm.c
··· 3700 3700 guest_enter_irqoff(); 3701 3701 lockdep_hardirqs_on(CALLER_ADDR0); 3702 3702 3703 - __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); 3703 + if (sev_es_guest(svm->vcpu.kvm)) { 3704 + __svm_sev_es_vcpu_run(svm->vmcb_pa); 3705 + } else { 3706 + __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); 3704 3707 3705 3708 #ifdef CONFIG_X86_64 3706 - native_wrmsrl(MSR_GS_BASE, svm->host.gs_base); 3709 + native_wrmsrl(MSR_GS_BASE, svm->host.gs_base); 3707 3710 #else 3708 - loadsegment(fs, svm->host.fs); 3711 + loadsegment(fs, svm->host.fs); 3709 3712 #ifndef CONFIG_X86_32_LAZY_GS 3710 - loadsegment(gs, svm->host.gs); 3713 + loadsegment(gs, svm->host.gs); 3711 3714 #endif 3712 3715 #endif 3716 + } 3713 3717 3714 3718 /* 3715 3719 * VMEXIT disables interrupts (host state), but tracing and lockdep ··· 3811 3807 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) 3812 3808 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 3813 3809 3814 - reload_tss(vcpu); 3810 + if (!sev_es_guest(svm->vcpu.kvm)) 3811 + reload_tss(vcpu); 3815 3812 3816 3813 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); 3817 3814 3818 - vcpu->arch.cr2 = svm->vmcb->save.cr2; 3819 - vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 3820 - vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3821 - vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3815 + if (!sev_es_guest(svm->vcpu.kvm)) { 3816 + vcpu->arch.cr2 = svm->vmcb->save.cr2; 3817 + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 3818 + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3819 + vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3820 + } 3822 3821 3823 3822 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3824 3823 kvm_before_interrupt(&svm->vcpu);
+5
arch/x86/kvm/svm/svm.h
··· 591 591 void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu); 592 592 void sev_es_vcpu_put(struct vcpu_svm *svm); 593 593 594 + /* vmenter.S */ 595 + 596 + void __svm_sev_es_vcpu_run(unsigned long vmcb_pa); 597 + void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs); 598 + 594 599 #endif
+50
arch/x86/kvm/svm/vmenter.S
··· 168 168 pop %_ASM_BP 169 169 ret 170 170 SYM_FUNC_END(__svm_vcpu_run) 171 + 172 + /** 173 + * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode 174 + * @vmcb_pa: unsigned long 175 + */ 176 + SYM_FUNC_START(__svm_sev_es_vcpu_run) 177 + push %_ASM_BP 178 + #ifdef CONFIG_X86_64 179 + push %r15 180 + push %r14 181 + push %r13 182 + push %r12 183 + #else 184 + push %edi 185 + push %esi 186 + #endif 187 + push %_ASM_BX 188 + 189 + /* Enter guest mode */ 190 + mov %_ASM_ARG1, %_ASM_AX 191 + sti 192 + 193 + 1: vmrun %_ASM_AX 194 + jmp 3f 195 + 2: cmpb $0, kvm_rebooting 196 + jne 3f 197 + ud2 198 + _ASM_EXTABLE(1b, 2b) 199 + 200 + 3: cli 201 + 202 + #ifdef CONFIG_RETPOLINE 203 + /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ 204 + FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE 205 + #endif 206 + 207 + pop %_ASM_BX 208 + 209 + #ifdef CONFIG_X86_64 210 + pop %r12 211 + pop %r13 212 + pop %r14 213 + pop %r15 214 + #else 215 + pop %esi 216 + pop %edi 217 + #endif 218 + pop %_ASM_BP 219 + ret 220 + SYM_FUNC_END(__svm_sev_es_vcpu_run)
+6
arch/x86/kvm/x86.c
··· 880 880 881 881 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) 882 882 { 883 + if (vcpu->arch.guest_state_protected) 884 + return; 885 + 883 886 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { 884 887 885 888 if (vcpu->arch.xcr0 != host_xcr0) ··· 903 900 904 901 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) 905 902 { 903 + if (vcpu->arch.guest_state_protected) 904 + return; 905 + 906 906 if (static_cpu_has(X86_FEATURE_PKU) && 907 907 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || 908 908 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {