Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM/nVMX: Use __vmx_vcpu_run in nested_vmx_check_vmentry_hw

Replace inline assembly in nested_vmx_check_vmentry_hw
with a call to __vmx_vcpu_run. The function is not
performance critical, so (double) GPR save/restore
in __vmx_vcpu_run can be tolerated, as far as performance
effects are concerned.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Reviewed-and-tested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
[sean: dropped versioning info from changelog]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20201231002702.2223707-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Uros Bizjak; committed by Paolo Bonzini.
150f17bf 53666664

+5 -32
+3 -29
arch/x86/kvm/vmx/nested.c
@@ -12,6 +12,7 @@
 #include "nested.h"
 #include "pmu.h"
 #include "trace.h"
+#include "vmx.h"
 #include "x86.h"

 static bool __read_mostly enable_shadow_vmcs = 1;
@@ -3058,35 +3057,8 @@
 		vmx->loaded_vmcs->host_state.cr4 = cr4;
 	}

-	asm(
-		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
-		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
-		"je 1f \n\t"
-		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
-		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
-		"1: \n\t"
-		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
-
-		/* Check if vmlaunch or vmresume is needed */
-		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
-
-		/*
-		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
-		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
-		 * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
-		 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
-		 */
-		"call vmx_vmenter\n\t"
-
-		CC_SET(be)
-	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
-	      : [HOST_RSP]"r"((unsigned long)HOST_RSP),
-		[loaded_vmcs]"r"(vmx->loaded_vmcs),
-		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
-		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
-		[wordsize]"i"(sizeof(ulong))
-	      : "memory"
-	);
+	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
+				 vmx->loaded_vmcs->launched);

 	if (vmx->msr_autoload.host.nr)
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+1 -1
arch/x86/kvm/vmx/vmenter.S
@@ -44,7 +44,7 @@
  * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
  * to vmx_vmexit.
  */
-SYM_FUNC_START(vmx_vmenter)
+SYM_FUNC_START_LOCAL(vmx_vmenter)
	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
	je 2f
-2
arch/x86/kvm/vmx/vmx.c
@@ -6658,8 +6658,6 @@
 	}
 }

-bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
-
 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 					struct vcpu_vmx *vmx)
 {
+1
arch/x86/kvm/vmx/vmx.h
@@ -388,6 +388,7 @@
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,