KVM: Avoid instruction emulation when event delivery is pending

When an event (such as an interrupt) is injected, and the stack is
shadowed (and therefore write protected), the guest will exit. The
current code will see that the stack is shadowed and emulate a few
instructions, each time postponing the injection. Eventually the
injection may succeed, but at that time the guest may be unwilling
to accept the interrupt (for example, the TPR may have changed).

This occurs every once in a while during a Windows 2008 boot.

Fix by unshadowing the page at the fault address if the fault was due
to an event injection.

Signed-off-by: Avi Kivity <avi@qumranet.com>

+9 -1
+1
arch/x86/kvm/mmu.c
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1814,6 +1814,7 @@
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
+6 -1
arch/x86/kvm/svm.c
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1008,10 +1008,13 @@
 	struct kvm *kvm = svm->vcpu.kvm;
 	u64 fault_address;
 	u32 error_code;
+	bool event_injection = false;
 
 	if (!irqchip_in_kernel(kvm) &&
-	    is_external_interrupt(exit_int_info))
+	    is_external_interrupt(exit_int_info)) {
+		event_injection = true;
 		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
+	}
 
 	fault_address = svm->vmcb->control.exit_info_2;
 	error_code = svm->vmcb->control.exit_info_1;
@@ -1028,5 +1025,7 @@
 		(u32)fault_address, (u32)(fault_address >> 32),
 		handler);
 
+	if (event_injection)
+		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
+2
arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2298,6 +2298,8 @@
 	cr2 = vmcs_readl(EXIT_QUALIFICATION);
 	KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
 		    (u32)((u64)cr2 >> 32), handler);
+	if (vect_info & VECTORING_INFO_VALID_MASK)
+		kvm_mmu_unprotect_page_virt(vcpu, cr2);
 	return kvm_mmu_page_fault(vcpu, cr2, error_code);
 }