kvm: vmx: Allow disabling virtual NMI support

The code paths for CPUs without virtual NMI support are rarely
exercised. To simplify testing them, add a "vnmi" module parameter
that allows virtual NMI support to be disabled: loading kvm_intel
with vnmi=0 forces the software fallback for tracking NMI blocking
even on hardware that has virtual NMIs. One eventinj.flat test
(NMI after iret) fails when loading kvm_intel with vnmi=0.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>

+21 -10
arch/x86/kvm/vmx.c
···
 static bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
 static bool __read_mostly flexpriority_enabled = 1;
 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
···
 
 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+	if (!enable_vnmi)
+		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
 	/* Enable the preemption timer dynamically */
 	pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
 	return pin_based_exec_ctrl;
···
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-	if (!cpu_has_virtual_nmis() ||
+	if (!enable_vnmi ||
 	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
 		enable_irq_window(vcpu);
 		return;
···
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!cpu_has_virtual_nmis()) {
+	if (!enable_vnmi) {
 		/*
 		 * Tracking the NMI-blocked state in software is built upon
 		 * finding the next open IRQ window. This, in turn, depends on
···
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	bool masked;
 
-	if (!cpu_has_virtual_nmis())
+	if (!enable_vnmi)
 		return vmx->loaded_vmcs->soft_vnmi_blocked;
 	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return false;
···
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!cpu_has_virtual_nmis()) {
+	if (!enable_vnmi) {
 		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
 			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
 			vmx->loaded_vmcs->vnmi_blocked_time = 0;
···
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return 0;
 
-	if (!cpu_has_virtual_nmis() &&
+	if (!enable_vnmi &&
 	    to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
 		return 0;
···
 	 * AAK134, BY25.
 	 */
 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-			cpu_has_virtual_nmis() &&
+			enable_vnmi &&
 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
···
 
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
+	WARN_ON_ONCE(!enable_vnmi);
 	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 			CPU_BASED_VIRTUAL_NMI_PENDING);
 	++vcpu->stat.nmi_window_exits;
···
 
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
+
+	if (!cpu_has_virtual_nmis())
+		enable_vnmi = 0;
 
 	/*
 	 * set_apic_access_page_addr() is used to reload apic access
···
 	 * "blocked by NMI" bit has to be set before next VM entry.
 	 */
 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-			cpu_has_virtual_nmis() &&
+			enable_vnmi &&
 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 			      GUEST_INTR_STATE_NMI);
···
 		return 0;
 	}
 
-	if (unlikely(!cpu_has_virtual_nmis() &&
+	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
 		if (vmx_interrupt_allowed(vcpu)) {
 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
···
 
 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-	if (cpu_has_virtual_nmis()) {
+	if (enable_vnmi) {
 		if (vmx->loaded_vmcs->nmi_known_unmasked)
 			return;
 		/*
···
 	unsigned long debugctlmsr, cr3, cr4;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
-	if (unlikely(!cpu_has_virtual_nmis() &&
+	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
 		vmx->loaded_vmcs->entry_time = ktime_get();
···
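
For context (not part of the patch): a minimal sketch of how a read-only
bool module parameter like "vnmi" is declared and exposed. The module name
"vnmi_demo" and its messages are hypothetical and exist only for
illustration; only module_param_named() and the S_IRUGO permission mirror
what the patch adds. With S_IRUGO, the value chosen at load time (e.g.
vnmi=0) can be read back from /sys/module/<module>/parameters/vnmi but not
changed at runtime, which is the behaviour the kvm_intel parameter gets.

/* Hypothetical demo module; not part of this patch. */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static bool __read_mostly enable_vnmi = 1;
/* S_IRUGO: readable via /sys/module/vnmi_demo/parameters/vnmi, not writable. */
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);

static int __init vnmi_demo_init(void)
{
	pr_info("vnmi_demo: loaded with vnmi=%d\n", enable_vnmi);
	return 0;
}

static void __exit vnmi_demo_exit(void)
{
}

module_init(vnmi_demo_init);
module_exit(vnmi_demo_exit);
MODULE_LICENSE("GPL");

Loading kvm_intel with vnmi=0 and reading the corresponding sysfs file back
is how the new knob is exercised for testing the software NMI-blocking paths.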