Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: hyper-v: Prepare to meet unallocated Hyper-V context

Currently, Hyper-V context is part of 'struct kvm_vcpu_arch' and is always
available. As a preparation to allocating it dynamically, check that it is
not NULL at call sites which can normally proceed without it, i.e., the
behavior is identical to the situation when Hyper-V emulation is not being
used by the guest.

When Hyper-V context for a particular vCPU is not allocated, we may still
need to get 'vp_index' from there. E.g. in a hypothetical situation when
Hyper-V emulation was enabled on one CPU and wasn't on another, Hyper-V
style send-IPI hypercall may still be used. Luckily, vp_index is always
initialized to kvm_vcpu_get_idx() and can only be changed when Hyper-V
context is present. Introduce kvm_hv_get_vpindex() helper for
simplification.

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-12-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Vitaly Kuznetsov and committed by
Paolo Bonzini
f2bc14b6 9ff5e030

+29 -14
+10 -7
arch/x86/kvm/hyperv.c
··· 142 142 return NULL; 143 143 144 144 vcpu = kvm_get_vcpu(kvm, vpidx); 145 - if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx) 145 + if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx) 146 146 return vcpu; 147 147 kvm_for_each_vcpu(i, vcpu, kvm) 148 - if (to_hv_vcpu(vcpu)->vp_index == vpidx) 148 + if (kvm_hv_get_vpindex(vcpu) == vpidx) 149 149 return vcpu; 150 150 return NULL; 151 151 } ··· 377 377 break; 378 378 } 379 379 380 - trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, 381 - to_hv_vcpu(vcpu)->vp_index, msr, 382 - *pdata); 380 + trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); 383 381 384 382 return 0; 385 383 } ··· 804 806 u64 time_now, exp_time; 805 807 int i; 806 808 809 + if (!hv_vcpu) 810 + return; 811 + 807 812 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) 808 813 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { 809 814 stimer = &hv_vcpu->stimer[i]; ··· 842 841 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) 843 842 { 844 843 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 844 + 845 + if (!hv_vcpu) 846 + return false; 845 847 846 848 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) 847 849 return false; ··· 1508 1504 1509 1505 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); 1510 1506 kvm_for_each_vcpu(i, vcpu, kvm) { 1511 - if (test_bit(to_hv_vcpu(vcpu)->vp_index, 1512 - (unsigned long *)vp_bitmap)) 1507 + if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap)) 1513 1508 __set_bit(i, vcpu_bitmap); 1514 1509 } 1515 1510 return vcpu_bitmap;
+10
arch/x86/kvm/hyperv.h
··· 83 83 return &vcpu->kvm->arch.hyperv.hv_syndbg; 84 84 } 85 85 86 + static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu) 87 + { 88 + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 89 + 90 + return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu); 91 + } 92 + 86 93 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host); 87 94 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host); 88 95 ··· 127 120 static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu) 128 121 { 129 122 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 123 + 124 + if (!hv_vcpu) 125 + return false; 130 126 131 127 return !bitmap_empty(hv_vcpu->stimer_pending_bitmap, 132 128 HV_SYNIC_STIMER_COUNT);
+3 -2
arch/x86/kvm/lapic.c
··· 1245 1245 apic_clear_isr(vector, apic); 1246 1246 apic_update_ppr(apic); 1247 1247 1248 - if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap)) 1248 + if (to_hv_vcpu(apic->vcpu) && 1249 + test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap)) 1249 1250 kvm_hv_synic_send_eoi(apic->vcpu, vector); 1250 1251 1251 1252 kvm_ioapic_send_eoi(apic, vector); ··· 2513 2512 */ 2514 2513 2515 2514 apic_clear_irr(vector, apic); 2516 - if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) { 2515 + if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) { 2517 2516 /* 2518 2517 * For auto-EOI interrupts, there might be another pending 2519 2518 * interrupt above PPR, so check whether to raise another
+1 -3
arch/x86/kvm/vmx/vmx.c
··· 6810 6810 6811 6811 /* All fields are clean at this point */ 6812 6812 if (static_branch_unlikely(&enable_evmcs)) { 6813 - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 6814 - 6815 6813 current_evmcs->hv_clean_fields |= 6816 6814 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; 6817 6815 6818 - current_evmcs->hv_vp_id = hv_vcpu->vp_index; 6816 + current_evmcs->hv_vp_id = kvm_hv_get_vpindex(vcpu); 6819 6817 } 6820 6818 6821 6819 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+5 -2
arch/x86/kvm/x86.c
··· 8803 8803 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) 8804 8804 return; 8805 8805 8806 - bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, 8807 - to_hv_synic(vcpu)->vec_bitmap, 256); 8806 + if (to_hv_vcpu(vcpu)) 8807 + bitmap_or((ulong *)eoi_exit_bitmap, 8808 + vcpu->arch.ioapic_handled_vectors, 8809 + to_hv_synic(vcpu)->vec_bitmap, 256); 8810 + 8808 8811 static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 8809 8812 } 8810 8813