Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: Update Xen TSC leaves during CPUID emulation

The Xen emulation in KVM modifies certain CPUID leaves to expose
TSC information to the guest.

Previously, these CPUID leaves were updated whenever guest time changed,
but this conflicts with the KVM_SET_CPUID/KVM_SET_CPUID2 ioctls, which
reject changes to CPUID entries on running vCPUs.

Fix this by updating the TSC information directly in the CPUID emulation
handler instead of modifying the vCPU's CPUID entries.

Signed-off-by: Fred Griffoul <fgriffo@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20250124150539.69975-1-fgriffo@amazon.co.uk
Signed-off-by: Sean Christopherson <seanjc@google.com>

authored by

Fred Griffoul and committed by
Sean Christopherson
a2b00f85 26e228ec

+29 -27
+16
arch/x86/kvm/cpuid.c
··· 2006 2006 } else if (function == 0x80000007) { 2007 2007 if (kvm_hv_invtsc_suppressed(vcpu)) 2008 2008 *edx &= ~feature_bit(CONSTANT_TSC); 2009 + } else if (IS_ENABLED(CONFIG_KVM_XEN) && 2010 + kvm_xen_is_tsc_leaf(vcpu, function)) { 2011 + /* 2012 + * Update guest TSC frequency information if necessary. 2013 + * Ignore failures, there is no sane value that can be 2014 + * provided if KVM can't get the TSC frequency. 2015 + */ 2016 + if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) 2017 + kvm_guest_time_update(vcpu); 2018 + 2019 + if (index == 1) { 2020 + *ecx = vcpu->arch.hv_clock.tsc_to_system_mul; 2021 + *edx = vcpu->arch.hv_clock.tsc_shift; 2022 + } else if (index == 2) { 2023 + *eax = vcpu->arch.hw_tsc_khz; 2024 + } 2009 2025 } 2010 2026 } else { 2011 2027 *eax = *ebx = *ecx = *edx = 0;
+1 -2
arch/x86/kvm/x86.c
··· 3170 3170 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); 3171 3171 } 3172 3172 3173 - static int kvm_guest_time_update(struct kvm_vcpu *v) 3173 + int kvm_guest_time_update(struct kvm_vcpu *v) 3174 3174 { 3175 3175 unsigned long flags, tgt_tsc_khz; 3176 3176 unsigned seq; ··· 3253 3253 &vcpu->hv_clock.tsc_shift, 3254 3254 &vcpu->hv_clock.tsc_to_system_mul); 3255 3255 vcpu->hw_tsc_khz = tgt_tsc_khz; 3256 - kvm_xen_update_tsc_info(v); 3257 3256 } 3258 3257 3259 3258 vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
+1
arch/x86/kvm/x86.h
··· 362 362 u64 get_kvmclock_ns(struct kvm *kvm); 363 363 uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm); 364 364 bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp); 365 + int kvm_guest_time_update(struct kvm_vcpu *v); 365 366 366 367 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, 367 368 gva_t addr, void *val, unsigned int bytes,
-23
arch/x86/kvm/xen.c
··· 2256 2256 del_timer_sync(&vcpu->arch.xen.poll_timer); 2257 2257 } 2258 2258 2259 - void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu) 2260 - { 2261 - struct kvm_cpuid_entry2 *entry; 2262 - u32 function; 2263 - 2264 - if (!vcpu->arch.xen.cpuid.base) 2265 - return; 2266 - 2267 - function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3); 2268 - if (function > vcpu->arch.xen.cpuid.limit) 2269 - return; 2270 - 2271 - entry = kvm_find_cpuid_entry_index(vcpu, function, 1); 2272 - if (entry) { 2273 - entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul; 2274 - entry->edx = vcpu->arch.hv_clock.tsc_shift; 2275 - } 2276 - 2277 - entry = kvm_find_cpuid_entry_index(vcpu, function, 2); 2278 - if (entry) 2279 - entry->eax = vcpu->arch.hw_tsc_khz; 2280 - } 2281 - 2282 2259 void kvm_xen_init_vm(struct kvm *kvm) 2283 2260 { 2284 2261 mutex_init(&kvm->arch.xen.xen_lock);
+11 -2
arch/x86/kvm/xen.h
··· 9 9 #ifndef __ARCH_X86_KVM_XEN_H__ 10 10 #define __ARCH_X86_KVM_XEN_H__ 11 11 12 + #include <asm/xen/cpuid.h> 12 13 #include <asm/xen/hypervisor.h> 13 14 14 15 #ifdef CONFIG_KVM_XEN ··· 36 35 int kvm_xen_setup_evtchn(struct kvm *kvm, 37 36 struct kvm_kernel_irq_routing_entry *e, 38 37 const struct kvm_irq_routing_entry *ue); 39 - void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu); 40 38 41 39 static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) 42 40 { ··· 48 48 vcpu->arch.xen.vcpu_info_cache.active && 49 49 vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu)) 50 50 kvm_xen_inject_vcpu_vector(vcpu); 51 + } 52 + 53 + static inline bool kvm_xen_is_tsc_leaf(struct kvm_vcpu *vcpu, u32 function) 54 + { 55 + return static_branch_unlikely(&kvm_xen_enabled.key) && 56 + vcpu->arch.xen.cpuid.base && 57 + function <= vcpu->arch.xen.cpuid.limit && 58 + function == (vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3)); 51 59 } 52 60 53 61 static inline bool kvm_xen_msr_enabled(struct kvm *kvm) ··· 178 170 return false; 179 171 } 180 172 181 - static inline void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu) 173 + static inline bool kvm_xen_is_tsc_leaf(struct kvm_vcpu *vcpu, u32 function) 182 174 { 175 + return false; 183 176 } 184 177 #endif 185 178