Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: x86: Move TSC scaling logic out of call-back read_l1_tsc()

Both VMX and SVM scale the host TSC in the same way in the
read_l1_tsc() callback, so this patch moves the scaling logic out of
the callback and into a common function, kvm_read_l1_tsc().

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Haozhong Zhang, committed by Paolo Bonzini
4ba76538 58ea6767
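
In essence, the patch changes where kvm_scale_tsc() is applied: before, each vendor callback scaled the host TSC itself; after, the common wrapper kvm_read_l1_tsc() scales once and the callback only adds its TSC offset. Below is a minimal, self-contained userspace sketch of that call structure; the struct and the 16-bit fixed-point ratio are illustrative stand-ins, not the kernel's actual types (SVM's real ratio format, for instance, is 8.32 fixed point).

#include <stdint.h>
#include <stdio.h>

struct vcpu {
        uint64_t tsc_scaling_ratio;     /* fixed point, 16 fractional bits here */
        uint64_t tsc_offset;            /* vendor-managed L1 TSC offset */
};

/* stand-in for kvm_scale_tsc(): multiply the host TSC by the vCPU's ratio */
static uint64_t scale_tsc(struct vcpu *v, uint64_t tsc)
{
        return (tsc * v->tsc_scaling_ratio) >> 16;
}

/* post-patch vendor callback: only applies the offset, no scaling */
static uint64_t vendor_read_l1_tsc(struct vcpu *v, uint64_t scaled_host_tsc)
{
        return v->tsc_offset + scaled_host_tsc;
}

/* mirrors kvm_read_l1_tsc(): scale once in common code, then let the
 * callback apply its offset */
static uint64_t read_l1_tsc(struct vcpu *v, uint64_t host_tsc)
{
        return vendor_read_l1_tsc(v, scale_tsc(v, host_tsc));
}

int main(void)
{
        struct vcpu v = { .tsc_scaling_ratio = 2ULL << 16, .tsc_offset = 1000 };
        /* 500 host ticks, ratio 2.0, offset 1000 -> guest sees 2000 */
        printf("guest tsc = %llu\n", (unsigned long long)read_l1_tsc(&v, 500));
        return 0;
}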

+12 -7
+1 -0
arch/x86/include/asm/kvm_host.h
···
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+2 -2
arch/x86/kvm/lapic.c
···

        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
-       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
+       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

        /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
···
        local_irq_save(flags);

        now = apic->lapic_timer.timer.base->get_time();
-       guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
+       guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        if (likely(tscdeadline > guest_tsc)) {
                ns = (tscdeadline - guest_tsc) * 1000000ULL;
                do_div(ns, this_tsc_khz);
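The second lapic.c hunk also shows how the remaining TSC interval is converted to nanoseconds: with this_tsc_khz ticks per millisecond, ns = ticks * 1,000,000 / tsc_khz. A standalone arithmetic check of that formula (the values are made up):

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as the lapic.c hunk: ns = ticks * 1000000 / tsc_khz
 * (the kernel performs the division with do_div()) */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t tsc_khz)
{
        return ticks * 1000000ULL / tsc_khz;
}

int main(void)
{
        /* 2,600,000 ticks on a 2.6 GHz (2,600,000 kHz) TSC is exactly 1 ms */
        printf("%llu ns\n", (unsigned long long)ticks_to_ns(2600000, 2600000));
        return 0;
}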
+1 -2
arch/x86/kvm/svm.c
···
 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-       return vmcb->control.tsc_offset +
-               kvm_scale_tsc(vcpu, host_tsc);
+       return vmcb->control.tsc_offset + host_tsc;
 }

 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+8 -3
arch/x86/kvm/x86.c
··· 1401 1401 return target_tsc - tsc; 1402 1402 } 1403 1403 1404 + u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) 1405 + { 1406 + return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc)); 1407 + } 1408 + EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 1409 + 1404 1410 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) 1405 1411 { 1406 1412 struct kvm *kvm = vcpu->kvm; ··· 1744 1738 kernel_ns = get_kernel_ns(); 1745 1739 } 1746 1740 1747 - tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); 1741 + tsc_timestamp = kvm_read_l1_tsc(v, host_tsc); 1748 1742 1749 1743 /* 1750 1744 * We may have to catch up the TSC to match elapsed wall clock ··· 6551 6545 if (hw_breakpoint_active()) 6552 6546 hw_breakpoint_restore(); 6553 6547 6554 - vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, 6555 - rdtsc()); 6548 + vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 6556 6549 6557 6550 vcpu->mode = OUTSIDE_GUEST_MODE; 6558 6551 smp_wmb();