Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: Add code to track call origin for msr assignment

In order to track who initiated the call (host or guest) to modify an msr
value, I have changed the function call parameters along the call path. The
specific change is to add a struct pointer parameter that points to (index,
data, caller) information rather than having this information passed as
individual parameters.

The initial use for this capability is updating the IA32_TSC_ADJUST msr
while setting the tsc value. It is anticipated that this capability will be
useful for other tasks as well.

Signed-off-by: Will Auld <will.auld@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

authored by

Will Auld and committed by
Marcelo Tosatti
8fe8ab46 5419369e

+56 -21
+9 -3
arch/x86/include/asm/kvm_host.h
··· 620 620 621 621 struct x86_instruction_info; 622 622 623 + struct msr_data { 624 + bool host_initiated; 625 + u32 index; 626 + u64 data; 627 + }; 628 + 623 629 struct kvm_x86_ops { 624 630 int (*cpu_has_kvm_support)(void); /* __init */ 625 631 int (*disabled_by_bios)(void); /* __init */ ··· 648 642 649 643 void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu); 650 644 int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata); 651 - int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 645 + int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr); 652 646 u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg); 653 647 void (*get_segment)(struct kvm_vcpu *vcpu, 654 648 struct kvm_segment *var, int seg); ··· 799 793 800 794 void kvm_enable_efer_bits(u64); 801 795 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data); 802 - int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data); 796 + int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr); 803 797 804 798 struct x86_emulate_ctxt; 805 799 ··· 826 820 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr); 827 821 828 822 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); 829 - int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data); 823 + int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr); 830 824 831 825 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu); 832 826 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+10 -4
arch/x86/kvm/svm.c
··· 3127 3127 return 0; 3128 3128 } 3129 3129 3130 - static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) 3130 + static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) 3131 3131 { 3132 3132 struct vcpu_svm *svm = to_svm(vcpu); 3133 3133 3134 + u32 ecx = msr->index; 3135 + u64 data = msr->data; 3134 3136 switch (ecx) { 3135 3137 case MSR_IA32_TSC: 3136 - kvm_write_tsc(vcpu, data); 3138 + kvm_write_tsc(vcpu, msr); 3137 3139 break; 3138 3140 case MSR_STAR: 3139 3141 svm->vmcb->save.star = data; ··· 3190 3188 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); 3191 3189 break; 3192 3190 default: 3193 - return kvm_set_msr_common(vcpu, ecx, data); 3191 + return kvm_set_msr_common(vcpu, msr); 3194 3192 } 3195 3193 return 0; 3196 3194 } 3197 3195 3198 3196 static int wrmsr_interception(struct vcpu_svm *svm) 3199 3197 { 3198 + struct msr_data msr; 3200 3199 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; 3201 3200 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) 3202 3201 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); 3203 3202 3203 + msr.data = data; 3204 + msr.index = ecx; 3205 + msr.host_initiated = false; 3204 3206 3205 3207 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; 3206 - if (svm_set_msr(&svm->vcpu, ecx, data)) { 3208 + if (svm_set_msr(&svm->vcpu, &msr)) { 3207 3209 trace_kvm_msr_write_ex(ecx, data); 3208 3210 kvm_inject_gp(&svm->vcpu, 0); 3209 3211 } else {
+12 -6
arch/x86/kvm/vmx.c
··· 2220 2220 * Returns 0 on success, non-0 otherwise. 2221 2221 * Assumes vcpu_load() was already called. 2222 2222 */ 2223 - static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 2223 + static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 2224 2224 { 2225 2225 struct vcpu_vmx *vmx = to_vmx(vcpu); 2226 2226 struct shared_msr_entry *msr; 2227 2227 int ret = 0; 2228 + u32 msr_index = msr_info->index; 2229 + u64 data = msr_info->data; 2228 2230 2229 2231 switch (msr_index) { 2230 2232 case MSR_EFER: 2231 - ret = kvm_set_msr_common(vcpu, msr_index, data); 2233 + ret = kvm_set_msr_common(vcpu, msr_info); 2232 2234 break; 2233 2235 #ifdef CONFIG_X86_64 2234 2236 case MSR_FS_BASE: ··· 2256 2254 vmcs_writel(GUEST_SYSENTER_ESP, data); 2257 2255 break; 2258 2256 case MSR_IA32_TSC: 2259 - kvm_write_tsc(vcpu, data); 2257 + kvm_write_tsc(vcpu, msr_info); 2260 2258 break; 2261 2259 case MSR_IA32_CR_PAT: 2262 2260 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { ··· 2264 2262 vcpu->arch.pat = data; 2265 2263 break; 2266 2264 } 2267 - ret = kvm_set_msr_common(vcpu, msr_index, data); 2265 + ret = kvm_set_msr_common(vcpu, msr_info); 2268 2266 break; 2269 2267 case MSR_TSC_AUX: 2270 2268 if (!vmx->rdtscp_enabled) ··· 2287 2285 } 2288 2286 break; 2289 2287 } 2290 - ret = kvm_set_msr_common(vcpu, msr_index, data); 2288 + ret = kvm_set_msr_common(vcpu, msr_info); 2291 2289 } 2292 2290 2293 2291 return ret; ··· 4650 4648 4651 4649 static int handle_wrmsr(struct kvm_vcpu *vcpu) 4652 4650 { 4651 + struct msr_data msr; 4653 4652 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; 4654 4653 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) 4655 4654 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); 4656 4655 4657 - if (vmx_set_msr(vcpu, ecx, data) != 0) { 4656 + msr.data = data; 4657 + msr.index = ecx; 4658 + msr.host_initiated = false; 4659 + if (vmx_set_msr(vcpu, &msr) != 0) { 4658 4660 trace_kvm_msr_write_ex(ecx, data); 4659 4661 
kvm_inject_gp(vcpu, 0); 4660 4662 return 1;
+24 -7
arch/x86/kvm/x86.c
··· 890 890 * Returns 0 on success, non-0 otherwise. 891 891 * Assumes vcpu_load() was already called. 892 892 */ 893 - int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 893 + int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) 894 894 { 895 - return kvm_x86_ops->set_msr(vcpu, msr_index, data); 895 + return kvm_x86_ops->set_msr(vcpu, msr); 896 896 } 897 897 898 898 /* ··· 900 900 */ 901 901 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) 902 902 { 903 - return kvm_set_msr(vcpu, index, *data); 903 + struct msr_data msr; 904 + 905 + msr.data = *data; 906 + msr.index = index; 907 + msr.host_initiated = true; 908 + return kvm_set_msr(vcpu, &msr); 904 909 } 905 910 906 911 #ifdef CONFIG_X86_64 ··· 1135 1130 #endif 1136 1131 } 1137 1132 1138 - void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data) 1133 + void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) 1139 1134 { 1140 1135 struct kvm *kvm = vcpu->kvm; 1141 1136 u64 offset, ns, elapsed; 1142 1137 unsigned long flags; 1143 1138 s64 usdiff; 1144 1139 bool matched; 1140 + u64 data = msr->data; 1145 1141 1146 1142 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); 1147 1143 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ··· 1863 1857 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); 1864 1858 } 1865 1859 1866 - int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1860 + int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1867 1861 { 1868 1862 bool pr = false; 1863 + u32 msr = msr_info->index; 1864 + u64 data = msr_info->data; 1869 1865 1870 1866 switch (msr) { 1871 1867 case MSR_EFER: ··· 4539 4531 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, 4540 4532 u32 msr_index, u64 data) 4541 4533 { 4542 - return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data); 4534 + struct msr_data msr; 4535 + 4536 + msr.data = data; 4537 + msr.index = msr_index; 4538 + msr.host_initiated = false; 4539 + return 
kvm_set_msr(emul_to_vcpu(ctxt), &msr); 4543 4540 } 4544 4541 4545 4542 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, ··· 6388 6375 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 6389 6376 { 6390 6377 int r; 6378 + struct msr_data msr; 6391 6379 6392 6380 r = vcpu_load(vcpu); 6393 6381 if (r) 6394 6382 return r; 6395 - kvm_write_tsc(vcpu, 0); 6383 + msr.data = 0x0; 6384 + msr.index = MSR_IA32_TSC; 6385 + msr.host_initiated = true; 6386 + kvm_write_tsc(vcpu, &msr); 6396 6387 vcpu_put(vcpu); 6397 6388 6398 6389 return r;
+1 -1
arch/x86/kvm/x86.h
··· 112 112 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 113 113 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); 114 114 115 - void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data); 115 + void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); 116 116 117 117 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, 118 118 gva_t addr, void *val, unsigned int bytes,