Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: TDX: Make pmu_intel.c ignore guest TD case

TDX KVM doesn't support PMU yet; that support is future work, planned as a
separate patch series. For now, handle the TDX case by updating
vcpu_to_lbr_desc() and vcpu_to_lbr_records() to return NULL.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Co-developed-by: Tony Lindgren <tony.lindgren@linux.intel.com>
Signed-off-by: Tony Lindgren <tony.lindgren@linux.intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
---
- Add pragma poison for to_vmx() (Paolo)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Isaku Yamahata and committed by
Paolo Bonzini
ffb6fc84 0186dd29

+80 -34
+51 -1
arch/x86/kvm/vmx/pmu_intel.c
··· 19 19 #include "lapic.h" 20 20 #include "nested.h" 21 21 #include "pmu.h" 22 + #include "tdx.h" 22 23 23 24 /* 24 25 * Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX ··· 34 33 #define INTEL_RDPMC_INDEX_MASK GENMASK(15, 0) 35 34 36 35 #define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0) 36 + 37 + static struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu) 38 + { 39 + if (is_td_vcpu(vcpu)) 40 + return NULL; 41 + 42 + return &to_vmx(vcpu)->lbr_desc; 43 + } 44 + 45 + static struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu) 46 + { 47 + if (is_td_vcpu(vcpu)) 48 + return NULL; 49 + 50 + return &to_vmx(vcpu)->lbr_desc.records; 51 + } 52 + 53 + #pragma GCC poison to_vmx 37 54 38 55 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) 39 56 { ··· 148 129 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0); 149 130 } 150 131 132 + static bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu) 133 + { 134 + if (is_td_vcpu(vcpu)) 135 + return false; 136 + 137 + return cpuid_model_is_consistent(vcpu); 138 + } 139 + 140 + bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu) 141 + { 142 + if (is_td_vcpu(vcpu)) 143 + return false; 144 + 145 + return !!vcpu_to_lbr_records(vcpu)->nr; 146 + } 147 + 151 148 static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index) 152 149 { 153 150 struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu); ··· 229 194 { 230 195 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 231 196 197 + if (!lbr_desc) 198 + return; 199 + 232 200 if (lbr_desc->event) { 233 201 perf_event_release_kernel(lbr_desc->event); 234 202 lbr_desc->event = NULL; ··· 272 234 .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK | 273 235 PERF_SAMPLE_BRANCH_USER, 274 236 }; 237 + 238 + if (WARN_ON_ONCE(!lbr_desc)) 239 + return 0; 275 240 276 241 if (unlikely(lbr_desc->event)) { 277 242 __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use); ··· 507 466 u64 perf_capabilities; 508 467 u64 
counter_rsvd; 509 468 469 + if (!lbr_desc) 470 + return; 471 + 510 472 memset(&lbr_desc->records, 0, sizeof(lbr_desc->records)); 511 473 512 474 /* ··· 586 542 INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters); 587 543 588 544 perf_capabilities = vcpu_get_perf_capabilities(vcpu); 589 - if (cpuid_model_is_consistent(vcpu) && 545 + if (intel_pmu_lbr_is_compatible(vcpu) && 590 546 (perf_capabilities & PMU_CAP_LBR_FMT)) 591 547 memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps)); 592 548 else ··· 613 569 int i; 614 570 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 615 571 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 572 + 573 + if (!lbr_desc) 574 + return; 616 575 617 576 for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) { 618 577 pmu->gp_counters[i].type = KVM_PMC_GP; ··· 723 676 { 724 677 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); 725 678 struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu); 679 + 680 + if (WARN_ON_ONCE(!lbr_desc)) 681 + return; 726 682 727 683 if (!lbr_desc->event) { 728 684 vmx_disable_lbr_msrs_passthrough(vcpu);
+28
arch/x86/kvm/vmx/pmu_intel.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __KVM_X86_VMX_PMU_INTEL_H 3 + #define __KVM_X86_VMX_PMU_INTEL_H 4 + 5 + #include <linux/kvm_host.h> 6 + 7 + bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu); 8 + int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu); 9 + 10 + struct lbr_desc { 11 + /* Basic info about guest LBR records. */ 12 + struct x86_pmu_lbr records; 13 + 14 + /* 15 + * Emulate LBR feature via passthrough LBR registers when the 16 + * per-vcpu guest LBR event is scheduled on the current pcpu. 17 + * 18 + * The records may be inaccurate if the host reclaims the LBR. 19 + */ 20 + struct perf_event *event; 21 + 22 + /* True if LBRs are marked as not intercepted in the MSR bitmap */ 23 + bool msr_passthrough; 24 + }; 25 + 26 + extern struct x86_pmu_lbr vmx_lbr_caps; 27 + 28 + #endif /* __KVM_X86_VMX_PMU_INTEL_H */
+1 -33
arch/x86/kvm/vmx/vmx.h
··· 11 11 12 12 #include "capabilities.h" 13 13 #include "../kvm_cache_regs.h" 14 + #include "pmu_intel.h" 14 15 #include "vmcs.h" 15 16 #include "vmx_ops.h" 16 17 #include "../cpuid.h" ··· 90 89 }; 91 90 u32 full; 92 91 }; 93 - 94 - struct lbr_desc { 95 - /* Basic info about guest LBR records. */ 96 - struct x86_pmu_lbr records; 97 - 98 - /* 99 - * Emulate LBR feature via passthrough LBR registers when the 100 - * per-vcpu guest LBR event is scheduled on the current pcpu. 101 - * 102 - * The records may be inaccurate if the host reclaims the LBR. 103 - */ 104 - struct perf_event *event; 105 - 106 - /* True if LBRs are marked as not intercepted in the MSR bitmap */ 107 - bool msr_passthrough; 108 - }; 109 - 110 - extern struct x86_pmu_lbr vmx_lbr_caps; 111 92 112 93 /* 113 94 * The nested_vmx structure is part of vcpu_vmx, and holds information we need ··· 643 660 static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) 644 661 { 645 662 return container_of(vcpu, struct vcpu_vmx, vcpu); 646 - } 647 - 648 - static inline struct lbr_desc *vcpu_to_lbr_desc(struct kvm_vcpu *vcpu) 649 - { 650 - return &to_vmx(vcpu)->lbr_desc; 651 - } 652 - 653 - static inline struct x86_pmu_lbr *vcpu_to_lbr_records(struct kvm_vcpu *vcpu) 654 - { 655 - return &vcpu_to_lbr_desc(vcpu)->records; 656 - } 657 - 658 - static inline bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu) 659 - { 660 - return !!vcpu_to_lbr_records(vcpu)->nr; 661 663 } 662 664 663 665 void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);