Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: selftests: Validate more arch-events in pmu_counters_test

Add support for 5 new architectural events (the 4 topdown level 1
metric events and the LBR inserts event) that will first show up in Intel's
Clearwater Forest CPUs. Detailed info about the new events can be found
in SDM section 21.2.7 "Pre-defined Architectural Performance Events".

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Yi Lai <yi1.lai@intel.com>
[sean: drop "unavailable_mask" changes]
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250919214648.1585683-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>

authored by

Dapeng Mi and committed by
Sean Christopherson
2922b595 1fcd3053

+29 -1
+10
tools/testing/selftests/kvm/include/x86/pmu.h
···
 #define INTEL_ARCH_BRANCHES_RETIRED		RAW_EVENT(0xc4, 0x00)
 #define INTEL_ARCH_BRANCHES_MISPREDICTED	RAW_EVENT(0xc5, 0x00)
 #define INTEL_ARCH_TOPDOWN_SLOTS		RAW_EVENT(0xa4, 0x01)
+#define INTEL_ARCH_TOPDOWN_BE_BOUND		RAW_EVENT(0xa4, 0x02)
+#define INTEL_ARCH_TOPDOWN_BAD_SPEC		RAW_EVENT(0x73, 0x00)
+#define INTEL_ARCH_TOPDOWN_FE_BOUND		RAW_EVENT(0x9c, 0x01)
+#define INTEL_ARCH_TOPDOWN_RETIRING		RAW_EVENT(0xc2, 0x02)
+#define INTEL_ARCH_LBR_INSERTS			RAW_EVENT(0xe4, 0x01)

 #define AMD_ZEN_CORE_CYCLES			RAW_EVENT(0x76, 0x00)
 #define AMD_ZEN_INSTRUCTIONS_RETIRED		RAW_EVENT(0xc0, 0x00)
···
 	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
 	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
 	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
+	INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX,
+	INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX,
+	INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX,
+	INTEL_ARCH_TOPDOWN_RETIRING_INDEX,
+	INTEL_ARCH_LBR_INSERTS_INDEX,
 	NR_INTEL_ARCH_EVENTS,
 };
···
+6 -1
tools/testing/selftests/kvm/include/x86/processor.h
···
 #define X86_PROPERTY_PMU_NR_GP_COUNTERS			KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
 #define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
 #define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH		KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
-#define X86_PROPERTY_PMU_EVENTS_MASK			KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
+#define X86_PROPERTY_PMU_EVENTS_MASK			KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 12)
 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK		KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
 #define X86_PROPERTY_PMU_NR_FIXED_COUNTERS		KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
 #define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH	KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
···
 #define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED	KVM_X86_PMU_FEATURE(EBX, 5)
 #define X86_PMU_FEATURE_BRANCHES_MISPREDICTED	KVM_X86_PMU_FEATURE(EBX, 6)
 #define X86_PMU_FEATURE_TOPDOWN_SLOTS		KVM_X86_PMU_FEATURE(EBX, 7)
+#define X86_PMU_FEATURE_TOPDOWN_BE_BOUND	KVM_X86_PMU_FEATURE(EBX, 8)
+#define X86_PMU_FEATURE_TOPDOWN_BAD_SPEC	KVM_X86_PMU_FEATURE(EBX, 9)
+#define X86_PMU_FEATURE_TOPDOWN_FE_BOUND	KVM_X86_PMU_FEATURE(EBX, 10)
+#define X86_PMU_FEATURE_TOPDOWN_RETIRING	KVM_X86_PMU_FEATURE(EBX, 11)
+#define X86_PMU_FEATURE_LBR_INSERTS		KVM_X86_PMU_FEATURE(EBX, 12)

 #define X86_PMU_FEATURE_INSNS_RETIRED_FIXED	KVM_X86_PMU_FEATURE(ECX, 0)
 #define X86_PMU_FEATURE_CPU_CYCLES_FIXED	KVM_X86_PMU_FEATURE(ECX, 1)
···
+5
tools/testing/selftests/kvm/lib/x86/pmu.c
···
 	INTEL_ARCH_BRANCHES_RETIRED,
 	INTEL_ARCH_BRANCHES_MISPREDICTED,
 	INTEL_ARCH_TOPDOWN_SLOTS,
+	INTEL_ARCH_TOPDOWN_BE_BOUND,
+	INTEL_ARCH_TOPDOWN_BAD_SPEC,
+	INTEL_ARCH_TOPDOWN_FE_BOUND,
+	INTEL_ARCH_TOPDOWN_RETIRING,
+	INTEL_ARCH_LBR_INSERTS,
 };
 kvm_static_assert(ARRAY_SIZE(intel_pmu_arch_events) == NR_INTEL_ARCH_EVENTS);
···
+8
tools/testing/selftests/kvm/x86/pmu_counters_test.c
···
 	[INTEL_ARCH_BRANCHES_RETIRED_INDEX]	 = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
 	[INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
 	[INTEL_ARCH_TOPDOWN_SLOTS_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
+	[INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_BE_BOUND, X86_PMU_FEATURE_NULL },
+	[INTEL_ARCH_TOPDOWN_BAD_SPEC_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_BAD_SPEC, X86_PMU_FEATURE_NULL },
+	[INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_FE_BOUND, X86_PMU_FEATURE_NULL },
+	[INTEL_ARCH_TOPDOWN_RETIRING_INDEX]	 = { X86_PMU_FEATURE_TOPDOWN_RETIRING, X86_PMU_FEATURE_NULL },
+	[INTEL_ARCH_LBR_INSERTS_INDEX]		 = { X86_PMU_FEATURE_LBR_INSERTS, X86_PMU_FEATURE_NULL },
 };

 kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
···
 		fallthrough;
 	case INTEL_ARCH_CPU_CYCLES_INDEX:
 	case INTEL_ARCH_REFERENCE_CYCLES_INDEX:
+	case INTEL_ARCH_TOPDOWN_BE_BOUND_INDEX:
+	case INTEL_ARCH_TOPDOWN_FE_BOUND_INDEX:
 		GUEST_ASSERT_NE(count, 0);
 		break;
 	case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
+	case INTEL_ARCH_TOPDOWN_RETIRING_INDEX:
 		__GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
 			       "Expected top-down slots >= %u, got count = %lu",
 			       NUM_INSNS_RETIRED, count);
···