Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2023, Tencent, Inc.
4 */
5#ifndef SELFTEST_KVM_PMU_H
6#define SELFTEST_KVM_PMU_H
7
8#include <stdint.h>
9
/*
 * Upper bound on the number of events used by tests when building a PMU
 * event filter; presumably sized to exceed KVM's per-filter limit so the
 * "too many events" path can be exercised — TODO confirm against the
 * KVM_SET_PMU_EVENT_FILTER uAPI.
 */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
11
/*
 * Encode an eventsel+umask pair into event-select MSR format.  Note, this is
 * technically AMD's format, as Intel's format only supports 8 bits for the
 * event selector, i.e. doesn't use bits 35:32 for the selector.  But, OR-ing
 * in '0' is a nop and won't clobber the CMASK.
 *
 * Layout produced: eventsel[7:0] -> bits 7:0, umask[7:0] -> bits 15:8, and
 * eventsel[11:8] -> bits 35:32 (AMD's extended event select, hence the
 * '<< 24' on the already-positioned 0xf00 slice).
 *
 * Fixes vs. the previous version: every use of @eventsel/@umask is fully
 * parenthesized (an argument such as 'x ? a : b' mis-expanded before), and
 * the 0xf00 mask is ULL so the shift to bit 35 can't truncate on targets
 * where 'unsigned long' is 32 bits.
 */
#define RAW_EVENT(eventsel, umask) ((((eventsel) & 0xf00ULL) << 24) | \
				    ((eventsel) & 0xff)             | \
				    (((umask) & 0xff) << 8))
21
/*
 * These are technically Intel's definitions, but except for CMASK (see above),
 * AMD's layout is compatible with Intel's.
 *
 * Field meanings follow the architectural IA32_PERFEVTSELx layout (Intel
 * SDM); the per-bit notes below are the SDM names — confirm against the
 * SDM/APM before relying on them in new tests.
 */
#define ARCH_PERFMON_EVENTSEL_EVENT GENMASK_ULL(7, 0)	/* event select code */
#define ARCH_PERFMON_EVENTSEL_UMASK GENMASK_ULL(15, 8)	/* unit mask */
#define ARCH_PERFMON_EVENTSEL_USR BIT_ULL(16)		/* count in user mode */
#define ARCH_PERFMON_EVENTSEL_OS BIT_ULL(17)		/* count in kernel mode */
#define ARCH_PERFMON_EVENTSEL_EDGE BIT_ULL(18)		/* edge detect */
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL BIT_ULL(19)	/* pin control */
#define ARCH_PERFMON_EVENTSEL_INT BIT_ULL(20)		/* interrupt on overflow */
#define ARCH_PERFMON_EVENTSEL_ANY BIT_ULL(21)		/* any-thread counting */
#define ARCH_PERFMON_EVENTSEL_ENABLE BIT_ULL(22)	/* counter enable */
#define ARCH_PERFMON_EVENTSEL_INV BIT_ULL(23)		/* invert CMASK compare */
#define ARCH_PERFMON_EVENTSEL_CMASK GENMASK_ULL(31, 24)	/* counter mask */
37
/*
 * RDPMC control flags, Intel only.  These are flag bits in the RDPMC index
 * (ECX) selecting what class of counter/data to read — presumably metrics,
 * fixed counters, and the "fast" partial-width read mode; confirm against
 * the Intel SDM RDPMC description.
 */
#define INTEL_RDPMC_METRICS BIT_ULL(29)
#define INTEL_RDPMC_FIXED BIT_ULL(30)
#define INTEL_RDPMC_FAST BIT_ULL(31)
42
/* Fixed PMC controls, Intel only. */

/*
 * Global-enable bit for fixed counter @_idx: fixed counters occupy bits
 * 63:32 of PERF_GLOBAL_CTRL (hence the '32 +'), one bit per counter.
 */
#define FIXED_PMC_GLOBAL_CTRL_ENABLE(_idx) BIT_ULL((32 + (_idx)))

/* Per-counter control-field bits (4-bit field per fixed counter). */
#define FIXED_PMC_KERNEL BIT_ULL(0)		/* count in kernel mode */
#define FIXED_PMC_USER BIT_ULL(1)		/* count in user mode */
#define FIXED_PMC_ANYTHREAD BIT_ULL(2)		/* any-thread counting */
#define FIXED_PMC_ENABLE_PMI BIT_ULL(3)		/* interrupt on overflow */
#define FIXED_PMC_NR_BITS 4			/* width of one control field */
/* Position @_val's 4-bit control field for fixed counter @_idx. */
#define FIXED_PMC_CTRL(_idx, _val) ((_val) << ((_idx) * FIXED_PMC_NR_BITS))
52
/*
 * PMU capability bits; presumably fields of MSR_IA32_PERF_CAPABILITIES
 * (full-width counter writes, LBR format) — TODO confirm against the SDM.
 */
#define PMU_CAP_FW_WRITES BIT_ULL(13)
#define PMU_CAP_LBR_FMT 0x3f
55
/*
 * Intel architectural events, in eventsel+umask form (see RAW_EVENT()).
 * Listed in the same order as enum intel_pmu_architectural_events below.
 */
#define INTEL_ARCH_CPU_CYCLES RAW_EVENT(0x3c, 0x00)
#define INTEL_ARCH_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
#define INTEL_ARCH_REFERENCE_CYCLES RAW_EVENT(0x3c, 0x01)
#define INTEL_ARCH_LLC_REFERENCES RAW_EVENT(0x2e, 0x4f)
#define INTEL_ARCH_LLC_MISSES RAW_EVENT(0x2e, 0x41)
#define INTEL_ARCH_BRANCHES_RETIRED RAW_EVENT(0xc4, 0x00)
#define INTEL_ARCH_BRANCHES_MISPREDICTED RAW_EVENT(0xc5, 0x00)
#define INTEL_ARCH_TOPDOWN_SLOTS RAW_EVENT(0xa4, 0x01)
64
/*
 * AMD Zen events, in eventsel+umask form (see RAW_EVENT()).  Listed in the
 * same order as enum amd_pmu_zen_events below.
 */
#define AMD_ZEN_CORE_CYCLES RAW_EVENT(0x76, 0x00)
#define AMD_ZEN_INSTRUCTIONS_RETIRED RAW_EVENT(0xc0, 0x00)
#define AMD_ZEN_BRANCHES_RETIRED RAW_EVENT(0xc2, 0x00)
#define AMD_ZEN_BRANCHES_MISPREDICTED RAW_EVENT(0xc3, 0x00)
69
/*
 * Note! The order and thus the index of the architectural events matters as
 * support for each event is enumerated via CPUID using the index of the event.
 * Do not reorder or insert entries; append only, before NR_INTEL_ARCH_EVENTS.
 */
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES_INDEX,
	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX,
	INTEL_ARCH_REFERENCE_CYCLES_INDEX,
	INTEL_ARCH_LLC_REFERENCES_INDEX,
	INTEL_ARCH_LLC_MISSES_INDEX,
	INTEL_ARCH_BRANCHES_RETIRED_INDEX,
	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX,
	INTEL_ARCH_TOPDOWN_SLOTS_INDEX,
	NR_INTEL_ARCH_EVENTS,	/* count sentinel, not a real event */
};
85
/*
 * Indices for the AMD_ZEN_* events above.
 *
 * NOTE(review): enumerator names drift from the event defines
 * (AMD_ZEN_INSTRUCTIONS_INDEX vs. AMD_ZEN_INSTRUCTIONS_RETIRED, etc.);
 * left as-is since renaming would break existing users.
 */
enum amd_pmu_zen_events {
	AMD_ZEN_CORE_CYCLES_INDEX,
	AMD_ZEN_INSTRUCTIONS_INDEX,
	AMD_ZEN_BRANCHES_INDEX,
	AMD_ZEN_BRANCH_MISSES_INDEX,
	NR_AMD_ZEN_EVENTS,	/* count sentinel, not a real event */
};
93
/*
 * Raw event encodings, indexed by the enums above (defined elsewhere,
 * presumably in the selftests' PMU library — confirm in lib/).
 */
extern const uint64_t intel_pmu_arch_events[];
extern const uint64_t amd_pmu_zen_events[];
96
97#endif /* SELFTEST_KVM_PMU_H */