/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				32
#define INTEL_PMC_MAX_FIXED				4
#define INTEL_PMC_IDX_FIXED				32

#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
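
/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): a raw event selector is composed by OR-ing the
 * fields above, e.g. event 0x3c, umask 0x00 (UnHalted Core Cycles),
 * counted in both user and kernel mode with the counter enabled.
 */
static inline u64 __example_eventsel(u8 event, u8 umask)
{
	return ((u64)event & ARCH_PERFMON_EVENTSEL_EVENT) |
	       (((u64)umask << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
	       ARCH_PERFMON_EVENTSEL_USR |
	       ARCH_PERFMON_EVENTSEL_OS |
	       ARCH_PERFMON_EVENTSEL_ENABLE;
}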

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT \
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK \
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK \
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK \
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK \
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK \
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK \
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |	\
	 ARCH_PERFMON_EVENTSEL_UMASK)
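
/*
 * Illustrative sketch (hypothetical helper): a user-supplied raw config
 * is typically clamped to the architecturally defined bits before it is
 * written to an event-select MSR. Intel and AMD need different masks
 * because AMD carries extra event-select bits above bit 31.
 */
static inline u64 __example_sanitize_raw_config(u64 config, bool amd)
{
	return config & (amd ? AMD64_RAW_EVENT_MASK : X86_RAW_EVENT_MASK);
}
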
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24
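
/*
 * Illustrative sketch (hypothetical helper): a PEBS data configuration
 * requesting memory info, GPRs and @nr_lbr branch records. The LBR
 * field holding "entries minus one" mirrors how the kernel's PEBS code
 * programs it; treat that encoding as an assumption.
 */
static inline u64 __example_pebs_datacfg(unsigned int nr_lbr)
{
	return PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP |
	       PEBS_DATACFG_LBRS |
	       ((u64)(nr_lbr - 1) << PEBS_DATACFG_LBR_SHIFT);
}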

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
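
/*
 * Illustrative sketch (hypothetical helper): the unions above overlay
 * the raw register values of CPUID leaf 0xA, so enumeration is a
 * cpuid() call (from <asm/processor.h>) plus field accesses:
 */
static inline int __example_num_gp_counters(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, ecx, edx;

	cpuid(0xa, &eax.full, &ebx, &ecx, &edx);
	return eax.split.num_counters;
}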

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int lbr_depth_mask:8;
		unsigned int reserved:22;
		/* Deep C-state Reset */
		unsigned int lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int lbr_lip:1;
	} split;
	unsigned int full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int lbr_call_stack:1;
	} split;
	unsigned int full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int lbr_br_type:1;
	} split;
	unsigned int full;
};
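
/*
 * Illustrative sketch (hypothetical helper): architectural LBR state is
 * enumerated in CPUID leaf 0x1C (28), decoded the same way as leaf 0xA
 * above:
 */
static inline unsigned int __example_lbr_depth_mask(void)
{
	union cpuid28_eax eax;
	unsigned int ebx, ecx, edx;

	cpuid(0x1c, &eax.full, &ebx, &ecx, &edx);
	return eax.split.lbr_depth_mask;
}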

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
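
/*
 * Illustrative sketch (hypothetical helper): each fixed counter owns a
 * 4-bit field in FIXED_CTR_CTRL; per the SDM, bit 0 of a field enables
 * ring-0 counting, bit 1 ring>0, and bit 3 the PMI on overflow.
 * Enabling fixed counter @idx for user+kernel with an overflow
 * interrupt:
 */
static inline u64 __example_fixed_ctrl_bits(unsigned int idx)
{
	return 0xbULL << (idx * 4);	/* PMI | USR | OS */
}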

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS MSR.
 */
#define INTEL_PMC_IDX_FIXED_BTS		(INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(55)
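
/*
 * Illustrative sketch (hypothetical helper): a PMI handler reads
 * GLOBAL_STATUS and strips the condition bits above before scanning the
 * per-counter overflow bits. rdmsrl() and MSR_CORE_PERF_GLOBAL_STATUS
 * come from <asm/msr.h> and <asm/msr-index.h>.
 */
static inline u64 __example_pending_overflows(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	return status & ~(GLOBAL_STATUS_COND_CHG |
			  GLOBAL_STATUS_BUFFER_OVF |
			  GLOBAL_STATUS_LBRS_FROZEN);
}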

/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it is used to indicate the LBR stack frozen
 * state for architectural perfmon v4, and we unconditionally mask that
 * bit in handle_pmi_common(), so it will never be set in the overflow
 * handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as
 * KVM) can program the LBR registers on its own, and we don't actually
 * do anything with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};
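
/*
 * Illustrative sketch (hypothetical helper): an adaptive PEBS record
 * starts with pebs_basic; the optional groups follow in the order
 * meminfo, gprs, xmm, lbrs for each PEBS_DATACFG bit that was set.
 * That the low bits of format_size echo the data configuration is how
 * the kernel's drain code interprets records; treat it as an assumption
 * here.
 */
static inline struct pebs_gprs *__example_pebs_gprs(struct pebs_basic *basic)
{
	void *next = basic + 1;

	if (!(basic->format_size & PEBS_DATACFG_GP))
		return NULL;
	if (basic->format_size & PEBS_DATACFG_MEMINFO)
		next += sizeof(struct pebs_meminfo);
	return next;
}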

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)
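
/*
 * Illustrative sketch (hypothetical helper): callers test individual
 * capability bits on the value returned by get_ibs_caps() (declared
 * further down), checking IBS_CAPS_AVAIL first:
 */
static inline bool __example_ibs_has_fetch_sampling(u32 caps)
{
	return (caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_FETCHSAM);
}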

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_RIP_INVALID		(1ULL<<38)
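
/*
 * Illustrative sketch (hypothetical helper): composing an IBS op
 * control value for a given sampling period. The MaxCnt field counting
 * in units of 16 ops (hence the >> 4) mirrors how the kernel's IBS
 * driver programs the register; treat that scaling as an assumption.
 */
static inline u64 __example_ibs_op_ctl(u64 period, bool count_dispatched_ops)
{
	u64 ctl = (period >> 4) & IBS_OP_MAX_CNT;

	if (count_dispatched_ops)
		ctl |= IBS_OP_CNT_CTL;	/* count dispatched ops, not clock cycles */
	return ctl | IBS_OP_ENABLE;
}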

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	return -1;
}
#endif
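
/*
 * Illustrative sketch (hypothetical helper): a hypervisor-side consumer
 * such as KVM walks the array returned by perf_guest_get_msrs() and
 * loads the guest values around VM-entry (restoring the host values on
 * exit); wrmsrl() comes from <asm/msr.h>.
 */
static inline void __example_load_guest_pmu_msrs(void)
{
	struct perf_guest_switch_msr *msrs;
	int i, nr;

	msrs = perf_guest_get_msrs(&nr);
	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest);
}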

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */