Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>
#include <linux/kvm_host.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
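
/*
 * Illustrative note (not used by the driver): each entry above packs the
 * event select in bits 7:0 and the unit mask in bits 15:8, matching the
 * IA32_PERFEVTSELx MSR layout. For example, 0x003c is event 0x3c with
 * umask 0x00, the architectural UnHalted Core Cycles event.
 */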

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_v5_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	FIXED_EVENT_CONSTRAINT(0x0500, 4),
	FIXED_EVENT_CONSTRAINT(0x0600, 5),
	FIXED_EVENT_CONSTRAINT(0x0700, 6),
	FIXED_EVENT_CONSTRAINT(0x0800, 7),
	FIXED_EVENT_CONSTRAINT(0x0900, 8),
	FIXED_EVENT_CONSTRAINT(0x0a00, 9),
	FIXED_EVENT_CONSTRAINT(0x0b00, 10),
	FIXED_EVENT_CONSTRAINT(0x0c00, 11),
	FIXED_EVENT_CONSTRAINT(0x0d00, 12),
	FIXED_EVENT_CONSTRAINT(0x0e00, 13),
	FIXED_EVENT_CONSTRAINT(0x0f00, 14),
	FIXED_EVENT_CONSTRAINT(0x1000, 15),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */

	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf), /* FRONTEND_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	/*
	 * Note: the low 8 bits of the eventsel code do not form a contiguous
	 * field; some of the bits in between #GP when set. Those bits are
	 * masked out.
	 */
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x01c0, 0), /* old INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf),	/* SW_PREFETCH_ACCESS.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),	/* CYCLE_ACTIVITY.STALLS_TOTAL */
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),	/* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),	/* CYCLE_ACTIVITY.STALLS_MEM_ANY */
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf),	/* CYCLE_ACTIVITY.* */
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT(0xef, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_spr_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * The 0x2E and 0x3C events are exceptions and have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally event codes >= 0x90 are likely to have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_gnr_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x02c6, MSR_PEBS_FRONTEND, 0x9, FE),
	INTEL_UEVENT_EXTRA_REG(0x03c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");
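/*
 * Illustrative note: "ldlat=3" in the mem-loads strings above is the
 * load-latency threshold, in core cycles, for the PEBS load-latency
 * facility; only loads taking at least that many cycles are sampled.
 */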

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

/*
 * Topdown events for Intel Core CPUs.
 *
 * The events are all counted in slots, where a slot is an issue slot in
 * a 4-wide pipeline. Some events are already reported in slots; for
 * cycle-based events we multiply by the pipeline width (4).
 *
 * With Hyper-Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the averaged case the metric is still scaled to the pipeline width,
 * so we use a factor of 2: (count_t0 + count_t1) / 2 * 4.
 */
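
/*
 * Worked example (illustrative): with HT on and per-thread counts
 * t0 = 100 and t1 = 60, total-slots = (100 + 60) / 2 * 4 = 320, which is
 * what the .scale value of "2" yields when applied to the summed counts.
 */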

EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");			/* uops_issued.any */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	/*
	 * When HT is off these events can only run on the bottom 4 counters
	 * When HT is on, they are impacted by the HT bug and require EXCL access
	 */
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf), /* MEM_TRANS_RETIRED.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
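
/*
 * For example (illustrative), intel_pmu_event_map(PERF_COUNT_HW_CACHE_MISSES)
 * returns 0x412e, the architectural LONGEST_LAT_CACHE.MISS encoding from
 * intel_perfmon_event_map[] above.
 */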

static __initconst const u64 spr_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS) ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS) ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
};

static __initconst const u64 spr_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS) ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS) ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS) ] = 0x3fb3000001,
	},
 },
};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */

#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)

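/*
 * Illustrative note: these SKL_* masks are OR-ed together below to form the
 * 64-bit OFFCORE_RESPONSE value written to MSR_OFFCORE_RSP_{0,1}; e.g. an
 * LL read miss is encoded as SKL_DEMAND_READ|SKL_L3_MISS|SKL_ANY_SNOOP|
 * SKL_SUPPLIER_NONE in skl_hw_cache_extra_regs[] further down.
 */
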
static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x283, /* ICACHE_64B.MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0xe85, /* ITLB_MISSES.WALK_COMPLETED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
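
/*
 * Illustrative note: Broadwell moved the L3-miss-to-local-DRAM response bit
 * to bit 26, so BDW_L3_MISS above reuses the HSW_* remote-hop bits but
 * substitutes BDW_L3_MISS_LOCAL for HSW_L3_MISS_LOCAL_DRAM (bit 22).
 */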


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE */
		[ C(RESULT_MISS) ] = 0x1b7, /* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
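
/*
 * Illustrative note: on Nehalem/Westmere these request/response bits are
 * written to MSR_OFFCORE_RSP_0 and selected with the OFFCORE_RESPONSE
 * event (0x01b7) used throughout the cache tables above and below.
 */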

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
/* no_alloc_cycles.not_delivered */
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	       "event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	       "event=0xc2,umask=0x10");
/* uops_retired.all */
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	       "event=0xc2,umask=0x10");
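
/*
 * Illustrative note: Silvermont has a 2-wide pipeline, hence the ".scale"
 * factor of 2 applied to the cycle-based topdown-total-slots event above.
 */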

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1720 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
1721 },
1722 [ C(OP_WRITE) ] = {
1723 [ C(RESULT_ACCESS) ] = -1,
1724 [ C(RESULT_MISS) ] = -1,
1725 },
1726 [ C(OP_PREFETCH) ] = {
1727 [ C(RESULT_ACCESS) ] = 0,
1728 [ C(RESULT_MISS) ] = 0,
1729 },
1730 },
1731 [ C(LL ) ] = {
1732 [ C(OP_READ) ] = {
1733 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1734 [ C(RESULT_ACCESS) ] = 0x01b7,
1735 [ C(RESULT_MISS) ] = 0,
1736 },
1737 [ C(OP_WRITE) ] = {
1738 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1739 [ C(RESULT_ACCESS) ] = 0x01b7,
1740 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1741 [ C(RESULT_MISS) ] = 0x01b7,
1742 },
1743 [ C(OP_PREFETCH) ] = {
1744 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1745 [ C(RESULT_ACCESS) ] = 0x01b7,
1746 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1747 [ C(RESULT_MISS) ] = 0x01b7,
1748 },
1749 },
1750 [ C(DTLB) ] = {
1751 [ C(OP_READ) ] = {
1752 [ C(RESULT_ACCESS) ] = 0,
1753 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
1754 },
1755 [ C(OP_WRITE) ] = {
1756 [ C(RESULT_ACCESS) ] = 0,
1757 [ C(RESULT_MISS) ] = 0,
1758 },
1759 [ C(OP_PREFETCH) ] = {
1760 [ C(RESULT_ACCESS) ] = 0,
1761 [ C(RESULT_MISS) ] = 0,
1762 },
1763 },
1764 [ C(ITLB) ] = {
1765 [ C(OP_READ) ] = {
1766 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1767 [ C(RESULT_MISS) ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1768 },
1769 [ C(OP_WRITE) ] = {
1770 [ C(RESULT_ACCESS) ] = -1,
1771 [ C(RESULT_MISS) ] = -1,
1772 },
1773 [ C(OP_PREFETCH) ] = {
1774 [ C(RESULT_ACCESS) ] = -1,
1775 [ C(RESULT_MISS) ] = -1,
1776 },
1777 },
1778 [ C(BPU ) ] = {
1779 [ C(OP_READ) ] = {
1780 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1781 [ C(RESULT_MISS) ] = 0x00c5, /* BR_INST_RETIRED.MISPRED */
1782 },
1783 [ C(OP_WRITE) ] = {
1784 [ C(RESULT_ACCESS) ] = -1,
1785 [ C(RESULT_MISS) ] = -1,
1786 },
1787 [ C(OP_PREFETCH) ] = {
1788 [ C(RESULT_ACCESS) ] = -1,
1789 [ C(RESULT_MISS) ] = -1,
1790 },
1791 },
1792};
1793
1794EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1795EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1796/* UOPS_NOT_DELIVERED.ANY */
1797EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1798/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1799EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1800/* UOPS_RETIRED.ANY */
1801EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1802/* UOPS_ISSUED.ANY */
1803EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1804
1805static struct attribute *glm_events_attrs[] = {
1806 EVENT_PTR(td_total_slots_glm),
1807 EVENT_PTR(td_total_slots_scale_glm),
1808 EVENT_PTR(td_fetch_bubbles_glm),
1809 EVENT_PTR(td_recovery_bubbles_glm),
1810 EVENT_PTR(td_slots_issued_glm),
1811 EVENT_PTR(td_slots_retired_glm),
1812 NULL
1813};
1814
1815static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1816 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1817 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1818 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1819 EVENT_EXTRA_END
1820};
1821
1822#define GLM_DEMAND_DATA_RD BIT_ULL(0)
1823#define GLM_DEMAND_RFO BIT_ULL(1)
1824#define GLM_ANY_RESPONSE BIT_ULL(16)
1825#define GLM_SNP_NONE_OR_MISS BIT_ULL(33)
1826#define GLM_DEMAND_READ GLM_DEMAND_DATA_RD
1827#define GLM_DEMAND_WRITE GLM_DEMAND_RFO
1828#define GLM_DEMAND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
1829#define GLM_LLC_ACCESS GLM_ANY_RESPONSE
1830#define GLM_SNP_ANY (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1831#define GLM_LLC_MISS (GLM_SNP_ANY|SNB_NON_DRAM)
1832
1833static __initconst const u64 glm_hw_cache_event_ids
1834 [PERF_COUNT_HW_CACHE_MAX]
1835 [PERF_COUNT_HW_CACHE_OP_MAX]
1836 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1837 [C(L1D)] = {
1838 [C(OP_READ)] = {
1839 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1840 [C(RESULT_MISS)] = 0x0,
1841 },
1842 [C(OP_WRITE)] = {
1843 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1844 [C(RESULT_MISS)] = 0x0,
1845 },
1846 [C(OP_PREFETCH)] = {
1847 [C(RESULT_ACCESS)] = 0x0,
1848 [C(RESULT_MISS)] = 0x0,
1849 },
1850 },
1851 [C(L1I)] = {
1852 [C(OP_READ)] = {
1853 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1854 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1855 },
1856 [C(OP_WRITE)] = {
1857 [C(RESULT_ACCESS)] = -1,
1858 [C(RESULT_MISS)] = -1,
1859 },
1860 [C(OP_PREFETCH)] = {
1861 [C(RESULT_ACCESS)] = 0x0,
1862 [C(RESULT_MISS)] = 0x0,
1863 },
1864 },
1865 [C(LL)] = {
1866 [C(OP_READ)] = {
1867 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1868 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1869 },
1870 [C(OP_WRITE)] = {
1871 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1872 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1873 },
1874 [C(OP_PREFETCH)] = {
1875 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1876 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1877 },
1878 },
1879 [C(DTLB)] = {
1880 [C(OP_READ)] = {
1881 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1882 [C(RESULT_MISS)] = 0x0,
1883 },
1884 [C(OP_WRITE)] = {
1885 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1886 [C(RESULT_MISS)] = 0x0,
1887 },
1888 [C(OP_PREFETCH)] = {
1889 [C(RESULT_ACCESS)] = 0x0,
1890 [C(RESULT_MISS)] = 0x0,
1891 },
1892 },
1893 [C(ITLB)] = {
1894 [C(OP_READ)] = {
1895 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
1896 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
1897 },
1898 [C(OP_WRITE)] = {
1899 [C(RESULT_ACCESS)] = -1,
1900 [C(RESULT_MISS)] = -1,
1901 },
1902 [C(OP_PREFETCH)] = {
1903 [C(RESULT_ACCESS)] = -1,
1904 [C(RESULT_MISS)] = -1,
1905 },
1906 },
1907 [C(BPU)] = {
1908 [C(OP_READ)] = {
1909 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1910 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
1911 },
1912 [C(OP_WRITE)] = {
1913 [C(RESULT_ACCESS)] = -1,
1914 [C(RESULT_MISS)] = -1,
1915 },
1916 [C(OP_PREFETCH)] = {
1917 [C(RESULT_ACCESS)] = -1,
1918 [C(RESULT_MISS)] = -1,
1919 },
1920 },
1921};
1922
1923static __initconst const u64 glm_hw_cache_extra_regs
1924 [PERF_COUNT_HW_CACHE_MAX]
1925 [PERF_COUNT_HW_CACHE_OP_MAX]
1926 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1927 [C(LL)] = {
1928 [C(OP_READ)] = {
1929 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
1930 GLM_LLC_ACCESS,
1931 [C(RESULT_MISS)] = GLM_DEMAND_READ|
1932 GLM_LLC_MISS,
1933 },
1934 [C(OP_WRITE)] = {
1935 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
1936 GLM_LLC_ACCESS,
1937 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
1938 GLM_LLC_MISS,
1939 },
1940 [C(OP_PREFETCH)] = {
1941 [C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
1942 GLM_LLC_ACCESS,
1943 [C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
1944 GLM_LLC_MISS,
1945 },
1946 },
1947};
1948
1949static __initconst const u64 glp_hw_cache_event_ids
1950 [PERF_COUNT_HW_CACHE_MAX]
1951 [PERF_COUNT_HW_CACHE_OP_MAX]
1952 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1953 [C(L1D)] = {
1954 [C(OP_READ)] = {
1955 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1956 [C(RESULT_MISS)] = 0x0,
1957 },
1958 [C(OP_WRITE)] = {
1959 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
1960 [C(RESULT_MISS)] = 0x0,
1961 },
1962 [C(OP_PREFETCH)] = {
1963 [C(RESULT_ACCESS)] = 0x0,
1964 [C(RESULT_MISS)] = 0x0,
1965 },
1966 },
1967 [C(L1I)] = {
1968 [C(OP_READ)] = {
1969 [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
1970 [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
1971 },
1972 [C(OP_WRITE)] = {
1973 [C(RESULT_ACCESS)] = -1,
1974 [C(RESULT_MISS)] = -1,
1975 },
1976 [C(OP_PREFETCH)] = {
1977 [C(RESULT_ACCESS)] = 0x0,
1978 [C(RESULT_MISS)] = 0x0,
1979 },
1980 },
1981 [C(LL)] = {
1982 [C(OP_READ)] = {
1983 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1984 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1985 },
1986 [C(OP_WRITE)] = {
1987 [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
1988 [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
1989 },
1990 [C(OP_PREFETCH)] = {
1991 [C(RESULT_ACCESS)] = 0x0,
1992 [C(RESULT_MISS)] = 0x0,
1993 },
1994 },
1995 [C(DTLB)] = {
1996 [C(OP_READ)] = {
1997 [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
1998 [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1999 },
2000 [C(OP_WRITE)] = {
2001 [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
2002 [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
2003 },
2004 [C(OP_PREFETCH)] = {
2005 [C(RESULT_ACCESS)] = 0x0,
2006 [C(RESULT_MISS)] = 0x0,
2007 },
2008 },
2009 [C(ITLB)] = {
2010 [C(OP_READ)] = {
2011 [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
2012 [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
2013 },
2014 [C(OP_WRITE)] = {
2015 [C(RESULT_ACCESS)] = -1,
2016 [C(RESULT_MISS)] = -1,
2017 },
2018 [C(OP_PREFETCH)] = {
2019 [C(RESULT_ACCESS)] = -1,
2020 [C(RESULT_MISS)] = -1,
2021 },
2022 },
2023 [C(BPU)] = {
2024 [C(OP_READ)] = {
2025 [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
2026 [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
2027 },
2028 [C(OP_WRITE)] = {
2029 [C(RESULT_ACCESS)] = -1,
2030 [C(RESULT_MISS)] = -1,
2031 },
2032 [C(OP_PREFETCH)] = {
2033 [C(RESULT_ACCESS)] = -1,
2034 [C(RESULT_MISS)] = -1,
2035 },
2036 },
2037};
2038
2039static __initconst const u64 glp_hw_cache_extra_regs
2040 [PERF_COUNT_HW_CACHE_MAX]
2041 [PERF_COUNT_HW_CACHE_OP_MAX]
2042 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2043 [C(LL)] = {
2044 [C(OP_READ)] = {
2045 [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
2046 GLM_LLC_ACCESS,
2047 [C(RESULT_MISS)] = GLM_DEMAND_READ|
2048 GLM_LLC_MISS,
2049 },
2050 [C(OP_WRITE)] = {
2051 [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
2052 GLM_LLC_ACCESS,
2053 [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
2054 GLM_LLC_MISS,
2055 },
2056 [C(OP_PREFETCH)] = {
2057 [C(RESULT_ACCESS)] = 0x0,
2058 [C(RESULT_MISS)] = 0x0,
2059 },
2060 },
2061};
2062
2063#define TNT_LOCAL_DRAM BIT_ULL(26)
2064#define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2065#define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2066#define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2067#define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2068 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2069#define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2070
2071static __initconst const u64 tnt_hw_cache_extra_regs
2072 [PERF_COUNT_HW_CACHE_MAX]
2073 [PERF_COUNT_HW_CACHE_OP_MAX]
2074 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2075 [C(LL)] = {
2076 [C(OP_READ)] = {
2077 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2078 TNT_LLC_ACCESS,
2079 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2080 TNT_LLC_MISS,
2081 },
2082 [C(OP_WRITE)] = {
2083 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2084 TNT_LLC_ACCESS,
2085 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2086 TNT_LLC_MISS,
2087 },
2088 [C(OP_PREFETCH)] = {
2089 [C(RESULT_ACCESS)] = 0x0,
2090 [C(RESULT_MISS)] = 0x0,
2091 },
2092 },
2093};
2094
2095EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2096EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2097EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2098EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2099
2100static struct attribute *tnt_events_attrs[] = {
2101 EVENT_PTR(td_fe_bound_tnt),
2102 EVENT_PTR(td_retiring_tnt),
2103 EVENT_PTR(td_bad_spec_tnt),
2104 EVENT_PTR(td_be_bound_tnt),
2105 NULL,
2106};
2107
2108static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2109 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2110 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2111 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2112 EVENT_EXTRA_END
2113};
2114
2115EVENT_ATTR_STR(mem-loads, mem_ld_grt, "event=0xd0,umask=0x5,ldlat=3");
2116EVENT_ATTR_STR(mem-stores, mem_st_grt, "event=0xd0,umask=0x6");
2117
2118static struct attribute *grt_mem_attrs[] = {
2119 EVENT_PTR(mem_ld_grt),
2120 EVENT_PTR(mem_st_grt),
2121 NULL
2122};
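
/*
 * These are the aliases tools typically use for memory profiling
 * ("perf mem record" resolves mem-loads/mem-stores). The ldlat=3
 * qualifier arms the PEBS load-latency facility through the LDLAT
 * extra reg below, sampling only loads with latency >= 3 cycles.
 */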
2123
2124static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2125 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2126 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2127 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2128 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2129 EVENT_EXTRA_END
2130};
2131
2132static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
2133 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2134 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
2135 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff3ffffffffffull, RSP_1),
2136 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2137 INTEL_UEVENT_EXTRA_REG(0x0127, MSR_SNOOP_RSP_0, 0xffffffffffffffffull, SNOOP_0),
2138 INTEL_UEVENT_EXTRA_REG(0x0227, MSR_SNOOP_RSP_1, 0xffffffffffffffffull, SNOOP_1),
2139 EVENT_EXTRA_END
2140};
2141
2142#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
2143#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
2144#define KNL_MCDRAM_LOCAL BIT_ULL(21)
2145#define KNL_MCDRAM_FAR BIT_ULL(22)
2146#define KNL_DDR_LOCAL BIT_ULL(23)
2147#define KNL_DDR_FAR BIT_ULL(24)
2148#define KNL_DRAM_ANY (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2149 KNL_DDR_LOCAL | KNL_DDR_FAR)
2150#define KNL_L2_READ SLM_DMND_READ
2151#define KNL_L2_WRITE SLM_DMND_WRITE
2152#define KNL_L2_PREFETCH SLM_DMND_PREFETCH
2153#define KNL_L2_ACCESS SLM_LLC_ACCESS
2154#define KNL_L2_MISS (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2155 KNL_DRAM_ANY | SNB_SNP_ANY | \
2156 SNB_NON_DRAM)
2157
2158static __initconst const u64 knl_hw_cache_extra_regs
2159 [PERF_COUNT_HW_CACHE_MAX]
2160 [PERF_COUNT_HW_CACHE_OP_MAX]
2161 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2162 [C(LL)] = {
2163 [C(OP_READ)] = {
2164 [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2165 [C(RESULT_MISS)] = 0,
2166 },
2167 [C(OP_WRITE)] = {
2168 [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2169 [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
2170 },
2171 [C(OP_PREFETCH)] = {
2172 [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2173 [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
2174 },
2175 },
2176};
2177
2178/*
2179 * Used from PMIs where the LBRs are already disabled.
2180 *
2181 * This function may be called consecutively; the PMU is required to remain
2182 * in the disabled state across such calls.
2183 *
2184 * During consecutive calls, the same disable value will be written to related
2185 * registers, so the PMU state remains unchanged.
2186 *
2187 * intel_bts events don't coexist with intel PMU's BTS events because of
2188 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2189 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2190 *
2191 * Avoid PEBS_ENABLE MSR access in PMIs.
2192 * Once GLOBAL_CTRL has been disabled, none of the counters count anymore,
2193 * so it doesn't matter whether PEBS is enabled or not.
2194 * Usually, the PEBS status is not changed in PMIs, so it's unnecessary to
2195 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2196 * However, some cases, e.g. PMI throttling, may change the PEBS status,
2197 * and PEBS_ENABLE should then be updated where the status changes.
2198 */
2199static __always_inline void __intel_pmu_disable_all(bool bts)
2200{
2201 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2202
2203 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2204
2205 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2206 intel_pmu_disable_bts();
2207}
2208
2209static __always_inline void intel_pmu_disable_all(void)
2210{
2211 __intel_pmu_disable_all(true);
2212 intel_pmu_pebs_disable_all();
2213 intel_pmu_lbr_disable_all();
2214}
2215
2216static void __intel_pmu_enable_all(int added, bool pmi)
2217{
2218 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2219 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2220
2221 intel_pmu_lbr_enable_all(pmi);
2222
2223 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {
2224 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val);
2225 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;
2226 }
2227
2228 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2229 intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2230
2231 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2232 struct perf_event *event =
2233 cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2234
2235 if (WARN_ON_ONCE(!event))
2236 return;
2237
2238 intel_pmu_enable_bts(event->hw.config);
2239 }
2240}
2241
2242static void intel_pmu_enable_all(int added)
2243{
2244 intel_pmu_pebs_enable_all();
2245 __intel_pmu_enable_all(added, false);
2246}
2247
2248static noinline int
2249__intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries,
2250 unsigned int cnt, unsigned long flags)
2251{
2252 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2253
2254 intel_pmu_lbr_read();
2255 cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr);
2256
2257 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt);
2258 intel_pmu_enable_all(0);
2259 local_irq_restore(flags);
2260 return cnt;
2261}
2262
2263static int
2264intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2265{
2266 unsigned long flags;
2267
2268 /* must not have branches... */
2269 local_irq_save(flags);
2270 __intel_pmu_disable_all(false); /* we don't care about BTS */
2271 __intel_pmu_lbr_disable();
2272 /* ... until here */
2273 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2274}
2275
2276static int
2277intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt)
2278{
2279 unsigned long flags;
2280
2281 /* must not have branches... */
2282 local_irq_save(flags);
2283 __intel_pmu_disable_all(false); /* we don't care about BTS */
2284 __intel_pmu_arch_lbr_disable();
2285 /* ... until here */
2286 return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
2287}
2288
2289/*
2290 * Workaround for:
2291 * Intel Errata AAK100 (model 26)
2292 * Intel Errata AAP53 (model 30)
2293 * Intel Errata BD53 (model 44)
2294 *
2295 * The official story:
2296 * These chips need to be 'reset' when adding counters by programming the
2297 * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2298 * in sequence on the same PMC or on different PMCs.
2299 *
2300 * In practice it appears some of these events do in fact count, and
2301 * we need to program all 4 events.
2302 */
2303static void intel_pmu_nhm_workaround(void)
2304{
2305 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2306 static const unsigned long nhm_magic[4] = {
2307 0x4300B5,
2308 0x4300D2,
2309 0x4300B1,
2310 0x4300B1
2311 };
2312 struct perf_event *event;
2313 int i;
2314
2315 /*
2316 * The errata requires the following steps:
2317 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2318 * 2) Configure 4 PERFEVTSELx with the magic events and clear
2319 * the corresponding PMCx;
2320 * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
2321 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2322 * 5) Clear the 4 pairs of PERFEVTSELx and PMCx;
2323 */
2324
2325 /*
2326 * The real steps we choose are a little different from above.
2327 * A) To reduce MSR operations, we don't run step 1) as the MSRs
2328 * are already cleared before this function is called;
2329 * B) Call x86_perf_event_update to save PMCx before configuring
2330 * PERFEVTSELx with the magic number;
2331 * C) With step 5), we only clear a PERFEVTSELx when it is not
2332 * currently in use.
2333 * D) Call x86_perf_event_set_period to restore PMCx;
2334 */
2335
2336 /* We always operate on 4 pairs of performance counters */
2337 for (i = 0; i < 4; i++) {
2338 event = cpuc->events[i];
2339 if (event)
2340 static_call(x86_pmu_update)(event);
2341 }
2342
2343 for (i = 0; i < 4; i++) {
2344 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2345 wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2346 }
2347
2348 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2349 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2350
2351 for (i = 0; i < 4; i++) {
2352 event = cpuc->events[i];
2353
2354 if (event) {
2355 static_call(x86_pmu_set_period)(event);
2356 __x86_pmu_enable_event(&event->hw,
2357 ARCH_PERFMON_EVENTSEL_ENABLE);
2358 } else
2359 wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2360 }
2361}
2362
2363static void intel_pmu_nhm_enable_all(int added)
2364{
2365 if (added)
2366 intel_pmu_nhm_workaround();
2367 intel_pmu_enable_all(added);
2368}
2369
2370static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2371{
2372 u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2373
2374 if (cpuc->tfa_shadow != val) {
2375 cpuc->tfa_shadow = val;
2376 wrmsrl(MSR_TSX_FORCE_ABORT, val);
2377 }
2378}
2379
2380static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2381{
2382 /*
2383 * We're going to use PMC3, make sure TFA is set before we touch it.
2384 */
2385 if (cntr == 3)
2386 intel_set_tfa(cpuc, true);
2387}
2388
2389static void intel_tfa_pmu_enable_all(int added)
2390{
2391 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2392
2393 /*
2394 * If we find PMC3 is no longer used when we enable the PMU, we can
2395 * clear TFA.
2396 */
2397 if (!test_bit(3, cpuc->active_mask))
2398 intel_set_tfa(cpuc, false);
2399
2400 intel_pmu_enable_all(added);
2401}
2402
2403static inline u64 intel_pmu_get_status(void)
2404{
2405 u64 status;
2406
2407 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2408
2409 return status;
2410}
2411
2412static inline void intel_pmu_ack_status(u64 ack)
2413{
2414 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2415}
2416
2417static inline bool event_is_checkpointed(struct perf_event *event)
2418{
2419 return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2420}
2421
2422static inline void intel_set_masks(struct perf_event *event, int idx)
2423{
2424 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2425
2426 if (event->attr.exclude_host)
2427 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2428 if (event->attr.exclude_guest)
2429 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2430 if (event_is_checkpointed(event))
2431 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2432}
2433
2434static inline void intel_clear_masks(struct perf_event *event, int idx)
2435{
2436 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2437
2438 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2439 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2440 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2441}
2442
2443static void intel_pmu_disable_fixed(struct perf_event *event)
2444{
2445 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2446 struct hw_perf_event *hwc = &event->hw;
2447 int idx = hwc->idx;
2448 u64 mask;
2449
2450 if (is_topdown_idx(idx)) {
2451 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2452
2453 /*
2454 * When there are other active TopDown events,
2455 * don't disable the fixed counter 3.
2456 */
2457 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2458 return;
2459 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2460 }
2461
2462 intel_clear_masks(event, idx);
2463
2464 mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
2465 cpuc->fixed_ctrl_val &= ~mask;
2466}
2467
2468static void intel_pmu_disable_event(struct perf_event *event)
2469{
2470 struct hw_perf_event *hwc = &event->hw;
2471 int idx = hwc->idx;
2472
2473 switch (idx) {
2474 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2475 intel_clear_masks(event, idx);
2476 x86_pmu_disable_event(event);
2477 break;
2478 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2479 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2480 intel_pmu_disable_fixed(event);
2481 break;
2482 case INTEL_PMC_IDX_FIXED_BTS:
2483 intel_pmu_disable_bts();
2484 intel_pmu_drain_bts_buffer();
2485 return;
2486 case INTEL_PMC_IDX_FIXED_VLBR:
2487 intel_clear_masks(event, idx);
2488 break;
2489 default:
2490 intel_clear_masks(event, idx);
2491 pr_warn("Failed to disable the event with invalid index %d\n",
2492 idx);
2493 return;
2494 }
2495
2496 /*
2497 * Needs to be called after x86_pmu_disable_event,
2498 * so we don't trigger the event without PEBS bit set.
2499 */
2500 if (unlikely(event->attr.precise_ip))
2501 intel_pmu_pebs_disable(event);
2502}
2503
2504static void intel_pmu_assign_event(struct perf_event *event, int idx)
2505{
2506 if (is_pebs_pt(event))
2507 perf_report_aux_output_id(event, idx);
2508}
2509
2510static void intel_pmu_del_event(struct perf_event *event)
2511{
2512 if (needs_branch_stack(event))
2513 intel_pmu_lbr_del(event);
2514 if (event->attr.precise_ip)
2515 intel_pmu_pebs_del(event);
2516}
2517
2518static int icl_set_topdown_event_period(struct perf_event *event)
2519{
2520 struct hw_perf_event *hwc = &event->hw;
2521 s64 left = local64_read(&hwc->period_left);
2522
2523 /*
2524 * The values in PERF_METRICS MSR are derived from fixed counter 3.
2525 * Software should start both registers, PERF_METRICS and fixed
2526 * counter 3, from zero.
2527 * Clear PERF_METRICS and fixed counter 3 during initialization.
2528 * After that, both MSRs will be cleared on each read and don't
2529 * need to be cleared again.
2530 */
2531 if (left == x86_pmu.max_period) {
2532 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2533 wrmsrl(MSR_PERF_METRICS, 0);
2534 hwc->saved_slots = 0;
2535 hwc->saved_metric = 0;
2536 }
2537
2538 if ((hwc->saved_slots) && is_slots_event(event)) {
2539 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2540 wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2541 }
2542
2543 perf_event_update_userpage(event);
2544
2545 return 0;
2546}
2547
2548static int adl_set_topdown_event_period(struct perf_event *event)
2549{
2550 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2551
2552 if (pmu->cpu_type != hybrid_big)
2553 return 0;
2554
2555 return icl_set_topdown_event_period(event);
2556}
2557
2558DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period);
2559
2560static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2561{
2562 u32 val;
2563
2564 /*
2565 * The metric is reported as an 8-bit integer fraction
2566 * summing up to 0xff.
2567 * slots-in-metric = (Metric / 0xff) * slots
2568 */
2569 val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2570 return mul_u64_u32_div(slots, val, 0xff);
2571}
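
/*
 * Worked example: with slots = 1000 and a metric byte of 0x80,
 * slots-in-metric = 1000 * 0x80 / 0xff = 501 (integer division),
 * i.e. the metric is credited with roughly half of the counted slots.
 */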
2572
2573static u64 icl_get_topdown_value(struct perf_event *event,
2574 u64 slots, u64 metrics)
2575{
2576 int idx = event->hw.idx;
2577 u64 delta;
2578
2579 if (is_metric_idx(idx))
2580 delta = icl_get_metrics_event_value(metrics, slots, idx);
2581 else
2582 delta = slots;
2583
2584 return delta;
2585}
2586
2587static void __icl_update_topdown_event(struct perf_event *event,
2588 u64 slots, u64 metrics,
2589 u64 last_slots, u64 last_metrics)
2590{
2591 u64 delta, last = 0;
2592
2593 delta = icl_get_topdown_value(event, slots, metrics);
2594 if (last_slots)
2595 last = icl_get_topdown_value(event, last_slots, last_metrics);
2596
2597 /*
2598 * The 8-bit integer fraction of the metric may not be accurate,
2599 * especially when the change is very small.
2600 * For example, if only a few bad_spec events happen, the fraction
2601 * may be reduced from 1 to 0. If so, the bad_spec event value
2602 * will be 0, which is definitely less than the last value.
2603 * Avoid updating event->count in this case.
2604 */
2605 if (delta > last) {
2606 delta -= last;
2607 local64_add(delta, &event->count);
2608 }
2609}
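
/*
 * E.g. if the previous readout attributed 400 slots to this metric and
 * the current one attributes 501, the event advances by 101. If rounding
 * of the 8-bit fraction makes the new value smaller than the old one,
 * the update is skipped rather than counting backwards.
 */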
2610
2611static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2612 u64 metrics, int metric_end)
2613{
2614 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2615 struct perf_event *other;
2616 int idx;
2617
2618 event->hw.saved_slots = slots;
2619 event->hw.saved_metric = metrics;
2620
2621 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2622 if (!is_topdown_idx(idx))
2623 continue;
2624 other = cpuc->events[idx];
2625 other->hw.saved_slots = slots;
2626 other->hw.saved_metric = metrics;
2627 }
2628}
2629
2630/*
2631 * Update all active Topdown events.
2632 *
2633 * The PERF_METRICS and Fixed counter 3 are read separately. The values may be
2634 * modified by an NMI. The PMU has to be disabled before calling this function.
2635 */
2636
2637static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2638{
2639 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2640 struct perf_event *other;
2641 u64 slots, metrics;
2642 bool reset = true;
2643 int idx;
2644
2645 /* read Fixed counter 3 */
2646 rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2647 if (!slots)
2648 return 0;
2649
2650 /* read PERF_METRICS */
2651 rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2652
2653 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2654 if (!is_topdown_idx(idx))
2655 continue;
2656 other = cpuc->events[idx];
2657 __icl_update_topdown_event(other, slots, metrics,
2658 event ? event->hw.saved_slots : 0,
2659 event ? event->hw.saved_metric : 0);
2660 }
2661
2662 /*
2663 * Check and update this event, which may have been cleared
2664 * from active_mask, e.g. by x86_pmu_stop().
2665 */
2666 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2667 __icl_update_topdown_event(event, slots, metrics,
2668 event->hw.saved_slots,
2669 event->hw.saved_metric);
2670
2671 /*
2672 * In x86_pmu_stop(), the event is cleared from active_mask first,
2673 * then the delta is drained, which indicates a context switch for
2674 * counting.
2675 * Save the metric and slots for the context switch.
2676 * There is no need to reset PERF_METRICS and fixed counter 3,
2677 * because the values will be restored on the next schedule-in.
2678 */
2679 update_saved_topdown_regs(event, slots, metrics, metric_end);
2680 reset = false;
2681 }
2682
2683 if (reset) {
2684 /* The fixed counter 3 has to be written before the PERF_METRICS. */
2685 wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2686 wrmsrl(MSR_PERF_METRICS, 0);
2687 if (event)
2688 update_saved_topdown_regs(event, 0, 0, metric_end);
2689 }
2690
2691 return slots;
2692}
2693
2694static u64 icl_update_topdown_event(struct perf_event *event)
2695{
2696 return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2697 x86_pmu.num_topdown_events - 1);
2698}
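
/*
 * num_topdown_events bounds the scan: 4 metrics (retiring, bad-spec,
 * fe-bound, be-bound) on Ice Lake; later parts that also expose the
 * level-2 metrics report 8.
 */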
2699
2700static u64 adl_update_topdown_event(struct perf_event *event)
2701{
2702 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2703
2704 if (pmu->cpu_type != hybrid_big)
2705 return 0;
2706
2707 return icl_update_topdown_event(event);
2708}
2709
2710DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
2711
2712static void intel_pmu_read_topdown_event(struct perf_event *event)
2713{
2714 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2715
2716 /* Only need to call update_topdown_event() once for group read. */
2717 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2718 !is_slots_event(event))
2719 return;
2720
2721 perf_pmu_disable(event->pmu);
2722 static_call(intel_pmu_update_topdown_event)(event);
2723 perf_pmu_enable(event->pmu);
2724}
2725
2726static void intel_pmu_read_event(struct perf_event *event)
2727{
2728 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2729 intel_pmu_auto_reload_read(event);
2730 else if (is_topdown_count(event))
2731 intel_pmu_read_topdown_event(event);
2732 else
2733 x86_perf_event_update(event);
2734}
2735
2736static void intel_pmu_enable_fixed(struct perf_event *event)
2737{
2738 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2739 struct hw_perf_event *hwc = &event->hw;
2740 u64 mask, bits = 0;
2741 int idx = hwc->idx;
2742
2743 if (is_topdown_idx(idx)) {
2744 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2745 /*
2746 * When there are other active TopDown events,
2747 * don't enable the fixed counter 3 again.
2748 */
2749 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2750 return;
2751
2752 idx = INTEL_PMC_IDX_FIXED_SLOTS;
2753 }
2754
2755 intel_set_masks(event, idx);
2756
2757 /*
2758 * Enable IRQ generation (0x8), if not PEBS,
2759 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2760 * if requested:
2761 */
2762 if (!event->attr.precise_ip)
2763 bits |= INTEL_FIXED_0_ENABLE_PMI;
2764 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2765 bits |= INTEL_FIXED_0_USER;
2766 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2767 bits |= INTEL_FIXED_0_KERNEL;
2768
2769 /*
2770 * ANY bit is supported in v3 and up
2771 */
2772 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2773 bits |= INTEL_FIXED_0_ANYTHREAD;
2774
2775 idx -= INTEL_PMC_IDX_FIXED;
2776 bits = intel_fixed_bits_by_idx(idx, bits);
2777 mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
2778
2779 if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2780 bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2781 mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
2782 }
2783
2784 cpuc->fixed_ctrl_val &= ~mask;
2785 cpuc->fixed_ctrl_val |= bits;
2786}
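
/*
 * Layout example: fixed counter 1 (CPU_CLK_UNHALTED.CORE) counting both
 * rings with PMI enabled contributes (0x8|0x2|0x1) << 4 = 0xb0 to
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; each fixed counter owns a 4-bit
 * control field at bits [4*idx+3:4*idx].
 */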
2787
2788static void intel_pmu_enable_event(struct perf_event *event)
2789{
2790 struct hw_perf_event *hwc = &event->hw;
2791 int idx = hwc->idx;
2792
2793 if (unlikely(event->attr.precise_ip))
2794 intel_pmu_pebs_enable(event);
2795
2796 switch (idx) {
2797 case 0 ... INTEL_PMC_IDX_FIXED - 1:
2798 intel_set_masks(event, idx);
2799 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2800 break;
2801 case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2802 case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2803 intel_pmu_enable_fixed(event);
2804 break;
2805 case INTEL_PMC_IDX_FIXED_BTS:
2806 if (!__this_cpu_read(cpu_hw_events.enabled))
2807 return;
2808 intel_pmu_enable_bts(hwc->config);
2809 break;
2810 case INTEL_PMC_IDX_FIXED_VLBR:
2811 intel_set_masks(event, idx);
2812 break;
2813 default:
2814 pr_warn("Failed to enable the event with invalid index %d\n",
2815 idx);
2816 }
2817}
2818
2819static void intel_pmu_add_event(struct perf_event *event)
2820{
2821 if (event->attr.precise_ip)
2822 intel_pmu_pebs_add(event);
2823 if (needs_branch_stack(event))
2824 intel_pmu_lbr_add(event);
2825}
2826
2827/*
2828 * Save and restart an expired event. Called by NMI contexts,
2829 * so it has to be careful about preempting normal event ops:
2830 */
2831int intel_pmu_save_and_restart(struct perf_event *event)
2832{
2833 static_call(x86_pmu_update)(event);
2834 /*
2835 * For a checkpointed counter always reset back to 0. This
2836 * avoids a situation where the counter overflows, aborts the
2837 * transaction and is then set back to shortly before the
2838 * overflow, and overflows and aborts again.
2839 */
2840 if (unlikely(event_is_checkpointed(event))) {
2841 /* No race with NMIs because the counter should not be armed */
2842 wrmsrl(event->hw.event_base, 0);
2843 local64_set(&event->hw.prev_count, 0);
2844 }
2845 return static_call(x86_pmu_set_period)(event);
2846}
2847
2848static int intel_pmu_set_period(struct perf_event *event)
2849{
2850 if (unlikely(is_topdown_count(event)))
2851 return static_call(intel_pmu_set_topdown_event_period)(event);
2852
2853 return x86_perf_event_set_period(event);
2854}
2855
2856static u64 intel_pmu_update(struct perf_event *event)
2857{
2858 if (unlikely(is_topdown_count(event)))
2859 return static_call(intel_pmu_update_topdown_event)(event);
2860
2861 return x86_perf_event_update(event);
2862}
2863
2864static void intel_pmu_reset(void)
2865{
2866 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2867 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2868 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2869 int num_counters = hybrid(cpuc->pmu, num_counters);
2870 unsigned long flags;
2871 int idx;
2872
2873 if (!num_counters)
2874 return;
2875
2876 local_irq_save(flags);
2877
2878 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2879
2880 for (idx = 0; idx < num_counters; idx++) {
2881 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2882 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
2883 }
2884 for (idx = 0; idx < num_counters_fixed; idx++) {
2885 if (fixed_counter_disabled(idx, cpuc->pmu))
2886 continue;
2887 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2888 }
2889
2890 if (ds)
2891 ds->bts_index = ds->bts_buffer_base;
2892
2893 /* Ack all overflows and disable fixed counters */
2894 if (x86_pmu.version >= 2) {
2895 intel_pmu_ack_status(intel_pmu_get_status());
2896 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2897 }
2898
2899 /* Reset LBRs and LBR freezing */
2900 if (x86_pmu.lbr_nr) {
2901 update_debugctlmsr(get_debugctlmsr() &
2902 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2903 }
2904
2905 local_irq_restore(flags);
2906}
2907
2908/*
2909 * We may be running with guest PEBS events created by KVM, and the
2910 * PEBS records are logged into the guest's DS and are invisible to the host.
2911 *
2912 * In the case of guest PEBS overflow, we only trigger a fake event
2913 * to emulate the PEBS overflow PMI for guest PEBS counters in KVM.
2914 * The guest will then vm-enter and check the guest DS area to read
2915 * the guest PEBS records.
2916 *
2917 * The contents and other behavior of the guest event do not matter.
2918 */
2919static void x86_pmu_handle_guest_pebs(struct pt_regs *regs,
2920 struct perf_sample_data *data)
2921{
2922 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2923 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;
2924 struct perf_event *event = NULL;
2925 int bit;
2926
2927 if (!unlikely(perf_guest_state()))
2928 return;
2929
2930 if (!x86_pmu.pebs_ept || !x86_pmu.pebs_active ||
2931 !guest_pebs_idxs)
2932 return;
2933
2934 for_each_set_bit(bit, (unsigned long *)&guest_pebs_idxs,
2935 INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed) {
2936 event = cpuc->events[bit];
2937 if (!event->attr.precise_ip)
2938 continue;
2939
2940 perf_sample_data_init(data, 0, event->hw.last_period);
2941 if (perf_event_overflow(event, data, regs))
2942 x86_pmu_stop(event, 0);
2943
2944 /* Injecting one fake event is enough. */
2945 break;
2946 }
2947}
2948
2949static int handle_pmi_common(struct pt_regs *regs, u64 status)
2950{
2951 struct perf_sample_data data;
2952 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2953 int bit;
2954 int handled = 0;
2955 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2956
2957 inc_irq_stat(apic_perf_irqs);
2958
2959 /*
2960 * Ignore a range of extra bits in status that do not indicate
2961 * overflow by themselves.
2962 */
2963 status &= ~(GLOBAL_STATUS_COND_CHG |
2964 GLOBAL_STATUS_ASIF |
2965 GLOBAL_STATUS_LBRS_FROZEN);
2966 if (!status)
2967 return 0;
2968 /*
2969 * In case multiple PEBS events are sampled at the same time,
2970 * it is possible to have GLOBAL_STATUS bit 62 set indicating
2971 * PEBS buffer overflow and also seeing at most 3 PEBS counters
2972 * having their bits set in the status register. This is a sign
2973 * that there was at least one PEBS record pending at the time
2974 * of the PMU interrupt. PEBS counters must only be processed
2975 * via the drain_pebs() calls and not via the regular sample
2976 * processing loop that comes afterwards, otherwise
2977 * phony regular samples may be generated in the sampling buffer
2978 * not marked with the EXACT tag. Another possibility is to have
2979 * one PEBS event and at least one non-PEBS event which overflows
2980 * while PEBS is armed. In this case, bit 62 of GLOBAL_STATUS will
2981 * not be set, yet the overflow status bit for the PEBS counter will
2982 * be set on Skylake.
2983 *
2984 * To avoid this problem, we systematically ignore the PEBS-enabled
2985 * counters from the GLOBAL_STATUS mask and we always process PEBS
2986 * events via drain_pebs().
2987 */
2988 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable);
2989
2990 /*
2991 * PEBS overflow sets bit 62 in the global status register
2992 */
2993 if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
2994 u64 pebs_enabled = cpuc->pebs_enabled;
2995
2996 handled++;
2997 x86_pmu_handle_guest_pebs(regs, &data);
2998 x86_pmu.drain_pebs(regs, &data);
2999 status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
3000
3001 /*
3002 * PMI throttling may be triggered, which stops the PEBS event.
3003 * Although cpuc->pebs_enabled is updated accordingly,
3004 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
3005 * forced to 0 in the PMI.
3006 * Update the MSR if pebs_enabled has changed.
3007 */
3008 if (pebs_enabled != cpuc->pebs_enabled)
3009 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
3010 }
3011
3012 /*
3013 * Intel PT
3014 */
3015 if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
3016 handled++;
3017 if (!perf_guest_handle_intel_pt_intr())
3018 intel_pt_interrupt();
3019 }
3020
3021 /*
3022 * Intel Perf metrics
3023 */
3024 if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
3025 handled++;
3026 static_call(intel_pmu_update_topdown_event)(NULL);
3027 }
3028
3029 /*
3030 * Checkpointed counters can lead to 'spurious' PMIs because the
3031 * rollback caused by the PMI will have cleared the overflow status
3032 * bit. Therefore always force probe these counters.
3033 */
3034 status |= cpuc->intel_cp_status;
3035
3036 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
3037 struct perf_event *event = cpuc->events[bit];
3038
3039 handled++;
3040
3041 if (!test_bit(bit, cpuc->active_mask))
3042 continue;
3043
3044 if (!intel_pmu_save_and_restart(event))
3045 continue;
3046
3047 perf_sample_data_init(&data, 0, event->hw.last_period);
3048
3049 if (has_branch_stack(event))
3050 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);
3051
3052 if (perf_event_overflow(event, &data, regs))
3053 x86_pmu_stop(event, 0);
3054 }
3055
3056 return handled;
3057}
3058
3059/*
3060 * This handler is triggered by the local APIC, so the APIC IRQ handling
3061 * rules apply:
3062 */
3063static int intel_pmu_handle_irq(struct pt_regs *regs)
3064{
3065 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3066 bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
3067 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
3068 int loops;
3069 u64 status;
3070 int handled;
3071 int pmu_enabled;
3072
3073 /*
3074 * Save the PMU state.
3075 * It needs to be restored when leaving the handler.
3076 */
3077 pmu_enabled = cpuc->enabled;
3078 /*
3079 * In general, the early ACK is only applied on old platforms.
3080 * For big cores, starting with Haswell, the late ACK should be
3081 * applied.
3082 * For small cores after Tremont, we have to do the ACK right
3083 * before re-enabling counters, i.e. in the middle of the
3084 * NMI handler.
3085 */
3086 if (!late_ack && !mid_ack)
3087 apic_write(APIC_LVTPC, APIC_DM_NMI);
3088 intel_bts_disable_local();
3089 cpuc->enabled = 0;
3090 __intel_pmu_disable_all(true);
3091 handled = intel_pmu_drain_bts_buffer();
3092 handled += intel_bts_interrupt();
3093 status = intel_pmu_get_status();
3094 if (!status)
3095 goto done;
3096
3097 loops = 0;
3098again:
3099 intel_pmu_lbr_read();
3100 intel_pmu_ack_status(status);
3101 if (++loops > 100) {
3102 static bool warned;
3103
3104 if (!warned) {
3105 WARN(1, "perfevents: irq loop stuck!\n");
3106 perf_event_print_debug();
3107 warned = true;
3108 }
3109 intel_pmu_reset();
3110 goto done;
3111 }
3112
3113 handled += handle_pmi_common(regs, status);
3114
3115 /*
3116 * Repeat if there is more work to be done:
3117 */
3118 status = intel_pmu_get_status();
3119 if (status)
3120 goto again;
3121
3122done:
3123 if (mid_ack)
3124 apic_write(APIC_LVTPC, APIC_DM_NMI);
3125 /* Only restore PMU state when it's active. See x86_pmu_disable(). */
3126 cpuc->enabled = pmu_enabled;
3127 if (pmu_enabled)
3128 __intel_pmu_enable_all(0, true);
3129 intel_bts_enable_local();
3130
3131 /*
3132 * Only unmask the NMI after the overflow counters
3133 * have been reset. This avoids spurious NMIs on
3134 * Haswell CPUs.
3135 */
3136 if (late_ack)
3137 apic_write(APIC_LVTPC, APIC_DM_NMI);
3138 return handled;
3139}
3140
3141static struct event_constraint *
3142intel_bts_constraints(struct perf_event *event)
3143{
3144 if (unlikely(intel_pmu_has_bts(event)))
3145 return &bts_constraint;
3146
3147 return NULL;
3148}
3149
3150/*
3151 * Note: matches a fake event, like Fixed2.
3152 */
3153static struct event_constraint *
3154intel_vlbr_constraints(struct perf_event *event)
3155{
3156 struct event_constraint *c = &vlbr_constraint;
3157
3158 if (unlikely(constraint_match(c, event->hw.config))) {
3159 event->hw.flags |= c->flags;
3160 return c;
3161 }
3162
3163 return NULL;
3164}
3165
3166static int intel_alt_er(struct cpu_hw_events *cpuc,
3167 int idx, u64 config)
3168{
3169 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3170 int alt_idx = idx;
3171
3172 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3173 return idx;
3174
3175 if (idx == EXTRA_REG_RSP_0)
3176 alt_idx = EXTRA_REG_RSP_1;
3177
3178 if (idx == EXTRA_REG_RSP_1)
3179 alt_idx = EXTRA_REG_RSP_0;
3180
3181 if (config & ~extra_regs[alt_idx].valid_mask)
3182 return idx;
3183
3184 return alt_idx;
3185}
3186
3187static void intel_fixup_er(struct perf_event *event, int idx)
3188{
3189 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3190 event->hw.extra_reg.idx = idx;
3191
3192 if (idx == EXTRA_REG_RSP_0) {
3193 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3194 event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3195 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3196 } else if (idx == EXTRA_REG_RSP_1) {
3197 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3198 event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3199 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3200 }
3201}
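
/*
 * E.g. an event created as 0x01b7 (OFFCORE_RESPONSE_0) whose MSR is
 * already claimed with a different filter value can be re-encoded to
 * 0x02b7 here, so the same filter is programmed into MSR_OFFCORE_RSP_1
 * instead; intel_alt_er() picks the alternative index only when the
 * config fits that register's valid_mask.
 */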
3202
3203/*
3204 * manage allocation of shared extra msr for certain events
3205 *
3206 * sharing can be:
3207 * per-cpu: to be shared between the various events on a single PMU
3208 * per-core: per-cpu + shared by HT threads
3209 */
3210static struct event_constraint *
3211__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3212 struct perf_event *event,
3213 struct hw_perf_event_extra *reg)
3214{
3215 struct event_constraint *c = &emptyconstraint;
3216 struct er_account *era;
3217 unsigned long flags;
3218 int idx = reg->idx;
3219
3220 /*
3221 * reg->alloc can be set due to existing state, so for fake cpuc we
3222 * need to ignore this, otherwise we might fail to allocate proper fake
3223 * state for this extra reg constraint. Also see the comment below.
3224 */
3225 if (reg->alloc && !cpuc->is_fake)
3226 return NULL; /* call x86_get_event_constraint() */
3227
3228again:
3229 era = &cpuc->shared_regs->regs[idx];
3230 /*
3231 * we use raw_spin_lock_irqsave() to avoid lockdep issues when
3232 * passing a fake cpuc
3233 */
3234 raw_spin_lock_irqsave(&era->lock, flags);
3235
3236 if (!atomic_read(&era->ref) || era->config == reg->config) {
3237
3238 /*
3239 * If it's a fake cpuc -- as per validate_{group,event}() we
3240 * shouldn't touch event state and we can avoid doing so
3241 * since both will only call get_event_constraints() once
3242 * on each event, this avoids the need for reg->alloc.
3243 *
3244 * Not doing the ER fixup will only result in era->reg being
3245 * wrong, but since we won't actually try and program hardware
3246 * this isn't a problem either.
3247 */
3248 if (!cpuc->is_fake) {
3249 if (idx != reg->idx)
3250 intel_fixup_er(event, idx);
3251
3252 /*
3253 * x86_schedule_events() can call get_event_constraints()
3254 * multiple times on events in the case of incremental
3255 * scheduling. reg->alloc ensures we only do the ER
3256 * allocation once.
3257 */
3258 reg->alloc = 1;
3259 }
3260
3261 /* lock in msr value */
3262 era->config = reg->config;
3263 era->reg = reg->reg;
3264
3265 /* one more user */
3266 atomic_inc(&era->ref);
3267
3268 /*
3269 * need to call x86_get_event_constraint()
3270 * to check if the associated event has constraints
3271 */
3272 c = NULL;
3273 } else {
3274 idx = intel_alt_er(cpuc, idx, reg->config);
3275 if (idx != reg->idx) {
3276 raw_spin_unlock_irqrestore(&era->lock, flags);
3277 goto again;
3278 }
3279 }
3280 raw_spin_unlock_irqrestore(&era->lock, flags);
3281
3282 return c;
3283}
3284
3285static void
3286__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3287 struct hw_perf_event_extra *reg)
3288{
3289 struct er_account *era;
3290
3291 /*
3292 * Only put constraint if extra reg was actually allocated. Also takes
3293 * care of events which do not use an extra shared reg.
3294 *
3295 * Also, if this is a fake cpuc we shouldn't touch any event state
3296 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3297 * either since it'll be thrown out.
3298 */
3299 if (!reg->alloc || cpuc->is_fake)
3300 return;
3301
3302 era = &cpuc->shared_regs->regs[reg->idx];
3303
3304 /* one fewer user */
3305 atomic_dec(&era->ref);
3306
3307 /* allocate again next time */
3308 reg->alloc = 0;
3309}
3310
3311static struct event_constraint *
3312intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3313 struct perf_event *event)
3314{
3315 struct event_constraint *c = NULL, *d;
3316 struct hw_perf_event_extra *xreg, *breg;
3317
3318 xreg = &event->hw.extra_reg;
3319 if (xreg->idx != EXTRA_REG_NONE) {
3320 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3321 if (c == &emptyconstraint)
3322 return c;
3323 }
3324 breg = &event->hw.branch_reg;
3325 if (breg->idx != EXTRA_REG_NONE) {
3326 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3327 if (d == &emptyconstraint) {
3328 __intel_shared_reg_put_constraints(cpuc, xreg);
3329 c = d;
3330 }
3331 }
3332 return c;
3333}
3334
3335struct event_constraint *
3336x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3337 struct perf_event *event)
3338{
3339 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3340 struct event_constraint *c;
3341
3342 if (event_constraints) {
3343 for_each_event_constraint(c, event_constraints) {
3344 if (constraint_match(c, event->hw.config)) {
3345 event->hw.flags |= c->flags;
3346 return c;
3347 }
3348 }
3349 }
3350
3351 return &hybrid_var(cpuc->pmu, unconstrained);
3352}
3353
3354static struct event_constraint *
3355__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3356 struct perf_event *event)
3357{
3358 struct event_constraint *c;
3359
3360 c = intel_vlbr_constraints(event);
3361 if (c)
3362 return c;
3363
3364 c = intel_bts_constraints(event);
3365 if (c)
3366 return c;
3367
3368 c = intel_shared_regs_constraints(cpuc, event);
3369 if (c)
3370 return c;
3371
3372 c = intel_pebs_constraints(event);
3373 if (c)
3374 return c;
3375
3376 return x86_get_event_constraints(cpuc, idx, event);
3377}
3378
3379static void
3380intel_start_scheduling(struct cpu_hw_events *cpuc)
3381{
3382 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3383 struct intel_excl_states *xl;
3384 int tid = cpuc->excl_thread_id;
3385
3386 /*
3387 * nothing needed if in group validation mode
3388 */
3389 if (cpuc->is_fake || !is_ht_workaround_enabled())
3390 return;
3391
3392 /*
3393 * no exclusion needed
3394 */
3395 if (WARN_ON_ONCE(!excl_cntrs))
3396 return;
3397
3398 xl = &excl_cntrs->states[tid];
3399
3400 xl->sched_started = true;
3401 /*
3402 * lock shared state until we are done scheduling
3403 * in stop_event_scheduling()
3404 * makes scheduling appear as a transaction
3405 */
3406 raw_spin_lock(&excl_cntrs->lock);
3407}
3408
3409static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3410{
3411 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3412 struct event_constraint *c = cpuc->event_constraint[idx];
3413 struct intel_excl_states *xl;
3414 int tid = cpuc->excl_thread_id;
3415
3416 if (cpuc->is_fake || !is_ht_workaround_enabled())
3417 return;
3418
3419 if (WARN_ON_ONCE(!excl_cntrs))
3420 return;
3421
3422 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3423 return;
3424
3425 xl = &excl_cntrs->states[tid];
3426
3427 lockdep_assert_held(&excl_cntrs->lock);
3428
3429 if (c->flags & PERF_X86_EVENT_EXCL)
3430 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3431 else
3432 xl->state[cntr] = INTEL_EXCL_SHARED;
3433}
3434
3435static void
3436intel_stop_scheduling(struct cpu_hw_events *cpuc)
3437{
3438 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3439 struct intel_excl_states *xl;
3440 int tid = cpuc->excl_thread_id;
3441
3442 /*
3443 * nothing needed if in group validation mode
3444 */
3445 if (cpuc->is_fake || !is_ht_workaround_enabled())
3446 return;
3447 /*
3448 * no exclusion needed
3449 */
3450 if (WARN_ON_ONCE(!excl_cntrs))
3451 return;
3452
3453 xl = &excl_cntrs->states[tid];
3454
3455 xl->sched_started = false;
3456 /*
3457 * release shared state lock (acquired in intel_start_scheduling())
3458 */
3459 raw_spin_unlock(&excl_cntrs->lock);
3460}
3461
3462static struct event_constraint *
3463dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3464{
3465 WARN_ON_ONCE(!cpuc->constraint_list);
3466
3467 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3468 struct event_constraint *cx;
3469
3470 /*
3471 * grab pre-allocated constraint entry
3472 */
3473 cx = &cpuc->constraint_list[idx];
3474
3475 /*
3476 * initialize dynamic constraint
3477 * with static constraint
3478 */
3479 *cx = *c;
3480
3481 /*
3482 * mark constraint as dynamic
3483 */
3484 cx->flags |= PERF_X86_EVENT_DYNAMIC;
3485 c = cx;
3486 }
3487
3488 return c;
3489}
3490
3491static struct event_constraint *
3492intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3493 int idx, struct event_constraint *c)
3494{
3495 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3496 struct intel_excl_states *xlo;
3497 int tid = cpuc->excl_thread_id;
3498 int is_excl, i, w;
3499
3500 /*
3501 * validating a group does not require
3502 * enforcing cross-thread exclusion
3503 */
3504 if (cpuc->is_fake || !is_ht_workaround_enabled())
3505 return c;
3506
3507 /*
3508 * no exclusion needed
3509 */
3510 if (WARN_ON_ONCE(!excl_cntrs))
3511 return c;
3512
3513 /*
3514 * because we modify the constraint, we need
3515 * to make a copy. Static constraints come
3516 * from static const tables.
3517 *
3518 * only needed when constraint has not yet
3519 * been cloned (marked dynamic)
3520 */
3521 c = dyn_constraint(cpuc, c, idx);
3522
3523 /*
3524 * From here on, the constraint is dynamic.
3525 * Either it was just allocated above, or it
3526 * was allocated during an earlier invocation
3527 * of this function
3528 */
3529
3530 /*
3531 * state of sibling HT
3532 */
3533 xlo = &excl_cntrs->states[tid ^ 1];
3534
3535 /*
3536 * event requires exclusive counter access
3537 * across HT threads
3538 */
3539 is_excl = c->flags & PERF_X86_EVENT_EXCL;
3540 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3541 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3542 if (!cpuc->n_excl++)
3543 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3544 }
3545
3546 /*
3547 * Modify static constraint with current dynamic
3548 * state of thread
3549 *
3550 * EXCLUSIVE: sibling counter measuring exclusive event
3551 * SHARED : sibling counter measuring non-exclusive event
3552 * UNUSED : sibling counter unused
3553 */
3554 w = c->weight;
3555 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3556 /*
3557 * exclusive event in sibling counter
3558 * our corresponding counter cannot be used
3559 * regardless of our event
3560 */
3561 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3562 __clear_bit(i, c->idxmsk);
3563 w--;
3564 continue;
3565 }
3566 /*
3567 * if we measure an exclusive event while the sibling
3568 * measures a non-exclusive one, then the counter
3569 * cannot be used
3570 */
3571 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3572 __clear_bit(i, c->idxmsk);
3573 w--;
3574 continue;
3575 }
3576 }
3577
3578 /*
3579 * if we return an empty mask, then switch
3580 * back to static empty constraint to avoid
3581 * the cost of freeing later on
3582 */
3583 if (!w)
3584 c = &emptyconstraint;
3585
3586 c->weight = w;
3587
3588 return c;
3589}
3590
3591static struct event_constraint *
3592intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3593 struct perf_event *event)
3594{
3595 struct event_constraint *c1, *c2;
3596
3597 c1 = cpuc->event_constraint[idx];
3598
3599 /*
3600 * first time only
3601 * - static constraint: no change across incremental scheduling calls
3602 * - dynamic constraint: handled by intel_get_excl_constraints()
3603 */
3604 c2 = __intel_get_event_constraints(cpuc, idx, event);
3605 if (c1) {
3606 WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3607 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3608 c1->weight = c2->weight;
3609 c2 = c1;
3610 }
3611
3612 if (cpuc->excl_cntrs)
3613 return intel_get_excl_constraints(cpuc, event, idx, c2);
3614
3615 return c2;
3616}
3617
3618static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3619 struct perf_event *event)
3620{
3621 struct hw_perf_event *hwc = &event->hw;
3622 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3623 int tid = cpuc->excl_thread_id;
3624 struct intel_excl_states *xl;
3625
3626 /*
3627 * nothing needed if in group validation mode
3628 */
3629 if (cpuc->is_fake)
3630 return;
3631
3632 if (WARN_ON_ONCE(!excl_cntrs))
3633 return;
3634
3635 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3636 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3637 if (!--cpuc->n_excl)
3638 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3639 }
3640
3641 /*
3642 * If event was actually assigned, then mark the counter state as
3643 * unused now.
3644 */
3645 if (hwc->idx >= 0) {
3646 xl = &excl_cntrs->states[tid];
3647
3648 /*
3649 * put_constraint may be called from x86_schedule_events()
3650		 * which already holds the lock, so make the locking
3651		 * conditional here.
3652 */
3653 if (!xl->sched_started)
3654 raw_spin_lock(&excl_cntrs->lock);
3655
3656 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3657
3658 if (!xl->sched_started)
3659 raw_spin_unlock(&excl_cntrs->lock);
3660 }
3661}
3662
3663static void
3664intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3665 struct perf_event *event)
3666{
3667 struct hw_perf_event_extra *reg;
3668
3669 reg = &event->hw.extra_reg;
3670 if (reg->idx != EXTRA_REG_NONE)
3671 __intel_shared_reg_put_constraints(cpuc, reg);
3672
3673 reg = &event->hw.branch_reg;
3674 if (reg->idx != EXTRA_REG_NONE)
3675 __intel_shared_reg_put_constraints(cpuc, reg);
3676}
3677
3678static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3679 struct perf_event *event)
3680{
3681 intel_put_shared_regs_event_constraints(cpuc, event);
3682
3683 /*
3684	 * If the PMU has exclusive counter restrictions, then
3685	 * all events are subject to them and must call the
3686	 * put_excl_constraints() routine.
3687 */
3688 if (cpuc->excl_cntrs)
3689 intel_put_excl_constraints(cpuc, event);
3690}
3691
3692static void intel_pebs_aliases_core2(struct perf_event *event)
3693{
3694 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3695 /*
3696 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3697 * (0x003c) so that we can use it with PEBS.
3698 *
3699 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3700 * PEBS capable. However we can use INST_RETIRED.ANY_P
3701 * (0x00c0), which is a PEBS capable event, to get the same
3702 * count.
3703 *
3704		 * INST_RETIRED.ANY_P counts the number of cycles that retire
3705		 * CNTMASK instructions. By setting CNTMASK to a value (16)
3706		 * larger than the maximum number of instructions that can be
3707		 * retired per cycle (4) and then inverting the condition, we
3708		 * count all cycles that retire 16 or fewer instructions, which
3709 * is every cycle.
3710 *
3711 * Thereby we gain a PEBS capable cycle counter.
3712 */
3713 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3714
3715 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3716 event->hw.config = alt_config;
3717 }
3718}
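
/*
 * For illustration: given the EVENTSEL bit layout (event select in
 * bits 0-7, INV at bit 23, CMASK in bits 24-31), the alias above
 * expands to
 *
 *	alt_config = (16 << 24) | (1 << 23) | 0xc0 = 0x108000c0
 *
 * i.e. "cycles in which INST_RETIRED.ANY_P retires fewer than 16
 * instructions", which fires every cycle.
 */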
3719
3720static void intel_pebs_aliases_snb(struct perf_event *event)
3721{
3722 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3723 /*
3724 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3725 * (0x003c) so that we can use it with PEBS.
3726 *
3727 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3728 * PEBS capable. However we can use UOPS_RETIRED.ALL
3729 * (0x01c2), which is a PEBS capable event, to get the same
3730 * count.
3731 *
3732		 * UOPS_RETIRED.ALL counts the number of cycles that retire
3733		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
3734		 * larger than the maximum number of micro-ops that can be
3735		 * retired per cycle (4) and then inverting the condition, we
3736		 * count all cycles that retire 16 or fewer micro-ops, which
3737 * is every cycle.
3738 *
3739 * Thereby we gain a PEBS capable cycle counter.
3740 */
3741 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3742
3743 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3744 event->hw.config = alt_config;
3745 }
3746}
3747
3748static void intel_pebs_aliases_precdist(struct perf_event *event)
3749{
3750 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3751 /*
3752 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3753 * (0x003c) so that we can use it with PEBS.
3754 *
3755 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3756 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3757 * (0x01c0), which is a PEBS capable event, to get the same
3758 * count.
3759 *
3760 * The PREC_DIST event has special support to minimize sample
3761		 * shadowing effects. One drawback is that it can only be
3762		 * programmed on counter 1, but that seems like an
3763		 * acceptable trade-off.
3764 */
3765 u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3766
3767 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3768 event->hw.config = alt_config;
3769 }
3770}
3771
3772static void intel_pebs_aliases_ivb(struct perf_event *event)
3773{
3774 if (event->attr.precise_ip < 3)
3775 return intel_pebs_aliases_snb(event);
3776 return intel_pebs_aliases_precdist(event);
3777}
3778
3779static void intel_pebs_aliases_skl(struct perf_event *event)
3780{
3781 if (event->attr.precise_ip < 3)
3782 return intel_pebs_aliases_core2(event);
3783 return intel_pebs_aliases_precdist(event);
3784}
3785
3786static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3787{
3788 unsigned long flags = x86_pmu.large_pebs_flags;
3789
3790 if (event->attr.use_clockid)
3791 flags &= ~PERF_SAMPLE_TIME;
3792 if (!event->attr.exclude_kernel)
3793 flags &= ~PERF_SAMPLE_REGS_USER;
3794 if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3795 flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3796 return flags;
3797}
3798
3799static int intel_pmu_bts_config(struct perf_event *event)
3800{
3801 struct perf_event_attr *attr = &event->attr;
3802
3803 if (unlikely(intel_pmu_has_bts(event))) {
3804 /* BTS is not supported by this architecture. */
3805 if (!x86_pmu.bts_active)
3806 return -EOPNOTSUPP;
3807
3808 /* BTS is currently only allowed for user-mode. */
3809 if (!attr->exclude_kernel)
3810 return -EOPNOTSUPP;
3811
3812 /* BTS is not allowed for precise events. */
3813 if (attr->precise_ip)
3814 return -EOPNOTSUPP;
3815
3816 /* disallow bts if conflicting events are present */
3817 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3818 return -EBUSY;
3819
3820 event->destroy = hw_perf_lbr_event_destroy;
3821 }
3822
3823 return 0;
3824}
3825
3826static int core_pmu_hw_config(struct perf_event *event)
3827{
3828 int ret = x86_pmu_hw_config(event);
3829
3830 if (ret)
3831 return ret;
3832
3833 return intel_pmu_bts_config(event);
3834}
3835
3836#define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
3837 ((x86_pmu.num_topdown_events - 1) << 8))
3838
3839static bool is_available_metric_event(struct perf_event *event)
3840{
3841 return is_metric_event(event) &&
3842 event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3843}
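
/*
 * For example, assuming the usual pseudo-encodings from perf_event.h
 * (INTEL_TD_METRIC_RETIRING == 0x8000, with subsequent metrics spaced
 * 0x100 apart), a PMU with x86_pmu.num_topdown_events == 4 accepts
 * metric events with configs in the range 0x8000-0x8300.
 */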
3844
3845static inline bool is_mem_loads_event(struct perf_event *event)
3846{
3847 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3848}
3849
3850static inline bool is_mem_loads_aux_event(struct perf_event *event)
3851{
3852 return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3853}
3854
3855static inline bool require_mem_loads_aux_event(struct perf_event *event)
3856{
3857 if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3858 return false;
3859
3860 if (is_hybrid())
3861 return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3862
3863 return true;
3864}
3865
3866static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3867{
3868 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3869
3870 return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3871}
3872
3873static int intel_pmu_hw_config(struct perf_event *event)
3874{
3875 int ret = x86_pmu_hw_config(event);
3876
3877 if (ret)
3878 return ret;
3879
3880 ret = intel_pmu_bts_config(event);
3881 if (ret)
3882 return ret;
3883
3884 if (event->attr.precise_ip) {
3885 if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3886 return -EINVAL;
3887
3888 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3889 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3890 if (!(event->attr.sample_type &
3891 ~intel_pmu_large_pebs_flags(event))) {
3892 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3893 event->attach_state |= PERF_ATTACH_SCHED_CB;
3894 }
3895 }
3896 if (x86_pmu.pebs_aliases)
3897 x86_pmu.pebs_aliases(event);
3898 }
3899
3900 if (needs_branch_stack(event)) {
3901 ret = intel_pmu_setup_lbr_filter(event);
3902 if (ret)
3903 return ret;
3904 event->attach_state |= PERF_ATTACH_SCHED_CB;
3905
3906 /*
3907 * BTS is set up earlier in this path, so don't account twice
3908 */
3909 if (!unlikely(intel_pmu_has_bts(event))) {
3910 /* disallow lbr if conflicting events are present */
3911 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3912 return -EBUSY;
3913
3914 event->destroy = hw_perf_lbr_event_destroy;
3915 }
3916 }
3917
3918 if (event->attr.aux_output) {
3919 if (!event->attr.precise_ip)
3920 return -EINVAL;
3921
3922 event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3923 }
3924
3925 if ((event->attr.type == PERF_TYPE_HARDWARE) ||
3926 (event->attr.type == PERF_TYPE_HW_CACHE))
3927 return 0;
3928
3929 /*
3930	 * Configure the Topdown slots and metric events
3931 *
3932 * The slots event on Fixed Counter 3 can support sampling,
3933 * which will be handled normally in x86_perf_event_update().
3934 *
3935 * Metric events don't support sampling and require being paired
3936 * with a slots event as group leader. When the slots event
3937 * is used in a metrics group, it too cannot support sampling.
3938 */
3939 if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
3940 if (event->attr.config1 || event->attr.config2)
3941 return -EINVAL;
3942
3943 /*
3944 * The TopDown metrics events and slots event don't
3945 * support any filters.
3946 */
3947 if (event->attr.config & X86_ALL_EVENT_FLAGS)
3948 return -EINVAL;
3949
3950 if (is_available_metric_event(event)) {
3951 struct perf_event *leader = event->group_leader;
3952
3953 /* The metric events don't support sampling. */
3954 if (is_sampling_event(event))
3955 return -EINVAL;
3956
3957 /* The metric events require a slots group leader. */
3958 if (!is_slots_event(leader))
3959 return -EINVAL;
3960
3961 /*
3962 * The leader/SLOTS must not be a sampling event for
3963 * metric use; hardware requires it starts at 0 when used
3964 * in conjunction with MSR_PERF_METRICS.
3965 */
3966 if (is_sampling_event(leader))
3967 return -EINVAL;
3968
3969 event->event_caps |= PERF_EV_CAP_SIBLING;
3970 /*
3971 * Only once we have a METRICs sibling do we
3972 * need TopDown magic.
3973 */
3974 leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3975 event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3976 }
3977 }
3978
3979 /*
3980 * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
3981	 * doesn't function quite right. As a work-around, it always needs to be
3982	 * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
3983	 * The actual count of this second event is irrelevant; it just needs
3984	 * to be active to make the first event function correctly.
3985	 *
3986	 * In a group, the auxiliary event must be in front of the load latency
3987	 * event. This rule simplifies the implementation of the check, because
3988	 * perf does not have the complete group at this point.
3989 */
3990 if (require_mem_loads_aux_event(event) &&
3991 (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
3992 is_mem_loads_event(event)) {
3993 struct perf_event *leader = event->group_leader;
3994 struct perf_event *sibling = NULL;
3995
3996 /*
3997		 * If this mem-loads event is also the first event (no group
3998		 * exists yet), there is no aux event before it.
3999 */
4000 if (leader == event)
4001 return -ENODATA;
4002
4003 if (!is_mem_loads_aux_event(leader)) {
4004 for_each_sibling_event(sibling, leader) {
4005 if (is_mem_loads_aux_event(sibling))
4006 break;
4007 }
4008 if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
4009 return -ENODATA;
4010 }
4011 }
4012
4013 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
4014 return 0;
4015
4016 if (x86_pmu.version < 3)
4017 return -EINVAL;
4018
4019 ret = perf_allow_cpu(&event->attr);
4020 if (ret)
4021 return ret;
4022
4023 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
4024
4025 return 0;
4026}
4027
4028/*
4029 * Currently, the only caller of this function is atomic_switch_perf_msrs().
4030 * The host perf context helps to prepare the values of the real hardware for
4031 * a set of msrs that need to be switched atomically in a vmx transaction.
4032 *
4033 * For example, the pseudocode needed to add a new msr should look like:
4034 *
4035 * arr[(*nr)++] = (struct perf_guest_switch_msr){
4036 * .msr = the hardware msr address,
4037 * .host = the value the hardware has when it doesn't run a guest,
4038 * .guest = the value the hardware has when it runs a guest,
4039 * };
4040 *
4041 * These values have nothing to do with the emulated values the guest sees
4042 * when it uses {RD,WR}MSR, which should be handled by the KVM context,
4043 * specifically in the intel_pmu_{get,set}_msr().
4044 */
4045static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
4046{
4047 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4048 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4049 struct kvm_pmu *kvm_pmu = (struct kvm_pmu *)data;
4050 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
4051 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable;
4052 int global_ctrl, pebs_enable;
4053
4054 *nr = 0;
4055 global_ctrl = (*nr)++;
4056 arr[global_ctrl] = (struct perf_guest_switch_msr){
4057 .msr = MSR_CORE_PERF_GLOBAL_CTRL,
4058 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask,
4059 .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask),
4060 };
4061
4062 if (!x86_pmu.pebs)
4063 return arr;
4064
4065 /*
4066	 * If a PMU counter has PEBS enabled, it is not enough to
4067	 * disable the counter on guest entry, since a PEBS memory
4068	 * write can overshoot guest entry and corrupt guest
4069 * memory. Disabling PEBS solves the problem.
4070 *
4071 * Don't do this if the CPU already enforces it.
4072 */
4073 if (x86_pmu.pebs_no_isolation) {
4074 arr[(*nr)++] = (struct perf_guest_switch_msr){
4075 .msr = MSR_IA32_PEBS_ENABLE,
4076 .host = cpuc->pebs_enabled,
4077 .guest = 0,
4078 };
4079 return arr;
4080 }
4081
4082 if (!kvm_pmu || !x86_pmu.pebs_ept)
4083 return arr;
4084
4085 arr[(*nr)++] = (struct perf_guest_switch_msr){
4086 .msr = MSR_IA32_DS_AREA,
4087 .host = (unsigned long)cpuc->ds,
4088 .guest = kvm_pmu->ds_area,
4089 };
4090
4091 if (x86_pmu.intel_cap.pebs_baseline) {
4092 arr[(*nr)++] = (struct perf_guest_switch_msr){
4093 .msr = MSR_PEBS_DATA_CFG,
4094 .host = cpuc->active_pebs_data_cfg,
4095 .guest = kvm_pmu->pebs_data_cfg,
4096 };
4097 }
4098
4099 pebs_enable = (*nr)++;
4100 arr[pebs_enable] = (struct perf_guest_switch_msr){
4101 .msr = MSR_IA32_PEBS_ENABLE,
4102 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
4103 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask,
4104 };
4105
4106 if (arr[pebs_enable].host) {
4107 /* Disable guest PEBS if host PEBS is enabled. */
4108 arr[pebs_enable].guest = 0;
4109 } else {
4110 /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
4111 arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
4112 arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
4113 /* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
4114 arr[global_ctrl].guest |= arr[pebs_enable].guest;
4115 }
4116
4117 return arr;
4118}
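
/*
 * A sketch of the array this builds in the PEBS-capable,
 * non-isolation case with a KVM guest and pebs_baseline set:
 *
 *	arr[0] = MSR_CORE_PERF_GLOBAL_CTRL	(always present)
 *	arr[1] = MSR_IA32_DS_AREA
 *	arr[2] = MSR_PEBS_DATA_CFG
 *	arr[3] = MSR_IA32_PEBS_ENABLE
 *
 * On pebs_no_isolation parts only GLOBAL_CTRL and PEBS_ENABLE
 * (with .guest = 0) are returned.
 */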
4119
4120static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr, void *data)
4121{
4122 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4123 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
4124 int idx;
4125
4126 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4127 struct perf_event *event = cpuc->events[idx];
4128
4129 arr[idx].msr = x86_pmu_config_addr(idx);
4130 arr[idx].host = arr[idx].guest = 0;
4131
4132 if (!test_bit(idx, cpuc->active_mask))
4133 continue;
4134
4135 arr[idx].host = arr[idx].guest =
4136 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
4137
4138 if (event->attr.exclude_host)
4139 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4140 else if (event->attr.exclude_guest)
4141 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
4142 }
4143
4144 *nr = x86_pmu.num_counters;
4145 return arr;
4146}
4147
4148static void core_pmu_enable_event(struct perf_event *event)
4149{
4150 if (!event->attr.exclude_host)
4151 x86_pmu_enable_event(event);
4152}
4153
4154static void core_pmu_enable_all(int added)
4155{
4156 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
4157 int idx;
4158
4159 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
4160 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
4161
4162 if (!test_bit(idx, cpuc->active_mask) ||
4163 cpuc->events[idx]->attr.exclude_host)
4164 continue;
4165
4166 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
4167 }
4168}
4169
4170static int hsw_hw_config(struct perf_event *event)
4171{
4172 int ret = intel_pmu_hw_config(event);
4173
4174 if (ret)
4175 return ret;
4176 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
4177 return 0;
4178 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
4179
4180 /*
4181 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
4182	 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
4183 * this combination.
4184 */
4185 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
4186 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
4187 event->attr.precise_ip > 0))
4188 return -EOPNOTSUPP;
4189
4190 if (event_is_checkpointed(event)) {
4191 /*
4192 * Sampling of checkpointed events can cause situations where
4193		 * the CPU constantly aborts because of an overflow, which is
4194 * then checkpointed back and ignored. Forbid checkpointing
4195 * for sampling.
4196 *
4197 * But still allow a long sampling period, so that perf stat
4198 * from KVM works.
4199 */
4200 if (event->attr.sample_period > 0 &&
4201 event->attr.sample_period < 0x7fffffff)
4202 return -EOPNOTSUPP;
4203 }
4204 return 0;
4205}
4206
4207static struct event_constraint counter0_constraint =
4208 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
4209
4210static struct event_constraint counter1_constraint =
4211 INTEL_ALL_EVENT_CONSTRAINT(0, 0x2);
4212
4213static struct event_constraint counter0_1_constraint =
4214 INTEL_ALL_EVENT_CONSTRAINT(0, 0x3);
4215
4216static struct event_constraint counter2_constraint =
4217 EVENT_CONSTRAINT(0, 0x4, 0);
4218
4219static struct event_constraint fixed0_constraint =
4220 FIXED_EVENT_CONSTRAINT(0x00c0, 0);
4221
4222static struct event_constraint fixed0_counter0_constraint =
4223 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
4224
4225static struct event_constraint fixed0_counter0_1_constraint =
4226 INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000003ULL);
4227
4228static struct event_constraint counters_1_7_constraint =
4229 INTEL_ALL_EVENT_CONSTRAINT(0, 0xfeULL);
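
/*
 * The masks above are counter bitmaps: bit N selects GP counter N,
 * and fixed counters start at bit 32 (INTEL_PMC_IDX_FIXED). For
 * example, 0x100000001ULL is fixed counter 0 plus GP counter 0, and
 * 0xfeULL is GP counters 1-7.
 */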
4230
4231static struct event_constraint *
4232hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4233 struct perf_event *event)
4234{
4235 struct event_constraint *c;
4236
4237 c = intel_get_event_constraints(cpuc, idx, event);
4238
4239 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4240 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4241 if (c->idxmsk64 & (1U << 2))
4242 return &counter2_constraint;
4243 return &emptyconstraint;
4244 }
4245
4246 return c;
4247}
4248
4249static struct event_constraint *
4250icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4251 struct perf_event *event)
4252{
4253 /*
4254 * Fixed counter 0 has less skid.
4255 * Force instruction:ppp in Fixed counter 0
4256 */
4257 if ((event->attr.precise_ip == 3) &&
4258 constraint_match(&fixed0_constraint, event->hw.config))
4259 return &fixed0_constraint;
4260
4261 return hsw_get_event_constraints(cpuc, idx, event);
4262}
4263
4264static struct event_constraint *
4265spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4266 struct perf_event *event)
4267{
4268 struct event_constraint *c;
4269
4270 c = icl_get_event_constraints(cpuc, idx, event);
4271
4272 /*
4273 * The :ppp indicates the Precise Distribution (PDist) facility, which
4274	 * is only supported on GP counter 0. If a :ppp event is not
4275	 * available on GP counter 0, error out.
4276 * Exception: Instruction PDIR is only available on the fixed counter 0.
4277 */
4278 if ((event->attr.precise_ip == 3) &&
4279 !constraint_match(&fixed0_constraint, event->hw.config)) {
4280 if (c->idxmsk64 & BIT_ULL(0))
4281 return &counter0_constraint;
4282
4283 return &emptyconstraint;
4284 }
4285
4286 return c;
4287}
4288
4289static struct event_constraint *
4290glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4291 struct perf_event *event)
4292{
4293 struct event_constraint *c;
4294
4295 /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4296 if (event->attr.precise_ip == 3)
4297 return &counter0_constraint;
4298
4299 c = intel_get_event_constraints(cpuc, idx, event);
4300
4301 return c;
4302}
4303
4304static struct event_constraint *
4305tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4306 struct perf_event *event)
4307{
4308 struct event_constraint *c;
4309
4310 c = intel_get_event_constraints(cpuc, idx, event);
4311
4312 /*
4313 * :ppp means to do reduced skid PEBS,
4314 * which is available on PMC0 and fixed counter 0.
4315 */
4316 if (event->attr.precise_ip == 3) {
4317 /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4318 if (constraint_match(&fixed0_constraint, event->hw.config))
4319 return &fixed0_counter0_constraint;
4320
4321 return &counter0_constraint;
4322 }
4323
4324 return c;
4325}
4326
4327static bool allow_tsx_force_abort = true;
4328
4329static struct event_constraint *
4330tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4331 struct perf_event *event)
4332{
4333 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4334
4335 /*
4336 * Without TFA we must not use PMC3.
4337 */
4338 if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4339 c = dyn_constraint(cpuc, c, idx);
4340 c->idxmsk64 &= ~(1ULL << 3);
4341 c->weight--;
4342 }
4343
4344 return c;
4345}
4346
4347static struct event_constraint *
4348adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4349 struct perf_event *event)
4350{
4351 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4352
4353 if (pmu->cpu_type == hybrid_big)
4354 return spr_get_event_constraints(cpuc, idx, event);
4355 else if (pmu->cpu_type == hybrid_small)
4356 return tnt_get_event_constraints(cpuc, idx, event);
4357
4358 WARN_ON(1);
4359 return &emptyconstraint;
4360}
4361
4362static struct event_constraint *
4363cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4364 struct perf_event *event)
4365{
4366 struct event_constraint *c;
4367
4368 c = intel_get_event_constraints(cpuc, idx, event);
4369
4370 /*
4371 * The :ppp indicates the Precise Distribution (PDist) facility, which
4372 * is only supported on the GP counter 0 & 1 and Fixed counter 0.
4373	 * If a :ppp event is not available on the above eligible counters,
4374 * error out.
4375 */
4376 if (event->attr.precise_ip == 3) {
4377 /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */
4378 if (constraint_match(&fixed0_constraint, event->hw.config))
4379 return &fixed0_counter0_1_constraint;
4380
4381 switch (c->idxmsk64 & 0x3ull) {
4382 case 0x1:
4383 return &counter0_constraint;
4384 case 0x2:
4385 return &counter1_constraint;
4386 case 0x3:
4387 return &counter0_1_constraint;
4388 }
4389 return &emptyconstraint;
4390 }
4391
4392 return c;
4393}
4394
4395static struct event_constraint *
4396rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4397 struct perf_event *event)
4398{
4399 struct event_constraint *c;
4400
4401 c = spr_get_event_constraints(cpuc, idx, event);
4402
4403 /* The Retire Latency is not supported by the fixed counter 0. */
4404 if (event->attr.precise_ip &&
4405 (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
4406 constraint_match(&fixed0_constraint, event->hw.config)) {
4407 /*
4408 * The Instruction PDIR is only available
4409 * on the fixed counter 0. Error out for this case.
4410 */
4411 if (event->attr.precise_ip == 3)
4412 return &emptyconstraint;
4413 return &counters_1_7_constraint;
4414 }
4415
4416 return c;
4417}
4418
4419static struct event_constraint *
4420mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4421 struct perf_event *event)
4422{
4423 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4424
4425 if (pmu->cpu_type == hybrid_big)
4426 return rwc_get_event_constraints(cpuc, idx, event);
4427 if (pmu->cpu_type == hybrid_small)
4428 return cmt_get_event_constraints(cpuc, idx, event);
4429
4430 WARN_ON(1);
4431 return &emptyconstraint;
4432}
4433
4434static int adl_hw_config(struct perf_event *event)
4435{
4436 struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4437
4438 if (pmu->cpu_type == hybrid_big)
4439 return hsw_hw_config(event);
4440 else if (pmu->cpu_type == hybrid_small)
4441 return intel_pmu_hw_config(event);
4442
4443 WARN_ON(1);
4444 return -EOPNOTSUPP;
4445}
4446
4447static u8 adl_get_hybrid_cpu_type(void)
4448{
4449 return hybrid_big;
4450}
4451
4452/*
4453 * Broadwell:
4454 *
4455 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4456 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4457 * the two to enforce a minimum period of 128 (the smallest value that has bits
4458 * 0-5 cleared and >= 100).
4459 *
4460 * Because of how the code in x86_perf_event_set_period() works, the truncation
4461 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4462 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4463 *
4464 * Therefore the effective (average) period matches the requested period,
4465 * despite coarser hardware granularity.
4466 */
4467static void bdw_limit_period(struct perf_event *event, s64 *left)
4468{
4469 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4470 X86_CONFIG(.event=0xc0, .umask=0x01)) {
4471 if (*left < 128)
4472 *left = 128;
4473 *left &= ~0x3fULL;
4474 }
4475}
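
/*
 * For example, a requested period of 100 becomes 128 and one of 200
 * becomes 192; the 8 truncated counts are carried in period_left and
 * made up on a later reload, as described above.
 */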
4476
4477static void nhm_limit_period(struct perf_event *event, s64 *left)
4478{
4479 *left = max(*left, 32LL);
4480}
4481
4482static void spr_limit_period(struct perf_event *event, s64 *left)
4483{
4484 if (event->attr.precise_ip == 3)
4485 *left = max(*left, 128LL);
4486}
4487
4488PMU_FORMAT_ATTR(event, "config:0-7" );
4489PMU_FORMAT_ATTR(umask, "config:8-15" );
4490PMU_FORMAT_ATTR(edge, "config:18" );
4491PMU_FORMAT_ATTR(pc, "config:19" );
4492PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
4493PMU_FORMAT_ATTR(inv, "config:23" );
4494PMU_FORMAT_ATTR(cmask, "config:24-31" );
4495PMU_FORMAT_ATTR(in_tx, "config:32");
4496PMU_FORMAT_ATTR(in_tx_cp, "config:33");
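
/*
 * These format attributes are exported via sysfs (e.g. under
 * /sys/bus/event_source/devices/cpu/format/) and define the raw
 * config syntax used by tooling. A hypothetical invocation of the
 * cycle alias built by intel_pebs_aliases_core2() would be:
 *
 *	perf stat -e cpu/event=0xc0,inv,cmask=16/ -- sleep 1
 */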
4497
4498static struct attribute *intel_arch_formats_attr[] = {
4499 &format_attr_event.attr,
4500 &format_attr_umask.attr,
4501 &format_attr_edge.attr,
4502 &format_attr_pc.attr,
4503 &format_attr_inv.attr,
4504 &format_attr_cmask.attr,
4505 NULL,
4506};
4507
4508ssize_t intel_event_sysfs_show(char *page, u64 config)
4509{
4510 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4511
4512 return x86_event_sysfs_show(page, config, event);
4513}
4514
4515static struct intel_shared_regs *allocate_shared_regs(int cpu)
4516{
4517 struct intel_shared_regs *regs;
4518 int i;
4519
4520 regs = kzalloc_node(sizeof(struct intel_shared_regs),
4521 GFP_KERNEL, cpu_to_node(cpu));
4522 if (regs) {
4523 /*
4524 * initialize the locks to keep lockdep happy
4525 */
4526 for (i = 0; i < EXTRA_REG_MAX; i++)
4527			raw_spin_lock_init(&regs->regs[i].lock);
4528
4529 regs->core_id = -1;
4530 }
4531 return regs;
4532}
4533
4534static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4535{
4536 struct intel_excl_cntrs *c;
4537
4538 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4539 GFP_KERNEL, cpu_to_node(cpu));
4540 if (c) {
4541 raw_spin_lock_init(&c->lock);
4542 c->core_id = -1;
4543 }
4544 return c;
4545}
4546
4547
4548int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4549{
4550 cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4551
4552 if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4553 cpuc->shared_regs = allocate_shared_regs(cpu);
4554 if (!cpuc->shared_regs)
4555 goto err;
4556 }
4557
4558 if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4559 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4560
4561 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4562 if (!cpuc->constraint_list)
4563 goto err_shared_regs;
4564 }
4565
4566 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4567 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4568 if (!cpuc->excl_cntrs)
4569 goto err_constraint_list;
4570
4571 cpuc->excl_thread_id = 0;
4572 }
4573
4574 return 0;
4575
4576err_constraint_list:
4577 kfree(cpuc->constraint_list);
4578 cpuc->constraint_list = NULL;
4579
4580err_shared_regs:
4581 kfree(cpuc->shared_regs);
4582 cpuc->shared_regs = NULL;
4583
4584err:
4585 return -ENOMEM;
4586}
4587
4588static int intel_pmu_cpu_prepare(int cpu)
4589{
4590 return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4591}
4592
4593static void flip_smm_bit(void *data)
4594{
4595 unsigned long set = *(unsigned long *)data;
4596
4597 if (set > 0) {
4598 msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4599 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4600 } else {
4601 msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4602 DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4603 }
4604}
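
/*
 * DEBUGCTLMSR_FREEZE_IN_SMM_BIT is the FREEZE_WHILE_SMM control in
 * IA32_DEBUGCTL: while it is set, the PMU does not count during SMM.
 * This helper backs the freeze_on_smi sysfs attribute defined further
 * below.
 */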
4605
4606static void intel_pmu_check_num_counters(int *num_counters,
4607 int *num_counters_fixed,
4608 u64 *intel_ctrl, u64 fixed_mask);
4609
4610static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
4611{
4612 unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
4613 unsigned int eax, ebx, ecx, edx;
4614
4615 if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
4616 cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
4617 &eax, &ebx, &ecx, &edx);
4618 pmu->num_counters = fls(eax);
4619 pmu->num_counters_fixed = fls(ebx);
4620 intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
4621 &pmu->intel_ctrl, ebx);
4622 }
4623}
4624
4625static bool init_hybrid_pmu(int cpu)
4626{
4627 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4628 u8 cpu_type = get_this_hybrid_cpu_type();
4629 struct x86_hybrid_pmu *pmu = NULL;
4630 int i;
4631
4632 if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4633 cpu_type = x86_pmu.get_hybrid_cpu_type();
4634
4635 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4636 if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4637 pmu = &x86_pmu.hybrid_pmu[i];
4638 break;
4639 }
4640 }
4641 if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4642 cpuc->pmu = NULL;
4643 return false;
4644 }
4645
4646 /* Only check and dump the PMU information for the first CPU */
4647 if (!cpumask_empty(&pmu->supported_cpus))
4648 goto end;
4649
4650 if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
4651 update_pmu_cap(pmu);
4652
4653 if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4654 return false;
4655
4656 pr_info("%s PMU driver: ", pmu->name);
4657
4658 if (pmu->intel_cap.pebs_output_pt_available)
4659 pr_cont("PEBS-via-PT ");
4660
4661 pr_cont("\n");
4662
4663 x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4664 pmu->intel_ctrl);
4665
4666end:
4667 cpumask_set_cpu(cpu, &pmu->supported_cpus);
4668 cpuc->pmu = &pmu->pmu;
4669
4670 return true;
4671}
4672
4673static void intel_pmu_cpu_starting(int cpu)
4674{
4675 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4676 int core_id = topology_core_id(cpu);
4677 int i;
4678
4679 if (is_hybrid() && !init_hybrid_pmu(cpu))
4680 return;
4681
4682 init_debug_store_on_cpu(cpu);
4683 /*
4684 * Deal with CPUs that don't clear their LBRs on power-up.
4685 */
4686 intel_pmu_lbr_reset();
4687
4688 cpuc->lbr_sel = NULL;
4689
4690 if (x86_pmu.flags & PMU_FL_TFA) {
4691 WARN_ON_ONCE(cpuc->tfa_shadow);
4692 cpuc->tfa_shadow = ~0ULL;
4693 intel_set_tfa(cpuc, false);
4694 }
4695
4696 if (x86_pmu.version > 1)
4697 flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4698
4699 /*
4700 * Disable perf metrics if any added CPU doesn't support it.
4701 *
4702 * Turn off the check for a hybrid architecture, because the
4703	 * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
4704	 * the architectural features. Perf metrics is a model-specific
4705 * feature for now. The corresponding bit should always be 0 on
4706 * a hybrid platform, e.g., Alder Lake.
4707 */
4708 if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4709 union perf_capabilities perf_cap;
4710
4711 rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4712 if (!perf_cap.perf_metrics) {
4713 x86_pmu.intel_cap.perf_metrics = 0;
4714 x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4715 }
4716 }
4717
4718 if (!cpuc->shared_regs)
4719 return;
4720
4721 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4722 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4723 struct intel_shared_regs *pc;
4724
4725 pc = per_cpu(cpu_hw_events, i).shared_regs;
4726 if (pc && pc->core_id == core_id) {
4727 cpuc->kfree_on_online[0] = cpuc->shared_regs;
4728 cpuc->shared_regs = pc;
4729 break;
4730 }
4731 }
4732 cpuc->shared_regs->core_id = core_id;
4733 cpuc->shared_regs->refcnt++;
4734 }
4735
4736 if (x86_pmu.lbr_sel_map)
4737 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4738
4739 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4740 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4741 struct cpu_hw_events *sibling;
4742 struct intel_excl_cntrs *c;
4743
4744 sibling = &per_cpu(cpu_hw_events, i);
4745 c = sibling->excl_cntrs;
4746 if (c && c->core_id == core_id) {
4747 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4748 cpuc->excl_cntrs = c;
4749 if (!sibling->excl_thread_id)
4750 cpuc->excl_thread_id = 1;
4751 break;
4752 }
4753 }
4754 cpuc->excl_cntrs->core_id = core_id;
4755 cpuc->excl_cntrs->refcnt++;
4756 }
4757}
4758
4759static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4760{
4761 struct intel_excl_cntrs *c;
4762
4763 c = cpuc->excl_cntrs;
4764 if (c) {
4765 if (c->core_id == -1 || --c->refcnt == 0)
4766 kfree(c);
4767 cpuc->excl_cntrs = NULL;
4768 }
4769
4770 kfree(cpuc->constraint_list);
4771 cpuc->constraint_list = NULL;
4772}
4773
4774static void intel_pmu_cpu_dying(int cpu)
4775{
4776 fini_debug_store_on_cpu(cpu);
4777}
4778
4779void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4780{
4781 struct intel_shared_regs *pc;
4782
4783 pc = cpuc->shared_regs;
4784 if (pc) {
4785 if (pc->core_id == -1 || --pc->refcnt == 0)
4786 kfree(pc);
4787 cpuc->shared_regs = NULL;
4788 }
4789
4790 free_excl_cntrs(cpuc);
4791}
4792
4793static void intel_pmu_cpu_dead(int cpu)
4794{
4795 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4796
4797 intel_cpuc_finish(cpuc);
4798
4799 if (is_hybrid() && cpuc->pmu)
4800 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
4801}
4802
4803static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
4804 bool sched_in)
4805{
4806 intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
4807 intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
4808}
4809
4810static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
4811 struct perf_event_pmu_context *next_epc)
4812{
4813 intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
4814}
4815
4816static int intel_pmu_check_period(struct perf_event *event, u64 value)
4817{
4818 return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
4819}
4820
4821static void intel_aux_output_init(void)
4822{
4823	/* See also intel_pmu_aux_output_match() */
4824 if (x86_pmu.intel_cap.pebs_output_pt_available)
4825 x86_pmu.assign = intel_pmu_assign_event;
4826}
4827
4828static int intel_pmu_aux_output_match(struct perf_event *event)
4829{
4830	/* intel_pmu_assign_event() is needed; see intel_aux_output_init() */
4831 if (!x86_pmu.intel_cap.pebs_output_pt_available)
4832 return 0;
4833
4834 return is_intel_pt_event(event);
4835}
4836
4837static void intel_pmu_filter(struct pmu *pmu, int cpu, bool *ret)
4838{
4839 struct x86_hybrid_pmu *hpmu = hybrid_pmu(pmu);
4840
4841 *ret = !cpumask_test_cpu(cpu, &hpmu->supported_cpus);
4842}
4843
4844PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
4845
4846PMU_FORMAT_ATTR(ldlat, "config1:0-15");
4847
4848PMU_FORMAT_ATTR(frontend, "config1:0-23");
4849
4850static struct attribute *intel_arch3_formats_attr[] = {
4851 &format_attr_event.attr,
4852 &format_attr_umask.attr,
4853 &format_attr_edge.attr,
4854 &format_attr_pc.attr,
4855 &format_attr_any.attr,
4856 &format_attr_inv.attr,
4857 &format_attr_cmask.attr,
4858 NULL,
4859};
4860
4861static struct attribute *hsw_format_attr[] = {
4862 &format_attr_in_tx.attr,
4863 &format_attr_in_tx_cp.attr,
4864 &format_attr_offcore_rsp.attr,
4865 &format_attr_ldlat.attr,
4866 NULL
4867};
4868
4869static struct attribute *nhm_format_attr[] = {
4870 &format_attr_offcore_rsp.attr,
4871 &format_attr_ldlat.attr,
4872 NULL
4873};
4874
4875static struct attribute *slm_format_attr[] = {
4876 &format_attr_offcore_rsp.attr,
4877 NULL
4878};
4879
4880static struct attribute *skl_format_attr[] = {
4881 &format_attr_frontend.attr,
4882 NULL,
4883};
4884
4885static __initconst const struct x86_pmu core_pmu = {
4886 .name = "core",
4887 .handle_irq = x86_pmu_handle_irq,
4888 .disable_all = x86_pmu_disable_all,
4889 .enable_all = core_pmu_enable_all,
4890 .enable = core_pmu_enable_event,
4891 .disable = x86_pmu_disable_event,
4892 .hw_config = core_pmu_hw_config,
4893 .schedule_events = x86_schedule_events,
4894 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4895 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4896 .event_map = intel_pmu_event_map,
4897 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4898 .apic = 1,
4899 .large_pebs_flags = LARGE_PEBS_FLAGS,
4900
4901 /*
4902 * Intel PMCs cannot be accessed sanely above 32-bit width,
4903 * so we install an artificial 1<<31 period regardless of
4904 * the generic event period:
4905 */
4906 .max_period = (1ULL<<31) - 1,
4907 .get_event_constraints = intel_get_event_constraints,
4908 .put_event_constraints = intel_put_event_constraints,
4909 .event_constraints = intel_core_event_constraints,
4910 .guest_get_msrs = core_guest_get_msrs,
4911 .format_attrs = intel_arch_formats_attr,
4912 .events_sysfs_show = intel_event_sysfs_show,
4913
4914 /*
4915 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
4916 * together with PMU version 1 and thus be using core_pmu with
4917	 * shared_regs. We need the following callbacks here to allocate
4918 * it properly.
4919 */
4920 .cpu_prepare = intel_pmu_cpu_prepare,
4921 .cpu_starting = intel_pmu_cpu_starting,
4922 .cpu_dying = intel_pmu_cpu_dying,
4923 .cpu_dead = intel_pmu_cpu_dead,
4924
4925 .check_period = intel_pmu_check_period,
4926
4927 .lbr_reset = intel_pmu_lbr_reset_64,
4928 .lbr_read = intel_pmu_lbr_read_64,
4929 .lbr_save = intel_pmu_lbr_save,
4930 .lbr_restore = intel_pmu_lbr_restore,
4931};
4932
4933static __initconst const struct x86_pmu intel_pmu = {
4934 .name = "Intel",
4935 .handle_irq = intel_pmu_handle_irq,
4936 .disable_all = intel_pmu_disable_all,
4937 .enable_all = intel_pmu_enable_all,
4938 .enable = intel_pmu_enable_event,
4939 .disable = intel_pmu_disable_event,
4940 .add = intel_pmu_add_event,
4941 .del = intel_pmu_del_event,
4942 .read = intel_pmu_read_event,
4943 .set_period = intel_pmu_set_period,
4944 .update = intel_pmu_update,
4945 .hw_config = intel_pmu_hw_config,
4946 .schedule_events = x86_schedule_events,
4947 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
4948 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
4949 .event_map = intel_pmu_event_map,
4950 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
4951 .apic = 1,
4952 .large_pebs_flags = LARGE_PEBS_FLAGS,
4953 /*
4954 * Intel PMCs cannot be accessed sanely above 32 bit width,
4955 * so we install an artificial 1<<31 period regardless of
4956 * the generic event period:
4957 */
4958 .max_period = (1ULL << 31) - 1,
4959 .get_event_constraints = intel_get_event_constraints,
4960 .put_event_constraints = intel_put_event_constraints,
4961 .pebs_aliases = intel_pebs_aliases_core2,
4962
4963 .format_attrs = intel_arch3_formats_attr,
4964 .events_sysfs_show = intel_event_sysfs_show,
4965
4966 .cpu_prepare = intel_pmu_cpu_prepare,
4967 .cpu_starting = intel_pmu_cpu_starting,
4968 .cpu_dying = intel_pmu_cpu_dying,
4969 .cpu_dead = intel_pmu_cpu_dead,
4970
4971 .guest_get_msrs = intel_guest_get_msrs,
4972 .sched_task = intel_pmu_sched_task,
4973 .swap_task_ctx = intel_pmu_swap_task_ctx,
4974
4975 .check_period = intel_pmu_check_period,
4976
4977 .aux_output_match = intel_pmu_aux_output_match,
4978
4979 .lbr_reset = intel_pmu_lbr_reset_64,
4980 .lbr_read = intel_pmu_lbr_read_64,
4981 .lbr_save = intel_pmu_lbr_save,
4982 .lbr_restore = intel_pmu_lbr_restore,
4983
4984 /*
4985 * SMM has access to all 4 rings and while traditionally SMM code only
4986 * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
4987 *
4988 * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
4989 * between SMM or not, this results in what should be pure userspace
4990 * counters including SMM data.
4991 *
4992 * This is a clear privilege issue, therefore globally disable
4993 * counting SMM by default.
4994 */
4995 .attr_freeze_on_smi = 1,
4996};
4997
4998static __init void intel_clovertown_quirk(void)
4999{
5000 /*
5001 * PEBS is unreliable due to:
5002 *
5003 * AJ67 - PEBS may experience CPL leaks
5004 * AJ68 - PEBS PMI may be delayed by one event
5005 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
5006 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
5007 *
5008 * AJ67 could be worked around by restricting the OS/USR flags.
5009 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
5010 *
5011 * AJ106 could possibly be worked around by not allowing LBR
5012 * usage from PEBS, including the fixup.
5013 * AJ68 could possibly be worked around by always programming
5014 * a pebs_event_reset[0] value and coping with the lost events.
5015 *
5016 * But taken together it might just make sense to not enable PEBS on
5017 * these chips.
5018 */
5019 pr_warn("PEBS disabled due to CPU errata\n");
5020 x86_pmu.pebs = 0;
5021 x86_pmu.pebs_constraints = NULL;
5022}
5023
5024static const struct x86_cpu_desc isolation_ucodes[] = {
5025 INTEL_CPU_DESC(INTEL_FAM6_HASWELL, 3, 0x0000001f),
5026 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_L, 1, 0x0000001e),
5027 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_G, 1, 0x00000015),
5028 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037),
5029 INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a),
5030 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL, 4, 0x00000023),
5031 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_G, 1, 0x00000014),
5032 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 2, 0x00000010),
5033 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
5034 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
5035 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
5036 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
5037 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
5038 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
5039 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
5040 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000),
5041 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000),
5042 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000),
5043 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c),
5044 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c),
5045 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
5046 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 9, 0x0000004e),
5047 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 10, 0x0000004e),
5048 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 11, 0x0000004e),
5049 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_L, 12, 0x0000004e),
5050 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 10, 0x0000004e),
5051 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 11, 0x0000004e),
5052 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 12, 0x0000004e),
5053 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 13, 0x0000004e),
5054 {}
5055};
5056
5057static void intel_check_pebs_isolation(void)
5058{
5059 x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
5060}
5061
5062static __init void intel_pebs_isolation_quirk(void)
5063{
5064 WARN_ON_ONCE(x86_pmu.check_microcode);
5065 x86_pmu.check_microcode = intel_check_pebs_isolation;
5066 intel_check_pebs_isolation();
5067}
5068
5069static const struct x86_cpu_desc pebs_ucodes[] = {
5070 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028),
5071 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618),
5072 INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c),
5073 {}
5074};
5075
5076static bool intel_snb_pebs_broken(void)
5077{
5078 return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
5079}
5080
5081static void intel_snb_check_microcode(void)
5082{
5083 if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
5084 return;
5085
5086 /*
5087	 * Serialized by the microcode lock.
5088 */
5089 if (x86_pmu.pebs_broken) {
5090 pr_info("PEBS enabled due to microcode update\n");
5091 x86_pmu.pebs_broken = 0;
5092 } else {
5093 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
5094 x86_pmu.pebs_broken = 1;
5095 }
5096}
5097
5098static bool is_lbr_from(unsigned long msr)
5099{
5100 unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
5101
5102 return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
5103}
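
/*
 * For example, assuming a Nehalem-style layout with x86_pmu.lbr_from
 * == MSR_LBR_NHM_FROM (0x680) and x86_pmu.lbr_nr == 16, this matches
 * MSRs 0x680-0x68f.
 */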
5104
5105/*
5106 * Under certain circumstances, accessing certain MSRs may cause #GP.
5107 * The function tests if the input MSR can be safely accessed.
5108 */
5109static bool check_msr(unsigned long msr, u64 mask)
5110{
5111 u64 val_old, val_new, val_tmp;
5112
5113 /*
5114 * Disable the check for real HW, so we don't
5115 * mess with potentially enabled registers:
5116 */
5117 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
5118 return true;
5119
5120 /*
5121 * Read the current value, change it and read it back to see if it
5122	 * matches; this is needed to detect certain hardware emulators
5123 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
5124 */
5125 if (rdmsrl_safe(msr, &val_old))
5126 return false;
5127
5128 /*
5129 * Only change the bits which can be updated by wrmsrl.
5130 */
5131 val_tmp = val_old ^ mask;
5132
5133 if (is_lbr_from(msr))
5134 val_tmp = lbr_from_signext_quirk_wr(val_tmp);
5135
5136 if (wrmsrl_safe(msr, val_tmp) ||
5137 rdmsrl_safe(msr, &val_new))
5138 return false;
5139
5140 /*
5141 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
5142 * should equal rdmsrl()'s even with the quirk.
5143 */
5144 if (val_new != val_tmp)
5145 return false;
5146
5147 if (is_lbr_from(msr))
5148 val_old = lbr_from_signext_quirk_wr(val_old);
5149
5150	/* At this point it is certain that the MSR can be safely accessed.
5151	 * Restore the old value and return.
5152 */
5153 wrmsrl(msr, val_old);
5154
5155 return true;
5156}
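
/*
 * A minimal usage sketch (hypothetical; the real callers sit in
 * intel_pmu_init() and its helpers): probe the LBR TOS MSR and give
 * up on LBRs if it cannot be safely written:
 *
 *	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 */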
5157
5158static __init void intel_sandybridge_quirk(void)
5159{
5160 x86_pmu.check_microcode = intel_snb_check_microcode;
5161 cpus_read_lock();
5162 intel_snb_check_microcode();
5163 cpus_read_unlock();
5164}
5165
5166static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
5167 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
5168 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
5169 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
5170 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
5171 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
5172 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
5173 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
5174};
5175
5176static __init void intel_arch_events_quirk(void)
5177{
5178 int bit;
5179
5180	/* disable events that CPUID reports as not present */
5181 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
5182 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
5183 pr_warn("CPUID marked event: \'%s\' unavailable\n",
5184 intel_arch_events_map[bit].name);
5185 }
5186}
5187
5188static __init void intel_nehalem_quirk(void)
5189{
5190 union cpuid10_ebx ebx;
5191
5192 ebx.full = x86_pmu.events_maskl;
5193 if (ebx.split.no_branch_misses_retired) {
5194 /*
5195 * Erratum AAJ80 detected, we work it around by using
5196 * the BR_MISP_EXEC.ANY event. This will over-count
5197 * branch-misses, but it's still much better than the
5198 * architectural event which is often completely bogus:
5199 */
5200 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
5201 ebx.split.no_branch_misses_retired = 0;
5202 x86_pmu.events_maskl = ebx.full;
5203 pr_info("CPU erratum AAJ80 worked around\n");
5204 }
5205}
5206
5207/*
5208 * enable software workaround for errata:
5209 * SNB: BJ122
5210 * IVB: BV98
5211 * HSW: HSD29
5212 *
5213 * Only needed when HT is enabled. However, detecting
5214 * whether HT is enabled is difficult (model specific). So instead,
5215 * we enable the workaround at early boot, and verify whether
5216 * it is needed in a later initcall phase, once we have valid
5217 * topology information to check if HT is actually enabled.
5218 */
5219static __init void intel_ht_bug(void)
5220{
5221 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
5222
5223 x86_pmu.start_scheduling = intel_start_scheduling;
5224 x86_pmu.commit_scheduling = intel_commit_scheduling;
5225 x86_pmu.stop_scheduling = intel_stop_scheduling;
5226}
5227
5228EVENT_ATTR_STR(mem-loads, mem_ld_hsw, "event=0xcd,umask=0x1,ldlat=3");
5229EVENT_ATTR_STR(mem-stores, mem_st_hsw, "event=0xd0,umask=0x82")
5230
5231/* Haswell special events */
5232EVENT_ATTR_STR(tx-start, tx_start, "event=0xc9,umask=0x1");
5233EVENT_ATTR_STR(tx-commit, tx_commit, "event=0xc9,umask=0x2");
5234EVENT_ATTR_STR(tx-abort, tx_abort, "event=0xc9,umask=0x4");
5235EVENT_ATTR_STR(tx-capacity, tx_capacity, "event=0x54,umask=0x2");
5236EVENT_ATTR_STR(tx-conflict, tx_conflict, "event=0x54,umask=0x1");
5237EVENT_ATTR_STR(el-start, el_start, "event=0xc8,umask=0x1");
5238EVENT_ATTR_STR(el-commit, el_commit, "event=0xc8,umask=0x2");
5239EVENT_ATTR_STR(el-abort, el_abort, "event=0xc8,umask=0x4");
5240EVENT_ATTR_STR(el-capacity, el_capacity, "event=0x54,umask=0x2");
5241EVENT_ATTR_STR(el-conflict, el_conflict, "event=0x54,umask=0x1");
5242EVENT_ATTR_STR(cycles-t, cycles_t, "event=0x3c,in_tx=1");
5243EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
5244
5245static struct attribute *hsw_events_attrs[] = {
5246 EVENT_PTR(td_slots_issued),
5247 EVENT_PTR(td_slots_retired),
5248 EVENT_PTR(td_fetch_bubbles),
5249 EVENT_PTR(td_total_slots),
5250 EVENT_PTR(td_total_slots_scale),
5251 EVENT_PTR(td_recovery_bubbles),
5252 EVENT_PTR(td_recovery_bubbles_scale),
5253 NULL
5254};
5255
5256static struct attribute *hsw_mem_events_attrs[] = {
5257 EVENT_PTR(mem_ld_hsw),
5258 EVENT_PTR(mem_st_hsw),
5259 NULL,
5260};
5261
5262static struct attribute *hsw_tsx_events_attrs[] = {
5263 EVENT_PTR(tx_start),
5264 EVENT_PTR(tx_commit),
5265 EVENT_PTR(tx_abort),
5266 EVENT_PTR(tx_capacity),
5267 EVENT_PTR(tx_conflict),
5268 EVENT_PTR(el_start),
5269 EVENT_PTR(el_commit),
5270 EVENT_PTR(el_abort),
5271 EVENT_PTR(el_capacity),
5272 EVENT_PTR(el_conflict),
5273 EVENT_PTR(cycles_t),
5274 EVENT_PTR(cycles_ct),
5275 NULL
5276};
5277
5278EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
5279EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
5280EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
5281EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
5282
5283static struct attribute *icl_events_attrs[] = {
5284 EVENT_PTR(mem_ld_hsw),
5285 EVENT_PTR(mem_st_hsw),
5286 NULL,
5287};
5288
5289static struct attribute *icl_td_events_attrs[] = {
5290 EVENT_PTR(slots),
5291 EVENT_PTR(td_retiring),
5292 EVENT_PTR(td_bad_spec),
5293 EVENT_PTR(td_fe_bound),
5294 EVENT_PTR(td_be_bound),
5295 NULL,
5296};
5297
5298static struct attribute *icl_tsx_events_attrs[] = {
5299 EVENT_PTR(tx_start),
5300 EVENT_PTR(tx_abort),
5301 EVENT_PTR(tx_commit),
5302 EVENT_PTR(tx_capacity_read),
5303 EVENT_PTR(tx_capacity_write),
5304 EVENT_PTR(tx_conflict),
5305 EVENT_PTR(el_start),
5306 EVENT_PTR(el_abort),
5307 EVENT_PTR(el_commit),
5308 EVENT_PTR(el_capacity_read),
5309 EVENT_PTR(el_capacity_write),
5310 EVENT_PTR(el_conflict),
5311 EVENT_PTR(cycles_t),
5312 EVENT_PTR(cycles_ct),
5313 NULL,
5314};
5315
5316
5317EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2");
5318EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82");
5319
5320static struct attribute *spr_events_attrs[] = {
5321 EVENT_PTR(mem_ld_hsw),
5322 EVENT_PTR(mem_st_spr),
5323 EVENT_PTR(mem_ld_aux),
5324 NULL,
5325};
5326
5327static struct attribute *spr_td_events_attrs[] = {
5328 EVENT_PTR(slots),
5329 EVENT_PTR(td_retiring),
5330 EVENT_PTR(td_bad_spec),
5331 EVENT_PTR(td_fe_bound),
5332 EVENT_PTR(td_be_bound),
5333 EVENT_PTR(td_heavy_ops),
5334 EVENT_PTR(td_br_mispredict),
5335 EVENT_PTR(td_fetch_lat),
5336 EVENT_PTR(td_mem_bound),
5337 NULL,
5338};
5339
5340static struct attribute *spr_tsx_events_attrs[] = {
5341 EVENT_PTR(tx_start),
5342 EVENT_PTR(tx_abort),
5343 EVENT_PTR(tx_commit),
5344 EVENT_PTR(tx_capacity_read),
5345 EVENT_PTR(tx_capacity_write),
5346 EVENT_PTR(tx_conflict),
5347 EVENT_PTR(cycles_t),
5348 EVENT_PTR(cycles_ct),
5349 NULL,
5350};
5351
5352static ssize_t freeze_on_smi_show(struct device *cdev,
5353 struct device_attribute *attr,
5354 char *buf)
5355{
5356 return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
5357}
5358
5359static DEFINE_MUTEX(freeze_on_smi_mutex);
5360
5361static ssize_t freeze_on_smi_store(struct device *cdev,
5362 struct device_attribute *attr,
5363 const char *buf, size_t count)
5364{
5365 unsigned long val;
5366 ssize_t ret;
5367
5368 ret = kstrtoul(buf, 0, &val);
5369 if (ret)
5370 return ret;
5371
5372 if (val > 1)
5373 return -EINVAL;
5374
5375 mutex_lock(&freeze_on_smi_mutex);
5376
5377 if (x86_pmu.attr_freeze_on_smi == val)
5378 goto done;
5379
5380 x86_pmu.attr_freeze_on_smi = val;
5381
5382 cpus_read_lock();
5383 on_each_cpu(flip_smm_bit, &val, 1);
5384 cpus_read_unlock();
5385done:
5386 mutex_unlock(&freeze_on_smi_mutex);
5387
5388 return count;
5389}
5390
5391static void update_tfa_sched(void *ignored)
5392{
5393 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
5394
5395 /*
5396 * check if PMC3 is used
5397	 * and, if so, force a schedule-out for all event types in all contexts
5398 */
	if (test_bit(3, cpuc->active_mask))
		perf_pmu_resched(x86_get_pmu(smp_processor_id()));
}

static ssize_t show_sysctl_tfa(struct device *cdev,
			       struct device_attribute *attr,
			       char *buf)
{
	return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
}

static ssize_t set_sysctl_tfa(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	bool val;
	ssize_t ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	/* no change */
	if (val == allow_tsx_force_abort)
		return count;

	allow_tsx_force_abort = val;

	cpus_read_lock();
	on_each_cpu(update_tfa_sched, NULL, 1);
	cpus_read_unlock();

	return count;
}

static DEVICE_ATTR_RW(freeze_on_smi);
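
/*
 * Illustrative usage sketch (not part of the driver): freeze_on_smi is
 * exposed through sysfs, typically as
 * /sys/bus/event_source/devices/cpu/freeze_on_smi. Writing "1" sets the
 * FREEZE_WHILE_SMM bit on every CPU via flip_smm_bit(), so the counters
 * stop while firmware runs in SMM:
 *
 *   # echo 1 > /sys/bus/event_source/devices/cpu/freeze_on_smi
 *   # cat /sys/bus/event_source/devices/cpu/freeze_on_smi
 *   1
 */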

static ssize_t branches_show(struct device *cdev,
			     struct device_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
}

static DEVICE_ATTR_RO(branches);
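
/*
 * Illustrative sketch: "branches" is placed in the "caps" group below,
 * so it typically shows up as
 * /sys/bus/event_source/devices/cpu/caps/branches and reports the LBR
 * stack depth, e.g.:
 *
 *   $ cat /sys/bus/event_source/devices/cpu/caps/branches
 *   32
 */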

static struct attribute *lbr_attrs[] = {
	&dev_attr_branches.attr,
	NULL
};

static char pmu_name_str[30];

static ssize_t pmu_name_show(struct device *cdev,
			     struct device_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
}

static DEVICE_ATTR_RO(pmu_name);
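
/*
 * Illustrative sketch: pmu_name is published under the "caps" group,
 * e.g. /sys/bus/event_source/devices/cpu/caps/pmu_name, and reports the
 * name string chosen at the end of intel_pmu_init():
 *
 *   $ cat /sys/bus/event_source/devices/cpu/caps/pmu_name
 *   skylake
 */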

static struct attribute *intel_pmu_caps_attrs[] = {
	&dev_attr_pmu_name.attr,
	NULL
};

static DEVICE_ATTR(allow_tsx_force_abort, 0644,
		   show_sysctl_tfa,
		   set_sysctl_tfa);
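
/*
 * Illustrative sketch: on parts with the TFA erratum this attribute
 * typically appears as
 * /sys/bus/event_source/devices/cpu/allow_tsx_force_abort. Writing "0"
 * keeps PMC3 away from perf so TSX transactions are not force-aborted;
 * update_tfa_sched() above then reschedules events on any CPU where
 * PMC3 was in use:
 *
 *   # echo 0 > /sys/bus/event_source/devices/cpu/allow_tsx_force_abort
 */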

static struct attribute *intel_pmu_attrs[] = {
	&dev_attr_freeze_on_smi.attr,
	&dev_attr_allow_tsx_force_abort.attr,
	NULL,
};

static umode_t
tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return boot_cpu_has(X86_FEATURE_RTM) ? attr->mode : 0;
}

static umode_t
pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.pebs ? attr->mode : 0;
}

static umode_t
mem_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	if (attr == &event_attr_mem_ld_aux.attr.attr)
		return x86_pmu.flags & PMU_FL_MEM_LOADS_AUX ? attr->mode : 0;

	return pebs_is_visible(kobj, attr, i);
}

static umode_t
lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.lbr_nr ? attr->mode : 0;
}

static umode_t
extra_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return x86_pmu.version >= 2 ? attr->mode : 0;
}

static umode_t
default_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	if (attr == &dev_attr_allow_tsx_force_abort.attr)
		return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0;

	return attr->mode;
}
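
/*
 * Note on the ->is_visible() callbacks above: sysfs calls them once per
 * attribute when a group is registered; returning 0 hides the file
 * entirely, while returning attr->mode keeps it with its default
 * permissions. This lets one attribute set serve every CPU model.
 */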

static struct attribute_group group_events_td = {
	.name = "events",
};

static struct attribute_group group_events_mem = {
	.name = "events",
	.is_visible = mem_is_visible,
};

static struct attribute_group group_events_tsx = {
	.name = "events",
	.is_visible = tsx_is_visible,
};

static struct attribute_group group_caps_gen = {
	.name = "caps",
	.attrs = intel_pmu_caps_attrs,
};

static struct attribute_group group_caps_lbr = {
	.name = "caps",
	.attrs = lbr_attrs,
	.is_visible = lbr_is_visible,
};

static struct attribute_group group_format_extra = {
	.name = "format",
	.is_visible = extra_is_visible,
};

static struct attribute_group group_format_extra_skl = {
	.name = "format",
	.is_visible = extra_is_visible,
};

static struct attribute_group group_default = {
	.attrs = intel_pmu_attrs,
	.is_visible = default_is_visible,
};

static const struct attribute_group *attr_update[] = {
	&group_events_td,
	&group_events_mem,
	&group_events_tsx,
	&group_caps_gen,
	&group_caps_lbr,
	&group_format_extra,
	&group_format_extra_skl,
	&group_default,
	NULL,
};

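/*
 * The hybrid event strings below pack one encoding per CPU type,
 * separated by ';' and listed in hybrid PMU index order (big core
 * first, then atom); see the "Must be in IDX order" note further down.
 * Attributes tagged hybrid_big carry only the big-core encoding.
 */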
EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);

static struct attribute *adl_hybrid_events_attrs[] = {
	EVENT_PTR(slots_adl),
	EVENT_PTR(td_retiring_adl),
	EVENT_PTR(td_bad_spec_adl),
	EVENT_PTR(td_fe_bound_adl),
	EVENT_PTR(td_be_bound_adl),
	EVENT_PTR(td_heavy_ops_adl),
	EVENT_PTR(td_br_mis_adl),
	EVENT_PTR(td_fetch_lat_adl),
	EVENT_PTR(td_mem_bound_adl),
	NULL,
};

/* Must be in IDX order */
EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);

static struct attribute *adl_hybrid_mem_attrs[] = {
	EVENT_PTR(mem_ld_adl),
	EVENT_PTR(mem_st_adl),
	EVENT_PTR(mem_ld_aux_adl),
	NULL,
};

static struct attribute *mtl_hybrid_mem_attrs[] = {
	EVENT_PTR(mem_ld_adl),
	EVENT_PTR(mem_st_adl),
	NULL
};

EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);

static struct attribute *adl_hybrid_tsx_attrs[] = {
	EVENT_PTR(tx_start_adl),
	EVENT_PTR(tx_abort_adl),
	EVENT_PTR(tx_commit_adl),
	EVENT_PTR(tx_capacity_read_adl),
	EVENT_PTR(tx_capacity_write_adl),
	EVENT_PTR(tx_conflict_adl),
	EVENT_PTR(cycles_t_adl),
	EVENT_PTR(cycles_ct_adl),
	NULL,
};

FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
FORMAT_ATTR_HYBRID(frontend, hybrid_big);

#define ADL_HYBRID_RTM_FORMAT_ATTR	\
	FORMAT_HYBRID_PTR(in_tx),	\
	FORMAT_HYBRID_PTR(in_tx_cp)

#define ADL_HYBRID_FORMAT_ATTR		\
	FORMAT_HYBRID_PTR(offcore_rsp),	\
	FORMAT_HYBRID_PTR(ldlat),	\
	FORMAT_HYBRID_PTR(frontend)

static struct attribute *adl_hybrid_extra_attr_rtm[] = {
	ADL_HYBRID_RTM_FORMAT_ATTR,
	ADL_HYBRID_FORMAT_ATTR,
	NULL
};

static struct attribute *adl_hybrid_extra_attr[] = {
	ADL_HYBRID_FORMAT_ATTR,
	NULL
};

PMU_FORMAT_ATTR_SHOW(snoop_rsp, "config1:0-63");
FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);

static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
	ADL_HYBRID_RTM_FORMAT_ATTR,
	ADL_HYBRID_FORMAT_ATTR,
	FORMAT_HYBRID_PTR(snoop_rsp),
	NULL
};

static struct attribute *mtl_hybrid_extra_attr[] = {
	ADL_HYBRID_FORMAT_ATTR,
	FORMAT_HYBRID_PTR(snoop_rsp),
	NULL
};

static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
{
	struct device *dev = kobj_to_dev(kobj);
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
	struct perf_pmu_events_hybrid_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);

	return pmu->cpu_type & pmu_attr->pmu_type;
}

static umode_t hybrid_events_is_visible(struct kobject *kobj,
					struct attribute *attr, int i)
{
	return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
}

static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
{
	int cpu = cpumask_first(&pmu->supported_cpus);

	return (cpu >= nr_cpu_ids) ? -1 : cpu;
}

static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
	int cpu = hybrid_find_supported_cpu(pmu);

	return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
}

static umode_t hybrid_format_is_visible(struct kobject *kobj,
					struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
	struct perf_pmu_format_hybrid_attr *pmu_attr =
		container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
	int cpu = hybrid_find_supported_cpu(pmu);

	return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
}

static struct attribute_group hybrid_group_events_td = {
	.name = "events",
	.is_visible = hybrid_events_is_visible,
};

static struct attribute_group hybrid_group_events_mem = {
	.name = "events",
	.is_visible = hybrid_events_is_visible,
};

static struct attribute_group hybrid_group_events_tsx = {
	.name = "events",
	.is_visible = hybrid_tsx_is_visible,
};

static struct attribute_group hybrid_group_format_extra = {
	.name = "format",
	.is_visible = hybrid_format_is_visible,
};

static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct x86_hybrid_pmu *pmu =
		container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);

	return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
static struct attribute *intel_hybrid_cpus_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static struct attribute_group hybrid_group_cpus = {
	.attrs = intel_hybrid_cpus_attrs,
};
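
/*
 * Illustrative sketch: each hybrid PMU exports the CPUs it covers, so
 * tools can open events on the right cores. On an assumed 16+8 part:
 *
 *   $ cat /sys/bus/event_source/devices/cpu_core/cpus
 *   0-15
 *   $ cat /sys/bus/event_source/devices/cpu_atom/cpus
 *   16-23
 */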

static const struct attribute_group *hybrid_attr_update[] = {
	&hybrid_group_events_td,
	&hybrid_group_events_mem,
	&hybrid_group_events_tsx,
	&group_caps_gen,
	&group_caps_lbr,
	&hybrid_group_format_extra,
	&group_default,
	&hybrid_group_cpus,
	NULL,
};

static struct attribute *empty_attrs;

static void intel_pmu_check_num_counters(int *num_counters,
					 int *num_counters_fixed,
					 u64 *intel_ctrl, u64 fixed_mask)
{
	if (*num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     *num_counters, INTEL_PMC_MAX_GENERIC);
		*num_counters = INTEL_PMC_MAX_GENERIC;
	}
	*intel_ctrl = (1ULL << *num_counters) - 1;

	if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     *num_counters_fixed, INTEL_PMC_MAX_FIXED);
		*num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	*intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
}
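
/*
 * Worked example for the helper above: with 8 generic counters and a
 * fixed_mask of 0x7, intel_ctrl becomes ((1ULL << 8) - 1) |
 * (0x7ULL << 32) = 0x7000000ff, i.e. bits 0-7 enable the generic
 * counters and bits 32-34 the fixed ones (INTEL_PMC_IDX_FIXED is 32).
 */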

static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
					      int num_counters,
					      int num_counters_fixed,
					      u64 intel_ctrl)
{
	struct event_constraint *c;

	if (!event_constraints)
		return;

	/*
	 * The event on fixed counter 2 (REF_CYCLES) only works on this
	 * counter, so do not extend the mask to the generic counters.
	 */
	for_each_event_constraint(c, event_constraints) {
		/*
		 * Don't extend the topdown slots and metrics
		 * events to the generic counters.
		 */
		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
			/*
			 * Disable the topdown slots and metrics events,
			 * if the slots event is not in CPUID.
			 */
			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
				c->idxmsk64 = 0;
			c->weight = hweight64(c->idxmsk64);
			continue;
		}

		if (c->cmask == FIXED_EVENT_FLAGS) {
			/* Disable fixed counters which are not enumerated in CPUID */
			c->idxmsk64 &= intel_ctrl;

			/*
			 * Don't extend the pseudo-encoding to the
			 * generic counters.
			 */
			if (!use_fixed_pseudo_encoding(c->code))
				c->idxmsk64 |= (1ULL << num_counters) - 1;
		}
		c->idxmsk64 &=
			~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
		c->weight = hweight64(c->idxmsk64);
	}
}

static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
{
	struct extra_reg *er;

	/*
	 * Accessing the extra MSRs may cause #GP under certain
	 * circumstances, e.g. KVM doesn't support the offcore events.
	 * Check all extra_regs here.
	 */
	if (!extra_regs)
		return;

	for (er = extra_regs; er->msr; er++) {
		er->extra_msr_access = check_msr(er->msr, 0x11UL);
		/* Disable LBR select mapping */
		if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
			x86_pmu.lbr_sel_map = NULL;
	}
}

static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
{
	struct x86_hybrid_pmu *pmu;
	int i;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
		pmu = &x86_pmu.hybrid_pmu[i];

		intel_pmu_check_num_counters(&pmu->num_counters,
					     &pmu->num_counters_fixed,
					     &pmu->intel_ctrl,
					     fixed_mask);

		if (pmu->intel_cap.perf_metrics) {
			pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
			pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
		}

		if (pmu->intel_cap.pebs_output_pt_available)
			pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;

		intel_pmu_check_event_constraints(pmu->event_constraints,
						  pmu->num_counters,
						  pmu->num_counters_fixed,
						  pmu->intel_ctrl);

		intel_pmu_check_extra_regs(pmu->extra_regs);
	}
}

static __always_inline bool is_mtl(u8 x86_model)
{
	return (x86_model == INTEL_FAM6_METEORLAKE) ||
	       (x86_model == INTEL_FAM6_METEORLAKE_L);
}

__init int intel_pmu_init(void)
{
	struct attribute **extra_skl_attr = &empty_attrs;
	struct attribute **extra_attr = &empty_attrs;
	struct attribute **td_attr = &empty_attrs;
	struct attribute **mem_attr = &empty_attrs;
	struct attribute **tsx_attr = &empty_attrs;
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	unsigned int fixed_mask;
	bool pmem = false;
	int version, i;
	char *name;
	struct x86_hybrid_pmu *pmu;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
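	/*
	 * For reference (per the SDM's architectural perfmon
	 * description): leaf 0xA packs the version in EAX[7:0], the
	 * generic counter count in EAX[15:8], the counter width in
	 * EAX[23:16] and the EBX event-mask length in EAX[31:24]; EDX
	 * carries the fixed-counter count and width, and on v5+ ECX
	 * (fixed_mask below) is a bitmap of supported fixed counters.
	 */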
	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl = ebx.full;
	x86_pmu.events_mask_len = eax.split.mask_length;

	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
	x86_pmu.pebs_capable = PEBS_COUNTER_MASK;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events, when not running in a hypervisor:
	 */
	if (version > 1 && version < 5) {
		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);

		x86_pmu.num_counters_fixed =
			max((int)edx.split.num_counters_fixed, assume);

		fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
	} else if (version >= 5)
		x86_pmu.num_counters_fixed = fls(fixed_mask);
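
	/*
	 * E.g. a bare-metal v4 CPU whose EDX reports zero fixed
	 * counters still gets num_counters_fixed = 3 and fixed_mask =
	 * 0x7; on v5+, fls() turns the CPUID bitmap into a count, so a
	 * fixed_mask of 0xb yields 4 (the bitmap itself stays in
	 * fixed_mask).
	 */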

	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) {
		x86_pmu.lbr_reset = intel_pmu_lbr_reset_32;
		x86_pmu.lbr_read = intel_pmu_lbr_read_32;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_LBR))
		intel_pmu_arch_lbr_init();

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	if (version >= 5) {
		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
		if (x86_pmu.intel_cap.anythread_deprecated)
			pr_cont(" AnyThread deprecated, ");
	}

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_CORE_YONAH:
		pr_cont("Core events, ");
		name = "core";
		break;

	case INTEL_FAM6_CORE2_MEROM:
		x86_add_quirk(intel_clovertown_quirk);
		fallthrough;

	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		name = "core2";
		break;

	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;
		x86_pmu.limit_period = nhm_limit_period;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		x86_add_quirk(intel_nehalem_quirk);
		x86_pmu.pebs_no_tlb = 1;
		extra_attr = nhm_format_attr;

		pr_cont("Nehalem events, ");
		name = "nehalem";
		break;

	case INTEL_FAM6_ATOM_BONNELL:
	case INTEL_FAM6_ATOM_BONNELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL:
	case INTEL_FAM6_ATOM_SALTWELL_MID:
	case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
		pr_cont("Atom events, ");
		name = "bonnell";
		break;

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_D:
	case INTEL_FAM6_ATOM_SILVERMONT_MID:
	case INTEL_FAM6_ATOM_AIRMONT:
	case INTEL_FAM6_ATOM_AIRMONT_MID:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_slm();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = slm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Silvermont events, ");
		name = "silvermont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_glm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 * :pp is identical to :ppp
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		td_attr = glm_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Goldmont events, ");
		name = "goldmont";
		break;

	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_glm_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.pebs_capable = ~0ULL;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_PEBS_ALL;
		x86_pmu.get_event_constraints = glp_get_event_constraints;
		td_attr = glm_events_attrs;
		/* Goldmont Plus has 4-wide pipeline */
		event_attr_td_total_slots_scale_glm.event_str = "4";
		extra_attr = slm_format_attr;
		pr_cont("Goldmont plus events, ");
		name = "goldmont_plus";
		break;

	case INTEL_FAM6_ATOM_TREMONT_D:
	case INTEL_FAM6_ATOM_TREMONT:
	case INTEL_FAM6_ATOM_TREMONT_L:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;

		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.extra_regs = intel_tnt_extra_regs;
		/*
		 * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
		 * for precise cycles.
		 */
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		td_attr = tnt_events_attrs;
		extra_attr = slm_format_attr;
		pr_cont("Tremont events, ");
		name = "Tremont";
		break;

	case INTEL_FAM6_ALDERLAKE_N:
		x86_pmu.mid_ack = true;
		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
		x86_pmu.extra_regs = intel_grt_extra_regs;

		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.pebs_block = true;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;

		intel_pmu_pebs_data_source_grt();
		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.get_event_constraints = tnt_get_event_constraints;
		x86_pmu.limit_period = spr_limit_period;
		td_attr = tnt_events_attrs;
		mem_attr = grt_mem_attrs;
		extra_attr = nhm_format_attr;
		pr_cont("Gracemont events, ");
		name = "gracemont";
		break;

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		mem_attr = nhm_mem_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		intel_pmu_pebs_data_source_nhm();
		extra_attr = nhm_format_attr;
		pr_cont("Westmere events, ");
		name = "westmere";
		break;

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("SandyBridge events, ");
		name = "sandybridge";
		break;

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different from SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		td_attr = snb_events_attrs;
		mem_attr = snb_mem_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		extra_attr = nhm_format_attr;

		pr_cont("IvyBridge events, ");
		name = "ivybridge";
		break;

	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
		x86_add_quirk(intel_ht_bug);
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.lbr_double_abort = true;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Haswell events, ");
		name = "haswell";
		break;

	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] = HSW_DEMAND_READ |
									 BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = HSW_DEMAND_WRITE|BDW_L3_MISS|
									  HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = HSW_DEMAND_READ|
									     BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = HSW_DEMAND_WRITE|
									      BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_bdw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.limit_period = bdw_limit_period;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		pr_cont("Broadwell events, ");
		name = "broadwell";
		break;

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,
		       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_knl();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_knl_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		extra_attr = slm_format_attr;
		pr_cont("Knights Landing/Mill events, ");
		name = "knights-landing";
		break;

	case INTEL_FAM6_SKYLAKE_X:
		pmem = true;
		fallthrough;
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
	case INTEL_FAM6_COMETLAKE_L:
	case INTEL_FAM6_COMETLAKE:
		x86_add_quirk(intel_pebs_isolation_quirk);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
		event_attr_td_recovery_bubbles.event_str_noht =
			"event=0xd,umask=0x1,cmask=1";
		event_attr_td_recovery_bubbles.event_str_ht =
			"event=0xd,umask=0x1,cmask=1,any=1";

		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_skl;
		x86_pmu.pebs_prec_dist = true;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		td_attr = hsw_events_attrs;
		mem_attr = hsw_mem_events_attrs;
		tsx_attr = hsw_tsx_events_attrs;
		intel_pmu_pebs_data_source_skl(pmem);

		/*
		 * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
		 * TSX force abort hooks are not required on these systems. Only deploy
		 * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
		 */
		if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
		    !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
			x86_pmu.flags |= PMU_FL_TFA;
			x86_pmu.get_event_constraints = tfa_get_event_constraints;
			x86_pmu.enable_all = intel_tfa_pmu_enable_all;
			x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
		}

		pr_cont("Skylake events, ");
		name = "skylake";
		break;

	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
		x86_pmu.pebs_ept = 1;
		pmem = true;
		fallthrough;
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_ROCKETLAKE:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		intel_pmu_lbr_init_skl();

		x86_pmu.event_constraints = intel_icl_event_constraints;
		x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_icl_extra_regs;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = icl_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = icl_events_attrs;
		td_attr = icl_td_events_attrs;
		tsx_attr = icl_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		x86_pmu.num_topdown_events = 4;
		static_call_update(intel_pmu_update_topdown_event,
				   &icl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &icl_set_topdown_event_period);
		pr_cont("Icelake events, ");
		name = "icelake";
		break;

	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_EMERALDRAPIDS_X:
		x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
		x86_pmu.extra_regs = intel_spr_extra_regs;
		fallthrough;
	case INTEL_FAM6_GRANITERAPIDS_X:
	case INTEL_FAM6_GRANITERAPIDS_D:
		pmem = true;
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		x86_pmu.event_constraints = intel_spr_event_constraints;
		x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints;
		if (!x86_pmu.extra_regs)
			x86_pmu.extra_regs = intel_gnr_extra_regs;
		x86_pmu.limit_period = spr_limit_period;
		x86_pmu.pebs_ept = 1;
		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.pebs_block = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = spr_get_event_constraints;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			hsw_format_attr : nhm_format_attr;
		extra_skl_attr = skl_format_attr;
		mem_attr = spr_events_attrs;
		td_attr = spr_td_events_attrs;
		tsx_attr = spr_tsx_events_attrs;
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
		x86_pmu.lbr_pt_coexist = true;
		intel_pmu_pebs_data_source_skl(pmem);
		x86_pmu.num_topdown_events = 8;
		static_call_update(intel_pmu_update_topdown_event,
				   &icl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &icl_set_topdown_event_period);
		pr_cont("Sapphire Rapids events, ");
		name = "sapphire_rapids";
		break;

	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_RAPTORLAKE:
	case INTEL_FAM6_RAPTORLAKE_P:
	case INTEL_FAM6_RAPTORLAKE_S:
	case INTEL_FAM6_METEORLAKE:
	case INTEL_FAM6_METEORLAKE_L:
		/*
		 * Alder Lake has 2 types of CPU, core and atom.
		 *
		 * Initialize the common PerfMon capabilities here.
		 */
		x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
					     sizeof(struct x86_hybrid_pmu),
					     GFP_KERNEL);
		if (!x86_pmu.hybrid_pmu)
			return -ENOMEM;
		static_branch_enable(&perf_is_hybrid);
		x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;

		x86_pmu.pebs_aliases = NULL;
		x86_pmu.pebs_prec_dist = true;
		x86_pmu.pebs_block = true;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
		x86_pmu.lbr_pt_coexist = true;
		x86_pmu.pebs_latency_data = adl_latency_data_small;
		x86_pmu.num_topdown_events = 8;
		static_call_update(intel_pmu_update_topdown_event,
				   &adl_update_topdown_event);
		static_call_update(intel_pmu_set_topdown_event_period,
				   &adl_set_topdown_event_period);

		x86_pmu.filter = intel_pmu_filter;
		x86_pmu.get_event_constraints = adl_get_event_constraints;
		x86_pmu.hw_config = adl_hw_config;
		x86_pmu.limit_period = spr_limit_period;
		x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
		/*
		 * The rtm_abort_event is used to check whether to enable GPRs
		 * for the RTM abort event. Atom doesn't have the RTM abort
		 * event. There is no harm in setting it in the common
		 * x86_pmu.rtm_abort_event.
		 */
		x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);

		td_attr = adl_hybrid_events_attrs;
		mem_attr = adl_hybrid_mem_attrs;
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		pmu->name = "cpu_core";
		pmu->cpu_type = hybrid_big;
		pmu->late_ack = true;
		if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
			pmu->num_counters = x86_pmu.num_counters + 2;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
		} else {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		/*
		 * Quirk: For some Alder Lake machines, when all E-cores are
		 * disabled in the BIOS, leaf 0xA already enumerates all the
		 * counters of the P-cores, yet X86_FEATURE_HYBRID_CPU remains
		 * set, so the code above would mistakenly add the extra
		 * counters for the P-cores. Correct the number of counters
		 * here.
		 */
		if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
			pmu->num_counters = x86_pmu.num_counters;
			pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		}

		pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);
		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
		pmu->intel_cap.perf_metrics = 1;
		pmu->intel_cap.pebs_output_pt_available = 0;

		memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
		memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
		pmu->event_constraints = intel_spr_event_constraints;
		pmu->pebs_constraints = intel_spr_pebs_event_constraints;
		pmu->extra_regs = intel_spr_extra_regs;

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		pmu->name = "cpu_atom";
		pmu->cpu_type = hybrid_small;
		pmu->mid_ack = true;
		pmu->num_counters = x86_pmu.num_counters;
		pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
		pmu->max_pebs_events = x86_pmu.max_pebs_events;
		pmu->unconstrained = (struct event_constraint)
				     __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
							0, pmu->num_counters, 0, 0);
		pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
		pmu->intel_cap.perf_metrics = 0;
		pmu->intel_cap.pebs_output_pt_available = 1;

		memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
		memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
		pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
		pmu->event_constraints = intel_slm_event_constraints;
		pmu->pebs_constraints = intel_grt_pebs_event_constraints;
		pmu->extra_regs = intel_grt_extra_regs;
		if (is_mtl(boot_cpu_data.x86_model)) {
			x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs;
			x86_pmu.pebs_latency_data = mtl_latency_data_small;
			extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
				mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
			mem_attr = mtl_hybrid_mem_attrs;
			intel_pmu_pebs_data_source_mtl();
			x86_pmu.get_event_constraints = mtl_get_event_constraints;
			pmu->extra_regs = intel_cmt_extra_regs;
			pr_cont("Meteorlake Hybrid events, ");
			name = "meteorlake_hybrid";
		} else {
			x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
			intel_pmu_pebs_data_source_adl();
			pr_cont("Alderlake Hybrid events, ");
			name = "alderlake_hybrid";
		}
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			name = "generic_arch_v1";
			break;
		case 2:
		case 3:
		case 4:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v2+";
			break;
		default:
			/*
			 * The default constraints for v5 and up can support up
			 * to 16 fixed counters. For fixed counters 4 and later,
			 * the pseudo-encoding is applied.
			 * The constraint list may be truncated to match the
			 * CPUID enumeration by inserting EVENT_CONSTRAINT_END.
			 */
			if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED)
				x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
			intel_v5_gen_event_constraints[x86_pmu.num_counters_fixed].weight = -1;
			x86_pmu.event_constraints = intel_v5_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			name = "generic_arch_v5+";
			break;
		}
	}

	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

	if (!is_hybrid()) {
		group_events_td.attrs = td_attr;
		group_events_mem.attrs = mem_attr;
		group_events_tsx.attrs = tsx_attr;
		group_format_extra.attrs = extra_attr;
		group_format_extra_skl.attrs = extra_skl_attr;

		x86_pmu.attr_update = attr_update;
	} else {
		hybrid_group_events_td.attrs = td_attr;
		hybrid_group_events_mem.attrs = mem_attr;
		hybrid_group_events_tsx.attrs = tsx_attr;
		hybrid_group_format_extra.attrs = extra_attr;

		x86_pmu.attr_update = hybrid_attr_update;
	}

	intel_pmu_check_num_counters(&x86_pmu.num_counters,
				     &x86_pmu.num_counters_fixed,
				     &x86_pmu.intel_ctrl,
				     (u64)fixed_mask);

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
		x86_pmu.format_attrs = intel_arch_formats_attr;

	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
					  x86_pmu.num_counters,
					  x86_pmu.num_counters_fixed,
					  x86_pmu.intel_ctrl);
	/*
	 * Accessing the LBR MSRs may cause #GP under certain
	 * circumstances. Check all of the LBR MSRs here and disable
	 * LBR access if any of them cannot be accessed.
	 */
	if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}
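
	/*
	 * check_msr() (defined earlier in this file) probes an MSR by
	 * writing its current value XORed with the given mask and
	 * verifying the read-back, so an MSR that would #GP (e.g. one a
	 * hypervisor does not implement) disables LBR here instead of
	 * faulting later in the hot path.
	 */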

	if (x86_pmu.lbr_nr) {
		intel_pmu_lbr_init();

		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

		/* only support branch_stack snapshot for perfmon >= v2 */
		if (x86_pmu.disable_all == intel_pmu_disable_all) {
			if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_arch_branch_stack);
			} else {
				static_call_update(perf_snapshot_branch_stack,
						   intel_pmu_snapshot_branch_stack);
			}
		}
	}

	intel_pmu_check_extra_regs(x86_pmu.extra_regs);

	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}
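
	/*
	 * E.g. with 48-bit counters, cntval_mask is 0xffffffffffff and
	 * max_period becomes 0x7fffffffffff. The legacy counter MSRs
	 * only take a 32-bit value (sign-extended to the counter
	 * width), which is why full-width writes need the alternative
	 * MSR_IA32_PMC0 range.
	 */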

	if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
		x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;

	if (is_hybrid())
		intel_pmu_check_hybrid_pmus((u64)fixed_mask);

	if (x86_pmu.intel_cap.pebs_timing_info)
		x86_pmu.flags |= PMU_FL_RETIRE_LATENCY;

	intel_aux_output_init();

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled
 * If HT is off, then we disable the workaround
 */
static __init int fixup_ht_bug(void)
{
	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

	cpus_read_lock();

	hardlockup_detector_perf_stop();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	hardlockup_detector_perf_restart();

	for_each_online_cpu(c)
		free_excl_cntrs(&per_cpu(cpu_hw_events, c));

	cpus_read_unlock();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug)