// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

#include <asm/arm_pmuv3.h>

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST 0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS 0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS 0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED

/*
 * ARMv8 architecturally defined events; not all of these may
 * be supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)

static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
	ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
	ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
	ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
	ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
	ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
	ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
	ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(trb_wrap, ARMV8_PMUV3_PERFCTR_TRB_WRAP),
	ARMV8_EVENT_ATTR(trb_trig, ARMV8_PMUV3_PERFCTR_TRB_TRIG),
	ARMV8_EVENT_ATTR(trcextout0, ARMV8_PMUV3_PERFCTR_TRCEXTOUT0),
	ARMV8_EVENT_ATTR(trcextout1, ARMV8_PMUV3_PERFCTR_TRCEXTOUT1),
	ARMV8_EVENT_ATTR(trcextout2, ARMV8_PMUV3_PERFCTR_TRCEXTOUT2),
	ARMV8_EVENT_ATTR(trcextout3, ARMV8_PMUV3_PERFCTR_TRCEXTOUT3),
	ARMV8_EVENT_ATTR(cti_trigout4, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4),
	ARMV8_EVENT_ATTR(cti_trigout5, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5),
	ARMV8_EVENT_ATTR(cti_trigout6, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6),
	ARMV8_EVENT_ATTR(cti_trigout7, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7),
	ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
	ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
	ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
	ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}

static const struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
PMU_FORMAT_ATTR(rdpmc, "config1:1");

static int sysctl_perf_user_access __read_mostly;

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
{
	return event->attr.config1 & 0x2;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	&format_attr_rdpmc.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};

static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
			& ARMV8_PMU_BUS_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", bus_slots);
}

static DEVICE_ATTR_RO(bus_slots);

static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
			& ARMV8_PMU_BUS_WIDTH_MASK;
	u32 val = 0;

	/* Encoded as Log2(number of bytes), plus one */
	if (bus_width > 2 && bus_width < 13)
		val = 1 << (bus_width - 1);

	return sysfs_emit(page, "0x%08x\n", val);
}

static DEVICE_ATTR_RO(bus_width);

static struct attribute *armv8_pmuv3_caps_attrs[] = {
	&dev_attr_slots.attr,
	&dev_attr_bus_slots.attr,
	&dev_attr_bus_width.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_caps_attr_group = {
	.name = "caps",
	.attrs = armv8_pmuv3_caps_attrs,
};

/*
 * Perf Events' indices
 */
#define ARMV8_IDX_CYCLE_COUNTER 0
#define ARMV8_IDX_COUNTER0 1
#define ARMV8_IDX_CYCLE_COUNTER_USER 32

/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 *
 * On AArch32, long counters make no sense (you can't access the top
 * bits), so we only enable this on AArch64.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}

static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
{
	return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}

/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event) or when user space counter access is enabled.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !armv8pmu_event_has_user_read(event) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV8_IDX_TO_COUNTER(x) \
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

static inline u32 armv8pmu_pmcr_read(void)
{
	return read_pmcr();
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_pmcr(val);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline u64 armv8pmu_read_evcntr(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	return read_pmevcntrn(counter);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = armv8pmu_read_evcntr(idx);

	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1) then we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return true;

	return false;
}

static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value |= GENMASK_ULL(63, 32);

	return value;
}

static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value &= ~GENMASK_ULL(63, 32);

	return value;
}

static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value;

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_pmccntr();
	else
		value = armv8pmu_read_hw_counter(event);

	return armv8pmu_unbias_long_counter(event, value);
}

static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_pmevcntrn(counter, value);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	value = armv8pmu_bias_long_counter(event, value);

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		write_pmccntr(value);
	else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	val &= ARMV8_PMU_EVTYPE_MASK;
	write_pmevtypern(counter, val);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		if (idx == ARMV8_IDX_CYCLE_COUNTER)
			write_pmccfiltr(hwc->config_base);
		else
			armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	u32 mask = BIT(counter);

	if (armv8pmu_event_is_chained(event))
		mask |= BIT(counter - 1);
	return mask;
}

static inline void armv8pmu_enable_counter(u32 mask)
{
	/*
	 * Make sure event configuration register writes are visible before we
	 * enable the counter.
	 */
	isb();
	write_pmcntenset(mask);
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_set_pmu_events(mask, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_enable_counter(mask);
}

static inline void armv8pmu_disable_counter(u32 mask)
{
	write_pmcntenclr(mask);
	/*
	 * Make sure the effects of disabling the counter are visible before we
	 * start configuring the event.
	 */
	isb();
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_clr_pmu_events(mask);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_disable_counter(mask);
}

static inline void armv8pmu_enable_intens(u32 mask)
{
	write_pmintenset(mask);
}

static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_enable_intens(BIT(counter));
}

static inline void armv8pmu_disable_intens(u32 mask)
{
	write_pmintenclr(mask);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_pmovsclr(mask);
	isb();
}

static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	armv8pmu_disable_intens(BIT(counter));
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_pmovsclr();

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_pmovsclr(value);

	return value;
}

static void armv8pmu_disable_user_access(void)
{
	write_pmuserenr(0);
}

static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
{
	int i;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);

	/* Clear any unused counters to avoid leaking their contents */
	for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
		if (i == ARMV8_IDX_CYCLE_COUNTER)
			write_pmccntr(0);
		else
			armv8pmu_write_evcntr(i, 0);
	}

	write_pmuserenr(0);
	write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */

	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Set event.
	 */
	armv8pmu_write_event_type(event);

	/*
	 * Enable interrupt for this counter
	 */
	armv8pmu_enable_event_irq(event);

	/*
	 * Enable counter
	 */
	armv8pmu_enable_event_counter(event);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	/*
	 * Disable counter
	 */
	armv8pmu_disable_event_counter(event);

	/*
	 * Disable interrupt for this counter
	 */
	armv8pmu_disable_event_irq(event);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	struct perf_event_context *ctx;
	int nr_user = 0;

	ctx = perf_cpu_task_ctx();
	if (ctx)
		nr_user = ctx->nr_user;

	if (sysctl_perf_user_access && nr_user)
		armv8pmu_enable_user_access(cpu_pmu);
	else
		armv8pmu_disable_user_access();

	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/*
		 * Perf event overflow will queue the processing of the event as
		 * an irq_work which will be taken care of in the handling of
		 * IPI_IRQ_WORK.
		 */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	return IRQ_HANDLED;
}

static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the Odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}

static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
		else if (armv8pmu_event_is_64bit(event) &&
			 armv8pmu_event_want_user_access(event) &&
			 !armv8pmu_has_long_event(cpu_pmu))
			return -EAGAIN;
	}

	/*
	 * Otherwise use events counters
	 */
	if (armv8pmu_event_is_chained(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

static int armv8pmu_user_event_idx(struct perf_event *event)
{
	if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
		return 0;

	/*
	 * We remap the cycle counter index to 32 to
	 * match the offset applied to the rest of
	 * the counter indices.
	 */
	if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
		return ARMV8_IDX_CYCLE_COUNTER_USER;

	return event->hw.idx;
}

/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 pmcr;

	/* The counter and interrupt enable registers are unknown at reset. */
	armv8pmu_disable_counter(U32_MAX);
	armv8pmu_disable_intens(U32_MAX);

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;

	/* Enable long event counter support where available */
	if (armv8pmu_has_long_event(cpu_pmu))
		pmcr |= ARMV8_PMU_PMCR_LP;

	armv8pmu_pmcr_write(pmcr);
}

static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
				      struct perf_event *event)
{
	if (event->attr.type == PERF_TYPE_HARDWARE &&
	    event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {

		if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
			     armpmu->pmceid_bitmap))
			return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;

		if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
			     armpmu->pmceid_bitmap))
			return ARMV8_PMUV3_PERFCTR_BR_RETIRED;

		return HW_OP_UNSUPPORTED;
	}

	return armpmu_map_event(event, &armv8_pmuv3_perf_map,
				&armv8_pmuv3_perf_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}

static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = __armv8_pmuv3_map_event_id(armpmu, event);

	/*
	 * CHAIN events only work when paired with an adjacent counter, and it
	 * never makes sense for a user to open one in isolation, as they'll be
	 * rotated arbitrarily.
	 */
	if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
		return -EINVAL;

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/*
	 * User events must be allocated into a single counter, and so
	 * must not be chained.
	 *
	 * Most 64-bit events require long counter support, but 64-bit
	 * CPU_CYCLES events can be placed into the dedicated cycle
	 * counter when this is free.
	 */
	if (armv8pmu_event_want_user_access(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;
		if (armv8pmu_event_is_64bit(event) &&
		    (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
		    !armv8pmu_has_long_event(armpmu))
			return -EOPNOTSUPP;

		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
	}

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}

struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	pmuver = read_pmuver();
	if (!pmuv3_implemented(pmuver))
		return;

	cpu_pmu->pmuver = pmuver;
	probe->present = true;

	/* Read the nb of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_pmceid0();
	pmceid[1] = pmceid_raw[1] = read_pmceid1();

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	/* store PMMIR register for sysfs */
	if (is_pmuv3p4(pmuver) && (pmceid_raw[1] & BIT(31)))
		cpu_pmu->reg_pmmir = read_pmmir();
	else
		cpu_pmu->reg_pmmir = 0;
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

static void armv8pmu_disable_user_access_ipi(void *unused)
{
	armv8pmu_disable_user_access();
}

static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write || sysctl_perf_user_access)
		return ret;

	on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
	return 0;
}

static struct ctl_table armv8_pmu_sysctl_table[] = {
	{
		.procname = "perf_user_access",
		.data = &sysctl_perf_user_access,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = armv8pmu_proc_user_access_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{ }
};

static void armv8_pmu_register_sysctl_table(void)
{
	static u32 tbl_registered = 0;

	if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
		register_sysctl("kernel", armv8_pmu_sysctl_table);
}

static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
			  int (*map_event)(struct perf_event *event),
			  const struct attribute_group *events,
			  const struct attribute_group *format,
			  const struct attribute_group *caps)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq = armv8pmu_handle_irq;
	cpu_pmu->enable = armv8pmu_enable_event;
	cpu_pmu->disable = armv8pmu_disable_event;
	cpu_pmu->read_counter = armv8pmu_read_counter;
	cpu_pmu->write_counter = armv8pmu_write_counter;
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
	cpu_pmu->start = armv8pmu_start;
	cpu_pmu->stop = armv8pmu_stop;
	cpu_pmu->reset = armv8pmu_reset;
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;

	cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;

	cpu_pmu->name = name;
	cpu_pmu->map_event = map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
			events : &armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
			format : &armv8_pmuv3_format_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
			caps : &armv8_pmuv3_caps_attr_group;

	armv8_pmu_register_sysctl_table();
	return 0;
}

static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
				   int (*map_event)(struct perf_event *event))
{
	return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
}

#define PMUV3_INIT_SIMPLE(name) \
static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
{ \
	return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
}

PMUV3_INIT_SIMPLE(armv8_pmuv3)

PMUV3_INIT_SIMPLE(armv8_cortex_a34)
PMUV3_INIT_SIMPLE(armv8_cortex_a55)
PMUV3_INIT_SIMPLE(armv8_cortex_a65)
PMUV3_INIT_SIMPLE(armv8_cortex_a75)
PMUV3_INIT_SIMPLE(armv8_cortex_a76)
PMUV3_INIT_SIMPLE(armv8_cortex_a77)
PMUV3_INIT_SIMPLE(armv8_cortex_a78)
PMUV3_INIT_SIMPLE(armv9_cortex_a510)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v1)

PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
				       armv8_a53_map_event);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
				       armv8_a53_map_event);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
				       armv8_a57_map_event);
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
				       armv8_a57_map_event);
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
				       armv8_a73_map_event);
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
				       armv8_thunder_map_event);
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
				       armv8_vulcan_map_event);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
	{.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
	{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
	{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
	{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
	{.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
	{.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
	{.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
	{.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
	{.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
	{.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
	{.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
	{.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
	{.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
	{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
	{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
	{.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
	{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
	{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
	{.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
	{},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver = {
		.name = ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe = armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);
}
device_initcall(armv8_pmu_driver_init)

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;
	userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);

	if (userpg->cap_user_rdpmc) {
		if (event->hw.flags & ARMPMU_EVT_64BIT)
			userpg->pmc_width = 64;
		else
			userpg->pmc_width = 32;
	}

	do {
		rd = sched_clock_read_begin(&seq);

		if (rd->read_sched_clock != arch_timer_read_counter)
			return;

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;

	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;

	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}