Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2
3/*
4 * Copyright(c) 2023 Huawei
5 *
6 * The CXL 3.0 specification includes a standard Performance Monitoring Unit,
7 * called the CXL PMU, or CPMU. In order to allow a high degree of
8 * implementation flexibility the specification provides a wide range of
9 * options all of which are self describing.
10 *
11 * Details in CXL rev 3.0 section 8.2.7 CPMU Register Interface
12 */
13
14#include <linux/io-64-nonatomic-lo-hi.h>
15#include <linux/perf_event.h>
16#include <linux/bitops.h>
17#include <linux/device.h>
18#include <linux/bits.h>
19#include <linux/list.h>
20#include <linux/bug.h>
21#include <linux/pci.h>
22
23#include "../cxl/cxlpci.h"
24#include "../cxl/cxl.h"
25#include "../cxl/pmu.h"
26
/* CPMU Capability register and fields (CXL rev 3.0 section 8.2.7) */
#define CXL_PMU_CAP_REG 0x0
#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(5, 0)
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
#define CXL_PMU_FILTER_HDM BIT(0)
#define CXL_PMU_FILTER_CHAN_RANK_BANK BIT(1)
#define CXL_PMU_CAP_MSI_N_MSK GENMASK_ULL(47, 44)
#define CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN BIT_ULL(48)
#define CXL_PMU_CAP_FREEZE BIT_ULL(49)
#define CXL_PMU_CAP_INT BIT_ULL(50)
#define CXL_PMU_CAP_VERSION_MSK GENMASK_ULL(63, 60)

/* Overflow status / freeze control and per-register Event Capabilities */
#define CXL_PMU_OVERFLOW_REG 0x10
#define CXL_PMU_FREEZE_REG 0x18
#define CXL_PMU_EVENT_CAP_REG(n) (0x100 + 8 * (n))
#define CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK GENMASK_ULL(31, 0)
#define CXL_PMU_EVENT_CAP_GROUP_ID_MSK GENMASK_ULL(47, 32)
#define CXL_PMU_EVENT_CAP_VENDOR_ID_MSK GENMASK_ULL(63, 48)

/* Per-counter configuration registers */
#define CXL_PMU_COUNTER_CFG_REG(n) (0x200 + 8 * (n))
#define CXL_PMU_COUNTER_CFG_TYPE_MSK GENMASK_ULL(1, 0)
#define CXL_PMU_COUNTER_CFG_TYPE_FREE_RUN 0
#define CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN 1
#define CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE 2
#define CXL_PMU_COUNTER_CFG_ENABLE BIT_ULL(8)
#define CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW BIT_ULL(9)
#define CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW BIT_ULL(10)
#define CXL_PMU_COUNTER_CFG_EDGE BIT_ULL(11)
#define CXL_PMU_COUNTER_CFG_INVERT BIT_ULL(12)
#define CXL_PMU_COUNTER_CFG_THRESHOLD_MSK GENMASK_ULL(23, 16)
#define CXL_PMU_COUNTER_CFG_EVENTS_MSK GENMASK_ULL(55, 24)
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)

/* 8 32-bit filter value registers per counter */
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)

/* The counters themselves */
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))

/* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
#define CXL_PMU_GID_CLOCK_TICKS 0x00
#define CXL_PMU_GID_D2H_REQ 0x0010
#define CXL_PMU_GID_D2H_RSP 0x0011
#define CXL_PMU_GID_H2D_REQ 0x0012
#define CXL_PMU_GID_H2D_RSP 0x0013
#define CXL_PMU_GID_CACHE_DATA 0x0014
#define CXL_PMU_GID_M2S_REQ 0x0020
#define CXL_PMU_GID_M2S_RWD 0x0021
#define CXL_PMU_GID_M2S_BIRSP 0x0022
#define CXL_PMU_GID_S2M_BISNP 0x0023
#define CXL_PMU_GID_S2M_NDR 0x0024
#define CXL_PMU_GID_S2M_DRS 0x0025
#define CXL_PMU_GID_DDR 0x8000
80
/* Dynamic cpuhp state slot, allocated once at module init for all instances */
static int cxl_pmu_cpuhp_state_num;
82
/*
 * One entry per usable Event Capability register: a VID/GID pair plus the
 * bitmask of events a counter referencing it may count.
 */
struct cxl_pmu_ev_cap {
	u16 vid;	/* Vendor ID from the Event Capability register */
	u16 gid;	/* Group ID from the Event Capability register */
	u32 msk;	/* event bitmask this capability can count */
	union {
		int counter_idx; /* fixed counters */
		int event_idx; /* configurable counters */
	};
	struct list_head node;	/* on info->event_caps_fixed or _configurable */
};
93
#define CXL_PMU_MAX_COUNTERS 64
/* Per-CPMU-instance driver state */
struct cxl_pmu_info {
	struct pmu pmu;			/* perf core registration */
	void __iomem *base;		/* mapped CPMU register block */
	struct perf_event **hw_events;	/* active event per counter index */
	struct list_head event_caps_configurable;
	struct list_head event_caps_fixed;
	DECLARE_BITMAP(used_counter_bm, CXL_PMU_MAX_COUNTERS);	/* counters in use */
	DECLARE_BITMAP(conf_counter_bm, CXL_PMU_MAX_COUNTERS);	/* configurable counters */
	u16 counter_width;	/* counter width in bits, from capability reg */
	u8 num_counters;
	u8 num_event_capabilities;
	int on_cpu;		/* CPU all events are bound to, -1 if none */
	struct hlist_node node;	/* cpuhp instance list linkage */
	bool filter_hdm;	/* HDM decoder filter supported by hardware */
	int irq;
};
111
/* Recover the driver instance from the embedded struct pmu */
#define pmu_to_cxl_pmu_info(_pmu) container_of(_pmu, struct cxl_pmu_info, pmu)
113
/*
 * All CPMU counters are discoverable via the Event Capabilities Registers.
 * Each Event Capability register contains a VID / GroupID.
 * A counter may then count any combination (by summing) of events in
 * that group which are in the Supported Events Bitmask.
 * However, there are some complexities to the scheme.
 * - Fixed function counters refer to an Event Capabilities register.
 *   That event capability register is not then used for Configurable
 *   counters.
 */
static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
{
	unsigned long fixed_counter_event_cap_bm = 0;
	void __iomem *base = info->base;
	bool freeze_for_enable;
	u64 val, eval;
	int i;

	val = readq(base + CXL_PMU_CAP_REG);
	/* Driver relies on freezing counters whilst programming them */
	freeze_for_enable = FIELD_GET(CXL_PMU_CAP_WRITEABLE_WHEN_FROZEN, val) &&
		FIELD_GET(CXL_PMU_CAP_FREEZE, val);
	if (!freeze_for_enable) {
		dev_err(dev, "Counters not writable while frozen\n");
		return -ENODEV;
	}

	/* Counter and capability counts are encoded as N - 1 in the register */
	info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1;
	info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val);
	info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1;

	info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM;
	/* MSI/MSI-X message number, or -1 when no interrupt is advertised */
	if (FIELD_GET(CXL_PMU_CAP_INT, val))
		info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val);
	else
		info->irq = -1;

	/* First handle fixed function counters; note if configurable counters found */
	for (i = 0; i < info->num_counters; i++) {
		struct cxl_pmu_ev_cap *pmu_ev;
		u32 events_msk;
		u8 group_idx;

		val = readq(base + CXL_PMU_COUNTER_CFG_REG(i));

		if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) ==
			CXL_PMU_COUNTER_CFG_TYPE_CONFIGURABLE) {
			set_bit(i, info->conf_counter_bm);
		}

		if (FIELD_GET(CXL_PMU_COUNTER_CFG_TYPE_MSK, val) !=
		    CXL_PMU_COUNTER_CFG_TYPE_FIXED_FUN)
			continue;

		/* In this case we know which fields are const */
		group_idx = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK, val);
		events_msk = FIELD_GET(CXL_PMU_COUNTER_CFG_EVENTS_MSK, val);
		eval = readq(base + CXL_PMU_EVENT_CAP_REG(group_idx));
		pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
		if (!pmu_ev)
			return -ENOMEM;

		pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
		pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
		/* For a fixed purpose counter use the events mask from the counter CFG */
		pmu_ev->msk = events_msk;
		pmu_ev->counter_idx = i;
		/* This list add is never unwound as all entries deleted on remove */
		list_add(&pmu_ev->node, &info->event_caps_fixed);
		/*
		 * Configurable counters must not use an Event Capability registers that
		 * is in use for a Fixed counter
		 */
		set_bit(group_idx, &fixed_counter_event_cap_bm);
	}

	if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) {
		struct cxl_pmu_ev_cap *pmu_ev;
		int j;
		/* Walk event capabilities unused by fixed counters */
		for_each_clear_bit(j, &fixed_counter_event_cap_bm,
				   info->num_event_capabilities) {
			pmu_ev = devm_kzalloc(dev, sizeof(*pmu_ev), GFP_KERNEL);
			if (!pmu_ev)
				return -ENOMEM;

			eval = readq(base + CXL_PMU_EVENT_CAP_REG(j));
			pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval);
			pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval);
			pmu_ev->msk = FIELD_GET(CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK, eval);
			pmu_ev->event_idx = j;
			list_add(&pmu_ev->node, &info->event_caps_configurable);
		}
	}

	return 0;
}
210
/*
 * Build a read-only sysfs "format" attribute; device_show_string() emits
 * the format string stashed in .var.
 */
#define CXL_PMU_FORMAT_ATTR(_name, _format)\
	(&((struct dev_ext_attribute[]) { \
		{ \
			.attr = __ATTR(_name, 0444, device_show_string, NULL), \
			.var = (void *)_format \
		} \
	})[0].attr.attr)

/* Indexes into cxl_pmu_format_attr[] below */
enum {
	cxl_pmu_mask_attr,
	cxl_pmu_gid_attr,
	cxl_pmu_vid_attr,
	cxl_pmu_threshold_attr,
	cxl_pmu_invert_attr,
	cxl_pmu_edge_attr,
	cxl_pmu_hdm_filter_en_attr,
	cxl_pmu_hdm_attr,
};
229
/* perf "format" attributes: how config/config1/config2 bits are laid out */
static struct attribute *cxl_pmu_format_attr[] = {
	[cxl_pmu_mask_attr] = CXL_PMU_FORMAT_ATTR(mask, "config:0-31"),
	[cxl_pmu_gid_attr] = CXL_PMU_FORMAT_ATTR(gid, "config:32-47"),
	[cxl_pmu_vid_attr] = CXL_PMU_FORMAT_ATTR(vid, "config:48-63"),
	[cxl_pmu_threshold_attr] = CXL_PMU_FORMAT_ATTR(threshold, "config1:0-15"),
	[cxl_pmu_invert_attr] = CXL_PMU_FORMAT_ATTR(invert, "config1:16"),
	[cxl_pmu_edge_attr] = CXL_PMU_FORMAT_ATTR(edge, "config1:17"),
	[cxl_pmu_hdm_filter_en_attr] = CXL_PMU_FORMAT_ATTR(hdm_filter_en, "config1:18"),
	[cxl_pmu_hdm_attr] = CXL_PMU_FORMAT_ATTR(hdm, "config2:0-15"),
	NULL
};

/* Masks matching the layout advertised above, used to decode attr fields */
#define CXL_PMU_ATTR_CONFIG_MASK_MSK GENMASK_ULL(31, 0)
#define CXL_PMU_ATTR_CONFIG_GID_MSK GENMASK_ULL(47, 32)
#define CXL_PMU_ATTR_CONFIG_VID_MSK GENMASK_ULL(63, 48)
#define CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK GENMASK_ULL(15, 0)
#define CXL_PMU_ATTR_CONFIG1_INVERT_MSK BIT(16)
#define CXL_PMU_ATTR_CONFIG1_EDGE_MSK BIT(17)
#define CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK BIT(18)
#define CXL_PMU_ATTR_CONFIG2_HDM_MSK GENMASK(15, 0)
250
251static umode_t cxl_pmu_format_is_visible(struct kobject *kobj,
252 struct attribute *attr, int a)
253{
254 struct device *dev = kobj_to_dev(kobj);
255 struct cxl_pmu_info *info = dev_get_drvdata(dev);
256
257 /*
258 * Filter capability at the CPMU level, so hide the attributes if the particular
259 * filter is not supported.
260 */
261 if (!info->filter_hdm &&
262 (attr == cxl_pmu_format_attr[cxl_pmu_hdm_filter_en_attr] ||
263 attr == cxl_pmu_format_attr[cxl_pmu_hdm_attr]))
264 return 0;
265
266 return attr->mode;
267}
268
/* sysfs "format" directory for this PMU */
static const struct attribute_group cxl_pmu_format_group = {
	.name = "format",
	.attrs = cxl_pmu_format_attr,
	.is_visible = cxl_pmu_format_is_visible,
};
274
275static u32 cxl_pmu_config_get_mask(struct perf_event *event)
276{
277 return FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, event->attr.config);
278}
279
280static u16 cxl_pmu_config_get_gid(struct perf_event *event)
281{
282 return FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, event->attr.config);
283}
284
285static u16 cxl_pmu_config_get_vid(struct perf_event *event)
286{
287 return FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, event->attr.config);
288}
289
290static u8 cxl_pmu_config1_get_threshold(struct perf_event *event)
291{
292 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK, event->attr.config1);
293}
294
295static bool cxl_pmu_config1_get_invert(struct perf_event *event)
296{
297 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_INVERT_MSK, event->attr.config1);
298}
299
300static bool cxl_pmu_config1_get_edge(struct perf_event *event)
301{
302 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_EDGE_MSK, event->attr.config1);
303}
304
305/*
306 * CPMU specification allows for 8 filters, each with a 32 bit value...
307 * So we need to find 8x32bits to store it in.
308 * As the value used for disable is 0xffff_ffff, a separate enable switch
309 * is needed.
310 */
311
312static bool cxl_pmu_config1_hdm_filter_en(struct perf_event *event)
313{
314 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK, event->attr.config1);
315}
316
317static u16 cxl_pmu_config2_get_hdm_decoder(struct perf_event *event)
318{
319 return FIELD_GET(CXL_PMU_ATTR_CONFIG2_HDM_MSK, event->attr.config2);
320}
321
322static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct perf_pmu_events_attr *pmu_attr =
326 container_of(attr, struct perf_pmu_events_attr, attr);
327
328 return sysfs_emit(buf, "config=%#llx\n", pmu_attr->id);
329}
330
/*
 * Pack vid/gid/mask into the attribute id in the same layout as
 * attr.config so cxl_pmu_event_sysfs_show() can print it directly.
 */
#define CXL_PMU_EVENT_ATTR(_name, _vid, _gid, _msk) \
	PMU_EVENT_ATTR_ID(_name, cxl_pmu_event_sysfs_show, \
			  ((u64)(_vid) << 48) | ((u64)(_gid) << 32) | (u64)(_msk))

/* For CXL spec defined events */
#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk) \
	CXL_PMU_EVENT_ATTR(_name, PCI_VENDOR_ID_CXL, _gid, _msk)
338
/* Events advertised via sysfs; visibility is gated by hardware support */
static struct attribute *cxl_pmu_event_attrs[] = {
	CXL_PMU_EVENT_CXL_ATTR(clock_ticks, CXL_PMU_GID_CLOCK_TICKS, BIT(0)),
	/* CXL rev 3.0 Table 3-17 - Device to Host Requests */
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdcurr, CXL_PMU_GID_D2H_REQ, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdown, CXL_PMU_GID_D2H_REQ, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdshared, CXL_PMU_GID_D2H_REQ, BIT(3)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdany, CXL_PMU_GID_D2H_REQ, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_rdownnodata, CXL_PMU_GID_D2H_REQ, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_itomwr, CXL_PMU_GID_D2H_REQ, BIT(6)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrcurr, CXL_PMU_GID_D2H_REQ, BIT(7)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_clflush, CXL_PMU_GID_D2H_REQ, BIT(8)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevict, CXL_PMU_GID_D2H_REQ, BIT(9)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_dirtyevict, CXL_PMU_GID_D2H_REQ, BIT(10)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_cleanevictnodata, CXL_PMU_GID_D2H_REQ, BIT(11)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinv, CXL_PMU_GID_D2H_REQ, BIT(12)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinvf, CXL_PMU_GID_D2H_REQ, BIT(13)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrinv, CXL_PMU_GID_D2H_REQ, BIT(14)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_req_cacheflushed, CXL_PMU_GID_D2H_REQ, BIT(16)),
	/* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihiti, CXL_PMU_GID_D2H_RSP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvhitv, CXL_PMU_GID_D2H_RSP, BIT(6)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihitse, CXL_PMU_GID_D2H_RSP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspshitse, CXL_PMU_GID_D2H_RSP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspsfwdm, CXL_PMU_GID_D2H_RSP, BIT(7)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspifwdm, CXL_PMU_GID_D2H_RSP, BIT(15)),
	CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvfwdv, CXL_PMU_GID_D2H_RSP, BIT(22)),
	/* CXL rev 3.0 Table 3-21 - CXL.cache - Mapping of H2D Requests to D2H Responses */
	CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpdata, CXL_PMU_GID_H2D_REQ, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpinv, CXL_PMU_GID_H2D_REQ, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_req_snpcur, CXL_PMU_GID_H2D_REQ, BIT(3)),
	/* CXL rev 3.0 Table 3-22 - H2D Response Opcode Encodings */
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_writepull, CXL_PMU_GID_H2D_RSP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_go, CXL_PMU_GID_H2D_RSP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepull, CXL_PMU_GID_H2D_RSP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_extcmp, CXL_PMU_GID_H2D_RSP, BIT(6)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_gowritepulldrop, CXL_PMU_GID_H2D_RSP, BIT(8)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_fastgowritepull, CXL_PMU_GID_H2D_RSP, BIT(13)),
	CXL_PMU_EVENT_CXL_ATTR(h2d_rsp_goerrwritepull, CXL_PMU_GID_H2D_RSP, BIT(15)),
	/* CXL rev 3.0 Table 13-5 directly lists these */
	CXL_PMU_EVENT_CXL_ATTR(cachedata_d2h_data, CXL_PMU_GID_CACHE_DATA, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(cachedata_h2d_data, CXL_PMU_GID_CACHE_DATA, BIT(1)),
	/* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminv, CXL_PMU_GID_M2S_REQ, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrd, CXL_PMU_GID_M2S_REQ, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddata, CXL_PMU_GID_M2S_REQ, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdfwd, CXL_PMU_GID_M2S_REQ, BIT(3)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memwrfwd, CXL_PMU_GID_M2S_REQ, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memspecrd, CXL_PMU_GID_M2S_REQ, BIT(8)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminvnt, CXL_PMU_GID_M2S_REQ, BIT(9)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_req_memcleanevict, CXL_PMU_GID_M2S_REQ, BIT(10)),
	/* CXL rev 3.0 Table 3-35 M2S RwD Memory Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwr, CXL_PMU_GID_M2S_RWD, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_memwrptl, CXL_PMU_GID_M2S_RWD, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_rwd_biconflict, CXL_PMU_GID_M2S_RWD, BIT(4)),
	/* CXL rev 3.0 Table 3-38 M2S BIRsp Memory Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_i, CXL_PMU_GID_M2S_BIRSP, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_s, CXL_PMU_GID_M2S_BIRSP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_e, CXL_PMU_GID_M2S_BIRSP, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_iblk, CXL_PMU_GID_M2S_BIRSP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_sblk, CXL_PMU_GID_M2S_BIRSP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(m2s_birsp_eblk, CXL_PMU_GID_M2S_BIRSP, BIT(6)),
	/* CXL rev 3.0 Table 3-40 S2M BISnp Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_cur, CXL_PMU_GID_S2M_BISNP, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_data, CXL_PMU_GID_S2M_BISNP, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_inv, CXL_PMU_GID_S2M_BISNP, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_curblk, CXL_PMU_GID_S2M_BISNP, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_datblk, CXL_PMU_GID_S2M_BISNP, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_invblk, CXL_PMU_GID_S2M_BISNP, BIT(6)),
	/* CXL rev 3.0 Table 3-43 S2M NDR Opcodes */
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
	/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
	CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
	/* CXL rev 3.0 Table 13-5 directly lists these */
	CXL_PMU_EVENT_CXL_ATTR(ddr_act, CXL_PMU_GID_DDR, BIT(0)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_pre, CXL_PMU_GID_DDR, BIT(1)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_casrd, CXL_PMU_GID_DDR, BIT(2)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_caswr, CXL_PMU_GID_DDR, BIT(3)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_refresh, CXL_PMU_GID_DDR, BIT(4)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_selfrefreshent, CXL_PMU_GID_DDR, BIT(5)),
	CXL_PMU_EVENT_CXL_ATTR(ddr_rfm, CXL_PMU_GID_DDR, BIT(6)),
	NULL
};
425
426static struct cxl_pmu_ev_cap *cxl_pmu_find_fixed_counter_ev_cap(struct cxl_pmu_info *info,
427 int vid, int gid, int msk)
428{
429 struct cxl_pmu_ev_cap *pmu_ev;
430
431 list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) {
432 if (vid != pmu_ev->vid || gid != pmu_ev->gid)
433 continue;
434
435 /* Precise match for fixed counter */
436 if (msk == pmu_ev->msk)
437 return pmu_ev;
438 }
439
440 return ERR_PTR(-EINVAL);
441}
442
443static struct cxl_pmu_ev_cap *cxl_pmu_find_config_counter_ev_cap(struct cxl_pmu_info *info,
444 int vid, int gid, int msk)
445{
446 struct cxl_pmu_ev_cap *pmu_ev;
447
448 list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) {
449 if (vid != pmu_ev->vid || gid != pmu_ev->gid)
450 continue;
451
452 /* Request mask must be subset of supported */
453 if (msk & ~pmu_ev->msk)
454 continue;
455
456 return pmu_ev;
457 }
458
459 return ERR_PTR(-EINVAL);
460}
461
462static umode_t cxl_pmu_event_is_visible(struct kobject *kobj, struct attribute *attr, int a)
463{
464 struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr);
465 struct perf_pmu_events_attr *pmu_attr =
466 container_of(dev_attr, struct perf_pmu_events_attr, attr);
467 struct device *dev = kobj_to_dev(kobj);
468 struct cxl_pmu_info *info = dev_get_drvdata(dev);
469 int vid = FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, pmu_attr->id);
470 int gid = FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, pmu_attr->id);
471 int msk = FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, pmu_attr->id);
472
473 if (!IS_ERR(cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, msk)))
474 return attr->mode;
475
476 if (!IS_ERR(cxl_pmu_find_config_counter_ev_cap(info, vid, gid, msk)))
477 return attr->mode;
478
479 return 0;
480}
481
/* sysfs "events" directory; entries filtered by cxl_pmu_event_is_visible() */
static const struct attribute_group cxl_pmu_events = {
	.name = "events",
	.attrs = cxl_pmu_event_attrs,
	.is_visible = cxl_pmu_event_is_visible,
};
487
/* sysfs "cpumask": the single CPU all events for this PMU are bound to */
static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct cxl_pmu_info *info = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *cxl_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static const struct attribute_group cxl_pmu_cpumask_group = {
	.attrs = cxl_pmu_cpumask_attrs,
};

/* All sysfs groups exposed by this PMU: events, format and cpumask */
static const struct attribute_group *cxl_pmu_attr_groups[] = {
	&cxl_pmu_events,
	&cxl_pmu_format_group,
	&cxl_pmu_cpumask_group,
	NULL
};
512
513/* If counter_idx == NULL, don't try to allocate a counter. */
514static int cxl_pmu_get_event_idx(struct perf_event *event, int *counter_idx,
515 int *event_idx)
516{
517 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
518 DECLARE_BITMAP(configurable_and_free, CXL_PMU_MAX_COUNTERS);
519 struct cxl_pmu_ev_cap *pmu_ev;
520 u32 mask;
521 u16 gid, vid;
522 int i;
523
524 vid = cxl_pmu_config_get_vid(event);
525 gid = cxl_pmu_config_get_gid(event);
526 mask = cxl_pmu_config_get_mask(event);
527
528 pmu_ev = cxl_pmu_find_fixed_counter_ev_cap(info, vid, gid, mask);
529 if (!IS_ERR(pmu_ev)) {
530 if (!counter_idx)
531 return 0;
532 if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) {
533 *counter_idx = pmu_ev->counter_idx;
534 return 0;
535 }
536 /* Fixed counter is in use, but maybe a configurable one? */
537 }
538
539 pmu_ev = cxl_pmu_find_config_counter_ev_cap(info, vid, gid, mask);
540 if (!IS_ERR(pmu_ev)) {
541 if (!counter_idx)
542 return 0;
543
544 bitmap_andnot(configurable_and_free, info->conf_counter_bm,
545 info->used_counter_bm, CXL_PMU_MAX_COUNTERS);
546
547 i = find_first_bit(configurable_and_free, CXL_PMU_MAX_COUNTERS);
548 if (i == CXL_PMU_MAX_COUNTERS)
549 return -EINVAL;
550
551 *counter_idx = i;
552 return 0;
553 }
554
555 return -EINVAL;
556}
557
558static int cxl_pmu_event_init(struct perf_event *event)
559{
560 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
561 int rc;
562
563 /* Top level type sanity check - is this a Hardware Event being requested */
564 if (event->attr.type != event->pmu->type)
565 return -ENOENT;
566
567 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
568 return -EOPNOTSUPP;
569 /* TODO: Validation of any filter */
570
571 /*
572 * Verify that it is possible to count what was requested. Either must
573 * be a fixed counter that is a precise match or a configurable counter
574 * where this is a subset.
575 */
576 rc = cxl_pmu_get_event_idx(event, NULL, NULL);
577 if (rc < 0)
578 return rc;
579
580 event->cpu = info->on_cpu;
581
582 return 0;
583}
584
/* perf core callback: unfreeze all counters on this CPMU */
static void cxl_pmu_enable(struct pmu *pmu)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
	void __iomem *base = info->base;

	/* Can assume frozen at this stage */
	writeq(0, base + CXL_PMU_FREEZE_REG);
}
593
/* perf core callback: freeze all counters on this CPMU */
static void cxl_pmu_disable(struct pmu *pmu)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(pmu);
	void __iomem *base = info->base;

	/*
	 * Whilst bits above number of counters are RsvdZ
	 * they are unlikely to be repurposed given
	 * number of counters is allowed to be 64 leaving
	 * no reserved bits. Hence this is only slightly
	 * naughty.
	 */
	writeq(GENMASK_ULL(63, 0), base + CXL_PMU_FREEZE_REG);
}
608
/*
 * perf core callback: program and enable the counter hwc->idx was assigned
 * by cxl_pmu_event_add(). Relies on the PMU being frozen (cxl_pmu_disable())
 * while the configuration registers are written.
 */
static void cxl_pmu_event_start(struct perf_event *event, int flags)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	void __iomem *base = info->base;
	u64 cfg;

	/*
	 * All paths to here should either set these flags directly or
	 * call cxl_pmu_event_stop() which will ensure the correct state.
	 */
	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * Currently only hdm filter control is implemented, this code will
	 * want generalizing when more filters are added.
	 */
	if (info->filter_hdm) {
		if (cxl_pmu_config1_hdm_filter_en(event))
			cfg = cxl_pmu_config2_get_hdm_decoder(event);
		else
			cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
		writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
	}

	/* Enable the counter with interrupt and freeze on overflow */
	cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW, 1);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EDGE,
			  cxl_pmu_config1_get_edge(event) ? 1 : 0);
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INVERT,
			  cxl_pmu_config1_get_invert(event) ? 1 : 0);

	/* Fixed purpose counters have next two fields RO */
	if (test_bit(hwc->idx, info->conf_counter_bm)) {
		/* hwc->event_base holds the Event Capability register index */
		cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK,
				  hwc->event_base);
		cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENTS_MSK,
				  cxl_pmu_config_get_mask(event));
	}
	cfg &= ~CXL_PMU_COUNTER_CFG_THRESHOLD_MSK;
	/*
	 * For events that generate only 1 count per clock the CXL 3.0 spec
	 * states the threshold shall be set to 1 but if set to 0 it will
	 * count the raw value anyway?
	 * There is no definition of what events will count multiple per cycle
	 * and hence to which non 1 values of threshold can apply.
	 * (CXL 3.0 8.2.7.2.1 Counter Configuration - threshold field definition)
	 */
	cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_THRESHOLD_MSK,
			  cxl_pmu_config1_get_threshold(event));
	writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));

	/* Hardware counter and software shadow both start from zero */
	local64_set(&hwc->prev_count, 0);
	writeq(0, base + CXL_PMU_COUNTER_REG(hwc->idx));

	perf_event_update_userpage(event);
}
672
673static u64 cxl_pmu_read_counter(struct perf_event *event)
674{
675 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
676 void __iomem *base = info->base;
677
678 return readq(base + CXL_PMU_COUNTER_REG(event->hw.idx));
679}
680
/*
 * Fold the current hardware count into event->count.
 * @overflow: true when called from the overflow interrupt, so one wrap of
 *            the counter_width-bit counter is added to the delta.
 */
static void __cxl_pmu_read(struct perf_event *event, bool overflow)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_cnt, prev_cnt, delta;

	/* cmpxchg loop copes with concurrent readers updating prev_count */
	do {
		prev_cnt = local64_read(&hwc->prev_count);
		new_cnt = cxl_pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != prev_cnt);

	/*
	 * If we know an overflow occur then take that into account.
	 * Note counter is not reset as that would lose events
	 */
	delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0);
	/*
	 * NOTE(review): `1UL << info->counter_width` is undefined for a
	 * 64-bit wide counter (shift == type width) — confirm that hardware
	 * widths here are always < 64, or guard this shift.
	 */
	if (overflow && delta < GENMASK_ULL(info->counter_width - 1, 0))
		delta += (1UL << info->counter_width);

	local64_add(delta, &event->count);
}
702
/* perf core callback: update event->count; no overflow known to have occurred */
static void cxl_pmu_read(struct perf_event *event)
{
	__cxl_pmu_read(event, false);
}
707
708static void cxl_pmu_event_stop(struct perf_event *event, int flags)
709{
710 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
711 void __iomem *base = info->base;
712 struct hw_perf_event *hwc = &event->hw;
713 u64 cfg;
714
715 cxl_pmu_read(event);
716 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
717 hwc->state |= PERF_HES_STOPPED;
718
719 cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
720 cfg &= ~(FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1) |
721 FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1));
722 writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx));
723
724 hwc->state |= PERF_HES_UPTODATE;
725}
726
/* perf core callback: claim a hardware counter for @event */
static int cxl_pmu_event_add(struct perf_event *event, int flags)
{
	struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx, rc;
	int event_idx = 0;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	/* Allocate a counter (fixed preferred, else configurable) */
	rc = cxl_pmu_get_event_idx(event, &idx, &event_idx);
	if (rc < 0)
		return rc;

	hwc->idx = idx;

	/* Only set for configurable counters */
	hwc->event_base = event_idx;
	info->hw_events[idx] = event;
	set_bit(idx, info->used_counter_bm);

	if (flags & PERF_EF_START)
		cxl_pmu_event_start(event, PERF_EF_RELOAD);

	return 0;
}
752
753static void cxl_pmu_event_del(struct perf_event *event, int flags)
754{
755 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu);
756 struct hw_perf_event *hwc = &event->hw;
757
758 cxl_pmu_event_stop(event, PERF_EF_UPDATE);
759 clear_bit(hwc->idx, info->used_counter_bm);
760 info->hw_events[hwc->idx] = NULL;
761 perf_event_update_userpage(event);
762}
763
/*
 * Overflow interrupt handler: fold counts for every overflowed counter,
 * then clear the overflow status bits.
 */
static irqreturn_t cxl_pmu_irq(int irq, void *data)
{
	struct cxl_pmu_info *info = data;
	void __iomem *base = info->base;
	u64 overflowed;
	DECLARE_BITMAP(overflowedbm, 64);
	int i;

	overflowed = readq(base + CXL_PMU_OVERFLOW_REG);

	/* Interrupt may be shared, so maybe it isn't ours */
	if (!overflowed)
		return IRQ_NONE;

	bitmap_from_arr64(overflowedbm, &overflowed, 64);
	for_each_set_bit(i, overflowedbm, info->num_counters) {
		struct perf_event *event = info->hw_events[i];

		if (!event) {
			dev_dbg(info->pmu.dev,
				"overflow but on non enabled counter %d\n", i);
			continue;
		}

		/* overflow == true so the counter wrap is added to the count */
		__cxl_pmu_read(event, true);
	}

	/* NOTE(review): assumes overflow bits are write-1-to-clear — confirm vs spec */
	writeq(overflowed, base + CXL_PMU_OVERFLOW_REG);

	return IRQ_HANDLED;
}
795
/* devm action: undo perf_pmu_register() on driver unbind */
static void cxl_pmu_perf_unregister(void *_info)
{
	struct cxl_pmu_info *info = _info;

	perf_pmu_unregister(&info->pmu);
}

/* devm action: drop this instance from the CPU hotplug notifier chain */
static void cxl_pmu_cpuhp_remove(void *_info)
{
	struct cxl_pmu_info *info = _info;

	cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node);
}
809
810static int cxl_pmu_probe(struct device *dev)
811{
812 struct cxl_pmu *pmu = to_cxl_pmu(dev);
813 struct pci_dev *pdev = to_pci_dev(dev->parent);
814 struct cxl_pmu_info *info;
815 char *irq_name;
816 char *dev_name;
817 int rc, irq;
818
819 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
820 if (!info)
821 return -ENOMEM;
822
823 dev_set_drvdata(dev, info);
824 INIT_LIST_HEAD(&info->event_caps_fixed);
825 INIT_LIST_HEAD(&info->event_caps_configurable);
826
827 info->base = pmu->base;
828
829 info->on_cpu = -1;
830 rc = cxl_pmu_parse_caps(dev, info);
831 if (rc)
832 return rc;
833
834 info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events),
835 info->num_counters, GFP_KERNEL);
836 if (!info->hw_events)
837 return -ENOMEM;
838
839 switch (pmu->type) {
840 case CXL_PMU_MEMDEV:
841 dev_name = devm_kasprintf(dev, GFP_KERNEL, "cxl_pmu_mem%d.%d",
842 pmu->assoc_id, pmu->index);
843 break;
844 }
845 if (!dev_name)
846 return -ENOMEM;
847
848 info->pmu = (struct pmu) {
849 .name = dev_name,
850 .parent = dev,
851 .module = THIS_MODULE,
852 .event_init = cxl_pmu_event_init,
853 .pmu_enable = cxl_pmu_enable,
854 .pmu_disable = cxl_pmu_disable,
855 .add = cxl_pmu_event_add,
856 .del = cxl_pmu_event_del,
857 .start = cxl_pmu_event_start,
858 .stop = cxl_pmu_event_stop,
859 .read = cxl_pmu_read,
860 .task_ctx_nr = perf_invalid_context,
861 .attr_groups = cxl_pmu_attr_groups,
862 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
863 };
864
865 if (info->irq <= 0)
866 return -EINVAL;
867
868 rc = pci_irq_vector(pdev, info->irq);
869 if (rc < 0)
870 return rc;
871 irq = rc;
872
873 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name);
874 if (!irq_name)
875 return -ENOMEM;
876
877 rc = devm_request_irq(dev, irq, cxl_pmu_irq, IRQF_SHARED | IRQF_ONESHOT,
878 irq_name, info);
879 if (rc)
880 return rc;
881 info->irq = irq;
882
883 rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node);
884 if (rc)
885 return rc;
886
887 rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
888 if (rc)
889 return rc;
890
891 rc = perf_pmu_register(&info->pmu, info->pmu.name, -1);
892 if (rc)
893 return rc;
894
895 rc = devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);
896 if (rc)
897 return rc;
898
899 return 0;
900}
901
/* CXL bus driver binding for CPMU register blocks enumerated by the CXL core */
static struct cxl_driver cxl_pmu_driver = {
	.name = "cxl_pmu",
	.probe = cxl_pmu_probe,
	.id = CXL_DEVICE_PMU,
};
907
908static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
909{
910 struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
911
912 if (info->on_cpu != -1)
913 return 0;
914
915 info->on_cpu = cpu;
916 /*
917 * CPU HP lock is held so we should be guaranteed that the CPU hasn't yet
918 * gone away again.
919 */
920 WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu)));
921
922 return 0;
923}
924
/*
 * cpuhp callback: if the departing CPU was this PMU's home CPU, migrate
 * the perf context and interrupt affinity to another online CPU.
 */
static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct cxl_pmu_info *info = hlist_entry_safe(node, struct cxl_pmu_info, node);
	unsigned int target;

	if (info->on_cpu != cpu)
		return 0;

	info->on_cpu = -1;
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids) {
		/* No other CPU available; PMU left without a home CPU */
		dev_err(info->pmu.dev, "Unable to find a suitable CPU\n");
		return 0;
	}

	perf_pmu_migrate_context(&info->pmu, cpu, target);
	info->on_cpu = target;
	/*
	 * CPU HP lock is held so we should be guaranteed that this CPU hasn't yet
	 * gone away.
	 */
	WARN_ON(irq_set_affinity(info->irq, cpumask_of(target)));

	return 0;
}
950
951static __init int cxl_pmu_init(void)
952{
953 int rc;
954
955 rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
956 "AP_PERF_CXL_PMU_ONLINE",
957 cxl_pmu_online_cpu, cxl_pmu_offline_cpu);
958 if (rc < 0)
959 return rc;
960 cxl_pmu_cpuhp_state_num = rc;
961
962 rc = cxl_driver_register(&cxl_pmu_driver);
963 if (rc)
964 cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
965
966 return rc;
967}
968
/* Module exit: unregister the driver, then release the cpuhp state */
static __exit void cxl_pmu_exit(void)
{
	cxl_driver_unregister(&cxl_pmu_driver);
	cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
}
974
975MODULE_LICENSE("GPL");
976MODULE_IMPORT_NS(CXL);
977module_init(cxl_pmu_init);
978module_exit(cxl_pmu_exit);
979MODULE_ALIAS_CXL(CXL_DEVICE_PMU);