Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * HiSilicon SoC DDRC uncore Hardware event counters support
4 *
5 * Copyright (C) 2017 HiSilicon Limited
6 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
7 * Anurup M <anurup.m@huawei.com>
8 *
9 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
10 */
11#include <linux/acpi.h>
12#include <linux/bug.h>
13#include <linux/cpuhotplug.h>
14#include <linux/interrupt.h>
15#include <linux/irq.h>
16#include <linux/list.h>
17#include <linux/smp.h>
18
19#include "hisi_uncore_pmu.h"
20
/* DDRC register definition in v1 */
#define DDRC_PERF_CTRL		0x010
#define DDRC_FLUX_WR		0x380
#define DDRC_FLUX_RD		0x384
#define DDRC_FLUX_WCMD		0x388
#define DDRC_FLUX_RCMD		0x38c
#define DDRC_PRE_CMD		0x3c0
#define DDRC_ACT_CMD		0x3c4
#define DDRC_RNK_CHG		0x3cc
#define DDRC_RW_CHG		0x3d0
#define DDRC_EVENT_CTRL		0x6C0
#define DDRC_INT_MASK		0x6c8
#define DDRC_INT_STATUS		0x6cc
#define DDRC_INT_CLEAR		0x6d0
#define DDRC_VERSION		0x710

/* DDRC register definition in v2 */
#define DDRC_V2_INT_MASK	0x528
#define DDRC_V2_INT_STATUS	0x52c
#define DDRC_V2_INT_CLEAR	0x530
#define DDRC_V2_EVENT_CNT	0xe00
#define DDRC_V2_EVENT_CTRL	0xe70
#define DDRC_V2_EVENT_TYPE	0xe74
#define DDRC_V2_PERF_CTRL	0xeA0

/* DDRC interrupt registers definition in v3 */
#define DDRC_V3_INT_MASK	0x534
#define DDRC_V3_INT_STATUS	0x538
#define DDRC_V3_INT_CLEAR	0x53C

/* DDRC has 8-counters */
#define DDRC_NR_COUNTERS	0x8
/* Enable bit(s) within the per-version PERF_CTRL register */
#define DDRC_V1_PERF_CTRL_EN	0x2
#define DDRC_V2_PERF_CTRL_EN	0x1
/* Maximum valid event code per version (used for event validation) */
#define DDRC_V1_NR_EVENTS	0x7
#define DDRC_V2_NR_EVENTS	0xFF

/* v2+: 64-bit counters at stride 8, 32-bit type registers at stride 4 */
#define DDRC_EVENT_CNTn(base, n)	((base) + (n) * 8)
#define DDRC_EVENT_TYPEn(base, n)	((base) + (n) * 4)
/* Sentinel offset marking a register this hardware version lacks */
#define DDRC_UNIMPLEMENTED_REG		GENMASK(31, 0)
61
62/*
63 * For PMU v1, there are eight-events and every event has been mapped
64 * to fixed-purpose counters which register offset is not consistent.
65 * Therefore there is no write event type and we assume that event
66 * code (0 to 7) is equal to counter index in PMU driver.
67 */
68#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7)
69
70static const u32 ddrc_reg_off[] = {
71 DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD,
72 DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
73};
74
/*
 * Per-version register layout of the DDRC PMU. Offsets a given hardware
 * version does not implement are set to DDRC_UNIMPLEMENTED_REG, which the
 * accessors below test to pick the v1 fixed-purpose-counter path.
 */
struct hisi_ddrc_pmu_regs {
	u32 event_cnt;		/* base of the event counter bank (v2+) */
	u32 event_ctrl;		/* per-counter enable bits */
	u32 event_type;		/* base of the event select registers (v2+) */
	u32 perf_ctrl;		/* global PMU control register */
	u32 perf_ctrl_en;	/* enable bit(s) within perf_ctrl */
	u32 int_mask;		/* overflow interrupt mask (set bit = masked) */
	u32 int_clear;		/* overflow interrupt clear (write 1 to clear) */
	u32 int_status;		/* overflow interrupt status */
};
85
86static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
87 struct hw_perf_event *hwc)
88{
89 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
90
91 if (regs->event_cnt == DDRC_UNIMPLEMENTED_REG)
92 return readl(ddrc_pmu->base + ddrc_reg_off[hwc->idx]);
93
94 return readq(ddrc_pmu->base + DDRC_EVENT_CNTn(regs->event_cnt, hwc->idx));
95}
96
97static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
98 struct hw_perf_event *hwc, u64 val)
99{
100 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
101
102 if (regs->event_cnt == DDRC_UNIMPLEMENTED_REG)
103 writel((u32)val, ddrc_pmu->base + ddrc_reg_off[hwc->idx]);
104 else
105 writeq(val, ddrc_pmu->base + DDRC_EVENT_CNTn(regs->event_cnt, hwc->idx));
106}
107
108/*
109 * For DDRC PMU v1, event has been mapped to fixed-purpose counter by hardware,
110 * so there is no need to write event type, while it is programmable counter in
111 * PMU v2.
112 */
113static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
114 u32 type)
115{
116 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
117
118 if (regs->event_type == DDRC_UNIMPLEMENTED_REG)
119 return;
120
121 writel(type, ddrc_pmu->base + DDRC_EVENT_TYPEn(regs->event_type, idx));
122}
123
124static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
125{
126 struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
127 unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask;
128 struct hw_perf_event *hwc = &event->hw;
129 /* For DDRC PMU, we use event code as counter index */
130 int idx = GET_DDRC_EVENTID(hwc);
131
132 if (test_bit(idx, used_mask))
133 return -EAGAIN;
134
135 set_bit(idx, used_mask);
136
137 return idx;
138}
139
140static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
141{
142 struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
143 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
144
145 if (regs->event_type == DDRC_UNIMPLEMENTED_REG)
146 return hisi_ddrc_pmu_v1_get_event_idx(event);
147
148 return hisi_uncore_pmu_get_event_idx(event);
149}
150
151static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
152{
153 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
154 u32 val;
155
156 val = readl(ddrc_pmu->base + regs->perf_ctrl);
157 val |= regs->perf_ctrl_en;
158 writel(val, ddrc_pmu->base + regs->perf_ctrl);
159}
160
161static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
162{
163 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
164 u32 val;
165
166 val = readl(ddrc_pmu->base + regs->perf_ctrl);
167 val &= ~regs->perf_ctrl_en;
168 writel(val, ddrc_pmu->base + regs->perf_ctrl);
169}
170
171static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
172 struct hw_perf_event *hwc)
173{
174 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
175 u32 val;
176
177 val = readl(ddrc_pmu->base + regs->event_ctrl);
178 val |= BIT_ULL(hwc->idx);
179 writel(val, ddrc_pmu->base + regs->event_ctrl);
180}
181
182static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
183 struct hw_perf_event *hwc)
184{
185 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
186 u32 val;
187
188 val = readl(ddrc_pmu->base + regs->event_ctrl);
189 val &= ~BIT_ULL(hwc->idx);
190 writel(val, ddrc_pmu->base + regs->event_ctrl);
191}
192
193static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
194 struct hw_perf_event *hwc)
195{
196 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
197 u32 val;
198
199 val = readl(ddrc_pmu->base + regs->int_mask);
200 val &= ~BIT_ULL(hwc->idx);
201 writel(val, ddrc_pmu->base + regs->int_mask);
202}
203
204static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
205 struct hw_perf_event *hwc)
206{
207 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
208 u32 val;
209
210 val = readl(ddrc_pmu->base + regs->int_mask);
211 val |= BIT_ULL(hwc->idx);
212 writel(val, ddrc_pmu->base + regs->int_mask);
213}
214
215static u32 hisi_ddrc_pmu_get_int_status(struct hisi_pmu *ddrc_pmu)
216{
217 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
218
219 return readl(ddrc_pmu->base + regs->int_status);
220}
221
222static void hisi_ddrc_pmu_clear_int_status(struct hisi_pmu *ddrc_pmu,
223 int idx)
224{
225 struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
226
227 writel(1 << idx, ddrc_pmu->base + regs->int_clear);
228}
229
/*
 * Discover one DDRC PMU instance: topology IDs from firmware properties,
 * the per-version register layout via ACPI match data, the MMIO base and
 * the hardware version register.
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
				   struct hisi_pmu *ddrc_pmu)
{
	hisi_uncore_pmu_init_topology(ddrc_pmu, &pdev->dev);

	/*
	 * Use the SCCL_ID and DDRC channel ID to identify the
	 * DDRC PMU, while SCCL_ID is in MPIDR[aff2].
	 */
	if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
				     &ddrc_pmu->topo.index_id)) {
		dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
		return -EINVAL;
	}

	/* sccl_id should have been set by init_topology() above; < 0 means missing */
	if (ddrc_pmu->topo.sccl_id < 0) {
		dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
		return -EINVAL;
	}

	/* Version-specific register offsets and sysfs attribute groups */
	ddrc_pmu->dev_info = device_get_match_data(&pdev->dev);
	if (!ddrc_pmu->dev_info)
		return -ENODEV;

	ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ddrc_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n");
		return PTR_ERR(ddrc_pmu->base);
	}

	ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
	if (ddrc_pmu->identifier >= HISI_PMU_V2) {
		/* v2+ additionally needs a sub-id to build a unique PMU name */
		if (ddrc_pmu->topo.sub_id < 0) {
			dev_err(&pdev->dev, "Can not read sub-id!\n");
			return -EINVAL;
		}
	}

	return 0;
}
270
/* sysfs "format" group, v1: event code occupies config bits 0-4 */
static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-4"),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_v1_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_v1_format_attr,
};

/* sysfs "format" group, v2+: event code occupies config bits 0-7 */
static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_v2_format_group = {
	.name = "format",
	.attrs = hisi_ddrc_pmu_v2_format_attr,
};
290
/*
 * sysfs "events" group, v1: event codes 0-7 match the fixed-purpose
 * counter registers listed in ddrc_reg_off[].
 */
static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = {
	HISI_PMU_EVENT_ATTR(flux_wr, 0x00),
	HISI_PMU_EVENT_ATTR(flux_rd, 0x01),
	HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02),
	HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03),
	HISI_PMU_EVENT_ATTR(pre_cmd, 0x04),
	HISI_PMU_EVENT_ATTR(act_cmd, 0x05),
	HISI_PMU_EVENT_ATTR(rnk_chg, 0x06),
	HISI_PMU_EVENT_ATTR(rw_chg, 0x07),
	NULL,
};

static const struct attribute_group hisi_ddrc_pmu_v1_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_v1_events_attr,
};

/* sysfs "events" group, v2+ (also reused by v3) */
static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = {
	HISI_PMU_EVENT_ATTR(cycles, 0x00),
	HISI_PMU_EVENT_ATTR(flux_wr, 0x83),
	HISI_PMU_EVENT_ATTR(flux_rd, 0x84),
	NULL
};

static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
	.name = "events",
	.attrs = hisi_ddrc_pmu_v2_events_attr,
};
319
/* Complete sysfs attribute group list per hardware version */
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
	&hisi_ddrc_pmu_v1_format_group,
	&hisi_ddrc_pmu_v1_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL,
};

static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
	&hisi_ddrc_pmu_v2_format_group,
	&hisi_ddrc_pmu_v2_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};
335
/* Callbacks plugged into the common hisi_uncore_pmu framework */
static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
	.write_evtype		= hisi_ddrc_pmu_write_evtype,
	.get_event_idx		= hisi_ddrc_pmu_get_event_idx,
	.start_counters		= hisi_ddrc_pmu_start_counters,
	.stop_counters		= hisi_ddrc_pmu_stop_counters,
	.enable_counter		= hisi_ddrc_pmu_enable_counter,
	.disable_counter	= hisi_ddrc_pmu_disable_counter,
	.enable_counter_int	= hisi_ddrc_pmu_enable_counter_int,
	.disable_counter_int	= hisi_ddrc_pmu_disable_counter_int,
	.write_counter		= hisi_ddrc_pmu_write_counter,
	.read_counter		= hisi_ddrc_pmu_read_counter,
	.get_int_status		= hisi_ddrc_pmu_get_int_status,
	.clear_int_status	= hisi_ddrc_pmu_clear_int_status,
};
350
351static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
352 struct hisi_pmu *ddrc_pmu)
353{
354 int ret;
355
356 ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu);
357 if (ret)
358 return ret;
359
360 ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev);
361 if (ret)
362 return ret;
363
364 ddrc_pmu->pmu_events.attr_groups = ddrc_pmu->dev_info->attr_groups;
365 ddrc_pmu->counter_bits = ddrc_pmu->dev_info->counter_bits;
366 ddrc_pmu->check_event = ddrc_pmu->dev_info->check_event;
367 ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
368 ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
369 ddrc_pmu->dev = &pdev->dev;
370 ddrc_pmu->on_cpu = -1;
371
372 return 0;
373}
374
/*
 * Platform driver probe: allocate and initialise the PMU instance, build
 * its unique name from the topology IDs, attach it to the CPU hotplug
 * state, and register it with perf.  On perf registration failure the
 * hotplug instance is detached again; devm handles the rest.
 */
static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *ddrc_pmu;
	char *name;
	int ret;

	ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL);
	if (!ddrc_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, ddrc_pmu);

	ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu);
	if (ret)
		return ret;

	/* v2+ names carry an extra sub-id to disambiguate instances */
	if (ddrc_pmu->identifier >= HISI_PMU_V2)
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
				      "hisi_sccl%d_ddrc%d_%d",
				      ddrc_pmu->topo.sccl_id, ddrc_pmu->topo.index_id,
				      ddrc_pmu->topo.sub_id);
	else
		name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
				      "hisi_sccl%d_ddrc%d", ddrc_pmu->topo.sccl_id,
				      ddrc_pmu->topo.index_id);

	if (!name)
		return -ENOMEM;

	/* Hotplug instance must be live before perf can migrate the context */
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				       &ddrc_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret);
		return ret;
	}

	hisi_pmu_init(ddrc_pmu, THIS_MODULE);

	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
	if (ret) {
		dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n");
		/* Undo the hotplug attachment made above */
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node);
	}

	return ret;
}
422
423static void hisi_ddrc_pmu_remove(struct platform_device *pdev)
424{
425 struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
426
427 perf_pmu_unregister(&ddrc_pmu->pmu);
428 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
429 &ddrc_pmu->node);
430}
431
/*
 * v1: fixed-purpose 32-bit counters only — no counter bank and no
 * event-type registers, hence the DDRC_UNIMPLEMENTED_REG sentinels.
 */
static struct hisi_ddrc_pmu_regs hisi_ddrc_v1_pmu_regs = {
	.event_cnt	= DDRC_UNIMPLEMENTED_REG,
	.event_ctrl	= DDRC_EVENT_CTRL,
	.event_type	= DDRC_UNIMPLEMENTED_REG,
	.perf_ctrl	= DDRC_PERF_CTRL,
	.perf_ctrl_en	= DDRC_V1_PERF_CTRL_EN,
	.int_mask	= DDRC_INT_MASK,
	.int_clear	= DDRC_INT_CLEAR,
	.int_status	= DDRC_INT_STATUS,
};

static const struct hisi_pmu_dev_info hisi_ddrc_v1 = {
	.counter_bits	= 32,
	.check_event	= DDRC_V1_NR_EVENTS,
	.attr_groups	= hisi_ddrc_pmu_v1_attr_groups,
	.private	= &hisi_ddrc_v1_pmu_regs,
};
449
/* v2: programmable 48-bit counters with a banked counter/type layout */
static struct hisi_ddrc_pmu_regs hisi_ddrc_v2_pmu_regs = {
	.event_cnt	= DDRC_V2_EVENT_CNT,
	.event_ctrl	= DDRC_V2_EVENT_CTRL,
	.event_type	= DDRC_V2_EVENT_TYPE,
	.perf_ctrl	= DDRC_V2_PERF_CTRL,
	.perf_ctrl_en	= DDRC_V2_PERF_CTRL_EN,
	.int_mask	= DDRC_V2_INT_MASK,
	.int_clear	= DDRC_V2_INT_CLEAR,
	.int_status	= DDRC_V2_INT_STATUS,
};

static const struct hisi_pmu_dev_info hisi_ddrc_v2 = {
	.counter_bits	= 48,
	.check_event	= DDRC_V2_NR_EVENTS,
	.attr_groups	= hisi_ddrc_pmu_v2_attr_groups,
	.private	= &hisi_ddrc_v2_pmu_regs,
};
467
/* v3: identical to v2 except for relocated interrupt registers */
static struct hisi_ddrc_pmu_regs hisi_ddrc_v3_pmu_regs = {
	.event_cnt	= DDRC_V2_EVENT_CNT,
	.event_ctrl	= DDRC_V2_EVENT_CTRL,
	.event_type	= DDRC_V2_EVENT_TYPE,
	.perf_ctrl	= DDRC_V2_PERF_CTRL,
	.perf_ctrl_en	= DDRC_V2_PERF_CTRL_EN,
	.int_mask	= DDRC_V3_INT_MASK,
	.int_clear	= DDRC_V3_INT_CLEAR,
	.int_status	= DDRC_V3_INT_STATUS,
};

static const struct hisi_pmu_dev_info hisi_ddrc_v3 = {
	.counter_bits	= 48,
	.check_event	= DDRC_V2_NR_EVENTS,
	.attr_groups	= hisi_ddrc_pmu_v2_attr_groups,
	.private	= &hisi_ddrc_v3_pmu_regs,
};
485
/* ACPI HID -> per-version device info used by device_get_match_data() */
static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
	{ "HISI0233", (kernel_ulong_t)&hisi_ddrc_v1 },
	{ "HISI0234", (kernel_ulong_t)&hisi_ddrc_v2 },
	{ "HISI0235", (kernel_ulong_t)&hisi_ddrc_v3 },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);

static struct platform_driver hisi_ddrc_pmu_driver = {
	.driver = {
		.name = "hisi_ddrc_pmu",
		.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
		/* PMU state cannot survive manual unbind via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_ddrc_pmu_probe,
	.remove = hisi_ddrc_pmu_remove,
};
503
/*
 * Module init: register the multi-instance CPU hotplug state shared by
 * all DDRC PMU instances, then the platform driver.  The hotplug state
 * is removed again if driver registration fails.
 */
static int __init hisi_ddrc_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
				      "AP_PERF_ARM_HISI_DDRC_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_ddrc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);

	return ret;
}
module_init(hisi_ddrc_pmu_module_init);
524
/* Module exit: unregister the driver, then drop the hotplug state. */
static void __exit hisi_ddrc_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_ddrc_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
}
module_exit(hisi_ddrc_pmu_module_exit);

MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");