// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int armpmu_count_irq_users(const int irq);

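/*
 * Operations used to manage a CPU's PMU interrupt once it has been
 * requested: one implementation per flavour (normal IRQ vs. NMI,
 * per-CPU vs. regular interrupt line).
 */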
struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};

static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
	free_irq(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_irq,
	.disable_pmuirq = disable_irq_nosync,
	.free_pmuirq = armpmu_free_pmuirq
};

static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	free_nmi(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};

static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, devid);
}

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
	.disable_pmuirq = disable_percpu_irq,
	.free_pmuirq = armpmu_free_percpu_pmuirq
};

static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_nmi(irq, devid);
}

static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};

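/*
 * Per-CPU bookkeeping: the PMU that owns this CPU, the interrupt line
 * requested for it, and the pmu_irq_ops used to manage that line.
 */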
static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

static bool has_nmi;

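/* Maximum value the counter for this event can hold, based on its width. */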
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else if (event->hw.flags & ARMPMU_EVT_47BIT)
		return GENMASK_ULL(46, 0);
	else
		return GENMASK_ULL(31, 0);
}

static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

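/*
 * Read the counter and fold the delta since the last read into the event
 * count, masking with max_period so counter wrap-around is handled. The
 * cmpxchg loop retries if prev_count was updated concurrently (e.g. by the
 * overflow handler).
 */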
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

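/*
 * Check whether @event can be scheduled alongside the events already
 * accounted in @hw_events. Software events and events that will never be
 * enabled always validate; events from another PMU never do; anything else
 * must be able to claim a counter.
 */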
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if (armpmu->set_event_filter &&
	    armpmu->set_event_filter(hwc, &event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
	if (ret && armpmu->filter_match)
		return armpmu->filter_match(event);

	return ret;
}

static ssize_t cpus_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR_RO(cpus);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static const struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
	const struct pmu_irq_ops *ops = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
	per_cpu(cpu_irq_ops, cpu) = NULL;
}

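/*
 * Request the PMU interrupt for @cpu, preferring an NMI and falling back to
 * a normal interrupt. Handles both per-CPU interrupt lines and regular lines
 * that are forced to a single CPU.
 */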
int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
			    IRQF_NO_THREAD;

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If we cannot get an NMI, fall back to a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If we cannot get an NMI, fall back to a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* The per-CPU devid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

#ifdef CONFIG_CPU_PM
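/*
 * Stop (and update) or restart every active event on this CPU around a CPU
 * PM transition, depending on @cmd.
 */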
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), flags);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
		.filter_match = armpmu_filter_match,
		.attr_groups = pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter_match callback and pmu::event_init group
		 * validation).
		 */
		.capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

struct arm_pmu *armpmu_alloc(void)
{
	return __armpmu_alloc(GFP_KERNEL);
}

struct arm_pmu *armpmu_alloc_atomic(void)
{
	return __armpmu_alloc(GFP_ATOMIC);
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

int armpmu_register(struct arm_pmu *pmu)
{
	int ret;

	ret = cpu_pmu_init(pmu);
	if (ret)
		return ret;

	if (!pmu->set_event_filter)
		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	if (ret)
		goto out_destroy;

	pr_info("enabled with %s PMU driver, %d counters available%s\n",
		pmu->name, pmu->num_events,
		has_nmi ? ", using NMIs" : "");

	kvm_host_pmu_init(pmu);

	return 0;

out_destroy:
	cpu_pmu_destroy(pmu);
	return ret;
}

static int arm_pmu_hp_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
				      "perf/arm/pmu:starting",
				      arm_perf_starting_cpu,
				      arm_perf_teardown_cpu);
	if (ret)
		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
		       ret);
	return ret;
}
subsys_initcall(arm_pmu_hp_init);