Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/perf/arm_pmu.c at v6.2 · 955 lines · 23 kB

// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static int armpmu_count_irq_users(const int irq);

struct pmu_irq_ops {
	void (*enable_pmuirq)(unsigned int irq);
	void (*disable_pmuirq)(unsigned int irq);
	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
};

static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
{
	free_irq(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmuirq_ops = {
	.enable_pmuirq = enable_irq,
	.disable_pmuirq = disable_irq_nosync,
	.free_pmuirq = armpmu_free_pmuirq
};

static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
{
	free_nmi(irq, per_cpu_ptr(devid, cpu));
}

static const struct pmu_irq_ops pmunmi_ops = {
	.enable_pmuirq = enable_nmi,
	.disable_pmuirq = disable_nmi_nosync,
	.free_pmuirq = armpmu_free_pmunmi
};

static void armpmu_enable_percpu_pmuirq(unsigned int irq)
{
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_irq(irq, devid);
}

static const struct pmu_irq_ops percpu_pmuirq_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
	.disable_pmuirq = disable_percpu_irq,
	.free_pmuirq = armpmu_free_percpu_pmuirq
};

static void armpmu_enable_percpu_pmunmi(unsigned int irq)
{
	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void armpmu_disable_percpu_pmunmi(unsigned int irq)
{
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
}

static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
				      void __percpu *devid)
{
	if (armpmu_count_irq_users(irq) == 1)
		free_percpu_nmi(irq, devid);
}

static const struct pmu_irq_ops percpu_pmunmi_ops = {
	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
	.free_pmuirq = armpmu_free_percpu_pmunmi
};

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);

static bool has_nmi;

static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
	if (event->hw.flags & ARMPMU_EVT_64BIT)
		return GENMASK_ULL(63, 0);
	else if (event->hw.flags & ARMPMU_EVT_47BIT)
		return GENMASK_ULL(46, 0);
	else
		return GENMASK_ULL(31, 0);
}

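/*
 * Decode a PERF_TYPE_HW_CACHE config into a hardware event number via the
 * driver-supplied cache map. The generic perf encoding packs the cache
 * type, operation and result into the low three bytes of attr.config:
 *
 *   config = (result << 16) | (op << 8) | type
 *
 * e.g. L1D read misses: (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) | PERF_COUNT_HW_CACHE_L1D.
 */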
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	if (!cache_map)
		return -ENOENT;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	if (!event_map)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

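/*
 * (Re)program the counter so that it overflows after 'left' events: the
 * hardware counter counts up, so writing (-left) & max_period makes the
 * overflow interrupt fire once 'left' increments have elapsed.
 */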
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	u64 max_period;
	int ret = 0;

	max_period = arm_pmu_event_max_period(event);
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & max_period);

	perf_event_update_userpage(event);

	return ret;
}

u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	u64 max_period = arm_pmu_event_max_period(event);

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	armpmu->clear_event_idx(hw_events, event);
	perf_event_update_userpage(event);
	/* Clear the allocated counter */
	hwc->idx = -1;
}

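/*
 * Allocate a hardware counter for the event on this CPU and install it.
 * The event is added in a stopped state; perf passes PERF_EF_START when
 * it should also be started immediately.
 */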
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return -ENOENT;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu;

	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	armpmu = to_arm_pmu(event->pmu);
	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	if (event == leader)
		return 0;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	if (WARN_ON_ONCE(!armpmu))
		return IRQ_NONE;

	start_clock = sched_clock();
	ret = armpmu->handle_irq(armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

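/*
 * Per-event initialisation: map the generic event onto a hardware event
 * number, apply any mode-exclusion filter, pick a default sample period
 * for counting runs, and validate the event's group against the counters
 * actually available.
 */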
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	hwc->flags = 0;
	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if (armpmu->set_event_filter &&
	    armpmu->set_event_filter(hwc, &event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return validate_group(event);
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	/*
	 * Reject CPU-affine events for CPUs that are of a different class to
	 * that which this PMU handles. Process-following events (where
	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
	 * reject them later (in armpmu_add) if they're scheduled on a
	 * different class of CPU.
	 */
	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	/* For task-bound events we may be called on other CPUs */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return;

	armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static ssize_t cpus_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR_RO(cpus);

static struct attribute *armpmu_common_attrs[] = {
	&dev_attr_cpus.attr,
	NULL,
};

static const struct attribute_group armpmu_common_attr_group = {
	.attrs = armpmu_common_attrs,
};

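/*
 * PMU IRQ bookkeeping: cpu_irq records which IRQ each CPU uses and
 * cpu_irq_ops records how that line was requested (IRQ vs. NMI, percpu or
 * not), so that enable/disable/free on hotplug use the matching helpers.
 */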
static int armpmu_count_irq_users(const int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}

static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
{
	const struct pmu_irq_ops *ops = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (per_cpu(cpu_irq, cpu) != irq)
			continue;

		ops = per_cpu(cpu_irq_ops, cpu);
		if (ops)
			break;
	}

	return ops;
}

void armpmu_free_irq(int irq, int cpu)
{
	if (per_cpu(cpu_irq, cpu) == 0)
		return;
	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
		return;

	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);

	per_cpu(cpu_irq, cpu) = 0;
	per_cpu(cpu_irq_ops, cpu) = NULL;
}

int armpmu_request_irq(int irq, int cpu)
{
	int err = 0;
	const irq_handler_t handler = armpmu_dispatch_irq;
	const struct pmu_irq_ops *irq_ops;

	if (!irq)
		return 0;

	if (!irq_is_percpu_devid(irq)) {
		unsigned long irq_flags;

		err = irq_force_affinity(irq, cpumask_of(cpu));

		if (err && num_possible_cpus() > 1) {
			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
				irq, cpu);
			goto err_out;
		}

		irq_flags = IRQF_PERCPU |
			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
			    IRQF_NO_THREAD;

		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
				  per_cpu_ptr(&cpu_armpmu, cpu));

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_irq(irq, handler, irq_flags, "arm-pmu",
					  per_cpu_ptr(&cpu_armpmu, cpu));
			irq_ops = &pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &pmunmi_ops;
		}
	} else if (armpmu_count_irq_users(irq) == 0) {
		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);

		/* If cannot get an NMI, get a normal interrupt */
		if (err) {
			err = request_percpu_irq(irq, handler, "arm-pmu",
						 &cpu_armpmu);
			irq_ops = &percpu_pmuirq_ops;
		} else {
			has_nmi = true;
			irq_ops = &percpu_pmunmi_ops;
		}
	} else {
		/* Per cpudevid irq was already requested by another CPU */
		irq_ops = armpmu_find_irq_ops(irq);

		if (WARN_ON(!irq_ops))
			err = -EINVAL;
	}

	if (err)
		goto err_out;

	per_cpu(cpu_irq, cpu) = irq;
	per_cpu(cpu_irq_ops, cpu) = irq_ops;
	return 0;

err_out:
	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
	return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
	return per_cpu(hw_events->irq, cpu);
}

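/*
 * The two hotplug callbacks below run on the CPU being brought up or torn
 * down (CPUHP_AP_PERF_ARM_STARTING is registered in arm_pmu_hp_init() at
 * the bottom of this file), so they can safely touch that CPU's PMU
 * registers and per-cpu state.
 */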
/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;
	if (pmu->reset)
		pmu->reset(pmu);

	per_cpu(cpu_armpmu, cpu) = pmu;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);

	return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
	int irq;

	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
		return 0;

	irq = armpmu_get_cpu_irq(pmu, cpu);
	if (irq)
		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);

	per_cpu(cpu_armpmu, cpu) = NULL;

	return 0;
}

#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct perf_event *event;
	int idx;

	for (idx = 0; idx < armpmu->num_events; idx++) {
		event = hw_events->events[idx];
		if (!event)
			continue;

		switch (cmd) {
		case CPU_PM_ENTER:
			/*
			 * Stop and update the counter
			 */
			armpmu_stop(event, PERF_EF_UPDATE);
			break;
		case CPU_PM_EXIT:
		case CPU_PM_ENTER_FAILED:
			/*
			 * Restore and enable the counter.
			 * armpmu_start() indirectly calls
			 *
			 * perf_event_update_userpage()
			 *
			 * that requires RCU read locking to be functional,
			 * wrap the call within RCU_NONIDLE to make the
			 * RCU subsystem aware this cpu is not idle from
			 * an RCU perspective for the armpmu_start() call
			 * duration.
			 */
			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
			break;
		default:
			break;
		}
	}
}

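/*
 * CPU PM notifier: stop and save the counters before the core powers
 * down, then restore and restart them when it comes back (or when the
 * power transition fails), mirroring the hotplug path above.
 */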
static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
			     void *v)
{
	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
		return NOTIFY_DONE;

	/*
	 * Always reset the PMU registers on power-up even if
	 * there are no events running.
	 */
	if (cmd == CPU_PM_EXIT && armpmu->reset)
		armpmu->reset(armpmu);

	if (!enabled)
		return NOTIFY_OK;

	switch (cmd) {
	case CPU_PM_ENTER:
		armpmu->stop(armpmu);
		cpu_pm_pmu_setup(armpmu, cmd);
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		cpu_pm_pmu_setup(armpmu, cmd);
		armpmu->start(armpmu);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;

	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
				       &cpu_pmu->node);
	if (err)
		goto out;

	err = cpu_pm_pmu_register(cpu_pmu);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
out:
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	cpu_pm_pmu_unregister(cpu_pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
					    &cpu_pmu->node);
}

struct arm_pmu *armpmu_alloc(void)
{
	struct arm_pmu *pmu;
	int cpu;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	pmu->pmu = (struct pmu) {
		.pmu_enable = armpmu_enable,
		.pmu_disable = armpmu_disable,
		.event_init = armpmu_event_init,
		.add = armpmu_add,
		.del = armpmu_del,
		.start = armpmu_start,
		.stop = armpmu_stop,
		.read = armpmu_read,
		.filter = armpmu_filter,
		.attr_groups = pmu->attr_groups,
		/*
		 * This is a CPU PMU potentially in a heterogeneous
		 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
		 * and we have taken ctx sharing into account (e.g. with our
		 * pmu::filter callback and pmu::event_init group validation).
		 */
		.capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
	};

	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
		&armpmu_common_attr_group;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events;

		events = per_cpu_ptr(pmu->hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = pmu;
	}

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

void armpmu_free(struct arm_pmu *pmu)
{
	free_percpu(pmu->hw_events);
	kfree(pmu);
}

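/*
 * Final registration step for a driver-initialised arm_pmu: hook into CPU
 * hotplug and CPU PM, advertise PERF_PMU_CAP_NO_EXCLUDE when the driver
 * cannot filter by mode, then hand the PMU to the core perf code.
 */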
", using NMIs" : ""); 932 933 kvm_host_pmu_init(pmu); 934 935 return 0; 936 937out_destroy: 938 cpu_pmu_destroy(pmu); 939 return ret; 940} 941 942static int arm_pmu_hp_init(void) 943{ 944 int ret; 945 946 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING, 947 "perf/arm/pmu:starting", 948 arm_perf_starting_cpu, 949 arm_perf_teardown_cpu); 950 if (ret) 951 pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", 952 ret); 953 return ret; 954} 955subsys_initcall(arm_pmu_hp_init);