Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: perf: Abstract system register accesses away

As we want to enable 32bit support, we need to decouple the
PMUv3 driver from the AArch64 system register names.

This patch moves all system register accesses to an architecture
specific include file, allowing the 32bit counterpart to be
slotted in at a later time.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Co-developed-by: Zaid Al-Bassam <zalbassam@google.com>
Signed-off-by: Zaid Al-Bassam <zalbassam@google.com>
Tested-by: Florian Fainelli <f.fainelli@gmail.com>
Link: https://lore.kernel.org/r/20230317195027.3746949-3-zalbassam@google.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Marc Zyngier; committed by Will Deacon.
df29ddf4 7755cec6

+205 -92
+137
arch/arm64/include/asm/arm_pmuv3.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + */ 5 + 6 + #ifndef __ASM_PMUV3_H 7 + #define __ASM_PMUV3_H 8 + 9 + #include <asm/cpufeature.h> 10 + #include <asm/sysreg.h> 11 + 12 + #define RETURN_READ_PMEVCNTRN(n) \ 13 + return read_sysreg(pmevcntr##n##_el0) 14 + static unsigned long read_pmevcntrn(int n) 15 + { 16 + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); 17 + return 0; 18 + } 19 + 20 + #define WRITE_PMEVCNTRN(n) \ 21 + write_sysreg(val, pmevcntr##n##_el0) 22 + static void write_pmevcntrn(int n, unsigned long val) 23 + { 24 + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); 25 + } 26 + 27 + #define WRITE_PMEVTYPERN(n) \ 28 + write_sysreg(val, pmevtyper##n##_el0) 29 + static void write_pmevtypern(int n, unsigned long val) 30 + { 31 + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); 32 + } 33 + 34 + static inline unsigned long read_pmmir(void) 35 + { 36 + return read_cpuid(PMMIR_EL1); 37 + } 38 + 39 + static inline u32 read_pmuver(void) 40 + { 41 + u64 dfr0 = read_sysreg(id_aa64dfr0_el1); 42 + 43 + return cpuid_feature_extract_unsigned_field(dfr0, 44 + ID_AA64DFR0_EL1_PMUVer_SHIFT); 45 + } 46 + 47 + static inline void write_pmcr(u32 val) 48 + { 49 + write_sysreg(val, pmcr_el0); 50 + } 51 + 52 + static inline u32 read_pmcr(void) 53 + { 54 + return read_sysreg(pmcr_el0); 55 + } 56 + 57 + static inline void write_pmselr(u32 val) 58 + { 59 + write_sysreg(val, pmselr_el0); 60 + } 61 + 62 + static inline void write_pmccntr(u64 val) 63 + { 64 + write_sysreg(val, pmccntr_el0); 65 + } 66 + 67 + static inline u64 read_pmccntr(void) 68 + { 69 + return read_sysreg(pmccntr_el0); 70 + } 71 + 72 + static inline void write_pmxevcntr(u32 val) 73 + { 74 + write_sysreg(val, pmxevcntr_el0); 75 + } 76 + 77 + static inline u32 read_pmxevcntr(void) 78 + { 79 + return read_sysreg(pmxevcntr_el0); 80 + } 81 + 82 + static inline void write_pmxevtyper(u32 val) 83 + { 84 + write_sysreg(val, pmxevtyper_el0); 85 + } 86 + 87 + static inline void write_pmcntenset(u32 val) 88 + { 89 
+ write_sysreg(val, pmcntenset_el0); 90 + } 91 + 92 + static inline void write_pmcntenclr(u32 val) 93 + { 94 + write_sysreg(val, pmcntenclr_el0); 95 + } 96 + 97 + static inline void write_pmintenset(u32 val) 98 + { 99 + write_sysreg(val, pmintenset_el1); 100 + } 101 + 102 + static inline void write_pmintenclr(u32 val) 103 + { 104 + write_sysreg(val, pmintenclr_el1); 105 + } 106 + 107 + static inline void write_pmccfiltr(u32 val) 108 + { 109 + write_sysreg(val, pmccfiltr_el0); 110 + } 111 + 112 + static inline void write_pmovsclr(u32 val) 113 + { 114 + write_sysreg(val, pmovsclr_el0); 115 + } 116 + 117 + static inline u32 read_pmovsclr(void) 118 + { 119 + return read_sysreg(pmovsclr_el0); 120 + } 121 + 122 + static inline void write_pmuserenr(u32 val) 123 + { 124 + write_sysreg(val, pmuserenr_el0); 125 + } 126 + 127 + static inline u32 read_pmceid0(void) 128 + { 129 + return read_sysreg(pmceid0_el0); 130 + } 131 + 132 + static inline u32 read_pmceid1(void) 133 + { 134 + return read_sysreg(pmceid1_el0); 135 + } 136 + 137 + #endif
+23 -92
drivers/perf/arm_pmuv3.c
··· 10 10 11 11 #include <asm/irq_regs.h> 12 12 #include <asm/perf_event.h> 13 - #include <asm/sysreg.h> 14 13 #include <asm/virt.h> 15 14 16 15 #include <clocksource/arm_arch_timer.h> ··· 23 24 #include <linux/platform_device.h> 24 25 #include <linux/sched_clock.h> 25 26 #include <linux/smp.h> 27 + 28 + #include <asm/arm_pmuv3.h> 26 29 27 30 /* ARMv8 Cortex-A53 specific event types. */ 28 31 #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 ··· 426 425 #define ARMV8_IDX_TO_COUNTER(x) \ 427 426 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) 428 427 429 - /* 430 - * This code is really good 431 - */ 432 - 433 - #define PMEVN_CASE(n, case_macro) \ 434 - case n: case_macro(n); break 435 - 436 - #define PMEVN_SWITCH(x, case_macro) \ 437 - do { \ 438 - switch (x) { \ 439 - PMEVN_CASE(0, case_macro); \ 440 - PMEVN_CASE(1, case_macro); \ 441 - PMEVN_CASE(2, case_macro); \ 442 - PMEVN_CASE(3, case_macro); \ 443 - PMEVN_CASE(4, case_macro); \ 444 - PMEVN_CASE(5, case_macro); \ 445 - PMEVN_CASE(6, case_macro); \ 446 - PMEVN_CASE(7, case_macro); \ 447 - PMEVN_CASE(8, case_macro); \ 448 - PMEVN_CASE(9, case_macro); \ 449 - PMEVN_CASE(10, case_macro); \ 450 - PMEVN_CASE(11, case_macro); \ 451 - PMEVN_CASE(12, case_macro); \ 452 - PMEVN_CASE(13, case_macro); \ 453 - PMEVN_CASE(14, case_macro); \ 454 - PMEVN_CASE(15, case_macro); \ 455 - PMEVN_CASE(16, case_macro); \ 456 - PMEVN_CASE(17, case_macro); \ 457 - PMEVN_CASE(18, case_macro); \ 458 - PMEVN_CASE(19, case_macro); \ 459 - PMEVN_CASE(20, case_macro); \ 460 - PMEVN_CASE(21, case_macro); \ 461 - PMEVN_CASE(22, case_macro); \ 462 - PMEVN_CASE(23, case_macro); \ 463 - PMEVN_CASE(24, case_macro); \ 464 - PMEVN_CASE(25, case_macro); \ 465 - PMEVN_CASE(26, case_macro); \ 466 - PMEVN_CASE(27, case_macro); \ 467 - PMEVN_CASE(28, case_macro); \ 468 - PMEVN_CASE(29, case_macro); \ 469 - PMEVN_CASE(30, case_macro); \ 470 - default: WARN(1, "Invalid PMEV* index\n"); \ 471 - } \ 472 - } while (0) 473 - 474 - #define 
RETURN_READ_PMEVCNTRN(n) \ 475 - return read_sysreg(pmevcntr##n##_el0) 476 - static unsigned long read_pmevcntrn(int n) 477 - { 478 - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); 479 - return 0; 480 - } 481 - 482 - #define WRITE_PMEVCNTRN(n) \ 483 - write_sysreg(val, pmevcntr##n##_el0) 484 - static void write_pmevcntrn(int n, unsigned long val) 485 - { 486 - PMEVN_SWITCH(n, WRITE_PMEVCNTRN); 487 - } 488 - 489 - #define WRITE_PMEVTYPERN(n) \ 490 - write_sysreg(val, pmevtyper##n##_el0) 491 - static void write_pmevtypern(int n, unsigned long val) 492 - { 493 - PMEVN_SWITCH(n, WRITE_PMEVTYPERN); 494 - } 495 - 496 428 static inline u32 armv8pmu_pmcr_read(void) 497 429 { 498 - return read_sysreg(pmcr_el0); 430 + return read_pmcr(); 499 431 } 500 432 501 433 static inline void armv8pmu_pmcr_write(u32 val) 502 434 { 503 435 val &= ARMV8_PMU_PMCR_MASK; 504 436 isb(); 505 - write_sysreg(val, pmcr_el0); 437 + write_pmcr(val); 506 438 } 507 439 508 440 static inline int armv8pmu_has_overflowed(u32 pmovsr) ··· 510 576 u64 value; 511 577 512 578 if (idx == ARMV8_IDX_CYCLE_COUNTER) 513 - value = read_sysreg(pmccntr_el0); 579 + value = read_pmccntr(); 514 580 else 515 581 value = armv8pmu_read_hw_counter(event); 516 582 ··· 545 611 value = armv8pmu_bias_long_counter(event, value); 546 612 547 613 if (idx == ARMV8_IDX_CYCLE_COUNTER) 548 - write_sysreg(value, pmccntr_el0); 614 + write_pmccntr(value); 549 615 else 550 616 armv8pmu_write_hw_counter(event, value); 551 617 } ··· 576 642 armv8pmu_write_evtype(idx, chain_evt); 577 643 } else { 578 644 if (idx == ARMV8_IDX_CYCLE_COUNTER) 579 - write_sysreg(hwc->config_base, pmccfiltr_el0); 645 + write_pmccfiltr(hwc->config_base); 580 646 else 581 647 armv8pmu_write_evtype(idx, hwc->config_base); 582 648 } ··· 599 665 * enable the counter. 
600 666 * */ 601 667 isb(); 602 - write_sysreg(mask, pmcntenset_el0); 668 + write_pmcntenset(mask); 603 669 } 604 670 605 671 static inline void armv8pmu_enable_event_counter(struct perf_event *event) ··· 616 682 617 683 static inline void armv8pmu_disable_counter(u32 mask) 618 684 { 619 - write_sysreg(mask, pmcntenclr_el0); 685 + write_pmcntenclr(mask); 620 686 /* 621 687 * Make sure the effects of disabling the counter are visible before we 622 688 * start configuring the event. ··· 638 704 639 705 static inline void armv8pmu_enable_intens(u32 mask) 640 706 { 641 - write_sysreg(mask, pmintenset_el1); 707 + write_pmintenset(mask); 642 708 } 643 709 644 710 static inline void armv8pmu_enable_event_irq(struct perf_event *event) ··· 649 715 650 716 static inline void armv8pmu_disable_intens(u32 mask) 651 717 { 652 - write_sysreg(mask, pmintenclr_el1); 718 + write_pmintenclr(mask); 653 719 isb(); 654 720 /* Clear the overflow flag in case an interrupt is pending. */ 655 - write_sysreg(mask, pmovsclr_el0); 721 + write_pmovsclr(mask); 656 722 isb(); 657 723 } 658 724 ··· 667 733 u32 value; 668 734 669 735 /* Read */ 670 - value = read_sysreg(pmovsclr_el0); 736 + value = read_pmovsclr(); 671 737 672 738 /* Write to clear flags */ 673 739 value &= ARMV8_PMU_OVSR_MASK; 674 - write_sysreg(value, pmovsclr_el0); 740 + write_pmovsclr(value); 675 741 676 742 return value; 677 743 } 678 744 679 745 static void armv8pmu_disable_user_access(void) 680 746 { 681 - write_sysreg(0, pmuserenr_el0); 747 + write_pmuserenr(0); 682 748 } 683 749 684 750 static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) ··· 689 755 /* Clear any unused counters to avoid leaking their contents */ 690 756 for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) { 691 757 if (i == ARMV8_IDX_CYCLE_COUNTER) 692 - write_sysreg(0, pmccntr_el0); 758 + write_pmccntr(0); 693 759 else 694 760 armv8pmu_write_evcntr(i, 0); 695 761 } 696 762 697 - write_sysreg(0, pmuserenr_el0); 698 - 
write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0); 763 + write_pmuserenr(0); 764 + write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); 699 765 } 700 766 701 767 static void armv8pmu_enable_event(struct perf_event *event) ··· 1079 1145 { 1080 1146 struct armv8pmu_probe_info *probe = info; 1081 1147 struct arm_pmu *cpu_pmu = probe->pmu; 1082 - u64 dfr0; 1083 1148 u64 pmceid_raw[2]; 1084 1149 u32 pmceid[2]; 1085 1150 int pmuver; 1086 1151 1087 - dfr0 = read_sysreg(id_aa64dfr0_el1); 1088 - pmuver = cpuid_feature_extract_unsigned_field(dfr0, 1089 - ID_AA64DFR0_EL1_PMUVer_SHIFT); 1152 + pmuver = read_pmuver(); 1090 1153 if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || 1091 1154 pmuver == ID_AA64DFR0_EL1_PMUVer_NI) 1092 1155 return; ··· 1098 1167 /* Add the CPU cycles counter */ 1099 1168 cpu_pmu->num_events += 1; 1100 1169 1101 - pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0); 1102 - pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0); 1170 + pmceid[0] = pmceid_raw[0] = read_pmceid0(); 1171 + pmceid[1] = pmceid_raw[1] = read_pmceid1(); 1103 1172 1104 1173 bitmap_from_arr32(cpu_pmu->pmceid_bitmap, 1105 1174 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); ··· 1110 1179 bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, 1111 1180 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); 1112 1181 1113 - /* store PMMIR_EL1 register for sysfs */ 1182 + /* store PMMIR register for sysfs */ 1114 1183 if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31))) 1115 - cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); 1184 + cpu_pmu->reg_pmmir = read_pmmir(); 1116 1185 else 1117 1186 cpu_pmu->reg_pmmir = 0; 1118 1187 }
+45
include/linux/perf/arm_pmuv3.h
··· 255 255 #define ARMV8_PMU_BUS_WIDTH_SHIFT 16 256 256 #define ARMV8_PMU_BUS_WIDTH_MASK 0xf 257 257 258 + /* 259 + * This code is really good 260 + */ 261 + 262 + #define PMEVN_CASE(n, case_macro) \ 263 + case n: case_macro(n); break 264 + 265 + #define PMEVN_SWITCH(x, case_macro) \ 266 + do { \ 267 + switch (x) { \ 268 + PMEVN_CASE(0, case_macro); \ 269 + PMEVN_CASE(1, case_macro); \ 270 + PMEVN_CASE(2, case_macro); \ 271 + PMEVN_CASE(3, case_macro); \ 272 + PMEVN_CASE(4, case_macro); \ 273 + PMEVN_CASE(5, case_macro); \ 274 + PMEVN_CASE(6, case_macro); \ 275 + PMEVN_CASE(7, case_macro); \ 276 + PMEVN_CASE(8, case_macro); \ 277 + PMEVN_CASE(9, case_macro); \ 278 + PMEVN_CASE(10, case_macro); \ 279 + PMEVN_CASE(11, case_macro); \ 280 + PMEVN_CASE(12, case_macro); \ 281 + PMEVN_CASE(13, case_macro); \ 282 + PMEVN_CASE(14, case_macro); \ 283 + PMEVN_CASE(15, case_macro); \ 284 + PMEVN_CASE(16, case_macro); \ 285 + PMEVN_CASE(17, case_macro); \ 286 + PMEVN_CASE(18, case_macro); \ 287 + PMEVN_CASE(19, case_macro); \ 288 + PMEVN_CASE(20, case_macro); \ 289 + PMEVN_CASE(21, case_macro); \ 290 + PMEVN_CASE(22, case_macro); \ 291 + PMEVN_CASE(23, case_macro); \ 292 + PMEVN_CASE(24, case_macro); \ 293 + PMEVN_CASE(25, case_macro); \ 294 + PMEVN_CASE(26, case_macro); \ 295 + PMEVN_CASE(27, case_macro); \ 296 + PMEVN_CASE(28, case_macro); \ 297 + PMEVN_CASE(29, case_macro); \ 298 + PMEVN_CASE(30, case_macro); \ 299 + default: WARN(1, "Invalid PMEV* index\n"); \ 300 + } \ 301 + } while (0) 302 + 258 303 #endif