Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

perf/arm_pmuv3: Add PMUv3.9 per counter EL0 access control

Armv8.9/9.4 PMUv3.9 adds per-counter EL0 access controls. Per-counter
access is enabled with the UEN bit in the PMUSERENR_EL0 register.
Individual counters are enabled/disabled in the PMUACR_EL1 register.
When UEN is set, the CR/ER bits instead control EL0 write access and
must be set to disable write access, leaving the enabled counters
read-only at EL0.
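
A minimal sketch of that programming model, assuming a bare-metal EL1
context (the PMUACR_EL1 encoding matches the sysreg entry added below;
the helper name and bit macros are illustrative, not the kernel's API):

#include <stdint.h>

#define PMUSERENR_CR	(1ULL << 2)	/* cycle counter: with UEN, gates EL0 writes */
#define PMUSERENR_ER	(1ULL << 3)	/* event counters: with UEN, gates EL0 writes */
#define PMUSERENR_UEN	(1ULL << 4)	/* enable per-counter EL0 controls */

/* Grant EL0 read-only access to event counter n (n = 0..30; bit 31
 * covers the cycle counter, bit 32 the instruction counter). */
static inline void el0_enable_counter_read(unsigned int n)
{
	uint64_t acr = 1ULL << n;	/* PMUACR_EL1.P<n> */

	/* PMUACR_EL1 is encoded op0=3, op1=0, CRn=9, CRm=14, op2=4 */
	asm volatile("msr s3_0_c9_c14_4, %0" : : "r"(acr));
	/*
	 * With UEN set, CR/ER control EL0 *write* access, so setting
	 * them keeps the enabled counters read-only at EL0.
	 */
	asm volatile("msr pmuserenr_el0, %0"
		     : : "r"(PMUSERENR_UEN | PMUSERENR_ER | PMUSERENR_CR));
	asm volatile("isb");
}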

With these access controls in place, the clearing of unused counters
(done to keep their contents from leaking to EL0) can be skipped.

KVM also configures PMUSERENR_EL0 in order to trap to EL2. UEN does not
need to be set there, since only PMUv3.5 is exposed to guests.

Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Link: https://lore.kernel.org/r/20241002184326.1105499-1-robh@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Rob Herring (Arm), committed by Will Deacon
0bbff9ed 759b5fc6

5 files changed: +44 -10
arch/arm/include/asm/arm_pmuv3.h (+6)

···
 #define ARMV8_PMU_DFR_VER_V3P1		0x4
 #define ARMV8_PMU_DFR_VER_V3P4		0x5
 #define ARMV8_PMU_DFR_VER_V3P5		0x6
+#define ARMV8_PMU_DFR_VER_V3P9		0x9
 #define ARMV8_PMU_DFR_VER_IMP_DEF	0xF

 static inline bool pmuv3_implemented(int pmuver)
···
 static inline bool is_pmuv3p5(int pmuver)
 {
 	return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+static inline bool is_pmuv3p9(int pmuver)
+{
+	return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
 }

 static inline u64 read_pmceid0(void)
arch/arm64/include/asm/arm_pmuv3.h (+10)

···
 	write_sysreg(val, pmuserenr_el0);
 }

+static inline void write_pmuacr(u64 val)
+{
+	write_sysreg_s(val, SYS_PMUACR_EL1);
+}
+
 static inline u64 read_pmceid0(void)
 {
 	return read_sysreg(pmceid0_el0);
···
 static inline bool is_pmuv3p5(int pmuver)
 {
 	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
+}
+
+static inline bool is_pmuv3p9(int pmuver)
+{
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P9;
 }

 #endif
arch/arm64/tools/sysreg (+8)

···
 		0b0110	V3P5
 		0b0111	V3P7
 		0b1000	V3P8
+		0b1001	V3P9
 		0b1111	IMP_DEF
 	EndEnum
 	UnsignedEnum	7:4	TraceVer
···
 Field	5	F
 Field	4	P
 Field	3:0	ALIGN
+EndSysreg
+
+Sysreg	PMUACR_EL1	3	0	9	14	4
+Res0	63:33
+Field	32	F0
+Field	31	C
+Field	30:0	P
 EndSysreg

 Sysreg	PMSELR_EL0	3	3	9	12	5
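
The PMUACR_EL1 layout lines up with the driver's counter indexing
(event counters in P bits 0-30, the cycle counter at bit 31, the
instruction counter at bit 32), which is what lets the driver change
below build the register value by OR-ing BIT(i) directly. An
illustration (macro names are mine, not the kernel's):

#include <stdint.h>

#define PMUACR_P(n)	(1ULL << (n))	/* event counter n, n = 0..30 */
#define PMUACR_C	(1ULL << 31)	/* cycle counter */
#define PMUACR_F0	(1ULL << 32)	/* instruction counter */

/* e.g. EL0 may access event counter 2 and the cycle counter: */
uint64_t acr = PMUACR_P(2) | PMUACR_C;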
drivers/perf/arm_pmuv3.c (+19 -10)

···
 	int i;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);

-	/* Clear any unused counters to avoid leaking their contents */
-	for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
-			    ARMPMU_MAX_HWEVENTS) {
-		if (i == ARMV8_PMU_CYCLE_IDX)
-			write_pmccntr(0);
-		else if (i == ARMV8_PMU_INSTR_IDX)
-			write_pmicntr(0);
-		else
-			armv8pmu_write_evcntr(i, 0);
+	if (is_pmuv3p9(cpu_pmu->pmuver)) {
+		u64 mask = 0;
+		for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
+			if (armv8pmu_event_has_user_read(cpuc->events[i]))
+				mask |= BIT(i);
+		}
+		write_pmuacr(mask);
+	} else {
+		/* Clear any unused counters to avoid leaking their contents */
+		for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
+				    ARMPMU_MAX_HWEVENTS) {
+			if (i == ARMV8_PMU_CYCLE_IDX)
+				write_pmccntr(0);
+			else if (i == ARMV8_PMU_INSTR_IDX)
+				write_pmicntr(0);
+			else
+				armv8pmu_write_evcntr(i, 0);
+		}
 	}

-	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
+	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_UEN);
 }

 static void armv8pmu_enable_event(struct perf_event *event)
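
From userspace, the effect shows up through perf's self-monitoring
interface. A sketch, assuming the arm64 PMU's rdpmc format flag
(config1 bit 1, see Documentation/arch/arm64/perf.rst), that the event
lands in the cycle counter, and with the mmap seqlock retry loop and
error handling elided for brevity:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.config1	= 1 << 1,	/* rdpmc: request EL0 counter access */
		.exclude_kernel	= 1,
	};
	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	struct perf_event_mmap_page *pc = mmap(NULL, getpagesize(),
					       PROT_READ, MAP_SHARED, fd, 0);
	uint64_t cycles;

	if (fd < 0 || pc == MAP_FAILED || !pc->cap_user_rdpmc || !pc->index)
		return 1;	/* kernel did not grant EL0 access */

	/* On a PMUv3.9 CPU only this event's PMUACR_EL1 bit is set, so
	 * this read succeeds while every other counter still traps. */
	asm volatile("mrs %0, pmccntr_el0" : "=r"(cycles));
	printf("cycles so far: %" PRIu64 "\n", cycles);
	return 0;
}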
include/linux/perf/arm_pmuv3.h (+1)

···
 #define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
 #define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
 #define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_UEN	(1 << 4) /* Fine grained per counter access at EL0 */
 /* Mask for writable bits */
 #define ARMV8_PMU_USERENR_MASK	(ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
 				 ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)