perf: add qcom l2 cache perf events driver

Adds perf events support for L2 cache PMU.

The L2 cache PMU driver is named 'l2cache_0' and can be used
with perf events to profile L2 events such as cache hits
and misses on Qualcomm Technologies processors.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Neil Leeder <nleeder@codeaurora.org>
[will: minimise nesting in l2_cache_associate_cpu_with_cluster]
[will: use kstrtoul for unsigned long, remove redundant .owner setting]
Signed-off-by: Will Deacon <will.deacon@arm.com>

Authored by Neil Leeder and committed by Will Deacon
21bdbb71 fe0a7ef7

+1062
+38
Documentation/perf/qcom_l2_pmu.txt
+ Qualcomm Technologies Level-2 Cache Performance Monitoring Unit (PMU)
+ =====================================================================
+
+ This driver supports the L2 cache clusters found in Qualcomm Technologies
+ Centriq SoCs. There are multiple physical L2 cache clusters, each with their
+ own PMU. Each cluster has one or more CPUs associated with it.
+
+ There is one logical L2 PMU exposed, which aggregates the results from
+ the physical PMUs.
+
+ The driver provides a description of its available events and configuration
+ options in sysfs, see /sys/devices/l2cache_0.
+
+ The "format" directory describes the format of the events.
+
+ Events can be envisioned as a 2-dimensional array. Each column represents
+ a group of events. There are 8 groups. Only one entry from each
+ group can be in use at a time. If multiple events from the same group
+ are specified, the conflicting events cannot be counted at the same time.
+
+ Events are specified as 0xCCG, where CC is 2 hex digits specifying
+ the code (array row) and G specifies the group (column) 0-7.
+
+ In addition there is a cycle counter event specified by the value 0xFE
+ which is outside the above scheme.
+
+ The driver provides a "cpumask" sysfs attribute which contains a mask
+ consisting of one CPU per cluster which will be used to handle all the PMU
+ events on that cluster.
+
+ Examples for use with perf:
+
+   perf stat -e l2cache_0/config=0x001/,l2cache_0/config=0x042/ -a sleep 1
+
+   perf stat -e l2cache_0/config=0xfe/ -C 2 sleep 1
+
+ The driver does not support sampling, therefore "perf record" will
+ not work. Per-task perf sessions are not supported.
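For illustration only (not part of the patch): a minimal C sketch of the 0xCCG encoding described above, using the same code/group bit layout the driver exposes through the "format" directory (config:4-11 for the event code, config:0-3 for the group). The macro names here are invented for the example; the driver's own equivalents are L2_EVT_CODE() and L2_EVT_GROUP().

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the driver's L2_EVT_CODE/L2_EVT_GROUP macros. */
#define L2_EVT_CONFIG(code, group)	(((code) << 4) | ((group) & 0xF))
#define L2_EVT_CODE(config)		(((config) & 0xFF0) >> 4)
#define L2_EVT_GROUP(config)		((config) & 0xF)

int main(void)
{
	/* Code 0x04 in group 2, i.e. the 0x042 event from the example above. */
	uint32_t config = L2_EVT_CONFIG(0x04, 2);

	printf("config=0x%03x code=0x%02x group=%u\n",
	       config, L2_EVT_CODE(config), L2_EVT_GROUP(config));
	return 0;
}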
+9
drivers/perf/Kconfig
      Say y if you want to use CPU performance monitors on ARM-based
      systems.

+ config QCOM_L2_PMU
+ 	bool "Qualcomm Technologies L2-cache PMU"
+ 	depends on ARCH_QCOM && ARM64 && PERF_EVENTS && ACPI
+ 	help
+ 	  Provides support for the L2 cache performance monitor unit (PMU)
+ 	  in Qualcomm Technologies processors.
+ 	  Adds the L2 cache PMU into the perf events subsystem for
+ 	  monitoring L2 cache events.
+
  config XGENE_PMU
  	depends on PERF_EVENTS && ARCH_XGENE
  	bool "APM X-Gene SoC PMU"
+1
drivers/perf/Makefile
  obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
  obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+1013
drivers/perf/qcom_l2_pmu.c
··· 1 + /* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + #include <linux/acpi.h> 13 + #include <linux/bitops.h> 14 + #include <linux/bug.h> 15 + #include <linux/cpuhotplug.h> 16 + #include <linux/cpumask.h> 17 + #include <linux/device.h> 18 + #include <linux/errno.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/irq.h> 21 + #include <linux/kernel.h> 22 + #include <linux/list.h> 23 + #include <linux/percpu.h> 24 + #include <linux/perf_event.h> 25 + #include <linux/platform_device.h> 26 + #include <linux/smp.h> 27 + #include <linux/spinlock.h> 28 + #include <linux/sysfs.h> 29 + #include <linux/types.h> 30 + 31 + #include <asm/barrier.h> 32 + #include <asm/local64.h> 33 + #include <asm/sysreg.h> 34 + 35 + #define MAX_L2_CTRS 9 36 + 37 + #define L2PMCR_NUM_EV_SHIFT 11 38 + #define L2PMCR_NUM_EV_MASK 0x1F 39 + 40 + #define L2PMCR 0x400 41 + #define L2PMCNTENCLR 0x403 42 + #define L2PMCNTENSET 0x404 43 + #define L2PMINTENCLR 0x405 44 + #define L2PMINTENSET 0x406 45 + #define L2PMOVSCLR 0x407 46 + #define L2PMOVSSET 0x408 47 + #define L2PMCCNTCR 0x409 48 + #define L2PMCCNTR 0x40A 49 + #define L2PMCCNTSR 0x40C 50 + #define L2PMRESR 0x410 51 + #define IA_L2PMXEVCNTCR_BASE 0x420 52 + #define IA_L2PMXEVCNTR_BASE 0x421 53 + #define IA_L2PMXEVFILTER_BASE 0x423 54 + #define IA_L2PMXEVTYPER_BASE 0x424 55 + 56 + #define IA_L2_REG_OFFSET 0x10 57 + 58 + #define L2PMXEVFILTER_SUFILTER_ALL 0x000E0000 59 + #define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004 60 + #define L2PMXEVFILTER_ORGFILTER_ALL 0x00000003 61 + 62 + #define L2EVTYPER_REG_SHIFT 3 63 + 64 + #define L2PMRESR_GROUP_BITS 8 65 + #define L2PMRESR_GROUP_MASK GENMASK(7, 0) 66 + 67 + #define L2CYCLE_CTR_BIT 31 68 + #define L2CYCLE_CTR_RAW_CODE 0xFE 69 + 70 + #define L2PMCR_RESET_ALL 0x6 71 + #define L2PMCR_COUNTERS_ENABLE 0x1 72 + #define L2PMCR_COUNTERS_DISABLE 0x0 73 + 74 + #define L2PMRESR_EN BIT_ULL(63) 75 + 76 + #define L2_EVT_MASK 0x00000FFF 77 + #define L2_EVT_CODE_MASK 0x00000FF0 78 + #define L2_EVT_GRP_MASK 0x0000000F 79 + #define L2_EVT_CODE_SHIFT 4 80 + #define L2_EVT_GRP_SHIFT 0 81 + 82 + #define L2_EVT_CODE(event) (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT) 83 + #define L2_EVT_GROUP(event) (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT) 84 + 85 + #define L2_EVT_GROUP_MAX 7 86 + 87 + #define L2_COUNTER_RELOAD BIT_ULL(31) 88 + #define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63) 89 + 90 + #define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) 91 + #define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) 92 + 93 + #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) 94 + 95 + static DEFINE_RAW_SPINLOCK(l2_access_lock); 96 + 97 + /** 98 + * set_l2_indirect_reg: write value to an L2 register 99 + * @reg: Address of L2 register. 100 + * @value: Value to be written to register. 
101 + * 102 + * Use architecturally required barriers for ordering between system register 103 + * accesses 104 + */ 105 + static void set_l2_indirect_reg(u64 reg, u64 val) 106 + { 107 + unsigned long flags; 108 + 109 + raw_spin_lock_irqsave(&l2_access_lock, flags); 110 + write_sysreg_s(reg, L2CPUSRSELR_EL1); 111 + isb(); 112 + write_sysreg_s(val, L2CPUSRDR_EL1); 113 + isb(); 114 + raw_spin_unlock_irqrestore(&l2_access_lock, flags); 115 + } 116 + 117 + /** 118 + * get_l2_indirect_reg: read an L2 register value 119 + * @reg: Address of L2 register. 120 + * 121 + * Use architecturally required barriers for ordering between system register 122 + * accesses 123 + */ 124 + static u64 get_l2_indirect_reg(u64 reg) 125 + { 126 + u64 val; 127 + unsigned long flags; 128 + 129 + raw_spin_lock_irqsave(&l2_access_lock, flags); 130 + write_sysreg_s(reg, L2CPUSRSELR_EL1); 131 + isb(); 132 + val = read_sysreg_s(L2CPUSRDR_EL1); 133 + raw_spin_unlock_irqrestore(&l2_access_lock, flags); 134 + 135 + return val; 136 + } 137 + 138 + struct cluster_pmu; 139 + 140 + /* 141 + * Aggregate PMU. Implements the core pmu functions and manages 142 + * the hardware PMUs. 143 + */ 144 + struct l2cache_pmu { 145 + struct hlist_node node; 146 + u32 num_pmus; 147 + struct pmu pmu; 148 + int num_counters; 149 + cpumask_t cpumask; 150 + struct platform_device *pdev; 151 + struct cluster_pmu * __percpu *pmu_cluster; 152 + struct list_head clusters; 153 + }; 154 + 155 + /* 156 + * The cache is made up of one or more clusters, each cluster has its own PMU. 157 + * Each cluster is associated with one or more CPUs. 158 + * This structure represents one of the hardware PMUs. 159 + * 160 + * Events can be envisioned as a 2-dimensional array. Each column represents 161 + * a group of events. There are 8 groups. Only one entry from each 162 + * group can be in use at a time. 163 + * 164 + * Events are specified as 0xCCG, where CC is 2 hex digits specifying 165 + * the code (array row) and G specifies the group (column). 166 + * 167 + * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE 168 + * which is outside the above scheme. 
169 + */ 170 + struct cluster_pmu { 171 + struct list_head next; 172 + struct perf_event *events[MAX_L2_CTRS]; 173 + struct l2cache_pmu *l2cache_pmu; 174 + DECLARE_BITMAP(used_counters, MAX_L2_CTRS); 175 + DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1); 176 + int irq; 177 + int cluster_id; 178 + /* The CPU that is used for collecting events on this cluster */ 179 + int on_cpu; 180 + /* All the CPUs associated with this cluster */ 181 + cpumask_t cluster_cpus; 182 + spinlock_t pmu_lock; 183 + }; 184 + 185 + #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu)) 186 + 187 + static u32 l2_cycle_ctr_idx; 188 + static u32 l2_counter_present_mask; 189 + 190 + static inline u32 idx_to_reg_bit(u32 idx) 191 + { 192 + if (idx == l2_cycle_ctr_idx) 193 + return BIT(L2CYCLE_CTR_BIT); 194 + 195 + return BIT(idx); 196 + } 197 + 198 + static inline struct cluster_pmu *get_cluster_pmu( 199 + struct l2cache_pmu *l2cache_pmu, int cpu) 200 + { 201 + return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu); 202 + } 203 + 204 + static void cluster_pmu_reset(void) 205 + { 206 + /* Reset all counters */ 207 + set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); 208 + set_l2_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); 209 + set_l2_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); 210 + set_l2_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); 211 + } 212 + 213 + static inline void cluster_pmu_enable(void) 214 + { 215 + set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); 216 + } 217 + 218 + static inline void cluster_pmu_disable(void) 219 + { 220 + set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); 221 + } 222 + 223 + static inline void cluster_pmu_counter_set_value(u32 idx, u64 value) 224 + { 225 + if (idx == l2_cycle_ctr_idx) 226 + set_l2_indirect_reg(L2PMCCNTR, value); 227 + else 228 + set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); 229 + } 230 + 231 + static inline u64 cluster_pmu_counter_get_value(u32 idx) 232 + { 233 + u64 value; 234 + 235 + if (idx == l2_cycle_ctr_idx) 236 + value = get_l2_indirect_reg(L2PMCCNTR); 237 + else 238 + value = get_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); 239 + 240 + return value; 241 + } 242 + 243 + static inline void cluster_pmu_counter_enable(u32 idx) 244 + { 245 + set_l2_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); 246 + } 247 + 248 + static inline void cluster_pmu_counter_disable(u32 idx) 249 + { 250 + set_l2_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); 251 + } 252 + 253 + static inline void cluster_pmu_counter_enable_interrupt(u32 idx) 254 + { 255 + set_l2_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); 256 + } 257 + 258 + static inline void cluster_pmu_counter_disable_interrupt(u32 idx) 259 + { 260 + set_l2_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); 261 + } 262 + 263 + static inline void cluster_pmu_set_evccntcr(u32 val) 264 + { 265 + set_l2_indirect_reg(L2PMCCNTCR, val); 266 + } 267 + 268 + static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val) 269 + { 270 + set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); 271 + } 272 + 273 + static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val) 274 + { 275 + set_l2_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); 276 + } 277 + 278 + static void cluster_pmu_set_resr(struct cluster_pmu *cluster, 279 + u32 event_group, u32 event_cc) 280 + { 281 + u64 field; 282 + u64 resr_val; 283 + u32 shift; 284 + unsigned long flags; 285 + 286 + shift = L2PMRESR_GROUP_BITS * event_group; 287 + field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift); 288 + 289 + 
spin_lock_irqsave(&cluster->pmu_lock, flags); 290 + 291 + resr_val = get_l2_indirect_reg(L2PMRESR); 292 + resr_val &= ~(L2PMRESR_GROUP_MASK << shift); 293 + resr_val |= field; 294 + resr_val |= L2PMRESR_EN; 295 + set_l2_indirect_reg(L2PMRESR, resr_val); 296 + 297 + spin_unlock_irqrestore(&cluster->pmu_lock, flags); 298 + } 299 + 300 + /* 301 + * Hardware allows filtering of events based on the originating 302 + * CPU. Turn this off by setting filter bits to allow events from 303 + * all CPUS, subunits and ID independent events in this cluster. 304 + */ 305 + static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr) 306 + { 307 + u32 val = L2PMXEVFILTER_SUFILTER_ALL | 308 + L2PMXEVFILTER_ORGFILTER_IDINDEP | 309 + L2PMXEVFILTER_ORGFILTER_ALL; 310 + 311 + set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); 312 + } 313 + 314 + static inline u32 cluster_pmu_getreset_ovsr(void) 315 + { 316 + u32 result = get_l2_indirect_reg(L2PMOVSSET); 317 + 318 + set_l2_indirect_reg(L2PMOVSCLR, result); 319 + return result; 320 + } 321 + 322 + static inline bool cluster_pmu_has_overflowed(u32 ovsr) 323 + { 324 + return !!(ovsr & l2_counter_present_mask); 325 + } 326 + 327 + static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx) 328 + { 329 + return !!(ovsr & idx_to_reg_bit(idx)); 330 + } 331 + 332 + static void l2_cache_event_update(struct perf_event *event) 333 + { 334 + struct hw_perf_event *hwc = &event->hw; 335 + u64 delta, prev, now; 336 + u32 idx = hwc->idx; 337 + 338 + do { 339 + prev = local64_read(&hwc->prev_count); 340 + now = cluster_pmu_counter_get_value(idx); 341 + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); 342 + 343 + /* 344 + * The cycle counter is 64-bit, but all other counters are 345 + * 32-bit, and we must handle 32-bit overflow explicitly. 346 + */ 347 + delta = now - prev; 348 + if (idx != l2_cycle_ctr_idx) 349 + delta &= 0xffffffff; 350 + 351 + local64_add(delta, &event->count); 352 + } 353 + 354 + static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, 355 + struct hw_perf_event *hwc) 356 + { 357 + u32 idx = hwc->idx; 358 + u64 new; 359 + 360 + /* 361 + * We limit the max period to half the max counter value so 362 + * that even in the case of extreme interrupt latency the 363 + * counter will (hopefully) not wrap past its initial value. 364 + */ 365 + if (idx == l2_cycle_ctr_idx) 366 + new = L2_CYCLE_COUNTER_RELOAD; 367 + else 368 + new = L2_COUNTER_RELOAD; 369 + 370 + local64_set(&hwc->prev_count, new); 371 + cluster_pmu_counter_set_value(idx, new); 372 + } 373 + 374 + static int l2_cache_get_event_idx(struct cluster_pmu *cluster, 375 + struct perf_event *event) 376 + { 377 + struct hw_perf_event *hwc = &event->hw; 378 + int idx; 379 + int num_ctrs = cluster->l2cache_pmu->num_counters - 1; 380 + unsigned int group; 381 + 382 + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { 383 + if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) 384 + return -EAGAIN; 385 + 386 + return l2_cycle_ctr_idx; 387 + } 388 + 389 + idx = find_first_zero_bit(cluster->used_counters, num_ctrs); 390 + if (idx == num_ctrs) 391 + /* The counters are all in use. */ 392 + return -EAGAIN; 393 + 394 + /* 395 + * Check for column exclusion: event column already in use by another 396 + * event. This is for events which are not in the same group. 397 + * Conflicting events in the same group are detected in event_init. 
398 + */ 399 + group = L2_EVT_GROUP(hwc->config_base); 400 + if (test_bit(group, cluster->used_groups)) 401 + return -EAGAIN; 402 + 403 + set_bit(idx, cluster->used_counters); 404 + set_bit(group, cluster->used_groups); 405 + 406 + return idx; 407 + } 408 + 409 + static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, 410 + struct perf_event *event) 411 + { 412 + struct hw_perf_event *hwc = &event->hw; 413 + int idx = hwc->idx; 414 + 415 + clear_bit(idx, cluster->used_counters); 416 + if (hwc->config_base != L2CYCLE_CTR_RAW_CODE) 417 + clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); 418 + } 419 + 420 + static irqreturn_t l2_cache_handle_irq(int irq_num, void *data) 421 + { 422 + struct cluster_pmu *cluster = data; 423 + int num_counters = cluster->l2cache_pmu->num_counters; 424 + u32 ovsr; 425 + int idx; 426 + 427 + ovsr = cluster_pmu_getreset_ovsr(); 428 + if (!cluster_pmu_has_overflowed(ovsr)) 429 + return IRQ_NONE; 430 + 431 + for_each_set_bit(idx, cluster->used_counters, num_counters) { 432 + struct perf_event *event = cluster->events[idx]; 433 + struct hw_perf_event *hwc; 434 + 435 + if (WARN_ON_ONCE(!event)) 436 + continue; 437 + 438 + if (!cluster_pmu_counter_has_overflowed(ovsr, idx)) 439 + continue; 440 + 441 + l2_cache_event_update(event); 442 + hwc = &event->hw; 443 + 444 + l2_cache_cluster_set_period(cluster, hwc); 445 + } 446 + 447 + return IRQ_HANDLED; 448 + } 449 + 450 + /* 451 + * Implementation of abstract pmu functionality required by 452 + * the core perf events code. 453 + */ 454 + 455 + static void l2_cache_pmu_enable(struct pmu *pmu) 456 + { 457 + /* 458 + * Although there is only one PMU (per socket) controlling multiple 459 + * physical PMUs (per cluster), because we do not support per-task mode 460 + * each event is associated with a CPU. Each event has pmu_enable 461 + * called on its CPU, so here it is only necessary to enable the 462 + * counters for the current CPU. 463 + */ 464 + 465 + cluster_pmu_enable(); 466 + } 467 + 468 + static void l2_cache_pmu_disable(struct pmu *pmu) 469 + { 470 + cluster_pmu_disable(); 471 + } 472 + 473 + static int l2_cache_event_init(struct perf_event *event) 474 + { 475 + struct hw_perf_event *hwc = &event->hw; 476 + struct cluster_pmu *cluster; 477 + struct perf_event *sibling; 478 + struct l2cache_pmu *l2cache_pmu; 479 + 480 + if (event->attr.type != event->pmu->type) 481 + return -ENOENT; 482 + 483 + l2cache_pmu = to_l2cache_pmu(event->pmu); 484 + 485 + if (hwc->sample_period) { 486 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 487 + "Sampling not supported\n"); 488 + return -EOPNOTSUPP; 489 + } 490 + 491 + if (event->cpu < 0) { 492 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 493 + "Per-task mode not supported\n"); 494 + return -EOPNOTSUPP; 495 + } 496 + 497 + /* We cannot filter accurately so we just don't allow it. 
*/ 498 + if (event->attr.exclude_user || event->attr.exclude_kernel || 499 + event->attr.exclude_hv || event->attr.exclude_idle) { 500 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 501 + "Can't exclude execution levels\n"); 502 + return -EOPNOTSUPP; 503 + } 504 + 505 + if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) || 506 + ((event->attr.config & ~L2_EVT_MASK) != 0)) && 507 + (event->attr.config != L2CYCLE_CTR_RAW_CODE)) { 508 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 509 + "Invalid config %llx\n", 510 + event->attr.config); 511 + return -EINVAL; 512 + } 513 + 514 + /* Don't allow groups with mixed PMUs, except for s/w events */ 515 + if (event->group_leader->pmu != event->pmu && 516 + !is_software_event(event->group_leader)) { 517 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 518 + "Can't create mixed PMU group\n"); 519 + return -EINVAL; 520 + } 521 + 522 + list_for_each_entry(sibling, &event->group_leader->sibling_list, 523 + group_entry) 524 + if (sibling->pmu != event->pmu && 525 + !is_software_event(sibling)) { 526 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 527 + "Can't create mixed PMU group\n"); 528 + return -EINVAL; 529 + } 530 + 531 + cluster = get_cluster_pmu(l2cache_pmu, event->cpu); 532 + if (!cluster) { 533 + /* CPU has not been initialised */ 534 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 535 + "CPU%d not associated with L2 cluster\n", event->cpu); 536 + return -EINVAL; 537 + } 538 + 539 + /* Ensure all events in a group are on the same cpu */ 540 + if ((event->group_leader != event) && 541 + (cluster->on_cpu != event->group_leader->cpu)) { 542 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 543 + "Can't create group on CPUs %d and %d", 544 + event->cpu, event->group_leader->cpu); 545 + return -EINVAL; 546 + } 547 + 548 + if ((event != event->group_leader) && 549 + (L2_EVT_GROUP(event->group_leader->attr.config) == 550 + L2_EVT_GROUP(event->attr.config))) { 551 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 552 + "Column exclusion: conflicting events %llx %llx\n", 553 + event->group_leader->attr.config, 554 + event->attr.config); 555 + return -EINVAL; 556 + } 557 + 558 + list_for_each_entry(sibling, &event->group_leader->sibling_list, 559 + group_entry) { 560 + if ((sibling != event) && 561 + (L2_EVT_GROUP(sibling->attr.config) == 562 + L2_EVT_GROUP(event->attr.config))) { 563 + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 564 + "Column exclusion: conflicting events %llx %llx\n", 565 + sibling->attr.config, 566 + event->attr.config); 567 + return -EINVAL; 568 + } 569 + } 570 + 571 + hwc->idx = -1; 572 + hwc->config_base = event->attr.config; 573 + 574 + /* 575 + * Ensure all events are on the same cpu so all events are in the 576 + * same cpu context, to avoid races on pmu_enable etc. 
577 + */ 578 + event->cpu = cluster->on_cpu; 579 + 580 + return 0; 581 + } 582 + 583 + static void l2_cache_event_start(struct perf_event *event, int flags) 584 + { 585 + struct cluster_pmu *cluster; 586 + struct hw_perf_event *hwc = &event->hw; 587 + int idx = hwc->idx; 588 + u32 config; 589 + u32 event_cc, event_group; 590 + 591 + hwc->state = 0; 592 + 593 + cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); 594 + 595 + l2_cache_cluster_set_period(cluster, hwc); 596 + 597 + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { 598 + cluster_pmu_set_evccntcr(0); 599 + } else { 600 + config = hwc->config_base; 601 + event_cc = L2_EVT_CODE(config); 602 + event_group = L2_EVT_GROUP(config); 603 + 604 + cluster_pmu_set_evcntcr(idx, 0); 605 + cluster_pmu_set_evtyper(idx, event_group); 606 + cluster_pmu_set_resr(cluster, event_group, event_cc); 607 + cluster_pmu_set_evfilter_sys_mode(idx); 608 + } 609 + 610 + cluster_pmu_counter_enable_interrupt(idx); 611 + cluster_pmu_counter_enable(idx); 612 + } 613 + 614 + static void l2_cache_event_stop(struct perf_event *event, int flags) 615 + { 616 + struct hw_perf_event *hwc = &event->hw; 617 + int idx = hwc->idx; 618 + 619 + if (hwc->state & PERF_HES_STOPPED) 620 + return; 621 + 622 + cluster_pmu_counter_disable_interrupt(idx); 623 + cluster_pmu_counter_disable(idx); 624 + 625 + if (flags & PERF_EF_UPDATE) 626 + l2_cache_event_update(event); 627 + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; 628 + } 629 + 630 + static int l2_cache_event_add(struct perf_event *event, int flags) 631 + { 632 + struct hw_perf_event *hwc = &event->hw; 633 + int idx; 634 + int err = 0; 635 + struct cluster_pmu *cluster; 636 + 637 + cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); 638 + 639 + idx = l2_cache_get_event_idx(cluster, event); 640 + if (idx < 0) 641 + return idx; 642 + 643 + hwc->idx = idx; 644 + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 645 + cluster->events[idx] = event; 646 + local64_set(&hwc->prev_count, 0); 647 + 648 + if (flags & PERF_EF_START) 649 + l2_cache_event_start(event, flags); 650 + 651 + /* Propagate changes to the userspace mapping. 
*/ 652 + perf_event_update_userpage(event); 653 + 654 + return err; 655 + } 656 + 657 + static void l2_cache_event_del(struct perf_event *event, int flags) 658 + { 659 + struct hw_perf_event *hwc = &event->hw; 660 + struct cluster_pmu *cluster; 661 + int idx = hwc->idx; 662 + 663 + cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); 664 + 665 + l2_cache_event_stop(event, flags | PERF_EF_UPDATE); 666 + cluster->events[idx] = NULL; 667 + l2_cache_clear_event_idx(cluster, event); 668 + 669 + perf_event_update_userpage(event); 670 + } 671 + 672 + static void l2_cache_event_read(struct perf_event *event) 673 + { 674 + l2_cache_event_update(event); 675 + } 676 + 677 + static ssize_t l2_cache_pmu_cpumask_show(struct device *dev, 678 + struct device_attribute *attr, 679 + char *buf) 680 + { 681 + struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev)); 682 + 683 + return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask); 684 + } 685 + 686 + static struct device_attribute l2_cache_pmu_cpumask_attr = 687 + __ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL); 688 + 689 + static struct attribute *l2_cache_pmu_cpumask_attrs[] = { 690 + &l2_cache_pmu_cpumask_attr.attr, 691 + NULL, 692 + }; 693 + 694 + static struct attribute_group l2_cache_pmu_cpumask_group = { 695 + .attrs = l2_cache_pmu_cpumask_attrs, 696 + }; 697 + 698 + /* CCG format for perf RAW codes. */ 699 + PMU_FORMAT_ATTR(l2_code, "config:4-11"); 700 + PMU_FORMAT_ATTR(l2_group, "config:0-3"); 701 + static struct attribute *l2_cache_pmu_formats[] = { 702 + &format_attr_l2_code.attr, 703 + &format_attr_l2_group.attr, 704 + NULL, 705 + }; 706 + 707 + static struct attribute_group l2_cache_pmu_format_group = { 708 + .name = "format", 709 + .attrs = l2_cache_pmu_formats, 710 + }; 711 + 712 + static const struct attribute_group *l2_cache_pmu_attr_grps[] = { 713 + &l2_cache_pmu_format_group, 714 + &l2_cache_pmu_cpumask_group, 715 + NULL, 716 + }; 717 + 718 + /* 719 + * Generic device handlers 720 + */ 721 + 722 + static const struct acpi_device_id l2_cache_pmu_acpi_match[] = { 723 + { "QCOM8130", }, 724 + { } 725 + }; 726 + 727 + static int get_num_counters(void) 728 + { 729 + int val; 730 + 731 + val = get_l2_indirect_reg(L2PMCR); 732 + 733 + /* 734 + * Read number of counters from L2PMCR and add 1 735 + * for the cycle counter. 736 + */ 737 + return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1; 738 + } 739 + 740 + static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( 741 + struct l2cache_pmu *l2cache_pmu, int cpu) 742 + { 743 + u64 mpidr; 744 + int cpu_cluster_id; 745 + struct cluster_pmu *cluster = NULL; 746 + 747 + /* 748 + * This assumes that the cluster_id is in MPIDR[aff1] for 749 + * single-threaded cores, and MPIDR[aff2] for multi-threaded 750 + * cores. This logic will have to be updated if this changes. 
751 + */ 752 + mpidr = read_cpuid_mpidr(); 753 + if (mpidr & MPIDR_MT_BITMASK) 754 + cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); 755 + else 756 + cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); 757 + 758 + list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { 759 + if (cluster->cluster_id != cpu_cluster_id) 760 + continue; 761 + 762 + dev_info(&l2cache_pmu->pdev->dev, 763 + "CPU%d associated with cluster %d\n", cpu, 764 + cluster->cluster_id); 765 + cpumask_set_cpu(cpu, &cluster->cluster_cpus); 766 + *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; 767 + break; 768 + } 769 + 770 + return cluster; 771 + } 772 + 773 + static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) 774 + { 775 + struct cluster_pmu *cluster; 776 + struct l2cache_pmu *l2cache_pmu; 777 + 778 + l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); 779 + cluster = get_cluster_pmu(l2cache_pmu, cpu); 780 + if (!cluster) { 781 + /* First time this CPU has come online */ 782 + cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); 783 + if (!cluster) { 784 + /* Only if broken firmware doesn't list every cluster */ 785 + WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu); 786 + return 0; 787 + } 788 + } 789 + 790 + /* If another CPU is managing this cluster, we're done */ 791 + if (cluster->on_cpu != -1) 792 + return 0; 793 + 794 + /* 795 + * All CPUs on this cluster were down, use this one. 796 + * Reset to put it into sane state. 797 + */ 798 + cluster->on_cpu = cpu; 799 + cpumask_set_cpu(cpu, &l2cache_pmu->cpumask); 800 + cluster_pmu_reset(); 801 + 802 + WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); 803 + enable_irq(cluster->irq); 804 + 805 + return 0; 806 + } 807 + 808 + static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) 809 + { 810 + struct cluster_pmu *cluster; 811 + struct l2cache_pmu *l2cache_pmu; 812 + cpumask_t cluster_online_cpus; 813 + unsigned int target; 814 + 815 + l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); 816 + cluster = get_cluster_pmu(l2cache_pmu, cpu); 817 + if (!cluster) 818 + return 0; 819 + 820 + /* If this CPU is not managing the cluster, we're done */ 821 + if (cluster->on_cpu != cpu) 822 + return 0; 823 + 824 + /* Give up ownership of cluster */ 825 + cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask); 826 + cluster->on_cpu = -1; 827 + 828 + /* Any other CPU for this cluster which is still online */ 829 + cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus, 830 + cpu_online_mask); 831 + target = cpumask_any_but(&cluster_online_cpus, cpu); 832 + if (target >= nr_cpu_ids) { 833 + disable_irq(cluster->irq); 834 + return 0; 835 + } 836 + 837 + perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target); 838 + cluster->on_cpu = target; 839 + cpumask_set_cpu(target, &l2cache_pmu->cpumask); 840 + WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target))); 841 + 842 + return 0; 843 + } 844 + 845 + static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) 846 + { 847 + struct platform_device *pdev = to_platform_device(dev->parent); 848 + struct platform_device *sdev = to_platform_device(dev); 849 + struct l2cache_pmu *l2cache_pmu = data; 850 + struct cluster_pmu *cluster; 851 + struct acpi_device *device; 852 + unsigned long fw_cluster_id; 853 + int err; 854 + int irq; 855 + 856 + if (acpi_bus_get_device(ACPI_HANDLE(dev), &device)) 857 + return -ENODEV; 858 + 859 + if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) { 860 + dev_err(&pdev->dev, "unable to read ACPI 
uid\n"); 861 + return -ENODEV; 862 + } 863 + 864 + cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); 865 + if (!cluster) 866 + return -ENOMEM; 867 + 868 + INIT_LIST_HEAD(&cluster->next); 869 + list_add(&cluster->next, &l2cache_pmu->clusters); 870 + cluster->cluster_id = fw_cluster_id; 871 + 872 + irq = platform_get_irq(sdev, 0); 873 + if (irq < 0) { 874 + dev_err(&pdev->dev, 875 + "Failed to get valid irq for cluster %ld\n", 876 + fw_cluster_id); 877 + return irq; 878 + } 879 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 880 + cluster->irq = irq; 881 + 882 + cluster->l2cache_pmu = l2cache_pmu; 883 + cluster->on_cpu = -1; 884 + 885 + err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq, 886 + IRQF_NOBALANCING | IRQF_NO_THREAD, 887 + "l2-cache-pmu", cluster); 888 + if (err) { 889 + dev_err(&pdev->dev, 890 + "Unable to request IRQ%d for L2 PMU counters\n", irq); 891 + return err; 892 + } 893 + 894 + dev_info(&pdev->dev, 895 + "Registered L2 cache PMU cluster %ld\n", fw_cluster_id); 896 + 897 + spin_lock_init(&cluster->pmu_lock); 898 + 899 + l2cache_pmu->num_pmus++; 900 + 901 + return 0; 902 + } 903 + 904 + static int l2_cache_pmu_probe(struct platform_device *pdev) 905 + { 906 + int err; 907 + struct l2cache_pmu *l2cache_pmu; 908 + 909 + l2cache_pmu = 910 + devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL); 911 + if (!l2cache_pmu) 912 + return -ENOMEM; 913 + 914 + INIT_LIST_HEAD(&l2cache_pmu->clusters); 915 + 916 + platform_set_drvdata(pdev, l2cache_pmu); 917 + l2cache_pmu->pmu = (struct pmu) { 918 + /* suffix is instance id for future use with multiple sockets */ 919 + .name = "l2cache_0", 920 + .task_ctx_nr = perf_invalid_context, 921 + .pmu_enable = l2_cache_pmu_enable, 922 + .pmu_disable = l2_cache_pmu_disable, 923 + .event_init = l2_cache_event_init, 924 + .add = l2_cache_event_add, 925 + .del = l2_cache_event_del, 926 + .start = l2_cache_event_start, 927 + .stop = l2_cache_event_stop, 928 + .read = l2_cache_event_read, 929 + .attr_groups = l2_cache_pmu_attr_grps, 930 + }; 931 + 932 + l2cache_pmu->num_counters = get_num_counters(); 933 + l2cache_pmu->pdev = pdev; 934 + l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev, 935 + struct cluster_pmu *); 936 + if (!l2cache_pmu->pmu_cluster) 937 + return -ENOMEM; 938 + 939 + l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1; 940 + l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) | 941 + BIT(L2CYCLE_CTR_BIT); 942 + 943 + cpumask_clear(&l2cache_pmu->cpumask); 944 + 945 + /* Read cluster info and initialize each cluster */ 946 + err = device_for_each_child(&pdev->dev, l2cache_pmu, 947 + l2_cache_pmu_probe_cluster); 948 + if (err) 949 + return err; 950 + 951 + if (l2cache_pmu->num_pmus == 0) { 952 + dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n"); 953 + return -ENODEV; 954 + } 955 + 956 + err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, 957 + &l2cache_pmu->node); 958 + if (err) { 959 + dev_err(&pdev->dev, "Error %d registering hotplug", err); 960 + return err; 961 + } 962 + 963 + err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1); 964 + if (err) { 965 + dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err); 966 + goto out_unregister; 967 + } 968 + 969 + dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n", 970 + l2cache_pmu->num_pmus); 971 + 972 + return err; 973 + 974 + out_unregister: 975 + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, 976 + &l2cache_pmu->node); 977 + return err; 978 + } 979 + 980 + static int 
l2_cache_pmu_remove(struct platform_device *pdev) 981 + { 982 + struct l2cache_pmu *l2cache_pmu = 983 + to_l2cache_pmu(platform_get_drvdata(pdev)); 984 + 985 + perf_pmu_unregister(&l2cache_pmu->pmu); 986 + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, 987 + &l2cache_pmu->node); 988 + return 0; 989 + } 990 + 991 + static struct platform_driver l2_cache_pmu_driver = { 992 + .driver = { 993 + .name = "qcom-l2cache-pmu", 994 + .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), 995 + }, 996 + .probe = l2_cache_pmu_probe, 997 + .remove = l2_cache_pmu_remove, 998 + }; 999 + 1000 + static int __init register_l2_cache_pmu_driver(void) 1001 + { 1002 + int err; 1003 + 1004 + err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, 1005 + "AP_PERF_ARM_QCOM_L2_ONLINE", 1006 + l2cache_pmu_online_cpu, 1007 + l2cache_pmu_offline_cpu); 1008 + if (err) 1009 + return err; 1010 + 1011 + return platform_driver_register(&l2_cache_pmu_driver); 1012 + } 1013 + device_initcall(register_l2_cache_pmu_driver);
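As a usage illustration (not part of the patch): a minimal user-space sketch that counts one L2 event with perf_event_open(2), assuming the driver above has registered as "l2cache_0" and that CPU 0 belongs to an L2 cluster. The dynamic PMU type is read from sysfs, and config=0x001 matches the first event in the documentation's perf stat example.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	/* The PMU's dynamic type id, exported by the perf core via sysfs. */
	f = fopen("/sys/devices/l2cache_0/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x001;	/* event code 0x00, group 1 (see doc above) */

	/* Counting mode, system-wide on CPU 0: pid = -1, cpu = 0. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("l2cache_0/config=0x001/: %llu\n",
		       (unsigned long long)count);

	close(fd);
	return 0;
}

Sampling is rejected by the driver (event_init returns -EOPNOTSUPP for a non-zero sample_period), so only this counting style of use applies.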
+1
include/linux/cpuhotplug.h
  	CPUHP_AP_PERF_ARM_CCI_ONLINE,
  	CPUHP_AP_PERF_ARM_CCN_ONLINE,
  	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
+ 	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
  	CPUHP_AP_WORKQUEUE_ONLINE,
  	CPUHP_AP_RCUTREE_ONLINE,
  	CPUHP_AP_ONLINE_DYN,