
perf: arm_pmu: Request specific affinities for per CPU NMIs/interrupts

Let the PMU driver request both NMIs and normal interrupts with an affinity mask
matching the PMU affinity.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Will Deacon <will@kernel.org>
Link: https://patch.msgid.link/20251020122944.3074811-19-maz@kernel.org
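
The shape of the change: instead of requesting per-CPU PMU NMIs/interrupts for every possible CPU (a NULL affinity), the driver now passes the mask of CPUs the PMU actually covers. A minimal sketch of the resulting request pattern, assuming a PMU whose supported_cpus mask is already populated (sketch_request_pmu_irq is a hypothetical name; the affinity parameter of request_percpu_nmi() and the request_percpu_irq_affinity() helper come from earlier patches in this series):

/* Sketch only: the calling convention, not the driver's actual code. */
static int sketch_request_pmu_irq(struct arm_pmu *pmu, int irq,
				  void __percpu *devid)
{
	const struct cpumask *affinity = &pmu->supported_cpus;
	int err;

	/* Previously: request_percpu_nmi(irq, ..., NULL, devid), i.e. all CPUs */
	err = request_percpu_nmi(irq, armpmu_dispatch_irq, "arm-pmu",
				 affinity, devid);
	if (err)
		/* No NMI support: fall back to a normal per-CPU interrupt */
		err = request_percpu_irq_affinity(irq, armpmu_dispatch_irq,
						  "arm-pmu", affinity, devid);
	return err;
}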

Authored by Will Deacon, committed by Thomas Gleixner
54b350fa c734af3b

4 files changed: +31 -23

drivers/perf/arm_pmu.c (+26 -18)

···
 
 #include <asm/irq_regs.h>
 
-static int armpmu_count_irq_users(const int irq);
+static int armpmu_count_irq_users(const struct cpumask *affinity,
+				  const int irq);
 
 struct pmu_irq_ops {
 	void (*enable_pmuirq)(unsigned int irq);
···
 static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
 				      void __percpu *devid)
 {
-	if (armpmu_count_irq_users(irq) == 1)
+	struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+	if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
 		free_percpu_irq(irq, devid);
 }
···
 static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
 				      void __percpu *devid)
 {
-	if (armpmu_count_irq_users(irq) == 1)
+	struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+	if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
 		free_percpu_nmi(irq, devid);
 }
···
 	.attrs = armpmu_common_attrs,
 };
 
-static int armpmu_count_irq_users(const int irq)
+static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq)
 {
 	int cpu, count = 0;
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, affinity) {
 		if (per_cpu(cpu_irq, cpu) == irq)
 			count++;
 	}
···
 	return count;
 }
 
-static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+static const struct pmu_irq_ops *
+armpmu_find_irq_ops(const struct cpumask *affinity, int irq)
 {
 	const struct pmu_irq_ops *ops = NULL;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, affinity) {
 		if (per_cpu(cpu_irq, cpu) != irq)
 			continue;
 
···
 	return ops;
 }
 
-void armpmu_free_irq(int irq, int cpu)
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
 {
 	if (per_cpu(cpu_irq, cpu) == 0)
 		return;
 	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
 		return;
 
-	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
 
 	per_cpu(cpu_irq, cpu) = 0;
 	per_cpu(cpu_irq_ops, cpu) = NULL;
 }
 
-int armpmu_request_irq(int irq, int cpu)
+int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
 {
 	int err = 0;
+	struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
+	const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
+						   cpu_possible_mask; /* ACPI */
 	const irq_handler_t handler = armpmu_dispatch_irq;
 	const struct pmu_irq_ops *irq_ops;
 
···
 				   IRQF_NOBALANCING | IRQF_NO_AUTOEN |
 				   IRQF_NO_THREAD;
 
-		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
-				  per_cpu_ptr(&cpu_armpmu, cpu));
+		err = request_nmi(irq, handler, irq_flags, "arm-pmu", armpmu);
 
 		/* If cannot get an NMI, get a normal interrupt */
 		if (err) {
 			err = request_irq(irq, handler, irq_flags, "arm-pmu",
-					  per_cpu_ptr(&cpu_armpmu, cpu));
+					  armpmu);
 			irq_ops = &pmuirq_ops;
 		} else {
 			has_nmi = true;
 			irq_ops = &pmunmi_ops;
 		}
-	} else if (armpmu_count_irq_users(irq) == 0) {
-		err = request_percpu_nmi(irq, handler, "arm-pmu", NULL, &cpu_armpmu);
+	} else if (armpmu_count_irq_users(affinity, irq) == 0) {
+		err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu);
 
 		/* If cannot get an NMI, get a normal interrupt */
 		if (err) {
-			err = request_percpu_irq(irq, handler, "arm-pmu",
-						 &cpu_armpmu);
+			err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
+							  affinity, pcpu_armpmu);
 			irq_ops = &percpu_pmuirq_ops;
 		} else {
 			has_nmi = true;
···
 		}
 	} else {
 		/* Per cpudevid irq was already requested by another CPU */
-		irq_ops = armpmu_find_irq_ops(irq);
+		irq_ops = armpmu_find_irq_ops(affinity, irq);
 
 		if (WARN_ON(!irq_ops))
 			err = -EINVAL;
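
Two details in the arm_pmu.c hunks above are easy to miss. On the ACPI path the interrupts are requested before any PMU has been associated with the CPU, so *armpmu is NULL and the affinity falls back to cpu_possible_mask (hence the /* ACPI */ comment); on the DT path the per-CPU PMU pointer is populated before the request, so the PMU's own mask is used. A hedged sketch of that selection, pulled out as a standalone helper with an illustrative name:

/* Sketch: how armpmu_request_irq() picks the affinity for a given CPU. */
static const struct cpumask *
sketch_pmu_affinity(struct arm_pmu * __percpu *pcpu_armpmu, int cpu)
{
	struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);

	/*
	 * DT probe: percpu_pmu is set before the IRQs are requested, so
	 * the PMU's CPU mask is known. ACPI probe: IRQs are requested
	 * before the PMUs are probed, so stay conservative.
	 */
	return *armpmu ? &(*armpmu)->supported_cpus : cpu_possible_mask;
}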

drivers/perf/arm_pmu_acpi.c (+1 -1)

···
 	 * them with their PMUs.
 	 */
 	per_cpu(pmu_irqs, cpu) = irq;
-	err = armpmu_request_irq(irq, cpu);
+	err = armpmu_request_irq(&probed_pmus, irq, cpu);
 	if (err)
 		goto out_err;
 }

drivers/perf/arm_pmu_platform.c (+2 -2)

···
 		if (!irq)
 			continue;
 
-		err = armpmu_request_irq(irq, cpu);
+		err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
 		if (err)
 			break;
 	}
···
 	for_each_cpu(cpu, &armpmu->supported_cpus) {
 		int irq = per_cpu(hw_events->irq, cpu);
 
-		armpmu_free_irq(irq, cpu);
+		armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
 	}
 }
 
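For contrast with the ACPI path, the platform (DT) path above already has hw_events->percpu_pmu pointing at the arm_pmu for each supported CPU by the time the IRQs are requested, which is what lets armpmu_request_irq() resolve a real affinity mask. A caller-side sketch under that assumption (hypothetical function name, error unwinding omitted):

static int sketch_request_platform_irqs(struct arm_pmu *armpmu)
{
	struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
	int cpu, err = 0;

	for_each_cpu(cpu, &armpmu->supported_cpus) {
		int irq = per_cpu(hw_events->irq, cpu);

		if (!irq)
			continue;

		/* percpu_pmu already points back at this arm_pmu */
		err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
		if (err)
			break;
	}

	return err;
}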

include/linux/perf/arm_pmu.h (+2 -2)

···
 struct arm_pmu *armpmu_alloc(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
 
 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"
 