Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: perf: Move PMU register related defines to asm/perf_event.h

To use the ARMv8 PMU related register defines from the KVM code, we move
the relevant definitions to asm/perf_event.h header file and rename them
with prefix ARMV8_PMU_. This allows us to get rid of kvm_perf_event.h.

Signed-off-by: Anup Patel <anup.patel@linaro.org>
Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

Authored by Shannon Zhao and committed by Will Deacon.
b8cfadfc a6002ec5

+66 -123
-1
arch/arm64/include/asm/kvm_host.h
··· 27 27 #include <asm/kvm.h> 28 28 #include <asm/kvm_asm.h> 29 29 #include <asm/kvm_mmio.h> 30 - #include <asm/kvm_perf_event.h> 31 30 32 31 #define __KVM_HAVE_ARCH_INTC_INITIALIZED 33 32
-1
arch/arm64/include/asm/kvm_hyp.h
··· 21 21 #include <linux/compiler.h> 22 22 #include <linux/kvm_host.h> 23 23 #include <asm/kvm_mmu.h> 24 - #include <asm/kvm_perf_event.h> 25 24 #include <asm/sysreg.h> 26 25 27 26 #define __hyp_text __section(.hyp.text) notrace
-68
arch/arm64/include/asm/kvm_perf_event.h
··· 1 - /* 2 - * Copyright (C) 2012 ARM Ltd. 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License version 2 as 6 - * published by the Free Software Foundation. 7 - * 8 - * This program is distributed in the hope that it will be useful, 9 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 - * GNU General Public License for more details. 12 - * 13 - * You should have received a copy of the GNU General Public License 14 - * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 - */ 16 - 17 - #ifndef __ASM_KVM_PERF_EVENT_H 18 - #define __ASM_KVM_PERF_EVENT_H 19 - 20 - #define ARMV8_PMU_MAX_COUNTERS 32 21 - #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) 22 - 23 - /* 24 - * Per-CPU PMCR: config reg 25 - */ 26 - #define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ 27 - #define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ 28 - #define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ 29 - #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ 30 - #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ 31 - #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ 32 - /* Determines which bit of PMCCNTR_EL0 generates an overflow */ 33 - #define ARMV8_PMU_PMCR_LC (1 << 6) 34 - #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ 35 - #define ARMV8_PMU_PMCR_N_MASK 0x1f 36 - #define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ 37 - 38 - /* 39 - * PMOVSR: counters overflow flag status reg 40 - */ 41 - #define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ 42 - #define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK 43 - 44 - /* 45 - * PMXEVTYPER: Event selection reg 46 - */ 47 - #define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ 48 - #define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* 
Mask for EVENT bits */ 49 - 50 - #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ 51 - 52 - /* 53 - * Event filters for PMUv3 54 - */ 55 - #define ARMV8_PMU_EXCLUDE_EL1 (1 << 31) 56 - #define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) 57 - #define ARMV8_PMU_INCLUDE_EL2 (1 << 27) 58 - 59 - /* 60 - * PMUSERENR: user enable reg 61 - */ 62 - #define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ 63 - #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ 64 - #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ 65 - #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ 66 - #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ 67 - 68 - #endif
+47
arch/arm64/include/asm/perf_event.h
··· 17 17 #ifndef __ASM_PERF_EVENT_H 18 18 #define __ASM_PERF_EVENT_H 19 19 20 + #define ARMV8_PMU_MAX_COUNTERS 32 21 + #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) 22 + 23 + /* 24 + * Per-CPU PMCR: config reg 25 + */ 26 + #define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ 27 + #define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ 28 + #define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ 29 + #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ 30 + #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ 31 + #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ 32 + #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ 33 + #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ 34 + #define ARMV8_PMU_PMCR_N_MASK 0x1f 35 + #define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */ 36 + 37 + /* 38 + * PMOVSR: counters overflow flag status reg 39 + */ 40 + #define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ 41 + #define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK 42 + 43 + /* 44 + * PMXEVTYPER: Event selection reg 45 + */ 46 + #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ 47 + #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ 48 + 49 + #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ 50 + 51 + /* 52 + * Event filters for PMUv3 53 + */ 54 + #define ARMV8_PMU_EXCLUDE_EL1 (1 << 31) 55 + #define ARMV8_PMU_EXCLUDE_EL0 (1 << 30) 56 + #define ARMV8_PMU_INCLUDE_EL2 (1 << 27) 57 + 58 + /* 59 + * PMUSERENR: user enable reg 60 + */ 61 + #define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ 62 + #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ 63 + #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ 64 + #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ 65 + #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event 
counter can be read at EL0 */ 66 + 20 67 #ifdef CONFIG_PERF_EVENTS 21 68 struct pt_regs; 22 69 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+19 -53
arch/arm64/kernel/perf_event.c
··· 20 20 */ 21 21 22 22 #include <asm/irq_regs.h> 23 + #include <asm/perf_event.h> 23 24 #include <asm/virt.h> 24 25 25 26 #include <linux/of.h> ··· 385 384 #define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \ 386 385 (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) 387 386 388 - #define ARMV8_MAX_COUNTERS 32 389 - #define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) 390 - 391 387 /* 392 388 * ARMv8 low level PMU access 393 389 */ ··· 393 395 * Perf Event to low level counters mapping 394 396 */ 395 397 #define ARMV8_IDX_TO_COUNTER(x) \ 396 - (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) 397 - 398 - /* 399 - * Per-CPU PMCR: config reg 400 - */ 401 - #define ARMV8_PMCR_E (1 << 0) /* Enable all counters */ 402 - #define ARMV8_PMCR_P (1 << 1) /* Reset all counters */ 403 - #define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ 404 - #define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ 405 - #define ARMV8_PMCR_X (1 << 4) /* Export to ETM */ 406 - #define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ 407 - #define ARMV8_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ 408 - #define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */ 409 - #define ARMV8_PMCR_N_MASK 0x1f 410 - #define ARMV8_PMCR_MASK 0x7f /* Mask for writable bits */ 411 - 412 - /* 413 - * PMOVSR: counters overflow flag status reg 414 - */ 415 - #define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */ 416 - #define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK 417 - 418 - /* 419 - * PMXEVTYPER: Event selection reg 420 - */ 421 - #define ARMV8_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ 422 - #define ARMV8_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ 423 - 424 - /* 425 - * Event filters for PMUv3 426 - */ 427 - #define ARMV8_EXCLUDE_EL1 (1 << 31) 428 - #define ARMV8_EXCLUDE_EL0 (1 << 30) 429 - #define ARMV8_INCLUDE_EL2 (1 << 27) 398 + (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) 430 399 431 400 static inline u32 armv8pmu_pmcr_read(void) 432 401 { 
··· 404 439 405 440 static inline void armv8pmu_pmcr_write(u32 val) 406 441 { 407 - val &= ARMV8_PMCR_MASK; 442 + val &= ARMV8_PMU_PMCR_MASK; 408 443 isb(); 409 444 asm volatile("msr pmcr_el0, %0" :: "r" (val)); 410 445 } 411 446 412 447 static inline int armv8pmu_has_overflowed(u32 pmovsr) 413 448 { 414 - return pmovsr & ARMV8_OVERFLOWED_MASK; 449 + return pmovsr & ARMV8_PMU_OVERFLOWED_MASK; 415 450 } 416 451 417 452 static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx) ··· 477 512 static inline void armv8pmu_write_evtype(int idx, u32 val) 478 513 { 479 514 if (armv8pmu_select_counter(idx) == idx) { 480 - val &= ARMV8_EVTYPE_MASK; 515 + val &= ARMV8_PMU_EVTYPE_MASK; 481 516 asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); 482 517 } 483 518 } ··· 523 558 asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); 524 559 525 560 /* Write to clear flags */ 526 - value &= ARMV8_OVSR_MASK; 561 + value &= ARMV8_PMU_OVSR_MASK; 527 562 asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); 528 563 529 564 return value; ··· 661 696 662 697 raw_spin_lock_irqsave(&events->pmu_lock, flags); 663 698 /* Enable all counters */ 664 - armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); 699 + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E); 665 700 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 666 701 } 667 702 ··· 672 707 673 708 raw_spin_lock_irqsave(&events->pmu_lock, flags); 674 709 /* Disable all counters */ 675 - armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); 710 + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E); 676 711 raw_spin_unlock_irqrestore(&events->pmu_lock, flags); 677 712 } 678 713 ··· 682 717 int idx; 683 718 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); 684 719 struct hw_perf_event *hwc = &event->hw; 685 - unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT; 720 + unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT; 686 721 687 722 /* Always place a cycle counter into 
the cycle counter. */ 688 723 if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { ··· 719 754 attr->exclude_kernel != attr->exclude_hv) 720 755 return -EINVAL; 721 756 if (attr->exclude_user) 722 - config_base |= ARMV8_EXCLUDE_EL0; 757 + config_base |= ARMV8_PMU_EXCLUDE_EL0; 723 758 if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) 724 - config_base |= ARMV8_EXCLUDE_EL1; 759 + config_base |= ARMV8_PMU_EXCLUDE_EL1; 725 760 if (!attr->exclude_hv) 726 - config_base |= ARMV8_INCLUDE_EL2; 761 + config_base |= ARMV8_PMU_INCLUDE_EL2; 727 762 728 763 /* 729 764 * Install the filter into config_base as this is used to ··· 749 784 * Initialize & Reset PMNC. Request overflow interrupt for 750 785 * 64 bit cycle counter but cheat in armv8pmu_write_counter(). 751 786 */ 752 - armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C | ARMV8_PMCR_LC); 787 + armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | 788 + ARMV8_PMU_PMCR_LC); 753 789 } 754 790 755 791 static int armv8_pmuv3_map_event(struct perf_event *event) 756 792 { 757 793 return armpmu_map_event(event, &armv8_pmuv3_perf_map, 758 794 &armv8_pmuv3_perf_cache_map, 759 - ARMV8_EVTYPE_EVENT); 795 + ARMV8_PMU_EVTYPE_EVENT); 760 796 } 761 797 762 798 static int armv8_a53_map_event(struct perf_event *event) 763 799 { 764 800 return armpmu_map_event(event, &armv8_a53_perf_map, 765 801 &armv8_a53_perf_cache_map, 766 - ARMV8_EVTYPE_EVENT); 802 + ARMV8_PMU_EVTYPE_EVENT); 767 803 } 768 804 769 805 static int armv8_a57_map_event(struct perf_event *event) 770 806 { 771 807 return armpmu_map_event(event, &armv8_a57_perf_map, 772 808 &armv8_a57_perf_cache_map, 773 - ARMV8_EVTYPE_EVENT); 809 + ARMV8_PMU_EVTYPE_EVENT); 774 810 } 775 811 776 812 static int armv8_thunder_map_event(struct perf_event *event) 777 813 { 778 814 return armpmu_map_event(event, &armv8_thunder_perf_map, 779 815 &armv8_thunder_perf_cache_map, 780 - ARMV8_EVTYPE_EVENT); 816 + ARMV8_PMU_EVTYPE_EVENT); 781 817 } 782 818 783 819 static void 
armv8pmu_read_num_pmnc_events(void *info) ··· 786 820 int *nb_cnt = info; 787 821 788 822 /* Read the nb of CNTx counters supported from PMNC */ 789 - *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; 823 + *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; 790 824 791 825 /* Add the CPU cycles counter */ 792 826 *nb_cnt += 1;