Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf regs: Remove __weak attribute from arch__xxx_reg_mask() functions

Currently, some architecture-specific perf-regs functions, such as
arch__intr_reg_mask() and arch__user_reg_mask(), are defined with the
__weak attribute.

This approach ensures that only functions matching the architecture of
the build/run host are compiled and executed, reducing build time and
binary size.

However, this __weak attribute restricts these functions to be called
only on the same architecture, preventing cross-architecture
functionality.

For example, a perf.data file captured on x86 cannot be parsed on an ARM
platform.

To address this limitation, this patch removes the __weak attribute from
these perf-regs functions.

The architecture-specific code is moved from the arch/ directory to the
util/perf-regs-arch/ directory.

The appropriate architecture-specific functions are then called based on
the EM_HOST value (the ELF e_machine type of the host).

No functional changes are intended.

Suggested-by: Ian Rogers <irogers@google.com>
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Guo Ren <guoren@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <pjw@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Falcon <thomas.falcon@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xudong Hao <xudong.hao@intel.com>
Cc: Zide Chen <zide.chen@intel.com>
[ Fixed up some fuzz with s390 and riscv Build files wrt removing perf_regs.o ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Dapeng Mi and committed by
Arnaldo Carvalho de Melo
16dccbb8 e716e69c

+332 -236
-2
tools/perf/arch/arm/util/Build
··· 1 - perf-util-y += perf_regs.o 2 - 3 1 perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o 4 2 5 3 perf-util-y += pmu.o auxtrace.o cs-etm.o
-13
tools/perf/arch/arm/util/perf_regs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "perf_regs.h" 3 - #include "../../../util/perf_regs.h" 4 - 5 - uint64_t arch__intr_reg_mask(void) 6 - { 7 - return PERF_REGS_MASK; 8 - } 9 - 10 - uint64_t arch__user_reg_mask(void) 11 - { 12 - return PERF_REGS_MASK; 13 - }
-36
tools/perf/arch/arm64/util/perf_regs.c
··· 103 103 104 104 return SDT_ARG_VALID; 105 105 } 106 - 107 - uint64_t arch__intr_reg_mask(void) 108 - { 109 - return PERF_REGS_MASK; 110 - } 111 - 112 - uint64_t arch__user_reg_mask(void) 113 - { 114 - struct perf_event_attr attr = { 115 - .type = PERF_TYPE_HARDWARE, 116 - .config = PERF_COUNT_HW_CPU_CYCLES, 117 - .sample_type = PERF_SAMPLE_REGS_USER, 118 - .disabled = 1, 119 - .exclude_kernel = 1, 120 - .sample_period = 1, 121 - .sample_regs_user = PERF_REGS_MASK 122 - }; 123 - int fd; 124 - 125 - if (getauxval(AT_HWCAP) & HWCAP_SVE) 126 - attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG); 127 - 128 - /* 129 - * Check if the pmu supports perf extended regs, before 130 - * returning the register mask to sample. 131 - */ 132 - if (attr.sample_regs_user != PERF_REGS_MASK) { 133 - event_attr_init(&attr); 134 - fd = sys_perf_event_open(&attr, 0, -1, -1, 0); 135 - if (fd != -1) { 136 - close(fd); 137 - return attr.sample_regs_user; 138 - } 139 - } 140 - return PERF_REGS_MASK; 141 - }
-1
tools/perf/arch/csky/Build
··· 1 - perf-util-y += util/
-1
tools/perf/arch/csky/util/Build
··· 1 - perf-util-y += perf_regs.o
-13
tools/perf/arch/csky/util/perf_regs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "perf_regs.h" 3 - #include "../../util/perf_regs.h" 4 - 5 - uint64_t arch__intr_reg_mask(void) 6 - { 7 - return PERF_REGS_MASK; 8 - } 9 - 10 - uint64_t arch__user_reg_mask(void) 11 - { 12 - return PERF_REGS_MASK; 13 - }
-1
tools/perf/arch/loongarch/util/Build
··· 1 1 perf-util-y += header.o 2 - perf-util-y += perf_regs.o 3 2 4 3 perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o 5 4 perf-util-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
-13
tools/perf/arch/loongarch/util/perf_regs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "perf_regs.h" 3 - #include "../../../util/perf_regs.h" 4 - 5 - uint64_t arch__intr_reg_mask(void) 6 - { 7 - return PERF_REGS_MASK; 8 - } 9 - 10 - uint64_t arch__user_reg_mask(void) 11 - { 12 - return PERF_REGS_MASK; 13 - }
-1
tools/perf/arch/mips/util/Build
··· 1 - perf-util-y += perf_regs.o 2 1 perf-util-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
-13
tools/perf/arch/mips/util/perf_regs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "perf_regs.h" 3 - #include "../../util/perf_regs.h" 4 - 5 - uint64_t arch__intr_reg_mask(void) 6 - { 7 - return PERF_REGS_MASK; 8 - } 9 - 10 - uint64_t arch__user_reg_mask(void) 11 - { 12 - return PERF_REGS_MASK; 13 - }
-47
tools/perf/arch/powerpc/util/perf_regs.c
··· 123 123 124 124 return SDT_ARG_VALID; 125 125 } 126 - 127 - uint64_t arch__intr_reg_mask(void) 128 - { 129 - struct perf_event_attr attr = { 130 - .type = PERF_TYPE_HARDWARE, 131 - .config = PERF_COUNT_HW_CPU_CYCLES, 132 - .sample_type = PERF_SAMPLE_REGS_INTR, 133 - .precise_ip = 1, 134 - .disabled = 1, 135 - .exclude_kernel = 1, 136 - }; 137 - int fd; 138 - u32 version; 139 - u64 extended_mask = 0, mask = PERF_REGS_MASK; 140 - 141 - /* 142 - * Get the PVR value to set the extended 143 - * mask specific to platform. 144 - */ 145 - version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF); 146 - if (version == PVR_POWER9) 147 - extended_mask = PERF_REG_PMU_MASK_300; 148 - else if ((version == PVR_POWER10) || (version == PVR_POWER11)) 149 - extended_mask = PERF_REG_PMU_MASK_31; 150 - else 151 - return mask; 152 - 153 - attr.sample_regs_intr = extended_mask; 154 - attr.sample_period = 1; 155 - event_attr_init(&attr); 156 - 157 - /* 158 - * check if the pmu supports perf extended regs, before 159 - * returning the register mask to sample. 160 - */ 161 - fd = sys_perf_event_open(&attr, 0, -1, -1, 0); 162 - if (fd != -1) { 163 - close(fd); 164 - mask |= extended_mask; 165 - } 166 - return mask; 167 - } 168 - 169 - uint64_t arch__user_reg_mask(void) 170 - { 171 - return PERF_REGS_MASK; 172 - }
+6 -1
tools/perf/arch/riscv/include/perf_regs.h
··· 10 10 11 11 #define PERF_REGS_MASK ((1ULL << PERF_REG_RISCV_MAX) - 1) 12 12 #define PERF_REGS_MAX PERF_REG_RISCV_MAX 13 + 14 + #if defined(__riscv_xlen) 13 15 #if __riscv_xlen == 64 14 - #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 16 + #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 15 17 #else 16 18 #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32 19 + #endif 20 + #else 21 + #define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_NONE 17 22 #endif 18 23 19 24 #endif /* ARCH_PERF_REGS_H */
-1
tools/perf/arch/riscv/util/Build
··· 1 - perf-util-y += perf_regs.o 2 1 perf-util-y += header.o
-13
tools/perf/arch/riscv/util/perf_regs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "perf_regs.h" 3 - #include "../../util/perf_regs.h" 4 - 5 - uint64_t arch__intr_reg_mask(void) 6 - { 7 - return PERF_REGS_MASK; 8 - } 9 - 10 - uint64_t arch__user_reg_mask(void) 11 - { 12 - return PERF_REGS_MASK; 13 - }
-1
tools/perf/arch/s390/util/Build
··· 1 1 perf-util-y += header.o 2 - perf-util-y += perf_regs.o 3 2 4 3 perf-util-y += machine.o 5 4 perf-util-y += pmu.o
-13
tools/perf/arch/s390/util/perf_regs.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include "perf_regs.h" 3 - #include "../../util/perf_regs.h" 4 - 5 - uint64_t arch__intr_reg_mask(void) 6 - { 7 - return PERF_REGS_MASK; 8 - } 9 - 10 - uint64_t arch__user_reg_mask(void) 11 - { 12 - return PERF_REGS_MASK; 13 - }
-48
tools/perf/arch/x86/util/perf_regs.c
··· 233 233 234 234 return SDT_ARG_VALID; 235 235 } 236 - 237 - uint64_t arch__intr_reg_mask(void) 238 - { 239 - struct perf_event_attr attr = { 240 - .type = PERF_TYPE_HARDWARE, 241 - .config = PERF_COUNT_HW_CPU_CYCLES, 242 - .sample_type = PERF_SAMPLE_REGS_INTR, 243 - .sample_regs_intr = PERF_REG_EXTENDED_MASK, 244 - .precise_ip = 1, 245 - .disabled = 1, 246 - .exclude_kernel = 1, 247 - }; 248 - int fd; 249 - /* 250 - * In an unnamed union, init it here to build on older gcc versions 251 - */ 252 - attr.sample_period = 1; 253 - 254 - if (perf_pmus__num_core_pmus() > 1) { 255 - struct perf_pmu *pmu = NULL; 256 - __u64 type = PERF_TYPE_RAW; 257 - 258 - /* 259 - * The same register set is supported among different hybrid PMUs. 260 - * Only check the first available one. 261 - */ 262 - while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 263 - type = pmu->type; 264 - break; 265 - } 266 - attr.config |= type << PERF_PMU_TYPE_SHIFT; 267 - } 268 - 269 - event_attr_init(&attr); 270 - 271 - fd = sys_perf_event_open(&attr, 0, -1, -1, 0); 272 - if (fd != -1) { 273 - close(fd); 274 - return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK); 275 - } 276 - 277 - return PERF_REGS_MASK; 278 - } 279 - 280 - uint64_t arch__user_reg_mask(void) 281 - { 282 - return PERF_REGS_MASK; 283 - }
+2 -2
tools/perf/util/evsel.c
··· 1055 1055 evsel__set_sample_bit(evsel, REGS_USER); 1056 1056 evsel__set_sample_bit(evsel, STACK_USER); 1057 1057 if (opts->sample_user_regs && 1058 - DWARF_MINIMAL_REGS(e_machine) != arch__user_reg_mask()) { 1058 + DWARF_MINIMAL_REGS(e_machine) != perf_user_reg_mask(EM_HOST)) { 1059 1059 attr->sample_regs_user |= DWARF_MINIMAL_REGS(e_machine); 1060 1060 pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, " 1061 1061 "specifying a subset with --user-regs may render DWARF unwinding unreliable, " 1062 1062 "so the minimal registers set (IP, SP) is explicitly forced.\n"); 1063 1063 } else { 1064 - attr->sample_regs_user |= arch__user_reg_mask(); 1064 + attr->sample_regs_user |= perf_user_reg_mask(EM_HOST); 1065 1065 } 1066 1066 attr->sample_stack_user = param->dump_size; 1067 1067 attr->exclude_callchain_user = 1;
+1 -1
tools/perf/util/parse-regs-options.c
··· 66 66 if (*mode) 67 67 return -1; 68 68 69 - mask = intr ? arch__intr_reg_mask() : arch__user_reg_mask(); 69 + mask = intr ? perf_intr_reg_mask(EM_HOST) : perf_user_reg_mask(EM_HOST); 70 70 71 71 /* str may be NULL in case no arg is passed to -I */ 72 72 if (!str) {
+52 -1
tools/perf/util/perf-regs-arch/perf_regs_aarch64.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <errno.h> 3 + #include <regex.h> 4 + #include <string.h> 5 + #include <sys/auxv.h> 6 + #include <linux/kernel.h> 7 + #include <linux/zalloc.h> 2 8 9 + #include "../debug.h" 10 + #include "../event.h" 3 11 #include "../perf_regs.h" 4 - #include "../../../arch/arm64/include/uapi/asm/perf_regs.h" 12 + #include "../../perf-sys.h" 13 + #include "../../arch/arm64/include/perf_regs.h" 14 + 15 + #define SMPL_REG_MASK(b) (1ULL << (b)) 16 + 17 + #ifndef HWCAP_SVE 18 + #define HWCAP_SVE (1 << 22) 19 + #endif 20 + 21 + uint64_t __perf_reg_mask_arm64(bool intr) 22 + { 23 + struct perf_event_attr attr = { 24 + .type = PERF_TYPE_HARDWARE, 25 + .config = PERF_COUNT_HW_CPU_CYCLES, 26 + .sample_type = PERF_SAMPLE_REGS_USER, 27 + .disabled = 1, 28 + .exclude_kernel = 1, 29 + .sample_period = 1, 30 + .sample_regs_user = PERF_REGS_MASK 31 + }; 32 + int fd; 33 + 34 + if (intr) 35 + return PERF_REGS_MASK; 36 + 37 + if (getauxval(AT_HWCAP) & HWCAP_SVE) 38 + attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG); 39 + 40 + /* 41 + * Check if the pmu supports perf extended regs, before 42 + * returning the register mask to sample. Open the event 43 + * on the perf process to check this. 44 + */ 45 + if (attr.sample_regs_user != PERF_REGS_MASK) { 46 + event_attr_init(&attr); 47 + fd = sys_perf_event_open(&attr, /*pid=*/0, /*cpu=*/-1, 48 + /*group_fd=*/-1, /*flags=*/0); 49 + if (fd != -1) { 50 + close(fd); 51 + return attr.sample_regs_user; 52 + } 53 + } 54 + return PERF_REGS_MASK; 55 + } 5 56 6 57 const char *__perf_reg_name_arm64(int id) 7 58 {
+6 -1
tools/perf/util/perf-regs-arch/perf_regs_arm.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include "../perf_regs.h" 4 - #include "../../../arch/arm/include/uapi/asm/perf_regs.h" 4 + #include "../../arch/arm/include/perf_regs.h" 5 + 6 + uint64_t __perf_reg_mask_arm(bool intr __maybe_unused) 7 + { 8 + return PERF_REGS_MASK; 9 + } 5 10 6 11 const char *__perf_reg_name_arm(int id) 7 12 {
+6 -1
tools/perf/util/perf-regs-arch/perf_regs_csky.c
··· 9 9 #include "../perf_regs.h" 10 10 #undef __CSKYABIV2__ 11 11 #define __CSKYABIV2__ 1 // Always want the V2 register definitions. 12 - #include "../../arch/csky/include/uapi/asm/perf_regs.h" 12 + #include "../../arch/csky/include/perf_regs.h" 13 + 14 + uint64_t __perf_reg_mask_csky(bool intr __maybe_unused) 15 + { 16 + return PERF_REGS_MASK; 17 + } 13 18 14 19 const char *__perf_reg_name_csky(int id, uint32_t e_flags) 15 20 {
+6 -1
tools/perf/util/perf-regs-arch/perf_regs_loongarch.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include "../perf_regs.h" 4 - #include "../../../arch/loongarch/include/uapi/asm/perf_regs.h" 4 + #include "../../arch/loongarch/include/perf_regs.h" 5 + 6 + uint64_t __perf_reg_mask_loongarch(bool intr __maybe_unused) 7 + { 8 + return PERF_REGS_MASK; 9 + } 5 10 6 11 const char *__perf_reg_name_loongarch(int id) 7 12 {
+6 -1
tools/perf/util/perf-regs-arch/perf_regs_mips.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include "../perf_regs.h" 4 - #include "../../../arch/mips/include/uapi/asm/perf_regs.h" 4 + #include "../../arch/mips/include/perf_regs.h" 5 + 6 + uint64_t __perf_reg_mask_mips(bool intr __maybe_unused) 7 + { 8 + return PERF_REGS_MASK; 9 + } 5 10 6 11 const char *__perf_reg_name_mips(int id) 7 12 {
+76 -1
tools/perf/util/perf-regs-arch/perf_regs_powerpc.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 + #include <errno.h> 4 + #include <string.h> 5 + #include <regex.h> 6 + #include <linux/zalloc.h> 7 + 8 + #include "../debug.h" 9 + #include "../event.h" 10 + #include "../header.h" 3 11 #include "../perf_regs.h" 4 - #include "../../../arch/powerpc/include/uapi/asm/perf_regs.h" 12 + #include "../../perf-sys.h" 13 + #include "../../arch/powerpc/util/utils_header.h" 14 + #include "../../arch/powerpc/include/perf_regs.h" 15 + 16 + #include <linux/kernel.h> 17 + 18 + #define PVR_POWER9 0x004E 19 + #define PVR_POWER10 0x0080 20 + #define PVR_POWER11 0x0082 21 + 22 + /* 23 + * mfspr is a POWERPC specific instruction, ensure it's only 24 + * built and called on POWERPC by guarding with __powerpc64__ 25 + * or __powerpc__. 26 + */ 27 + #if defined(__powerpc64__) && defined(__powerpc__) 28 + uint64_t __perf_reg_mask_powerpc(bool intr) 29 + { 30 + struct perf_event_attr attr = { 31 + .type = PERF_TYPE_HARDWARE, 32 + .config = PERF_COUNT_HW_CPU_CYCLES, 33 + .sample_type = PERF_SAMPLE_REGS_INTR, 34 + .precise_ip = 1, 35 + .disabled = 1, 36 + .exclude_kernel = 1, 37 + }; 38 + int fd; 39 + u32 version; 40 + u64 extended_mask = 0, mask = PERF_REGS_MASK; 41 + 42 + if (!intr) 43 + return PERF_REGS_MASK; 44 + 45 + /* 46 + * Get the PVR value to set the extended 47 + * mask specific to platform. 48 + */ 49 + version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF); 50 + if (version == PVR_POWER9) 51 + extended_mask = PERF_REG_PMU_MASK_300; 52 + else if ((version == PVR_POWER10) || (version == PVR_POWER11)) 53 + extended_mask = PERF_REG_PMU_MASK_31; 54 + else 55 + return mask; 56 + 57 + attr.sample_regs_intr = extended_mask; 58 + attr.sample_period = 1; 59 + event_attr_init(&attr); 60 + 61 + /* 62 + * Check if the pmu supports perf extended regs, before 63 + * returning the register mask to sample. Open the event 64 + * on the perf process to check this. 
65 + */ 66 + fd = sys_perf_event_open(&attr, /*pid=*/0, /*cpu=*/-1, 67 + /*group_fd=*/-1, /*flags=*/0); 68 + if (fd != -1) { 69 + close(fd); 70 + mask |= extended_mask; 71 + } 72 + return mask; 73 + } 74 + #else 75 + uint64_t __perf_reg_mask_powerpc(bool intr __maybe_unused) 76 + { 77 + return PERF_REGS_MASK; 78 + } 79 + #endif 5 80 6 81 const char *__perf_reg_name_powerpc(int id) 7 82 {
+6 -1
tools/perf/util/perf-regs-arch/perf_regs_riscv.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include "../perf_regs.h" 4 - #include "../../../arch/riscv/include/uapi/asm/perf_regs.h" 4 + #include "../../arch/riscv/include/perf_regs.h" 5 + 6 + uint64_t __perf_reg_mask_riscv(bool intr __maybe_unused) 7 + { 8 + return PERF_REGS_MASK; 9 + } 5 10 6 11 const char *__perf_reg_name_riscv(int id) 7 12 {
+6 -1
tools/perf/util/perf-regs-arch/perf_regs_s390.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include "../perf_regs.h" 4 - #include "../../../arch/s390/include/uapi/asm/perf_regs.h" 4 + #include "../../arch/s390/include/perf_regs.h" 5 + 6 + uint64_t __perf_reg_mask_s390(bool intr __maybe_unused) 7 + { 8 + return PERF_REGS_MASK; 9 + } 5 10 6 11 const char *__perf_reg_name_s390(int id) 7 12 {
+59 -1
tools/perf/util/perf-regs-arch/perf_regs_x86.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 + #include <errno.h> 4 + #include <string.h> 5 + #include <regex.h> 6 + #include <linux/kernel.h> 7 + #include <linux/zalloc.h> 8 + 9 + #include "../debug.h" 10 + #include "../event.h" 11 + #include "../pmu.h" 12 + #include "../pmus.h" 3 13 #include "../perf_regs.h" 4 - #include "../../../arch/x86/include/uapi/asm/perf_regs.h" 14 + #include "../../perf-sys.h" 15 + #include "../../arch/x86/include/perf_regs.h" 16 + 17 + uint64_t __perf_reg_mask_x86(bool intr) 18 + { 19 + struct perf_event_attr attr = { 20 + .type = PERF_TYPE_HARDWARE, 21 + .config = PERF_COUNT_HW_CPU_CYCLES, 22 + .sample_type = PERF_SAMPLE_REGS_INTR, 23 + .sample_regs_intr = PERF_REG_EXTENDED_MASK, 24 + .precise_ip = 1, 25 + .disabled = 1, 26 + .exclude_kernel = 1, 27 + }; 28 + int fd; 29 + 30 + if (!intr) 31 + return PERF_REGS_MASK; 32 + 33 + /* 34 + * In an unnamed union, init it here to build on older gcc versions 35 + */ 36 + attr.sample_period = 1; 37 + 38 + if (perf_pmus__num_core_pmus() > 1) { 39 + struct perf_pmu *pmu = NULL; 40 + __u64 type = PERF_TYPE_RAW; 41 + 42 + /* 43 + * The same register set is supported among different hybrid PMUs. 44 + * Only check the first available one. 45 + */ 46 + while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { 47 + type = pmu->type; 48 + break; 49 + } 50 + attr.config |= type << PERF_PMU_TYPE_SHIFT; 51 + } 52 + 53 + event_attr_init(&attr); 54 + fd = sys_perf_event_open(&attr, /*pid=*/0, /*cpu=*/-1, 55 + /*group_fd=*/-1, /*flags=*/0); 56 + if (fd != -1) { 57 + close(fd); 58 + return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK); 59 + } 60 + 61 + return PERF_REGS_MASK; 62 + } 5 63 6 64 const char *__perf_reg_name_x86(int id) 7 65 {
+80 -4
tools/perf/util/perf_regs.c
··· 13 13 return SDT_ARG_SKIP; 14 14 } 15 15 16 - uint64_t __weak arch__intr_reg_mask(void) 16 + uint64_t perf_intr_reg_mask(uint16_t e_machine) 17 17 { 18 - return 0; 18 + uint64_t mask = 0; 19 + 20 + switch (e_machine) { 21 + case EM_ARM: 22 + mask = __perf_reg_mask_arm(/*intr=*/true); 23 + break; 24 + case EM_AARCH64: 25 + mask = __perf_reg_mask_arm64(/*intr=*/true); 26 + break; 27 + case EM_CSKY: 28 + mask = __perf_reg_mask_csky(/*intr=*/true); 29 + break; 30 + case EM_LOONGARCH: 31 + mask = __perf_reg_mask_loongarch(/*intr=*/true); 32 + break; 33 + case EM_MIPS: 34 + mask = __perf_reg_mask_mips(/*intr=*/true); 35 + break; 36 + case EM_PPC: 37 + case EM_PPC64: 38 + mask = __perf_reg_mask_powerpc(/*intr=*/true); 39 + break; 40 + case EM_RISCV: 41 + mask = __perf_reg_mask_riscv(/*intr=*/true); 42 + break; 43 + case EM_S390: 44 + mask = __perf_reg_mask_s390(/*intr=*/true); 45 + break; 46 + case EM_386: 47 + case EM_X86_64: 48 + mask = __perf_reg_mask_x86(/*intr=*/true); 49 + break; 50 + default: 51 + pr_debug("Unknown ELF machine %d, interrupt sampling register mask will be empty.\n", 52 + e_machine); 53 + break; 54 + } 55 + 56 + return mask; 19 57 } 20 58 21 - uint64_t __weak arch__user_reg_mask(void) 59 + uint64_t perf_user_reg_mask(uint16_t e_machine) 22 60 { 23 - return 0; 61 + uint64_t mask = 0; 62 + 63 + switch (e_machine) { 64 + case EM_ARM: 65 + mask = __perf_reg_mask_arm(/*intr=*/false); 66 + break; 67 + case EM_AARCH64: 68 + mask = __perf_reg_mask_arm64(/*intr=*/false); 69 + break; 70 + case EM_CSKY: 71 + mask = __perf_reg_mask_csky(/*intr=*/false); 72 + break; 73 + case EM_LOONGARCH: 74 + mask = __perf_reg_mask_loongarch(/*intr=*/false); 75 + break; 76 + case EM_MIPS: 77 + mask = __perf_reg_mask_mips(/*intr=*/false); 78 + break; 79 + case EM_PPC: 80 + case EM_PPC64: 81 + mask = __perf_reg_mask_powerpc(/*intr=*/false); 82 + break; 83 + case EM_RISCV: 84 + mask = __perf_reg_mask_riscv(/*intr=*/false); 85 + break; 86 + case EM_S390: 87 + mask = 
__perf_reg_mask_s390(/*intr=*/false); 88 + break; 89 + case EM_386: 90 + case EM_X86_64: 91 + mask = __perf_reg_mask_x86(/*intr=*/false); 92 + break; 93 + default: 94 + pr_debug("Unknown ELF machine %d, user sampling register mask will be empty.\n", 95 + e_machine); 96 + break; 97 + } 98 + 99 + return mask; 24 100 } 25 101 26 102 const char *perf_reg_name(int id, uint16_t e_machine, uint32_t e_flags)
+20 -2
tools/perf/util/perf_regs.h
··· 13 13 }; 14 14 15 15 int arch_sdt_arg_parse_op(char *old_op, char **new_op); 16 - uint64_t arch__intr_reg_mask(void); 17 - uint64_t arch__user_reg_mask(void); 16 + uint64_t perf_intr_reg_mask(uint16_t e_machine); 17 + uint64_t perf_user_reg_mask(uint16_t e_machine); 18 18 19 19 const char *perf_reg_name(int id, uint16_t e_machine, uint32_t e_flags); 20 20 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); 21 21 uint64_t perf_arch_reg_ip(uint16_t e_machine); 22 22 uint64_t perf_arch_reg_sp(uint16_t e_machine); 23 + 24 + uint64_t __perf_reg_mask_arm64(bool intr); 23 25 const char *__perf_reg_name_arm64(int id); 24 26 uint64_t __perf_reg_ip_arm64(void); 25 27 uint64_t __perf_reg_sp_arm64(void); 28 + 29 + uint64_t __perf_reg_mask_arm(bool intr); 26 30 const char *__perf_reg_name_arm(int id); 27 31 uint64_t __perf_reg_ip_arm(void); 28 32 uint64_t __perf_reg_sp_arm(void); 33 + 34 + uint64_t __perf_reg_mask_csky(bool intr); 29 35 const char *__perf_reg_name_csky(int id, uint32_t e_flags); 30 36 uint64_t __perf_reg_ip_csky(void); 31 37 uint64_t __perf_reg_sp_csky(void); 38 + 39 + uint64_t __perf_reg_mask_loongarch(bool intr); 32 40 const char *__perf_reg_name_loongarch(int id); 33 41 uint64_t __perf_reg_ip_loongarch(void); 34 42 uint64_t __perf_reg_sp_loongarch(void); 43 + 44 + uint64_t __perf_reg_mask_mips(bool intr); 35 45 const char *__perf_reg_name_mips(int id); 36 46 uint64_t __perf_reg_ip_mips(void); 37 47 uint64_t __perf_reg_sp_mips(void); 48 + 49 + uint64_t __perf_reg_mask_powerpc(bool intr); 38 50 const char *__perf_reg_name_powerpc(int id); 39 51 uint64_t __perf_reg_ip_powerpc(void); 40 52 uint64_t __perf_reg_sp_powerpc(void); 53 + 54 + uint64_t __perf_reg_mask_riscv(bool intr); 41 55 const char *__perf_reg_name_riscv(int id); 42 56 uint64_t __perf_reg_ip_riscv(void); 43 57 uint64_t __perf_reg_sp_riscv(void); 58 + 59 + uint64_t __perf_reg_mask_s390(bool intr); 44 60 const char *__perf_reg_name_s390(int id); 45 61 uint64_t __perf_reg_ip_s390(void); 
46 62 uint64_t __perf_reg_sp_s390(void); 63 + 64 + uint64_t __perf_reg_mask_x86(bool intr); 47 65 const char *__perf_reg_name_x86(int id); 48 66 uint64_t __perf_reg_ip_x86(void); 49 67 uint64_t __perf_reg_sp_x86(void);