
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
"ACPI:

- Improve error reporting when failing to manage SDEI on AGDI device
removal

Assembly routines:

- Improve register constraints so that the compiler can make use of
the zero register instead of moving an immediate #0 into a GPR

- Allow the compiler to allocate the registers used for CAS
instructions

CPU features and system registers:

- Cleanups to the way in which CPU features are identified from the
ID register fields

- Extend system register definition generation to handle Enum types
when defining shared register fields

- Generate definitions for new _EL2 registers and add new fields for
ID_AA64PFR1_EL1

- Allow SVE to be disabled separately from SME on the kernel
command-line

Tracing:

- Support for "direct calls" in ftrace, which enables BPF tracing for
arm64

Kdump:

- Don't bother unmapping the crashkernel from the linear mapping,
which then allows us to use huge (block) mappings and reduce TLB
pressure when a crashkernel is loaded.

Memory management:

- Try again to remove data cache invalidation from the coherent DMA
allocation path

- Simplify the fixmap code by mapping at page granularity

- Allow the kfence pool to be allocated early, preventing the rest of
the linear mapping from being forced to page granularity

Perf and PMU:

- Move CPU PMU code out to drivers/perf/ where it can be reused by
the 32-bit ARM architecture when running on ARMv8 CPUs

- Fix race between CPU PMU probing and pKVM host de-privilege

- Add support for Apple M2 CPU PMU

- Adjust the generic PERF_COUNT_HW_BRANCH_INSTRUCTIONS event
dynamically, depending on what the CPU actually supports

- Minor fixes and cleanups to system PMU drivers

Stack tracing:

- Use the XPACLRI instruction to strip PAC from pointers, rather than
rolling our own function in C

- Remove redundant PAC removal for toolchains that handle this in
their builtins

- Make backtracing more resilient in the face of instrumentation

Miscellaneous:

- Fix single-step with KGDB

- Remove harmless warning when 'nokaslr' is passed on the kernel
command-line

- Minor fixes and cleanups across the board"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (72 commits)
KVM: arm64: Ensure CPU PMU probes before pKVM host de-privilege
arm64: kexec: include reboot.h
arm64: delete dead code in this_cpu_set_vectors()
arm64/cpufeature: Use helper macro to specify ID register for capabilites
drivers/perf: hisi: add NULL check for name
drivers/perf: hisi: Remove redundant initialized of pmu->name
arm64/cpufeature: Consistently use symbolic constants for min_field_value
arm64/cpufeature: Pull out helper for CPUID register definitions
arm64/sysreg: Convert HFGITR_EL2 to automatic generation
ACPI: AGDI: Improve error reporting for problems during .remove()
arm64: kernel: Fix kernel warning when nokaslr is passed to commandline
perf/arm-cmn: Fix port detection for CMN-700
arm64: kgdb: Set PSTATE.SS to 1 to re-enable single-step
arm64: move PAC masks to <asm/pointer_auth.h>
arm64: use XPACLRI to strip PAC
arm64: avoid redundant PAC stripping in __builtin_return_address()
arm64/sme: Fix some comments of ARM SME
arm64/signal: Alloc tpidr2 sigframe after checking system_supports_tpidr2()
arm64/signal: Use system_supports_tpidr2() to check TPIDR2
arm64/idreg: Don't disable SME when disabling SVE
...

+1934 -1723
+2
Documentation/devicetree/bindings/arm/pmu.yaml
···
    items:
      - enum:
          - apm,potenza-pmu
+         - apple,avalanche-pmu
+         - apple,blizzard-pmu
          - apple,firestorm-pmu
          - apple,icestorm-pmu
          - arm,armv8-pmuv3 # Only for s/w models
+247
arch/arm/include/asm/arm_pmuv3.h
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+
+#define PMCCNTR			__ACCESS_CP15_64(0, c9)
+
+#define PMCR			__ACCESS_CP15(c9, 0, c12, 0)
+#define PMCNTENSET		__ACCESS_CP15(c9, 0, c12, 1)
+#define PMCNTENCLR		__ACCESS_CP15(c9, 0, c12, 2)
+#define PMOVSR			__ACCESS_CP15(c9, 0, c12, 3)
+#define PMSELR			__ACCESS_CP15(c9, 0, c12, 5)
+#define PMCEID0			__ACCESS_CP15(c9, 0, c12, 6)
+#define PMCEID1			__ACCESS_CP15(c9, 0, c12, 7)
+#define PMXEVTYPER		__ACCESS_CP15(c9, 0, c13, 1)
+#define PMXEVCNTR		__ACCESS_CP15(c9, 0, c13, 2)
+#define PMUSERENR		__ACCESS_CP15(c9, 0, c14, 0)
+#define PMINTENSET		__ACCESS_CP15(c9, 0, c14, 1)
+#define PMINTENCLR		__ACCESS_CP15(c9, 0, c14, 2)
+#define PMMIR			__ACCESS_CP15(c9, 0, c14, 6)
+#define PMCCFILTR		__ACCESS_CP15(c14, 0, c15, 7)
+
+#define PMEVCNTR0		__ACCESS_CP15(c14, 0, c8, 0)
+#define PMEVCNTR1		__ACCESS_CP15(c14, 0, c8, 1)
+#define PMEVCNTR2		__ACCESS_CP15(c14, 0, c8, 2)
+#define PMEVCNTR3		__ACCESS_CP15(c14, 0, c8, 3)
+#define PMEVCNTR4		__ACCESS_CP15(c14, 0, c8, 4)
+#define PMEVCNTR5		__ACCESS_CP15(c14, 0, c8, 5)
+#define PMEVCNTR6		__ACCESS_CP15(c14, 0, c8, 6)
+#define PMEVCNTR7		__ACCESS_CP15(c14, 0, c8, 7)
+#define PMEVCNTR8		__ACCESS_CP15(c14, 0, c9, 0)
+#define PMEVCNTR9		__ACCESS_CP15(c14, 0, c9, 1)
+#define PMEVCNTR10		__ACCESS_CP15(c14, 0, c9, 2)
+#define PMEVCNTR11		__ACCESS_CP15(c14, 0, c9, 3)
+#define PMEVCNTR12		__ACCESS_CP15(c14, 0, c9, 4)
+#define PMEVCNTR13		__ACCESS_CP15(c14, 0, c9, 5)
+#define PMEVCNTR14		__ACCESS_CP15(c14, 0, c9, 6)
+#define PMEVCNTR15		__ACCESS_CP15(c14, 0, c9, 7)
+#define PMEVCNTR16		__ACCESS_CP15(c14, 0, c10, 0)
+#define PMEVCNTR17		__ACCESS_CP15(c14, 0, c10, 1)
+#define PMEVCNTR18		__ACCESS_CP15(c14, 0, c10, 2)
+#define PMEVCNTR19		__ACCESS_CP15(c14, 0, c10, 3)
+#define PMEVCNTR20		__ACCESS_CP15(c14, 0, c10, 4)
+#define PMEVCNTR21		__ACCESS_CP15(c14, 0, c10, 5)
+#define PMEVCNTR22		__ACCESS_CP15(c14, 0, c10, 6)
+#define PMEVCNTR23		__ACCESS_CP15(c14, 0, c10, 7)
+#define PMEVCNTR24		__ACCESS_CP15(c14, 0, c11, 0)
+#define PMEVCNTR25		__ACCESS_CP15(c14, 0, c11, 1)
+#define PMEVCNTR26		__ACCESS_CP15(c14, 0, c11, 2)
+#define PMEVCNTR27		__ACCESS_CP15(c14, 0, c11, 3)
+#define PMEVCNTR28		__ACCESS_CP15(c14, 0, c11, 4)
+#define PMEVCNTR29		__ACCESS_CP15(c14, 0, c11, 5)
+#define PMEVCNTR30		__ACCESS_CP15(c14, 0, c11, 6)
+
+#define PMEVTYPER0		__ACCESS_CP15(c14, 0, c12, 0)
+#define PMEVTYPER1		__ACCESS_CP15(c14, 0, c12, 1)
+#define PMEVTYPER2		__ACCESS_CP15(c14, 0, c12, 2)
+#define PMEVTYPER3		__ACCESS_CP15(c14, 0, c12, 3)
+#define PMEVTYPER4		__ACCESS_CP15(c14, 0, c12, 4)
+#define PMEVTYPER5		__ACCESS_CP15(c14, 0, c12, 5)
+#define PMEVTYPER6		__ACCESS_CP15(c14, 0, c12, 6)
+#define PMEVTYPER7		__ACCESS_CP15(c14, 0, c12, 7)
+#define PMEVTYPER8		__ACCESS_CP15(c14, 0, c13, 0)
+#define PMEVTYPER9		__ACCESS_CP15(c14, 0, c13, 1)
+#define PMEVTYPER10		__ACCESS_CP15(c14, 0, c13, 2)
+#define PMEVTYPER11		__ACCESS_CP15(c14, 0, c13, 3)
+#define PMEVTYPER12		__ACCESS_CP15(c14, 0, c13, 4)
+#define PMEVTYPER13		__ACCESS_CP15(c14, 0, c13, 5)
+#define PMEVTYPER14		__ACCESS_CP15(c14, 0, c13, 6)
+#define PMEVTYPER15		__ACCESS_CP15(c14, 0, c13, 7)
+#define PMEVTYPER16		__ACCESS_CP15(c14, 0, c14, 0)
+#define PMEVTYPER17		__ACCESS_CP15(c14, 0, c14, 1)
+#define PMEVTYPER18		__ACCESS_CP15(c14, 0, c14, 2)
+#define PMEVTYPER19		__ACCESS_CP15(c14, 0, c14, 3)
+#define PMEVTYPER20		__ACCESS_CP15(c14, 0, c14, 4)
+#define PMEVTYPER21		__ACCESS_CP15(c14, 0, c14, 5)
+#define PMEVTYPER22		__ACCESS_CP15(c14, 0, c14, 6)
+#define PMEVTYPER23		__ACCESS_CP15(c14, 0, c14, 7)
+#define PMEVTYPER24		__ACCESS_CP15(c14, 0, c15, 0)
+#define PMEVTYPER25		__ACCESS_CP15(c14, 0, c15, 1)
+#define PMEVTYPER26		__ACCESS_CP15(c14, 0, c15, 2)
+#define PMEVTYPER27		__ACCESS_CP15(c14, 0, c15, 3)
+#define PMEVTYPER28		__ACCESS_CP15(c14, 0, c15, 4)
+#define PMEVTYPER29		__ACCESS_CP15(c14, 0, c15, 5)
+#define PMEVTYPER30		__ACCESS_CP15(c14, 0, c15, 6)
+
+#define RETURN_READ_PMEVCNTRN(n) \
+	return read_sysreg(PMEVCNTR##n)
+static unsigned long read_pmevcntrn(int n)
+{
+	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+	return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+	write_sysreg(val, PMEVCNTR##n)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+	write_sysreg(val, PMEVTYPER##n)
+static void write_pmevtypern(int n, unsigned long val)
+{
+	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+	return read_sysreg(PMMIR);
+}
+
+static inline u32 read_pmuver(void)
+{
+	/* PMUVers is not a signed field */
+	u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0);
+
+	return (dfr0 >> 24) & 0xf;
+}
+
+static inline void write_pmcr(u32 val)
+{
+	write_sysreg(val, PMCR);
+}
+
+static inline u32 read_pmcr(void)
+{
+	return read_sysreg(PMCR);
+}
+
+static inline void write_pmselr(u32 val)
+{
+	write_sysreg(val, PMSELR);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+	write_sysreg(val, PMCCNTR);
+}
+
+static inline u64 read_pmccntr(void)
+{
+	return read_sysreg(PMCCNTR);
+}
+
+static inline void write_pmxevcntr(u32 val)
+{
+	write_sysreg(val, PMXEVCNTR);
+}
+
+static inline u32 read_pmxevcntr(void)
+{
+	return read_sysreg(PMXEVCNTR);
+}
+
+static inline void write_pmxevtyper(u32 val)
+{
+	write_sysreg(val, PMXEVTYPER);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+	write_sysreg(val, PMCNTENSET);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+	write_sysreg(val, PMCNTENCLR);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+	write_sysreg(val, PMINTENSET);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+	write_sysreg(val, PMINTENCLR);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+	write_sysreg(val, PMCCFILTR);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+	write_sysreg(val, PMOVSR);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+	return read_sysreg(PMOVSR);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+	write_sysreg(val, PMUSERENR);
+}
+
+static inline u32 read_pmceid0(void)
+{
+	return read_sysreg(PMCEID0);
+}
+
+static inline u32 read_pmceid1(void)
+{
+	return read_sysreg(PMCEID1);
+}
+
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+	return false;
+}
+
+/* PMU Version in DFR Register */
+#define ARMV8_PMU_DFR_VER_NI		0
+#define ARMV8_PMU_DFR_VER_V3P4		0x5
+#define ARMV8_PMU_DFR_VER_V3P5		0x6
+#define ARMV8_PMU_DFR_VER_IMP_DEF	0xF
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+	return !(pmuver == ARMV8_PMU_DFR_VER_IMP_DEF ||
+		 pmuver == ARMV8_PMU_DFR_VER_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+	return pmuver >= ARMV8_PMU_DFR_VER_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+	return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
+}
+
+#endif
+1 -1
arch/arm/mm/Kconfig
···
 	select CPU_THUMB_CAPABLE
 	select CPU_TLB_V6 if MMU

-# ARMv7
+# ARMv7 and ARMv8 architectures
 config CPU_V7
 	bool
 	select CPU_32v6K
+18
arch/arm64/Kconfig
···
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
+		if $(cc-option,-fpatchable-function-entry=2)
+	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \
+		if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
 	select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
 		if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
 		    !CC_OPTIMIZE_FOR_SIZE)
···
 config BROKEN_GAS_INST
 	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+
+config BUILTIN_RETURN_ADDRESS_STRIPS_PAC
+	bool
+	# Clang's __builtin_return_adddress() strips the PAC since 12.0.0
+	# https://reviews.llvm.org/D75044
+	default y if CC_IS_CLANG && (CLANG_VERSION >= 120000)
+	# GCC's __builtin_return_address() strips the PAC since 11.1.0,
+	# and this was backported to 10.2.0, 9.4.0, 8.5.0, but not earlier
+	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94891
+	default y if CC_IS_GCC && (GCC_VERSION >= 110100)
+	default y if CC_IS_GCC && (GCC_VERSION >= 100200) && (GCC_VERSION < 110000)
+	default y if CC_IS_GCC && (GCC_VERSION >= 90400) && (GCC_VERSION < 100000)
+	default y if CC_IS_GCC && (GCC_VERSION >= 80500) && (GCC_VERSION < 90000)
+	default n

 config KASAN_SHADOW_OFFSET
 	hex
+155
arch/arm64/include/asm/arm_pmuv3.h
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ASM_PMUV3_H
+#define __ASM_PMUV3_H
+
+#include <linux/kvm_host.h>
+
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+#define RETURN_READ_PMEVCNTRN(n) \
+	return read_sysreg(pmevcntr##n##_el0)
+static unsigned long read_pmevcntrn(int n)
+{
+	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+	return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+	write_sysreg(val, pmevcntr##n##_el0)
+static void write_pmevcntrn(int n, unsigned long val)
+{
+	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+}
+
+#define WRITE_PMEVTYPERN(n) \
+	write_sysreg(val, pmevtyper##n##_el0)
+static void write_pmevtypern(int n, unsigned long val)
+{
+	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+}
+
+static inline unsigned long read_pmmir(void)
+{
+	return read_cpuid(PMMIR_EL1);
+}
+
+static inline u32 read_pmuver(void)
+{
+	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+	return cpuid_feature_extract_unsigned_field(dfr0,
+			ID_AA64DFR0_EL1_PMUVer_SHIFT);
+}
+
+static inline void write_pmcr(u32 val)
+{
+	write_sysreg(val, pmcr_el0);
+}
+
+static inline u32 read_pmcr(void)
+{
+	return read_sysreg(pmcr_el0);
+}
+
+static inline void write_pmselr(u32 val)
+{
+	write_sysreg(val, pmselr_el0);
+}
+
+static inline void write_pmccntr(u64 val)
+{
+	write_sysreg(val, pmccntr_el0);
+}
+
+static inline u64 read_pmccntr(void)
+{
+	return read_sysreg(pmccntr_el0);
+}
+
+static inline void write_pmxevcntr(u32 val)
+{
+	write_sysreg(val, pmxevcntr_el0);
+}
+
+static inline u32 read_pmxevcntr(void)
+{
+	return read_sysreg(pmxevcntr_el0);
+}
+
+static inline void write_pmxevtyper(u32 val)
+{
+	write_sysreg(val, pmxevtyper_el0);
+}
+
+static inline void write_pmcntenset(u32 val)
+{
+	write_sysreg(val, pmcntenset_el0);
+}
+
+static inline void write_pmcntenclr(u32 val)
+{
+	write_sysreg(val, pmcntenclr_el0);
+}
+
+static inline void write_pmintenset(u32 val)
+{
+	write_sysreg(val, pmintenset_el1);
+}
+
+static inline void write_pmintenclr(u32 val)
+{
+	write_sysreg(val, pmintenclr_el1);
+}
+
+static inline void write_pmccfiltr(u32 val)
+{
+	write_sysreg(val, pmccfiltr_el0);
+}
+
+static inline void write_pmovsclr(u32 val)
+{
+	write_sysreg(val, pmovsclr_el0);
+}
+
+static inline u32 read_pmovsclr(void)
+{
+	return read_sysreg(pmovsclr_el0);
+}
+
+static inline void write_pmuserenr(u32 val)
+{
+	write_sysreg(val, pmuserenr_el0);
+}
+
+static inline u32 read_pmceid0(void)
+{
+	return read_sysreg(pmceid0_el0);
+}
+
+static inline u32 read_pmceid1(void)
+{
+	return read_sysreg(pmceid1_el0);
+}
+
+static inline bool pmuv3_implemented(int pmuver)
+{
+	return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
+		 pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
+}
+
+static inline bool is_pmuv3p4(int pmuver)
+{
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
+}
+
+static inline bool is_pmuv3p5(int pmuver)
+{
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
+}
+
+#endif
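[Editor's note: the accessors above run at EL1. For a feel of what they wrap, here is a minimal userspace sketch (not part of this series) reading the same cycle counter from EL0. It only works where the kernel has granted EL0 access via PMUSERENR_EL0 (for instance through the arm64 perf user-access sysctl); otherwise the MRS traps.]

	#include <stdint.h>

	static inline uint64_t read_cycle_counter(void)
	{
		uint64_t val;

		/* PMCCNTR_EL0: the PMUv3 cycle counter behind read_pmccntr() */
		asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
		return val;
	}

	int main(void)
	{
		return read_cycle_counter() != 0 ? 0 : 1;
	}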
+5 -12
arch/arm64/include/asm/atomic_lse.h
···
 					      u##sz old,		\
 					      u##sz new)		\
 {									\
-	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
-	register u##sz x1 asm ("x1") = old;				\
-	register u##sz x2 asm ("x2") = new;				\
-	unsigned long tmp;						\
-									\
 	asm volatile(							\
 	__LSE_PREAMBLE							\
-	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
-	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
-	"	mov	%" #w "[ret], %" #w "[tmp]"			\
-	: [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr),			\
-	  [tmp] "=&r" (tmp)						\
-	: [old] "r" (x1), [new] "r" (x2)				\
+	"	cas" #mb #sfx "	%" #w "[old], %" #w "[new], %[v]\n"	\
+	: [v] "+Q" (*(u##sz *)ptr),					\
+	  [old] "+r" (old)						\
+	: [new] "rZ" (new)						\
	: cl);								\
									\
-	return x0;							\
+	return old;							\
 }

 __CMPXCHG_CASE(w, b,     ,  8,   )
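[Editor's note: the win here is that the compare-and-swap no longer pins x0-x2 or needs the two surrounding MOVs. A standalone sketch of the new pattern, assuming an ARMv8.1+ toolchain and CPU (compile with -march=armv8.1-a); it is simplified from the kernel macro, with a plain "memory" clobber in place of the per-ordering clobber list:]

	#include <stdint.h>

	static inline uint64_t cas64(uint64_t *ptr, uint64_t old, uint64_t new)
	{
		/* "+r" (old) lets the compiler pick any register for the
		 * compare/result operand; "rZ" (new) additionally allows
		 * xzr when the stored value is a constant zero. */
		asm volatile("cas %x[old], %x[new], %[v]"
			     : [v] "+Q" (*ptr), [old] "+r" (old)
			     : [new] "rZ" (new)
			     : "memory");
		return old;	/* previous value of *ptr */
	}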
+5 -5
arch/arm64/include/asm/barrier.h
···
 	case 1:								\
 		asm volatile ("stlrb %w1, %0"				\
 				: "=Q" (*__p)				\
-				: "r" (*(__u8 *)__u.__c)		\
+				: "rZ" (*(__u8 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("stlrh %w1, %0"				\
 				: "=Q" (*__p)				\
-				: "r" (*(__u16 *)__u.__c)		\
+				: "rZ" (*(__u16 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("stlr %w1, %0"				\
 				: "=Q" (*__p)				\
-				: "r" (*(__u32 *)__u.__c)		\
+				: "rZ" (*(__u32 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	case 8:								\
-		asm volatile ("stlr %1, %0"				\
+		asm volatile ("stlr %x1, %0"				\
 				: "=Q" (*__p)				\
-				: "r" (*(__u64 *)__u.__c)		\
+				: "rZ" (*(__u64 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	}								\
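[Editor's note: the "rZ" constraint is exactly what the "zero register" bullet in the summary refers to. A compilable userspace illustration (arm64 GCC/Clang):]

	#include <stdint.h>

	static inline void store_release64(uint64_t *p, uint64_t v)
	{
		/* With "rZ", storing a constant 0 compiles straight to
		 * "stlr xzr, [p]"; with plain "r" the compiler must first
		 * emit "mov x<n>, #0". */
		asm volatile("stlr %x1, %0" : "=Q" (*p) : "rZ" (v) : "memory");
	}

	int main(void)
	{
		uint64_t flag;

		store_release64(&flag, 0);
		return (int)flag;
	}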
-4
arch/arm64/include/asm/compat.h
···
 	int		f_spare[4];
 };

-#define COMPAT_RLIM_INFINITY		0xffffffff
-
-#define COMPAT_OFF_T_MAX	0x7fffffff
-
 #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 #define COMPAT_MINSIGSTKSZ	2048
+25 -11
arch/arm64/include/asm/compiler.h
···
 #define ARM64_ASM_PREAMBLE
 #endif

-/*
- * The EL0/EL1 pointer bits used by a pointer authentication code.
- * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
- */
-#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
-#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
+#define xpaclri(ptr)							\
+({									\
+	register unsigned long __xpaclri_ptr asm("x30") = (ptr);	\
+									\
+	asm(								\
+	ARM64_ASM_PREAMBLE						\
+	"	hint	#7\n"						\
+	: "+r" (__xpaclri_ptr));					\
+									\
+	__xpaclri_ptr;							\
+})

-/* Valid for EL0 TTBR0 and EL1 TTBR1 instruction pointers */
-#define ptrauth_clear_pac(ptr)						\
-	((ptr & BIT_ULL(55)) ? (ptr | ptrauth_kernel_pac_mask()) :	\
-			       (ptr & ~ptrauth_user_pac_mask()))
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_strip_kernel_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_kernel_insn_pac(ptr)	(ptr)
+#endif

+#ifdef CONFIG_ARM64_PTR_AUTH
+#define ptrauth_strip_user_insn_pac(ptr)	xpaclri(ptr)
+#else
+#define ptrauth_strip_user_insn_pac(ptr)	(ptr)
+#endif
+
+#if !defined(CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC)
 #define __builtin_return_address(val)					\
-	(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+	(void *)(ptrauth_strip_kernel_insn_pac((unsigned long)__builtin_return_address(val)))
+#endif

 #endif /* __ASM_COMPILER_H */
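[Editor's note: XPACLRI is encoded in the HINT space ("hint #7"), so it executes as a NOP on cores without pointer authentication, which is what makes the unconditional definition above safe. A userspace sketch of the same idiom (strip_pac is a hypothetical helper name, arm64 only):]

	#include <stdint.h>

	static inline void *strip_pac(void *ptr)
	{
		/* XPACLRI only operates on x30 (LR), hence the pinning */
		register uint64_t p asm("x30") = (uint64_t)ptr;

		asm("hint #7" : "+r" (p));	/* XPACLRI: clear PAC bits */
		return (void *)p;
	}

	int main(void)
	{
		/* An unsigned pointer passes through unchanged */
		return strip_pac((void *)main) == (void *)main ? 0 : 1;
	}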
+1
arch/arm64/include/asm/debug-monitors.h
···
 void kernel_enable_single_step(struct pt_regs *regs);
 void kernel_disable_single_step(void);
 int kernel_active_single_step(void);
+void kernel_rewind_single_step(struct pt_regs *regs);

 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 int reinstall_suspended_bps(struct pt_regs *regs);
+11 -11
arch/arm64/include/asm/fixmap.h
···
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/sizes.h>
 #include <asm/boot.h>
 #include <asm/page.h>
···
 	FIX_HOLE,

 	/*
-	 * Reserve a virtual window for the FDT that is 2 MB larger than the
-	 * maximum supported size, and put it at the top of the fixmap region.
-	 * The additional space ensures that any FDT that does not exceed
-	 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
-	 * 2 MB alignment boundaries.
-	 *
-	 * Keep this at the top so it remains 2 MB aligned.
+	 * Reserve a virtual window for the FDT that is a page bigger than the
+	 * maximum supported size. The additional space ensures that any FDT
+	 * that does not exceed MAX_FDT_SIZE can be mapped regardless of
+	 * whether it crosses any page boundary.
 	 */
-#define FIX_FDT_SIZE		(MAX_FDT_SIZE + SZ_2M)
 	FIX_FDT_END,
-	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+	FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,

 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
···
 	__end_of_fixed_addresses
 };

-#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_SIZE		(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_TOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_TOT_START	(FIXADDR_TOP - FIXADDR_TOT_SIZE)

 #define FIXMAP_PAGE_IO     __pgprot(PROT_DEVICE_nGnRE)

 void __init early_fixmap_init(void);
+void __init fixmap_copy(pgd_t *pgdir);

 #define __early_set_fixmap __set_fixmap
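[Editor's note: a quick check of the new sizing, assuming 4 KiB pages and the arm64 MAX_FDT_SIZE of 2 MiB. One page of slack suffices because a maximal FDT can at worst start mid-page:]

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define MAX_FDT_SIZE	(2UL * 1024 * 1024)	/* SZ_2M on arm64 */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* 512 slots for the FDT itself plus 1 slot of slack = 513 */
		printf("FDT fixmap slots: %lu\n",
		       DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1);
		return 0;
	}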
+22
arch/arm64/include/asm/ftrace.h
···
 #define arch_ftrace_get_regs(regs) NULL

+/*
+ * Note: sizeof(struct ftrace_regs) must be a multiple of 16 to ensure correct
+ * stack alignment
+ */
 struct ftrace_regs {
 	/* x0 - x8 */
 	unsigned long regs[9];
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+	unsigned long direct_tramp;
+#else
 	unsigned long __unused;
+#endif

 	unsigned long fp;
 	unsigned long lr;
···
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs);
 #define ftrace_graph_func ftrace_graph_func
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
+						 unsigned long addr)
+{
+	/*
+	 * The ftrace trampoline will return to this address instead of the
+	 * instrumented function.
+	 */
+	fregs->direct_tramp = addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
 #endif

 #define ftrace_return_address(n) return_address(n)
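[Editor's note: for a sense of how direct_tramp gets used, here is a minimal sketch modelled on the samples/ftrace/ examples: a module attaches its own trampoline to a traced function, and the ftrace trampoline then "returns" into it via arch_ftrace_set_direct_caller(). The my_tramp assembly body is elided and hypothetical, and the API shown assumes this kernel's register_ftrace_direct() signature:]

	#include <linux/ftrace.h>
	#include <linux/module.h>
	#include <linux/sched.h>

	extern void my_tramp(void);	/* assembly trampoline, not shown */

	static struct ftrace_ops direct;

	static int __init direct_example_init(void)
	{
		int ret;

		ret = ftrace_set_filter_ip(&direct,
					   (unsigned long)wake_up_process, 0, 0);
		if (ret)
			return ret;

		/* BPF tracing on arm64 relies on this same mechanism */
		return register_ftrace_direct(&direct, (unsigned long)my_tramp);
	}
	module_init(direct_example_init);
	MODULE_LICENSE("GPL");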
+4 -1
arch/arm64/include/asm/kernel-pgtable.h
···
 #define EARLY_KASLR	(0)
 #endif

+#define SPAN_NR_ENTRIES(vstart, vend, shift) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
+
 #define EARLY_ENTRIES(vstart, vend, shift, add) \
-	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
+	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))

 #define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
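[Editor's note: a worked example of SPAN_NR_ENTRIES, which counts how many translation-table entries a virtual range occupies at a given shift:]

	#include <stdio.h>

	#define SPAN_NR_ENTRIES(vstart, vend, shift) \
		((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

	int main(void)
	{
		/* A 5 KiB range starting 1 KiB into a 4 KiB page straddles
		 * two pages, so it needs two PTEs even though it is only a
		 * little over one page in size. */
		printf("%lu\n", SPAN_NR_ENTRIES(0x1400UL, 0x2800UL, 12)); /* 2 */
		return 0;
	}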
-6
arch/arm64/include/asm/kexec.h
···
 int machine_kexec_post_load(struct kimage *image);
 #define machine_kexec_post_load machine_kexec_post_load
-
-void arch_kexec_protect_crashkres(void);
-#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
-
-void arch_kexec_unprotect_crashkres(void);
-#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
 #endif

 #define ARCH_HAS_KIMAGE_ARCH
+10
arch/arm64/include/asm/kfence.h
···
 	return true;
 }

+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+	return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
 #endif /* __ASM_KFENCE_H */
-5
arch/arm64/include/asm/memory.h
···
 })

 void dump_mem_limit(void);
-
-static inline bool defer_reserve_crashkernel(void)
-{
-	return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
-}
 #endif /* !ASSEMBLY */

 /*
+2
arch/arm64/include/asm/mmu.h
···
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
+extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot, bool page_mappings_only);
-249
arch/arm64/include/asm/perf_event.h
···
 #include <asm/stack_pointer.h>
 #include <asm/ptrace.h>

-#define	ARMV8_PMU_MAX_COUNTERS	32
-#define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
-
-/*
- * Common architectural and microarchitectural event numbers.
- */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR			0x0000
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL		0x0001
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL		0x0002
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL		0x0003
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE			0x0004
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL		0x0005
-#define ARMV8_PMUV3_PERFCTR_LD_RETIRED			0x0006
-#define ARMV8_PMUV3_PERFCTR_ST_RETIRED			0x0007
-#define ARMV8_PMUV3_PERFCTR_INST_RETIRED		0x0008
-#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN			0x0009
-#define ARMV8_PMUV3_PERFCTR_EXC_RETURN			0x000A
-#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED		0x000B
-#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED		0x000C
-#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED		0x000D
-#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED		0x000E
-#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED	0x000F
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED			0x0010
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES			0x0011
-#define ARMV8_PMUV3_PERFCTR_BR_PRED			0x0012
-#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS			0x0013
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE			0x0014
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB		0x0015
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE			0x0016
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL		0x0017
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB		0x0018
-#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS			0x0019
-#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR		0x001A
-#define ARMV8_PMUV3_PERFCTR_INST_SPEC			0x001B
-#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED		0x001C
-#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES			0x001D
-#define ARMV8_PMUV3_PERFCTR_CHAIN			0x001E
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE		0x001F
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE		0x0020
-#define ARMV8_PMUV3_PERFCTR_BR_RETIRED			0x0021
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED		0x0022
-#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND		0x0023
-#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND		0x0024
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB			0x0025
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB			0x0026
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE			0x0027
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL		0x0028
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE		0x0029
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL		0x002A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE			0x002B
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB		0x002C
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL		0x002D
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL		0x002E
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB			0x002F
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB			0x0030
-#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS		0x0031
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE			0x0032
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS		0x0033
-#define ARMV8_PMUV3_PERFCTR_DTLB_WALK			0x0034
-#define ARMV8_PMUV3_PERFCTR_ITLB_WALK			0x0035
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD			0x0036
-#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD		0x0037
-#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD		0x0038
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD		0x0039
-#define ARMV8_PMUV3_PERFCTR_OP_RETIRED			0x003A
-#define ARMV8_PMUV3_PERFCTR_OP_SPEC			0x003B
-#define ARMV8_PMUV3_PERFCTR_STALL			0x003C
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND		0x003D
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND		0x003E
-#define ARMV8_PMUV3_PERFCTR_STALL_SLOT			0x003F
-
-/* Statistical profiling extension microarchitectural events */
-#define ARMV8_SPE_PERFCTR_SAMPLE_POP			0x4000
-#define ARMV8_SPE_PERFCTR_SAMPLE_FEED			0x4001
-#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE		0x4002
-#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION		0x4003
-
-/* AMUv1 architecture events */
-#define ARMV8_AMU_PERFCTR_CNT_CYCLES			0x4004
-#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM		0x4005
-
-/* long-latency read miss events */
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS		0x4006
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD		0x4009
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS		0x400A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD		0x400B
-
-/* Trace buffer events */
-#define ARMV8_PMUV3_PERFCTR_TRB_WRAP			0x400C
-#define ARMV8_PMUV3_PERFCTR_TRB_TRIG			0x400E
-
-/* Trace unit events */
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0			0x4010
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1			0x4011
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2			0x4012
-#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3			0x4013
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4		0x4018
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5		0x4019
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6		0x401A
-#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7		0x401B
-
-/* additional latency from alignment events */
-#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT		0x4020
-#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT		0x4021
-#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT		0x4022
-
-/* Armv8.5 Memory Tagging Extension events */
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED		0x4024
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD		0x4025
-#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR		0x4026
-
-/* ARMv8 recommended implementation defined event types */
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD		0x0040
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR		0x0041
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD	0x0042
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR	0x0043
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER	0x0044
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER	0x0045
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM	0x0046
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN		0x0047
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL		0x0048
-
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD		0x004C
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR		0x004D
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD			0x004E
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR			0x004F
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD		0x0050
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR		0x0051
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD	0x0052
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR	0x0053
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM	0x0056
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN		0x0057
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL		0x0058
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD		0x005C
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR		0x005D
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD			0x005E
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR			0x005F
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD		0x0060
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR		0x0061
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED		0x0062
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED	0x0063
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL		0x0064
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH		0x0065
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD		0x0066
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR		0x0067
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC		0x0068
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC		0x0069
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC	0x006A
-
-#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC			0x006C
-#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC		0x006D
-#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC		0x006E
-#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC			0x006F
-#define ARMV8_IMPDEF_PERFCTR_LD_SPEC			0x0070
-#define ARMV8_IMPDEF_PERFCTR_ST_SPEC			0x0071
-#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC			0x0072
-#define ARMV8_IMPDEF_PERFCTR_DP_SPEC			0x0073
-#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC			0x0074
-#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC			0x0075
-#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC		0x0076
-#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC		0x0077
-#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC		0x0078
-#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC		0x0079
-#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC		0x007A
-
-#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC			0x007C
-#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC			0x007D
-#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC			0x007E
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF			0x0081
-#define ARMV8_IMPDEF_PERFCTR_EXC_SVC			0x0082
-#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT			0x0083
-#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT			0x0084
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ			0x0086
-#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ			0x0087
-#define ARMV8_IMPDEF_PERFCTR_EXC_SMC			0x0088
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_HVC			0x008A
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT		0x008B
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT		0x008C
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER		0x008D
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ		0x008E
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ		0x008F
-#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC			0x0090
-#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC			0x0091
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD		0x00A0
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR		0x00A1
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD	0x00A2
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR	0x00A3
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM	0x00A6
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN		0x00A7
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL		0x00A8
-
-/*
- * Per-CPU PMCR: config reg
- */
-#define ARMV8_PMU_PMCR_E	(1 << 0) /* Enable all counters */
-#define ARMV8_PMU_PMCR_P	(1 << 1) /* Reset all counters */
-#define ARMV8_PMU_PMCR_C	(1 << 2) /* Cycle counter reset */
-#define ARMV8_PMU_PMCR_D	(1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV8_PMU_PMCR_X	(1 << 4) /* Export to ETM */
-#define ARMV8_PMU_PMCR_DP	(1 << 5) /* Disable CCNT if non-invasive debug*/
-#define ARMV8_PMU_PMCR_LC	(1 << 6) /* Overflow on 64 bit cycle counter */
-#define ARMV8_PMU_PMCR_LP	(1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT	11	/* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK	0x1f
-#define ARMV8_PMU_PMCR_MASK	0xff	/* Mask for writable bits */
-
-/*
- * PMOVSR: counters overflow flag status reg
- */
-#define ARMV8_PMU_OVSR_MASK		0xffffffff	/* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK	ARMV8_PMU_OVSR_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV8_PMU_EVTYPE_MASK	0xc800ffff	/* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT	0xffff		/* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv3
- */
-#define ARMV8_PMU_EXCLUDE_EL1	(1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0	(1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2	(1U << 27)
-
-/*
- * PMUSERENR: user enable reg
- */
-#define ARMV8_PMU_USERENR_MASK	0xf		/* Mask for writable bits */
-#define ARMV8_PMU_USERENR_EN	(1 << 0) /* PMU regs can be accessed at EL0 */
-#define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
-#define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
-#define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
-
-/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK	0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT	8
-#define ARMV8_PMU_BUS_SLOTS_MASK	0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT	16
-#define ARMV8_PMU_BUS_WIDTH_MASK	0xf
-
 #ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+7 -6
arch/arm64/include/asm/pointer_auth.h
···
 #include <asm/memory.h>
 #include <asm/sysreg.h>

+/*
+ * The EL0/EL1 pointer bits used by a pointer authentication code.
+ * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask()		GENMASK_ULL(54, vabits_actual)
+#define ptrauth_kernel_pac_mask()	GENMASK_ULL(63, vabits_actual)
+
 #define PR_PAC_ENABLED_KEYS_MASK                                               \
 	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
···
 				     unsigned long enabled);
 extern int ptrauth_get_enabled_keys(struct task_struct *tsk);

-static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
-{
-	return ptrauth_clear_pac(ptr);
-}
-
 static __always_inline void ptrauth_enable(void)
 {
 	if (!system_supports_address_auth())
···
 #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
 #define ptrauth_set_enabled_keys(tsk, keys, enabled)	(-EINVAL)
 #define ptrauth_get_enabled_keys(tsk)	(-EINVAL)
-#define ptrauth_strip_insn_pac(lr)	(lr)
 #define ptrauth_suspend_exit()
 #define ptrauth_thread_init_user()
 #define ptrauth_thread_switch_user(tsk)
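[Editor's note: a worked example of the relocated masks, assuming 48-bit virtual addressing (vabits_actual == 48) and TBI enabled, which is why bits 63:56 are excluded and bit 55 selects the kernel/user half:]

	#include <stdio.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	int main(void)
	{
		unsigned int vabits_actual = 48;	/* assumed VA size */

		/* user pointers: the PAC lives in bits 54:48 */
		printf("user PAC mask:   %#018llx\n",
		       GENMASK_ULL(54, vabits_actual));
		/* kernel pointers: the PAC lives in bits 63:48 */
		printf("kernel PAC mask: %#018llx\n",
		       GENMASK_ULL(63, vabits_actual));
		return 0;
	}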
-9
arch/arm64/include/asm/sysreg.h
···
 #define SYS_MDCR_EL2			sys_reg(3, 4, 1, 1, 1)
 #define SYS_CPTR_EL2			sys_reg(3, 4, 1, 1, 2)
 #define SYS_HSTR_EL2			sys_reg(3, 4, 1, 1, 3)
-#define SYS_HFGRTR_EL2			sys_reg(3, 4, 1, 1, 4)
-#define SYS_HFGWTR_EL2			sys_reg(3, 4, 1, 1, 5)
-#define SYS_HFGITR_EL2			sys_reg(3, 4, 1, 1, 6)
 #define SYS_HACR_EL2			sys_reg(3, 4, 1, 1, 7)

 #define SYS_TTBR0_EL2			sys_reg(3, 4, 2, 0, 0)
···
 #define ICH_VTR_A3V_MASK	(1 << ICH_VTR_A3V_SHIFT)
 #define ICH_VTR_TDS_SHIFT	19
 #define ICH_VTR_TDS_MASK	(1 << ICH_VTR_TDS_SHIFT)
-
-/* HFG[WR]TR_EL2 bit definitions */
-#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT	55
-#define HFGxTR_EL2_nTPIDR2_EL0_MASK	BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT)
-#define HFGxTR_EL2_nSMPRI_EL1_SHIFT	54
-#define HFGxTR_EL2_nSMPRI_EL1_MASK	BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT)

 #define ARM64_FEATURE_FIELD_BITS	4
+2 -4
arch/arm64/include/asm/uaccess.h
··· 237 237 "1: " load " " reg "1, [%2]\n" \ 238 238 "2:\n" \ 239 239 _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \ 240 - : "+r" (err), "=&r" (x) \ 240 + : "+r" (err), "=r" (x) \ 241 241 : "r" (addr)) 242 242 243 243 #define __raw_get_mem(ldr, x, ptr, err, type) \ ··· 327 327 "2:\n" \ 328 328 _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \ 329 329 : "+r" (err) \ 330 - : "r" (x), "r" (addr)) 330 + : "rZ" (x), "r" (addr)) 331 331 332 332 #define __raw_put_mem(str, x, ptr, err, type) \ 333 333 do { \ ··· 449 449 extern __must_check long strnlen_user(const char __user *str, long n); 450 450 451 451 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 452 - struct page; 453 - void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len); 454 452 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n); 455 453 456 454 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
-1
arch/arm64/kernel/Makefile
···
 obj-$(CONFIG_MODULES)			+= module.o
 obj-$(CONFIG_ARM64_MODULE_PLTS)		+= module-plts.o
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)		+= perf_event.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_CPU_PM)			+= sleep.o suspend.o
 obj-$(CONFIG_CPU_IDLE)			+= cpuidle.o
+2 -2
arch/arm64/kernel/armv8_deprecated.c
···
 static void enable_insn_hw_mode(void *data)
 {
-	struct insn_emulation *insn = (struct insn_emulation *)data;
+	struct insn_emulation *insn = data;
 	if (insn->set_hw_mode)
 		insn->set_hw_mode(true);
 }

 static void disable_insn_hw_mode(void *data)
 {
-	struct insn_emulation *insn = (struct insn_emulation *)data;
+	struct insn_emulation *insn = data;
 	if (insn->set_hw_mode)
 		insn->set_hw_mode(false);
 }
+6
arch/arm64/kernel/asm-offsets.c
···
   DEFINE(FREGS_LR,		offsetof(struct ftrace_regs, lr));
   DEFINE(FREGS_SP,		offsetof(struct ftrace_regs, sp));
   DEFINE(FREGS_PC,		offsetof(struct ftrace_regs, pc));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+  DEFINE(FREGS_DIRECT_TRAMP,	offsetof(struct ftrace_regs, direct_tramp));
+#endif
   DEFINE(FREGS_SIZE,		sizeof(struct ftrace_regs));
   BLANK();
 #endif
···
 #endif
 #ifdef CONFIG_FUNCTION_TRACER
   DEFINE(FTRACE_OPS_FUNC,		offsetof(struct ftrace_ops, func));
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+  DEFINE(FTRACE_OPS_DIRECT_CALL,	offsetof(struct ftrace_ops, direct_call));
+#endif
 #endif
   return 0;
 }
+59 -213
arch/arm64/kernel/cpufeature.c
··· 140 140 pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps); 141 141 } 142 142 143 + #define ARM64_CPUID_FIELDS(reg, field, min_value) \ 144 + .sys_reg = SYS_##reg, \ 145 + .field_pos = reg##_##field##_SHIFT, \ 146 + .field_width = reg##_##field##_WIDTH, \ 147 + .sign = reg##_##field##_SIGNED, \ 148 + .min_field_value = reg##_##field##_##min_value, 149 + 143 150 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ 144 151 { \ 145 152 .sign = SIGNED, \ ··· 2213 2206 .capability = ARM64_HAS_GIC_CPUIF_SYSREGS, 2214 2207 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 2215 2208 .matches = has_useable_gicv3_cpuif, 2216 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2217 - .field_pos = ID_AA64PFR0_EL1_GIC_SHIFT, 2218 - .field_width = 4, 2219 - .sign = FTR_UNSIGNED, 2220 - .min_field_value = 1, 2209 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, GIC, IMP) 2221 2210 }, 2222 2211 { 2223 2212 .desc = "Enhanced Counter Virtualization", 2224 2213 .capability = ARM64_HAS_ECV, 2225 2214 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2226 2215 .matches = has_cpuid_feature, 2227 - .sys_reg = SYS_ID_AA64MMFR0_EL1, 2228 - .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT, 2229 - .field_width = 4, 2230 - .sign = FTR_UNSIGNED, 2231 - .min_field_value = 1, 2216 + ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, IMP) 2232 2217 }, 2233 2218 #ifdef CONFIG_ARM64_PAN 2234 2219 { ··· 2228 2229 .capability = ARM64_HAS_PAN, 2229 2230 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2230 2231 .matches = has_cpuid_feature, 2231 - .sys_reg = SYS_ID_AA64MMFR1_EL1, 2232 - .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT, 2233 - .field_width = 4, 2234 - .sign = FTR_UNSIGNED, 2235 - .min_field_value = 1, 2236 2232 .cpu_enable = cpu_enable_pan, 2233 + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, IMP) 2237 2234 }, 2238 2235 #endif /* CONFIG_ARM64_PAN */ 2239 2236 #ifdef CONFIG_ARM64_EPAN ··· 2238 2243 .capability = ARM64_HAS_EPAN, 2239 2244 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2240 2245 .matches = has_cpuid_feature, 2241 - .sys_reg = SYS_ID_AA64MMFR1_EL1, 2242 - .field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT, 2243 - .field_width = 4, 2244 - .sign = FTR_UNSIGNED, 2245 - .min_field_value = 3, 2246 + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, PAN, PAN3) 2246 2247 }, 2247 2248 #endif /* CONFIG_ARM64_EPAN */ 2248 2249 #ifdef CONFIG_ARM64_LSE_ATOMICS ··· 2247 2256 .capability = ARM64_HAS_LSE_ATOMICS, 2248 2257 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2249 2258 .matches = has_cpuid_feature, 2250 - .sys_reg = SYS_ID_AA64ISAR0_EL1, 2251 - .field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 2252 - .field_width = 4, 2253 - .sign = FTR_UNSIGNED, 2254 - .min_field_value = 2, 2259 + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, ATOMIC, IMP) 2255 2260 }, 2256 2261 #endif /* CONFIG_ARM64_LSE_ATOMICS */ 2257 2262 { ··· 2268 2281 .capability = ARM64_HAS_NESTED_VIRT, 2269 2282 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2270 2283 .matches = has_nested_virt_support, 2271 - .sys_reg = SYS_ID_AA64MMFR2_EL1, 2272 - .sign = FTR_UNSIGNED, 2273 - .field_pos = ID_AA64MMFR2_EL1_NV_SHIFT, 2274 - .field_width = 4, 2275 - .min_field_value = ID_AA64MMFR2_EL1_NV_IMP, 2284 + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, IMP) 2276 2285 }, 2277 2286 { 2278 2287 .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE, 2279 2288 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2280 2289 .matches = has_32bit_el0, 2281 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2282 - .sign = FTR_UNSIGNED, 2283 - .field_pos = ID_AA64PFR0_EL1_EL0_SHIFT, 2284 - .field_width = 4, 2285 - .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT, 2290 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL0, AARCH32) 2286 2291 }, 
2287 2292 #ifdef CONFIG_KVM 2288 2293 { ··· 2282 2303 .capability = ARM64_HAS_32BIT_EL1, 2283 2304 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2284 2305 .matches = has_cpuid_feature, 2285 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2286 - .sign = FTR_UNSIGNED, 2287 - .field_pos = ID_AA64PFR0_EL1_EL1_SHIFT, 2288 - .field_width = 4, 2289 - .min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT, 2306 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, EL1, AARCH32) 2290 2307 }, 2291 2308 { 2292 2309 .desc = "Protected KVM", ··· 2295 2320 .desc = "Kernel page table isolation (KPTI)", 2296 2321 .capability = ARM64_UNMAP_KERNEL_AT_EL0, 2297 2322 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, 2323 + .cpu_enable = kpti_install_ng_mappings, 2324 + .matches = unmap_kernel_at_el0, 2298 2325 /* 2299 2326 * The ID feature fields below are used to indicate that 2300 2327 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for 2301 2328 * more details. 2302 2329 */ 2303 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2304 - .field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT, 2305 - .field_width = 4, 2306 - .min_field_value = 1, 2307 - .matches = unmap_kernel_at_el0, 2308 - .cpu_enable = kpti_install_ng_mappings, 2330 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP) 2309 2331 }, 2310 2332 { 2311 2333 /* FP/SIMD is not implemented */ ··· 2317 2345 .capability = ARM64_HAS_DCPOP, 2318 2346 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2319 2347 .matches = has_cpuid_feature, 2320 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2321 - .field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT, 2322 - .field_width = 4, 2323 - .min_field_value = 1, 2348 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, IMP) 2324 2349 }, 2325 2350 { 2326 2351 .desc = "Data cache clean to Point of Deep Persistence", 2327 2352 .capability = ARM64_HAS_DCPODP, 2328 2353 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2329 2354 .matches = has_cpuid_feature, 2330 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2331 - .sign = FTR_UNSIGNED, 2332 - .field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT, 2333 - .field_width = 4, 2334 - .min_field_value = 2, 2355 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, DPB, DPB2) 2335 2356 }, 2336 2357 #endif 2337 2358 #ifdef CONFIG_ARM64_SVE ··· 2332 2367 .desc = "Scalable Vector Extension", 2333 2368 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2334 2369 .capability = ARM64_SVE, 2335 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2336 - .sign = FTR_UNSIGNED, 2337 - .field_pos = ID_AA64PFR0_EL1_SVE_SHIFT, 2338 - .field_width = 4, 2339 - .min_field_value = ID_AA64PFR0_EL1_SVE_IMP, 2340 - .matches = has_cpuid_feature, 2341 2370 .cpu_enable = sve_kernel_enable, 2371 + .matches = has_cpuid_feature, 2372 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP) 2342 2373 }, 2343 2374 #endif /* CONFIG_ARM64_SVE */ 2344 2375 #ifdef CONFIG_ARM64_RAS_EXTN ··· 2343 2382 .capability = ARM64_HAS_RAS_EXTN, 2344 2383 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2345 2384 .matches = has_cpuid_feature, 2346 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2347 - .sign = FTR_UNSIGNED, 2348 - .field_pos = ID_AA64PFR0_EL1_RAS_SHIFT, 2349 - .field_width = 4, 2350 - .min_field_value = ID_AA64PFR0_EL1_RAS_IMP, 2351 2385 .cpu_enable = cpu_clear_disr, 2386 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, RAS, IMP) 2352 2387 }, 2353 2388 #endif /* CONFIG_ARM64_RAS_EXTN */ 2354 2389 #ifdef CONFIG_ARM64_AMU_EXTN ··· 2358 2401 .capability = ARM64_HAS_AMU_EXTN, 2359 2402 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 2360 2403 .matches = has_amu, 2361 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2362 - .sign = FTR_UNSIGNED, 2363 - .field_pos = ID_AA64PFR0_EL1_AMU_SHIFT, 2364 - .field_width = 4, 2365 - .min_field_value = ID_AA64PFR0_EL1_AMU_IMP, 
2366 2404 .cpu_enable = cpu_amu_enable, 2405 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP) 2367 2406 }, 2368 2407 #endif /* CONFIG_ARM64_AMU_EXTN */ 2369 2408 { ··· 2379 2426 .desc = "Stage-2 Force Write-Back", 2380 2427 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2381 2428 .capability = ARM64_HAS_STAGE2_FWB, 2382 - .sys_reg = SYS_ID_AA64MMFR2_EL1, 2383 - .sign = FTR_UNSIGNED, 2384 - .field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT, 2385 - .field_width = 4, 2386 - .min_field_value = 1, 2387 2429 .matches = has_cpuid_feature, 2430 + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, FWB, IMP) 2388 2431 }, 2389 2432 { 2390 2433 .desc = "ARMv8.4 Translation Table Level", 2391 2434 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2392 2435 .capability = ARM64_HAS_ARMv8_4_TTL, 2393 - .sys_reg = SYS_ID_AA64MMFR2_EL1, 2394 - .sign = FTR_UNSIGNED, 2395 - .field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT, 2396 - .field_width = 4, 2397 - .min_field_value = 1, 2398 2436 .matches = has_cpuid_feature, 2437 + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, TTL, IMP) 2399 2438 }, 2400 2439 { 2401 2440 .desc = "TLB range maintenance instructions", 2402 2441 .capability = ARM64_HAS_TLB_RANGE, 2403 2442 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2404 2443 .matches = has_cpuid_feature, 2405 - .sys_reg = SYS_ID_AA64ISAR0_EL1, 2406 - .field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT, 2407 - .field_width = 4, 2408 - .sign = FTR_UNSIGNED, 2409 - .min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE, 2444 + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, TLB, RANGE) 2410 2445 }, 2411 2446 #ifdef CONFIG_ARM64_HW_AFDBM 2412 2447 { ··· 2408 2467 */ 2409 2468 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 2410 2469 .capability = ARM64_HW_DBM, 2411 - .sys_reg = SYS_ID_AA64MMFR1_EL1, 2412 - .sign = FTR_UNSIGNED, 2413 - .field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 2414 - .field_width = 4, 2415 - .min_field_value = 2, 2416 2470 .matches = has_hw_dbm, 2417 2471 .cpu_enable = cpu_enable_hw_dbm, 2472 + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM) 2418 2473 }, 2419 2474 #endif 2420 2475 { ··· 2418 2481 .capability = ARM64_HAS_CRC32, 2419 2482 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2420 2483 .matches = has_cpuid_feature, 2421 - .sys_reg = SYS_ID_AA64ISAR0_EL1, 2422 - .field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT, 2423 - .field_width = 4, 2424 - .min_field_value = 1, 2484 + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, CRC32, IMP) 2425 2485 }, 2426 2486 { 2427 2487 .desc = "Speculative Store Bypassing Safe (SSBS)", 2428 2488 .capability = ARM64_SSBS, 2429 2489 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2430 2490 .matches = has_cpuid_feature, 2431 - .sys_reg = SYS_ID_AA64PFR1_EL1, 2432 - .field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT, 2433 - .field_width = 4, 2434 - .sign = FTR_UNSIGNED, 2435 - .min_field_value = ID_AA64PFR1_EL1_SSBS_IMP, 2491 + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SSBS, IMP) 2436 2492 }, 2437 2493 #ifdef CONFIG_ARM64_CNP 2438 2494 { ··· 2433 2503 .capability = ARM64_HAS_CNP, 2434 2504 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2435 2505 .matches = has_useable_cnp, 2436 - .sys_reg = SYS_ID_AA64MMFR2_EL1, 2437 - .sign = FTR_UNSIGNED, 2438 - .field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT, 2439 - .field_width = 4, 2440 - .min_field_value = 1, 2441 2506 .cpu_enable = cpu_enable_cnp, 2507 + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, CnP, IMP) 2442 2508 }, 2443 2509 #endif 2444 2510 { ··· 2442 2516 .capability = ARM64_HAS_SB, 2443 2517 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2444 2518 .matches = has_cpuid_feature, 2445 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2446 - .field_pos = ID_AA64ISAR1_EL1_SB_SHIFT, 2447 - .field_width = 4, 2448 - .sign = FTR_UNSIGNED, 2449 - 
.min_field_value = 1, 2519 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, SB, IMP) 2450 2520 }, 2451 2521 #ifdef CONFIG_ARM64_PTR_AUTH 2452 2522 { 2453 2523 .desc = "Address authentication (architected QARMA5 algorithm)", 2454 2524 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5, 2455 2525 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, 2456 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2457 - .sign = FTR_UNSIGNED, 2458 - .field_pos = ID_AA64ISAR1_EL1_APA_SHIFT, 2459 - .field_width = 4, 2460 - .min_field_value = ID_AA64ISAR1_EL1_APA_PAuth, 2461 2526 .matches = has_address_auth_cpucap, 2527 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth) 2462 2528 }, 2463 2529 { 2464 2530 .desc = "Address authentication (architected QARMA3 algorithm)", 2465 2531 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3, 2466 2532 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, 2467 - .sys_reg = SYS_ID_AA64ISAR2_EL1, 2468 - .sign = FTR_UNSIGNED, 2469 - .field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT, 2470 - .field_width = 4, 2471 - .min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth, 2472 2533 .matches = has_address_auth_cpucap, 2534 + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth) 2473 2535 }, 2474 2536 { 2475 2537 .desc = "Address authentication (IMP DEF algorithm)", 2476 2538 .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF, 2477 2539 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, 2478 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2479 - .sign = FTR_UNSIGNED, 2480 - .field_pos = ID_AA64ISAR1_EL1_API_SHIFT, 2481 - .field_width = 4, 2482 - .min_field_value = ID_AA64ISAR1_EL1_API_PAuth, 2483 2540 .matches = has_address_auth_cpucap, 2541 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth) 2484 2542 }, 2485 2543 { 2486 2544 .capability = ARM64_HAS_ADDRESS_AUTH, ··· 2475 2565 .desc = "Generic authentication (architected QARMA5 algorithm)", 2476 2566 .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5, 2477 2567 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2478 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2479 - .sign = FTR_UNSIGNED, 2480 - .field_pos = ID_AA64ISAR1_EL1_GPA_SHIFT, 2481 - .field_width = 4, 2482 - .min_field_value = ID_AA64ISAR1_EL1_GPA_IMP, 2483 2568 .matches = has_cpuid_feature, 2569 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP) 2484 2570 }, 2485 2571 { 2486 2572 .desc = "Generic authentication (architected QARMA3 algorithm)", 2487 2573 .capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3, 2488 2574 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2489 - .sys_reg = SYS_ID_AA64ISAR2_EL1, 2490 - .sign = FTR_UNSIGNED, 2491 - .field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT, 2492 - .field_width = 4, 2493 - .min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP, 2494 2575 .matches = has_cpuid_feature, 2576 + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP) 2495 2577 }, 2496 2578 { 2497 2579 .desc = "Generic authentication (IMP DEF algorithm)", 2498 2580 .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF, 2499 2581 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2500 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2501 - .sign = FTR_UNSIGNED, 2502 - .field_pos = ID_AA64ISAR1_EL1_GPI_SHIFT, 2503 - .field_width = 4, 2504 - .min_field_value = ID_AA64ISAR1_EL1_GPI_IMP, 2505 2582 .matches = has_cpuid_feature, 2583 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP) 2506 2584 }, 2507 2585 { 2508 2586 .capability = ARM64_HAS_GENERIC_AUTH, ··· 2522 2624 .desc = "E0PD", 2523 2625 .capability = ARM64_HAS_E0PD, 2524 2626 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2525 - .sys_reg = SYS_ID_AA64MMFR2_EL1, 2526 - .sign = FTR_UNSIGNED, 2527 - .field_width = 4, 2528 - .field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT, 2529 - .matches = has_cpuid_feature, 2530 - .min_field_value = 1, 2531 
2627 .cpu_enable = cpu_enable_e0pd, 2628 + .matches = has_cpuid_feature, 2629 + ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, E0PD, IMP) 2532 2630 }, 2533 2631 #endif 2534 2632 { ··· 2532 2638 .capability = ARM64_HAS_RNG, 2533 2639 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2534 2640 .matches = has_cpuid_feature, 2535 - .sys_reg = SYS_ID_AA64ISAR0_EL1, 2536 - .field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT, 2537 - .field_width = 4, 2538 - .sign = FTR_UNSIGNED, 2539 - .min_field_value = 1, 2641 + ARM64_CPUID_FIELDS(ID_AA64ISAR0_EL1, RNDR, IMP) 2540 2642 }, 2541 2643 #ifdef CONFIG_ARM64_BTI 2542 2644 { ··· 2545 2655 #endif 2546 2656 .matches = has_cpuid_feature, 2547 2657 .cpu_enable = bti_enable, 2548 - .sys_reg = SYS_ID_AA64PFR1_EL1, 2549 - .field_pos = ID_AA64PFR1_EL1_BT_SHIFT, 2550 - .field_width = 4, 2551 - .min_field_value = ID_AA64PFR1_EL1_BT_IMP, 2552 - .sign = FTR_UNSIGNED, 2658 + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, BT, IMP) 2553 2659 }, 2554 2660 #endif 2555 2661 #ifdef CONFIG_ARM64_MTE ··· 2554 2668 .capability = ARM64_MTE, 2555 2669 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 2556 2670 .matches = has_cpuid_feature, 2557 - .sys_reg = SYS_ID_AA64PFR1_EL1, 2558 - .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, 2559 - .field_width = 4, 2560 - .min_field_value = ID_AA64PFR1_EL1_MTE_MTE2, 2561 - .sign = FTR_UNSIGNED, 2562 2671 .cpu_enable = cpu_enable_mte, 2672 + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE2) 2563 2673 }, 2564 2674 { 2565 2675 .desc = "Asymmetric MTE Tag Check Fault", 2566 2676 .capability = ARM64_MTE_ASYMM, 2567 2677 .type = ARM64_CPUCAP_BOOT_CPU_FEATURE, 2568 2678 .matches = has_cpuid_feature, 2569 - .sys_reg = SYS_ID_AA64PFR1_EL1, 2570 - .field_pos = ID_AA64PFR1_EL1_MTE_SHIFT, 2571 - .field_width = 4, 2572 - .min_field_value = ID_AA64PFR1_EL1_MTE_MTE3, 2573 - .sign = FTR_UNSIGNED, 2679 + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, MTE, MTE3) 2574 2680 }, 2575 2681 #endif /* CONFIG_ARM64_MTE */ 2576 2682 { 2577 2683 .desc = "RCpc load-acquire (LDAPR)", 2578 2684 .capability = ARM64_HAS_LDAPR, 2579 2685 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2580 - .sys_reg = SYS_ID_AA64ISAR1_EL1, 2581 - .sign = FTR_UNSIGNED, 2582 - .field_pos = ID_AA64ISAR1_EL1_LRCPC_SHIFT, 2583 - .field_width = 4, 2584 2686 .matches = has_cpuid_feature, 2585 - .min_field_value = 1, 2687 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP) 2586 2688 }, 2587 2689 #ifdef CONFIG_ARM64_SME 2588 2690 { 2589 2691 .desc = "Scalable Matrix Extension", 2590 2692 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2591 2693 .capability = ARM64_SME, 2592 - .sys_reg = SYS_ID_AA64PFR1_EL1, 2593 - .sign = FTR_UNSIGNED, 2594 - .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, 2595 - .field_width = 4, 2596 - .min_field_value = ID_AA64PFR1_EL1_SME_IMP, 2597 2694 .matches = has_cpuid_feature, 2598 2695 .cpu_enable = sme_kernel_enable, 2696 + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP) 2599 2697 }, 2600 2698 /* FA64 should be sorted after the base SME capability */ 2601 2699 { 2602 2700 .desc = "FA64", 2603 2701 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2604 2702 .capability = ARM64_SME_FA64, 2605 - .sys_reg = SYS_ID_AA64SMFR0_EL1, 2606 - .sign = FTR_UNSIGNED, 2607 - .field_pos = ID_AA64SMFR0_EL1_FA64_SHIFT, 2608 - .field_width = 1, 2609 - .min_field_value = ID_AA64SMFR0_EL1_FA64_IMP, 2610 2703 .matches = has_cpuid_feature, 2611 2704 .cpu_enable = fa64_kernel_enable, 2705 + ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP) 2612 2706 }, 2613 2707 { 2614 2708 .desc = "SME2", 2615 2709 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2616 2710 .capability = ARM64_SME2, 2617 - .sys_reg = 
SYS_ID_AA64PFR1_EL1, 2618 - .sign = FTR_UNSIGNED, 2619 - .field_pos = ID_AA64PFR1_EL1_SME_SHIFT, 2620 - .field_width = ID_AA64PFR1_EL1_SME_WIDTH, 2621 - .min_field_value = ID_AA64PFR1_EL1_SME_SME2, 2622 2711 .matches = has_cpuid_feature, 2623 2712 .cpu_enable = sme2_kernel_enable, 2713 + ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2) 2624 2714 }, 2625 2715 #endif /* CONFIG_ARM64_SME */ 2626 2716 { 2627 2717 .desc = "WFx with timeout", 2628 2718 .capability = ARM64_HAS_WFXT, 2629 2719 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2630 - .sys_reg = SYS_ID_AA64ISAR2_EL1, 2631 - .sign = FTR_UNSIGNED, 2632 - .field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT, 2633 - .field_width = 4, 2634 2720 .matches = has_cpuid_feature, 2635 - .min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP, 2721 + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, WFxT, IMP) 2636 2722 }, 2637 2723 { 2638 2724 .desc = "Trap EL0 IMPLEMENTATION DEFINED functionality", 2639 2725 .capability = ARM64_HAS_TIDCP1, 2640 2726 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2641 - .sys_reg = SYS_ID_AA64MMFR1_EL1, 2642 - .sign = FTR_UNSIGNED, 2643 - .field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 2644 - .field_width = 4, 2645 - .min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP, 2646 2727 .matches = has_cpuid_feature, 2647 2728 .cpu_enable = cpu_trap_el0_impdef, 2729 + ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TIDCP1, IMP) 2648 2730 }, 2649 2731 { 2650 2732 .desc = "Data independent timing control (DIT)", 2651 2733 .capability = ARM64_HAS_DIT, 2652 2734 .type = ARM64_CPUCAP_SYSTEM_FEATURE, 2653 - .sys_reg = SYS_ID_AA64PFR0_EL1, 2654 - .sign = FTR_UNSIGNED, 2655 - .field_pos = ID_AA64PFR0_EL1_DIT_SHIFT, 2656 - .field_width = 4, 2657 - .min_field_value = ID_AA64PFR0_EL1_DIT_IMP, 2658 2735 .matches = has_cpuid_feature, 2659 2736 .cpu_enable = cpu_enable_dit, 2737 + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) 2660 2738 }, 2661 2739 {}, 2662 2740 }; 2663 2741 2664 2742 #define HWCAP_CPUID_MATCH(reg, field, min_value) \ 2665 - .matches = has_user_cpuid_feature, \ 2666 - .sys_reg = SYS_##reg, \ 2667 - .field_pos = reg##_##field##_SHIFT, \ 2668 - .field_width = reg##_##field##_WIDTH, \ 2669 - .sign = reg##_##field##_SIGNED, \ 2670 - .min_field_value = reg##_##field##_##min_value, 2743 + .matches = has_user_cpuid_feature, \ 2744 + ARM64_CPUID_FIELDS(reg, field, min_value) 2671 2745 2672 2746 #define __HWCAP_CAP(name, cap_type, cap) \ 2673 2747 .desc = name, \ ··· 2657 2811 #ifdef CONFIG_ARM64_PTR_AUTH 2658 2812 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = { 2659 2813 { 2660 - HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, APA, PAuth) 2814 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, APA, PAuth) 2661 2815 }, 2662 2816 { 2663 - HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, APA3, PAuth) 2817 + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, APA3, PAuth) 2664 2818 }, 2665 2819 { 2666 - HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, API, PAuth) 2820 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, API, PAuth) 2667 2821 }, 2668 2822 {}, 2669 2823 }; 2670 2824 2671 2825 static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = { 2672 2826 { 2673 - HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPA, IMP) 2827 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPA, IMP) 2674 2828 }, 2675 2829 { 2676 - HWCAP_CPUID_MATCH(ID_AA64ISAR2_EL1, GPA3, IMP) 2830 + ARM64_CPUID_FIELDS(ID_AA64ISAR2_EL1, GPA3, IMP) 2677 2831 }, 2678 2832 { 2679 - HWCAP_CPUID_MATCH(ID_AA64ISAR1_EL1, GPI, IMP) 2833 + ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, GPI, IMP) 2680 2834 }, 2681 2835 {}, 2682 2836 };
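The conversion above is mechanical: each open-coded five-member match (.sys_reg, .sign, .field_pos, .field_width, .min_field_value) collapses into one ARM64_CPUID_FIELDS() invocation, and HWCAP_CPUID_MATCH() is rebased onto the same helper. The helper itself lives in <asm/cpufeature.h>, outside this hunk; a sketch of its likely expansion, inferred directly from the initialisers it replaces and from the old HWCAP_CPUID_MATCH() body:

	/*
	 * Sketch, inferred from the deleted initialisers: paste the five
	 * per-field members, taking the width and signedness from the
	 * generated register definitions instead of open-coding them.
	 */
	#define ARM64_CPUID_FIELDS(reg, field, min_value)		\
		.sys_reg = SYS_##reg,					\
		.field_pos = reg##_##field##_SHIFT,			\
		.field_width = reg##_##field##_WIDTH,			\
		.sign = reg##_##field##_SIGNED,				\
		.min_field_value = reg##_##field##_##min_value,

So ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, FWB, IMP) expands to the same members the FWB entry loses above, with the literal field_width = 4 and min_field_value = 1 replaced by generated _WIDTH and _IMP constants.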
+1
arch/arm64/kernel/crash_core.c
··· 8 8 #include <asm/cpufeature.h> 9 9 #include <asm/memory.h> 10 10 #include <asm/pgtable-hwdef.h> 11 + #include <asm/pointer_auth.h> 11 12 12 13 static inline u64 get_tcr_el1_t1sz(void); 13 14
+5
arch/arm64/kernel/debug-monitors.c
··· 438 438 } 439 439 NOKPROBE_SYMBOL(kernel_active_single_step); 440 440 441 + void kernel_rewind_single_step(struct pt_regs *regs) 442 + { 443 + set_regs_spsr_ss(regs); 444 + } 445 + 441 446 /* ptrace API */ 442 447 void user_enable_single_step(struct task_struct *task) 443 448 {
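kernel_rewind_single_step() exists so a debugger that stops while a step is already pending can re-arm it: it sets the saved PSTATE.SS back to 1, so the next exception return executes exactly one instruction before the step exception fires again. set_regs_spsr_ss() is a pre-existing helper in this file; roughly (sketch, the exact body is not in this hunk):

	/* Set PSTATE.SS in the saved pstate; DBG_SPSR_SS is that bit. */
	static void set_regs_spsr_ss(struct pt_regs *regs)
	{
		regs->pstate |= DBG_SPSR_SS;
	}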
+75 -15
arch/arm64/kernel/entry-ftrace.S
··· 36 36 SYM_CODE_START(ftrace_caller) 37 37 bti c 38 38 39 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS 40 + /* 41 + * The literal pointer to the ops is at an 8-byte aligned boundary 42 + * which is either 12 or 16 bytes before the BL instruction in the call 43 + * site. See ftrace_call_adjust() for details. 44 + * 45 + * Therefore here the LR points at `literal + 16` or `literal + 20`, 46 + * and we can find the address of the literal in either case by 47 + * aligning to an 8-byte boundary and subtracting 16. We do the 48 + * alignment first as this allows us to fold the subtraction into the 49 + * LDR. 50 + */ 51 + bic x11, x30, 0x7 52 + ldr x11, [x11, #-(4 * AARCH64_INSN_SIZE)] // op 53 + 54 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 55 + /* 56 + * If the op has a direct call, handle it immediately without 57 + * saving/restoring registers. 58 + */ 59 + ldr x17, [x11, #FTRACE_OPS_DIRECT_CALL] // op->direct_call 60 + cbnz x17, ftrace_caller_direct 61 + #endif 62 + #endif 63 + 39 64 /* Save original SP */ 40 65 mov x10, sp 41 66 ··· 73 48 stp x4, x5, [sp, #FREGS_X4] 74 49 stp x6, x7, [sp, #FREGS_X6] 75 50 str x8, [sp, #FREGS_X8] 51 + 52 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 53 + str xzr, [sp, #FREGS_DIRECT_TRAMP] 54 + #endif 76 55 77 56 /* Save the callsite's FP, LR, SP */ 78 57 str x29, [sp, #FREGS_FP] ··· 100 71 mov x3, sp // regs 101 72 102 73 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS 103 - /* 104 - * The literal pointer to the ops is at an 8-byte aligned boundary 105 - * which is either 12 or 16 bytes before the BL instruction in the call 106 - * site. See ftrace_call_adjust() for details. 107 - * 108 - * Therefore here the LR points at `literal + 16` or `literal + 20`, 109 - * and we can find the address of the literal in either case by 110 - * aligning to an 8-byte boundary and subtracting 16. We do the 111 - * alignment first as this allows us to fold the subtraction into the 112 - * LDR. 113 - */ 114 - bic x2, x30, 0x7 115 - ldr x2, [x2, #-16] // op 116 - 74 + mov x2, x11 // op 117 75 ldr x4, [x2, #FTRACE_OPS_FUNC] // op->func 118 76 blr x4 // op->func(ip, parent_ip, op, regs) 119 77 ··· 123 107 ldp x6, x7, [sp, #FREGS_X6] 124 108 ldr x8, [sp, #FREGS_X8] 125 109 126 - /* Restore the callsite's FP, LR, PC */ 110 + /* Restore the callsite's FP */ 127 111 ldr x29, [sp, #FREGS_FP] 112 + 113 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 114 + ldr x17, [sp, #FREGS_DIRECT_TRAMP] 115 + cbnz x17, ftrace_caller_direct_late 116 + #endif 117 + 118 + /* Restore the callsite's LR and PC */ 128 119 ldr x30, [sp, #FREGS_LR] 129 120 ldr x9, [sp, #FREGS_PC] 130 121 ··· 139 116 add sp, sp, #FREGS_SIZE + 32 140 117 141 118 ret x9 119 + 120 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 121 + SYM_INNER_LABEL(ftrace_caller_direct_late, SYM_L_LOCAL) 122 + /* 123 + * Head to a direct trampoline in x17 after having run other tracers. 124 + * The ftrace_regs are live, and x0-x8 and FP have been restored. The 125 + * LR, PC, and SP have not been restored. 126 + */ 127 + 128 + /* 129 + * Restore the callsite's LR and PC matching the trampoline calling 130 + * convention. 131 + */ 132 + ldr x9, [sp, #FREGS_LR] 133 + ldr x30, [sp, #FREGS_PC] 134 + 135 + /* Restore the callsite's SP */ 136 + add sp, sp, #FREGS_SIZE + 32 137 + 138 + SYM_INNER_LABEL(ftrace_caller_direct, SYM_L_LOCAL) 139 + /* 140 + * Head to a direct trampoline in x17. 141 + * 142 + * We use `BR X17` as this can safely land on a `BTI C` or `PACIASP` in 143 + * the trampoline, and will not unbalance any return stack. 
144 + */ 145 + br x17 146 + #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 142 147 SYM_CODE_END(ftrace_caller) 148 + 149 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 150 + SYM_CODE_START(ftrace_stub_direct_tramp) 151 + bti c 152 + mov x10, x30 153 + mov x30, x9 154 + ret x10 155 + SYM_CODE_END(ftrace_stub_direct_tramp) 156 + #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 143 157 144 158 #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ 145 159
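The ops-literal lookup hoisted to the top of ftrace_caller is worth unpacking. The literal is 8-byte aligned and placed 12 or 16 bytes before the BL by ftrace_call_adjust(), and the LR points just past the BL, so the LR is either literal + 16 or literal + 20; clearing the low three bits folds both cases to literal + 16, which is exactly what the BIC plus the LDR with a -16 offset compute. The same arithmetic in C, under those assumptions (ops_from_lr() is illustrative, not a kernel function):

	/*
	 * Recover the ftrace_ops literal from the return address, mirroring
	 * the "bic x11, x30, 0x7" / "ldr x11, [x11, #-16]" pair above.
	 */
	static struct ftrace_ops *ops_from_lr(unsigned long lr)
	{
		unsigned long literal = (lr & ~0x7UL) - 4 * AARCH64_INSN_SIZE;

		return *(struct ftrace_ops **)literal;
	}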
+2 -2
arch/arm64/kernel/fpsimd.c
··· 299 299 /* 300 300 * TIF_SME controls whether a task can use SME without trapping while 301 301 * in userspace, when TIF_SME is set then we must have storage 302 - * alocated in sve_state and sme_state to store the contents of both ZA 302 + * allocated in sve_state and sme_state to store the contents of both ZA 303 303 * and the SVE registers for both streaming and non-streaming modes. 304 304 * 305 305 * If both SVCR.ZA and SVCR.SM are disabled then at any point we ··· 1477 1477 * 1478 1478 * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state() 1479 1479 * would have disabled the SME access trap for userspace during 1480 - * ret_to_user, making an SVE access trap impossible in that case. 1480 + * ret_to_user, making an SME access trap impossible in that case. 1481 1481 */ 1482 1482 void do_sme_acc(unsigned long esr, struct pt_regs *regs) 1483 1483 {
+36 -10
arch/arm64/kernel/ftrace.c
··· 195 195 return ftrace_modify_code(pc, 0, new, false); 196 196 } 197 197 198 - static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) 198 + static struct plt_entry *get_ftrace_plt(struct module *mod) 199 199 { 200 200 #ifdef CONFIG_ARM64_MODULE_PLTS 201 201 struct plt_entry *plt = mod->arch.ftrace_trampolines; 202 202 203 - if (addr == FTRACE_ADDR) 204 - return &plt[FTRACE_PLT_IDX]; 205 - #endif 203 + return &plt[FTRACE_PLT_IDX]; 204 + #else 206 205 return NULL; 206 + #endif 207 + } 208 + 209 + static bool reachable_by_bl(unsigned long addr, unsigned long pc) 210 + { 211 + long offset = (long)addr - (long)pc; 212 + 213 + return offset >= -SZ_128M && offset < SZ_128M; 207 214 } 208 215 209 216 /* ··· 227 220 unsigned long *addr) 228 221 { 229 222 unsigned long pc = rec->ip; 230 - long offset = (long)*addr - (long)pc; 231 223 struct plt_entry *plt; 224 + 225 + /* 226 + * If a custom trampoline is unreachable, rely on the ftrace_caller 227 + * trampoline which knows how to indirectly reach that trampoline 228 + * through ops->direct_call. 229 + */ 230 + if (*addr != FTRACE_ADDR && !reachable_by_bl(*addr, pc)) 231 + *addr = FTRACE_ADDR; 232 232 233 233 /* 234 234 * When the target is within range of the 'BL' instruction, use 'addr' 235 235 * as-is and branch to that directly. 236 236 */ 237 - if (offset >= -SZ_128M && offset < SZ_128M) 237 + if (reachable_by_bl(*addr, pc)) 238 238 return true; 239 239 240 240 /* ··· 270 256 if (WARN_ON(!mod)) 271 257 return false; 272 258 273 - plt = get_ftrace_plt(mod, *addr); 259 + plt = get_ftrace_plt(mod); 274 260 if (!plt) { 275 261 pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); 276 262 return false; ··· 344 330 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 345 331 unsigned long addr) 346 332 { 347 - if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller)) 333 + unsigned long pc = rec->ip; 334 + u32 old, new; 335 + int ret; 336 + 337 + ret = ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec)); 338 + if (ret) 339 + return ret; 340 + 341 + if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) 348 342 return -EINVAL; 349 - if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller)) 343 + if (!ftrace_find_callable_addr(rec, NULL, &addr)) 350 344 return -EINVAL; 351 345 352 - return ftrace_rec_update_ops(rec); 346 + old = aarch64_insn_gen_branch_imm(pc, old_addr, 347 + AARCH64_INSN_BRANCH_LINK); 348 + new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); 349 + 350 + return ftrace_modify_code(pc, old, new, true); 353 351 } 354 352 #endif 355 353
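reachable_by_bl() captures the architectural range of BL: its imm26 field is a signed 26-bit word offset, so the reachable window is ±2^25 instructions, i.e. ±2^27 bytes = ±128MiB around the call site (SZ_128M == 1 << 27). When a direct-call trampoline falls outside that window, the call site is redirected to FTRACE_ADDR and ftrace_caller reaches the trampoline indirectly through ops->direct_call. For reference, a sketch of the encoding that aarch64_insn_gen_branch_imm() produces for AARCH64_INSN_BRANCH_LINK, assuming the range check already passed (gen_bl() is illustrative):

	/* BL: opcode 0b100101 in bits [31:26], word offset in imm26. */
	static u32 gen_bl(unsigned long pc, unsigned long target)
	{
		long imm26 = ((long)target - (long)pc) >> 2;

		return 0x94000000u | (imm26 & 0x03ffffff);
	}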
+8 -1
arch/arm64/kernel/idreg-override.c
··· 167 167 } aliases[] __initconst = { 168 168 { "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" }, 169 169 { "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" }, 170 - { "arm64.nosve", "id_aa64pfr0.sve=0 id_aa64pfr1.sme=0" }, 170 + { "arm64.nosve", "id_aa64pfr0.sve=0" }, 171 171 { "arm64.nosme", "id_aa64pfr1.sme=0" }, 172 172 { "arm64.nobti", "id_aa64pfr1.bt=0" }, 173 173 { "arm64.nopauth", ··· 177 177 { "arm64.nomte", "id_aa64pfr1.mte=0" }, 178 178 { "nokaslr", "kaslr.disabled=1" }, 179 179 }; 180 + 181 + static int __init parse_nokaslr(char *unused) 182 + { 183 + /* nokaslr param handling is done by early cpufeature code */ 184 + return 0; 185 + } 186 + early_param("nokaslr", parse_nokaslr); 180 187 181 188 static int __init find_field(const char *cmdline, 182 189 const struct ftr_set_desc *reg, int f, u64 *v)
+2
arch/arm64/kernel/kgdb.c
··· 224 224 */ 225 225 if (!kernel_active_single_step()) 226 226 kernel_enable_single_step(linux_regs); 227 + else 228 + kernel_rewind_single_step(linux_regs); 227 229 err = 0; 228 230 break; 229 231 default:
+2 -21
arch/arm64/kernel/machine_kexec.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/kexec.h> 13 13 #include <linux/page-flags.h> 14 + #include <linux/reboot.h> 14 15 #include <linux/set_memory.h> 15 16 #include <linux/smp.h> 16 17 ··· 103 102 /* Allocates pages for kexec page table */ 104 103 static void *kexec_page_alloc(void *arg) 105 104 { 106 - struct kimage *kimage = (struct kimage *)arg; 105 + struct kimage *kimage = arg; 107 106 struct page *page = kimage_alloc_control_pages(kimage, 0); 108 107 void *vaddr = NULL; 109 108 ··· 267 266 machine_kexec_mask_interrupts(); 268 267 269 268 pr_info("Starting crashdump kernel...\n"); 270 - } 271 - 272 - void arch_kexec_protect_crashkres(void) 273 - { 274 - int i; 275 - 276 - for (i = 0; i < kexec_crash_image->nr_segments; i++) 277 - set_memory_valid( 278 - __phys_to_virt(kexec_crash_image->segment[i].mem), 279 - kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0); 280 - } 281 - 282 - void arch_kexec_unprotect_crashkres(void) 283 - { 284 - int i; 285 - 286 - for (i = 0; i < kexec_crash_image->nr_segments; i++) 287 - set_memory_valid( 288 - __phys_to_virt(kexec_crash_image->segment[i].mem), 289 - kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1); 290 269 } 291 270 292 271 #ifdef CONFIG_HIBERNATION
+1 -1
arch/arm64/kernel/perf_callchain.c
··· 38 38 if (err) 39 39 return NULL; 40 40 41 - lr = ptrauth_strip_insn_pac(buftail.lr); 41 + lr = ptrauth_strip_user_insn_pac(buftail.lr); 42 42 43 43 perf_callchain_store(entry, lr); 44 44
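ptrauth_strip_user_insn_pac() is the user-pointer flavour of the XPACLRI-based stripping this series introduces (kernel pointers get ptrauth_strip_kernel_insn_pac(), used in process.c and stacktrace.c below). XPACLRI strips the PAC from LR and sits in the HINT space (hint #7), so it assembles on any toolchain and behaves as a NOP on cores without pointer authentication. A rough sketch of the underlying helper from <asm/pointer_auth.h> (shape per the 'arm64: use XPACLRI to strip PAC' patch, not shown in this hunk):

	/* XPACLRI only operates on LR, so force the operand into x30. */
	#define __xpaclri(ptr)						\
	({								\
		register unsigned long __xpaclri_ptr asm("x30") = (ptr);\
									\
		asm("hint #7" : "+r" (__xpaclri_ptr));			\
									\
		__xpaclri_ptr;						\
	})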
+55 -103
arch/arm64/kernel/perf_event.c → drivers/perf/arm_pmuv3.c
··· 10 10 11 11 #include <asm/irq_regs.h> 12 12 #include <asm/perf_event.h> 13 - #include <asm/sysreg.h> 14 13 #include <asm/virt.h> 15 14 16 15 #include <clocksource/arm_arch_timer.h> 17 16 18 17 #include <linux/acpi.h> 19 18 #include <linux/clocksource.h> 20 - #include <linux/kvm_host.h> 21 19 #include <linux/of.h> 22 20 #include <linux/perf/arm_pmu.h> 21 + #include <linux/perf/arm_pmuv3.h> 23 22 #include <linux/platform_device.h> 24 23 #include <linux/sched_clock.h> 25 24 #include <linux/smp.h> 25 + 26 + #include <asm/arm_pmuv3.h> 26 27 27 28 /* ARMv8 Cortex-A53 specific event types. */ 28 29 #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 ··· 46 45 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED, 47 46 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE, 48 47 [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL, 49 - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED, 50 48 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED, 51 49 [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES, 52 50 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND, ··· 387 387 * We unconditionally enable ARMv8.5-PMU long event counter support 388 388 * (64-bit events) where supported. Indicate if this arm_pmu has long 389 389 * event counter support. 390 + * 391 + * On AArch32, long counters make no sense (you can't access the top 392 + * bits), so we only enable this on AArch64. 390 393 */ 391 394 static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu) 392 395 { 393 - return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5); 396 + return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver)); 394 397 } 395 398 396 399 static inline bool armv8pmu_event_has_user_read(struct perf_event *event) ··· 427 424 #define ARMV8_IDX_TO_COUNTER(x) \ 428 425 (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK) 429 426 430 - /* 431 - * This code is really good 432 - */ 433 - 434 - #define PMEVN_CASE(n, case_macro) \ 435 - case n: case_macro(n); break 436 - 437 - #define PMEVN_SWITCH(x, case_macro) \ 438 - do { \ 439 - switch (x) { \ 440 - PMEVN_CASE(0, case_macro); \ 441 - PMEVN_CASE(1, case_macro); \ 442 - PMEVN_CASE(2, case_macro); \ 443 - PMEVN_CASE(3, case_macro); \ 444 - PMEVN_CASE(4, case_macro); \ 445 - PMEVN_CASE(5, case_macro); \ 446 - PMEVN_CASE(6, case_macro); \ 447 - PMEVN_CASE(7, case_macro); \ 448 - PMEVN_CASE(8, case_macro); \ 449 - PMEVN_CASE(9, case_macro); \ 450 - PMEVN_CASE(10, case_macro); \ 451 - PMEVN_CASE(11, case_macro); \ 452 - PMEVN_CASE(12, case_macro); \ 453 - PMEVN_CASE(13, case_macro); \ 454 - PMEVN_CASE(14, case_macro); \ 455 - PMEVN_CASE(15, case_macro); \ 456 - PMEVN_CASE(16, case_macro); \ 457 - PMEVN_CASE(17, case_macro); \ 458 - PMEVN_CASE(18, case_macro); \ 459 - PMEVN_CASE(19, case_macro); \ 460 - PMEVN_CASE(20, case_macro); \ 461 - PMEVN_CASE(21, case_macro); \ 462 - PMEVN_CASE(22, case_macro); \ 463 - PMEVN_CASE(23, case_macro); \ 464 - PMEVN_CASE(24, case_macro); \ 465 - PMEVN_CASE(25, case_macro); \ 466 - PMEVN_CASE(26, case_macro); \ 467 - PMEVN_CASE(27, case_macro); \ 468 - PMEVN_CASE(28, case_macro); \ 469 - PMEVN_CASE(29, case_macro); \ 470 - PMEVN_CASE(30, case_macro); \ 471 - default: WARN(1, "Invalid PMEV* index\n"); \ 472 - } \ 473 - } while (0) 474 - 475 - #define RETURN_READ_PMEVCNTRN(n) \ 476 - return read_sysreg(pmevcntr##n##_el0) 477 - static unsigned long read_pmevcntrn(int n) 478 - { 479 - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); 480 - return 0; 481 - 
} 482 - 483 - #define WRITE_PMEVCNTRN(n) \ 484 - write_sysreg(val, pmevcntr##n##_el0) 485 - static void write_pmevcntrn(int n, unsigned long val) 486 - { 487 - PMEVN_SWITCH(n, WRITE_PMEVCNTRN); 488 - } 489 - 490 - #define WRITE_PMEVTYPERN(n) \ 491 - write_sysreg(val, pmevtyper##n##_el0) 492 - static void write_pmevtypern(int n, unsigned long val) 493 - { 494 - PMEVN_SWITCH(n, WRITE_PMEVTYPERN); 495 - } 496 - 497 427 static inline u32 armv8pmu_pmcr_read(void) 498 428 { 499 - return read_sysreg(pmcr_el0); 429 + return read_pmcr(); 500 430 } 501 431 502 432 static inline void armv8pmu_pmcr_write(u32 val) 503 433 { 504 434 val &= ARMV8_PMU_PMCR_MASK; 505 435 isb(); 506 - write_sysreg(val, pmcr_el0); 436 + write_pmcr(val); 507 437 } 508 438 509 439 static inline int armv8pmu_has_overflowed(u32 pmovsr) ··· 491 555 static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value) 492 556 { 493 557 if (armv8pmu_event_needs_bias(event)) 494 - value |= GENMASK(63, 32); 558 + value |= GENMASK_ULL(63, 32); 495 559 496 560 return value; 497 561 } ··· 499 563 static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value) 500 564 { 501 565 if (armv8pmu_event_needs_bias(event)) 502 - value &= ~GENMASK(63, 32); 566 + value &= ~GENMASK_ULL(63, 32); 503 567 504 568 return value; 505 569 } ··· 511 575 u64 value; 512 576 513 577 if (idx == ARMV8_IDX_CYCLE_COUNTER) 514 - value = read_sysreg(pmccntr_el0); 578 + value = read_pmccntr(); 515 579 else 516 580 value = armv8pmu_read_hw_counter(event); 517 581 ··· 546 610 value = armv8pmu_bias_long_counter(event, value); 547 611 548 612 if (idx == ARMV8_IDX_CYCLE_COUNTER) 549 - write_sysreg(value, pmccntr_el0); 613 + write_pmccntr(value); 550 614 else 551 615 armv8pmu_write_hw_counter(event, value); 552 616 } ··· 577 641 armv8pmu_write_evtype(idx, chain_evt); 578 642 } else { 579 643 if (idx == ARMV8_IDX_CYCLE_COUNTER) 580 - write_sysreg(hwc->config_base, pmccfiltr_el0); 644 + write_pmccfiltr(hwc->config_base); 581 645 else 582 646 armv8pmu_write_evtype(idx, hwc->config_base); 583 647 } ··· 600 664 * enable the counter. 601 665 * */ 602 666 isb(); 603 - write_sysreg(mask, pmcntenset_el0); 667 + write_pmcntenset(mask); 604 668 } 605 669 606 670 static inline void armv8pmu_enable_event_counter(struct perf_event *event) ··· 617 681 618 682 static inline void armv8pmu_disable_counter(u32 mask) 619 683 { 620 - write_sysreg(mask, pmcntenclr_el0); 684 + write_pmcntenclr(mask); 621 685 /* 622 686 * Make sure the effects of disabling the counter are visible before we 623 687 * start configuring the event. ··· 639 703 640 704 static inline void armv8pmu_enable_intens(u32 mask) 641 705 { 642 - write_sysreg(mask, pmintenset_el1); 706 + write_pmintenset(mask); 643 707 } 644 708 645 709 static inline void armv8pmu_enable_event_irq(struct perf_event *event) ··· 650 714 651 715 static inline void armv8pmu_disable_intens(u32 mask) 652 716 { 653 - write_sysreg(mask, pmintenclr_el1); 717 + write_pmintenclr(mask); 654 718 isb(); 655 719 /* Clear the overflow flag in case an interrupt is pending. 
*/ 656 - write_sysreg(mask, pmovsclr_el0); 720 + write_pmovsclr(mask); 657 721 isb(); 658 722 } 659 723 ··· 668 732 u32 value; 669 733 670 734 /* Read */ 671 - value = read_sysreg(pmovsclr_el0); 735 + value = read_pmovsclr(); 672 736 673 737 /* Write to clear flags */ 674 738 value &= ARMV8_PMU_OVSR_MASK; 675 - write_sysreg(value, pmovsclr_el0); 739 + write_pmovsclr(value); 676 740 677 741 return value; 678 742 } 679 743 680 744 static void armv8pmu_disable_user_access(void) 681 745 { 682 - write_sysreg(0, pmuserenr_el0); 746 + write_pmuserenr(0); 683 747 } 684 748 685 749 static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu) ··· 690 754 /* Clear any unused counters to avoid leaking their contents */ 691 755 for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) { 692 756 if (i == ARMV8_IDX_CYCLE_COUNTER) 693 - write_sysreg(0, pmccntr_el0); 757 + write_pmccntr(0); 694 758 else 695 759 armv8pmu_write_evcntr(i, 0); 696 760 } 697 761 698 - write_sysreg(0, pmuserenr_el0); 699 - write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0); 762 + write_pmuserenr(0); 763 + write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR); 700 764 } 701 765 702 766 static void armv8pmu_enable_event(struct perf_event *event) ··· 984 1048 armv8pmu_pmcr_write(pmcr); 985 1049 } 986 1050 1051 + static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu, 1052 + struct perf_event *event) 1053 + { 1054 + if (event->attr.type == PERF_TYPE_HARDWARE && 1055 + event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) { 1056 + 1057 + if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED, 1058 + armpmu->pmceid_bitmap)) 1059 + return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED; 1060 + 1061 + if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED, 1062 + armpmu->pmceid_bitmap)) 1063 + return ARMV8_PMUV3_PERFCTR_BR_RETIRED; 1064 + 1065 + return HW_OP_UNSUPPORTED; 1066 + } 1067 + 1068 + return armpmu_map_event(event, &armv8_pmuv3_perf_map, 1069 + &armv8_pmuv3_perf_cache_map, 1070 + ARMV8_PMU_EVTYPE_EVENT); 1071 + } 1072 + 987 1073 static int __armv8_pmuv3_map_event(struct perf_event *event, 988 1074 const unsigned (*extra_event_map) 989 1075 [PERF_COUNT_HW_MAX], ··· 1017 1059 int hw_event_id; 1018 1060 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 1019 1061 1020 - hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map, 1021 - &armv8_pmuv3_perf_cache_map, 1022 - ARMV8_PMU_EVTYPE_EVENT); 1062 + hw_event_id = __armv8_pmuv3_map_event_id(armpmu, event); 1023 1063 1024 1064 /* 1025 1065 * CHAIN events only work when paired with an adjacent counter, and it ··· 1100 1144 { 1101 1145 struct armv8pmu_probe_info *probe = info; 1102 1146 struct arm_pmu *cpu_pmu = probe->pmu; 1103 - u64 dfr0; 1104 1147 u64 pmceid_raw[2]; 1105 1148 u32 pmceid[2]; 1106 1149 int pmuver; 1107 1150 1108 - dfr0 = read_sysreg(id_aa64dfr0_el1); 1109 - pmuver = cpuid_feature_extract_unsigned_field(dfr0, 1110 - ID_AA64DFR0_EL1_PMUVer_SHIFT); 1111 - if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF || 1112 - pmuver == ID_AA64DFR0_EL1_PMUVer_NI) 1151 + pmuver = read_pmuver(); 1152 + if (!pmuv3_implemented(pmuver)) 1113 1153 return; 1114 1154 1115 1155 cpu_pmu->pmuver = pmuver; ··· 1118 1166 /* Add the CPU cycles counter */ 1119 1167 cpu_pmu->num_events += 1; 1120 1168 1121 - pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0); 1122 - pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0); 1169 + pmceid[0] = pmceid_raw[0] = read_pmceid0(); 1170 + pmceid[1] = pmceid_raw[1] = read_pmceid1(); 1123 1171 1124 1172 bitmap_from_arr32(cpu_pmu->pmceid_bitmap, 
1125 1173 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); ··· 1130 1178 bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap, 1131 1179 pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS); 1132 1180 1133 - /* store PMMIR_EL1 register for sysfs */ 1134 - if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31))) 1135 - cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1); 1181 + /* store PMMIR register for sysfs */ 1182 + if (is_pmuv3p4(pmuver) && (pmceid_raw[1] & BIT(31))) 1183 + cpu_pmu->reg_pmmir = read_pmmir(); 1136 1184 else 1137 1185 cpu_pmu->reg_pmmir = 0; 1138 1186 }
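The move to drivers/perf/ replaces every direct read_sysreg()/write_sysreg() with named accessors (read_pmcr(), write_pmcntenset(), read_pmceid0(), ...) so the same driver can also be built for 32-bit ARM, where the PMU registers are reached through coprocessor accesses rather than MRS/MSR. The per-arch <asm/arm_pmuv3.h> headers provide the two implementations; the AArch64 side is a set of thin wrappers, roughly (sketch, the header is not part of this hunk):

	/* AArch64 flavour; AArch32 supplies MRC/MCR-based equivalents. */
	static inline u32 read_pmcr(void)
	{
		return read_sysreg(pmcr_el0);
	}

	static inline void write_pmcr(u32 val)
	{
		write_sysreg(val, pmcr_el0);
	}

	static inline u64 read_pmccntr(void)
	{
		return read_sysreg(pmccntr_el0);
	}

The GENMASK() to GENMASK_ULL() change in the bias helpers is part of the same exercise: GENMASK(63, 32) only works when unsigned long is 64 bits wide, which no longer holds once 32-bit builds are possible.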
+1 -1
arch/arm64/kernel/process.c
··· 217 217 218 218 if (!user_mode(regs)) { 219 219 printk("pc : %pS\n", (void *)regs->pc); 220 - printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr)); 220 + printk("lr : %pS\n", (void *)ptrauth_strip_kernel_insn_pac(lr)); 221 221 } else { 222 222 printk("pc : %016llx\n", regs->pc); 223 223 printk("lr : %016llx\n", lr);
-3
arch/arm64/kernel/proton-pack.c
··· 966 966 { 967 967 const char *v = arm64_get_bp_hardening_vector(slot); 968 968 969 - if (slot < 0) 970 - return; 971 - 972 969 __this_cpu_write(this_cpu_vector, v); 973 970 974 971 /*
+10 -8
arch/arm64/kernel/signal.c
··· 651 651 break; 652 652 653 653 case TPIDR2_MAGIC: 654 - if (!system_supports_sme()) 654 + if (!system_supports_tpidr2()) 655 655 goto invalid; 656 656 657 657 if (user->tpidr2) ··· 802 802 err = restore_fpsimd_context(&user); 803 803 } 804 804 805 - if (err == 0 && system_supports_sme() && user.tpidr2) 805 + if (err == 0 && system_supports_tpidr2() && user.tpidr2) 806 806 err = restore_tpidr2_context(&user); 807 807 808 808 if (err == 0 && system_supports_sme() && user.za) ··· 893 893 return err; 894 894 } 895 895 896 + if (system_supports_tpidr2()) { 897 + err = sigframe_alloc(user, &user->tpidr2_offset, 898 + sizeof(struct tpidr2_context)); 899 + if (err) 900 + return err; 901 + } 902 + 896 903 if (system_supports_sme()) { 897 904 unsigned int vl; 898 905 unsigned int vq = 0; ··· 908 901 vl = sme_max_vl(); 909 902 else 910 903 vl = task_get_sme_vl(current); 911 - 912 - err = sigframe_alloc(user, &user->tpidr2_offset, 913 - sizeof(struct tpidr2_context)); 914 - if (err) 915 - return err; 916 904 917 905 if (thread_za_enabled(&current->thread)) 918 906 vq = sve_vq_from_vl(vl); ··· 976 974 } 977 975 978 976 /* TPIDR2 if supported */ 979 - if (system_supports_sme() && err == 0) { 977 + if (system_supports_tpidr2() && err == 0) { 980 978 struct tpidr2_context __user *tpidr2_ctx = 981 979 apply_user_offset(user, user->tpidr2_offset); 982 980 err |= preserve_tpidr2_context(tpidr2_ctx);
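The system_supports_sme() to system_supports_tpidr2() switches name the real dependency: the TPIDR2 sigframe record needs TPIDR2, not SME as a whole. At this point in the tree the helper simply forwards to the SME check, so those hunks are behaviour-neutral; the functional fix is moving the sigframe_alloc() of the tpidr2 record out of the SME block so it is performed whenever TPIDR2 is supported. The helper is roughly (sketch, per <asm/cpufeature.h>):

	/* TPIDR2 is an SME feature, so today this reduces to the SME check. */
	static __always_inline bool system_supports_tpidr2(void)
	{
		return system_supports_sme();
	}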
+77 -67
arch/arm64/kernel/stacktrace.c
··· 25 25 * 26 26 * The regs must be on a stack currently owned by the calling task. 27 27 */ 28 - static __always_inline void unwind_init_from_regs(struct unwind_state *state, 29 - struct pt_regs *regs) 28 + static __always_inline void 29 + unwind_init_from_regs(struct unwind_state *state, 30 + struct pt_regs *regs) 30 31 { 31 32 unwind_init_common(state, current); 32 33 ··· 43 42 * 44 43 * The function which invokes this must be noinline. 45 44 */ 46 - static __always_inline void unwind_init_from_caller(struct unwind_state *state) 45 + static __always_inline void 46 + unwind_init_from_caller(struct unwind_state *state) 47 47 { 48 48 unwind_init_common(state, current); 49 49 ··· 62 60 * duration of the unwind, or the unwind will be bogus. It is never valid to 63 61 * call this for the current task. 64 62 */ 65 - static __always_inline void unwind_init_from_task(struct unwind_state *state, 66 - struct task_struct *task) 63 + static __always_inline void 64 + unwind_init_from_task(struct unwind_state *state, 65 + struct task_struct *task) 67 66 { 68 67 unwind_init_common(state, task); 69 68 70 69 state->fp = thread_saved_fp(task); 71 70 state->pc = thread_saved_pc(task); 71 + } 72 + 73 + static __always_inline int 74 + unwind_recover_return_address(struct unwind_state *state) 75 + { 76 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 77 + if (state->task->ret_stack && 78 + (state->pc == (unsigned long)return_to_handler)) { 79 + unsigned long orig_pc; 80 + orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc, 81 + (void *)state->fp); 82 + if (WARN_ON_ONCE(state->pc == orig_pc)) 83 + return -EINVAL; 84 + state->pc = orig_pc; 85 + } 86 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 87 + 88 + #ifdef CONFIG_KRETPROBES 89 + if (is_kretprobe_trampoline(state->pc)) { 90 + state->pc = kretprobe_find_ret_addr(state->task, 91 + (void *)state->fp, 92 + &state->kr_cur); 93 + } 94 + #endif /* CONFIG_KRETPROBES */ 95 + 96 + return 0; 72 97 } 73 98 74 99 /* ··· 105 76 * records (e.g. a cycle), determined based on the location and fp value of A 106 77 * and the location (but not the fp value) of B. 107 78 */ 108 - static int notrace unwind_next(struct unwind_state *state) 79 + static __always_inline int 80 + unwind_next(struct unwind_state *state) 109 81 { 110 82 struct task_struct *tsk = state->task; 111 83 unsigned long fp = state->fp; ··· 120 90 if (err) 121 91 return err; 122 92 123 - state->pc = ptrauth_strip_insn_pac(state->pc); 93 + state->pc = ptrauth_strip_kernel_insn_pac(state->pc); 124 94 125 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 126 - if (tsk->ret_stack && 127 - (state->pc == (unsigned long)return_to_handler)) { 128 - unsigned long orig_pc; 129 - /* 130 - * This is a case where function graph tracer has 131 - * modified a return address (LR) in a stack frame 132 - * to hook a function return. 133 - * So replace it to an original value. 
134 - */ 135 - orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc, 136 - (void *)state->fp); 137 - if (WARN_ON_ONCE(state->pc == orig_pc)) 138 - return -EINVAL; 139 - state->pc = orig_pc; 140 - } 141 - #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 142 - #ifdef CONFIG_KRETPROBES 143 - if (is_kretprobe_trampoline(state->pc)) 144 - state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur); 145 - #endif 146 - 147 - return 0; 95 + return unwind_recover_return_address(state); 148 96 } 149 - NOKPROBE_SYMBOL(unwind_next); 150 97 151 - static void notrace unwind(struct unwind_state *state, 152 - stack_trace_consume_fn consume_entry, void *cookie) 98 + static __always_inline void 99 + unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry, 100 + void *cookie) 153 101 { 102 + if (unwind_recover_return_address(state)) 103 + return; 104 + 154 105 while (1) { 155 106 int ret; 156 107 ··· 141 130 if (ret < 0) 142 131 break; 143 132 } 144 - } 145 - NOKPROBE_SYMBOL(unwind); 146 - 147 - static bool dump_backtrace_entry(void *arg, unsigned long where) 148 - { 149 - char *loglvl = arg; 150 - printk("%s %pSb\n", loglvl, (void *)where); 151 - return true; 152 - } 153 - 154 - void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, 155 - const char *loglvl) 156 - { 157 - pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 158 - 159 - if (regs && user_mode(regs)) 160 - return; 161 - 162 - if (!tsk) 163 - tsk = current; 164 - 165 - if (!try_get_task_stack(tsk)) 166 - return; 167 - 168 - printk("%sCall trace:\n", loglvl); 169 - arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs); 170 - 171 - put_task_stack(tsk); 172 - } 173 - 174 - void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) 175 - { 176 - dump_backtrace(NULL, tsk, loglvl); 177 - barrier(); 178 133 } 179 134 180 135 /* ··· 206 229 } 207 230 208 231 unwind(&state, consume_entry, cookie); 232 + } 233 + 234 + static bool dump_backtrace_entry(void *arg, unsigned long where) 235 + { 236 + char *loglvl = arg; 237 + printk("%s %pSb\n", loglvl, (void *)where); 238 + return true; 239 + } 240 + 241 + void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, 242 + const char *loglvl) 243 + { 244 + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 245 + 246 + if (regs && user_mode(regs)) 247 + return; 248 + 249 + if (!tsk) 250 + tsk = current; 251 + 252 + if (!try_get_task_stack(tsk)) 253 + return; 254 + 255 + printk("%sCall trace:\n", loglvl); 256 + arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs); 257 + 258 + put_task_stack(tsk); 259 + } 260 + 261 + void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) 262 + { 263 + dump_backtrace(NULL, tsk, loglvl); 264 + barrier(); 209 265 }
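Moving dump_backtrace() and show_stack() below unwind() is pure reordering; they remain ordinary arch_stack_walk() consumers, as dump_backtrace_entry() shows: the callback receives the cookie and one address per frame, and returns false to stop the walk. A minimal hypothetical consumer for comparison (store_pc() and struct trace_buf are illustrative, not kernel API):

	struct trace_buf {
		unsigned long pc[16];
		unsigned int nr;
	};

	static bool store_pc(void *cookie, unsigned long pc)
	{
		struct trace_buf *buf = cookie;

		buf->pc[buf->nr++] = pc;
		return buf->nr < ARRAY_SIZE(buf->pc);	/* false ends the walk */
	}

	static void snapshot_current_stack(struct trace_buf *buf)
	{
		buf->nr = 0;
		arch_stack_walk(store_pc, buf, current, NULL);
	}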
-45
arch/arm64/kvm/arm.c
··· 16 16 #include <linux/fs.h> 17 17 #include <linux/mman.h> 18 18 #include <linux/sched.h> 19 - #include <linux/kmemleak.h> 20 19 #include <linux/kvm.h> 21 20 #include <linux/kvm_irqfd.h> 22 21 #include <linux/irqbypass.h> ··· 45 46 #include <kvm/arm_psci.h> 46 47 47 48 static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT; 48 - DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized); 49 49 50 50 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); 51 51 ··· 2128 2130 return err; 2129 2131 } 2130 2132 2131 - static void __init _kvm_host_prot_finalize(void *arg) 2132 - { 2133 - int *err = arg; 2134 - 2135 - if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize))) 2136 - WRITE_ONCE(*err, -EINVAL); 2137 - } 2138 - 2139 - static int __init pkvm_drop_host_privileges(void) 2140 - { 2141 - int ret = 0; 2142 - 2143 - /* 2144 - * Flip the static key upfront as that may no longer be possible 2145 - * once the host stage 2 is installed. 2146 - */ 2147 - static_branch_enable(&kvm_protected_mode_initialized); 2148 - on_each_cpu(_kvm_host_prot_finalize, &ret, 1); 2149 - return ret; 2150 - } 2151 - 2152 - static int __init finalize_hyp_mode(void) 2153 - { 2154 - if (!is_protected_kvm_enabled()) 2155 - return 0; 2156 - 2157 - /* 2158 - * Exclude HYP sections from kmemleak so that they don't get peeked 2159 - * at, which would end badly once inaccessible. 2160 - */ 2161 - kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); 2162 - kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size); 2163 - return pkvm_drop_host_privileges(); 2164 - } 2165 - 2166 2133 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) 2167 2134 { 2168 2135 struct kvm_vcpu *vcpu; ··· 2244 2281 err = init_subsystems(); 2245 2282 if (err) 2246 2283 goto out_hyp; 2247 - 2248 - if (!in_hyp_mode) { 2249 - err = finalize_hyp_mode(); 2250 - if (err) { 2251 - kvm_err("Failed to finalize Hyp protection\n"); 2252 - goto out_subs; 2253 - } 2254 - } 2255 2284 2256 2285 if (is_protected_kvm_enabled()) { 2257 2286 kvm_info("Protected nVHE mode initialized successfully\n");
+47
arch/arm64/kvm/pkvm.c
··· 4 4 * Author: Quentin Perret <qperret@google.com> 5 5 */ 6 6 7 + #include <linux/init.h> 8 + #include <linux/kmemleak.h> 7 9 #include <linux/kvm_host.h> 8 10 #include <linux/memblock.h> 9 11 #include <linux/mutex.h> ··· 14 12 #include <asm/kvm_pkvm.h> 15 13 16 14 #include "hyp_constants.h" 15 + 16 + DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized); 17 17 18 18 static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory); 19 19 static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr); ··· 217 213 mutex_init(&host_kvm->lock); 218 214 return 0; 219 215 } 216 + 217 + static void __init _kvm_host_prot_finalize(void *arg) 218 + { 219 + int *err = arg; 220 + 221 + if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize))) 222 + WRITE_ONCE(*err, -EINVAL); 223 + } 224 + 225 + static int __init pkvm_drop_host_privileges(void) 226 + { 227 + int ret = 0; 228 + 229 + /* 230 + * Flip the static key upfront as that may no longer be possible 231 + * once the host stage 2 is installed. 232 + */ 233 + static_branch_enable(&kvm_protected_mode_initialized); 234 + on_each_cpu(_kvm_host_prot_finalize, &ret, 1); 235 + return ret; 236 + } 237 + 238 + static int __init finalize_pkvm(void) 239 + { 240 + int ret; 241 + 242 + if (!is_protected_kvm_enabled()) 243 + return 0; 244 + 245 + /* 246 + * Exclude HYP sections from kmemleak so that they don't get peeked 247 + * at, which would end badly once inaccessible. 248 + */ 249 + kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); 250 + kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size); 251 + 252 + ret = pkvm_drop_host_privileges(); 253 + if (ret) 254 + pr_err("Failed to finalize Hyp protection: %d\n", ret); 255 + 256 + return ret; 257 + } 258 + device_initcall_sync(finalize_pkvm);
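Running finalize_pkvm() from device_initcall_sync() pins down the ordering: initcall levels run strictly in sequence, and the "sync" level 6s starts only after every plain device_initcall() at level 6 has returned, so any driver probe that still needs the host to be fully privileged is guaranteed to have finished before EL2 protection is finalized. A minimal sketch of that contract (my_driver_init() is hypothetical):

	/*
	 * All level-6 initcalls complete before any level-6s initcall runs,
	 * regardless of link order.
	 */
	static int __init my_driver_init(void)
	{
		/* may still rely on full-host privileges here */
		return 0;
	}
	device_initcall(my_driver_init);	/* level 6: runs first */
	device_initcall_sync(finalize_pkvm);	/* level 6s: runs after */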
-6
arch/arm64/lib/uaccess_flushcache.c
··· 19 19 } 20 20 EXPORT_SYMBOL_GPL(memcpy_flushcache); 21 21 22 - void memcpy_page_flushcache(char *to, struct page *page, size_t offset, 23 - size_t len) 24 - { 25 - memcpy_flushcache(to, page_address(page) + offset, len); 26 - } 27 - 28 22 unsigned long __copy_user_flushcache(void *to, const void __user *from, 29 23 unsigned long n) 30 24 {
+1 -1
arch/arm64/mm/Makefile
··· 2 2 obj-y := dma-mapping.o extable.o fault.o init.o \ 3 3 cache.o copypage.o flush.o \ 4 4 ioremap.o mmap.o pgd.o mmu.o \ 5 - context.o proc.o pageattr.o 5 + context.o proc.o pageattr.o fixmap.o 6 6 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 7 7 obj-$(CONFIG_PTDUMP_CORE) += ptdump.o 8 8 obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o
+1 -16
arch/arm64/mm/dma-mapping.c
··· 36 36 { 37 37 unsigned long start = (unsigned long)page_address(page); 38 38 39 - /* 40 - * The architecture only requires a clean to the PoC here in order to 41 - * meet the requirements of the DMA API. However, some vendors (i.e. 42 - * Qualcomm) abuse the DMA API for transferring buffers from the 43 - * non-secure to the secure world, resetting the system if a non-secure 44 - * access shows up after the buffer has been transferred: 45 - * 46 - * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org 47 - * 48 - * Using clean+invalidate appears to make this issue less likely, but 49 - * the drivers themselves still need fixing as the CPU could issue a 50 - * speculative read from the buffer via the linear mapping irrespective 51 - * of the cache maintenance we use. Once the drivers are fixed, we can 52 - * relax this to a clean operation. 53 - */ 54 - dcache_clean_inval_poc(start, start + size); 39 + dcache_clean_poc(start, start + size); 55 40 } 56 41 57 42 #ifdef CONFIG_IOMMU_DMA
+203
arch/arm64/mm/fixmap.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Fixmap manipulation code 4 + */ 5 + 6 + #include <linux/bug.h> 7 + #include <linux/init.h> 8 + #include <linux/kernel.h> 9 + #include <linux/libfdt.h> 10 + #include <linux/memory.h> 11 + #include <linux/mm.h> 12 + #include <linux/sizes.h> 13 + 14 + #include <asm/fixmap.h> 15 + #include <asm/kernel-pgtable.h> 16 + #include <asm/pgalloc.h> 17 + #include <asm/tlbflush.h> 18 + 19 + #define NR_BM_PTE_TABLES \ 20 + SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT) 21 + #define NR_BM_PMD_TABLES \ 22 + SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT) 23 + 24 + static_assert(NR_BM_PMD_TABLES == 1); 25 + 26 + #define __BM_TABLE_IDX(addr, shift) \ 27 + (((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift))) 28 + 29 + #define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT) 30 + 31 + static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss; 32 + static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; 33 + static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; 34 + 35 + static inline pte_t *fixmap_pte(unsigned long addr) 36 + { 37 + return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)]; 38 + } 39 + 40 + static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr) 41 + { 42 + pmd_t pmd = READ_ONCE(*pmdp); 43 + pte_t *ptep; 44 + 45 + if (pmd_none(pmd)) { 46 + ptep = bm_pte[BM_PTE_TABLE_IDX(addr)]; 47 + __pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE); 48 + } 49 + } 50 + 51 + static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr, 52 + unsigned long end) 53 + { 54 + unsigned long next; 55 + pud_t pud = READ_ONCE(*pudp); 56 + pmd_t *pmdp; 57 + 58 + if (pud_none(pud)) 59 + __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE); 60 + 61 + pmdp = pmd_offset_kimg(pudp, addr); 62 + do { 63 + next = pmd_addr_end(addr, end); 64 + early_fixmap_init_pte(pmdp, addr); 65 + } while (pmdp++, addr = next, addr != end); 66 + } 67 + 68 + 69 + static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr, 70 + unsigned long end) 71 + { 72 + p4d_t p4d = READ_ONCE(*p4dp); 73 + pud_t *pudp; 74 + 75 + if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) && 76 + p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) { 77 + /* 78 + * We only end up here if the kernel mapping and the fixmap 79 + * share the top level pgd entry, which should only happen on 80 + * 16k/4 levels configurations. 81 + */ 82 + BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 83 + } 84 + 85 + if (p4d_none(p4d)) 86 + __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE); 87 + 88 + pudp = pud_offset_kimg(p4dp, addr); 89 + early_fixmap_init_pmd(pudp, addr, end); 90 + } 91 + 92 + /* 93 + * The p*d_populate functions call virt_to_phys implicitly so they can't be used 94 + * directly on kernel symbols (bm_p*d). This function is called too early to use 95 + * lm_alias so __p*d_populate functions must be used to populate with the 96 + * physical address from __pa_symbol. 97 + */ 98 + void __init early_fixmap_init(void) 99 + { 100 + unsigned long addr = FIXADDR_TOT_START; 101 + unsigned long end = FIXADDR_TOP; 102 + 103 + pgd_t *pgdp = pgd_offset_k(addr); 104 + p4d_t *p4dp = p4d_offset(pgdp, addr); 105 + 106 + early_fixmap_init_pud(p4dp, addr, end); 107 + } 108 + 109 + /* 110 + * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we 111 + * ever need to use IPIs for TLB broadcasting, then we're in trouble here. 
112 + */ 113 + void __set_fixmap(enum fixed_addresses idx, 114 + phys_addr_t phys, pgprot_t flags) 115 + { 116 + unsigned long addr = __fix_to_virt(idx); 117 + pte_t *ptep; 118 + 119 + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); 120 + 121 + ptep = fixmap_pte(addr); 122 + 123 + if (pgprot_val(flags)) { 124 + set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); 125 + } else { 126 + pte_clear(&init_mm, addr, ptep); 127 + flush_tlb_kernel_range(addr, addr+PAGE_SIZE); 128 + } 129 + } 130 + 131 + void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) 132 + { 133 + const u64 dt_virt_base = __fix_to_virt(FIX_FDT); 134 + phys_addr_t dt_phys_base; 135 + int offset; 136 + void *dt_virt; 137 + 138 + /* 139 + * Check whether the physical FDT address is set and meets the minimum 140 + * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be 141 + * at least 8 bytes so that we can always access the magic and size 142 + * fields of the FDT header after mapping the first chunk, double check 143 + * here if that is indeed the case. 144 + */ 145 + BUILD_BUG_ON(MIN_FDT_ALIGN < 8); 146 + if (!dt_phys || dt_phys % MIN_FDT_ALIGN) 147 + return NULL; 148 + 149 + dt_phys_base = round_down(dt_phys, PAGE_SIZE); 150 + offset = dt_phys % PAGE_SIZE; 151 + dt_virt = (void *)dt_virt_base + offset; 152 + 153 + /* map the first chunk so we can read the size from the header */ 154 + create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot); 155 + 156 + if (fdt_magic(dt_virt) != FDT_MAGIC) 157 + return NULL; 158 + 159 + *size = fdt_totalsize(dt_virt); 160 + if (*size > MAX_FDT_SIZE) 161 + return NULL; 162 + 163 + if (offset + *size > PAGE_SIZE) { 164 + create_mapping_noalloc(dt_phys_base, dt_virt_base, 165 + offset + *size, prot); 166 + } 167 + 168 + return dt_virt; 169 + } 170 + 171 + /* 172 + * Copy the fixmap region into a new pgdir. 173 + */ 174 + void __init fixmap_copy(pgd_t *pgdir) 175 + { 176 + if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) { 177 + /* 178 + * The fixmap falls in a separate pgd to the kernel, and doesn't 179 + * live in the carveout for the swapper_pg_dir. We can simply 180 + * re-use the existing dir for the fixmap. 181 + */ 182 + set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START), 183 + READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START))); 184 + } else if (CONFIG_PGTABLE_LEVELS > 3) { 185 + pgd_t *bm_pgdp; 186 + p4d_t *bm_p4dp; 187 + pud_t *bm_pudp; 188 + /* 189 + * The fixmap shares its top level pgd entry with the kernel 190 + * mapping. This can really only occur when we are running 191 + * with 16k/4 levels, so we can simply reuse the pud level 192 + * entry instead. 193 + */ 194 + BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 195 + bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START); 196 + bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START); 197 + bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START); 198 + pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd)); 199 + pud_clear_fixmap(); 200 + } else { 201 + BUG(); 202 + } 203 + }
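The new file is mostly a straight move out of mmu.c; what changes is the backing tables (an array of page-granular bm_pte tables, one per PMD spanned, with a static_assert that a single PMD-level table covers the whole fixmap at the PUD level) rather than the interface. Typical use of a fixmap slot, for reference (copy_phys_page() and FIX_EXAMPLE are illustrative; real slots are enumerated in <asm/fixmap.h>):

	/* Map a physical page at a compile-time-fixed VA, use it, unmap. */
	static void copy_phys_page(void *dst, phys_addr_t page_phys)
	{
		__set_fixmap(FIX_EXAMPLE, page_phys, PAGE_KERNEL);
		memcpy(dst, (void *)fix_to_virt(FIX_EXAMPLE), PAGE_SIZE);
		clear_fixmap(FIX_EXAMPLE);	/* pte_clear + TLB flush */
	}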
+3 -31
arch/arm64/mm/init.c
··· 61 61 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4). 62 62 * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory, 63 63 * otherwise it is empty. 64 - * 65 - * Memory reservation for crash kernel either done early or deferred 66 - * depending on DMA memory zones configs (ZONE_DMA) -- 67 - * 68 - * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized 69 - * here instead of max_zone_phys(). This lets early reservation of 70 - * crash kernel memory which has a dependency on arm64_dma_phys_limit. 71 - * Reserving memory early for crash kernel allows linear creation of block 72 - * mappings (greater than page-granularity) for all the memory bank rangs. 73 - * In this scheme a comparatively quicker boot is observed. 74 - * 75 - * If ZONE_DMA configs are defined, crash kernel memory reservation 76 - * is delayed until DMA zone memory range size initialization performed in 77 - * zone_sizes_init(). The defer is necessary to steer clear of DMA zone 78 - * memory range to avoid overlap allocation. So crash kernel memory boundaries 79 - * are not known when mapping all bank memory ranges, which otherwise means 80 - * not possible to exclude crash kernel range from creating block mappings 81 - * so page-granularity mappings are created for the entire memory range. 82 - * Hence a slightly slower boot is observed. 83 - * 84 - * Note: Page-granularity mappings are necessary for crash kernel memory 85 - * range for shrinking its size via /sys/kernel/kexec_crash_size interface. 86 64 */ 87 - #if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32) 88 65 phys_addr_t __ro_after_init arm64_dma_phys_limit; 89 - #else 90 - phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; 91 - #endif 92 66 93 67 /* Current arm64 boot protocol requires 2MB alignment */ 94 68 #define CRASH_ALIGN SZ_2M ··· 222 248 if (!arm64_dma_phys_limit) 223 249 arm64_dma_phys_limit = dma32_phys_limit; 224 250 #endif 251 + if (!arm64_dma_phys_limit) 252 + arm64_dma_phys_limit = PHYS_MASK + 1; 225 253 max_zone_pfns[ZONE_NORMAL] = max_pfn; 226 254 227 255 free_area_init(max_zone_pfns); ··· 384 408 385 409 early_init_fdt_scan_reserved_mem(); 386 410 387 - if (!defer_reserve_crashkernel()) 388 - reserve_crashkernel(); 389 - 390 411 high_memory = __va(memblock_end_of_DRAM() - 1) + 1; 391 412 } 392 413 ··· 430 457 * request_standard_resources() depends on crashkernel's memory being 431 458 * reserved, so do it here. 432 459 */ 433 - if (defer_reserve_crashkernel()) 434 - reserve_crashkernel(); 460 + reserve_crashkernel(); 435 461 436 462 memblock_dump_all(); 437 463 }
+57 -231
arch/arm64/mm/mmu.c
··· 24 24 #include <linux/mm.h> 25 25 #include <linux/vmalloc.h> 26 26 #include <linux/set_memory.h> 27 + #include <linux/kfence.h> 27 28 28 29 #include <asm/barrier.h> 29 30 #include <asm/cputype.h> ··· 39 38 #include <asm/ptdump.h> 40 39 #include <asm/tlbflush.h> 41 40 #include <asm/pgalloc.h> 41 + #include <asm/kfence.h> 42 42 43 43 #define NO_BLOCK_MAPPINGS BIT(0) 44 44 #define NO_CONT_MAPPINGS BIT(1) ··· 72 70 */ 73 71 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; 74 72 EXPORT_SYMBOL(empty_zero_page); 75 - 76 - static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; 77 - static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; 78 - static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; 79 73 80 74 static DEFINE_SPINLOCK(swapper_pgdir_lock); 81 75 static DEFINE_MUTEX(fixmap_lock); ··· 448 450 * without allocating new levels of table. Note that this permits the 449 451 * creation of new section or page entries. 450 452 */ 451 - static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, 452 - phys_addr_t size, pgprot_t prot) 453 + void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, 454 + phys_addr_t size, pgprot_t prot) 453 455 { 454 456 if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { 455 457 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", ··· 508 510 PAGE_KERNEL_RO); 509 511 } 510 512 511 - static bool crash_mem_map __initdata; 513 + #ifdef CONFIG_KFENCE 512 514 513 - static int __init enable_crash_mem_map(char *arg) 515 + bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; 516 + 517 + /* early_param() will be parsed before map_mem() below. */ 518 + static int __init parse_kfence_early_init(char *arg) 514 519 { 515 - /* 516 - * Proper parameter parsing is done by reserve_crashkernel(). We only 517 - * need to know if the linear map has to avoid block mappings so that 518 - * the crashkernel reservations can be unmapped later. 519 - */ 520 - crash_mem_map = true; 520 + int val; 521 521 522 + if (get_option(&arg, &val)) 523 + kfence_early_init = !!val; 522 524 return 0; 523 525 } 524 - early_param("crashkernel", enable_crash_mem_map); 526 + early_param("kfence.sample_interval", parse_kfence_early_init); 527 + 528 + static phys_addr_t __init arm64_kfence_alloc_pool(void) 529 + { 530 + phys_addr_t kfence_pool; 531 + 532 + if (!kfence_early_init) 533 + return 0; 534 + 535 + kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); 536 + if (!kfence_pool) { 537 + pr_err("failed to allocate kfence pool\n"); 538 + kfence_early_init = false; 539 + return 0; 540 + } 541 + 542 + /* Temporarily mark as NOMAP. */ 543 + memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); 544 + 545 + return kfence_pool; 546 + } 547 + 548 + static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) 549 + { 550 + if (!kfence_pool) 551 + return; 552 + 553 + /* KFENCE pool needs page-level mapping. 
*/ 554 + __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, 555 + pgprot_tagged(PAGE_KERNEL), 556 + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); 557 + memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); 558 + __kfence_pool = phys_to_virt(kfence_pool); 559 + } 560 + #else /* CONFIG_KFENCE */ 561 + 562 + static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; } 563 + static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { } 564 + 565 + #endif /* CONFIG_KFENCE */ 525 566 526 567 static void __init map_mem(pgd_t *pgdp) 527 568 { ··· 568 531 phys_addr_t kernel_start = __pa_symbol(_stext); 569 532 phys_addr_t kernel_end = __pa_symbol(__init_begin); 570 533 phys_addr_t start, end; 534 + phys_addr_t early_kfence_pool; 571 535 int flags = NO_EXEC_MAPPINGS; 572 536 u64 i; 573 537 ··· 581 543 */ 582 544 BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end)); 583 545 546 + early_kfence_pool = arm64_kfence_alloc_pool(); 547 + 584 548 if (can_set_direct_map()) 585 549 flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 586 550 ··· 593 553 * the following for-loop 594 554 */ 595 555 memblock_mark_nomap(kernel_start, kernel_end - kernel_start); 596 - 597 - #ifdef CONFIG_KEXEC_CORE 598 - if (crash_mem_map) { 599 - if (defer_reserve_crashkernel()) 600 - flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 601 - else if (crashk_res.end) 602 - memblock_mark_nomap(crashk_res.start, 603 - resource_size(&crashk_res)); 604 - } 605 - #endif 606 556 607 557 /* map all the memory banks */ 608 558 for_each_mem_range(i, &start, &end) { ··· 620 590 __map_memblock(pgdp, kernel_start, kernel_end, 621 591 PAGE_KERNEL, NO_CONT_MAPPINGS); 622 592 memblock_clear_nomap(kernel_start, kernel_end - kernel_start); 623 - 624 - /* 625 - * Use page-level mappings here so that we can shrink the region 626 - * in page granularity and put back unused memory to buddy system 627 - * through /sys/kernel/kexec_crash_size interface. 628 - */ 629 - #ifdef CONFIG_KEXEC_CORE 630 - if (crash_mem_map && !defer_reserve_crashkernel()) { 631 - if (crashk_res.end) { 632 - __map_memblock(pgdp, crashk_res.start, 633 - crashk_res.end + 1, 634 - PAGE_KERNEL, 635 - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); 636 - memblock_clear_nomap(crashk_res.start, 637 - resource_size(&crashk_res)); 638 - } 639 - } 640 - #endif 593 + arm64_kfence_map_pool(early_kfence_pool, pgdp); 641 594 } 642 595 643 596 void mark_rodata_ro(void) ··· 747 734 &vmlinux_initdata, 0, VM_NO_GUARD); 748 735 map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); 749 736 750 - if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) { 751 - /* 752 - * The fixmap falls in a separate pgd to the kernel, and doesn't 753 - * live in the carveout for the swapper_pg_dir. We can simply 754 - * re-use the existing dir for the fixmap. 755 - */ 756 - set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START), 757 - READ_ONCE(*pgd_offset_k(FIXADDR_START))); 758 - } else if (CONFIG_PGTABLE_LEVELS > 3) { 759 - pgd_t *bm_pgdp; 760 - p4d_t *bm_p4dp; 761 - pud_t *bm_pudp; 762 - /* 763 - * The fixmap shares its top level pgd entry with the kernel 764 - * mapping. This can really only occur when we are running 765 - * with 16k/4 levels, so we can simply reuse the pud level 766 - * entry instead. 
767 - */ 768 - BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 769 - bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START); 770 - bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START); 771 - bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START); 772 - pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd)); 773 - pud_clear_fixmap(); 774 - } else { 775 - BUG(); 776 - } 777 - 737 + fixmap_copy(pgdp); 778 738 kasan_copy_shadow(pgdp); 779 739 } 780 740 ··· 1161 1175 free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); 1162 1176 } 1163 1177 #endif /* CONFIG_MEMORY_HOTPLUG */ 1164 - 1165 - static inline pud_t *fixmap_pud(unsigned long addr) 1166 - { 1167 - pgd_t *pgdp = pgd_offset_k(addr); 1168 - p4d_t *p4dp = p4d_offset(pgdp, addr); 1169 - p4d_t p4d = READ_ONCE(*p4dp); 1170 - 1171 - BUG_ON(p4d_none(p4d) || p4d_bad(p4d)); 1172 - 1173 - return pud_offset_kimg(p4dp, addr); 1174 - } 1175 - 1176 - static inline pmd_t *fixmap_pmd(unsigned long addr) 1177 - { 1178 - pud_t *pudp = fixmap_pud(addr); 1179 - pud_t pud = READ_ONCE(*pudp); 1180 - 1181 - BUG_ON(pud_none(pud) || pud_bad(pud)); 1182 - 1183 - return pmd_offset_kimg(pudp, addr); 1184 - } 1185 - 1186 - static inline pte_t *fixmap_pte(unsigned long addr) 1187 - { 1188 - return &bm_pte[pte_index(addr)]; 1189 - } 1190 - 1191 - /* 1192 - * The p*d_populate functions call virt_to_phys implicitly so they can't be used 1193 - * directly on kernel symbols (bm_p*d). This function is called too early to use 1194 - * lm_alias so __p*d_populate functions must be used to populate with the 1195 - * physical address from __pa_symbol. 1196 - */ 1197 - void __init early_fixmap_init(void) 1198 - { 1199 - pgd_t *pgdp; 1200 - p4d_t *p4dp, p4d; 1201 - pud_t *pudp; 1202 - pmd_t *pmdp; 1203 - unsigned long addr = FIXADDR_START; 1204 - 1205 - pgdp = pgd_offset_k(addr); 1206 - p4dp = p4d_offset(pgdp, addr); 1207 - p4d = READ_ONCE(*p4dp); 1208 - if (CONFIG_PGTABLE_LEVELS > 3 && 1209 - !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) { 1210 - /* 1211 - * We only end up here if the kernel mapping and the fixmap 1212 - * share the top level pgd entry, which should only happen on 1213 - * 16k/4 levels configurations. 
1214 - */ 1215 - BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 1216 - pudp = pud_offset_kimg(p4dp, addr); 1217 - } else { 1218 - if (p4d_none(p4d)) 1219 - __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE); 1220 - pudp = fixmap_pud(addr); 1221 - } 1222 - if (pud_none(READ_ONCE(*pudp))) 1223 - __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE); 1224 - pmdp = fixmap_pmd(addr); 1225 - __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE); 1226 - 1227 - /* 1228 - * The boot-ioremap range spans multiple pmds, for which 1229 - * we are not prepared: 1230 - */ 1231 - BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) 1232 - != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); 1233 - 1234 - if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) 1235 - || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { 1236 - WARN_ON(1); 1237 - pr_warn("pmdp %p != %p, %p\n", 1238 - pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), 1239 - fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); 1240 - pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 1241 - fix_to_virt(FIX_BTMAP_BEGIN)); 1242 - pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", 1243 - fix_to_virt(FIX_BTMAP_END)); 1244 - 1245 - pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 1246 - pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); 1247 - } 1248 - } 1249 - 1250 - /* 1251 - * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we 1252 - * ever need to use IPIs for TLB broadcasting, then we're in trouble here. 1253 - */ 1254 - void __set_fixmap(enum fixed_addresses idx, 1255 - phys_addr_t phys, pgprot_t flags) 1256 - { 1257 - unsigned long addr = __fix_to_virt(idx); 1258 - pte_t *ptep; 1259 - 1260 - BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); 1261 - 1262 - ptep = fixmap_pte(addr); 1263 - 1264 - if (pgprot_val(flags)) { 1265 - set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); 1266 - } else { 1267 - pte_clear(&init_mm, addr, ptep); 1268 - flush_tlb_kernel_range(addr, addr+PAGE_SIZE); 1269 - } 1270 - } 1271 - 1272 - void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) 1273 - { 1274 - const u64 dt_virt_base = __fix_to_virt(FIX_FDT); 1275 - int offset; 1276 - void *dt_virt; 1277 - 1278 - /* 1279 - * Check whether the physical FDT address is set and meets the minimum 1280 - * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be 1281 - * at least 8 bytes so that we can always access the magic and size 1282 - * fields of the FDT header after mapping the first chunk, double check 1283 - * here if that is indeed the case. 1284 - */ 1285 - BUILD_BUG_ON(MIN_FDT_ALIGN < 8); 1286 - if (!dt_phys || dt_phys % MIN_FDT_ALIGN) 1287 - return NULL; 1288 - 1289 - /* 1290 - * Make sure that the FDT region can be mapped without the need to 1291 - * allocate additional translation table pages, so that it is safe 1292 - * to call create_mapping_noalloc() this early. 1293 - * 1294 - * On 64k pages, the FDT will be mapped using PTEs, so we need to 1295 - * be in the same PMD as the rest of the fixmap. 1296 - * On 4k pages, we'll use section mappings for the FDT so we only 1297 - * have to be in the same PUD. 
1298 - */ 1299 - BUILD_BUG_ON(dt_virt_base % SZ_2M); 1300 - 1301 - BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != 1302 - __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); 1303 - 1304 - offset = dt_phys % SWAPPER_BLOCK_SIZE; 1305 - dt_virt = (void *)dt_virt_base + offset; 1306 - 1307 - /* map the first chunk so we can read the size from the header */ 1308 - create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), 1309 - dt_virt_base, SWAPPER_BLOCK_SIZE, prot); 1310 - 1311 - if (fdt_magic(dt_virt) != FDT_MAGIC) 1312 - return NULL; 1313 - 1314 - *size = fdt_totalsize(dt_virt); 1315 - if (*size > MAX_FDT_SIZE) 1316 - return NULL; 1317 - 1318 - if (offset + *size > SWAPPER_BLOCK_SIZE) 1319 - create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, 1320 - round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot); 1321 - 1322 - return dt_virt; 1323 - } 1324 1178 1325 1179 int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) 1326 1180 {
+5 -2
arch/arm64/mm/pageattr.c
··· 11 11 #include <asm/cacheflush.h> 12 12 #include <asm/set_memory.h> 13 13 #include <asm/tlbflush.h> 14 + #include <asm/kfence.h> 14 15 15 16 struct page_change_data { 16 17 pgprot_t set_mask; ··· 23 22 bool can_set_direct_map(void) 24 23 { 25 24 /* 26 - * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be 25 + * rodata_full and DEBUG_PAGEALLOC require linear map to be 27 26 * mapped at page granularity, so that it is possible to 28 27 * protect/unprotect single pages. 28 + * 29 + * KFENCE pool requires page-granular mapping if initialized late. 29 30 */ 30 31 return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() || 31 - IS_ENABLED(CONFIG_KFENCE); 32 + arm64_kfence_can_set_direct_map(); 32 33 } 33 34 34 35 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
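With the pool allocated early, KFENCE alone no longer forces the whole linear map down to page granularity; only a late-initialised pool does. A standalone model of the new decision (arm64_kfence_can_set_direct_map() reduces to "KFENCE built in and not early-initialised"; names here are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	static bool rodata_full, debug_pagealloc;
	static bool kfence_enabled = true, kfence_early_init;

	static bool can_set_direct_map(void)
	{
		/* a late-init KFENCE pool still needs page-granular mappings */
		return rodata_full || debug_pagealloc ||
		       (kfence_enabled && !kfence_early_init);
	}

	int main(void)
	{
		kfence_early_init = false;
		printf("late-init KFENCE  -> page granularity: %d\n", can_set_direct_map());
		kfence_early_init = true;
		printf("early-init KFENCE -> page granularity: %d\n", can_set_direct_map());
		return 0;
	}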
+1 -1
arch/arm64/mm/ptdump.c
··· 45 45 { MODULES_END, "Modules end" }, 46 46 { VMALLOC_START, "vmalloc() area" }, 47 47 { VMALLOC_END, "vmalloc() end" }, 48 - { FIXADDR_START, "Fixmap start" }, 48 + { FIXADDR_TOT_START, "Fixmap start" }, 49 49 { FIXADDR_TOP, "Fixmap end" }, 50 50 { PCI_IO_START, "PCI I/O start" }, 51 51 { PCI_IO_END, "PCI I/O end" },
+57 -36
arch/arm64/tools/gen-sysreg.awk
··· 4 4 # 5 5 # Usage: awk -f gen-sysreg.awk sysregs.txt 6 6 7 + function block_current() { 8 + return __current_block[__current_block_depth]; 9 + } 10 + 7 11 # Log an error and terminate 8 12 function fatal(msg) { 9 13 print "Error at " NR ": " msg > "/dev/stderr" 14 + 15 + printf "Current block nesting:" 16 + 17 + for (i = 0; i <= __current_block_depth; i++) { 18 + printf " " __current_block[i] 19 + } 20 + printf "\n" 21 + 10 22 exit 1 11 23 } 12 24 13 - # Sanity check that the start or end of a block makes sense at this point in 14 - # the file. If not, produce an error and terminate. 15 - # 16 - # @this - the $Block or $EndBlock 17 - # @prev - the only valid block to already be in (value of @block) 18 - # @new - the new value of @block 19 - function change_block(this, prev, new) { 20 - if (block != prev) 21 - fatal("unexpected " this " (inside " block ")") 25 + # Enter a new block, setting the active block to @block 26 + function block_push(block) { 27 + __current_block[++__current_block_depth] = block 28 + } 22 29 23 - block = new 30 + # Exit a block, setting the active block to the parent block 31 + function block_pop() { 32 + if (__current_block_depth == 0) 33 + fatal("error: block_pop() in root block") 34 + 35 + __current_block_depth--; 24 36 } 25 37 26 38 # Sanity check the number of records for a field makes sense. If not, produce ··· 96 84 print "/* Generated file - do not edit */" 97 85 print "" 98 86 99 - block = "None" 87 + __current_block_depth = 0 88 + __current_block[__current_block_depth] = "Root" 100 89 } 101 90 102 91 END { 92 + if (__current_block_depth != 0) 93 + fatal("Missing terminator for " block_current() " block") 94 + 103 95 print "#endif /* __ASM_SYSREG_DEFS_H */" 104 96 } 105 97 ··· 111 95 /^$/ { next } 112 96 /^[\t ]*#/ { next } 113 97 114 - /^SysregFields/ { 115 - change_block("SysregFields", "None", "SysregFields") 98 + /^SysregFields/ && block_current() == "Root" { 99 + block_push("SysregFields") 100 + 116 101 expect_fields(2) 117 102 118 103 reg = $2 ··· 127 110 next 128 111 } 129 112 130 - /^EndSysregFields/ { 113 + /^EndSysregFields/ && block_current() == "SysregFields" { 131 114 if (next_bit > 0) 132 115 fatal("Unspecified bits in " reg) 133 - 134 - change_block("EndSysregFields", "SysregFields", "None") 135 116 136 117 define(reg "_RES0", "(" res0 ")") 137 118 define(reg "_RES1", "(" res1 ")") ··· 141 126 res1 = null 142 127 unkn = null 143 128 129 + block_pop() 144 130 next 145 131 } 146 132 147 - /^Sysreg/ { 148 - change_block("Sysreg", "None", "Sysreg") 133 + /^Sysreg/ && block_current() == "Root" { 134 + block_push("Sysreg") 135 + 149 136 expect_fields(7) 150 137 151 138 reg = $2 ··· 177 160 next 178 161 } 179 162 180 - /^EndSysreg/ { 163 + /^EndSysreg/ && block_current() == "Sysreg" { 181 164 if (next_bit > 0) 182 165 fatal("Unspecified bits in " reg) 183 - 184 - change_block("EndSysreg", "Sysreg", "None") 185 166 186 167 if (res0 != null) 187 168 define(reg "_RES0", "(" res0 ")") ··· 200 185 res1 = null 201 186 unkn = null 202 187 188 + block_pop() 203 189 next 204 190 } 205 191 206 192 # Currently this is effectivey a comment, in future we may want to emit 207 193 # defines for the fields. 
208 - /^Fields/ && (block == "Sysreg") { 194 + /^Fields/ && block_current() == "Sysreg" { 209 195 expect_fields(2) 210 196 211 197 if (next_bit != 63) ··· 224 208 } 225 209 226 210 227 - /^Res0/ && (block == "Sysreg" || block == "SysregFields") { 211 + /^Res0/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 228 212 expect_fields(2) 229 213 parse_bitdef(reg, "RES0", $2) 230 214 field = "RES0_" msb "_" lsb ··· 234 218 next 235 219 } 236 220 237 - /^Res1/ && (block == "Sysreg" || block == "SysregFields") { 221 + /^Res1/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 238 222 expect_fields(2) 239 223 parse_bitdef(reg, "RES1", $2) 240 224 field = "RES1_" msb "_" lsb ··· 244 228 next 245 229 } 246 230 247 - /^Unkn/ && (block == "Sysreg" || block == "SysregFields") { 231 + /^Unkn/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 248 232 expect_fields(2) 249 233 parse_bitdef(reg, "UNKN", $2) 250 234 field = "UNKN_" msb "_" lsb ··· 254 238 next 255 239 } 256 240 257 - /^Field/ && (block == "Sysreg" || block == "SysregFields") { 241 + /^Field/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 258 242 expect_fields(3) 259 243 field = $3 260 244 parse_bitdef(reg, field, $2) ··· 265 249 next 266 250 } 267 251 268 - /^Raz/ && (block == "Sysreg" || block == "SysregFields") { 252 + /^Raz/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 269 253 expect_fields(2) 270 254 parse_bitdef(reg, field, $2) 271 255 272 256 next 273 257 } 274 258 275 - /^SignedEnum/ { 276 - change_block("Enum<", "Sysreg", "Enum") 259 + /^SignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 260 + block_push("Enum") 261 + 277 262 expect_fields(3) 278 263 field = $3 279 264 parse_bitdef(reg, field, $2) ··· 285 268 next 286 269 } 287 270 288 - /^UnsignedEnum/ { 289 - change_block("Enum<", "Sysreg", "Enum") 271 + /^UnsignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 272 + block_push("Enum") 273 + 290 274 expect_fields(3) 291 275 field = $3 292 276 parse_bitdef(reg, field, $2) ··· 298 280 next 299 281 } 300 282 301 - /^Enum/ { 302 - change_block("Enum", "Sysreg", "Enum") 283 + /^Enum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") { 284 + block_push("Enum") 285 + 303 286 expect_fields(3) 304 287 field = $3 305 288 parse_bitdef(reg, field, $2) ··· 310 291 next 311 292 } 312 293 313 - /^EndEnum/ { 314 - change_block("EndEnum", "Enum", "Sysreg") 294 + /^EndEnum/ && block_current() == "Enum" { 295 + 315 296 field = null 316 297 msb = null 317 298 lsb = null 318 299 print "" 300 + 301 + block_pop() 319 302 next 320 303 } 321 304 322 - /0b[01]+/ && block == "Enum" { 305 + /0b[01]+/ && block_current() == "Enum" { 323 306 expect_fields(2) 324 307 val = $1 325 308 name = $2
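The awk rewrite replaces the single "block" variable with a depth-indexed stack, so Enum blocks can now appear inside either Sysreg or SysregFields, and a mismatched terminator is caught at any depth. A standalone C model of the same block_push()/block_pop()/block_current() discipline:

	#include <stdio.h>

	static const char *stack[16] = { "Root" };
	static int depth;

	static void block_push(const char *b) { stack[++depth] = b; }

	static int block_pop(void)
	{
		if (depth == 0)
			return -1;	/* fatal("block_pop() in root block") */
		depth--;
		return 0;
	}

	static const char *block_current(void) { return stack[depth]; }

	int main(void)
	{
		block_push("Sysreg");		/* Sysreg ... */
		block_push("Enum");		/* UnsignedEnum ... */
		printf("in %s\n", block_current());
		block_pop();			/* EndEnum */
		printf("back in %s\n", block_current());
		block_pop();			/* EndSysreg */
		printf("nesting balanced: %s\n", depth == 0 ? "yes" : "no");
		return 0;
	}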
+164 -1
arch/arm64/tools/sysreg
··· 879 879 EndSysreg 880 880 881 881 Sysreg ID_AA64PFR1_EL1 3 0 0 4 1 882 - Res0 63:40 882 + UnsignedEnum 63:60 PFAR 883 + 0b0000 NI 884 + 0b0001 IMP 885 + EndEnum 886 + UnsignedEnum 59:56 DF2 887 + 0b0000 NI 888 + 0b0001 IMP 889 + EndEnum 890 + UnsignedEnum 55:52 MTEX 891 + 0b0000 MTE 892 + 0b0001 MTE4 893 + EndEnum 894 + UnsignedEnum 51:48 THE 895 + 0b0000 NI 896 + 0b0001 IMP 897 + EndEnum 898 + UnsignedEnum 47:44 GCS 899 + 0b0000 NI 900 + 0b0001 IMP 901 + EndEnum 902 + Enum 43:40 MTE_frac 903 + 0b0000 ASYNC 904 + 0b1111 NI 905 + EndEnum 883 906 UnsignedEnum 39:36 NMI 884 907 0b0000 NI 885 908 0b0001 IMP ··· 1887 1864 Res0 63:2 1888 1865 Field 1 ZA 1889 1866 Field 0 SM 1867 + EndSysreg 1868 + 1869 + SysregFields HFGxTR_EL2 1870 + Field 63 nAMAIR2_EL1 1871 + Field 62 nMAIR2_EL1 1872 + Field 61 nS2POR_EL1 1873 + Field 60 nPOR_EL1 1874 + Field 59 nPOR_EL0 1875 + Field 58 nPIR_EL1 1876 + Field 57 nPIRE0_EL1 1877 + Field 56 nRCWMASK_EL1 1878 + Field 55 nTPIDR2_EL0 1879 + Field 54 nSMPRI_EL1 1880 + Field 53 nGCS_EL1 1881 + Field 52 nGCS_EL0 1882 + Res0 51 1883 + Field 50 nACCDATA_EL1 1884 + Field 49 ERXADDR_EL1 1885 + Field 48 ERXPFGCDN_EL1 1886 + Field 47 ERXPFGCTL_EL1 1887 + Field 46 ERXPFGF_EL1 1888 + Field 45 ERXMISCn_EL1 1889 + Field 44 ERXSTATUS_EL1 1890 + Field 43 ERXCTLR_EL1 1891 + Field 42 ERXFR_EL1 1892 + Field 41 ERRSELR_EL1 1893 + Field 40 ERRIDR_EL1 1894 + Field 39 ICC_IGRPENn_EL1 1895 + Field 38 VBAR_EL1 1896 + Field 37 TTBR1_EL1 1897 + Field 36 TTBR0_EL1 1898 + Field 35 TPIDR_EL0 1899 + Field 34 TPIDRRO_EL0 1900 + Field 33 TPIDR_EL1 1901 + Field 32 TCR_EL1 1902 + Field 31 SCTXNUM_EL0 1903 + Field 30 SCTXNUM_EL1 1904 + Field 29 SCTLR_EL1 1905 + Field 28 REVIDR_EL1 1906 + Field 27 PAR_EL1 1907 + Field 26 MPIDR_EL1 1908 + Field 25 MIDR_EL1 1909 + Field 24 MAIR_EL1 1910 + Field 23 LORSA_EL1 1911 + Field 22 LORN_EL1 1912 + Field 21 LORID_EL1 1913 + Field 20 LOREA_EL1 1914 + Field 19 LORC_EL1 1915 + Field 18 ISR_EL1 1916 + Field 17 FAR_EL1 1917 + Field 16 ESR_EL1 1918 + Field 15 DCZID_EL0 1919 + Field 14 CTR_EL0 1920 + Field 13 CSSELR_EL1 1921 + Field 12 CPACR_EL1 1922 + Field 11 CONTEXTIDR_EL1 1923 + Field 10 CLIDR_EL1 1924 + Field 9 CCSIDR_EL1 1925 + Field 8 APIBKey 1926 + Field 7 APIAKey 1927 + Field 6 APGAKey 1928 + Field 5 APDBKey 1929 + Field 4 APDAKey 1930 + Field 3 AMAIR_EL1 1931 + Field 2 AIDR_EL1 1932 + Field 1 AFSR1_EL1 1933 + Field 0 AFSR0_EL1 1934 + EndSysregFields 1935 + 1936 + Sysreg HFGRTR_EL2 3 4 1 1 4 1937 + Fields HFGxTR_EL2 1938 + EndSysreg 1939 + 1940 + Sysreg HFGWTR_EL2 3 4 1 1 5 1941 + Fields HFGxTR_EL2 1942 + EndSysreg 1943 + 1944 + Sysreg HFGITR_EL2 3 4 1 1 6 1945 + Res0 63:61 1946 + Field 60 COSPRCTX 1947 + Field 59 nGCSEPP 1948 + Field 58 nGCSSTR_EL1 1949 + Field 57 nGCSPUSHM_EL1 1950 + Field 56 nBRBIALL 1951 + Field 55 nBRBINJ 1952 + Field 54 DCCVAC 1953 + Field 53 SVC_EL1 1954 + Field 52 SVC_EL0 1955 + Field 51 ERET 1956 + Field 50 CPPRCTX 1957 + Field 49 DVPRCTX 1958 + Field 48 CFPRCTX 1959 + Field 47 TLBIVAALE1 1960 + Field 46 TLBIVALE1 1961 + Field 45 TLBIVAAE1 1962 + Field 44 TLBIASIDE1 1963 + Field 43 TLBIVAE1 1964 + Field 42 TLBIVMALLE1 1965 + Field 41 TLBIRVAALE1 1966 + Field 40 TLBIRVALE1 1967 + Field 39 TLBIRVAAE1 1968 + Field 38 TLBIRVAE1 1969 + Field 37 TLBIRVAALE1IS 1970 + Field 36 TLBIRVALE1IS 1971 + Field 35 TLBIRVAAE1IS 1972 + Field 34 TLBIRVAE1IS 1973 + Field 33 TLBIVAALE1IS 1974 + Field 32 TLBIVALE1IS 1975 + Field 31 TLBIVAAE1IS 1976 + Field 30 TLBIASIDE1IS 1977 + Field 29 TLBIVAE1IS 1978 + Field 28 TLBIVMALLE1IS 1979 + Field 27
TLBIRVAALE1OS 1980 + Field 26 TLBIRVALE1OS 1981 + Field 25 TLBIRVAAE1OS 1982 + Field 24 TLBIRVAE1OS 1983 + Field 23 TLBIVAALE1OS 1984 + Field 22 TLBIVALE1OS 1985 + Field 21 TLBIVAAE1OS 1986 + Field 20 TLBIASIDE1OS 1987 + Field 19 TLBIVAE1OS 1988 + Field 18 TLBIVMALLE1OS 1989 + Field 17 ATS1E1WP 1990 + Field 16 ATS1E1RP 1991 + Field 15 ATS1E0W 1992 + Field 14 ATS1E0R 1993 + Field 13 ATS1E1W 1994 + Field 12 ATS1E1R 1995 + Field 11 DCZVA 1996 + Field 10 DCCIVAC 1997 + Field 9 DCCVADP 1998 + Field 8 DCCVAP 1999 + Field 7 DCCVAU 2000 + Field 6 DCCISW 2001 + Field 5 DCCSW 2002 + Field 4 DCISW 2003 + Field 3 DCIVAC 2004 + Field 2 ICIVAU 2005 + Field 1 ICIALLU 2006 + Field 0 ICIALLUIS 1890 2007 EndSysreg 1891 2008 1892 2009 Sysreg ZCR_EL2 3 4 1 2 0
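For each field block, gen-sysreg.awk emits shift/mask/width constants plus one constant per enumeration value. A hedged illustration of roughly what the "UnsignedEnum 63:60 PFAR" block above turns into (the exact spellings in the generated asm/sysreg-defs.h may differ; GENMASK_ULL is defined locally here so the fragment stands alone):

	#define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

	#define ID_AA64PFR1_EL1_PFAR_SHIFT	60
	#define ID_AA64PFR1_EL1_PFAR_WIDTH	4
	#define ID_AA64PFR1_EL1_PFAR_MASK	GENMASK_ULL(63, 60)
	#define ID_AA64PFR1_EL1_PFAR_NI		0x0	/* 0b0000 NI  */
	#define ID_AA64PFR1_EL1_PFAR_IMP	0x1	/* 0b0001 IMP */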
+5
arch/s390/kernel/mcount.S
··· 32 32 BR_EX %r14 33 33 ENDPROC(ftrace_stub) 34 34 35 + SYM_CODE_START(ftrace_stub_direct_tramp) 36 + lgr %r1, %r0 37 + BR_EX %r1 38 + SYM_CODE_END(ftrace_stub_direct_tramp) 39 + 35 40 .macro ftrace_regs_entry, allregs=0 36 41 stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller 37 42
+5
arch/x86/kernel/ftrace_32.S
··· 163 163 jmp .Lftrace_ret 164 164 SYM_CODE_END(ftrace_regs_caller) 165 165 166 + SYM_FUNC_START(ftrace_stub_direct_tramp) 167 + CALL_DEPTH_ACCOUNT 168 + RET 169 + SYM_FUNC_END(ftrace_stub_direct_tramp) 170 + 166 171 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 167 172 SYM_CODE_START(ftrace_graph_caller) 168 173 pushl %eax
+4
arch/x86/kernel/ftrace_64.S
··· 309 309 SYM_FUNC_END(ftrace_regs_caller) 310 310 STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller) 311 311 312 + SYM_FUNC_START(ftrace_stub_direct_tramp) 313 + CALL_DEPTH_ACCOUNT 314 + RET 315 + SYM_FUNC_END(ftrace_stub_direct_tramp) 312 316 313 317 #else /* ! CONFIG_DYNAMIC_FTRACE */ 314 318
+10 -3
drivers/acpi/arm64/agdi.c
··· 64 64 int err, i; 65 65 66 66 err = sdei_event_disable(adata->sdei_event); 67 - if (err) 68 - return err; 67 + if (err) { 68 + dev_err(&pdev->dev, "Failed to disable sdei-event #%d (%pe)\n", 69 + adata->sdei_event, ERR_PTR(err)); 70 + return 0; 71 + } 69 72 70 73 for (i = 0; i < 3; i++) { 71 74 err = sdei_event_unregister(adata->sdei_event); ··· 78 75 schedule(); 79 76 } 80 77 81 - return err; 78 + if (err) 79 + dev_err(&pdev->dev, "Failed to unregister sdei-event #%d (%pe)\n", 80 + adata->sdei_event, ERR_PTR(err)); 81 + 82 + return 0; 82 83 } 83 84 84 85 static struct platform_driver agdi_driver = {
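The driver core ignores the return value of .remove() and tears the device down regardless, so the change above logs the failure with %pe, which prints a symbolic errno for an ERR_PTR(), and returns 0. A standalone model of that symbolic-errno reporting (errname() here stands in for the kernel's %pe machinery; the event number is illustrative):

	#include <errno.h>
	#include <stdio.h>

	static const char *errname(int err)
	{
		switch (-err) {
		case EIO:		return "-EIO";
		case EINPROGRESS:	return "-EINPROGRESS";
		default:		return "-E<unknown>";
		}
	}

	int main(void)
	{
		int sdei_event = 42;	/* illustrative event number */
		int err = -EIO;		/* pretend sdei_event_disable() failed */

		if (err)
			fprintf(stderr, "Failed to disable sdei-event #%d (%s)\n",
				sdei_event, errname(err));
		return 0;		/* .remove() reports, but never "fails" */
	}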
+20 -17
drivers/firmware/arm_sdei.c
··· 43 43 /* entry point from firmware to arch asm code */ 44 44 static unsigned long sdei_entry_point; 45 45 46 + static int sdei_hp_state; 47 + 46 48 struct sdei_event { 47 49 /* These three are protected by the sdei_list_lock */ 48 50 struct list_head list; ··· 303 301 { 304 302 int err; 305 303 306 - WARN_ON_ONCE(preemptible()); 307 - 308 304 err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL); 309 305 if (err && err != -EIO) { 310 306 pr_warn_once("failed to mask CPU[%u]: %d\n", ··· 315 315 316 316 static void _ipi_mask_cpu(void *ignored) 317 317 { 318 + WARN_ON_ONCE(preemptible()); 318 319 sdei_mask_local_cpu(); 319 320 } 320 321 321 322 int sdei_unmask_local_cpu(void) 322 323 { 323 324 int err; 324 - 325 - WARN_ON_ONCE(preemptible()); 326 325 327 326 err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL); 328 327 if (err && err != -EIO) { ··· 335 336 336 337 static void _ipi_unmask_cpu(void *ignored) 337 338 { 339 + WARN_ON_ONCE(preemptible()); 338 340 sdei_unmask_local_cpu(); 339 341 } 340 342 341 343 static void _ipi_private_reset(void *ignored) 342 344 { 343 345 int err; 346 + 347 + WARN_ON_ONCE(preemptible()); 344 348 345 349 err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0, 346 350 NULL); ··· 390 388 { 391 389 int err; 392 390 struct sdei_crosscall_args *arg = data; 393 - 394 - WARN_ON_ONCE(preemptible()); 395 391 396 392 err = sdei_api_event_enable(arg->event->event_num); 397 393 ··· 479 479 int err; 480 480 struct sdei_crosscall_args *arg = data; 481 481 482 - WARN_ON_ONCE(preemptible()); 483 - 484 482 err = sdei_api_event_unregister(arg->event->event_num); 485 483 486 484 sdei_cross_call_return(arg, err); ··· 558 560 int err; 559 561 struct sdei_registered_event *reg; 560 562 struct sdei_crosscall_args *arg = data; 561 - 562 - WARN_ON(preemptible()); 563 563 564 564 reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id()); 565 565 err = sdei_api_event_register(arg->event->event_num, sdei_entry_point, ··· 713 717 { 714 718 int rv; 715 719 720 + WARN_ON_ONCE(preemptible()); 721 + 716 722 switch (action) { 717 723 case CPU_PM_ENTER: 718 724 rv = sdei_mask_local_cpu(); ··· 763 765 int err; 764 766 765 767 /* unregister private events */ 766 - cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING); 768 + cpuhp_remove_state(sdei_hp_state); 767 769 768 770 err = sdei_unregister_shared(); 769 771 if (err) ··· 784 786 return err; 785 787 } 786 788 787 - err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI", 789 + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", 788 790 &sdei_cpuhp_up, &sdei_cpuhp_down); 789 - if (err) 791 + if (err < 0) { 790 792 pr_warn("Failed to re-register CPU hotplug notifier...\n"); 793 + return err; 794 + } 791 795 792 - return err; 796 + sdei_hp_state = err; 797 + return 0; 793 798 } 794 799 795 800 static int sdei_device_restore(struct device *dev) ··· 824 823 * We are going to reset the interface, after this there is no point 825 824 * doing work when we take CPUs offline.
826 825 */ 827 - cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING); 826 + cpuhp_remove_state(sdei_hp_state); 828 827 829 828 sdei_platform_reset(); 830 829 ··· 1004 1003 goto remove_cpupm; 1005 1004 } 1006 1005 1007 - err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI", 1006 + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", 1008 1007 &sdei_cpuhp_up, &sdei_cpuhp_down); 1009 - if (err) { 1008 + if (err < 0) { 1010 1009 pr_warn("Failed to register CPU hotplug notifier...\n"); 1011 1010 goto remove_reboot; 1012 1011 } 1012 + 1013 + sdei_hp_state = err; 1013 1014 1014 1015 return 0; 1015 1016
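The conversion from the fixed CPUHP_AP_ARM_SDEI_STARTING state to CPUHP_AP_ONLINE_DYN is why the error check becomes "err < 0": for dynamic states, cpuhp_setup_state() returns the allocated state number on success, and that number (kept in sdei_hp_state) is what must later be handed to cpuhp_remove_state(). A standalone model of the pattern, with the hotplug core reduced to a counter:

	#include <stdio.h>

	#define CPUHP_AP_ONLINE_DYN	(-1)	/* "allocate a slot for me" */

	static int next_dyn_state = 100;	/* illustrative dynamic range */
	static int sdei_hp_state;

	static int cpuhp_setup_state(int state, const char *name)
	{
		(void)name;
		if (state == CPUHP_AP_ONLINE_DYN)
			return next_dyn_state++;	/* > 0: the allocated id */
		return 0;				/* fixed states return 0 */
	}

	static void cpuhp_remove_state(int state)
	{
		printf("removing hotplug state %d\n", state);
	}

	int main(void)
	{
		int err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI");

		if (err < 0)		/* not 'if (err)': success is > 0 here */
			return 1;
		sdei_hp_state = err;
		cpuhp_remove_state(sdei_hp_state);
		return 0;
	}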
+10
drivers/perf/Kconfig
··· 100 100 through the SMMU and allow the resulting information to be filtered 101 101 based on the Stream ID of the corresponding master. 102 102 103 + config ARM_PMUV3 104 + depends on HW_PERF_EVENTS && ((ARM && CPU_V7) || ARM64) 105 + bool "ARM PMUv3 support" if !ARM64 106 + default ARM64 107 + help 108 + Say y if you want to use the ARM performance monitor unit (PMU) 109 + version 3. The PMUv3 is the CPU performance monitors on ARMv8 110 + (aarch32 and aarch64) systems that implement the PMUv3 111 + architecture. 112 + 103 113 config ARM_DSU_PMU 104 114 tristate "ARM DynamIQ Shared Unit (DSU) PMU" 105 115 depends on ARM64
+1
drivers/perf/Makefile
··· 5 5 obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o 6 6 obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o 7 7 obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o 8 + obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o 8 9 obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o 9 10 obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o 10 11 obj-$(CONFIG_HISI_PMU) += hisilicon/
+1 -2
drivers/perf/alibaba_uncore_drw_pmu.c
··· 656 656 drw_pmu->dev = &pdev->dev; 657 657 platform_set_drvdata(pdev, drw_pmu); 658 658 659 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 660 - drw_pmu->cfg_base = devm_ioremap_resource(&pdev->dev, res); 659 + drw_pmu->cfg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 661 660 if (IS_ERR(drw_pmu->cfg_base)) 662 661 return PTR_ERR(drw_pmu->cfg_base); 663 662
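This change, like the arm_dmc620 and qcom_l3 ones below, folds the get-resource/ioremap pair into a single call; the helper also hands the struct resource back through its last argument for drivers that still need it. A kernel-style fragment of the equivalence (not standalone):

	struct resource *res;
	void __iomem *base;

	/* before: two calls */
	res  = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* after: one call; the resource is still returned via &res */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);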
+6 -2
drivers/perf/amlogic/meson_ddr_pmu_core.c
··· 156 156 u64 config2 = event->attr.config2; 157 157 int i; 158 158 159 - for_each_set_bit(i, (const unsigned long *)&config1, sizeof(config1)) 159 + for_each_set_bit(i, 160 + (const unsigned long *)&config1, 161 + BITS_PER_TYPE(config1)) 160 162 meson_ddr_set_axi_filter(event, i); 161 163 162 - for_each_set_bit(i, (const unsigned long *)&config2, sizeof(config2)) 164 + for_each_set_bit(i, 165 + (const unsigned long *)&config2, 166 + BITS_PER_TYPE(config2)) 163 167 meson_ddr_set_axi_filter(event, i + 64); 164 168 165 169 if (flags & PERF_EF_START)
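The bug fixed above is a bytes-versus-bits confusion: sizeof(config1) is 8, so the filter loop only ever examined bit positions 0-7 and silently ignored any AXI filter selected by a higher bit. A standalone demonstration with both loop bounds written out:

	#include <stdint.h>
	#include <stdio.h>

	#define BITS_PER_TYPE(t) (sizeof(t) * 8)

	int main(void)
	{
		uint64_t config = 1ULL << 20;	/* a filter bit above bit 7 */
		int hits_bytes = 0, hits_bits = 0;

		for (unsigned i = 0; i < sizeof(config); i++)	/* old bound: 8 */
			hits_bytes += (config >> i) & 1;
		for (unsigned i = 0; i < BITS_PER_TYPE(config); i++) /* new: 64 */
			hits_bits += (config >> i) & 1;

		printf("seen with sizeof bound: %d, with BITS_PER_TYPE: %d\n",
		       hits_bytes, hits_bits);	/* prints 0 vs 1 */
		return 0;
	}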
+14 -1
drivers/perf/apple_m1_cpu_pmu.c
··· 559 559 return m1_pmu_init(cpu_pmu); 560 560 } 561 561 562 + static int m2_pmu_avalanche_init(struct arm_pmu *cpu_pmu) 563 + { 564 + cpu_pmu->name = "apple_avalanche_pmu"; 565 + return m1_pmu_init(cpu_pmu); 566 + } 567 + 568 + static int m2_pmu_blizzard_init(struct arm_pmu *cpu_pmu) 569 + { 570 + cpu_pmu->name = "apple_blizzard_pmu"; 571 + return m1_pmu_init(cpu_pmu); 572 + } 573 + 562 574 static const struct of_device_id m1_pmu_of_device_ids[] = { 575 + { .compatible = "apple,avalanche-pmu", .data = m2_pmu_avalanche_init, }, 576 + { .compatible = "apple,blizzard-pmu", .data = m2_pmu_blizzard_init, }, 563 577 { .compatible = "apple,icestorm-pmu", .data = m1_pmu_ice_init, }, 564 578 { .compatible = "apple,firestorm-pmu", .data = m1_pmu_fire_init, }, 565 579 { }, ··· 595 581 }; 596 582 597 583 module_platform_driver(m1_pmu_driver); 598 - MODULE_LICENSE("GPL v2");
+31 -28
drivers/perf/arm-cmn.c
··· 57 57 #define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0) 58 58 59 59 /* XPs also have some local topology info which has uses too */ 60 - #define CMN_MXP__CONNECT_INFO_P0 0x0008 61 - #define CMN_MXP__CONNECT_INFO_P1 0x0010 62 - #define CMN_MXP__CONNECT_INFO_P2 0x0028 63 - #define CMN_MXP__CONNECT_INFO_P3 0x0030 64 - #define CMN_MXP__CONNECT_INFO_P4 0x0038 65 - #define CMN_MXP__CONNECT_INFO_P5 0x0040 60 + #define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p)) 66 61 #define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0) 62 + 63 + #define CMN_MAX_PORTS 6 64 + #define CI700_CONNECT_INFO_P2_5_OFFSET 0x10 67 65 68 66 /* PMU registers occupy the 3rd 4KB page of each node's region */ 69 67 #define CMN_PMU_OFFSET 0x2000 ··· 164 166 #define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config) 165 167 #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config) 166 168 167 - #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24) 169 + #define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27) 168 170 #define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48) 169 171 #define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51) 170 172 /* Note that we don't yet support the tertiary match group on newer IPs */ ··· 394 396 return NULL; 395 397 } 396 398 399 + static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn, 400 + const struct arm_cmn_node *xp, int port) 401 + { 402 + int offset = CMN_MXP__CONNECT_INFO(port); 403 + 404 + if (port >= 2) { 405 + if (cmn->model & (CMN600 | CMN650)) 406 + return 0; 407 + /* 408 + * CI-700 may have extra ports, but still has the 409 + * mesh_port_connect_info registers in the way. 410 + */ 411 + if (cmn->model == CI700) 412 + offset += CI700_CONNECT_INFO_P2_5_OFFSET; 413 + } 414 + 415 + return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset); 416 + } 417 + 397 418 static struct dentry *arm_cmn_debugfs; 398 419 399 420 #ifdef CONFIG_DEBUG_FS ··· 486 469 y = cmn->mesh_y; 487 470 while (y--) { 488 471 int xp_base = cmn->mesh_x * y; 489 - u8 port[6][CMN_MAX_DIMENSION]; 472 + u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION]; 490 473 491 474 for (x = 0; x < cmn->mesh_x; x++) 492 475 seq_puts(s, "--------+"); ··· 494 477 seq_printf(s, "\n%d |", y); 495 478 for (x = 0; x < cmn->mesh_x; x++) { 496 479 struct arm_cmn_node *xp = cmn->xps + xp_base + x; 497 - void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET; 498 480 499 - port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0); 500 - port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1); 501 - port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2); 502 - port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3); 503 - port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4); 504 - port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5); 481 + for (p = 0; p < CMN_MAX_PORTS; p++) 482 + port[p][x] = arm_cmn_device_connect_info(cmn, xp, p); 505 483 seq_printf(s, " XP #%-2d |", xp_base + x); 506 484 } 507 485 ··· 1558 1546 type = CMN_EVENT_TYPE(event); 1559 1547 /* DTC events (i.e. cycles) already have everything they need */ 1560 1548 if (type == CMN_TYPE_DTC) 1561 - return 0; 1549 + return arm_cmn_validate_group(cmn, event); 1562 1550 1563 1551 eventid = CMN_EVENT_EVENTID(event); 1564 1552 /* For watchpoints we need the actual XP node here */ ··· 2095 2083 * from this, since in that case we will see at least one XP 2096 2084 * with port 2 connected, for the HN-D. 
2097 2085 */ 2098 - if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0)) 2099 - xp_ports |= BIT(0); 2100 - if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1)) 2101 - xp_ports |= BIT(1); 2102 - if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2)) 2103 - xp_ports |= BIT(2); 2104 - if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3)) 2105 - xp_ports |= BIT(3); 2106 - if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4)) 2107 - xp_ports |= BIT(4); 2108 - if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5)) 2109 - xp_ports |= BIT(5); 2086 + for (int p = 0; p < CMN_MAX_PORTS; p++) 2087 + if (arm_cmn_device_connect_info(cmn, xp, p)) 2088 + xp_ports |= BIT(p); 2110 2089 2111 2090 if (cmn->multi_dtm && (xp_ports & 0xc)) 2112 2091 arm_cmn_init_dtm(dtm++, xp, 1);
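The port-detection fix centralises the register lookup: connect-info registers for ports 2-5 simply do not exist on CMN-600/650 (reading that region misdetects ports), and CI-700 has them displaced by 0x10. A standalone model of the corrected offset calculation, reusing the constants defined above:

	#include <stdio.h>

	enum model { CMN600 = 1, CMN650 = 2, CMN700 = 4, CI700 = 8 };

	#define CMN_MXP__CONNECT_INFO(p)	(0x0008 + 8 * (p))
	#define CI700_CONNECT_INFO_P2_5_OFFSET	0x10

	static int connect_info_offset(enum model m, int port)
	{
		int offset = CMN_MXP__CONNECT_INFO(port);

		if (port >= 2) {
			if (m & (CMN600 | CMN650))
				return -1;	/* register does not exist */
			if (m == CI700)
				offset += CI700_CONNECT_INFO_P2_5_OFFSET;
		}
		return offset;
	}

	int main(void)
	{
		printf("CMN700 port 3: %#x\n", connect_info_offset(CMN700, 3));
		printf("CI700  port 3: %#x\n", connect_info_offset(CI700, 3));
		printf("CMN600 port 3: %d (absent)\n", connect_info_offset(CMN600, 3));
		return 0;
	}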
+4 -2
drivers/perf/arm_cspmu/arm_cspmu.c
··· 1078 1078 static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid) 1079 1079 { 1080 1080 u32 acpi_uid; 1081 - struct device *cpu_dev = get_cpu_device(cpu); 1082 - struct acpi_device *acpi_dev = ACPI_COMPANION(cpu_dev); 1081 + struct device *cpu_dev; 1082 + struct acpi_device *acpi_dev; 1083 1083 1084 + cpu_dev = get_cpu_device(cpu); 1084 1085 if (!cpu_dev) 1085 1086 return -ENODEV; 1086 1087 1088 + acpi_dev = ACPI_COMPANION(cpu_dev); 1087 1089 while (acpi_dev) { 1088 1090 if (!strcmp(acpi_device_hid(acpi_dev), 1089 1091 ACPI_PROCESSOR_CONTAINER_HID) &&
+1 -2
drivers/perf/arm_dmc620_pmu.c
··· 655 655 .attr_groups = dmc620_pmu_attr_groups, 656 656 }; 657 657 658 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 659 - dmc620_pmu->base = devm_ioremap_resource(&pdev->dev, res); 658 + dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 660 659 if (IS_ERR(dmc620_pmu->base)) 661 660 return PTR_ERR(dmc620_pmu->base); 662 661
+1 -1
drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
··· 316 316 if (!name) 317 317 return -ENOMEM; 318 318 319 - hisi_pmu_init(cpa_pmu, name, THIS_MODULE); 319 + hisi_pmu_init(cpa_pmu, THIS_MODULE); 320 320 321 321 /* Power Management should be disabled before using CPA PMU. */ 322 322 hisi_cpa_pmu_disable_pm(cpa_pmu);
+11 -8
drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
··· 499 499 if (ret) 500 500 return ret; 501 501 502 - ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, 503 - &ddrc_pmu->node); 504 - if (ret) { 505 - dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); 506 - return ret; 507 - } 508 - 509 502 if (ddrc_pmu->identifier >= HISI_PMU_V2) 510 503 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, 511 504 "hisi_sccl%u_ddrc%u_%u", ··· 509 516 "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id, 510 517 ddrc_pmu->index_id); 511 518 512 - hisi_pmu_init(ddrc_pmu, name, THIS_MODULE); 519 + if (!name) 520 + return -ENOMEM; 521 + 522 + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, 523 + &ddrc_pmu->node); 524 + if (ret) { 525 + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); 526 + return ret; 527 + } 528 + 529 + hisi_pmu_init(ddrc_pmu, THIS_MODULE); 513 530 514 531 ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); 515 532 if (ret) {
+6 -3
drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
··· 510 510 if (ret) 511 511 return ret; 512 512 513 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", 514 + hha_pmu->sccl_id, hha_pmu->index_id); 515 + if (!name) 516 + return -ENOMEM; 517 + 513 518 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, 514 519 &hha_pmu->node); 515 520 if (ret) { ··· 522 517 return ret; 523 518 } 524 519 525 - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", 526 - hha_pmu->sccl_id, hha_pmu->index_id); 527 - hisi_pmu_init(hha_pmu, name, THIS_MODULE); 520 + hisi_pmu_init(hha_pmu, THIS_MODULE); 528 521 529 522 ret = perf_pmu_register(&hha_pmu->pmu, name, -1); 530 523 if (ret) {
+6 -7
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
··· 544 544 if (ret) 545 545 return ret; 546 546 547 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", 548 + l3c_pmu->sccl_id, l3c_pmu->ccl_id); 549 + if (!name) 550 + return -ENOMEM; 551 + 547 552 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, 548 553 &l3c_pmu->node); 549 554 if (ret) { ··· 556 551 return ret; 557 552 } 558 553 559 - /* 560 - * CCL_ID is used to identify the L3C in the same SCCL which was 561 - * used _UID by mistake. 562 - */ 563 - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", 564 - l3c_pmu->sccl_id, l3c_pmu->ccl_id); 565 - hisi_pmu_init(l3c_pmu, name, THIS_MODULE); 554 + hisi_pmu_init(l3c_pmu, THIS_MODULE); 566 555 567 556 ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); 568 557 if (ret) {
+1 -1
drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
··· 412 412 return ret; 413 413 } 414 414 415 - hisi_pmu_init(pa_pmu, name, THIS_MODULE); 415 + hisi_pmu_init(pa_pmu, THIS_MODULE); 416 416 ret = perf_pmu_register(&pa_pmu->pmu, name, -1); 417 417 if (ret) { 418 418 dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
+1 -3
drivers/perf/hisilicon/hisi_uncore_pmu.c
··· 531 531 } 532 532 EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); 533 533 534 - void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name, 535 - struct module *module) 534 + void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module) 536 535 { 537 536 struct pmu *pmu = &hisi_pmu->pmu; 538 537 539 - pmu->name = name; 540 538 pmu->module = module; 541 539 pmu->task_ctx_nr = perf_invalid_context; 542 540 pmu->event_init = hisi_uncore_pmu_event_init;
+1 -2
drivers/perf/hisilicon/hisi_uncore_pmu.h
··· 121 121 int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, 122 122 struct platform_device *pdev); 123 123 124 - void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name, 125 - struct module *module); 124 + void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module); 126 125 #endif /* __HISI_UNCORE_PMU_H__ */
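Dropping the name argument works because perf_pmu_register() stores the name it is passed, so assigning pmu->name in hisi_pmu_init() beforehand was redundant. Kernel-style fragment of the resulting call sites (not standalone):

	hisi_pmu_init(sllc_pmu, THIS_MODULE);		/* no longer takes name */
	ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); /* this sets pmu->name */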
+1 -1
drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
··· 445 445 return ret; 446 446 } 447 447 448 - hisi_pmu_init(sllc_pmu, name, THIS_MODULE); 448 + hisi_pmu_init(sllc_pmu, THIS_MODULE); 449 449 450 450 ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); 451 451 if (ret) {
+1 -2
drivers/perf/qcom_l3_pmu.c
··· 763 763 .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 764 764 }; 765 765 766 - memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); 767 - l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc); 766 + l3pmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc); 768 767 if (IS_ERR(l3pmu->regs)) 769 768 return PTR_ERR(l3pmu->regs); 770 769
+1 -1
include/kvm/arm_pmu.h
··· 8 8 #define __ASM_ARM_KVM_PMU_H 9 9 10 10 #include <linux/perf_event.h> 11 - #include <asm/perf_event.h> 11 + #include <linux/perf/arm_pmuv3.h> 12 12 13 13 #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) 14 14
-1
include/linux/cpuhotplug.h
··· 163 163 CPUHP_AP_PERF_X86_CSTATE_STARTING, 164 164 CPUHP_AP_PERF_XTENSA_STARTING, 165 165 CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, 166 - CPUHP_AP_ARM_SDEI_STARTING, 167 166 CPUHP_AP_ARM_VFP_STARTING, 168 167 CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, 169 168 CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+21 -40
include/linux/ftrace.h
··· 241 241 FTRACE_OPS_FL_DIRECT = BIT(17), 242 242 }; 243 243 244 + #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS 245 + #define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS 246 + #else 247 + #define FTRACE_OPS_FL_SAVE_ARGS 0 248 + #endif 249 + 244 250 /* 245 251 * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes 246 252 * to a ftrace_ops. Note, the requests may fail. ··· 327 321 unsigned long trampoline_size; 328 322 struct list_head list; 329 323 ftrace_ops_func_t ops_func; 324 + #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 325 + unsigned long direct_call; 326 + #endif 330 327 #endif 331 328 }; 332 329 ··· 406 397 407 398 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 408 399 extern int ftrace_direct_func_count; 409 - int register_ftrace_direct(unsigned long ip, unsigned long addr); 410 - int unregister_ftrace_direct(unsigned long ip, unsigned long addr); 411 - int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr); 412 - struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr); 413 - int ftrace_modify_direct_caller(struct ftrace_func_entry *entry, 414 - struct dyn_ftrace *rec, 415 - unsigned long old_addr, 416 - unsigned long new_addr); 417 400 unsigned long ftrace_find_rec_direct(unsigned long ip); 418 - int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr); 419 - int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr); 420 - int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr); 421 - int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr); 401 + int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr); 402 + int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, 403 + bool free_filters); 404 + int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr); 405 + int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr); 406 + 407 + void ftrace_stub_direct_tramp(void); 422 408 423 409 #else 424 410 struct ftrace_ops; 425 411 # define ftrace_direct_func_count 0 426 - static inline int register_ftrace_direct(unsigned long ip, unsigned long addr) 427 - { 428 - return -ENOTSUPP; 429 - } 430 - static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr) 431 - { 432 - return -ENOTSUPP; 433 - } 434 - static inline int modify_ftrace_direct(unsigned long ip, 435 - unsigned long old_addr, unsigned long new_addr) 436 - { 437 - return -ENOTSUPP; 438 - } 439 - static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) 440 - { 441 - return NULL; 442 - } 443 - static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry, 444 - struct dyn_ftrace *rec, 445 - unsigned long old_addr, 446 - unsigned long new_addr) 447 - { 448 - return -ENODEV; 449 - } 450 412 static inline unsigned long ftrace_find_rec_direct(unsigned long ip) 451 413 { 452 414 return 0; 453 415 } 454 - static inline int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr) 416 + static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 455 417 { 456 418 return -ENODEV; 457 419 } 458 - static inline int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr) 420 + static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, 421 + bool free_filters) 459 422 { 460 423 return -ENODEV; 461 424 } 462 - static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, 
unsigned long addr) 425 + static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) 463 426 { 464 427 return -ENODEV; 465 428 } 466 - static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr) 429 + static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) 467 430 { 468 431 return -ENODEV; 469 432 }
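The direct-call interface now hangs everything off a ftrace_ops: the ops carries a single trampoline address (stored in ->direct_call) for every location in its filter, replacing the old per-ip register/unregister/modify calls. A hedged usage sketch in the style of samples/ftrace (my_func and my_tramp are assumptions for illustration; my_tramp must be an arch-level trampoline that preserves and restores the traced function's arguments, not a plain C function):

	#include <linux/ftrace.h>
	#include <linux/init.h>

	extern void my_func(void);	/* traced function, assumed */
	extern void my_tramp(void);	/* arch-specific direct trampoline, assumed */

	static struct ftrace_ops direct;

	static int __init direct_init(void)
	{
		int err = ftrace_set_filter_ip(&direct, (unsigned long)my_func, 0, 0);

		if (err)
			return err;
		return register_ftrace_direct(&direct, (unsigned long)my_tramp);
	}

	static void __exit direct_exit(void)
	{
		unregister_ftrace_direct(&direct, (unsigned long)my_tramp,
					 true /* free_filters */);
	}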
+303
include/linux/perf/arm_pmuv3.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + */ 5 + 6 + #ifndef __PERF_ARM_PMUV3_H 7 + #define __PERF_ARM_PMUV3_H 8 + 9 + #define ARMV8_PMU_MAX_COUNTERS 32 10 + #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) 11 + 12 + /* 13 + * Common architectural and microarchitectural event numbers. 14 + */ 15 + #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000 16 + #define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001 17 + #define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002 18 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003 19 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004 20 + #define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005 21 + #define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006 22 + #define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007 23 + #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008 24 + #define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009 25 + #define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A 26 + #define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B 27 + #define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C 28 + #define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D 29 + #define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E 30 + #define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F 31 + #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010 32 + #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011 33 + #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012 34 + #define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013 35 + #define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014 36 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015 37 + #define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016 38 + #define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017 39 + #define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018 40 + #define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019 41 + #define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A 42 + #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B 43 + #define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C 44 + #define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D 45 + #define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E 46 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F 47 + #define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020 48 + #define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021 49 + #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022 50 + #define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023 51 + #define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024 52 + #define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025 53 + #define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026 54 + #define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027 55 + #define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028 56 + #define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029 57 + #define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A 58 + #define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B 59 + #define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C 60 + #define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D 61 + #define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E 62 + #define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F 63 + #define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030 64 + #define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031 65 + #define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032 66 + #define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033 67 + #define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034 68 + #define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035 69 + #define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036 70 + #define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037 71 + #define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038 72 + #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039 73 + #define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A 74 + #define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B 75 + 
#define ARMV8_PMUV3_PERFCTR_STALL 0x003C 76 + #define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D 77 + #define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E 78 + #define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F 79 + 80 + /* Statistical profiling extension microarchitectural events */ 81 + #define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000 82 + #define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001 83 + #define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002 84 + #define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003 85 + 86 + /* AMUv1 architecture events */ 87 + #define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004 88 + #define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005 89 + 90 + /* long-latency read miss events */ 91 + #define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006 92 + #define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009 93 + #define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A 94 + #define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B 95 + 96 + /* Trace buffer events */ 97 + #define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C 98 + #define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E 99 + 100 + /* Trace unit events */ 101 + #define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010 102 + #define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011 103 + #define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012 104 + #define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013 105 + #define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018 106 + #define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019 107 + #define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A 108 + #define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B 109 + 110 + /* additional latency from alignment events */ 111 + #define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020 112 + #define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021 113 + #define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022 114 + 115 + /* Armv8.5 Memory Tagging Extension events */ 116 + #define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024 117 + #define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025 118 + #define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026 119 + 120 + /* ARMv8 recommended implementation defined event types */ 121 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040 122 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041 123 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042 124 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043 125 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044 126 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045 127 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046 128 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047 129 + #define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048 130 + 131 + #define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C 132 + #define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D 133 + #define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E 134 + #define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F 135 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050 136 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051 137 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052 138 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053 139 + 140 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056 141 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057 142 + #define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058 143 + 144 + #define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C 145 + #define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D 146 + #define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E 147 + #define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F 148 + #define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060 149 + #define 
ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061 150 + #define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062 151 + #define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063 152 + #define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064 153 + #define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065 154 + #define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066 155 + #define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067 156 + #define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068 157 + #define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069 158 + #define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A 159 + 160 + #define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C 161 + #define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D 162 + #define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E 163 + #define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F 164 + #define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070 165 + #define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071 166 + #define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072 167 + #define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073 168 + #define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074 169 + #define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075 170 + #define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076 171 + #define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077 172 + #define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078 173 + #define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079 174 + #define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A 175 + 176 + #define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C 177 + #define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D 178 + #define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E 179 + 180 + #define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081 181 + #define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082 182 + #define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083 183 + #define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084 184 + 185 + #define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086 186 + #define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087 187 + #define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088 188 + 189 + #define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A 190 + #define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B 191 + #define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C 192 + #define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D 193 + #define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E 194 + #define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F 195 + #define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090 196 + #define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091 197 + 198 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0 199 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1 200 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2 201 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3 202 + 203 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6 204 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7 205 + #define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8 206 + 207 + /* 208 + * Per-CPU PMCR: config reg 209 + */ 210 + #define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ 211 + #define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ 212 + #define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ 213 + #define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ 214 + #define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ 215 + #define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ 216 + #define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ 217 + #define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ 218 + #define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ 219 + #define ARMV8_PMU_PMCR_N_MASK 0x1f 220 + #define 
ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ 221 + 222 + /* 223 + * PMOVSR: counters overflow flag status reg 224 + */ 225 + #define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ 226 + #define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK 227 + 228 + /* 229 + * PMXEVTYPER: Event selection reg 230 + */ 231 + #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ 232 + #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ 233 + 234 + /* 235 + * Event filters for PMUv3 236 + */ 237 + #define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) 238 + #define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) 239 + #define ARMV8_PMU_INCLUDE_EL2 (1U << 27) 240 + 241 + /* 242 + * PMUSERENR: user enable reg 243 + */ 244 + #define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ 245 + #define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ 246 + #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ 247 + #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ 248 + #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ 249 + 250 + /* PMMIR_EL1.SLOTS mask */ 251 + #define ARMV8_PMU_SLOTS_MASK 0xff 252 + 253 + #define ARMV8_PMU_BUS_SLOTS_SHIFT 8 254 + #define ARMV8_PMU_BUS_SLOTS_MASK 0xff 255 + #define ARMV8_PMU_BUS_WIDTH_SHIFT 16 256 + #define ARMV8_PMU_BUS_WIDTH_MASK 0xf 257 + 258 + /* 259 + * This code is really good 260 + */ 261 + 262 + #define PMEVN_CASE(n, case_macro) \ 263 + case n: case_macro(n); break 264 + 265 + #define PMEVN_SWITCH(x, case_macro) \ 266 + do { \ 267 + switch (x) { \ 268 + PMEVN_CASE(0, case_macro); \ 269 + PMEVN_CASE(1, case_macro); \ 270 + PMEVN_CASE(2, case_macro); \ 271 + PMEVN_CASE(3, case_macro); \ 272 + PMEVN_CASE(4, case_macro); \ 273 + PMEVN_CASE(5, case_macro); \ 274 + PMEVN_CASE(6, case_macro); \ 275 + PMEVN_CASE(7, case_macro); \ 276 + PMEVN_CASE(8, case_macro); \ 277 + PMEVN_CASE(9, case_macro); \ 278 + PMEVN_CASE(10, case_macro); \ 279 + PMEVN_CASE(11, case_macro); \ 280 + PMEVN_CASE(12, case_macro); \ 281 + PMEVN_CASE(13, case_macro); \ 282 + PMEVN_CASE(14, case_macro); \ 283 + PMEVN_CASE(15, case_macro); \ 284 + PMEVN_CASE(16, case_macro); \ 285 + PMEVN_CASE(17, case_macro); \ 286 + PMEVN_CASE(18, case_macro); \ 287 + PMEVN_CASE(19, case_macro); \ 288 + PMEVN_CASE(20, case_macro); \ 289 + PMEVN_CASE(21, case_macro); \ 290 + PMEVN_CASE(22, case_macro); \ 291 + PMEVN_CASE(23, case_macro); \ 292 + PMEVN_CASE(24, case_macro); \ 293 + PMEVN_CASE(25, case_macro); \ 294 + PMEVN_CASE(26, case_macro); \ 295 + PMEVN_CASE(27, case_macro); \ 296 + PMEVN_CASE(28, case_macro); \ 297 + PMEVN_CASE(29, case_macro); \ 298 + PMEVN_CASE(30, case_macro); \ 299 + default: WARN(1, "Invalid PMEV* index\n"); \ 300 + } \ 301 + } while (0) 302 + 303 + #endif
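PMEVN_SWITCH() exists because the PMEVCNTR<n> accessors need the register name as a compile-time token, so a runtime counter index has to be expanded into one case per possible counter. A sketch mirroring how the arm64 arch header uses it (read_sysreg() and the pmevcntr<n>_el0 spelling are the arm64 forms; other users supply their own case_macro):

	#define RETURN_READ_PMEVCNTRN(n) \
		return read_sysreg(pmevcntr##n##_el0)

	static unsigned long read_pmevcntrn(int n)
	{
		PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
		return 0;
	}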
+6 -6
kernel/bpf/trampoline.c
··· 45 45 lockdep_assert_held_once(&tr->mutex); 46 46 47 47 /* Instead of updating the trampoline here, we propagate 48 - * -EAGAIN to register_ftrace_direct_multi(). Then we can 49 - * retry register_ftrace_direct_multi() after updating the 48 + * -EAGAIN to register_ftrace_direct(). Then we can 49 + * retry register_ftrace_direct() after updating the 50 50 * trampoline. 51 51 */ 52 52 if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) && ··· 198 198 int ret; 199 199 200 200 if (tr->func.ftrace_managed) 201 - ret = unregister_ftrace_direct_multi(tr->fops, (long)old_addr); 201 + ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false); 202 202 else 203 203 ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL); 204 204 ··· 215 215 216 216 if (tr->func.ftrace_managed) { 217 217 if (lock_direct_mutex) 218 - ret = modify_ftrace_direct_multi(tr->fops, (long)new_addr); 218 + ret = modify_ftrace_direct(tr->fops, (long)new_addr); 219 219 else 220 - ret = modify_ftrace_direct_multi_nolock(tr->fops, (long)new_addr); 220 + ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr); 221 221 } else { 222 222 ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr); 223 223 } ··· 243 243 244 244 if (tr->func.ftrace_managed) { 245 245 ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1); 246 - ret = register_ftrace_direct_multi(tr->fops, (long)new_addr); 246 + ret = register_ftrace_direct(tr->fops, (long)new_addr); 247 247 } else { 248 248 ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr); 249 249 }
+1 -1
kernel/trace/Kconfig
··· 257 257 258 258 config DYNAMIC_FTRACE_WITH_DIRECT_CALLS 259 259 def_bool y 260 - depends on DYNAMIC_FTRACE_WITH_REGS 260 + depends on DYNAMIC_FTRACE_WITH_REGS || DYNAMIC_FTRACE_WITH_ARGS 261 261 depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 262 262 263 263 config DYNAMIC_FTRACE_WITH_CALL_OPS
+26 -415
kernel/trace/ftrace.c
···
 static void call_direct_funcs(unsigned long ip, unsigned long pip,
 			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
-	unsigned long addr;
+	unsigned long addr = READ_ONCE(ops->direct_call);
 
-	addr = ftrace_find_rec_direct(ip);
 	if (!addr)
 		return;
 
 	arch_ftrace_set_direct_caller(fregs, addr);
 }
-
-static struct ftrace_ops direct_ops = {
-	.func		= call_direct_funcs,
-	.flags		= FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
-			  | FTRACE_OPS_FL_PERMANENT,
-	/*
-	 * By declaring the main trampoline as this trampoline
-	 * it will never have one allocated for it. Allocated
-	 * trampolines should not call direct functions.
-	 * The direct_ops should only be called by the builtin
-	 * ftrace_regs_caller trampoline.
-	 */
-	.trampoline	= FTRACE_REGS_ADDR,
-};
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 
 /**
···
 
 static LIST_HEAD(ftrace_direct_funcs);
 
-/**
- * ftrace_find_direct_func - test an address if it is a registered direct caller
- * @addr: The address of a registered direct caller
- *
- * This searches to see if a ftrace direct caller has been registered
- * at a specific address, and if so, it returns a descriptor for it.
- *
- * This can be used by architecture code to see if an address is
- * a direct caller (trampoline) attached to a fentry/mcount location.
- * This is useful for the function_graph tracer, as it may need to
- * do adjustments if it traced a location that also has a direct
- * trampoline attached to it.
- */
-struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
-{
-	struct ftrace_direct_func *entry;
-	bool found = false;
-
-	/* May be called by fgraph trampoline (protected by rcu tasks) */
-	list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
-		if (entry->addr == addr) {
-			found = true;
-			break;
-		}
-	}
-	if (found)
-		return entry;
-
-	return NULL;
-}
-
-static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
-{
-	struct ftrace_direct_func *direct;
-
-	direct = kmalloc(sizeof(*direct), GFP_KERNEL);
-	if (!direct)
-		return NULL;
-	direct->addr = addr;
-	direct->count = 0;
-	list_add_rcu(&direct->next, &ftrace_direct_funcs);
-	ftrace_direct_func_count++;
-	return direct;
-}
-
 static int register_ftrace_function_nolock(struct ftrace_ops *ops);
 
-/**
- * register_ftrace_direct - Call a custom trampoline directly
- * @ip: The address of the nop at the beginning of a function
- * @addr: The address of the trampoline to call at @ip
- *
- * This is used to connect a direct call from the nop location (@ip)
- * at the start of ftrace traced functions. The location that it calls
- * (@addr) must be able to handle a direct call, and save the parameters
- * of the function being traced, and restore them (or inject new ones
- * if needed), before returning.
- *
- * Returns:
- *  0 on success
- *  -EBUSY - Another direct function is already attached (there can be only one)
- *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
- *  -ENOMEM - There was an allocation failure.
- */
-int register_ftrace_direct(unsigned long ip, unsigned long addr)
-{
-	struct ftrace_direct_func *direct;
-	struct ftrace_func_entry *entry;
-	struct ftrace_hash *free_hash = NULL;
-	struct dyn_ftrace *rec;
-	int ret = -ENODEV;
-
-	mutex_lock(&direct_mutex);
-
-	ip = ftrace_location(ip);
-	if (!ip)
-		goto out_unlock;
-
-	/* See if there's a direct function at @ip already */
-	ret = -EBUSY;
-	if (ftrace_find_rec_direct(ip))
-		goto out_unlock;
-
-	ret = -ENODEV;
-	rec = lookup_rec(ip, ip);
-	if (!rec)
-		goto out_unlock;
-
-	/*
-	 * Check if the rec says it has a direct call but we didn't
-	 * find one earlier?
-	 */
-	if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
-		goto out_unlock;
-
-	/* Make sure the ip points to the exact record */
-	if (ip != rec->ip) {
-		ip = rec->ip;
-		/* Need to check this ip for a direct. */
-		if (ftrace_find_rec_direct(ip))
-			goto out_unlock;
-	}
-
-	ret = -ENOMEM;
-	direct = ftrace_find_direct_func(addr);
-	if (!direct) {
-		direct = ftrace_alloc_direct_func(addr);
-		if (!direct)
-			goto out_unlock;
-	}
-
-	entry = ftrace_add_rec_direct(ip, addr, &free_hash);
-	if (!entry)
-		goto out_unlock;
-
-	ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
-
-	if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
-		ret = register_ftrace_function_nolock(&direct_ops);
-		if (ret)
-			ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
-	}
-
-	if (ret) {
-		remove_hash_entry(direct_functions, entry);
-		kfree(entry);
-		if (!direct->count) {
-			list_del_rcu(&direct->next);
-			synchronize_rcu_tasks();
-			kfree(direct);
-			if (free_hash)
-				free_ftrace_hash(free_hash);
-			free_hash = NULL;
-			ftrace_direct_func_count--;
-		}
-	} else {
-		direct->count++;
-	}
- out_unlock:
-	mutex_unlock(&direct_mutex);
-
-	if (free_hash) {
-		synchronize_rcu_tasks();
-		free_ftrace_hash(free_hash);
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(register_ftrace_direct);
-
-static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
-						   struct dyn_ftrace **recp)
-{
-	struct ftrace_func_entry *entry;
-	struct dyn_ftrace *rec;
-
-	rec = lookup_rec(*ip, *ip);
-	if (!rec)
-		return NULL;
-
-	entry = __ftrace_lookup_ip(direct_functions, rec->ip);
-	if (!entry) {
-		WARN_ON(rec->flags & FTRACE_FL_DIRECT);
-		return NULL;
-	}
-
-	WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
-
-	/* Passed in ip just needs to be on the call site */
-	*ip = rec->ip;
-
-	if (recp)
-		*recp = rec;
-
-	return entry;
-}
-
-int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
-{
-	struct ftrace_direct_func *direct;
-	struct ftrace_func_entry *entry;
-	struct ftrace_hash *hash;
-	int ret = -ENODEV;
-
-	mutex_lock(&direct_mutex);
-
-	ip = ftrace_location(ip);
-	if (!ip)
-		goto out_unlock;
-
-	entry = find_direct_entry(&ip, NULL);
-	if (!entry)
-		goto out_unlock;
-
-	hash = direct_ops.func_hash->filter_hash;
-	if (hash->count == 1)
-		unregister_ftrace_function(&direct_ops);
-
-	ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
-
-	WARN_ON(ret);
-
-	remove_hash_entry(direct_functions, entry);
-
-	direct = ftrace_find_direct_func(addr);
-	if (!WARN_ON(!direct)) {
-		/* This is the good path (see the ! before WARN) */
-		direct->count--;
-		WARN_ON(direct->count < 0);
-		if (!direct->count) {
-			list_del_rcu(&direct->next);
-			synchronize_rcu_tasks();
-			kfree(direct);
-			kfree(entry);
-			ftrace_direct_func_count--;
-		}
-	}
- out_unlock:
-	mutex_unlock(&direct_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
-
-static struct ftrace_ops stub_ops = {
-	.func		= ftrace_stub,
-};
-
-/**
- * ftrace_modify_direct_caller - modify ftrace nop directly
- * @entry: The ftrace hash entry of the direct helper for @rec
- * @rec: The record representing the function site to patch
- * @old_addr: The location that the site at @rec->ip currently calls
- * @new_addr: The location that the site at @rec->ip should call
- *
- * An architecture may overwrite this function to optimize the
- * changing of the direct callback on an ftrace nop location.
- * This is called with the ftrace_lock mutex held, and no other
- * ftrace callbacks are on the associated record (@rec). Thus,
- * it is safe to modify the ftrace record, where it should be
- * currently calling @old_addr directly, to call @new_addr.
- *
- * This is called with direct_mutex locked.
- *
- * Safety checks should be made to make sure that the code at
- * @rec->ip is currently calling @old_addr. And this must
- * also update entry->direct to @new_addr.
- */
-int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
-				       struct dyn_ftrace *rec,
-				       unsigned long old_addr,
-				       unsigned long new_addr)
-{
-	unsigned long ip = rec->ip;
-	int ret;
-
-	lockdep_assert_held(&direct_mutex);
-
-	/*
-	 * The ftrace_lock was used to determine if the record
-	 * had more than one registered user to it. If it did,
-	 * we needed to prevent that from changing to do the quick
-	 * switch. But if it did not (only a direct caller was attached)
-	 * then this function is called. But this function can deal
-	 * with attached callers to the rec that we care about, and
-	 * since this function uses standard ftrace calls that take
-	 * the ftrace_lock mutex, we need to release it.
-	 */
-	mutex_unlock(&ftrace_lock);
-
-	/*
-	 * By setting a stub function at the same address, we force
-	 * the code to call the iterator and the direct_ops helper.
-	 * This means that @ip does not call the direct call, and
-	 * we can simply modify it.
-	 */
-	ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
-	if (ret)
-		goto out_lock;
-
-	ret = register_ftrace_function_nolock(&stub_ops);
-	if (ret) {
-		ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
-		goto out_lock;
-	}
-
-	entry->direct = new_addr;
-
-	/*
-	 * By removing the stub, we put back the direct call, calling
-	 * the @new_addr.
-	 */
-	unregister_ftrace_function(&stub_ops);
-	ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
-
- out_lock:
-	mutex_lock(&ftrace_lock);
-
-	return ret;
-}
-
-/**
- * modify_ftrace_direct - Modify an existing direct call to call something else
- * @ip: The instruction pointer to modify
- * @old_addr: The address that the current @ip calls directly
- * @new_addr: The address that the @ip should call
- *
- * This modifies a ftrace direct caller at an instruction pointer without
- * having to disable it first. The direct call will switch over to the
- * @new_addr without missing anything.
- *
- * Returns: zero on success. Non zero on error, which includes:
- *  -ENODEV : the @ip given has no direct caller attached
- *  -EINVAL : the @old_addr does not match the current direct caller
- */
-int modify_ftrace_direct(unsigned long ip,
-			 unsigned long old_addr, unsigned long new_addr)
-{
-	struct ftrace_direct_func *direct, *new_direct = NULL;
-	struct ftrace_func_entry *entry;
-	struct dyn_ftrace *rec;
-	int ret = -ENODEV;
-
-	mutex_lock(&direct_mutex);
-
-	mutex_lock(&ftrace_lock);
-
-	ip = ftrace_location(ip);
-	if (!ip)
-		goto out_unlock;
-
-	entry = find_direct_entry(&ip, &rec);
-	if (!entry)
-		goto out_unlock;
-
-	ret = -EINVAL;
-	if (entry->direct != old_addr)
-		goto out_unlock;
-
-	direct = ftrace_find_direct_func(old_addr);
-	if (WARN_ON(!direct))
-		goto out_unlock;
-	if (direct->count > 1) {
-		ret = -ENOMEM;
-		new_direct = ftrace_alloc_direct_func(new_addr);
-		if (!new_direct)
-			goto out_unlock;
-		direct->count--;
-		new_direct->count++;
-	} else {
-		direct->addr = new_addr;
-	}
-
-	/*
-	 * If there's no other ftrace callback on the rec->ip location,
-	 * then it can be changed directly by the architecture.
-	 * If there is another caller, then we just need to change the
-	 * direct caller helper to point to @new_addr.
-	 */
-	if (ftrace_rec_count(rec) == 1) {
-		ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
-	} else {
-		entry->direct = new_addr;
-		ret = 0;
-	}
-
-	if (ret) {
-		direct->addr = old_addr;
-		if (unlikely(new_direct)) {
-			direct->count++;
-			list_del_rcu(&new_direct->next);
-			synchronize_rcu_tasks();
-			kfree(new_direct);
-			ftrace_direct_func_count--;
-		}
-	}
-
- out_unlock:
-	mutex_unlock(&ftrace_lock);
-	mutex_unlock(&direct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(modify_ftrace_direct);
-
-#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
+#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
 
 static int check_direct_multi(struct ftrace_ops *ops)
 {
···
 }
 
 /**
- * register_ftrace_direct_multi - Call a custom trampoline directly
+ * register_ftrace_direct - Call a custom trampoline directly
  * for multiple functions registered in @ops
  * @ops: The address of the struct ftrace_ops object
  * @addr: The address of the trampoline to call at @ops functions
···
  * -ENODEV - @ip does not point to a ftrace nop location (or not supported)
  * -ENOMEM - There was an allocation failure.
  */
-int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
 	struct ftrace_hash *hash, *free_hash = NULL;
 	struct ftrace_func_entry *entry, *new;
···
 	ops->func = call_direct_funcs;
 	ops->flags = MULTI_FLAGS;
 	ops->trampoline = FTRACE_REGS_ADDR;
+	ops->direct_call = addr;
 
 	err = register_ftrace_function_nolock(ops);
···
 	}
 	return err;
 }
-EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
+EXPORT_SYMBOL_GPL(register_ftrace_direct);
 
 /**
- * unregister_ftrace_direct_multi - Remove calls to custom trampoline
- * previously registered by register_ftrace_direct_multi for @ops object.
+ * unregister_ftrace_direct - Remove calls to custom trampoline
+ * previously registered by register_ftrace_direct for @ops object.
  * @ops: The address of the struct ftrace_ops object
  *
  * This is used to remove a direct calls to @addr from the nop locations
···
  *  0 on success
  *  -EINVAL - The @ops object was not properly registered.
  */
-int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+			     bool free_filters)
 {
 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
 	int err;
···
 	/* cleanup for possible another register call */
 	ops->func = NULL;
 	ops->trampoline = 0;
+
+	if (free_filters)
+		ftrace_free_filter(ops);
 	return err;
 }
-EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
+EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
 
 static int
-__modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
 	struct ftrace_hash *hash;
 	struct ftrace_func_entry *entry, *iter;
···
 	/* Enable the tmp_ops to have the same functions as the direct ops */
 	ftrace_ops_init(&tmp_ops);
 	tmp_ops.func_hash = ops->func_hash;
+	tmp_ops.direct_call = addr;
 
 	err = register_ftrace_function_nolock(&tmp_ops);
 	if (err)
···
 			entry->direct = addr;
 		}
 	}
+	/* Prevent store tearing if a trampoline concurrently accesses the value */
+	WRITE_ONCE(ops->direct_call, addr);
 
 	mutex_unlock(&ftrace_lock);
···
 }
 
 /**
- * modify_ftrace_direct_multi_nolock - Modify an existing direct 'multi' call
+ * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
  * to call something else
  * @ops: The address of the struct ftrace_ops object
  * @addr: The address of the new trampoline to call at @ops functions
···
 * Returns: zero on success. Non zero on error, which includes:
 *  -EINVAL - The @ops object was not properly registered.
 */
-int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr)
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
 {
 	if (check_direct_multi(ops))
 		return -EINVAL;
 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EINVAL;
 
-	return __modify_ftrace_direct_multi(ops, addr);
+	return __modify_ftrace_direct(ops, addr);
 }
-EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi_nolock);
+EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
 
 /**
- * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
+ * modify_ftrace_direct - Modify an existing direct 'multi' call
  * to call something else
  * @ops: The address of the struct ftrace_ops object
  * @addr: The address of the new trampoline to call at @ops functions
···
 * Returns: zero on success. Non zero on error, which includes:
 *  -EINVAL - The @ops object was not properly registered.
 */
-int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
 	int err;
···
 		return -EINVAL;
 
 	mutex_lock(&direct_mutex);
-	err = __modify_ftrace_direct_multi(ops, addr);
+	err = __modify_ftrace_direct(ops, addr);
 	mutex_unlock(&direct_mutex);
 	return err;
 }
-EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
+EXPORT_SYMBOL_GPL(modify_ftrace_direct);
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 
 /**
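Net effect of the kernel/trace/ftrace.c changes above: the old single-ip register/unregister/modify_ftrace_direct() API is deleted and the ops-based "_multi" API is renamed into its place, with the trampoline address now cached in ops->direct_call so the shared call_direct_funcs() handler no longer performs a per-hit hash lookup. A minimal sketch of the surviving API, modeled on the samples further down (my_tramp stands in for an arch-specific assembly trampoline, which is assumed here, not shown):

#include <linux/ftrace.h>
#include <linux/sched.h>

extern void my_tramp(void);		/* assumed: arch-specific asm trampoline */

static struct ftrace_ops direct;	/* one ops object owns all attached ips */

static int attach_direct(void)
{
	int ret;

	/* The ops filter hash selects the call sites to patch. */
	ret = ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
	if (ret)
		return ret;

	/* Patches every filtered site to call my_tramp directly and
	 * records the address in direct.direct_call internally. */
	return register_ftrace_direct(&direct, (unsigned long)my_tramp);
}

static void detach_direct(void)
{
	/* free_filters == true also releases the filter hash, replacing
	 * the explicit ftrace_free_filter() call the samples used to make. */
	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
}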
+7 -12
kernel/trace/trace_selftest.c
···
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-#ifndef CALL_DEPTH_ACCOUNT
-#define CALL_DEPTH_ACCOUNT ""
-#endif
-
-noinline __noclone static void trace_direct_tramp(void)
-{
-	asm(CALL_DEPTH_ACCOUNT);
-}
+static struct ftrace_ops direct;
 #endif
 
 /*
···
 	 * Register direct function together with graph tracer
 	 * and make sure we get graph trace.
 	 */
-	ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
-				     (unsigned long) trace_direct_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0);
+	ret = register_ftrace_direct(&direct,
+				     (unsigned long)ftrace_stub_direct_tramp);
 	if (ret)
 		goto out;
···
 
 	unregister_ftrace_graph(&fgraph_ops);
 
-	ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
-				       (unsigned long) trace_direct_tramp);
+	ret = unregister_ftrace_direct(&direct,
+				       (unsigned long)ftrace_stub_direct_tramp,
+				       true);
 	if (ret)
 		goto out;
+4
mm/kfence/core.c
···
 	if (!kfence_sample_interval)
 		return;
 
+	/* if the pool has already been initialized by arch, skip the below. */
+	if (__kfence_pool)
+		return;
+
 	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
 
 	if (!__kfence_pool)
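This kfence hunk is the generic half of the early-allocation work mentioned in the merge text: if an architecture has already reserved __kfence_pool, the core allocator now leaves it alone. As a rough illustration only (the hook name below is invented for the sketch and is not the actual arm64 code), an architecture could claim the pool from memblock before the linear map is finalized:

#include <linux/kfence.h>
#include <linux/memblock.h>
#include <linux/printk.h>

/* Hypothetical early hook, for illustration only. */
void __init arch_kfence_reserve_pool(void)
{
	if (!kfence_sample_interval)
		return;

	/* Reserving the pool this early lets the architecture keep huge
	 * (block) mappings for the rest of the linear map; the generic
	 * code above then sees __kfence_pool already set and skips its
	 * own memblock allocation. */
	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!__kfence_pool)
		pr_err("kfence: pool reservation failed\n");
}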
+1 -1
samples/Kconfig
···
 	  that hooks to wake_up_process and prints the parameters.
 
 config SAMPLE_FTRACE_DIRECT_MULTI
-	tristate "Build register_ftrace_direct_multi() example"
+	tristate "Build register_ftrace_direct() on multiple ips example"
 	depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
 	depends on HAVE_SAMPLE_FTRACE_DIRECT_MULTI
 	help
+7 -3
samples/ftrace/ftrace-direct-modify.c
···
 
 #endif /* CONFIG_S390 */
 
+static struct ftrace_ops direct;
+
 static unsigned long my_tramp = (unsigned long)my_tramp1;
 static unsigned long tramps[2] = {
 	(unsigned long)my_tramp1,
···
 		if (ret)
 			continue;
 		t ^= 1;
-		ret = modify_ftrace_direct(my_ip, my_tramp, tramps[t]);
+		ret = modify_ftrace_direct(&direct, tramps[t]);
 		if (!ret)
 			my_tramp = tramps[t];
 		WARN_ON_ONCE(ret);
···
 {
 	int ret;
 
-	ret = register_ftrace_direct(my_ip, my_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long) my_ip, 0, 0);
+	ret = register_ftrace_direct(&direct, my_tramp);
+
 	if (!ret)
 		simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
 	return ret;
···
 static void __exit ftrace_direct_exit(void)
 {
 	kthread_stop(simple_tsk);
-	unregister_ftrace_direct(my_ip, my_tramp);
+	unregister_ftrace_direct(&direct, my_tramp, true);
 }
 
 module_init(ftrace_direct_init);
+4 -5
samples/ftrace/ftrace-direct-multi-modify.c
···
 		if (ret)
 			continue;
 		t ^= 1;
-		ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+		ret = modify_ftrace_direct(&direct, tramps[t]);
 		if (!ret)
 			my_tramp = tramps[t];
 		WARN_ON_ONCE(ret);
···
 	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
 	ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
 
-	ret = register_ftrace_direct_multi(&direct, my_tramp);
+	ret = register_ftrace_direct(&direct, my_tramp);
 
 	if (!ret)
 		simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
···
 static void __exit ftrace_direct_multi_exit(void)
 {
 	kthread_stop(simple_tsk);
-	unregister_ftrace_direct_multi(&direct, my_tramp);
-	ftrace_free_filter(&direct);
+	unregister_ftrace_direct(&direct, my_tramp, true);
 }
 
 module_init(ftrace_direct_multi_init);
 module_exit(ftrace_direct_multi_exit);
 
 MODULE_AUTHOR("Jiri Olsa");
-MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct()");
 MODULE_LICENSE("GPL");
+2 -3
samples/ftrace/ftrace-direct-multi.c
···
 	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
 	ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
 
-	return register_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
+	return register_ftrace_direct(&direct, (unsigned long) my_tramp);
 }
 
 static void __exit ftrace_direct_multi_exit(void)
 {
-	unregister_ftrace_direct_multi(&direct, (unsigned long) my_tramp);
-	ftrace_free_filter(&direct);
+	unregister_ftrace_direct(&direct, (unsigned long) my_tramp, true);
 }
 
 module_init(ftrace_direct_multi_init);
+6 -4
samples/ftrace/ftrace-direct-too.c
···
 
 #endif /* CONFIG_S390 */
 
+static struct ftrace_ops direct;
+
 static int __init ftrace_direct_init(void)
 {
-	return register_ftrace_direct((unsigned long)handle_mm_fault,
-				      (unsigned long)my_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long) handle_mm_fault, 0, 0);
+
+	return register_ftrace_direct(&direct, (unsigned long) my_tramp);
 }
 
 static void __exit ftrace_direct_exit(void)
 {
-	unregister_ftrace_direct((unsigned long)handle_mm_fault,
-				 (unsigned long)my_tramp);
+	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
 }
 
 module_init(ftrace_direct_init);
+6 -4
samples/ftrace/ftrace-direct.c
···
 
 #endif /* CONFIG_S390 */
 
+static struct ftrace_ops direct;
+
 static int __init ftrace_direct_init(void)
 {
-	return register_ftrace_direct((unsigned long)wake_up_process,
-				      (unsigned long)my_tramp);
+	ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+
+	return register_ftrace_direct(&direct, (unsigned long) my_tramp);
 }
 
 static void __exit ftrace_direct_exit(void)
 {
-	unregister_ftrace_direct((unsigned long)wake_up_process,
-				 (unsigned long)my_tramp);
+	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
 }
 
 module_init(ftrace_direct_init);