Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:
"Nothing particularly stands out here, probably because people were
tied up with spectre/meltdown stuff last time around. Still, the main
pieces are:

- Rework of our CPU features framework so that we can whitelist CPUs
that don't require kpti even in a heterogeneous system

- Support for the IDC/DIC architecture extensions, which allow us to
elide instruction and data cache maintenance when writing out
instructions

- Removal of the large memory model which resulted in suboptimal
codegen by the compiler and increased the use of literal pools,
which could potentially be used as ROP gadgets since they are
mapped as executable

- Rework of forced signal delivery so that the siginfo_t is
well-formed and handling of show_unhandled_signals is consolidated
and made consistent between different fault types

- More siginfo cleanup based on the initial patches from Eric
Biederman

- Workaround for Cortex-A55 erratum #1024718

- Some small ACPI IORT updates and cleanups from Lorenzo Pieralisi

- Misc cleanups and non-critical fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
arm64: uaccess: Fix omissions from usercopy whitelist
arm64: fpsimd: Split cpu field out from struct fpsimd_state
arm64: tlbflush: avoid writing RES0 bits
arm64: cmpxchg: Include linux/compiler.h in asm/cmpxchg.h
arm64: move percpu cmpxchg implementation from cmpxchg.h to percpu.h
arm64: cmpxchg: Include build_bug.h instead of bug.h for BUILD_BUG
arm64: lse: Include compiler_types.h and export.h for out-of-line LL/SC
arm64: fpsimd: include <linux/init.h> in fpsimd.h
drivers/perf: arm_pmu_platform: do not warn about affinity on uniprocessor
perf: arm_spe: include linux/vmalloc.h for vmap()
Revert "arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)"
arm64: cpufeature: Avoid warnings due to unused symbols
arm64: Add work around for Arm Cortex-A55 Erratum 1024718
arm64: Delay enabling hardware DBM feature
arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
arm64: capabilities: Handle shared entries
arm64: capabilities: Add support for checks based on a list of MIDRs
arm64: Add helpers for checking CPU MIDR against a range
arm64: capabilities: Clean up midr range helpers
arm64: capabilities: Change scope of VHE to Boot CPU feature
...

+1482 -801
+10 -8
Documentation/arm64/cpu-feature-registers.txt
··· 110 110 x--------------------------------------------------x 111 111 | Name | bits | visible | 112 112 |--------------------------------------------------| 113 - | RES0 | [63-52] | n | 113 + | TS | [55-52] | y | 114 114 |--------------------------------------------------| 115 115 | FHM | [51-48] | y | 116 116 |--------------------------------------------------| ··· 124 124 |--------------------------------------------------| 125 125 | RDM | [31-28] | y | 126 126 |--------------------------------------------------| 127 - | RES0 | [27-24] | n | 128 - |--------------------------------------------------| 129 127 | ATOMICS | [23-20] | y | 130 128 |--------------------------------------------------| 131 129 | CRC32 | [19-16] | y | ··· 133 135 | SHA1 | [11-8] | y | 134 136 |--------------------------------------------------| 135 137 | AES | [7-4] | y | 136 - |--------------------------------------------------| 137 - | RES0 | [3-0] | n | 138 138 x--------------------------------------------------x 139 139 140 140 ··· 140 144 x--------------------------------------------------x 141 145 | Name | bits | visible | 142 146 |--------------------------------------------------| 143 - | RES0 | [63-36] | n | 147 + | DIT | [51-48] | y | 144 148 |--------------------------------------------------| 145 149 | SVE | [35-32] | y | 146 - |--------------------------------------------------| 147 - | RES0 | [31-28] | n | 148 150 |--------------------------------------------------| 149 151 | GIC | [27-24] | n | 150 152 |--------------------------------------------------| ··· 191 197 | JSCVT | [15-12] | y | 192 198 |--------------------------------------------------| 193 199 | DPB | [3-0] | y | 200 + x--------------------------------------------------x 201 + 202 + 5) ID_AA64MMFR2_EL1 - Memory model feature register 2 203 + 204 + x--------------------------------------------------x 205 + | Name | bits | visible | 206 + |--------------------------------------------------| 207 + | AT | [35-32] 
| y | 194 208 x--------------------------------------------------x 195 209 196 210 Appendix I: Example
+16
Documentation/arm64/elf_hwcaps.txt
··· 162 162 HWCAP_ASIMDFHM 163 163 164 164 Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001. 165 + 166 + HWCAP_DIT 167 + 168 + Functionality implied by ID_AA64PFR0_EL1.DIT == 0b0001. 169 + 170 + HWCAP_USCAT 171 + 172 + Functionality implied by ID_AA64MMFR2_EL1.AT == 0b0001. 173 + 174 + HWCAP_ILRCPC 175 + 176 + Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0010. 177 + 178 + HWCAP_FLAGM 179 + 180 + Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
+1
Documentation/arm64/silicon-errata.txt
··· 55 55 | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 | 56 56 | ARM | Cortex-A72 | #853709 | N/A | 57 57 | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 | 58 + | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 | 58 59 | ARM | MMU-500 | #841119,#826419 | N/A | 59 60 | | | | | 60 61 | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+37 -10
arch/arm64/Kconfig
··· 459 459 config ARM64_ERRATUM_843419 460 460 bool "Cortex-A53: 843419: A load or store might access an incorrect address" 461 461 default y 462 - select ARM64_MODULE_CMODEL_LARGE if MODULES 462 + select ARM64_MODULE_PLTS if MODULES 463 463 help 464 464 This option links the kernel with '--fix-cortex-a53-843419' and 465 - builds modules using the large memory model in order to avoid the use 466 - of the ADRP instruction, which can cause a subsequent memory access 467 - to use an incorrect address on Cortex-A53 parts up to r0p4. 465 + enables PLT support to replace certain ADRP instructions, which can 466 + cause subsequent memory accesses to use an incorrect address on 467 + Cortex-A53 parts up to r0p4. 468 + 469 + If unsure, say Y. 470 + 471 + config ARM64_ERRATUM_1024718 472 + bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update" 473 + default y 474 + help 475 + This option adds work around for Arm Cortex-A55 Erratum 1024718. 476 + 477 + Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect 478 + update of the hardware dirty bit when the DBM/AP bits are updated 479 + without a break-before-make. The work around is to disable the usage 480 + of hardware DBM locally on the affected cores. CPUs not affected by 481 + erratum will continue to use the feature. 468 482 469 483 If unsure, say Y. 470 484 ··· 1122 1108 1123 1109 To enable use of this extension on CPUs that implement it, say Y. 1124 1110 1125 - config ARM64_MODULE_CMODEL_LARGE 1126 - bool 1111 + Note that for architectural reasons, firmware _must_ implement SVE 1112 + support when running on SVE capable hardware. The required support 1113 + is present in: 1114 + 1115 + * version 1.5 and later of the ARM Trusted Firmware 1116 + * the AArch64 boot wrapper since commit 5e1261e08abf 1117 + ("bootwrapper: SVE: Enable SVE for EL2 and below"). 1118 + 1119 + For other firmware implementations, consult the firmware documentation 1120 + or vendor. 
1121 + 1122 + If you need the kernel to boot on SVE-capable hardware with broken 1123 + firmware, you may need to say N here until you get your firmware 1124 + fixed. Otherwise, you may experience firmware panics or lockups when 1125 + booting the kernel. If unsure and you are not observing these 1126 + symptoms, you should assume that it is safe to say Y. 1127 1127 1128 1128 config ARM64_MODULE_PLTS 1129 1129 bool 1130 - select ARM64_MODULE_CMODEL_LARGE 1131 1130 select HAVE_MOD_ARCH_SPECIFIC 1132 1131 1133 1132 config RELOCATABLE ··· 1174 1147 If unsure, say N. 1175 1148 1176 1149 config RANDOMIZE_MODULE_REGION_FULL 1177 - bool "Randomize the module region independently from the core kernel" 1150 + bool "Randomize the module region over a 4 GB range" 1178 1151 depends on RANDOMIZE_BASE 1179 1152 default y 1180 1153 help 1181 - Randomizes the location of the module region without considering the 1182 - location of the core kernel. This way, it is impossible for modules 1154 + Randomizes the location of the module region inside a 4 GB window 1155 + covering the core kernel. This way, it is less likely for modules 1183 1156 to leak information about the location of core kernel data structures 1184 1157 but it does imply that function calls between modules and the core 1185 1158 kernel will need to be resolved via veneers in the module PLT.
+6 -9
arch/arm64/Makefile
··· 51 51 52 52 KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) 53 53 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 54 - KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) 55 54 KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) 56 55 57 56 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) ··· 76 77 77 78 CHECKFLAGS += -D__aarch64__ -m64 78 79 79 - ifeq ($(CONFIG_ARM64_MODULE_CMODEL_LARGE), y) 80 - KBUILD_CFLAGS_MODULE += -mcmodel=large 81 - endif 82 - 83 80 ifeq ($(CONFIG_ARM64_MODULE_PLTS),y) 84 81 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds 85 82 endif ··· 92 97 TEXT_OFFSET := 0x00080000 93 98 endif 94 99 95 - # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61) 100 + # KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) 101 + # - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) 96 102 # in 32-bit arithmetic 103 + KASAN_SHADOW_SCALE_SHIFT := 3 97 104 KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ 98 - (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ 99 - + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \ 100 - - (1 << (64 - 32 - 3)) )) ) 105 + (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ 106 + + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \ 107 + - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) ) 101 108 102 109 export TEXT_OFFSET GZFLAGS 103 110
+2 -32
arch/arm64/include/asm/assembler.h
··· 202 202 203 203 /* 204 204 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where 205 - * <symbol> is within the range +/- 4 GB of the PC when running 206 - * in core kernel context. In module context, a movz/movk sequence 207 - * is used, since modules may be loaded far away from the kernel 208 - * when KASLR is in effect. 205 + * <symbol> is within the range +/- 4 GB of the PC. 209 206 */ 210 207 /* 211 208 * @dst: destination register (64 bit wide) 212 209 * @sym: name of the symbol 213 210 */ 214 211 .macro adr_l, dst, sym 215 - #ifndef MODULE 216 212 adrp \dst, \sym 217 213 add \dst, \dst, :lo12:\sym 218 - #else 219 - movz \dst, #:abs_g3:\sym 220 - movk \dst, #:abs_g2_nc:\sym 221 - movk \dst, #:abs_g1_nc:\sym 222 - movk \dst, #:abs_g0_nc:\sym 223 - #endif 224 214 .endm 225 215 226 216 /* ··· 221 231 * the address 222 232 */ 223 233 .macro ldr_l, dst, sym, tmp= 224 - #ifndef MODULE 225 234 .ifb \tmp 226 235 adrp \dst, \sym 227 236 ldr \dst, [\dst, :lo12:\sym] ··· 228 239 adrp \tmp, \sym 229 240 ldr \dst, [\tmp, :lo12:\sym] 230 241 .endif 231 - #else 232 - .ifb \tmp 233 - adr_l \dst, \sym 234 - ldr \dst, [\dst] 235 - .else 236 - adr_l \tmp, \sym 237 - ldr \dst, [\tmp] 238 - .endif 239 - #endif 240 242 .endm 241 243 242 244 /* ··· 237 257 * while <src> needs to be preserved. 
238 258 */ 239 259 .macro str_l, src, sym, tmp 240 - #ifndef MODULE 241 260 adrp \tmp, \sym 242 261 str \src, [\tmp, :lo12:\sym] 243 - #else 244 - adr_l \tmp, \sym 245 - str \src, [\tmp] 246 - #endif 247 262 .endm 248 263 249 264 /* 250 - * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for 251 - * non-module code 265 + * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP) 252 266 * @sym: The name of the per-cpu variable 253 267 * @tmp: scratch register 254 268 */ 255 269 .macro adr_this_cpu, dst, sym, tmp 256 - #ifndef MODULE 257 270 adrp \tmp, \sym 258 271 add \dst, \tmp, #:lo12:\sym 259 - #else 260 - adr_l \dst, \sym 261 - #endif 262 272 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN 263 273 mrs \tmp, tpidr_el1 264 274 alternative_else
+4
arch/arm64/include/asm/cache.h
··· 20 20 21 21 #define CTR_L1IP_SHIFT 14 22 22 #define CTR_L1IP_MASK 3 23 + #define CTR_DMINLINE_SHIFT 16 24 + #define CTR_ERG_SHIFT 20 23 25 #define CTR_CWG_SHIFT 24 24 26 #define CTR_CWG_MASK 15 27 + #define CTR_IDC_SHIFT 28 28 + #define CTR_DIC_SHIFT 29 25 29 26 30 #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) 27 31
+3
arch/arm64/include/asm/cacheflush.h
··· 133 133 134 134 static inline void __flush_icache_all(void) 135 135 { 136 + if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) 137 + return; 138 + 136 139 asm("ic ialluis"); 137 140 dsb(ish); 138 141 }
+2 -27
arch/arm64/include/asm/cmpxchg.h
··· 18 18 #ifndef __ASM_CMPXCHG_H 19 19 #define __ASM_CMPXCHG_H 20 20 21 - #include <linux/bug.h> 21 + #include <linux/build_bug.h> 22 + #include <linux/compiler.h> 22 23 23 24 #include <asm/atomic.h> 24 25 #include <asm/barrier.h> ··· 195 194 (unsigned long)(n1), (unsigned long)(n2), \ 196 195 ptr1); \ 197 196 __ret; \ 198 - }) 199 - 200 - /* this_cpu_cmpxchg */ 201 - #define _protect_cmpxchg_local(pcp, o, n) \ 202 - ({ \ 203 - typeof(*raw_cpu_ptr(&(pcp))) __ret; \ 204 - preempt_disable(); \ 205 - __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ 206 - preempt_enable(); \ 207 - __ret; \ 208 - }) 209 - 210 - #define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 211 - #define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 212 - #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 213 - #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 214 - 215 - #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ 216 - ({ \ 217 - int __ret; \ 218 - preempt_disable(); \ 219 - __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ 220 - raw_cpu_ptr(&(ptr2)), \ 221 - o1, o2, n1, n2); \ 222 - preempt_enable(); \ 223 - __ret; \ 224 197 }) 225 198 226 199 #define __CMPWAIT_CASE(w, sz, name) \
+5 -1
arch/arm64/include/asm/cpucaps.h
··· 45 45 #define ARM64_HARDEN_BRANCH_PREDICTOR 24 46 46 #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 47 47 #define ARM64_HAS_RAS_EXTN 26 48 + #define ARM64_WORKAROUND_843419 27 49 + #define ARM64_HAS_CACHE_IDC 28 50 + #define ARM64_HAS_CACHE_DIC 29 51 + #define ARM64_HW_DBM 30 48 52 49 - #define ARM64_NCAPS 27 53 + #define ARM64_NCAPS 31 50 54 51 55 #endif /* __ASM_CPUCAPS_H */
+246 -16
arch/arm64/include/asm/cpufeature.h
··· 10 10 #define __ASM_CPUFEATURE_H 11 11 12 12 #include <asm/cpucaps.h> 13 + #include <asm/cputype.h> 13 14 #include <asm/fpsimd.h> 14 15 #include <asm/hwcap.h> 15 16 #include <asm/sigcontext.h> ··· 90 89 91 90 extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0; 92 91 93 - /* scope of capability check */ 94 - enum { 95 - SCOPE_SYSTEM, 96 - SCOPE_LOCAL_CPU, 97 - }; 92 + /* 93 + * CPU capabilities: 94 + * 95 + * We use arm64_cpu_capabilities to represent system features, errata work 96 + * arounds (both used internally by kernel and tracked in cpu_hwcaps) and 97 + * ELF HWCAPs (which are exposed to user). 98 + * 99 + * To support systems with heterogeneous CPUs, we need to make sure that we 100 + * detect the capabilities correctly on the system and take appropriate 101 + * measures to ensure there are no incompatibilities. 102 + * 103 + * This comment tries to explain how we treat the capabilities. 104 + * Each capability has the following list of attributes : 105 + * 106 + * 1) Scope of Detection : The system detects a given capability by 107 + * performing some checks at runtime. This could be, e.g, checking the 108 + * value of a field in CPU ID feature register or checking the cpu 109 + * model. The capability provides a call back ( @matches() ) to 110 + * perform the check. Scope defines how the checks should be performed. 111 + * There are three cases: 112 + * 113 + * a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one 114 + * matches. This implies, we have to run the check on all the 115 + * booting CPUs, until the system decides that state of the 116 + * capability is finalised. (See section 2 below) 117 + * Or 118 + * b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs 119 + * matches. This implies, we run the check only once, when the 120 + * system decides to finalise the state of the capability. 
If the 121 + * capability relies on a field in one of the CPU ID feature 122 + * registers, we use the sanitised value of the register from the 123 + * CPU feature infrastructure to make the decision. 124 + * Or 125 + * c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the 126 + * feature. This category is for features that are "finalised" 127 + * (or used) by the kernel very early even before the SMP cpus 128 + * are brought up. 129 + * 130 + * The process of detection is usually denoted by "update" capability 131 + * state in the code. 132 + * 133 + * 2) Finalise the state : The kernel should finalise the state of a 134 + * capability at some point during its execution and take necessary 135 + * actions if any. Usually, this is done, after all the boot-time 136 + * enabled CPUs are brought up by the kernel, so that it can make 137 + * better decision based on the available set of CPUs. However, there 138 + * are some special cases, where the action is taken during the early 139 + * boot by the primary boot CPU. (e.g, running the kernel at EL2 with 140 + * Virtualisation Host Extensions). The kernel usually disallows any 141 + * changes to the state of a capability once it finalises the capability 142 + * and takes any action, as it may be impossible to execute the actions 143 + * safely. A CPU brought up after a capability is "finalised" is 144 + * referred to as "Late CPU" w.r.t the capability. e.g, all secondary 145 + * CPUs are treated "late CPUs" for capabilities determined by the boot 146 + * CPU. 147 + * 148 + * At the moment there are two passes of finalising the capabilities. 149 + * a) Boot CPU scope capabilities - Finalised by primary boot CPU via 150 + * setup_boot_cpu_capabilities(). 151 + * b) Everything except (a) - Run via setup_system_capabilities(). 
152 + * 153 + * 3) Verification: When a CPU is brought online (e.g, by user or by the 154 + * kernel), the kernel should make sure that it is safe to use the CPU, 155 + * by verifying that the CPU is compliant with the state of the 156 + * capabilities finalised already. This happens via : 157 + * 158 + * secondary_start_kernel()-> check_local_cpu_capabilities() 159 + * 160 + * As explained in (2) above, capabilities could be finalised at 161 + * different points in the execution. Each newly booted CPU is verified 162 + * against the capabilities that have been finalised by the time it 163 + * boots. 164 + * 165 + * a) SCOPE_BOOT_CPU : All CPUs are verified against the capability 166 + * except for the primary boot CPU. 167 + * 168 + * b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the 169 + * user after the kernel boot are verified against the capability. 170 + * 171 + * If there is a conflict, the kernel takes an action, based on the 172 + * severity (e.g, a CPU could be prevented from booting or cause a 173 + * kernel panic). The CPU is allowed to "affect" the state of the 174 + * capability, if it has not been finalised already. See section 5 175 + * for more details on conflicts. 176 + * 177 + * 4) Action: As mentioned in (2), the kernel can take an action for each 178 + * detected capability, on all CPUs on the system. Appropriate actions 179 + * include, turning on an architectural feature, modifying the control 180 + * registers (e.g, SCTLR, TCR etc.) or patching the kernel via 181 + * alternatives. The kernel patching is batched and performed at later 182 + * point. The actions are always initiated only after the capability 183 + * is finalised. This is usally denoted by "enabling" the capability. 184 + * The actions are initiated as follows : 185 + * a) Action is triggered on all online CPUs, after the capability is 186 + * finalised, invoked within the stop_machine() context from 187 + * enable_cpu_capabilitie(). 
188 + * 189 + * b) Any late CPU, brought up after (1), the action is triggered via: 190 + * 191 + * check_local_cpu_capabilities() -> verify_local_cpu_capabilities() 192 + * 193 + * 5) Conflicts: Based on the state of the capability on a late CPU vs. 194 + * the system state, we could have the following combinations : 195 + * 196 + * x-----------------------------x 197 + * | Type | System | Late CPU | 198 + * |-----------------------------| 199 + * | a | y | n | 200 + * |-----------------------------| 201 + * | b | n | y | 202 + * x-----------------------------x 203 + * 204 + * Two separate flag bits are defined to indicate whether each kind of 205 + * conflict can be allowed: 206 + * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed 207 + * ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed 208 + * 209 + * Case (a) is not permitted for a capability that the system requires 210 + * all CPUs to have in order for the capability to be enabled. This is 211 + * typical for capabilities that represent enhanced functionality. 212 + * 213 + * Case (b) is not permitted for a capability that must be enabled 214 + * during boot if any CPU in the system requires it in order to run 215 + * safely. This is typical for erratum work arounds that cannot be 216 + * enabled after the corresponding capability is finalised. 217 + * 218 + * In some non-typical cases either both (a) and (b), or neither, 219 + * should be permitted. This can be described by including neither 220 + * or both flags in the capability's type field. 221 + */ 222 + 223 + 224 + /* 225 + * Decide how the capability is detected. 226 + * On any local CPU vs System wide vs the primary boot CPU 227 + */ 228 + #define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0)) 229 + #define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1)) 230 + /* 231 + * The capabilitiy is detected on the Boot CPU and is used by kernel 232 + * during early boot. 
i.e, the capability should be "detected" and 233 + * "enabled" as early as possibly on all booting CPUs. 234 + */ 235 + #define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2)) 236 + #define ARM64_CPUCAP_SCOPE_MASK \ 237 + (ARM64_CPUCAP_SCOPE_SYSTEM | \ 238 + ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ 239 + ARM64_CPUCAP_SCOPE_BOOT_CPU) 240 + 241 + #define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM 242 + #define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU 243 + #define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU 244 + #define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK 245 + 246 + /* 247 + * Is it permitted for a late CPU to have this capability when system 248 + * hasn't already enabled it ? 249 + */ 250 + #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4)) 251 + /* Is it safe for a late CPU to miss this capability when system has it */ 252 + #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5)) 253 + 254 + /* 255 + * CPU errata workarounds that need to be enabled at boot time if one or 256 + * more CPUs in the system requires it. When one of these capabilities 257 + * has been enabled, it is safe to allow any CPU to boot that doesn't 258 + * require the workaround. However, it is not safe if a "late" CPU 259 + * requires a workaround and the system hasn't enabled it already. 260 + */ 261 + #define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \ 262 + (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU) 263 + /* 264 + * CPU feature detected at boot time based on system-wide value of a 265 + * feature. It is safe for a late CPU to have this feature even though 266 + * the system hasn't enabled it, although the featuer will not be used 267 + * by Linux in this case. If the system has enabled this feature already, 268 + * then every late CPU must have it. 269 + */ 270 + #define ARM64_CPUCAP_SYSTEM_FEATURE \ 271 + (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU) 272 + /* 273 + * CPU feature detected at boot time based on feature of one or more CPUs. 
274 + * All possible conflicts for a late CPU are ignored. 275 + */ 276 + #define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \ 277 + (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ 278 + ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \ 279 + ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU) 280 + 281 + /* 282 + * CPU feature detected at boot time, on one or more CPUs. A late CPU 283 + * is not allowed to have the capability when the system doesn't have it. 284 + * It is Ok for a late CPU to miss the feature. 285 + */ 286 + #define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \ 287 + (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \ 288 + ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU) 289 + 290 + /* 291 + * CPU feature used early in the boot based on the boot CPU. All secondary 292 + * CPUs must match the state of the capability as detected by the boot CPU. 293 + */ 294 + #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU 98 295 99 296 struct arm64_cpu_capabilities { 100 297 const char *desc; 101 298 u16 capability; 102 - int def_scope; /* default scope */ 299 + u16 type; 103 300 bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); 104 - int (*enable)(void *); /* Called on all active CPUs */ 301 + /* 302 + * Take the appropriate actions to enable this capability for this CPU. 303 + * For each successfully booted CPU, this method is called for each 304 + * globally detected capability. 
305 + */ 306 + void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); 105 307 union { 106 308 struct { /* To be used for erratum handling only */ 107 - u32 midr_model; 108 - u32 midr_range_min, midr_range_max; 309 + struct midr_range midr_range; 310 + const struct arm64_midr_revidr { 311 + u32 midr_rv; /* revision/variant */ 312 + u32 revidr_mask; 313 + } * const fixed_revs; 109 314 }; 110 315 316 + const struct midr_range *midr_range_list; 111 317 struct { /* Feature register checking */ 112 318 u32 sys_reg; 113 319 u8 field_pos; ··· 323 115 bool sign; 324 116 unsigned long hwcap; 325 117 }; 118 + /* 119 + * A list of "matches/cpu_enable" pair for the same 120 + * "capability" of the same "type" as described by the parent. 121 + * Only matches(), cpu_enable() and fields relevant to these 122 + * methods are significant in the list. The cpu_enable is 123 + * invoked only if the corresponding entry "matches()". 124 + * However, if a cpu_enable() method is associated 125 + * with multiple matches(), care should be taken that either 126 + * the match criteria are mutually exclusive, or that the 127 + * method is robust against being called multiple times. 
128 + */ 129 + const struct arm64_cpu_capabilities *match_list; 326 130 }; 327 131 }; 132 + 133 + static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap) 134 + { 135 + return cap->type & ARM64_CPUCAP_SCOPE_MASK; 136 + } 137 + 138 + static inline bool 139 + cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap) 140 + { 141 + return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU); 142 + } 143 + 144 + static inline bool 145 + cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap) 146 + { 147 + return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU); 148 + } 328 149 329 150 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 330 151 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; ··· 473 236 } 474 237 475 238 void __init setup_cpu_features(void); 476 - 477 - void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, 478 - const char *info); 479 - void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps); 480 239 void check_local_cpu_capabilities(void); 481 240 482 - void update_cpu_errata_workarounds(void); 483 - void __init enable_errata_workarounds(void); 484 - void verify_local_cpu_errata_workarounds(void); 485 241 486 242 u64 read_sanitised_ftr_reg(u32 id); 487 243
+43
arch/arm64/include/asm/cputype.h
··· 83 83 #define ARM_CPU_PART_CORTEX_A53 0xD03 84 84 #define ARM_CPU_PART_CORTEX_A73 0xD09 85 85 #define ARM_CPU_PART_CORTEX_A75 0xD0A 86 + #define ARM_CPU_PART_CORTEX_A35 0xD04 87 + #define ARM_CPU_PART_CORTEX_A55 0xD05 86 88 87 89 #define APM_CPU_PART_POTENZA 0x000 88 90 ··· 104 102 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) 105 103 #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) 106 104 #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) 105 + #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) 106 + #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) 107 107 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) 108 108 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) 109 109 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) ··· 120 116 #include <asm/sysreg.h> 121 117 122 118 #define read_cpuid(reg) read_sysreg_s(SYS_ ## reg) 119 + 120 + /* 121 + * Represent a range of MIDR values for a given CPU model and a 122 + * range of variant/revision values. 123 + * 124 + * @model - CPU model as defined by MIDR_CPU_MODEL 125 + * @rv_min - Minimum value for the revision/variant as defined by 126 + * MIDR_CPU_VAR_REV 127 + * @rv_max - Maximum value for the variant/revision for the range. 
128 + */ 129 + struct midr_range { 130 + u32 model; 131 + u32 rv_min; 132 + u32 rv_max; 133 + }; 134 + 135 + #define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \ 136 + { \ 137 + .model = m, \ 138 + .rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \ 139 + .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \ 140 + } 141 + 142 + #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) 143 + 144 + static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) 145 + { 146 + return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, 147 + range->rv_min, range->rv_max); 148 + } 149 + 150 + static inline bool 151 + is_midr_in_range_list(u32 midr, struct midr_range const *ranges) 152 + { 153 + while (ranges->model) 154 + if (is_midr_in_range(midr, ranges++)) 155 + return true; 156 + return false; 157 + } 123 158 124 159 /* 125 160 * The CPU ID never changes at run time, so we might as well tell the
+9
arch/arm64/include/asm/esr.h
··· 240 240 (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \ 241 241 ESR_ELx_SYS64_ISS_OP2_SHIFT)) 242 242 243 + /* 244 + * ISS field definitions for floating-point exception traps 245 + * (FP_EXC_32/FP_EXC_64). 246 + * 247 + * (The FPEXC_* constants are used instead for common bits.) 248 + */ 249 + 250 + #define ESR_ELx_FP_EXC_TFV (UL(1) << 23) 251 + 243 252 #ifndef __ASSEMBLY__ 244 253 #include <asm/types.h> 245 254
+6 -28
arch/arm64/include/asm/fpsimd.h
··· 22 22 #ifndef __ASSEMBLY__ 23 23 24 24 #include <linux/cache.h> 25 + #include <linux/init.h> 25 26 #include <linux/stddef.h> 26 - 27 - /* 28 - * FP/SIMD storage area has: 29 - * - FPSR and FPCR 30 - * - 32 128-bit data registers 31 - * 32 - * Note that user_fpsimd forms a prefix of this structure, which is 33 - * relied upon in the ptrace FP/SIMD accessors. 34 - */ 35 - struct fpsimd_state { 36 - union { 37 - struct user_fpsimd_state user_fpsimd; 38 - struct { 39 - __uint128_t vregs[32]; 40 - u32 fpsr; 41 - u32 fpcr; 42 - /* 43 - * For ptrace compatibility, pad to next 128-bit 44 - * boundary here if extending this struct. 45 - */ 46 - }; 47 - }; 48 - /* the id of the last cpu to have restored this state */ 49 - unsigned int cpu; 50 - }; 51 27 52 28 #if defined(__KERNEL__) && defined(CONFIG_COMPAT) 53 29 /* Masks for extracting the FPSR and FPCR from the FPSCR */ ··· 38 62 39 63 struct task_struct; 40 64 41 - extern void fpsimd_save_state(struct fpsimd_state *state); 42 - extern void fpsimd_load_state(struct fpsimd_state *state); 65 + extern void fpsimd_save_state(struct user_fpsimd_state *state); 66 + extern void fpsimd_load_state(struct user_fpsimd_state *state); 43 67 44 68 extern void fpsimd_thread_switch(struct task_struct *next); 45 69 extern void fpsimd_flush_thread(void); ··· 59 83 extern void sve_load_state(void const *state, u32 const *pfpsr, 60 84 unsigned long vq_minus_1); 61 85 extern unsigned int sve_get_vl(void); 62 - extern int sve_kernel_enable(void *); 86 + 87 + struct arm64_cpu_capabilities; 88 + extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); 63 89 64 90 extern int __ro_after_init sve_max_vl; 65 91
+3
arch/arm64/include/asm/lse.h
··· 4 4 5 5 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) 6 6 7 + #include <linux/compiler_types.h> 8 + #include <linux/export.h> 7 9 #include <linux/stringify.h> 8 10 #include <asm/alternative.h> 11 + #include <asm/cpucaps.h> 9 12 10 13 #ifdef __ASSEMBLER__ 11 14
+2
arch/arm64/include/asm/module.h
··· 39 39 u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, 40 40 Elf64_Sym *sym); 41 41 42 + u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val); 43 + 42 44 #ifdef CONFIG_RANDOMIZE_BASE 43 45 extern u64 module_alloc_base; 44 46 #else
+29
arch/arm64/include/asm/percpu.h
··· 16 16 #ifndef __ASM_PERCPU_H 17 17 #define __ASM_PERCPU_H 18 18 19 + #include <linux/preempt.h> 20 + 19 21 #include <asm/alternative.h> 22 + #include <asm/cmpxchg.h> 20 23 #include <asm/stack_pointer.h> 21 24 22 25 static inline void set_my_cpu_offset(unsigned long off) ··· 199 196 200 197 return ret; 201 198 } 199 + 200 + /* this_cpu_cmpxchg */ 201 + #define _protect_cmpxchg_local(pcp, o, n) \ 202 + ({ \ 203 + typeof(*raw_cpu_ptr(&(pcp))) __ret; \ 204 + preempt_disable(); \ 205 + __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ 206 + preempt_enable(); \ 207 + __ret; \ 208 + }) 209 + 210 + #define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 211 + #define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 212 + #define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 213 + #define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) 214 + 215 + #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ 216 + ({ \ 217 + int __ret; \ 218 + preempt_disable(); \ 219 + __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ 220 + raw_cpu_ptr(&(ptr2)), \ 221 + o1, o2, n1, n2); \ 222 + preempt_enable(); \ 223 + __ret; \ 224 + }) 202 225 203 226 #define _percpu_read(pcp) \ 204 227 ({ \
+1
arch/arm64/include/asm/pgtable-hwdef.h
··· 291 291 #define TCR_TBI0 (UL(1) << 37) 292 292 #define TCR_HA (UL(1) << 39) 293 293 #define TCR_HD (UL(1) << 40) 294 + #define TCR_NFD1 (UL(1) << 54) 294 295 295 296 /* 296 297 * TTBR.
+30 -17
arch/arm64/include/asm/processor.h
··· 34 34 35 35 #ifdef __KERNEL__ 36 36 37 + #include <linux/build_bug.h> 38 + #include <linux/stddef.h> 37 39 #include <linux/string.h> 38 40 39 41 #include <asm/alternative.h> 40 - #include <asm/fpsimd.h> 42 + #include <asm/cpufeature.h> 41 43 #include <asm/hw_breakpoint.h> 42 44 #include <asm/lse.h> 43 45 #include <asm/pgtable-hwdef.h> ··· 105 103 106 104 struct thread_struct { 107 105 struct cpu_context cpu_context; /* cpu context */ 108 - unsigned long tp_value; /* TLS register */ 109 - #ifdef CONFIG_COMPAT 110 - unsigned long tp2_value; 111 - #endif 112 - struct fpsimd_state fpsimd_state; 106 + 107 + /* 108 + * Whitelisted fields for hardened usercopy: 109 + * Maintainers must ensure manually that this contains no 110 + * implicit padding. 111 + */ 112 + struct { 113 + unsigned long tp_value; /* TLS register */ 114 + unsigned long tp2_value; 115 + struct user_fpsimd_state fpsimd_state; 116 + } uw; 117 + 118 + unsigned int fpsimd_cpu; 113 119 void *sve_state; /* SVE registers, if any */ 114 120 unsigned int sve_vl; /* SVE vector length */ 115 121 unsigned int sve_vl_onexec; /* SVE vl after next exec */ ··· 126 116 struct debug_info debug; /* debugging */ 127 117 }; 128 118 129 - /* 130 - * Everything usercopied to/from thread_struct is statically-sized, so 131 - * no hardened usercopy whitelist is needed. 
132 - */ 133 119 static inline void arch_thread_struct_whitelist(unsigned long *offset, 134 120 unsigned long *size) 135 121 { 136 - *offset = *size = 0; 122 + /* Verify that there is no padding among the whitelisted fields: */ 123 + BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) != 124 + sizeof_field(struct thread_struct, uw.tp_value) + 125 + sizeof_field(struct thread_struct, uw.tp2_value) + 126 + sizeof_field(struct thread_struct, uw.fpsimd_state)); 127 + 128 + *offset = offsetof(struct thread_struct, uw); 129 + *size = sizeof_field(struct thread_struct, uw); 137 130 } 138 131 139 132 #ifdef CONFIG_COMPAT ··· 144 131 ({ \ 145 132 unsigned long *__tls; \ 146 133 if (is_compat_thread(task_thread_info(t))) \ 147 - __tls = &(t)->thread.tp2_value; \ 134 + __tls = &(t)->thread.uw.tp2_value; \ 148 135 else \ 149 - __tls = &(t)->thread.tp_value; \ 136 + __tls = &(t)->thread.uw.tp_value; \ 150 137 __tls; \ 151 138 }) 152 139 #else 153 - #define task_user_tls(t) (&(t)->thread.tp_value) 140 + #define task_user_tls(t) (&(t)->thread.uw.tp_value) 154 141 #endif 155 142 156 143 /* Sync TPIDR_EL0 back to thread_struct for current */ ··· 240 227 241 228 #endif 242 229 243 - int cpu_enable_pan(void *__unused); 244 - int cpu_enable_cache_maint_trap(void *__unused); 245 - int cpu_clear_disr(void *__unused); 230 + void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused); 231 + void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused); 232 + void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused); 246 233 247 234 /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ 248 235 #define SVE_SET_VL(arg) sve_set_current_vl(arg)
+3
arch/arm64/include/asm/sysreg.h
··· 490 490 #define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0) 491 491 492 492 /* id_aa64isar0 */ 493 + #define ID_AA64ISAR0_TS_SHIFT 52 493 494 #define ID_AA64ISAR0_FHM_SHIFT 48 494 495 #define ID_AA64ISAR0_DP_SHIFT 44 495 496 #define ID_AA64ISAR0_SM4_SHIFT 40 ··· 512 511 /* id_aa64pfr0 */ 513 512 #define ID_AA64PFR0_CSV3_SHIFT 60 514 513 #define ID_AA64PFR0_CSV2_SHIFT 56 514 + #define ID_AA64PFR0_DIT_SHIFT 48 515 515 #define ID_AA64PFR0_SVE_SHIFT 32 516 516 #define ID_AA64PFR0_RAS_SHIFT 28 517 517 #define ID_AA64PFR0_GIC_SHIFT 24 ··· 570 568 #define ID_AA64MMFR1_VMIDBITS_16 2 571 569 572 570 /* id_aa64mmfr2 */ 571 + #define ID_AA64MMFR2_AT_SHIFT 32 573 572 #define ID_AA64MMFR2_LVA_SHIFT 16 574 573 #define ID_AA64MMFR2_IESB_SHIFT 12 575 574 #define ID_AA64MMFR2_LSM_SHIFT 8
-11
arch/arm64/include/asm/system_misc.h
··· 45 45 46 46 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 47 47 48 - #define show_unhandled_signals_ratelimited() \ 49 - ({ \ 50 - static DEFINE_RATELIMIT_STATE(_rs, \ 51 - DEFAULT_RATELIMIT_INTERVAL, \ 52 - DEFAULT_RATELIMIT_BURST); \ 53 - bool __show_ratelimited = false; \ 54 - if (show_unhandled_signals && __ratelimit(&_rs)) \ 55 - __show_ratelimited = true; \ 56 - __show_ratelimited; \ 57 - }) 58 - 59 48 int handle_guest_sea(phys_addr_t addr, unsigned int esr); 60 49 61 50 #endif /* __ASSEMBLY__ */
+17 -8
arch/arm64/include/asm/tlbflush.h
··· 60 60 __tlbi(op, (arg) | USER_ASID_FLAG); \ 61 61 } while (0) 62 62 63 + /* This macro creates a properly formatted VA operand for the TLBI */ 64 + #define __TLBI_VADDR(addr, asid) \ 65 + ({ \ 66 + unsigned long __ta = (addr) >> 12; \ 67 + __ta &= GENMASK_ULL(43, 0); \ 68 + __ta |= (unsigned long)(asid) << 48; \ 69 + __ta; \ 70 + }) 71 + 63 72 /* 64 73 * TLB Management 65 74 * ============== ··· 126 117 127 118 static inline void flush_tlb_mm(struct mm_struct *mm) 128 119 { 129 - unsigned long asid = ASID(mm) << 48; 120 + unsigned long asid = __TLBI_VADDR(0, ASID(mm)); 130 121 131 122 dsb(ishst); 132 123 __tlbi(aside1is, asid); ··· 137 128 static inline void flush_tlb_page(struct vm_area_struct *vma, 138 129 unsigned long uaddr) 139 130 { 140 - unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48); 131 + unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); 141 132 142 133 dsb(ishst); 143 134 __tlbi(vale1is, addr); ··· 155 146 unsigned long start, unsigned long end, 156 147 bool last_level) 157 148 { 158 - unsigned long asid = ASID(vma->vm_mm) << 48; 149 + unsigned long asid = ASID(vma->vm_mm); 159 150 unsigned long addr; 160 151 161 152 if ((end - start) > MAX_TLB_RANGE) { ··· 163 154 return; 164 155 } 165 156 166 - start = asid | (start >> 12); 167 - end = asid | (end >> 12); 157 + start = __TLBI_VADDR(start, asid); 158 + end = __TLBI_VADDR(end, asid); 168 159 169 160 dsb(ishst); 170 161 for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) { ··· 194 185 return; 195 186 } 196 187 197 - start >>= 12; 198 - end >>= 12; 188 + start = __TLBI_VADDR(start, 0); 189 + end = __TLBI_VADDR(end, 0); 199 190 200 191 dsb(ishst); 201 192 for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) ··· 211 202 static inline void __flush_tlb_pgtable(struct mm_struct *mm, 212 203 unsigned long uaddr) 213 204 { 214 - unsigned long addr = uaddr >> 12 | (ASID(mm) << 48); 205 + unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm)); 215 206 216 207 
__tlbi(vae1is, addr); 217 208 __tlbi_user(vae1is, addr);
+4 -4
arch/arm64/include/asm/traps.h
··· 35 35 36 36 void register_undef_hook(struct undef_hook *hook); 37 37 void unregister_undef_hook(struct undef_hook *hook); 38 - void force_signal_inject(int signal, int code, struct pt_regs *regs, 39 - unsigned long address); 40 - 41 - void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr); 38 + void force_signal_inject(int signal, int code, unsigned long address); 39 + void arm64_notify_segfault(unsigned long addr); 40 + void arm64_force_sig_info(struct siginfo *info, const char *str, 41 + struct task_struct *tsk); 42 42 43 43 /* 44 44 * Move regs->pc to next instruction and do necessary setup before it
-6
arch/arm64/include/asm/virt.h
··· 102 102 return false; 103 103 } 104 104 105 - #ifdef CONFIG_ARM64_VHE 106 - extern void verify_cpu_run_el(void); 107 - #else 108 - static inline void verify_cpu_run_el(void) {} 109 - #endif 110 - 111 105 #endif /* __ASSEMBLY__ */ 112 106 113 107 #endif /* ! __ASM__VIRT_H */
+4
arch/arm64/include/uapi/asm/hwcap.h
··· 44 44 #define HWCAP_SHA512 (1 << 21) 45 45 #define HWCAP_SVE (1 << 22) 46 46 #define HWCAP_ASIMDFHM (1 << 23) 47 + #define HWCAP_DIT (1 << 24) 48 + #define HWCAP_USCAT (1 << 25) 49 + #define HWCAP_ILRCPC (1 << 26) 50 + #define HWCAP_FLAGM (1 << 27) 47 51 48 52 #endif /* _UAPI__ASM_HWCAP_H */
-21
arch/arm64/include/uapi/asm/siginfo.h
··· 21 21 22 22 #include <asm-generic/siginfo.h> 23 23 24 - /* 25 - * SIGFPE si_codes 26 - */ 27 - #ifdef __KERNEL__ 28 - #define FPE_FIXME 0 /* Broken dup of SI_USER */ 29 - #endif /* __KERNEL__ */ 30 - 31 - /* 32 - * SIGBUS si_codes 33 - */ 34 - #ifdef __KERNEL__ 35 - #define BUS_FIXME 0 /* Broken dup of SI_USER */ 36 - #endif /* __KERNEL__ */ 37 - 38 - /* 39 - * SIGTRAP si_codes 40 - */ 41 - #ifdef __KERNEL__ 42 - #define TRAP_FIXME 0 /* Broken dup of SI_USER */ 43 - #endif /* __KERNEL__ */ 44 - 45 24 #endif
+1 -1
arch/arm64/kernel/armv8_deprecated.c
··· 429 429 430 430 fault: 431 431 pr_debug("SWP{B} emulation: access caused memory abort!\n"); 432 - arm64_notify_segfault(regs, address); 432 + arm64_notify_segfault(address); 433 433 434 434 return 0; 435 435 }
+179 -137
arch/arm64/kernel/cpu_errata.c
··· 24 24 static bool __maybe_unused 25 25 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) 26 26 { 27 + const struct arm64_midr_revidr *fix; 28 + u32 midr = read_cpuid_id(), revidr; 29 + 27 30 WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 28 - return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model, 29 - entry->midr_range_min, 30 - entry->midr_range_max); 31 + if (!is_midr_in_range(midr, &entry->midr_range)) 32 + return false; 33 + 34 + midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; 35 + revidr = read_cpuid(REVIDR_EL1); 36 + for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++) 37 + if (midr == fix->midr_rv && (revidr & fix->revidr_mask)) 38 + return false; 39 + 40 + return true; 41 + } 42 + 43 + static bool __maybe_unused 44 + is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, 45 + int scope) 46 + { 47 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 48 + return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); 31 49 } 32 50 33 51 static bool __maybe_unused ··· 59 41 model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | 60 42 MIDR_ARCHITECTURE_MASK; 61 43 62 - return model == entry->midr_model; 44 + return model == entry->midr_range.model; 63 45 } 64 46 65 47 static bool ··· 71 53 (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask); 72 54 } 73 55 74 - static int cpu_enable_trap_ctr_access(void *__unused) 56 + static void 57 + cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused) 75 58 { 76 59 /* Clear SCTLR_EL1.UCT */ 77 60 config_sctlr_el1(SCTLR_EL1_UCT, 0); 78 - return 0; 79 61 } 80 62 81 63 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR ··· 179 161 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); 180 162 } 181 163 182 - static int enable_smccc_arch_workaround_1(void *data) 164 + static void 165 + enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) 183 166 { 184 - const struct arm64_cpu_capabilities *entry 
= data; 185 167 bp_hardening_cb_t cb; 186 168 void *smccc_start, *smccc_end; 187 169 struct arm_smccc_res res; 188 170 189 171 if (!entry->matches(entry, SCOPE_LOCAL_CPU)) 190 - return 0; 172 + return; 191 173 192 174 if (psci_ops.smccc_version == SMCCC_VERSION_1_0) 193 - return 0; 175 + return; 194 176 195 177 switch (psci_ops.conduit) { 196 178 case PSCI_CONDUIT_HVC: 197 179 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 198 180 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 199 181 if ((int)res.a0 < 0) 200 - return 0; 182 + return; 201 183 cb = call_hvc_arch_workaround_1; 202 184 smccc_start = __smccc_workaround_1_hvc_start; 203 185 smccc_end = __smccc_workaround_1_hvc_end; ··· 207 189 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, 208 190 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 209 191 if ((int)res.a0 < 0) 210 - return 0; 192 + return; 211 193 cb = call_smc_arch_workaround_1; 212 194 smccc_start = __smccc_workaround_1_smc_start; 213 195 smccc_end = __smccc_workaround_1_smc_end; 214 196 break; 215 197 216 198 default: 217 - return 0; 199 + return; 218 200 } 219 201 220 202 install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); 221 203 222 - return 0; 204 + return; 223 205 } 224 206 225 207 static void qcom_link_stack_sanitization(void) ··· 234 216 : "=&r" (tmp)); 235 217 } 236 218 237 - static int qcom_enable_link_stack_sanitization(void *data) 219 + static void 220 + qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry) 238 221 { 239 - const struct arm64_cpu_capabilities *entry = data; 240 - 241 222 install_bp_hardening_cb(entry, qcom_link_stack_sanitization, 242 223 __qcom_hyp_sanitize_link_stack_start, 243 224 __qcom_hyp_sanitize_link_stack_end); 244 - 245 - return 0; 246 225 } 247 226 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ 248 227 249 - #define MIDR_RANGE(model, min, max) \ 250 - .def_scope = SCOPE_LOCAL_CPU, \ 251 - .matches = is_affected_midr_range, \ 252 - .midr_model = model, \ 253 - .midr_range_min = min, \ 254 - 
.midr_range_max = max 228 + #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ 229 + .matches = is_affected_midr_range, \ 230 + .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max) 255 231 256 - #define MIDR_ALL_VERSIONS(model) \ 257 - .def_scope = SCOPE_LOCAL_CPU, \ 258 - .matches = is_affected_midr_range, \ 259 - .midr_model = model, \ 260 - .midr_range_min = 0, \ 261 - .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK) 232 + #define CAP_MIDR_ALL_VERSIONS(model) \ 233 + .matches = is_affected_midr_range, \ 234 + .midr_range = MIDR_ALL_VERSIONS(model) 235 + 236 + #define MIDR_FIXED(rev, revidr_mask) \ 237 + .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}} 238 + 239 + #define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ 240 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ 241 + CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) 242 + 243 + #define CAP_MIDR_RANGE_LIST(list) \ 244 + .matches = is_affected_midr_range_list, \ 245 + .midr_range_list = list 246 + 247 + /* Errata affecting a range of revisions of given model variant */ 248 + #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \ 249 + ERRATA_MIDR_RANGE(m, var, r_min, var, r_max) 250 + 251 + /* Errata affecting a single variant/revision of a model */ 252 + #define ERRATA_MIDR_REV(model, var, rev) \ 253 + ERRATA_MIDR_RANGE(model, var, rev, var, rev) 254 + 255 + /* Errata affecting all variants/revisions of a given a model */ 256 + #define ERRATA_MIDR_ALL_VERSIONS(model) \ 257 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ 258 + CAP_MIDR_ALL_VERSIONS(model) 259 + 260 + /* Errata affecting a list of midr ranges, with same work around */ 261 + #define ERRATA_MIDR_RANGE_LIST(midr_list) \ 262 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \ 263 + CAP_MIDR_RANGE_LIST(midr_list) 264 + 265 + /* 266 + * Generic helper for handling capabilties with multiple (match,enable) pairs 267 + * of call backs, sharing the same capability bit. 
268 + * Iterate over each entry to see if at least one matches. 269 + */ 270 + static bool __maybe_unused 271 + multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope) 272 + { 273 + const struct arm64_cpu_capabilities *caps; 274 + 275 + for (caps = entry->match_list; caps->matches; caps++) 276 + if (caps->matches(caps, scope)) 277 + return true; 278 + 279 + return false; 280 + } 281 + 282 + /* 283 + * Take appropriate action for all matching entries in the shared capability 284 + * entry. 285 + */ 286 + static void __maybe_unused 287 + multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) 288 + { 289 + const struct arm64_cpu_capabilities *caps; 290 + 291 + for (caps = entry->match_list; caps->matches; caps++) 292 + if (caps->matches(caps, SCOPE_LOCAL_CPU) && 293 + caps->cpu_enable) 294 + caps->cpu_enable(caps); 295 + } 296 + 297 + #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 298 + 299 + /* 300 + * List of CPUs where we need to issue a psci call to 301 + * harden the branch predictor. 
302 + */ 303 + static const struct midr_range arm64_bp_harden_smccc_cpus[] = { 304 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 305 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 306 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 307 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), 308 + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 309 + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 310 + {}, 311 + }; 312 + 313 + static const struct midr_range qcom_bp_harden_cpus[] = { 314 + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), 315 + MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), 316 + {}, 317 + }; 318 + 319 + static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = { 320 + { 321 + CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), 322 + .cpu_enable = enable_smccc_arch_workaround_1, 323 + }, 324 + { 325 + CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus), 326 + .cpu_enable = qcom_enable_link_stack_sanitization, 327 + }, 328 + {}, 329 + }; 330 + 331 + #endif 262 332 263 333 const struct arm64_cpu_capabilities arm64_errata[] = { 264 334 #if defined(CONFIG_ARM64_ERRATUM_826319) || \ ··· 356 250 /* Cortex-A53 r0p[012] */ 357 251 .desc = "ARM errata 826319, 827319, 824069", 358 252 .capability = ARM64_WORKAROUND_CLEAN_CACHE, 359 - MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02), 360 - .enable = cpu_enable_cache_maint_trap, 253 + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2), 254 + .cpu_enable = cpu_enable_cache_maint_trap, 361 255 }, 362 256 #endif 363 257 #ifdef CONFIG_ARM64_ERRATUM_819472 ··· 365 259 /* Cortex-A53 r0p[01] */ 366 260 .desc = "ARM errata 819472", 367 261 .capability = ARM64_WORKAROUND_CLEAN_CACHE, 368 - MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01), 369 - .enable = cpu_enable_cache_maint_trap, 262 + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1), 263 + .cpu_enable = cpu_enable_cache_maint_trap, 370 264 }, 371 265 #endif 372 266 #ifdef CONFIG_ARM64_ERRATUM_832075 ··· 374 268 /* Cortex-A57 r0p0 - r1p2 */ 375 269 .desc = "ARM erratum 832075", 376 270 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, 377 - MIDR_RANGE(MIDR_CORTEX_A57, 
378 - MIDR_CPU_VAR_REV(0, 0), 379 - MIDR_CPU_VAR_REV(1, 2)), 271 + ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 272 + 0, 0, 273 + 1, 2), 380 274 }, 381 275 #endif 382 276 #ifdef CONFIG_ARM64_ERRATUM_834220 ··· 384 278 /* Cortex-A57 r0p0 - r1p2 */ 385 279 .desc = "ARM erratum 834220", 386 280 .capability = ARM64_WORKAROUND_834220, 387 - MIDR_RANGE(MIDR_CORTEX_A57, 388 - MIDR_CPU_VAR_REV(0, 0), 389 - MIDR_CPU_VAR_REV(1, 2)), 281 + ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 282 + 0, 0, 283 + 1, 2), 284 + }, 285 + #endif 286 + #ifdef CONFIG_ARM64_ERRATUM_843419 287 + { 288 + /* Cortex-A53 r0p[01234] */ 289 + .desc = "ARM erratum 843419", 290 + .capability = ARM64_WORKAROUND_843419, 291 + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), 292 + MIDR_FIXED(0x4, BIT(8)), 390 293 }, 391 294 #endif 392 295 #ifdef CONFIG_ARM64_ERRATUM_845719 ··· 403 288 /* Cortex-A53 r0p[01234] */ 404 289 .desc = "ARM erratum 845719", 405 290 .capability = ARM64_WORKAROUND_845719, 406 - MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), 291 + ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4), 407 292 }, 408 293 #endif 409 294 #ifdef CONFIG_CAVIUM_ERRATUM_23154 ··· 411 296 /* Cavium ThunderX, pass 1.x */ 412 297 .desc = "Cavium erratum 23154", 413 298 .capability = ARM64_WORKAROUND_CAVIUM_23154, 414 - MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01), 299 + ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), 415 300 }, 416 301 #endif 417 302 #ifdef CONFIG_CAVIUM_ERRATUM_27456 ··· 419 304 /* Cavium ThunderX, T88 pass 1.x - 2.1 */ 420 305 .desc = "Cavium erratum 27456", 421 306 .capability = ARM64_WORKAROUND_CAVIUM_27456, 422 - MIDR_RANGE(MIDR_THUNDERX, 423 - MIDR_CPU_VAR_REV(0, 0), 424 - MIDR_CPU_VAR_REV(1, 1)), 307 + ERRATA_MIDR_RANGE(MIDR_THUNDERX, 308 + 0, 0, 309 + 1, 1), 425 310 }, 426 311 { 427 312 /* Cavium ThunderX, T81 pass 1.0 */ 428 313 .desc = "Cavium erratum 27456", 429 314 .capability = ARM64_WORKAROUND_CAVIUM_27456, 430 - MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00), 315 + ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0), 431 316 }, 432 
317 #endif 433 318 #ifdef CONFIG_CAVIUM_ERRATUM_30115 ··· 435 320 /* Cavium ThunderX, T88 pass 1.x - 2.2 */ 436 321 .desc = "Cavium erratum 30115", 437 322 .capability = ARM64_WORKAROUND_CAVIUM_30115, 438 - MIDR_RANGE(MIDR_THUNDERX, 0x00, 439 - (1 << MIDR_VARIANT_SHIFT) | 2), 323 + ERRATA_MIDR_RANGE(MIDR_THUNDERX, 324 + 0, 0, 325 + 1, 2), 440 326 }, 441 327 { 442 328 /* Cavium ThunderX, T81 pass 1.0 - 1.2 */ 443 329 .desc = "Cavium erratum 30115", 444 330 .capability = ARM64_WORKAROUND_CAVIUM_30115, 445 - MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02), 331 + ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2), 446 332 }, 447 333 { 448 334 /* Cavium ThunderX, T83 pass 1.0 */ 449 335 .desc = "Cavium erratum 30115", 450 336 .capability = ARM64_WORKAROUND_CAVIUM_30115, 451 - MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00), 337 + ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0), 452 338 }, 453 339 #endif 454 340 { 455 341 .desc = "Mismatched cache line size", 456 342 .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE, 457 343 .matches = has_mismatched_cache_line_size, 458 - .def_scope = SCOPE_LOCAL_CPU, 459 - .enable = cpu_enable_trap_ctr_access, 344 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 345 + .cpu_enable = cpu_enable_trap_ctr_access, 460 346 }, 461 347 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 462 348 { 463 349 .desc = "Qualcomm Technologies Falkor erratum 1003", 464 350 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, 465 - MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 466 - MIDR_CPU_VAR_REV(0, 0), 467 - MIDR_CPU_VAR_REV(0, 0)), 351 + ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), 468 352 }, 469 353 { 470 354 .desc = "Qualcomm Technologies Kryo erratum 1003", 471 355 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, 472 - .def_scope = SCOPE_LOCAL_CPU, 473 - .midr_model = MIDR_QCOM_KRYO, 356 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 357 + .midr_range.model = MIDR_QCOM_KRYO, 474 358 .matches = is_kryo_midr, 475 359 }, 476 360 #endif ··· 477 363 { 478 364 .desc = "Qualcomm Technologies Falkor erratum 
1009", 479 365 .capability = ARM64_WORKAROUND_REPEAT_TLBI, 480 - MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 481 - MIDR_CPU_VAR_REV(0, 0), 482 - MIDR_CPU_VAR_REV(0, 0)), 366 + ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), 483 367 }, 484 368 #endif 485 369 #ifdef CONFIG_ARM64_ERRATUM_858921 ··· 485 373 /* Cortex-A73 all versions */ 486 374 .desc = "ARM erratum 858921", 487 375 .capability = ARM64_WORKAROUND_858921, 488 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 376 + ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 489 377 }, 490 378 #endif 491 379 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 492 380 { 493 381 .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 494 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 495 - .enable = enable_smccc_arch_workaround_1, 496 - }, 497 - { 498 - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 499 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 500 - .enable = enable_smccc_arch_workaround_1, 501 - }, 502 - { 503 - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 504 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 505 - .enable = enable_smccc_arch_workaround_1, 506 - }, 507 - { 508 - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 509 - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), 510 - .enable = enable_smccc_arch_workaround_1, 511 - }, 512 - { 513 - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 514 - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), 515 - .enable = qcom_enable_link_stack_sanitization, 382 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, 383 + .matches = multi_entry_cap_matches, 384 + .cpu_enable = multi_entry_cap_cpu_enable, 385 + .match_list = arm64_bp_harden_list, 516 386 }, 517 387 { 518 388 .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, 519 - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), 520 - }, 521 - { 522 - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 523 - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), 524 - .enable = qcom_enable_link_stack_sanitization, 525 - }, 526 - { 527 - .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, 528 - MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), 529 - }, 530 - { 531 - .capability = 
ARM64_HARDEN_BRANCH_PREDICTOR, 532 - MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 533 - .enable = enable_smccc_arch_workaround_1, 534 - }, 535 - { 536 - .capability = ARM64_HARDEN_BRANCH_PREDICTOR, 537 - MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 538 - .enable = enable_smccc_arch_workaround_1, 389 + ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus), 539 390 }, 540 391 #endif 541 392 { 542 393 } 543 394 }; 544 - 545 - /* 546 - * The CPU Errata work arounds are detected and applied at boot time 547 - * and the related information is freed soon after. If the new CPU requires 548 - * an errata not detected at boot, fail this CPU. 549 - */ 550 - void verify_local_cpu_errata_workarounds(void) 551 - { 552 - const struct arm64_cpu_capabilities *caps = arm64_errata; 553 - 554 - for (; caps->matches; caps++) { 555 - if (cpus_have_cap(caps->capability)) { 556 - if (caps->enable) 557 - caps->enable((void *)caps); 558 - } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) { 559 - pr_crit("CPU%d: Requires work around for %s, not detected" 560 - " at boot time\n", 561 - smp_processor_id(), 562 - caps->desc ? : "an erratum"); 563 - cpu_die_early(); 564 - } 565 - } 566 - } 567 - 568 - void update_cpu_errata_workarounds(void) 569 - { 570 - update_cpu_capabilities(arm64_errata, "enabling workaround for"); 571 - } 572 - 573 - void __init enable_errata_workarounds(void) 574 - { 575 - enable_cpu_capabilities(arm64_errata); 576 - }
+338 -100
arch/arm64/kernel/cpufeature.c
··· 123 123 * sync with the documentation of the CPU feature register ABI. 124 124 */ 125 125 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { 126 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0), 126 127 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0), 127 128 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0), 128 129 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0), ··· 149 148 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { 150 149 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), 151 150 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), 151 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0), 152 152 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), 153 153 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), 154 154 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0), ··· 192 190 }; 193 191 194 192 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = { 193 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0), 195 194 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0), 196 195 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0), 197 196 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0), ··· 202 199 }; 203 200 204 201 static const struct arm64_ftr_bits ftr_ctr[] = { 205 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ 206 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */ 207 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */ 208 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */ 209 - 
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */ 210 - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */ 202 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ 203 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1), 204 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1), 205 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0), 206 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0), 207 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1), 211 208 /* 212 209 * Linux can handle differing I-cache policies. Userspace JITs will 213 210 * make use of *minLine. ··· 509 506 reg->user_mask = user_mask; 510 507 } 511 508 509 + extern const struct arm64_cpu_capabilities arm64_errata[]; 510 + static void __init setup_boot_cpu_capabilities(void); 511 + 512 512 void __init init_cpu_features(struct cpuinfo_arm64 *info) 513 513 { 514 514 /* Before we start using the tables, make sure it is sorted */ ··· 554 548 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr); 555 549 sve_init_vq_map(); 556 550 } 551 + 552 + /* 553 + * Detect and enable early CPU capabilities based on the boot CPU, 554 + * after we have initialised the CPU feature infrastructure. 
555 + */ 556 + setup_boot_cpu_capabilities(); 557 557 } 558 558 559 559 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new) ··· 838 826 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); 839 827 } 840 828 841 - static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) 842 - { 843 - return is_kernel_in_hyp_mode(); 844 - } 845 - 846 829 static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry, 847 830 int __unused) 848 831 { ··· 859 852 ID_AA64PFR0_FP_SHIFT) < 0; 860 853 } 861 854 855 + static bool has_cache_idc(const struct arm64_cpu_capabilities *entry, 856 + int __unused) 857 + { 858 + return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT); 859 + } 860 + 861 + static bool has_cache_dic(const struct arm64_cpu_capabilities *entry, 862 + int __unused) 863 + { 864 + return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT); 865 + } 866 + 862 867 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 863 868 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ 864 869 865 870 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, 866 - int __unused) 871 + int scope) 867 872 { 873 + /* List of CPUs that are not vulnerable and don't need KPTI */ 874 + static const struct midr_range kpti_safe_list[] = { 875 + MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 876 + MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 877 + }; 868 878 char const *str = "command line option"; 869 - u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 870 879 871 880 /* 872 881 * For reasons that aren't entirely clear, enabling KPTI on Cavium ··· 906 883 return true; 907 884 908 885 /* Don't force KPTI for CPUs that are not vulnerable */ 909 - switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) { 910 - case MIDR_CAVIUM_THUNDERX2: 911 - case MIDR_BRCM_VULCAN: 886 + if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) 912 887 return false; 913 - } 914 888 915 889 /* Defer to CPU feature registers */ 916 - return 
!cpuid_feature_extract_unsigned_field(pfr0, 917 - ID_AA64PFR0_CSV3_SHIFT); 890 + return !has_cpuid_feature(entry, scope); 918 891 } 919 892 920 - static int kpti_install_ng_mappings(void *__unused) 893 + static void 894 + kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) 921 895 { 922 896 typedef void (kpti_remap_fn)(int, int, phys_addr_t); 923 897 extern kpti_remap_fn idmap_kpti_install_ng_mappings; ··· 924 904 int cpu = smp_processor_id(); 925 905 926 906 if (kpti_applied) 927 - return 0; 907 + return; 928 908 929 909 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); 930 910 ··· 935 915 if (!cpu) 936 916 kpti_applied = true; 937 917 938 - return 0; 918 + return; 939 919 } 940 920 941 921 static int __init parse_kpti(char *str) ··· 952 932 __setup("kpti=", parse_kpti); 953 933 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ 954 934 955 - static int cpu_copy_el2regs(void *__unused) 935 + #ifdef CONFIG_ARM64_HW_AFDBM 936 + static inline void __cpu_enable_hw_dbm(void) 937 + { 938 + u64 tcr = read_sysreg(tcr_el1) | TCR_HD; 939 + 940 + write_sysreg(tcr, tcr_el1); 941 + isb(); 942 + } 943 + 944 + static bool cpu_has_broken_dbm(void) 945 + { 946 + /* List of CPUs which have broken DBM support. 
*/ 947 + static const struct midr_range cpus[] = { 948 + #ifdef CONFIG_ARM64_ERRATUM_1024718 949 + MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0 950 + #endif 951 + {}, 952 + }; 953 + 954 + return is_midr_in_range_list(read_cpuid_id(), cpus); 955 + } 956 + 957 + static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap) 958 + { 959 + return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) && 960 + !cpu_has_broken_dbm(); 961 + } 962 + 963 + static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap) 964 + { 965 + if (cpu_can_use_dbm(cap)) 966 + __cpu_enable_hw_dbm(); 967 + } 968 + 969 + static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap, 970 + int __unused) 971 + { 972 + static bool detected = false; 973 + /* 974 + * DBM is a non-conflicting feature. i.e, the kernel can safely 975 + * run a mix of CPUs with and without the feature. So, we 976 + * unconditionally enable the capability to allow any late CPU 977 + * to use the feature. We only enable the control bits on the 978 + * CPU, if it actually supports. 979 + * 980 + * We have to make sure we print the "feature" detection only 981 + * when at least one CPU actually uses it. So check if this CPU 982 + * can actually use it and print the message exactly once. 983 + * 984 + * This is safe as all CPUs (including secondary CPUs - due to the 985 + * LOCAL_CPU scope - and the hotplugged CPUs - via verification) 986 + * goes through the "matches" check exactly once. Also if a CPU 987 + * matches the criteria, it is guaranteed that the CPU will turn 988 + * the DBM on, as the capability is unconditionally enabled. 
989 + */ 990 + if (!detected && cpu_can_use_dbm(cap)) { 991 + detected = true; 992 + pr_info("detected: Hardware dirty bit management\n"); 993 + } 994 + 995 + return true; 996 + } 997 + 998 + #endif 999 + 1000 + #ifdef CONFIG_ARM64_VHE 1001 + static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused) 1002 + { 1003 + return is_kernel_in_hyp_mode(); 1004 + } 1005 + 1006 + static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) 956 1007 { 957 1008 /* 958 1009 * Copy register values that aren't redirected by hardware. ··· 1035 944 */ 1036 945 if (!alternatives_applied) 1037 946 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); 1038 - 1039 - return 0; 1040 947 } 948 + #endif 1041 949 1042 950 static const struct arm64_cpu_capabilities arm64_features[] = { 1043 951 { 1044 952 .desc = "GIC system register CPU interface", 1045 953 .capability = ARM64_HAS_SYSREG_GIC_CPUIF, 1046 - .def_scope = SCOPE_SYSTEM, 954 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1047 955 .matches = has_useable_gicv3_cpuif, 1048 956 .sys_reg = SYS_ID_AA64PFR0_EL1, 1049 957 .field_pos = ID_AA64PFR0_GIC_SHIFT, ··· 1053 963 { 1054 964 .desc = "Privileged Access Never", 1055 965 .capability = ARM64_HAS_PAN, 1056 - .def_scope = SCOPE_SYSTEM, 966 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1057 967 .matches = has_cpuid_feature, 1058 968 .sys_reg = SYS_ID_AA64MMFR1_EL1, 1059 969 .field_pos = ID_AA64MMFR1_PAN_SHIFT, 1060 970 .sign = FTR_UNSIGNED, 1061 971 .min_field_value = 1, 1062 - .enable = cpu_enable_pan, 972 + .cpu_enable = cpu_enable_pan, 1063 973 }, 1064 974 #endif /* CONFIG_ARM64_PAN */ 1065 975 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) 1066 976 { 1067 977 .desc = "LSE atomic instructions", 1068 978 .capability = ARM64_HAS_LSE_ATOMICS, 1069 - .def_scope = SCOPE_SYSTEM, 979 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1070 980 .matches = has_cpuid_feature, 1071 981 .sys_reg = SYS_ID_AA64ISAR0_EL1, 1072 982 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT, ··· 
1077 987 { 1078 988 .desc = "Software prefetching using PRFM", 1079 989 .capability = ARM64_HAS_NO_HW_PREFETCH, 1080 - .def_scope = SCOPE_SYSTEM, 990 + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 1081 991 .matches = has_no_hw_prefetch, 1082 992 }, 1083 993 #ifdef CONFIG_ARM64_UAO 1084 994 { 1085 995 .desc = "User Access Override", 1086 996 .capability = ARM64_HAS_UAO, 1087 - .def_scope = SCOPE_SYSTEM, 997 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1088 998 .matches = has_cpuid_feature, 1089 999 .sys_reg = SYS_ID_AA64MMFR2_EL1, 1090 1000 .field_pos = ID_AA64MMFR2_UAO_SHIFT, ··· 1098 1008 #ifdef CONFIG_ARM64_PAN 1099 1009 { 1100 1010 .capability = ARM64_ALT_PAN_NOT_UAO, 1101 - .def_scope = SCOPE_SYSTEM, 1011 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1102 1012 .matches = cpufeature_pan_not_uao, 1103 1013 }, 1104 1014 #endif /* CONFIG_ARM64_PAN */ 1015 + #ifdef CONFIG_ARM64_VHE 1105 1016 { 1106 1017 .desc = "Virtualization Host Extensions", 1107 1018 .capability = ARM64_HAS_VIRT_HOST_EXTN, 1108 - .def_scope = SCOPE_SYSTEM, 1019 + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, 1109 1020 .matches = runs_at_el2, 1110 - .enable = cpu_copy_el2regs, 1021 + .cpu_enable = cpu_copy_el2regs, 1111 1022 }, 1023 + #endif /* CONFIG_ARM64_VHE */ 1112 1024 { 1113 1025 .desc = "32-bit EL0 Support", 1114 1026 .capability = ARM64_HAS_32BIT_EL0, 1115 - .def_scope = SCOPE_SYSTEM, 1027 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1116 1028 .matches = has_cpuid_feature, 1117 1029 .sys_reg = SYS_ID_AA64PFR0_EL1, 1118 1030 .sign = FTR_UNSIGNED, ··· 1124 1032 { 1125 1033 .desc = "Reduced HYP mapping offset", 1126 1034 .capability = ARM64_HYP_OFFSET_LOW, 1127 - .def_scope = SCOPE_SYSTEM, 1035 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1128 1036 .matches = hyp_offset_low, 1129 1037 }, 1130 1038 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 1131 1039 { 1132 1040 .desc = "Kernel page table isolation (KPTI)", 1133 1041 .capability = ARM64_UNMAP_KERNEL_AT_EL0, 1134 - .def_scope = SCOPE_SYSTEM, 1042 + .type = 
ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE, 1043 + /* 1044 + * The ID feature fields below are used to indicate that 1045 + * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for 1046 + * more details. 1047 + */ 1048 + .sys_reg = SYS_ID_AA64PFR0_EL1, 1049 + .field_pos = ID_AA64PFR0_CSV3_SHIFT, 1050 + .min_field_value = 1, 1135 1051 .matches = unmap_kernel_at_el0, 1136 - .enable = kpti_install_ng_mappings, 1052 + .cpu_enable = kpti_install_ng_mappings, 1137 1053 }, 1138 1054 #endif 1139 1055 { 1140 1056 /* FP/SIMD is not implemented */ 1141 1057 .capability = ARM64_HAS_NO_FPSIMD, 1142 - .def_scope = SCOPE_SYSTEM, 1058 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1143 1059 .min_field_value = 0, 1144 1060 .matches = has_no_fpsimd, 1145 1061 }, ··· 1155 1055 { 1156 1056 .desc = "Data cache clean to Point of Persistence", 1157 1057 .capability = ARM64_HAS_DCPOP, 1158 - .def_scope = SCOPE_SYSTEM, 1058 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1159 1059 .matches = has_cpuid_feature, 1160 1060 .sys_reg = SYS_ID_AA64ISAR1_EL1, 1161 1061 .field_pos = ID_AA64ISAR1_DPB_SHIFT, ··· 1165 1065 #ifdef CONFIG_ARM64_SVE 1166 1066 { 1167 1067 .desc = "Scalable Vector Extension", 1068 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1168 1069 .capability = ARM64_SVE, 1169 - .def_scope = SCOPE_SYSTEM, 1170 1070 .sys_reg = SYS_ID_AA64PFR0_EL1, 1171 1071 .sign = FTR_UNSIGNED, 1172 1072 .field_pos = ID_AA64PFR0_SVE_SHIFT, 1173 1073 .min_field_value = ID_AA64PFR0_SVE, 1174 1074 .matches = has_cpuid_feature, 1175 - .enable = sve_kernel_enable, 1075 + .cpu_enable = sve_kernel_enable, 1176 1076 }, 1177 1077 #endif /* CONFIG_ARM64_SVE */ 1178 1078 #ifdef CONFIG_ARM64_RAS_EXTN 1179 1079 { 1180 1080 .desc = "RAS Extension Support", 1181 1081 .capability = ARM64_HAS_RAS_EXTN, 1182 - .def_scope = SCOPE_SYSTEM, 1082 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1183 1083 .matches = has_cpuid_feature, 1184 1084 .sys_reg = SYS_ID_AA64PFR0_EL1, 1185 1085 .sign = FTR_UNSIGNED, 1186 1086 .field_pos = ID_AA64PFR0_RAS_SHIFT, 
1187 1087 .min_field_value = ID_AA64PFR0_RAS_V1, 1188 - .enable = cpu_clear_disr, 1088 + .cpu_enable = cpu_clear_disr, 1189 1089 }, 1190 1090 #endif /* CONFIG_ARM64_RAS_EXTN */ 1091 + { 1092 + .desc = "Data cache clean to the PoU not required for I/D coherence", 1093 + .capability = ARM64_HAS_CACHE_IDC, 1094 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1095 + .matches = has_cache_idc, 1096 + }, 1097 + { 1098 + .desc = "Instruction cache invalidation not required for I/D coherence", 1099 + .capability = ARM64_HAS_CACHE_DIC, 1100 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, 1101 + .matches = has_cache_dic, 1102 + }, 1103 + #ifdef CONFIG_ARM64_HW_AFDBM 1104 + { 1105 + /* 1106 + * Since we turn this on always, we don't want the user to 1107 + * think that the feature is available when it may not be. 1108 + * So hide the description. 1109 + * 1110 + * .desc = "Hardware pagetable Dirty Bit Management", 1111 + * 1112 + */ 1113 + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, 1114 + .capability = ARM64_HW_DBM, 1115 + .sys_reg = SYS_ID_AA64MMFR1_EL1, 1116 + .sign = FTR_UNSIGNED, 1117 + .field_pos = ID_AA64MMFR1_HADBS_SHIFT, 1118 + .min_field_value = 2, 1119 + .matches = has_hw_dbm, 1120 + .cpu_enable = cpu_enable_hw_dbm, 1121 + }, 1122 + #endif 1191 1123 {}, 1192 1124 }; 1193 1125 1194 - #define HWCAP_CAP(reg, field, s, min_value, type, cap) \ 1126 + #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \ 1195 1127 { \ 1196 1128 .desc = #cap, \ 1197 - .def_scope = SCOPE_SYSTEM, \ 1129 + .type = ARM64_CPUCAP_SYSTEM_FEATURE, \ 1198 1130 .matches = has_cpuid_feature, \ 1199 1131 .sys_reg = reg, \ 1200 1132 .field_pos = field, \ 1201 1133 .sign = s, \ 1202 1134 .min_field_value = min_value, \ 1203 - .hwcap_type = type, \ 1135 + .hwcap_type = cap_type, \ 1204 1136 .hwcap = cap, \ 1205 1137 } 1206 1138 ··· 1250 1118 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4), 1251 1119 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 
1, CAP_HWCAP, HWCAP_ASIMDDP), 1252 1120 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM), 1121 + HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM), 1253 1122 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP), 1254 1123 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP), 1255 1124 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD), 1256 1125 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP), 1126 + HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT), 1257 1127 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP), 1258 1128 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT), 1259 1129 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA), 1260 1130 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC), 1131 + HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC), 1132 + HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT), 1261 1133 #ifdef CONFIG_ARM64_SVE 1262 1134 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE), 1263 1135 #endif ··· 1329 1193 /* We support emulation of accesses to CPU ID feature registers */ 1330 1194 elf_hwcap |= HWCAP_CPUID; 1331 1195 for (; hwcaps->matches; hwcaps++) 1332 - if (hwcaps->matches(hwcaps, hwcaps->def_scope)) 1196 + if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps))) 1333 1197 cap_set_elf_hwcap(hwcaps); 1334 1198 } 1335 1199 ··· 1346 1210 return false; 1347 1211 1348 1212 for (caps = cap_array; 
caps->matches; caps++) 1349 - if (caps->capability == cap && 1350 - caps->matches(caps, SCOPE_LOCAL_CPU)) 1351 - return true; 1213 + if (caps->capability == cap) 1214 + return caps->matches(caps, SCOPE_LOCAL_CPU); 1215 + 1352 1216 return false; 1353 1217 } 1354 1218 1355 - void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, 1356 - const char *info) 1219 + static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps, 1220 + u16 scope_mask, const char *info) 1357 1221 { 1222 + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 1358 1223 for (; caps->matches; caps++) { 1359 - if (!caps->matches(caps, caps->def_scope)) 1224 + if (!(caps->type & scope_mask) || 1225 + !caps->matches(caps, cpucap_default_scope(caps))) 1360 1226 continue; 1361 1227 1362 1228 if (!cpus_have_cap(caps->capability) && caps->desc) ··· 1367 1229 } 1368 1230 } 1369 1231 1232 + static void update_cpu_capabilities(u16 scope_mask) 1233 + { 1234 + __update_cpu_capabilities(arm64_features, scope_mask, "detected:"); 1235 + __update_cpu_capabilities(arm64_errata, scope_mask, 1236 + "enabling workaround for"); 1237 + } 1238 + 1239 + static int __enable_cpu_capability(void *arg) 1240 + { 1241 + const struct arm64_cpu_capabilities *cap = arg; 1242 + 1243 + cap->cpu_enable(cap); 1244 + return 0; 1245 + } 1246 + 1370 1247 /* 1371 1248 * Run through the enabled capabilities and enable() it on all active 1372 1249 * CPUs 1373 1250 */ 1374 - void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps) 1251 + static void __init 1252 + __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps, 1253 + u16 scope_mask) 1375 1254 { 1255 + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 1376 1256 for (; caps->matches; caps++) { 1377 1257 unsigned int num = caps->capability; 1378 1258 1379 - if (!cpus_have_cap(num)) 1259 + if (!(caps->type & scope_mask) || !cpus_have_cap(num)) 1380 1260 continue; 1381 1261 1382 1262 /* Ensure cpus_have_const_cap(num) works */ 1383 1263 
static_branch_enable(&cpu_hwcap_keys[num]); 1384 1264 1385 - if (caps->enable) { 1265 + if (caps->cpu_enable) { 1386 1266 /* 1387 - * Use stop_machine() as it schedules the work allowing 1388 - * us to modify PSTATE, instead of on_each_cpu() which 1389 - * uses an IPI, giving us a PSTATE that disappears when 1390 - * we return. 1267 + * Capabilities with SCOPE_BOOT_CPU scope are finalised 1268 + * before any secondary CPU boots. Thus, each secondary 1269 + * will enable the capability as appropriate via 1270 + * check_local_cpu_capabilities(). The only exception is 1271 + * the boot CPU, for which the capability must be 1272 + * enabled here. This approach avoids costly 1273 + * stop_machine() calls for this case. 1274 + * 1275 + * Otherwise, use stop_machine() as it schedules the 1276 + * work allowing us to modify PSTATE, instead of 1277 + * on_each_cpu() which uses an IPI, giving us a PSTATE 1278 + * that disappears when we return. 1391 1279 */ 1392 - stop_machine(caps->enable, (void *)caps, cpu_online_mask); 1280 + if (scope_mask & SCOPE_BOOT_CPU) 1281 + caps->cpu_enable(caps); 1282 + else 1283 + stop_machine(__enable_cpu_capability, 1284 + (void *)caps, cpu_online_mask); 1393 1285 } 1394 1286 } 1287 + } 1288 + 1289 + static void __init enable_cpu_capabilities(u16 scope_mask) 1290 + { 1291 + __enable_cpu_capabilities(arm64_features, scope_mask); 1292 + __enable_cpu_capabilities(arm64_errata, scope_mask); 1293 + } 1294 + 1295 + /* 1296 + * Run through the list of capabilities to check for conflicts. 1297 + * If the system has already detected a capability, take necessary 1298 + * action on this CPU. 1299 + * 1300 + * Returns "false" on conflicts. 
1301 + */ 1302 + static bool 1303 + __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps, 1304 + u16 scope_mask) 1305 + { 1306 + bool cpu_has_cap, system_has_cap; 1307 + 1308 + scope_mask &= ARM64_CPUCAP_SCOPE_MASK; 1309 + 1310 + for (; caps->matches; caps++) { 1311 + if (!(caps->type & scope_mask)) 1312 + continue; 1313 + 1314 + cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU); 1315 + system_has_cap = cpus_have_cap(caps->capability); 1316 + 1317 + if (system_has_cap) { 1318 + /* 1319 + * Check if the new CPU misses an advertised feature, 1320 + * which is not safe to miss. 1321 + */ 1322 + if (!cpu_has_cap && !cpucap_late_cpu_optional(caps)) 1323 + break; 1324 + /* 1325 + * We have to issue cpu_enable() irrespective of 1326 + * whether the CPU has it or not, as it is enabeld 1327 + * system wide. It is upto the call back to take 1328 + * appropriate action on this CPU. 1329 + */ 1330 + if (caps->cpu_enable) 1331 + caps->cpu_enable(caps); 1332 + } else { 1333 + /* 1334 + * Check if the CPU has this capability if it isn't 1335 + * safe to have when the system doesn't. 1336 + */ 1337 + if (cpu_has_cap && !cpucap_late_cpu_permitted(caps)) 1338 + break; 1339 + } 1340 + } 1341 + 1342 + if (caps->matches) { 1343 + pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n", 1344 + smp_processor_id(), caps->capability, 1345 + caps->desc, system_has_cap, cpu_has_cap); 1346 + return false; 1347 + } 1348 + 1349 + return true; 1350 + } 1351 + 1352 + static bool verify_local_cpu_caps(u16 scope_mask) 1353 + { 1354 + return __verify_local_cpu_caps(arm64_errata, scope_mask) && 1355 + __verify_local_cpu_caps(arm64_features, scope_mask); 1395 1356 } 1396 1357 1397 1358 /* ··· 1499 1262 */ 1500 1263 static void check_early_cpu_features(void) 1501 1264 { 1502 - verify_cpu_run_el(); 1503 1265 verify_cpu_asid_bits(); 1266 + /* 1267 + * Early features are used by the kernel already. If there 1268 + * is a conflict, we cannot proceed further. 
1269 + */ 1270 + if (!verify_local_cpu_caps(SCOPE_BOOT_CPU)) 1271 + cpu_panic_kernel(); 1504 1272 } 1505 1273 1506 1274 static void ··· 1518 1276 smp_processor_id(), caps->desc); 1519 1277 cpu_die_early(); 1520 1278 } 1521 - } 1522 - 1523 - static void 1524 - verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list) 1525 - { 1526 - const struct arm64_cpu_capabilities *caps = caps_list; 1527 - for (; caps->matches; caps++) { 1528 - if (!cpus_have_cap(caps->capability)) 1529 - continue; 1530 - /* 1531 - * If the new CPU misses an advertised feature, we cannot proceed 1532 - * further, park the cpu. 1533 - */ 1534 - if (!__this_cpu_has_cap(caps_list, caps->capability)) { 1535 - pr_crit("CPU%d: missing feature: %s\n", 1536 - smp_processor_id(), caps->desc); 1537 - cpu_die_early(); 1538 - } 1539 - if (caps->enable) 1540 - caps->enable((void *)caps); 1541 - } 1542 1279 } 1543 1280 1544 1281 static void verify_sve_features(void) ··· 1537 1316 /* Add checks on other ZCR bits here if necessary */ 1538 1317 } 1539 1318 1319 + 1540 1320 /* 1541 1321 * Run through the enabled system capabilities and enable() it on this CPU. 1542 1322 * The capabilities were decided based on the available CPUs at the boot time. ··· 1548 1326 */ 1549 1327 static void verify_local_cpu_capabilities(void) 1550 1328 { 1551 - verify_local_cpu_errata_workarounds(); 1552 - verify_local_cpu_features(arm64_features); 1329 + /* 1330 + * The capabilities with SCOPE_BOOT_CPU are checked from 1331 + * check_early_cpu_features(), as they need to be verified 1332 + * on all secondary CPUs. 
1333 + */ 1334 + if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU)) 1335 + cpu_die_early(); 1336 + 1553 1337 verify_local_elf_hwcaps(arm64_elf_hwcaps); 1554 1338 1555 1339 if (system_supports_32bit_el0()) ··· 1563 1335 1564 1336 if (system_supports_sve()) 1565 1337 verify_sve_features(); 1566 - 1567 - if (system_uses_ttbr0_pan()) 1568 - pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); 1569 1338 } 1570 1339 1571 1340 void check_local_cpu_capabilities(void) ··· 1575 1350 1576 1351 /* 1577 1352 * If we haven't finalised the system capabilities, this CPU gets 1578 - * a chance to update the errata work arounds. 1353 + * a chance to update the errata work arounds and local features. 1579 1354 * Otherwise, this CPU should verify that it has all the system 1580 1355 * advertised capabilities. 1581 1356 */ 1582 1357 if (!sys_caps_initialised) 1583 - update_cpu_errata_workarounds(); 1358 + update_cpu_capabilities(SCOPE_LOCAL_CPU); 1584 1359 else 1585 1360 verify_local_cpu_capabilities(); 1586 1361 } 1587 1362 1588 - static void __init setup_feature_capabilities(void) 1363 + static void __init setup_boot_cpu_capabilities(void) 1589 1364 { 1590 - update_cpu_capabilities(arm64_features, "detected feature:"); 1591 - enable_cpu_capabilities(arm64_features); 1365 + /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */ 1366 + update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU); 1367 + /* Enable the SCOPE_BOOT_CPU capabilities alone right away */ 1368 + enable_cpu_capabilities(SCOPE_BOOT_CPU); 1592 1369 } 1593 1370 1594 1371 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready); ··· 1609 1382 __this_cpu_has_cap(arm64_errata, cap)); 1610 1383 } 1611 1384 1385 + static void __init setup_system_capabilities(void) 1386 + { 1387 + /* 1388 + * We have finalised the system-wide safe feature 1389 + * registers, finalise the capabilities that depend 1390 + * on it. 
Also enable all the available capabilities, 1391 + * that are not enabled already. 1392 + */ 1393 + update_cpu_capabilities(SCOPE_SYSTEM); 1394 + enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU); 1395 + } 1396 + 1612 1397 void __init setup_cpu_features(void) 1613 1398 { 1614 1399 u32 cwg; 1615 1400 int cls; 1616 1401 1617 - /* Set the CPU feature capabilies */ 1618 - setup_feature_capabilities(); 1619 - enable_errata_workarounds(); 1402 + setup_system_capabilities(); 1620 1403 mark_const_caps_ready(); 1621 1404 setup_elf_hwcaps(arm64_elf_hwcaps); 1622 1405 1623 1406 if (system_supports_32bit_el0()) 1624 1407 setup_elf_hwcaps(compat_elf_hwcaps); 1408 + 1409 + if (system_uses_ttbr0_pan()) 1410 + pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n"); 1625 1411 1626 1412 sve_setup(); 1627 1413 ··· 1758 1518 1759 1519 core_initcall(enable_mrs_emulation); 1760 1520 1761 - int cpu_clear_disr(void *__unused) 1521 + void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused) 1762 1522 { 1763 1523 /* Firmware may have left a deferred SError in this register. */ 1764 1524 write_sysreg_s(0, SYS_DISR_EL1); 1765 - 1766 - return 0; 1767 1525 }
+4
arch/arm64/kernel/cpuinfo.c
··· 77 77 "sha512", 78 78 "sve", 79 79 "asimdfhm", 80 + "dit", 81 + "uscat", 82 + "ilrcpc", 83 + "flagm", 80 84 NULL 81 85 }; 82 86
+2 -1
arch/arm64/kernel/debug-monitors.c
··· 33 33 #include <asm/daifflags.h> 34 34 #include <asm/debug-monitors.h> 35 35 #include <asm/system_misc.h> 36 + #include <asm/traps.h> 36 37 37 38 /* Determine debug architecture. */ 38 39 u8 debug_monitors_arch(void) ··· 224 223 if (interrupts_enabled(regs)) 225 224 local_irq_enable(); 226 225 227 - force_sig_info(SIGTRAP, &info, current); 226 + arm64_force_sig_info(&info, "User debug trap", current); 228 227 } 229 228 230 229 static int single_step_handler(unsigned long addr, unsigned int esr,
+52 -51
arch/arm64/kernel/fpsimd.c
··· 39 39 #include <linux/slab.h> 40 40 #include <linux/sysctl.h> 41 41 42 + #include <asm/esr.h> 42 43 #include <asm/fpsimd.h> 44 + #include <asm/cpufeature.h> 43 45 #include <asm/cputype.h> 44 46 #include <asm/simd.h> 45 47 #include <asm/sigcontext.h> ··· 66 64 * been loaded into its FPSIMD registers most recently, or whether it has 67 65 * been used to perform kernel mode NEON in the meantime. 68 66 * 69 - * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to 67 + * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to 70 68 * the id of the current CPU every time the state is loaded onto a CPU. For (b), 71 69 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the 72 70 * address of the userland FPSIMD state of the task that was loaded onto the CPU ··· 75 73 * With this in place, we no longer have to restore the next FPSIMD state right 76 74 * when switching between tasks. Instead, we can defer this check to userland 77 75 * resume, at which time we verify whether the CPU's fpsimd_last_state and the 78 - * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we 76 + * task's fpsimd_cpu are still mutually in sync. If this is the case, we 79 77 * can omit the FPSIMD restore. 80 78 * 81 79 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to ··· 92 90 * flag with local_bh_disable() unless softirqs are already masked. 
93 91 * 94 92 * For a certain task, the sequence may look something like this: 95 - * - the task gets scheduled in; if both the task's fpsimd_state.cpu field 93 + * - the task gets scheduled in; if both the task's fpsimd_cpu field 96 94 * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu 97 95 * variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is 98 96 * cleared, otherwise it is set; 99 97 * 100 98 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's 101 99 * userland FPSIMD state is copied from memory to the registers, the task's 102 - * fpsimd_state.cpu field is set to the id of the current CPU, the current 100 + * fpsimd_cpu field is set to the id of the current CPU, the current 103 101 * CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the 104 102 * TIF_FOREIGN_FPSTATE flag is cleared; 105 103 * ··· 117 115 * whatever is in the FPSIMD registers is not saved to memory, but discarded. 118 116 */ 119 117 struct fpsimd_last_state_struct { 120 - struct fpsimd_state *st; 118 + struct user_fpsimd_state *st; 121 119 bool sve_in_use; 122 120 }; 123 121 ··· 224 222 * sets TIF_SVE. 225 223 * 226 224 * When stored, FPSIMD registers V0-V31 are encoded in 227 - * task->fpsimd_state; bits [max : 128] for each of Z0-Z31 are 225 + * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are 228 226 * logically zero but not stored anywhere; P0-P15 and FFR are not 229 227 * stored and have unspecified values from userspace's point of 230 228 * view. For hygiene purposes, the kernel zeroes them on next use, ··· 233 231 * task->thread.sve_state does not need to be non-NULL, valid or any 234 232 * particular size: it must not be dereferenced. 235 233 * 236 - * * FPSR and FPCR are always stored in task->fpsimd_state irrespctive of 237 - * whether TIF_SVE is clear or set, since these are not vector length 238 - * dependent. 
234 + * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state 235 + * irrespective of whether TIF_SVE is clear or set, since these are 236 + * not vector length dependent. 239 237 */ 240 238 241 239 /* ··· 253 251 254 252 if (system_supports_sve() && test_thread_flag(TIF_SVE)) 255 253 sve_load_state(sve_pffr(current), 256 - &current->thread.fpsimd_state.fpsr, 254 + &current->thread.uw.fpsimd_state.fpsr, 257 255 sve_vq_from_vl(current->thread.sve_vl) - 1); 258 256 else 259 - fpsimd_load_state(&current->thread.fpsimd_state); 257 + fpsimd_load_state(&current->thread.uw.fpsimd_state); 260 258 261 259 if (system_supports_sve()) { 262 260 /* Toggle SVE trapping for userspace if needed */ ··· 287 285 * re-enter user with corrupt state. 288 286 * There's no way to recover, so kill it: 289 287 */ 290 - force_signal_inject( 291 - SIGKILL, 0, current_pt_regs(), 0); 288 + force_signal_inject(SIGKILL, SI_KERNEL, 0); 292 289 return; 293 290 } 294 291 295 292 sve_save_state(sve_pffr(current), 296 - &current->thread.fpsimd_state.fpsr); 293 + &current->thread.uw.fpsimd_state.fpsr); 297 294 } else 298 - fpsimd_save_state(&current->thread.fpsimd_state); 295 + fpsimd_save_state(&current->thread.uw.fpsimd_state); 299 296 } 300 297 } 301 298 ··· 405 404 (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) 406 405 407 406 /* 408 - * Transfer the FPSIMD state in task->thread.fpsimd_state to 407 + * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to 409 408 * task->thread.sve_state. 410 409 * 411 410 * Task can be a non-runnable task, or current. In the latter case, 412 411 * softirqs (and preemption) must be disabled. 413 412 * task->thread.sve_state must point to at least sve_state_size(task) 414 413 * bytes of allocated kernel memory. 415 - * task->thread.fpsimd_state must be up to date before calling this function. 414 + * task->thread.uw.fpsimd_state must be up to date before calling this 415 + * function. 
416 416 */ 417 417 static void fpsimd_to_sve(struct task_struct *task) 418 418 { 419 419 unsigned int vq; 420 420 void *sst = task->thread.sve_state; 421 - struct fpsimd_state const *fst = &task->thread.fpsimd_state; 421 + struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; 422 422 unsigned int i; 423 423 424 424 if (!system_supports_sve()) ··· 433 431 434 432 /* 435 433 * Transfer the SVE state in task->thread.sve_state to 436 - * task->thread.fpsimd_state. 434 + * task->thread.uw.fpsimd_state. 437 435 * 438 436 * Task can be a non-runnable task, or current. In the latter case, 439 437 * softirqs (and preemption) must be disabled. ··· 445 443 { 446 444 unsigned int vq; 447 445 void const *sst = task->thread.sve_state; 448 - struct fpsimd_state *fst = &task->thread.fpsimd_state; 446 + struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; 449 447 unsigned int i; 450 448 451 449 if (!system_supports_sve()) ··· 512 510 } 513 511 514 512 /* 515 - * Ensure that task->thread.fpsimd_state is up to date with respect to 513 + * Ensure that task->thread.uw.fpsimd_state is up to date with respect to 516 514 * the user task, irrespective of whether SVE is in use or not. 517 515 * 518 516 * This should only be called by ptrace. task must be non-runnable. ··· 527 525 528 526 /* 529 527 * Ensure that task->thread.sve_state is up to date with respect to 530 - * the task->thread.fpsimd_state. 528 + * the task->thread.uw.fpsimd_state. 531 529 * 532 530 * This should only be called by ptrace to merge new FPSIMD register 533 531 * values into a task for which SVE is currently active. 534 532 * task must be non-runnable. 535 533 * task->thread.sve_state must point to at least sve_state_size(task) 536 534 * bytes of allocated kernel memory. 537 - * task->thread.fpsimd_state must already have been initialised with 535 + * task->thread.uw.fpsimd_state must already have been initialised with 538 536 * the new FPSIMD register values to be merged in. 
539 537 */ 540 538 void sve_sync_from_fpsimd_zeropad(struct task_struct *task) 541 539 { 542 540 unsigned int vq; 543 541 void *sst = task->thread.sve_state; 544 - struct fpsimd_state const *fst = &task->thread.fpsimd_state; 542 + struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; 545 543 unsigned int i; 546 544 547 545 if (!test_tsk_thread_flag(task, TIF_SVE)) ··· 759 757 * Enable SVE for EL1. 760 758 * Intended for use by the cpufeatures code during CPU boot. 761 759 */ 762 - int sve_kernel_enable(void *__always_unused p) 760 + void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p) 763 761 { 764 762 write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1); 765 763 isb(); 766 - 767 - return 0; 768 764 } 769 765 770 766 void __init sve_setup(void) ··· 831 831 { 832 832 /* Even if we chose not to use SVE, the hardware could still trap: */ 833 833 if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { 834 - force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); 834 + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); 835 835 return; 836 836 } 837 837 ··· 867 867 asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) 868 868 { 869 869 siginfo_t info; 870 - unsigned int si_code = FPE_FIXME; 870 + unsigned int si_code = FPE_FLTUNK; 871 871 872 - if (esr & FPEXC_IOF) 873 - si_code = FPE_FLTINV; 874 - else if (esr & FPEXC_DZF) 875 - si_code = FPE_FLTDIV; 876 - else if (esr & FPEXC_OFF) 877 - si_code = FPE_FLTOVF; 878 - else if (esr & FPEXC_UFF) 879 - si_code = FPE_FLTUND; 880 - else if (esr & FPEXC_IXF) 881 - si_code = FPE_FLTRES; 872 + if (esr & ESR_ELx_FP_EXC_TFV) { 873 + if (esr & FPEXC_IOF) 874 + si_code = FPE_FLTINV; 875 + else if (esr & FPEXC_DZF) 876 + si_code = FPE_FLTDIV; 877 + else if (esr & FPEXC_OFF) 878 + si_code = FPE_FLTOVF; 879 + else if (esr & FPEXC_UFF) 880 + si_code = FPE_FLTUND; 881 + else if (esr & FPEXC_IXF) 882 + si_code = FPE_FLTRES; 883 + } 882 884 883 885 memset(&info, 
0, sizeof(info)); 884 886 info.si_signo = SIGFPE; ··· 910 908 * the TIF_FOREIGN_FPSTATE flag so the state will be loaded 911 909 * upon the next return to userland. 912 910 */ 913 - struct fpsimd_state *st = &next->thread.fpsimd_state; 914 - 915 - if (__this_cpu_read(fpsimd_last_state.st) == st 916 - && st->cpu == smp_processor_id()) 911 + if (__this_cpu_read(fpsimd_last_state.st) == 912 + &next->thread.uw.fpsimd_state 913 + && next->thread.fpsimd_cpu == smp_processor_id()) 917 914 clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); 918 915 else 919 916 set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE); ··· 928 927 929 928 local_bh_disable(); 930 929 931 - memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); 930 + memset(&current->thread.uw.fpsimd_state, 0, 931 + sizeof(current->thread.uw.fpsimd_state)); 932 932 fpsimd_flush_task_state(current); 933 933 934 934 if (system_supports_sve()) { ··· 988 986 989 987 /* 990 988 * Like fpsimd_preserve_current_state(), but ensure that 991 - * current->thread.fpsimd_state is updated so that it can be copied to 989 + * current->thread.uw.fpsimd_state is updated so that it can be copied to 992 990 * the signal frame. 
993 991 */ 994 992 void fpsimd_signal_preserve_current_state(void) ··· 1006 1004 { 1007 1005 struct fpsimd_last_state_struct *last = 1008 1006 this_cpu_ptr(&fpsimd_last_state); 1009 - struct fpsimd_state *st = &current->thread.fpsimd_state; 1010 1007 1011 - last->st = st; 1008 + last->st = &current->thread.uw.fpsimd_state; 1012 1009 last->sve_in_use = test_thread_flag(TIF_SVE); 1013 - st->cpu = smp_processor_id(); 1010 + current->thread.fpsimd_cpu = smp_processor_id(); 1014 1011 } 1015 1012 1016 1013 /* ··· 1044 1043 1045 1044 local_bh_disable(); 1046 1045 1047 - current->thread.fpsimd_state.user_fpsimd = *state; 1046 + current->thread.uw.fpsimd_state = *state; 1048 1047 if (system_supports_sve() && test_thread_flag(TIF_SVE)) 1049 1048 fpsimd_to_sve(current); 1050 1049 ··· 1061 1060 */ 1062 1061 void fpsimd_flush_task_state(struct task_struct *t) 1063 1062 { 1064 - t->thread.fpsimd_state.cpu = NR_CPUS; 1063 + t->thread.fpsimd_cpu = NR_CPUS; 1065 1064 } 1066 1065 1067 1066 static inline void fpsimd_flush_cpu_state(void) ··· 1160 1159 1161 1160 #ifdef CONFIG_EFI 1162 1161 1163 - static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state); 1162 + static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state); 1164 1163 static DEFINE_PER_CPU(bool, efi_fpsimd_state_used); 1165 1164 static DEFINE_PER_CPU(bool, efi_sve_state_used); 1166 1165
+12 -23
arch/arm64/kernel/kaslr.c
··· 117 117 /* 118 118 * OK, so we are proceeding with KASLR enabled. Calculate a suitable 119 119 * kernel image offset from the seed. Let's place the kernel in the 120 - * lower half of the VMALLOC area (VA_BITS - 2). 120 + * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of 121 + * the lower and upper quarters to avoid colliding with other 122 + * allocations. 121 123 * Even if we could randomize at page granularity for 16k and 64k pages, 122 124 * let's always round to 2 MB so we don't interfere with the ability to 123 125 * map using contiguous PTEs 124 126 */ 125 127 mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1); 126 - offset = seed & mask; 128 + offset = BIT(VA_BITS - 3) + (seed & mask); 127 129 128 130 /* use the top 16 bits to randomize the linear region */ 129 131 memstart_offset_seed = seed >> 48; 130 - 131 - /* 132 - * The kernel Image should not extend across a 1GB/32MB/512MB alignment 133 - * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this 134 - * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT). 135 - * 136 - * NOTE: The references to _text and _end below will already take the 137 - * modulo offset (the physical displacement modulo 2 MB) into 138 - * account, given that the physical placement is controlled by 139 - * the loader, and will not change as a result of the virtual 140 - * mapping we choose. 141 - */ 142 - if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) != 143 - (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT)) 144 - offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT); 145 132 146 133 if (IS_ENABLED(CONFIG_KASAN)) 147 134 /* ··· 136 149 * vmalloc region, since shadow memory is allocated for each 137 150 * module at load time, whereas the vmalloc region is shadowed 138 151 * by KASAN zero pages. So keep modules out of the vmalloc 139 - * region if KASAN is enabled. 152 + * region if KASAN is enabled, and put the kernel well within 153 + * 4 GB of the module region. 
140 154 */ 141 - return offset; 155 + return offset % SZ_2G; 142 156 143 157 if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) { 144 158 /* 145 - * Randomize the module region independently from the core 146 - * kernel. This prevents modules from leaking any information 159 + * Randomize the module region over a 4 GB window covering the 160 + * kernel. This reduces the risk of modules leaking information 147 161 * about the address of the kernel itself, but results in 148 162 * branches between modules and the core kernel that are 149 163 * resolved via PLTs. (Branches between modules will be 150 164 * resolved normally.) 151 165 */ 152 - module_range = VMALLOC_END - VMALLOC_START - MODULES_VSIZE; 153 - module_alloc_base = VMALLOC_START; 166 + module_range = SZ_4G - (u64)(_end - _stext); 167 + module_alloc_base = max((u64)_end + offset - SZ_4G, 168 + (u64)MODULES_VADDR); 154 169 } else { 155 170 /* 156 171 * Randomize the module region by setting module_alloc_base to
+16 -5
arch/arm64/kernel/kgdb.c
··· 138 138 void 139 139 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) 140 140 { 141 - struct pt_regs *thread_regs; 141 + struct cpu_context *cpu_context = &task->thread.cpu_context; 142 142 143 143 /* Initialize to zero */ 144 144 memset((char *)gdb_regs, 0, NUMREGBYTES); 145 - thread_regs = task_pt_regs(task); 146 - memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES); 147 - /* Special case for PSTATE (check comments in asm/kgdb.h for details) */ 148 - dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs); 145 + 146 + gdb_regs[19] = cpu_context->x19; 147 + gdb_regs[20] = cpu_context->x20; 148 + gdb_regs[21] = cpu_context->x21; 149 + gdb_regs[22] = cpu_context->x22; 150 + gdb_regs[23] = cpu_context->x23; 151 + gdb_regs[24] = cpu_context->x24; 152 + gdb_regs[25] = cpu_context->x25; 153 + gdb_regs[26] = cpu_context->x26; 154 + gdb_regs[27] = cpu_context->x27; 155 + gdb_regs[28] = cpu_context->x28; 156 + gdb_regs[29] = cpu_context->fp; 157 + 158 + gdb_regs[31] = cpu_context->sp; 159 + gdb_regs[32] = cpu_context->pc; 149 160 } 150 161 151 162 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+86 -4
arch/arm64/kernel/module-plts.c
··· 36 36 return (u64)&plt[i - 1]; 37 37 38 38 pltsec->plt_num_entries++; 39 - BUG_ON(pltsec->plt_num_entries > pltsec->plt_max_entries); 39 + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) 40 + return 0; 40 41 41 42 return (u64)&plt[i]; 42 43 } 44 + 45 + #ifdef CONFIG_ARM64_ERRATUM_843419 46 + u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val) 47 + { 48 + struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : 49 + &mod->arch.init; 50 + struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr; 51 + int i = pltsec->plt_num_entries++; 52 + u32 mov0, mov1, mov2, br; 53 + int rd; 54 + 55 + if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries)) 56 + return 0; 57 + 58 + /* get the destination register of the ADRP instruction */ 59 + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, 60 + le32_to_cpup((__le32 *)loc)); 61 + 62 + /* generate the veneer instructions */ 63 + mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0, 64 + AARCH64_INSN_VARIANT_64BIT, 65 + AARCH64_INSN_MOVEWIDE_INVERSE); 66 + mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16, 67 + AARCH64_INSN_VARIANT_64BIT, 68 + AARCH64_INSN_MOVEWIDE_KEEP); 69 + mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32, 70 + AARCH64_INSN_VARIANT_64BIT, 71 + AARCH64_INSN_MOVEWIDE_KEEP); 72 + br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4, 73 + AARCH64_INSN_BRANCH_NOLINK); 74 + 75 + plt[i] = (struct plt_entry){ 76 + cpu_to_le32(mov0), 77 + cpu_to_le32(mov1), 78 + cpu_to_le32(mov2), 79 + cpu_to_le32(br) 80 + }; 81 + 82 + return (u64)&plt[i]; 83 + } 84 + #endif 43 85 44 86 #define cmp_3way(a,b) ((a) < (b) ? 
-1 : (a) > (b)) 45 87 ··· 110 68 } 111 69 112 70 static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num, 113 - Elf64_Word dstidx) 71 + Elf64_Word dstidx, Elf_Shdr *dstsec) 114 72 { 115 73 unsigned int ret = 0; 116 74 Elf64_Sym *s; 117 75 int i; 118 76 119 77 for (i = 0; i < num; i++) { 78 + u64 min_align; 79 + 120 80 switch (ELF64_R_TYPE(rela[i].r_info)) { 121 81 case R_AARCH64_JUMP26: 122 82 case R_AARCH64_CALL26: 83 + if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE)) 84 + break; 85 + 123 86 /* 124 87 * We only have to consider branch targets that resolve 125 88 * to symbols that are defined in a different section. ··· 155 108 */ 156 109 if (rela[i].r_addend != 0 || !duplicate_rel(rela, i)) 157 110 ret++; 111 + break; 112 + case R_AARCH64_ADR_PREL_PG_HI21_NC: 113 + case R_AARCH64_ADR_PREL_PG_HI21: 114 + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) || 115 + !cpus_have_const_cap(ARM64_WORKAROUND_843419)) 116 + break; 117 + 118 + /* 119 + * Determine the minimal safe alignment for this ADRP 120 + * instruction: the section alignment at which it is 121 + * guaranteed not to appear at a vulnerable offset. 122 + * 123 + * This comes down to finding the least significant zero 124 + * bit in bits [11:3] of the section offset, and 125 + * increasing the section's alignment so that the 126 + * resulting address of this instruction is guaranteed 127 + * to equal the offset in that particular bit (as well 128 + * as all less signficant bits). This ensures that the 129 + * address modulo 4 KB != 0xfff8 or 0xfffc (which would 130 + * have all ones in bits [11:3]) 131 + */ 132 + min_align = 2ULL << ffz(rela[i].r_offset | 0x7); 133 + 134 + /* 135 + * Allocate veneer space for each ADRP that may appear 136 + * at a vulnerable offset nonetheless. At relocation 137 + * time, some of these will remain unused since some 138 + * ADRP instructions can be patched to ADR instructions 139 + * instead. 
140 + */ 141 + if (min_align > SZ_4K) 142 + ret++; 143 + else 144 + dstsec->sh_addralign = max(dstsec->sh_addralign, 145 + min_align); 158 146 break; 159 147 } 160 148 } ··· 248 166 249 167 if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) 250 168 core_plts += count_plts(syms, rels, numrels, 251 - sechdrs[i].sh_info); 169 + sechdrs[i].sh_info, dstsec); 252 170 else 253 171 init_plts += count_plts(syms, rels, numrels, 254 - sechdrs[i].sh_info); 172 + sechdrs[i].sh_info, dstsec); 255 173 } 256 174 257 175 mod->arch.core.plt->sh_type = SHT_NOBITS;
+37 -7
arch/arm64/kernel/module.c
··· 55 55 * less likely that the module region gets exhausted, so we 56 56 * can simply omit this fallback in that case. 57 57 */ 58 - p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, 59 - VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, 60 - NUMA_NO_NODE, __builtin_return_address(0)); 58 + p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base, 59 + module_alloc_base + SZ_4G, GFP_KERNEL, 60 + PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, 61 + __builtin_return_address(0)); 61 62 62 63 if (p && (kasan_module_alloc(p, size) < 0)) { 63 64 vfree(p); ··· 195 194 if ((u64)(sval + 1) >= 2) 196 195 return -ERANGE; 197 196 197 + return 0; 198 + } 199 + 200 + static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) 201 + { 202 + u32 insn; 203 + 204 + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) || 205 + !cpus_have_const_cap(ARM64_WORKAROUND_843419) || 206 + ((u64)place & 0xfff) < 0xff8) 207 + return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21, 208 + AARCH64_INSN_IMM_ADR); 209 + 210 + /* patch ADRP to ADR if it is in range */ 211 + if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21, 212 + AARCH64_INSN_IMM_ADR)) { 213 + insn = le32_to_cpu(*place); 214 + insn &= ~BIT(31); 215 + } else { 216 + /* out of range for ADR -> emit a veneer */ 217 + val = module_emit_adrp_veneer(mod, place, val & ~0xfff); 218 + if (!val) 219 + return -ENOEXEC; 220 + insn = aarch64_insn_gen_branch_imm((u64)place, val, 221 + AARCH64_INSN_BRANCH_NOLINK); 222 + } 223 + 224 + *place = cpu_to_le32(insn); 198 225 return 0; 199 226 } 200 227 ··· 365 336 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, 366 337 AARCH64_INSN_IMM_ADR); 367 338 break; 368 - #ifndef CONFIG_ARM64_ERRATUM_843419 369 339 case R_AARCH64_ADR_PREL_PG_HI21_NC: 370 340 overflow_check = false; 371 341 case R_AARCH64_ADR_PREL_PG_HI21: 372 - ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, 373 - AARCH64_INSN_IMM_ADR); 342 + ovf = reloc_insn_adrp(me, loc, val); 343 + if (ovf && ovf != -ERANGE) 344 + 
return ovf; 374 345 break; 375 - #endif 376 346 case R_AARCH64_ADD_ABS_LO12_NC: 377 347 case R_AARCH64_LDST8_ABS_LO12_NC: 378 348 overflow_check = false; ··· 414 386 if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && 415 387 ovf == -ERANGE) { 416 388 val = module_emit_plt_entry(me, loc, &rel[i], sym); 389 + if (!val) 390 + return -ENOEXEC; 417 391 ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 418 392 26, AARCH64_INSN_IMM_26); 419 393 }
+3 -3
arch/arm64/kernel/process.c
··· 257 257 write_sysreg(0, tpidr_el0); 258 258 259 259 if (is_compat_task()) { 260 - current->thread.tp_value = 0; 260 + current->thread.uw.tp_value = 0; 261 261 262 262 /* 263 263 * We need to ensure ordering between the shadow state and the ··· 351 351 * for the new thread. 352 352 */ 353 353 if (clone_flags & CLONE_SETTLS) 354 - p->thread.tp_value = childregs->regs[3]; 354 + p->thread.uw.tp_value = childregs->regs[3]; 355 355 } else { 356 356 memset(childregs, 0, sizeof(struct pt_regs)); 357 357 childregs->pstate = PSR_MODE_EL1h; ··· 379 379 tls_preserve_current_state(); 380 380 381 381 if (is_compat_thread(task_thread_info(next))) 382 - write_sysreg(next->thread.tp_value, tpidrro_el0); 382 + write_sysreg(next->thread.uw.tp_value, tpidrro_el0); 383 383 else if (!arm64_kernel_unmapped_at_el0()) 384 384 write_sysreg(0, tpidrro_el0); 385 385
+16 -16
arch/arm64/kernel/ptrace.c
··· 209 209 force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger); 210 210 } 211 211 #endif 212 - force_sig_info(SIGTRAP, &info, current); 212 + arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current); 213 213 } 214 214 215 215 /* ··· 629 629 630 630 sve_sync_to_fpsimd(target); 631 631 632 - uregs = &target->thread.fpsimd_state.user_fpsimd; 632 + uregs = &target->thread.uw.fpsimd_state; 633 633 634 634 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 635 635 start_pos, start_pos + sizeof(*uregs)); ··· 655 655 struct user_fpsimd_state newstate; 656 656 657 657 /* 658 - * Ensure target->thread.fpsimd_state is up to date, so that a 658 + * Ensure target->thread.uw.fpsimd_state is up to date, so that a 659 659 * short copyin can't resurrect stale data. 660 660 */ 661 661 sve_sync_to_fpsimd(target); 662 662 663 - newstate = target->thread.fpsimd_state.user_fpsimd; 663 + newstate = target->thread.uw.fpsimd_state; 664 664 665 665 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 666 666 start_pos, start_pos + sizeof(newstate)); 667 667 if (ret) 668 668 return ret; 669 669 670 - target->thread.fpsimd_state.user_fpsimd = newstate; 670 + target->thread.uw.fpsimd_state = newstate; 671 671 672 672 return ret; 673 673 } ··· 692 692 unsigned int pos, unsigned int count, 693 693 void *kbuf, void __user *ubuf) 694 694 { 695 - unsigned long *tls = &target->thread.tp_value; 695 + unsigned long *tls = &target->thread.uw.tp_value; 696 696 697 697 if (target == current) 698 698 tls_preserve_current_state(); ··· 705 705 const void *kbuf, const void __user *ubuf) 706 706 { 707 707 int ret; 708 - unsigned long tls = target->thread.tp_value; 708 + unsigned long tls = target->thread.uw.tp_value; 709 709 710 710 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 711 711 if (ret) 712 712 return ret; 713 713 714 - target->thread.tp_value = tls; 714 + target->thread.uw.tp_value = tls; 715 715 return ret; 716 716 } 717 
717 ··· 842 842 start = end; 843 843 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 844 844 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 845 - &target->thread.fpsimd_state.fpsr, 845 + &target->thread.uw.fpsimd_state.fpsr, 846 846 start, end); 847 847 if (ret) 848 848 return ret; ··· 941 941 start = end; 942 942 end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE; 943 943 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 944 - &target->thread.fpsimd_state.fpsr, 944 + &target->thread.uw.fpsimd_state.fpsr, 945 945 start, end); 946 946 947 947 out: ··· 1169 1169 compat_ulong_t fpscr; 1170 1170 int ret, vregs_end_pos; 1171 1171 1172 - uregs = &target->thread.fpsimd_state.user_fpsimd; 1172 + uregs = &target->thread.uw.fpsimd_state; 1173 1173 1174 1174 if (target == current) 1175 1175 fpsimd_preserve_current_state(); ··· 1202 1202 compat_ulong_t fpscr; 1203 1203 int ret, vregs_end_pos; 1204 1204 1205 - uregs = &target->thread.fpsimd_state.user_fpsimd; 1205 + uregs = &target->thread.uw.fpsimd_state; 1206 1206 1207 1207 vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t); 1208 1208 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, ··· 1225 1225 const struct user_regset *regset, unsigned int pos, 1226 1226 unsigned int count, void *kbuf, void __user *ubuf) 1227 1227 { 1228 - compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value; 1228 + compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value; 1229 1229 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 1230 1230 } 1231 1231 ··· 1235 1235 const void __user *ubuf) 1236 1236 { 1237 1237 int ret; 1238 - compat_ulong_t tls = target->thread.tp_value; 1238 + compat_ulong_t tls = target->thread.uw.tp_value; 1239 1239 1240 1240 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 1241 1241 if (ret) 1242 1242 return ret; 1243 1243 1244 - target->thread.tp_value = tls; 1244 + target->thread.uw.tp_value = tls; 1245 1245 return ret; 1246 1246 } 1247 
1247 ··· 1538 1538 break; 1539 1539 1540 1540 case COMPAT_PTRACE_GET_THREAD_AREA: 1541 - ret = put_user((compat_ulong_t)child->thread.tp_value, 1541 + ret = put_user((compat_ulong_t)child->thread.uw.tp_value, 1542 1542 (compat_ulong_t __user *)datap); 1543 1543 break; 1544 1544
+2 -2
arch/arm64/kernel/reloc_test_core.c
··· 28 28 asmlinkage u64 signed_movw(void); 29 29 asmlinkage u64 unsigned_movw(void); 30 30 asmlinkage u64 relative_adrp(void); 31 + asmlinkage u64 relative_adrp_far(void); 31 32 asmlinkage u64 relative_adr(void); 32 33 asmlinkage u64 relative_data64(void); 33 34 asmlinkage u64 relative_data32(void); ··· 44 43 { "R_AARCH64_ABS16", absolute_data16, UL(SYM16_ABS_VAL) }, 45 44 { "R_AARCH64_MOVW_SABS_Gn", signed_movw, UL(SYM64_ABS_VAL) }, 46 45 { "R_AARCH64_MOVW_UABS_Gn", unsigned_movw, UL(SYM64_ABS_VAL) }, 47 - #ifndef CONFIG_ARM64_ERRATUM_843419 48 46 { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp, (u64)&sym64_rel }, 49 - #endif 47 + { "R_AARCH64_ADR_PREL_PG_HI21", relative_adrp_far, (u64)&memstart_addr }, 50 48 { "R_AARCH64_ADR_PREL_LO21", relative_adr, (u64)&sym64_rel }, 51 49 { "R_AARCH64_PREL64", relative_data64, (u64)&sym64_rel }, 52 50 { "R_AARCH64_PREL32", relative_data32, (u64)&sym64_rel },
+9 -3
arch/arm64/kernel/reloc_test_syms.S
··· 43 43 ret 44 44 ENDPROC(unsigned_movw) 45 45 46 - #ifndef CONFIG_ARM64_ERRATUM_843419 47 - 46 + .align 12 47 + .space 0xff8 48 48 ENTRY(relative_adrp) 49 49 adrp x0, sym64_rel 50 50 add x0, x0, #:lo12:sym64_rel 51 51 ret 52 52 ENDPROC(relative_adrp) 53 53 54 - #endif 54 + .align 12 55 + .space 0xffc 56 + ENTRY(relative_adrp_far) 57 + adrp x0, memstart_addr 58 + add x0, x0, #:lo12:memstart_addr 59 + ret 60 + ENDPROC(relative_adrp_far) 55 61 56 62 ENTRY(relative_adr) 57 63 adr x0, sym64_rel
+3 -6
arch/arm64/kernel/signal.c
··· 40 40 #include <asm/fpsimd.h> 41 41 #include <asm/ptrace.h> 42 42 #include <asm/signal32.h> 43 + #include <asm/traps.h> 43 44 #include <asm/vdso.h> 44 45 45 46 /* ··· 180 179 static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) 181 180 { 182 181 struct user_fpsimd_state const *fpsimd = 183 - &current->thread.fpsimd_state.user_fpsimd; 182 + &current->thread.uw.fpsimd_state; 184 183 int err; 185 184 186 185 /* copy the FP and status/control registers */ ··· 566 565 return regs->regs[0]; 567 566 568 567 badframe: 569 - if (show_unhandled_signals) 570 - pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", 571 - current->comm, task_pid_nr(current), __func__, 572 - regs->pc, regs->sp); 573 - force_sig(SIGSEGV, current); 568 + arm64_notify_segfault(regs->sp); 574 569 return 0; 575 570 } 576 571
+4 -11
arch/arm64/kernel/signal32.c
··· 26 26 #include <asm/esr.h> 27 27 #include <asm/fpsimd.h> 28 28 #include <asm/signal32.h> 29 + #include <asm/traps.h> 29 30 #include <linux/uaccess.h> 30 31 #include <asm/unistd.h> 31 32 ··· 150 149 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) 151 150 { 152 151 struct user_fpsimd_state const *fpsimd = 153 - &current->thread.fpsimd_state.user_fpsimd; 152 + &current->thread.uw.fpsimd_state; 154 153 compat_ulong_t magic = VFP_MAGIC; 155 154 compat_ulong_t size = VFP_STORAGE_SIZE; 156 155 compat_ulong_t fpscr, fpexc; ··· 308 307 return regs->regs[0]; 309 308 310 309 badframe: 311 - if (show_unhandled_signals) 312 - pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", 313 - current->comm, task_pid_nr(current), __func__, 314 - regs->pc, regs->compat_sp); 315 - force_sig(SIGSEGV, current); 310 + arm64_notify_segfault(regs->compat_sp); 316 311 return 0; 317 312 } 318 313 ··· 341 344 return regs->regs[0]; 342 345 343 346 badframe: 344 - if (show_unhandled_signals) 345 - pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", 346 - current->comm, task_pid_nr(current), __func__, 347 - regs->pc, regs->compat_sp); 348 - force_sig(SIGSEGV, current); 347 + arm64_notify_segfault(regs->compat_sp); 349 348 return 0; 350 349 } 351 350
-44
arch/arm64/kernel/smp.c
··· 85 85 IPI_WAKEUP 86 86 }; 87 87 88 - #ifdef CONFIG_ARM64_VHE 89 - 90 - /* Whether the boot CPU is running in HYP mode or not*/ 91 - static bool boot_cpu_hyp_mode; 92 - 93 - static inline void save_boot_cpu_run_el(void) 94 - { 95 - boot_cpu_hyp_mode = is_kernel_in_hyp_mode(); 96 - } 97 - 98 - static inline bool is_boot_cpu_in_hyp_mode(void) 99 - { 100 - return boot_cpu_hyp_mode; 101 - } 102 - 103 - /* 104 - * Verify that a secondary CPU is running the kernel at the same 105 - * EL as that of the boot CPU. 106 - */ 107 - void verify_cpu_run_el(void) 108 - { 109 - bool in_el2 = is_kernel_in_hyp_mode(); 110 - bool boot_cpu_el2 = is_boot_cpu_in_hyp_mode(); 111 - 112 - if (in_el2 ^ boot_cpu_el2) { 113 - pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n", 114 - smp_processor_id(), 115 - in_el2 ? 2 : 1, 116 - boot_cpu_el2 ? 2 : 1); 117 - cpu_panic_kernel(); 118 - } 119 - } 120 - 121 - #else 122 - static inline void save_boot_cpu_run_el(void) {} 123 - #endif 124 - 125 88 #ifdef CONFIG_HOTPLUG_CPU 126 89 static int op_cpu_kill(unsigned int cpu); 127 90 #else ··· 410 447 */ 411 448 jump_label_init(); 412 449 cpuinfo_store_boot_cpu(); 413 - save_boot_cpu_run_el(); 414 - /* 415 - * Run the errata work around checks on the boot CPU, once we have 416 - * initialised the cpu feature infrastructure from 417 - * cpuinfo_store_boot_cpu() above. 418 - */ 419 - update_cpu_errata_workarounds(); 420 450 } 421 451 422 452 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+21 -2
arch/arm64/kernel/sys_compat.c
··· 27 27 #include <linux/uaccess.h> 28 28 29 29 #include <asm/cacheflush.h> 30 + #include <asm/system_misc.h> 30 31 #include <asm/unistd.h> 31 32 32 33 static long ··· 68 67 */ 69 68 long compat_arm_syscall(struct pt_regs *regs) 70 69 { 70 + siginfo_t info; 71 71 unsigned int no = regs->regs[7]; 72 72 73 73 switch (no) { ··· 90 88 return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); 91 89 92 90 case __ARM_NR_compat_set_tls: 93 - current->thread.tp_value = regs->regs[0]; 91 + current->thread.uw.tp_value = regs->regs[0]; 94 92 95 93 /* 96 94 * Protect against register corruption from context switch. ··· 101 99 return 0; 102 100 103 101 default: 104 - return -ENOSYS; 102 + /* 103 + * Calls 9f00xx..9f07ff are defined to return -ENOSYS 104 + * if not implemented, rather than raising SIGILL. This 105 + * way the calling program can gracefully determine whether 106 + * a feature is supported. 107 + */ 108 + if ((no & 0xffff) <= 0x7ff) 109 + return -ENOSYS; 110 + break; 105 111 } 112 + 113 + info.si_signo = SIGILL; 114 + info.si_errno = 0; 115 + info.si_code = ILL_ILLTRP; 116 + info.si_addr = (void __user *)instruction_pointer(regs) - 117 + (compat_thumb_mode(regs) ? 2 : 4); 118 + 119 + arm64_notify_die("Oops - bad compat syscall(2)", regs, &info, no); 120 + return 0; 106 121 }
+52 -24
arch/arm64/kernel/traps.c
··· 38 38 39 39 #include <asm/atomic.h> 40 40 #include <asm/bug.h> 41 + #include <asm/cpufeature.h> 41 42 #include <asm/daifflags.h> 42 43 #include <asm/debug-monitors.h> 43 44 #include <asm/esr.h> ··· 224 223 do_exit(SIGSEGV); 225 224 } 226 225 226 + static bool show_unhandled_signals_ratelimited(void) 227 + { 228 + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, 229 + DEFAULT_RATELIMIT_BURST); 230 + return show_unhandled_signals && __ratelimit(&rs); 231 + } 232 + 233 + void arm64_force_sig_info(struct siginfo *info, const char *str, 234 + struct task_struct *tsk) 235 + { 236 + unsigned int esr = tsk->thread.fault_code; 237 + struct pt_regs *regs = task_pt_regs(tsk); 238 + 239 + if (!unhandled_signal(tsk, info->si_signo)) 240 + goto send_sig; 241 + 242 + if (!show_unhandled_signals_ratelimited()) 243 + goto send_sig; 244 + 245 + pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk)); 246 + if (esr) 247 + pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr); 248 + 249 + pr_cont("%s", str); 250 + print_vma_addr(KERN_CONT " in ", regs->pc); 251 + pr_cont("\n"); 252 + __show_regs(regs); 253 + 254 + send_sig: 255 + force_sig_info(info->si_signo, info, tsk); 256 + } 257 + 227 258 void arm64_notify_die(const char *str, struct pt_regs *regs, 228 259 struct siginfo *info, int err) 229 260 { 230 261 if (user_mode(regs)) { 262 + WARN_ON(regs != current_pt_regs()); 231 263 current->thread.fault_address = 0; 232 264 current->thread.fault_code = err; 233 - force_sig_info(info->si_signo, info, current); 265 + arm64_force_sig_info(info, str, current); 234 266 } else { 235 267 die(str, regs, err); 236 268 } ··· 345 311 return fn ? 
fn(regs, instr) : 1; 346 312 } 347 313 348 - void force_signal_inject(int signal, int code, struct pt_regs *regs, 349 - unsigned long address) 314 + void force_signal_inject(int signal, int code, unsigned long address) 350 315 { 351 316 siginfo_t info; 352 - void __user *pc = (void __user *)instruction_pointer(regs); 353 317 const char *desc; 318 + struct pt_regs *regs = current_pt_regs(); 319 + 320 + clear_siginfo(&info); 354 321 355 322 switch (signal) { 356 323 case SIGILL: ··· 365 330 break; 366 331 } 367 332 368 - if (unhandled_signal(current, signal) && 369 - show_unhandled_signals_ratelimited()) { 370 - pr_info("%s[%d]: %s: pc=%p\n", 371 - current->comm, task_pid_nr(current), desc, pc); 372 - dump_instr(KERN_INFO, regs); 333 + /* Force signals we don't understand to SIGKILL */ 334 + if (WARN_ON(signal != SIGKILL || 335 + siginfo_layout(signal, code) != SIL_FAULT)) { 336 + signal = SIGKILL; 373 337 } 374 338 375 339 info.si_signo = signal; 376 340 info.si_errno = 0; 377 341 info.si_code = code; 378 - info.si_addr = pc; 342 + info.si_addr = (void __user *)address; 379 343 380 344 arm64_notify_die(desc, regs, &info, 0); 381 345 } ··· 382 348 /* 383 349 * Set up process info to signal segmentation fault - called on access error. 
384 350 */ 385 - void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr) 351 + void arm64_notify_segfault(unsigned long addr) 386 352 { 387 353 int code; 388 354 ··· 393 359 code = SEGV_ACCERR; 394 360 up_read(&current->mm->mmap_sem); 395 361 396 - force_signal_inject(SIGSEGV, code, regs, addr); 362 + force_signal_inject(SIGSEGV, code, addr); 397 363 } 398 364 399 365 asmlinkage void __exception do_undefinstr(struct pt_regs *regs) ··· 405 371 if (call_undef_hook(regs) == 0) 406 372 return; 407 373 408 - force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); 374 + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); 409 375 } 410 376 411 - int cpu_enable_cache_maint_trap(void *__unused) 377 + void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused) 412 378 { 413 379 config_sctlr_el1(SCTLR_EL1_UCI, 0); 414 - return 0; 415 380 } 416 381 417 382 #define __user_cache_maint(insn, address, res) \ ··· 459 426 __user_cache_maint("ic ivau", address, ret); 460 427 break; 461 428 default: 462 - force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); 429 + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); 463 430 return; 464 431 } 465 432 466 433 if (ret) 467 - arm64_notify_segfault(regs, address); 434 + arm64_notify_segfault(address); 468 435 else 469 436 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE); 470 437 } ··· 633 600 { 634 601 siginfo_t info; 635 602 void __user *pc = (void __user *)instruction_pointer(regs); 636 - console_verbose(); 637 - 638 - pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n", 639 - smp_processor_id(), esr, esr_get_class_string(esr)); 640 - __show_regs(regs); 641 603 642 604 info.si_signo = SIGILL; 643 605 info.si_errno = 0; ··· 640 612 info.si_addr = pc; 641 613 642 614 current->thread.fault_address = 0; 643 - current->thread.fault_code = 0; 615 + current->thread.fault_code = esr; 644 616 645 - force_sig_info(info.si_signo, &info, current); 617 + arm64_force_sig_info(&info, "Bad EL0 
synchronous exception", current); 646 618 } 647 619 648 620 #ifdef CONFIG_VMAP_STACK
+2 -1
arch/arm64/lib/Makefile
··· 17 17 -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \ 18 18 -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ 19 19 -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ 20 - -fcall-saved-x18 20 + -fcall-saved-x18 -fomit-frame-pointer 21 + CFLAGS_REMOVE_atomic_ll_sc.o := -pg 21 22 22 23 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
+20 -1
arch/arm64/mm/cache.S
··· 50 50 */ 51 51 ENTRY(__flush_cache_user_range) 52 52 uaccess_ttbr0_enable x2, x3, x4 53 + alternative_if ARM64_HAS_CACHE_IDC 54 + dsb ishst 55 + b 7f 56 + alternative_else_nop_endif 53 57 dcache_line_size x2, x3 54 58 sub x3, x2, #1 55 59 bic x4, x0, x3 ··· 64 60 b.lo 1b 65 61 dsb ish 66 62 63 + 7: 64 + alternative_if ARM64_HAS_CACHE_DIC 65 + isb 66 + b 8f 67 + alternative_else_nop_endif 67 68 invalidate_icache_by_line x0, x1, x2, x3, 9f 68 - mov x0, #0 69 + 8: mov x0, #0 69 70 1: 70 71 uaccess_ttbr0_disable x1, x2 71 72 ret ··· 89 80 * - end - virtual end address of region 90 81 */ 91 82 ENTRY(invalidate_icache_range) 83 + alternative_if ARM64_HAS_CACHE_DIC 84 + mov x0, xzr 85 + isb 86 + ret 87 + alternative_else_nop_endif 88 + 92 89 uaccess_ttbr0_enable x2, x3, x4 93 90 94 91 invalidate_icache_by_line x0, x1, x2, x3, 2f ··· 131 116 * - size - size in question 132 117 */ 133 118 ENTRY(__clean_dcache_area_pou) 119 + alternative_if ARM64_HAS_CACHE_IDC 120 + dsb ishst 121 + ret 122 + alternative_else_nop_endif 134 123 dcache_by_line_op cvau, ish, x0, x1, x2, x3 135 124 ret 136 125 ENDPROC(__clean_dcache_area_pou)
+103 -133
arch/arm64/mm/fault.c
··· 43 43 #include <asm/system_misc.h> 44 44 #include <asm/pgtable.h> 45 45 #include <asm/tlbflush.h> 46 + #include <asm/traps.h> 46 47 47 48 #include <acpi/ghes.h> 48 49 ··· 290 289 do_exit(SIGKILL); 291 290 } 292 291 293 - static void __do_user_fault(struct task_struct *tsk, unsigned long addr, 294 - unsigned int esr, unsigned int sig, int code, 295 - struct pt_regs *regs, int fault) 292 + static void __do_user_fault(struct siginfo *info, unsigned int esr) 296 293 { 297 - struct siginfo si; 298 - const struct fault_info *inf; 299 - unsigned int lsb = 0; 300 - 301 - if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) { 302 - inf = esr_to_fault_info(esr); 303 - pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x", 304 - tsk->comm, task_pid_nr(tsk), inf->name, sig, 305 - addr, esr); 306 - print_vma_addr(KERN_CONT ", in ", regs->pc); 307 - pr_cont("\n"); 308 - __show_regs(regs); 309 - } 310 - 311 - tsk->thread.fault_address = addr; 312 - tsk->thread.fault_code = esr; 313 - si.si_signo = sig; 314 - si.si_errno = 0; 315 - si.si_code = code; 316 - si.si_addr = (void __user *)addr; 317 - /* 318 - * Either small page or large page may be poisoned. 319 - * In other words, VM_FAULT_HWPOISON_LARGE and 320 - * VM_FAULT_HWPOISON are mutually exclusive. 
321 - */ 322 - if (fault & VM_FAULT_HWPOISON_LARGE) 323 - lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); 324 - else if (fault & VM_FAULT_HWPOISON) 325 - lsb = PAGE_SHIFT; 326 - si.si_addr_lsb = lsb; 327 - 328 - force_sig_info(sig, &si, tsk); 294 + current->thread.fault_address = (unsigned long)info->si_addr; 295 + current->thread.fault_code = esr; 296 + arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current); 329 297 } 330 298 331 299 static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) 332 300 { 333 - struct task_struct *tsk = current; 334 - const struct fault_info *inf; 335 - 336 301 /* 337 302 * If we are in kernel mode at this point, we have no context to 338 303 * handle this fault with. 339 304 */ 340 305 if (user_mode(regs)) { 341 - inf = esr_to_fault_info(esr); 342 - __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs, 0); 343 - } else 306 + const struct fault_info *inf = esr_to_fault_info(esr); 307 + struct siginfo si = { 308 + .si_signo = inf->sig, 309 + .si_code = inf->code, 310 + .si_addr = (void __user *)addr, 311 + }; 312 + 313 + __do_user_fault(&si, esr); 314 + } else { 344 315 __do_kernel_fault(addr, esr, regs); 316 + } 345 317 } 346 318 347 319 #define VM_FAULT_BADMAP 0x010000 ··· 367 393 { 368 394 struct task_struct *tsk; 369 395 struct mm_struct *mm; 370 - int fault, sig, code, major = 0; 396 + struct siginfo si; 397 + int fault, major = 0; 371 398 unsigned long vm_flags = VM_READ | VM_WRITE; 372 399 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 373 400 ··· 500 525 return 0; 501 526 } 502 527 528 + clear_siginfo(&si); 529 + si.si_addr = (void __user *)addr; 530 + 503 531 if (fault & VM_FAULT_SIGBUS) { 504 532 /* 505 533 * We had some memory, but were unable to successfully fix up 506 534 * this page fault. 
507 535 */ 508 - sig = SIGBUS; 509 - code = BUS_ADRERR; 510 - } else if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) { 511 - sig = SIGBUS; 512 - code = BUS_MCEERR_AR; 536 + si.si_signo = SIGBUS; 537 + si.si_code = BUS_ADRERR; 538 + } else if (fault & VM_FAULT_HWPOISON_LARGE) { 539 + unsigned int hindex = VM_FAULT_GET_HINDEX(fault); 540 + 541 + si.si_signo = SIGBUS; 542 + si.si_code = BUS_MCEERR_AR; 543 + si.si_addr_lsb = hstate_index_to_shift(hindex); 544 + } else if (fault & VM_FAULT_HWPOISON) { 545 + si.si_signo = SIGBUS; 546 + si.si_code = BUS_MCEERR_AR; 547 + si.si_addr_lsb = PAGE_SHIFT; 513 548 } else { 514 549 /* 515 550 * Something tried to access memory that isn't in our memory 516 551 * map. 517 552 */ 518 - sig = SIGSEGV; 519 - code = fault == VM_FAULT_BADACCESS ? 520 - SEGV_ACCERR : SEGV_MAPERR; 553 + si.si_signo = SIGSEGV; 554 + si.si_code = fault == VM_FAULT_BADACCESS ? 555 + SEGV_ACCERR : SEGV_MAPERR; 521 556 } 522 557 523 - __do_user_fault(tsk, addr, esr, sig, code, regs, fault); 558 + __do_user_fault(&si, esr); 524 559 return 0; 525 560 526 561 no_context: ··· 567 582 const struct fault_info *inf; 568 583 569 584 inf = esr_to_fault_info(esr); 570 - pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n", 571 - inf->name, esr, addr); 572 585 573 586 /* 574 587 * Synchronous aborts may interrupt code which had interrupts masked. 
··· 583 600 nmi_exit(); 584 601 } 585 602 586 - info.si_signo = SIGBUS; 603 + info.si_signo = inf->sig; 587 604 info.si_errno = 0; 588 - info.si_code = BUS_FIXME; 605 + info.si_code = inf->code; 589 606 if (esr & ESR_ELx_FnV) 590 607 info.si_addr = NULL; 591 608 else 592 609 info.si_addr = (void __user *)addr; 593 - arm64_notify_die("", regs, &info, esr); 610 + arm64_notify_die(inf->name, regs, &info, esr); 594 611 595 612 return 0; 596 613 } 597 614 598 615 static const struct fault_info fault_info[] = { 599 - { do_bad, SIGBUS, BUS_FIXME, "ttbr address size fault" }, 600 - { do_bad, SIGBUS, BUS_FIXME, "level 1 address size fault" }, 601 - { do_bad, SIGBUS, BUS_FIXME, "level 2 address size fault" }, 602 - { do_bad, SIGBUS, BUS_FIXME, "level 3 address size fault" }, 616 + { do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" }, 617 + { do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" }, 618 + { do_bad, SIGKILL, SI_KERNEL, "level 2 address size fault" }, 619 + { do_bad, SIGKILL, SI_KERNEL, "level 3 address size fault" }, 603 620 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, 604 621 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, 605 622 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, 606 623 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, 607 - { do_bad, SIGBUS, BUS_FIXME, "unknown 8" }, 624 + { do_bad, SIGKILL, SI_KERNEL, "unknown 8" }, 608 625 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, 609 626 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 610 627 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, 611 - { do_bad, SIGBUS, BUS_FIXME, "unknown 12" }, 628 + { do_bad, SIGKILL, SI_KERNEL, "unknown 12" }, 612 629 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, 613 630 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, 614 631 { 
do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, 615 - { do_sea, SIGBUS, BUS_FIXME, "synchronous external abort" }, 616 - { do_bad, SIGBUS, BUS_FIXME, "unknown 17" }, 617 - { do_bad, SIGBUS, BUS_FIXME, "unknown 18" }, 618 - { do_bad, SIGBUS, BUS_FIXME, "unknown 19" }, 619 - { do_sea, SIGBUS, BUS_FIXME, "level 0 (translation table walk)" }, 620 - { do_sea, SIGBUS, BUS_FIXME, "level 1 (translation table walk)" }, 621 - { do_sea, SIGBUS, BUS_FIXME, "level 2 (translation table walk)" }, 622 - { do_sea, SIGBUS, BUS_FIXME, "level 3 (translation table walk)" }, 623 - { do_sea, SIGBUS, BUS_FIXME, "synchronous parity or ECC error" }, // Reserved when RAS is implemented 624 - { do_bad, SIGBUS, BUS_FIXME, "unknown 25" }, 625 - { do_bad, SIGBUS, BUS_FIXME, "unknown 26" }, 626 - { do_bad, SIGBUS, BUS_FIXME, "unknown 27" }, 627 - { do_sea, SIGBUS, BUS_FIXME, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 628 - { do_sea, SIGBUS, BUS_FIXME, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 629 - { do_sea, SIGBUS, BUS_FIXME, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 630 - { do_sea, SIGBUS, BUS_FIXME, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 631 - { do_bad, SIGBUS, BUS_FIXME, "unknown 32" }, 632 + { do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" }, 633 + { do_bad, SIGKILL, SI_KERNEL, "unknown 17" }, 634 + { do_bad, SIGKILL, SI_KERNEL, "unknown 18" }, 635 + { do_bad, SIGKILL, SI_KERNEL, "unknown 19" }, 636 + { do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" }, 637 + { do_sea, SIGKILL, SI_KERNEL, "level 1 (translation table walk)" }, 638 + { do_sea, SIGKILL, SI_KERNEL, "level 2 (translation table walk)" }, 639 + { do_sea, SIGKILL, SI_KERNEL, "level 3 (translation table walk)" }, 640 + { do_sea, SIGBUS, BUS_OBJERR, "synchronous 
parity or ECC error" }, // Reserved when RAS is implemented 641 + { do_bad, SIGKILL, SI_KERNEL, "unknown 25" }, 642 + { do_bad, SIGKILL, SI_KERNEL, "unknown 26" }, 643 + { do_bad, SIGKILL, SI_KERNEL, "unknown 27" }, 644 + { do_sea, SIGKILL, SI_KERNEL, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 645 + { do_sea, SIGKILL, SI_KERNEL, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 646 + { do_sea, SIGKILL, SI_KERNEL, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 647 + { do_sea, SIGKILL, SI_KERNEL, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented 648 + { do_bad, SIGKILL, SI_KERNEL, "unknown 32" }, 632 649 { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" }, 633 - { do_bad, SIGBUS, BUS_FIXME, "unknown 34" }, 634 - { do_bad, SIGBUS, BUS_FIXME, "unknown 35" }, 635 - { do_bad, SIGBUS, BUS_FIXME, "unknown 36" }, 636 - { do_bad, SIGBUS, BUS_FIXME, "unknown 37" }, 637 - { do_bad, SIGBUS, BUS_FIXME, "unknown 38" }, 638 - { do_bad, SIGBUS, BUS_FIXME, "unknown 39" }, 639 - { do_bad, SIGBUS, BUS_FIXME, "unknown 40" }, 640 - { do_bad, SIGBUS, BUS_FIXME, "unknown 41" }, 641 - { do_bad, SIGBUS, BUS_FIXME, "unknown 42" }, 642 - { do_bad, SIGBUS, BUS_FIXME, "unknown 43" }, 643 - { do_bad, SIGBUS, BUS_FIXME, "unknown 44" }, 644 - { do_bad, SIGBUS, BUS_FIXME, "unknown 45" }, 645 - { do_bad, SIGBUS, BUS_FIXME, "unknown 46" }, 646 - { do_bad, SIGBUS, BUS_FIXME, "unknown 47" }, 647 - { do_bad, SIGBUS, BUS_FIXME, "TLB conflict abort" }, 648 - { do_bad, SIGBUS, BUS_FIXME, "Unsupported atomic hardware update fault" }, 649 - { do_bad, SIGBUS, BUS_FIXME, "unknown 50" }, 650 - { do_bad, SIGBUS, BUS_FIXME, "unknown 51" }, 651 - { do_bad, SIGBUS, BUS_FIXME, "implementation fault (lockdown abort)" }, 652 - { do_bad, SIGBUS, BUS_FIXME, "implementation fault (unsupported exclusive)" }, 
653 - { do_bad, SIGBUS, BUS_FIXME, "unknown 54" }, 654 - { do_bad, SIGBUS, BUS_FIXME, "unknown 55" }, 655 - { do_bad, SIGBUS, BUS_FIXME, "unknown 56" }, 656 - { do_bad, SIGBUS, BUS_FIXME, "unknown 57" }, 657 - { do_bad, SIGBUS, BUS_FIXME, "unknown 58" }, 658 - { do_bad, SIGBUS, BUS_FIXME, "unknown 59" }, 659 - { do_bad, SIGBUS, BUS_FIXME, "unknown 60" }, 660 - { do_bad, SIGBUS, BUS_FIXME, "section domain fault" }, 661 - { do_bad, SIGBUS, BUS_FIXME, "page domain fault" }, 662 - { do_bad, SIGBUS, BUS_FIXME, "unknown 63" }, 650 + { do_bad, SIGKILL, SI_KERNEL, "unknown 34" }, 651 + { do_bad, SIGKILL, SI_KERNEL, "unknown 35" }, 652 + { do_bad, SIGKILL, SI_KERNEL, "unknown 36" }, 653 + { do_bad, SIGKILL, SI_KERNEL, "unknown 37" }, 654 + { do_bad, SIGKILL, SI_KERNEL, "unknown 38" }, 655 + { do_bad, SIGKILL, SI_KERNEL, "unknown 39" }, 656 + { do_bad, SIGKILL, SI_KERNEL, "unknown 40" }, 657 + { do_bad, SIGKILL, SI_KERNEL, "unknown 41" }, 658 + { do_bad, SIGKILL, SI_KERNEL, "unknown 42" }, 659 + { do_bad, SIGKILL, SI_KERNEL, "unknown 43" }, 660 + { do_bad, SIGKILL, SI_KERNEL, "unknown 44" }, 661 + { do_bad, SIGKILL, SI_KERNEL, "unknown 45" }, 662 + { do_bad, SIGKILL, SI_KERNEL, "unknown 46" }, 663 + { do_bad, SIGKILL, SI_KERNEL, "unknown 47" }, 664 + { do_bad, SIGKILL, SI_KERNEL, "TLB conflict abort" }, 665 + { do_bad, SIGKILL, SI_KERNEL, "Unsupported atomic hardware update fault" }, 666 + { do_bad, SIGKILL, SI_KERNEL, "unknown 50" }, 667 + { do_bad, SIGKILL, SI_KERNEL, "unknown 51" }, 668 + { do_bad, SIGKILL, SI_KERNEL, "implementation fault (lockdown abort)" }, 669 + { do_bad, SIGBUS, BUS_OBJERR, "implementation fault (unsupported exclusive)" }, 670 + { do_bad, SIGKILL, SI_KERNEL, "unknown 54" }, 671 + { do_bad, SIGKILL, SI_KERNEL, "unknown 55" }, 672 + { do_bad, SIGKILL, SI_KERNEL, "unknown 56" }, 673 + { do_bad, SIGKILL, SI_KERNEL, "unknown 57" }, 674 + { do_bad, SIGKILL, SI_KERNEL, "unknown 58" }, 675 + { do_bad, SIGKILL, SI_KERNEL, "unknown 59" }, 676 + { do_bad, 
SIGKILL, SI_KERNEL, "unknown 60" }, 677 + { do_bad, SIGKILL, SI_KERNEL, "section domain fault" }, 678 + { do_bad, SIGKILL, SI_KERNEL, "page domain fault" }, 679 + { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, 663 680 }; 664 681 665 682 int handle_guest_sea(phys_addr_t addr, unsigned int esr) ··· 681 698 if (!inf->fn(addr, esr, regs)) 682 699 return; 683 700 684 - pr_alert("Unhandled fault: %s at 0x%016lx\n", 685 - inf->name, addr); 686 - 687 - mem_abort_decode(esr); 688 - 689 - if (!user_mode(regs)) 701 + if (!user_mode(regs)) { 702 + pr_alert("Unhandled fault at 0x%016lx\n", addr); 703 + mem_abort_decode(esr); 690 704 show_pte(addr); 705 + } 691 706 692 707 info.si_signo = inf->sig; 693 708 info.si_errno = 0; 694 709 info.si_code = inf->code; 695 710 info.si_addr = (void __user *)addr; 696 - arm64_notify_die("", regs, &info, esr); 711 + arm64_notify_die(inf->name, regs, &info, esr); 697 712 } 698 713 699 714 asmlinkage void __exception do_el0_irq_bp_hardening(void) ··· 722 741 struct pt_regs *regs) 723 742 { 724 743 struct siginfo info; 725 - struct task_struct *tsk = current; 726 744 727 745 if (user_mode(regs)) { 728 746 if (instruction_pointer(regs) > TASK_SIZE) ··· 729 749 local_irq_enable(); 730 750 } 731 751 732 - if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) 733 - pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", 734 - tsk->comm, task_pid_nr(tsk), 735 - esr_get_class_string(esr), (void *)regs->pc, 736 - (void *)regs->sp); 737 - 738 752 info.si_signo = SIGBUS; 739 753 info.si_errno = 0; 740 754 info.si_code = BUS_ADRALN; 741 755 info.si_addr = (void __user *)addr; 742 - arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr); 756 + arm64_notify_die("SP/PC alignment exception", regs, &info, esr); 743 757 } 744 758 745 759 int __init early_brk64(unsigned long addr, unsigned int esr, ··· 748 774 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" }, 749 775 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" }, 750 
776 { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" }, 751 - { do_bad, SIGBUS, BUS_FIXME, "unknown 3" }, 777 + { do_bad, SIGKILL, SI_KERNEL, "unknown 3" }, 752 778 { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" }, 753 - { do_bad, SIGTRAP, TRAP_FIXME, "aarch32 vector catch" }, 779 + { do_bad, SIGKILL, SI_KERNEL, "aarch32 vector catch" }, 754 780 { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" }, 755 - { do_bad, SIGBUS, BUS_FIXME, "unknown 7" }, 781 + { do_bad, SIGKILL, SI_KERNEL, "unknown 7" }, 756 782 }; 757 783 758 784 void __init hook_debug_fault_code(int nr, ··· 788 814 if (!inf->fn(addr, esr, regs)) { 789 815 rv = 1; 790 816 } else { 791 - pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n", 792 - inf->name, esr, addr); 793 - 794 817 info.si_signo = inf->sig; 795 818 info.si_errno = 0; 796 819 info.si_code = inf->code; 797 820 info.si_addr = (void __user *)addr; 798 - arm64_notify_die("", regs, &info, 0); 821 + arm64_notify_die(inf->name, regs, &info, esr); 799 822 rv = 0; 800 823 } 801 824 ··· 804 833 NOKPROBE_SYMBOL(do_debug_exception); 805 834 806 835 #ifdef CONFIG_ARM64_PAN 807 - int cpu_enable_pan(void *__unused) 836 + void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) 808 837 { 809 838 /* 810 839 * We modify PSTATE. This won't work from irq context as the PSTATE ··· 814 843 815 844 config_sctlr_el1(SCTLR_EL1_SPAN, 0); 816 845 asm(SET_PSTATE_PAN(1)); 817 - return 0; 818 846 } 819 847 #endif /* CONFIG_ARM64_PAN */
+14 -8
arch/arm64/mm/proc.S
··· 36 36 #define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K 37 37 #endif 38 38 39 + #ifdef CONFIG_RANDOMIZE_BASE 40 + #define TCR_KASLR_FLAGS TCR_NFD1 41 + #else 42 + #define TCR_KASLR_FLAGS 0 43 + #endif 44 + 39 45 #define TCR_SMP_FLAGS TCR_SHARED 40 46 41 47 /* PTWs cacheable, inner/outer WBWA */ ··· 438 432 * both user and kernel. 439 433 */ 440 434 ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ 441 - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1 435 + TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ 436 + TCR_TBI0 | TCR_A1 442 437 tcr_set_idmap_t0sz x10, x9 443 438 444 439 /* ··· 448 441 tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6 449 442 #ifdef CONFIG_ARM64_HW_AFDBM 450 443 /* 451 - * Hardware update of the Access and Dirty bits. 444 + * Enable hardware update of the Access Flags bit. 445 + * Hardware dirty bit management is enabled later, 446 + * via capabilities. 452 447 */ 453 448 mrs x9, ID_AA64MMFR1_EL1 454 449 and x9, x9, #0xf 455 - cbz x9, 2f 456 - cmp x9, #2 457 - b.lt 1f 458 - orr x10, x10, #TCR_HD // hardware Dirty flag update 459 - 1: orr x10, x10, #TCR_HA // hardware Access flag update 460 - 2: 450 + cbz x9, 1f 451 + orr x10, x10, #TCR_HA // hardware Access flag update 452 + 1: 461 453 #endif /* CONFIG_ARM64_HW_AFDBM */ 462 454 msr tcr_el1, x10 463 455 ret // return to head.S
+1 -1
arch/x86/kernel/signal_compat.c
··· 26 26 * new fields are handled in copy_siginfo_to_user32()! 27 27 */ 28 28 BUILD_BUG_ON(NSIGILL != 11); 29 - BUILD_BUG_ON(NSIGFPE != 13); 29 + BUILD_BUG_ON(NSIGFPE != 14); 30 30 BUILD_BUG_ON(NSIGSEGV != 7); 31 31 BUILD_BUG_ON(NSIGBUS != 5); 32 32 BUILD_BUG_ON(NSIGTRAP != 4);
-12
drivers/acpi/arm64/iort.c
··· 31 31 #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ 32 32 (1 << ACPI_IORT_NODE_SMMU_V3)) 33 33 34 - /* Until ACPICA headers cover IORT rev. C */ 35 - #ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 36 - #define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 37 - #endif 38 - 39 34 struct iort_its_msi_chip { 40 35 struct list_head list; 41 36 struct fwnode_handle *fw_node; ··· 361 366 return NULL; 362 367 } 363 368 364 - #if (ACPI_CA_VERSION > 0x20170929) 365 369 static int iort_get_id_mapping_index(struct acpi_iort_node *node) 366 370 { 367 371 struct acpi_iort_smmu_v3 *smmu; ··· 394 400 return -EINVAL; 395 401 } 396 402 } 397 - #else 398 - static inline int iort_get_id_mapping_index(struct acpi_iort_node *node) 399 - { 400 - return -EINVAL; 401 - } 402 - #endif 403 403 404 404 static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node, 405 405 u32 id_in, u32 *id_out,
+1 -1
drivers/perf/arm_pmu_platform.c
··· 122 122 return pmu_parse_percpu_irq(pmu, irq); 123 123 } 124 124 125 - if (!pmu_has_irq_affinity(pdev->dev.of_node)) { 125 + if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) { 126 126 pr_warn("no interrupt-affinity property for %pOF, guessing.\n", 127 127 pdev->dev.of_node); 128 128 }
+14
drivers/perf/arm_spe_pmu.c
··· 23 23 #define DRVNAME PMUNAME "_pmu" 24 24 #define pr_fmt(fmt) DRVNAME ": " fmt 25 25 26 + #include <linux/bitops.h> 27 + #include <linux/bug.h> 28 + #include <linux/capability.h> 26 29 #include <linux/cpuhotplug.h> 30 + #include <linux/cpumask.h> 31 + #include <linux/device.h> 32 + #include <linux/errno.h> 27 33 #include <linux/interrupt.h> 28 34 #include <linux/irq.h> 35 + #include <linux/kernel.h> 36 + #include <linux/list.h> 29 37 #include <linux/module.h> 30 38 #include <linux/of_address.h> 31 39 #include <linux/of_device.h> 32 40 #include <linux/perf_event.h> 33 41 #include <linux/platform_device.h> 42 + #include <linux/printk.h> 34 43 #include <linux/slab.h> 44 + #include <linux/smp.h> 45 + #include <linux/vmalloc.h> 35 46 47 + #include <asm/barrier.h> 48 + #include <asm/cpufeature.h> 49 + #include <asm/mmu.h> 36 50 #include <asm/sysreg.h> 37 51 38 52 #define ARM_SPE_BUF_PAD_BYTE 0
-1
include/asm-generic/vmlinux.lds.h
··· 599 599 IRQCHIP_OF_MATCH_TABLE() \ 600 600 ACPI_PROBE_TABLE(irqchip) \ 601 601 ACPI_PROBE_TABLE(timer) \ 602 - ACPI_PROBE_TABLE(iort) \ 603 602 EARLYCON_TABLE() 604 603 605 604 #define INIT_TEXT \
+4
include/linux/sizes.h
··· 8 8 #ifndef __LINUX_SIZES_H__ 9 9 #define __LINUX_SIZES_H__ 10 10 11 + #include <linux/const.h> 12 + 11 13 #define SZ_1 0x00000001 12 14 #define SZ_2 0x00000002 13 15 #define SZ_4 0x00000004 ··· 45 43 46 44 #define SZ_1G 0x40000000 47 45 #define SZ_2G 0x80000000 46 + 47 + #define SZ_4G _AC(0x100000000, ULL) 48 48 49 49 #endif /* __LINUX_SIZES_H__ */
+2 -1
include/uapi/asm-generic/siginfo.h
··· 207 207 #define __FPE_DECERR 11 /* packed decimal error */ 208 208 #define __FPE_INVASC 12 /* invalid ASCII digit */ 209 209 #define __FPE_INVDEC 13 /* invalid decimal digit */ 210 - #define NSIGFPE 13 210 + #define FPE_FLTUNK 14 /* undiagnosed floating-point exception */ 211 + #define NSIGFPE 14 211 212 212 213 /* 213 214 * SIGSEGV si_codes
-4
kernel/signal.c
··· 2844 2844 if ((sig == SIGFPE) && (si_code == FPE_FIXME)) 2845 2845 layout = SIL_FAULT; 2846 2846 #endif 2847 - #ifdef BUS_FIXME 2848 - if ((sig == SIGBUS) && (si_code == BUS_FIXME)) 2849 - layout = SIL_FAULT; 2850 - #endif 2851 2847 } 2852 2848 return layout; 2853 2849 }
+1
scripts/kallsyms.c
··· 221 221 222 222 static char *special_prefixes[] = { 223 223 "__crc_", /* modversions */ 224 + "__efistub_", /* arm64 EFI stub namespace */ 224 225 NULL }; 225 226 226 227 static char *special_suffixes[] = {