Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: kernel: Add support for Privileged Access Never

'Privileged Access Never' is a new ARMv8.1 feature which prevents
privileged code from accessing any virtual address where read or write
access is also permitted at EL0.

This patch enables the PAN feature on all CPUs, and modifies {get,put}_user
helpers temporarily to permit access.

This will catch kernel bugs where user memory is accessed directly.
'Unprivileged loads and stores' using ldtrb et al are unaffected by PAN.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use ALTERNATIVE in asm and tidy up pan_enable check]
Signed-off-by: Will Deacon <will.deacon@arm.com>

authored by

James Morse and committed by
Will Deacon
338d4f49 9ded63aa

+121 -2
+14
arch/arm64/Kconfig
··· 596 596 default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE) 597 597 default "11" 598 598 599 + config ARM64_PAN 600 + bool "Enable support for Privileged Access Never (PAN)" 601 + default y 602 + help 603 + Privileged Access Never (PAN; part of the ARMv8.1 Extensions) 604 + prevents the kernel or hypervisor from accessing user-space (EL0) 605 + memory directly. 606 + 607 + Choosing this option will cause any unprotected (not using 608 + copy_to_user et al) memory access to fail with a permission fault. 609 + 610 + The feature is detected at runtime, and will remain as a 'nop' 611 + instruction if the cpu does not implement the feature. 612 + 599 613 menuconfig ARMV8_DEPRECATED 600 614 bool "Emulate deprecated/obsolete ARMv8 instructions" 601 615 depends on COMPAT
+2 -1
arch/arm64/include/asm/cpufeature.h
··· 25 25 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 26 26 #define ARM64_WORKAROUND_845719 2 27 27 #define ARM64_HAS_SYSREG_GIC_CPUIF 3 28 + #define ARM64_HAS_PAN 4 28 29 29 - #define ARM64_NCAPS 4 30 + #define ARM64_NCAPS 5 30 31 31 32 #ifndef __ASSEMBLY__ 32 33
+8
arch/arm64/include/asm/futex.h
··· 20 20 21 21 #include <linux/futex.h> 22 22 #include <linux/uaccess.h> 23 + 24 + #include <asm/alternative.h> 25 + #include <asm/cpufeature.h> 23 26 #include <asm/errno.h> 27 + #include <asm/sysreg.h> 24 28 25 29 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ 26 30 asm volatile( \ 31 + ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ 32 + CONFIG_ARM64_PAN) \ 27 33 "1: ldxr %w1, %2\n" \ 28 34 insn "\n" \ 29 35 "2: stlxr %w3, %w0, %2\n" \ ··· 45 39 " .align 3\n" \ 46 40 " .quad 1b, 4b, 2b, 4b\n" \ 47 41 " .popsection\n" \ 42 + ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ 43 + CONFIG_ARM64_PAN) \ 48 44 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ 49 45 : "r" (oparg), "Ir" (-EFAULT) \ 50 46 : "memory")
+2
arch/arm64/include/asm/processor.h
··· 186 186 187 187 #endif 188 188 189 + void cpu_enable_pan(void); 190 + 189 191 #endif /* __ASM_PROCESSOR_H */
+8
arch/arm64/include/asm/sysreg.h
··· 20 20 #ifndef __ASM_SYSREG_H 21 21 #define __ASM_SYSREG_H 22 22 23 + #include <asm/opcodes.h> 24 + 23 25 #define SCTLR_EL1_CP15BEN (0x1 << 5) 24 26 #define SCTLR_EL1_SED (0x1 << 8) 25 27 ··· 37 35 */ 38 36 #define sys_reg(op0, op1, crn, crm, op2) \ 39 37 ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5)) 38 + 39 + #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4) 40 + #define SCTLR_EL1_SPAN (1 << 23) 41 + 42 + #define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\ 43 + (!!x)<<8 | 0x1f) 40 44 41 45 #ifdef __ASSEMBLY__ 42 46
+11
arch/arm64/include/asm/uaccess.h
··· 24 24 #include <linux/string.h> 25 25 #include <linux/thread_info.h> 26 26 27 + #include <asm/alternative.h> 28 + #include <asm/cpufeature.h> 27 29 #include <asm/ptrace.h> 30 + #include <asm/sysreg.h> 28 31 #include <asm/errno.h> 29 32 #include <asm/memory.h> 30 33 #include <asm/compiler.h> ··· 134 131 do { \ 135 132 unsigned long __gu_val; \ 136 133 __chk_user_ptr(ptr); \ 134 + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ 135 + CONFIG_ARM64_PAN)); \ 137 136 switch (sizeof(*(ptr))) { \ 138 137 case 1: \ 139 138 __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \ ··· 153 148 BUILD_BUG(); \ 154 149 } \ 155 150 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 151 + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ 152 + CONFIG_ARM64_PAN)); \ 156 153 } while (0) 157 154 158 155 #define __get_user(x, ptr) \ ··· 201 194 do { \ 202 195 __typeof__(*(ptr)) __pu_val = (x); \ 203 196 __chk_user_ptr(ptr); \ 197 + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ 198 + CONFIG_ARM64_PAN)); \ 204 199 switch (sizeof(*(ptr))) { \ 205 200 case 1: \ 206 201 __put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \ ··· 219 210 default: \ 220 211 BUILD_BUG(); \ 221 212 } \ 213 + asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ 214 + CONFIG_ARM64_PAN)); \ 222 215 } while (0) 223 216 224 217 #define __put_user(x, ptr) \
+1
arch/arm64/include/uapi/asm/ptrace.h
··· 44 44 #define PSR_I_BIT 0x00000080 45 45 #define PSR_A_BIT 0x00000100 46 46 #define PSR_D_BIT 0x00000200 47 + #define PSR_PAN_BIT 0x00400000 47 48 #define PSR_Q_BIT 0x08000000 48 49 #define PSR_V_BIT 0x10000000 49 50 #define PSR_C_BIT 0x20000000
+7 -1
arch/arm64/kernel/armv8_deprecated.c
··· 14 14 #include <linux/slab.h> 15 15 #include <linux/sysctl.h> 16 16 17 + #include <asm/alternative.h> 18 + #include <asm/cpufeature.h> 17 19 #include <asm/insn.h> 18 20 #include <asm/opcodes.h> 19 21 #include <asm/sysreg.h> ··· 282 280 */ 283 281 #define __user_swpX_asm(data, addr, res, temp, B) \ 284 282 __asm__ __volatile__( \ 283 + ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ 284 + CONFIG_ARM64_PAN) \ 285 285 " mov %w2, %w1\n" \ 286 286 "0: ldxr"B" %w1, [%3]\n" \ 287 287 "1: stxr"B" %w0, %w2, [%3]\n" \ ··· 299 295 " .align 3\n" \ 300 296 " .quad 0b, 3b\n" \ 301 297 " .quad 1b, 3b\n" \ 302 - " .popsection" \ 298 + " .popsection\n" \ 299 + ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ 300 + CONFIG_ARM64_PAN) \ 303 301 : "=&r" (res), "+r" (data), "=&r" (temp) \ 304 302 : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ 305 303 : "memory")
+20
arch/arm64/kernel/cpufeature.c
··· 21 21 #include <linux/types.h> 22 22 #include <asm/cpu.h> 23 23 #include <asm/cpufeature.h> 24 + #include <asm/processor.h> 24 25 25 26 static bool 26 27 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) ··· 40 39 return feature_matches(val, entry); 41 40 } 42 41 42 + static bool __maybe_unused 43 + has_id_aa64mmfr1_feature(const struct arm64_cpu_capabilities *entry) 44 + { 45 + u64 val; 46 + 47 + val = read_cpuid(id_aa64mmfr1_el1); 48 + return feature_matches(val, entry); 49 + } 50 + 43 51 static const struct arm64_cpu_capabilities arm64_features[] = { 44 52 { 45 53 .desc = "GIC system register CPU interface", ··· 57 47 .field_pos = 24, 58 48 .min_field_value = 1, 59 49 }, 50 + #ifdef CONFIG_ARM64_PAN 51 + { 52 + .desc = "Privileged Access Never", 53 + .capability = ARM64_HAS_PAN, 54 + .matches = has_id_aa64mmfr1_feature, 55 + .field_pos = 20, 56 + .min_field_value = 1, 57 + .enable = cpu_enable_pan, 58 + }, 59 + #endif /* CONFIG_ARM64_PAN */ 60 60 {}, 61 61 }; 62 62
+8
arch/arm64/lib/clear_user.S
··· 16 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 18 #include <linux/linkage.h> 19 + 20 + #include <asm/alternative.h> 19 21 #include <asm/assembler.h> 22 + #include <asm/cpufeature.h> 23 + #include <asm/sysreg.h> 20 24 21 25 .text 22 26 ··· 33 29 * Alignment fixed up by hardware. 34 30 */ 35 31 ENTRY(__clear_user) 32 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ 33 + CONFIG_ARM64_PAN) 36 34 mov x2, x1 // save the size for fixup return 37 35 subs x1, x1, #8 38 36 b.mi 2f ··· 54 48 b.mi 5f 55 49 USER(9f, strb wzr, [x0] ) 56 50 5: mov x0, #0 51 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ 52 + CONFIG_ARM64_PAN) 57 53 ret 58 54 ENDPROC(__clear_user) 59 55
+8
arch/arm64/lib/copy_from_user.S
··· 15 15 */ 16 16 17 17 #include <linux/linkage.h> 18 + 19 + #include <asm/alternative.h> 18 20 #include <asm/assembler.h> 21 + #include <asm/cpufeature.h> 22 + #include <asm/sysreg.h> 19 23 20 24 /* 21 25 * Copy from user space to a kernel buffer (alignment handled by the hardware) ··· 32 28 * x0 - bytes not copied 33 29 */ 34 30 ENTRY(__copy_from_user) 31 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ 32 + CONFIG_ARM64_PAN) 35 33 add x5, x1, x2 // upper user buffer boundary 36 34 subs x2, x2, #16 37 35 b.mi 1f ··· 62 56 USER(9f, ldrb w3, [x1] ) 63 57 strb w3, [x0] 64 58 5: mov x0, #0 59 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ 60 + CONFIG_ARM64_PAN) 65 61 ret 66 62 ENDPROC(__copy_from_user) 67 63
+8
arch/arm64/lib/copy_in_user.S
··· 17 17 */ 18 18 19 19 #include <linux/linkage.h> 20 + 21 + #include <asm/alternative.h> 20 22 #include <asm/assembler.h> 23 + #include <asm/cpufeature.h> 24 + #include <asm/sysreg.h> 21 25 22 26 /* 23 27 * Copy from user space to user space (alignment handled by the hardware) ··· 34 30 * x0 - bytes not copied 35 31 */ 36 32 ENTRY(__copy_in_user) 33 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ 34 + CONFIG_ARM64_PAN) 37 35 add x5, x0, x2 // upper user buffer boundary 38 36 subs x2, x2, #16 39 37 b.mi 1f ··· 64 58 USER(9f, ldrb w3, [x1] ) 65 59 USER(9f, strb w3, [x0] ) 66 60 5: mov x0, #0 61 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ 62 + CONFIG_ARM64_PAN) 67 63 ret 68 64 ENDPROC(__copy_in_user) 69 65
+8
arch/arm64/lib/copy_to_user.S
··· 15 15 */ 16 16 17 17 #include <linux/linkage.h> 18 + 19 + #include <asm/alternative.h> 18 20 #include <asm/assembler.h> 21 + #include <asm/cpufeature.h> 22 + #include <asm/sysreg.h> 19 23 20 24 /* 21 25 * Copy to user space from a kernel buffer (alignment handled by the hardware) ··· 32 28 * x0 - bytes not copied 33 29 */ 34 30 ENTRY(__copy_to_user) 31 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \ 32 + CONFIG_ARM64_PAN) 35 33 add x5, x0, x2 // upper user buffer boundary 36 34 subs x2, x2, #16 37 35 b.mi 1f ··· 62 56 ldrb w3, [x1] 63 57 USER(9f, strb w3, [x0] ) 64 58 5: mov x0, #0 59 + ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \ 60 + CONFIG_ARM64_PAN) 65 61 ret 66 62 ENDPROC(__copy_to_user) 67 63
+16
arch/arm64/mm/fault.c
··· 30 30 #include <linux/highmem.h> 31 31 #include <linux/perf_event.h> 32 32 33 + #include <asm/cpufeature.h> 33 34 #include <asm/exception.h> 34 35 #include <asm/debug-monitors.h> 35 36 #include <asm/esr.h> 37 + #include <asm/sysreg.h> 36 38 #include <asm/system_misc.h> 37 39 #include <asm/pgtable.h> 38 40 #include <asm/tlbflush.h> ··· 224 222 vm_flags = VM_WRITE; 225 223 mm_flags |= FAULT_FLAG_WRITE; 226 224 } 225 + 226 + /* 227 + * PAN bit set implies the fault happened in kernel space, but not 228 + * in the arch's user access functions. 229 + */ 230 + if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT)) 231 + goto no_context; 227 232 228 233 /* 229 234 * As per x86, we may deadlock here. However, since the kernel only ··· 545 536 546 537 return 0; 547 538 } 539 + 540 + #ifdef CONFIG_ARM64_PAN 541 + void cpu_enable_pan(void) 542 + { 543 + config_sctlr_el1(SCTLR_EL1_SPAN, 0); 544 + } 545 + #endif /* CONFIG_ARM64_PAN */