Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: kernel: Don't toggle PAN on systems with UAO

If a CPU supports both Privileged Access Never (PAN) and User Access
Override (UAO), we don't need to disable/re-enable PAN around all
copy_to_user()-like calls.

UAO alternatives cause these calls to use the 'unprivileged' load/store
instructions, which are overridden to be the privileged kind when
fs==KERNEL_DS.

This patch changes the copy_to_user() calls to have their PAN toggling
depend on a new composite 'feature' ARM64_ALT_PAN_NOT_UAO.

If both features are detected, PAN will be enabled, but the copy_to_user()
alternatives will not be applied. This means PAN will be enabled all the
time for these functions. If only PAN is detected, the toggling will be
enabled as normal.

This will save the time taken to disable/re-enable PAN, and allow us to
catch copy_to_user() accesses that occur with fs==KERNEL_DS.

Futex and swp-emulation code continue to hang their PAN toggling code on
ARM64_HAS_PAN.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by James Morse; committed by Catalin Marinas.
Commit hashes: 70544196, 644c2ae1

+33 -13
+2 -1
arch/arm64/include/asm/cpufeature.h
 #define ARM64_WORKAROUND_834220		7
 #define ARM64_HAS_NO_HW_PREFETCH	8
 #define ARM64_HAS_UAO			9
+#define ARM64_ALT_PAN_NOT_UAO		10

-#define ARM64_NCAPS			10
+#define ARM64_NCAPS			11

 #ifndef __ASSEMBLY__
+4 -4
arch/arm64/include/asm/uaccess.h
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
···
 		BUILD_BUG();						\
 	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));				\
 } while (0)
···
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
···
 	default:							\
 		BUILD_BUG();						\
 	}								\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
 			CONFIG_ARM64_PAN));				\
 } while (0)
+16
arch/arm64/kernel/cpufeature.c
 	.width = 0,				\
 }

+/* meta feature for alternatives */
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry);
+
 static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
···
 		.enable = cpu_enable_uao,
 	},
 #endif /* CONFIG_ARM64_UAO */
+#ifdef CONFIG_ARM64_PAN
+	{
+		.capability = ARM64_ALT_PAN_NOT_UAO,
+		.matches = cpufeature_pan_not_uao,
+	},
+#endif /* CONFIG_ARM64_PAN */
 	{},
 };
···
 	if (L1_CACHE_BYTES < cls)
 		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
 			L1_CACHE_BYTES, cls);
+}
+
+static bool __maybe_unused
+cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry)
+{
+	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
 }
+2 -2
arch/arm64/lib/clear_user.S
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
···
 	b.mi	5f
 	uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
+2 -2
arch/arm64/lib/copy_from_user.S

 end	.req	x5
 ENTRY(__copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0			// Nothing to copy
 	ret
+2 -2
arch/arm64/lib/copy_in_user.S

 end	.req	x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
+2 -2
arch/arm64/lib/copy_to_user.S

 end	.req	x5
 ENTRY(__copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
 	    CONFIG_ARM64_PAN)
 	mov	x0, #0
 	ret
+3
arch/arm64/mm/fault.c
 	}

 	if (permission_fault(esr) && (addr < USER_DS)) {
+		if (get_thread_info(regs->sp)->addr_limit == KERNEL_DS)
+			panic("Accessing user space memory with fs=KERNEL_DS");
+
 		if (!search_exception_tables(regs->pc))
 			panic("Accessing user space memory outside uaccess.h routines");
 	}