Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: extable: add a dedicated uaccess handler

For inline assembly, we place exception fixups out-of-line in the
`.fixup` section such that these are out of the way of the fast path.
This has a few drawbacks:

* Since the fixup code is anonymous, backtraces will symbolize fixups as
offsets from the nearest prior symbol, currently
`__entry_tramp_text_end`. This is confusing, and painful to debug
without access to the relevant vmlinux.

* Since the exception handler adjusts the PC to execute the fixup, and
the fixup uses a direct branch back into the function it fixes,
backtraces of fixups miss the original function. This is confusing,
and violates requirements for RELIABLE_STACKTRACE (and therefore
LIVEPATCH).

* Inline assembly and associated fixups are generated from templates,
and we have many copies of logically identical fixups which only
differ in which specific registers are written to and which address is
branched to at the end of the fixup. This is potentially wasteful of
I-cache resources, and makes it hard to add additional logic to fixups
without significant bloat.

This patch addresses all three concerns for inline uaccess fixups by
adding a dedicated exception handler which updates registers in
exception context and subsequently returns back into the function which
faulted, removing the need for fixups specialized to each faulting
instruction.

Other than backtracing, there should be no functional change as a result
of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20211019160219.5202-12-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Mark Rutland and committed by
Will Deacon
2e77a62c d6e2cc56

+58 -48
+24
arch/arm64/include/asm/asm-extable.h
··· 5 5 #define EX_TYPE_NONE 0 6 6 #define EX_TYPE_FIXUP 1 7 7 #define EX_TYPE_BPF 2 8 + #define EX_TYPE_UACCESS_ERR_ZERO 3 8 9 9 10 #ifdef __ASSEMBLY__ 10 11 ··· 38 37 39 38 #else /* __ASSEMBLY__ */ 40 39 40 + #include <linux/bits.h> 41 41 #include <linux/stringify.h> 42 + 43 + #include <asm/gpr-num.h> 42 44 43 45 #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ 44 46 ".pushsection __ex_table, \"a\"\n" \ ··· 54 50 55 51 #define _ASM_EXTABLE(insn, fixup) \ 56 52 __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0") 53 + 54 + #define EX_DATA_REG_ERR_SHIFT 0 55 + #define EX_DATA_REG_ERR GENMASK(4, 0) 56 + #define EX_DATA_REG_ZERO_SHIFT 5 57 + #define EX_DATA_REG_ZERO GENMASK(9, 5) 58 + 59 + #define EX_DATA_REG(reg, gpr) \ 60 + "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" 61 + 62 + #define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \ 63 + __DEFINE_ASM_GPR_NUMS \ 64 + __ASM_EXTABLE_RAW(#insn, #fixup, \ 65 + __stringify(EX_TYPE_UACCESS_ERR_ZERO), \ 66 + "(" \ 67 + EX_DATA_REG(ERR, err) " | " \ 68 + EX_DATA_REG(ZERO, zero) \ 69 + ")") 70 + 71 + #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ 72 + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr) 57 73 58 74 #endif /* __ASSEMBLY__ */ 59 75
+8 -17
arch/arm64/include/asm/futex.h
··· 25 25 " cbz %w0, 3f\n" \ 26 26 " sub %w4, %w4, %w0\n" \ 27 27 " cbnz %w4, 1b\n" \ 28 - " mov %w0, %w7\n" \ 28 + " mov %w0, %w6\n" \ 29 29 "3:\n" \ 30 30 " dmb ish\n" \ 31 - " .pushsection .fixup,\"ax\"\n" \ 32 - " .align 2\n" \ 33 - "4: mov %w0, %w6\n" \ 34 - " b 3b\n" \ 35 - " .popsection\n" \ 36 - _ASM_EXTABLE(1b, 4b) \ 37 - _ASM_EXTABLE(2b, 4b) \ 31 + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \ 32 + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \ 38 33 : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \ 39 34 "+r" (loops) \ 40 - : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \ 35 + : "r" (oparg), "Ir" (-EAGAIN) \ 41 36 : "memory"); \ 42 37 uaccess_disable_privileged(); \ 43 38 } while (0) ··· 100 105 " cbz %w3, 3f\n" 101 106 " sub %w4, %w4, %w3\n" 102 107 " cbnz %w4, 1b\n" 103 - " mov %w0, %w8\n" 108 + " mov %w0, %w7\n" 104 109 "3:\n" 105 110 " dmb ish\n" 106 111 "4:\n" 107 - " .pushsection .fixup,\"ax\"\n" 108 - "5: mov %w0, %w7\n" 109 - " b 4b\n" 110 - " .popsection\n" 111 - _ASM_EXTABLE(1b, 5b) 112 - _ASM_EXTABLE(2b, 5b) 112 + _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) 113 + _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) 113 114 : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) 114 - : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN) 115 + : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) 115 116 : "memory"); 116 117 uaccess_disable_privileged(); 117 118
+4 -15
arch/arm64/include/asm/uaccess.h
··· 255 255 asm volatile( \ 256 256 "1: " load " " reg "1, [%2]\n" \ 257 257 "2:\n" \ 258 - " .section .fixup, \"ax\"\n" \ 259 - " .align 2\n" \ 260 - "3: mov %w0, %3\n" \ 261 - " mov %1, #0\n" \ 262 - " b 2b\n" \ 263 - " .previous\n" \ 264 - _ASM_EXTABLE(1b, 3b) \ 258 + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \ 265 259 : "+r" (err), "=&r" (x) \ 266 - : "r" (addr), "i" (-EFAULT)) 260 + : "r" (addr)) 267 261 268 262 #define __raw_get_mem(ldr, x, ptr, err) \ 269 263 do { \ ··· 326 332 asm volatile( \ 327 333 "1: " store " " reg "1, [%2]\n" \ 328 334 "2:\n" \ 329 - " .section .fixup,\"ax\"\n" \ 330 - " .align 2\n" \ 331 - "3: mov %w0, %3\n" \ 332 - " b 2b\n" \ 333 - " .previous\n" \ 334 - _ASM_EXTABLE(1b, 3b) \ 335 + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \ 335 336 : "+r" (err) \ 336 - : "r" (x), "r" (addr), "i" (-EFAULT)) 337 + : "r" (x), "r" (addr)) 337 338 338 339 #define __raw_put_mem(str, x, ptr, err) \ 339 340 do { \
+3 -9
arch/arm64/kernel/armv8_deprecated.c
··· 279 279 do { \ 280 280 uaccess_enable_privileged(); \ 281 281 __asm__ __volatile__( \ 282 - " mov %w3, %w7\n" \ 282 + " mov %w3, %w6\n" \ 283 283 "0: ldxr"B" %w2, [%4]\n" \ 284 284 "1: stxr"B" %w0, %w1, [%4]\n" \ 285 285 " cbz %w0, 2f\n" \ ··· 290 290 "2:\n" \ 291 291 " mov %w1, %w2\n" \ 292 292 "3:\n" \ 293 - " .pushsection .fixup,\"ax\"\n" \ 294 - " .align 2\n" \ 295 - "4: mov %w0, %w6\n" \ 296 - " b 3b\n" \ 297 - " .popsection" \ 298 - _ASM_EXTABLE(0b, 4b) \ 299 - _ASM_EXTABLE(1b, 4b) \ 293 + _ASM_EXTABLE_UACCESS_ERR(0b, 3b, %w0) \ 294 + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \ 300 295 : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ 301 296 : "r" ((unsigned long)addr), "i" (-EAGAIN), \ 302 - "i" (-EFAULT), \ 303 297 "i" (__SWP_LL_SC_LOOPS) \ 304 298 : "memory"); \ 305 299 uaccess_disable_privileged(); \
+2 -7
arch/arm64/kernel/traps.c
··· 527 527 "1: " insn ", %1\n" \ 528 528 " mov %w0, #0\n" \ 529 529 "2:\n" \ 530 - " .pushsection .fixup,\"ax\"\n" \ 531 - " .align 2\n" \ 532 - "3: mov %w0, %w2\n" \ 533 - " b 2b\n" \ 534 - " .popsection\n" \ 535 - _ASM_EXTABLE(1b, 3b) \ 530 + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \ 536 531 : "=r" (res) \ 537 - : "r" (address), "i" (-EFAULT)); \ 532 + : "r" (address)); \ 538 533 uaccess_ttbr0_disable(); \ 539 534 } 540 535
+17
arch/arm64/mm/extable.c
··· 3 3 * Based on arch/arm/mm/extable.c 4 4 */ 5 5 6 + #include <linux/bitfield.h> 6 7 #include <linux/extable.h> 7 8 #include <linux/uaccess.h> 8 9 9 10 #include <asm/asm-extable.h> 11 + #include <asm/ptrace.h> 10 12 11 13 typedef bool (*ex_handler_t)(const struct exception_table_entry *, 12 14 struct pt_regs *); ··· 26 24 return true; 27 25 } 28 26 27 + static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, 28 + struct pt_regs *regs) 29 + { 30 + int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); 31 + int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data); 32 + 33 + pt_regs_write_reg(regs, reg_err, -EFAULT); 34 + pt_regs_write_reg(regs, reg_zero, 0); 35 + 36 + regs->pc = get_ex_fixup(ex); 37 + return true; 38 + } 39 + 29 40 bool fixup_exception(struct pt_regs *regs) 30 41 { 31 42 const struct exception_table_entry *ex; ··· 52 37 return ex_handler_fixup(ex, regs); 53 38 case EX_TYPE_BPF: 54 39 return ex_handler_bpf(ex, regs); 40 + case EX_TYPE_UACCESS_ERR_ZERO: 41 + return ex_handler_uaccess_err_zero(ex, regs); 55 42 } 56 43 57 44 BUG();