Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
"Three arm64 fixes.

The main one is a fix to the way in which we evaluate the macro
arguments to our uaccess routines, which we _think_ might be the root
cause behind some unkillable tasks we've seen in the Android arm64 CI
farm (testing is ongoing). In any case, it's worth fixing.

Other than that, we've toned down an over-zealous VM_BUG_ON() and
fixed ftrace stack unwinding in a bunch of cases.

Summary:

- Evaluate uaccess macro arguments outside of the critical section

- Tighten up VM_BUG_ON() in pmd_populate_kernel() to avoid false positive

- Fix ftrace stack unwinding using HAVE_FUNCTION_GRAPH_RET_ADDR_PTR"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: uaccess: avoid blocking within critical sections
arm64: mm: Fix VM_BUG_ON(mm != &init_mm) for trans_pgd
arm64: ftrace: use HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

+64 -27
+11
arch/arm64/include/asm/ftrace.h
··· 12 12 13 13 #define HAVE_FUNCTION_GRAPH_FP_TEST 14 14 15 + /* 16 + * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a 17 + * "return address pointer" which can be used to uniquely identify a return 18 + * address which has been overwritten. 19 + * 20 + * On arm64 we use the address of the caller's frame record, which remains the 21 + * same for the lifetime of the instrumented function, unlike the return 22 + * address in the LR. 23 + */ 24 + #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 25 + 15 26 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 16 27 #define ARCH_SUPPORTS_FTRACE_OPS 1 17 28 #else
+1 -1
arch/arm64/include/asm/pgalloc.h
··· 76 76 static inline void 77 77 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) 78 78 { 79 - VM_BUG_ON(mm != &init_mm); 79 + VM_BUG_ON(mm && mm != &init_mm); 80 80 __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN); 81 81 } 82 82
-6
arch/arm64/include/asm/stacktrace.h
··· 47 47 * @prev_type: The type of stack this frame record was on, or a synthetic 48 48 * value of STACK_TYPE_UNKNOWN. This is used to detect a 49 49 * transition from one stack to another. 50 - * 51 - * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a 52 - * replacement lr value in the ftrace graph stack. 53 50 */ 54 51 struct stackframe { 55 52 unsigned long fp; ··· 54 57 DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES); 55 58 unsigned long prev_fp; 56 59 enum stack_type prev_type; 57 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 58 - int graph; 59 - #endif 60 60 #ifdef CONFIG_KRETPROBES 61 61 struct llist_node *kr_cur; 62 62 #endif
+41 -7
arch/arm64/include/asm/uaccess.h
··· 281 281 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 282 282 } while (0) 283 283 284 + /* 285 + * We must not call into the scheduler between uaccess_ttbr0_enable() and 286 + * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions, 287 + * we must evaluate these outside of the critical section. 288 + */ 284 289 #define __raw_get_user(x, ptr, err) \ 285 290 do { \ 291 + __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \ 292 + __typeof__(x) __rgu_val; \ 286 293 __chk_user_ptr(ptr); \ 294 + \ 287 295 uaccess_ttbr0_enable(); \ 288 - __raw_get_mem("ldtr", x, ptr, err); \ 296 + __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \ 289 297 uaccess_ttbr0_disable(); \ 298 + \ 299 + (x) = __rgu_val; \ 290 300 } while (0) 291 301 292 302 #define __get_user_error(x, ptr, err) \ ··· 320 310 321 311 #define get_user __get_user 322 312 313 + /* 314 + * We must not call into the scheduler between __uaccess_enable_tco_async() and 315 + * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking 316 + * functions, we must evaluate these outside of the critical section. 317 + */ 323 318 #define __get_kernel_nofault(dst, src, type, err_label) \ 324 319 do { \ 320 + __typeof__(dst) __gkn_dst = (dst); \ 321 + __typeof__(src) __gkn_src = (src); \ 325 322 int __gkn_err = 0; \ 326 323 \ 327 324 __uaccess_enable_tco_async(); \ 328 - __raw_get_mem("ldr", *((type *)(dst)), \ 329 - (__force type *)(src), __gkn_err); \ 325 + __raw_get_mem("ldr", *((type *)(__gkn_dst)), \ 326 + (__force type *)(__gkn_src), __gkn_err); \ 330 327 __uaccess_disable_tco_async(); \ 328 + \ 331 329 if (unlikely(__gkn_err)) \ 332 330 goto err_label; \ 333 331 } while (0) ··· 369 351 } \ 370 352 } while (0) 371 353 354 + /* 355 + * We must not call into the scheduler between uaccess_ttbr0_enable() and 356 + * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions, 357 + * we must evaluate these outside of the critical section. 358 + */ 372 359 #define __raw_put_user(x, ptr, err) \ 373 360 do { \ 374 - __chk_user_ptr(ptr); \ 361 + __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \ 362 + __typeof__(*(ptr)) __rpu_val = (x); \ 363 + __chk_user_ptr(__rpu_ptr); \ 364 + \ 375 365 uaccess_ttbr0_enable(); \ 376 - __raw_put_mem("sttr", x, ptr, err); \ 366 + __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \ 377 367 uaccess_ttbr0_disable(); \ 378 368 } while (0) 379 369 ··· 406 380 407 381 #define put_user __put_user 408 382 383 + /* 384 + * We must not call into the scheduler between __uaccess_enable_tco_async() and 385 + * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking 386 + * functions, we must evaluate these outside of the critical section. 387 + */ 409 388 #define __put_kernel_nofault(dst, src, type, err_label) \ 410 389 do { \ 390 + __typeof__(dst) __pkn_dst = (dst); \ 391 + __typeof__(src) __pkn_src = (src); \ 411 392 int __pkn_err = 0; \ 412 393 \ 413 394 __uaccess_enable_tco_async(); \ 414 - __raw_put_mem("str", *((type *)(src)), \ 415 - (__force type *)(dst), __pkn_err); \ 395 + __raw_put_mem("str", *((type *)(__pkn_src)), \ 396 + (__force type *)(__pkn_dst), __pkn_err); \ 416 397 __uaccess_disable_tco_async(); \ 398 + \ 417 399 if (unlikely(__pkn_err)) \ 418 400 goto err_label; \ 419 401 } while(0)
+3 -3
arch/arm64/kernel/ftrace.c
··· 244 244 * on the way back to parent. For this purpose, this function is called 245 245 * in _mcount() or ftrace_caller() to replace return address (*parent) on 246 246 * the call stack to return_to_handler. 247 - * 248 - * Note that @frame_pointer is used only for sanity check later. 249 247 */ 250 248 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, 251 249 unsigned long frame_pointer) ··· 261 263 */ 262 264 old = *parent; 263 265 264 - if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) 266 + if (!function_graph_enter(old, self_addr, frame_pointer, 267 + (void *)frame_pointer)) { 265 268 *parent = return_hooker; 269 + } 266 270 } 267 271 268 272 #ifdef CONFIG_DYNAMIC_FTRACE
+8 -10
arch/arm64/kernel/stacktrace.c
··· 38 38 { 39 39 frame->fp = fp; 40 40 frame->pc = pc; 41 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 42 - frame->graph = 0; 43 - #endif 44 41 #ifdef CONFIG_KRETPROBES 45 42 frame->kr_cur = NULL; 46 43 #endif ··· 113 116 frame->prev_fp = fp; 114 117 frame->prev_type = info.type; 115 118 119 + frame->pc = ptrauth_strip_insn_pac(frame->pc); 120 + 116 121 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 117 122 if (tsk->ret_stack && 118 - (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) { 119 - struct ftrace_ret_stack *ret_stack; 123 + (frame->pc == (unsigned long)return_to_handler)) { 124 + unsigned long orig_pc; 120 125 /* 121 126 * This is a case where function graph tracer has 122 127 * modified a return address (LR) in a stack frame 123 128 * to hook a function return. 124 129 * So replace it to an original value. 125 130 */ 126 - ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++); 127 - if (WARN_ON_ONCE(!ret_stack)) 131 + orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc, 132 + (void *)frame->fp); 133 + if (WARN_ON_ONCE(frame->pc == orig_pc)) 128 134 return -EINVAL; 129 - frame->pc = ret_stack->ret; 135 + frame->pc = orig_pc; 130 136 } 131 137 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 132 138 #ifdef CONFIG_KRETPROBES 133 139 if (is_kretprobe_trampoline(frame->pc)) 134 140 frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur); 135 141 #endif 136 - 137 - frame->pc = ptrauth_strip_insn_pac(frame->pc); 138 142 139 143 return 0; 140 144 }