Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next/stacktrace' into for-next/kasan

Merge in stack unwinding work to cater for 8-byte aligned stack frames
which may be generated following optimisations to CONFIG_KASAN_OUTLINE.

* for-next/stacktrace:
arm64: stacktrace: Relax frame record alignment requirement to 8 bytes
arm64: Change the on_*stack functions to take a size argument
arm64: Implement stack trace termination record

+70 -51
+6 -6
arch/arm64/include/asm/processor.h
··· 329 329 * of header definitions for the use of task_stack_page. 330 330 */ 331 331 332 - #define current_top_of_stack() \ 333 - ({ \ 334 - struct stack_info _info; \ 335 - BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \ 336 - _info.high; \ 332 + #define current_top_of_stack() \ 333 + ({ \ 334 + struct stack_info _info; \ 335 + BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \ 336 + _info.high; \ 337 337 }) 338 - #define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL)) 338 + #define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) 339 339 340 340 #endif /* __ASSEMBLY__ */ 341 341 #endif /* __ASM_PROCESSOR_H */
+4 -3
arch/arm64/include/asm/sdei.h
··· 42 42 43 43 struct stack_info; 44 44 45 - bool _on_sdei_stack(unsigned long sp, struct stack_info *info); 46 - static inline bool on_sdei_stack(unsigned long sp, 45 + bool _on_sdei_stack(unsigned long sp, unsigned long size, 46 + struct stack_info *info); 47 + static inline bool on_sdei_stack(unsigned long sp, unsigned long size, 47 48 struct stack_info *info) 48 49 { 49 50 if (!IS_ENABLED(CONFIG_VMAP_STACK)) ··· 52 51 if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) 53 52 return false; 54 53 if (in_nmi()) 55 - return _on_sdei_stack(sp, info); 54 + return _on_sdei_stack(sp, size, info); 56 55 57 56 return false; 58 57 }
+16 -16
arch/arm64/include/asm/stacktrace.h
··· 69 69 70 70 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); 71 71 72 - static inline bool on_stack(unsigned long sp, unsigned long low, 73 - unsigned long high, enum stack_type type, 74 - struct stack_info *info) 72 + static inline bool on_stack(unsigned long sp, unsigned long size, 73 + unsigned long low, unsigned long high, 74 + enum stack_type type, struct stack_info *info) 75 75 { 76 76 if (!low) 77 77 return false; 78 78 79 - if (sp < low || sp >= high) 79 + if (sp < low || sp + size < sp || sp + size > high) 80 80 return false; 81 81 82 82 if (info) { ··· 87 87 return true; 88 88 } 89 89 90 - static inline bool on_irq_stack(unsigned long sp, 90 + static inline bool on_irq_stack(unsigned long sp, unsigned long size, 91 91 struct stack_info *info) 92 92 { 93 93 unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); 94 94 unsigned long high = low + IRQ_STACK_SIZE; 95 95 96 - return on_stack(sp, low, high, STACK_TYPE_IRQ, info); 96 + return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info); 97 97 } 98 98 99 99 static inline bool on_task_stack(const struct task_struct *tsk, 100 - unsigned long sp, 100 + unsigned long sp, unsigned long size, 101 101 struct stack_info *info) 102 102 { 103 103 unsigned long low = (unsigned long)task_stack_page(tsk); 104 104 unsigned long high = low + THREAD_SIZE; 105 105 106 - return on_stack(sp, low, high, STACK_TYPE_TASK, info); 106 + return on_stack(sp, size, low, high, STACK_TYPE_TASK, info); 107 107 } 108 108 109 109 #ifdef CONFIG_VMAP_STACK 110 110 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); 111 111 112 - static inline bool on_overflow_stack(unsigned long sp, 112 + static inline bool on_overflow_stack(unsigned long sp, unsigned long size, 113 113 struct stack_info *info) 114 114 { 115 115 unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); 116 116 unsigned long high = low + OVERFLOW_STACK_SIZE; 117 117 118 - return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info); 118 + return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); 119 119 } 120 120 #else 121 - static inline bool on_overflow_stack(unsigned long sp, 121 + static inline bool on_overflow_stack(unsigned long sp, unsigned long size, 122 122 struct stack_info *info) { return false; } 123 123 #endif 124 124 ··· 128 128 * context. 129 129 */ 130 130 static inline bool on_accessible_stack(const struct task_struct *tsk, 131 - unsigned long sp, 131 + unsigned long sp, unsigned long size, 132 132 struct stack_info *info) 133 133 { 134 134 if (info) 135 135 info->type = STACK_TYPE_UNKNOWN; 136 136 137 - if (on_task_stack(tsk, sp, info)) 137 + if (on_task_stack(tsk, sp, size, info)) 138 138 return true; 139 139 if (tsk != current || preemptible()) 140 140 return false; 141 - if (on_irq_stack(sp, info)) 141 + if (on_irq_stack(sp, size, info)) 142 142 return true; 143 - if (on_overflow_stack(sp, info)) 143 + if (on_overflow_stack(sp, size, info)) 144 144 return true; 145 - if (on_sdei_stack(sp, info)) 145 + if (on_sdei_stack(sp, size, info)) 146 146 return true; 147 147 148 148 return false;
+1 -1
arch/arm64/kernel/entry.S
··· 285 285 stp lr, x21, [sp, #S_LR] 286 286 287 287 /* 288 - * For exceptions from EL0, create a terminal frame record. 288 + * For exceptions from EL0, create a final frame record. 289 289 * For exceptions from EL1, create a synthetic frame record so the 290 290 * interrupted code shows up in the backtrace. 291 291 */
+19 -6
arch/arm64/kernel/head.S
··· 16 16 #include <asm/asm_pointer_auth.h> 17 17 #include <asm/assembler.h> 18 18 #include <asm/boot.h> 19 + #include <asm/bug.h> 19 20 #include <asm/ptrace.h> 20 21 #include <asm/asm-offsets.h> 21 22 #include <asm/cache.h> ··· 394 393 ret x28 395 394 SYM_FUNC_END(__create_page_tables) 396 395 396 + /* 397 + * Create a final frame record at task_pt_regs(current)->stackframe, so 398 + * that the unwinder can identify the final frame record of any task by 399 + * its location in the task stack. We reserve the entire pt_regs space 400 + * for consistency with user tasks and kthreads. 401 + */ 402 + .macro setup_final_frame 403 + sub sp, sp, #PT_REGS_SIZE 404 + stp xzr, xzr, [sp, #S_STACKFRAME] 405 + add x29, sp, #S_STACKFRAME 406 + .endm 407 + 397 408 /* 398 409 * The following fragment of code is executed with the MMU enabled. 399 410 * ··· 460 447 #endif 461 448 bl switch_to_vhe // Prefer VHE if possible 462 449 add sp, sp, #16 463 - mov x29, #0 464 - mov x30, #0 465 - b start_kernel 450 + setup_final_frame 451 + bl start_kernel 452 + ASM_BUG() 466 453 SYM_FUNC_END(__primary_switched) 467 454 468 455 .pushsection ".rodata", "a" ··· 652 639 cbz x2, __secondary_too_slow 653 640 msr sp_el0, x2 654 641 scs_load x2, x3 655 - mov x29, #0 656 - mov x30, #0 642 + setup_final_frame 657 643 658 644 #ifdef CONFIG_ARM64_PTR_AUTH 659 645 ptrauth_keys_init_cpu x2, x3, x4, x5 660 646 #endif 661 647 662 - b secondary_start_kernel 648 + bl secondary_start_kernel 649 + ASM_BUG() 663 650 SYM_FUNC_END(__secondary_switched) 664 651 665 652 SYM_FUNC_START_LOCAL(__secondary_too_slow)
+1 -1
arch/arm64/kernel/perf_callchain.c
··· 116 116 tail = (struct frame_tail __user *)regs->regs[29]; 117 117 118 118 while (entry->nr < entry->max_stack && 119 - tail && !((unsigned long)tail & 0xf)) 119 + tail && !((unsigned long)tail & 0x7)) 120 120 tail = user_backtrace(tail, entry); 121 121 } else { 122 122 #ifdef CONFIG_COMPAT
+5
arch/arm64/kernel/process.c
··· 435 435 } 436 436 p->thread.cpu_context.pc = (unsigned long)ret_from_fork; 437 437 p->thread.cpu_context.sp = (unsigned long)childregs; 438 + /* 439 + * For the benefit of the unwinder, set up childregs->stackframe 440 + * as the final frame for the new task. 441 + */ 442 + p->thread.cpu_context.fp = (unsigned long)childregs->stackframe; 438 443 439 444 ptrace_hw_copy_thread(p); 440 445
+1 -1
arch/arm64/kernel/ptrace.c
··· 122 122 { 123 123 return ((addr & ~(THREAD_SIZE - 1)) == 124 124 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || 125 - on_irq_stack(addr, NULL); 125 + on_irq_stack(addr, sizeof(unsigned long), NULL); 126 126 } 127 127 128 128 /**
+9 -7
arch/arm64/kernel/sdei.c
··· 162 162 return err; 163 163 } 164 164 165 - static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info) 165 + static bool on_sdei_normal_stack(unsigned long sp, unsigned long size, 166 + struct stack_info *info) 166 167 { 167 168 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); 168 169 unsigned long high = low + SDEI_STACK_SIZE; 169 170 170 - return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info); 171 + return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); 171 172 } 172 173 173 - static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info) 174 + static bool on_sdei_critical_stack(unsigned long sp, unsigned long size, 175 + struct stack_info *info) 174 176 { 175 177 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); 176 178 unsigned long high = low + SDEI_STACK_SIZE; 177 179 178 - return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info); 180 + return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); 179 181 } 180 182 181 - bool _on_sdei_stack(unsigned long sp, struct stack_info *info) 183 + bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info) 182 184 { 183 185 if (!IS_ENABLED(CONFIG_VMAP_STACK)) 184 186 return false; 185 187 186 - if (on_sdei_critical_stack(sp, info)) 188 + if (on_sdei_critical_stack(sp, size, info)) 187 189 return true; 188 190 189 - if (on_sdei_normal_stack(sp, info)) 191 + if (on_sdei_normal_stack(sp, size, info)) 190 192 return true; 191 193 192 194 return false;
+8 -10
arch/arm64/kernel/stacktrace.c
··· 68 68 unsigned long fp = frame->fp; 69 69 struct stack_info info; 70 70 71 - if (fp & 0xf) 72 - return -EINVAL; 73 - 74 71 if (!tsk) 75 72 tsk = current; 76 73 77 - if (!on_accessible_stack(tsk, fp, &info)) 74 + /* Final frame; nothing to unwind */ 75 + if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) 76 + return -ENOENT; 77 + 78 + if (fp & 0x7) 79 + return -EINVAL; 80 + 81 + if (!on_accessible_stack(tsk, fp, 16, &info)) 78 82 return -EINVAL; 79 83 80 84 if (test_bit(info.type, frame->stacks_done)) ··· 131 127 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 132 128 133 129 frame->pc = ptrauth_strip_insn_pac(frame->pc); 134 - 135 - /* 136 - * This is a terminal record, so we have finished unwinding. 137 - */ 138 - if (!frame->fp && !frame->pc) 139 - return -ENOENT; 140 130 141 131 return 0; 142 132 }