Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next/stacktrace' into for-next/core

* for-next/stacktrace:
arm64: Copy the task argument to unwind_state
arm64: Split unwind_init()
arm64: stacktrace: use non-atomic __set_bit
arm64: kasan: do not instrument stacktrace.c

+79 -23
+5
arch/arm64/kernel/Makefile
···
 CFLAGS_REMOVE_syscall.o	 = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o	+= -fno-stack-protector
 
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
 # It's not safe to invoke KCOV when portions of the kernel environment aren't
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
+74 -23
arch/arm64/kernel/stacktrace.c
···
  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
+ *
+ * @task:        The task being unwound.
  */
 struct unwind_state {
 	unsigned long fp;
···
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
+	struct task_struct *task;
 };
 
-static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
-				unsigned long pc)
+static void unwind_init_common(struct unwind_state *state,
+			       struct task_struct *task)
 {
-	state->fp = fp;
-	state->pc = pc;
+	state->task = task;
 #ifdef CONFIG_KRETPROBES
 	state->kr_cur = NULL;
 #endif
···
 	state->prev_fp = 0;
 	state->prev_type = STACK_TYPE_UNKNOWN;
 }
-NOKPROBE_SYMBOL(unwind_init);
+
+/*
+ * Start an unwind from a pt_regs.
+ *
+ * The unwind will begin at the PC within the regs.
+ *
+ * The regs must be on a stack currently owned by the calling task.
+ */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+					 struct pt_regs *regs)
+{
+	unwind_init_common(state, current);
+
+	state->fp = regs->regs[29];
+	state->pc = regs->pc;
+}
+
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+{
+	unwind_init_common(state, current);
+
+	state->fp = (unsigned long)__builtin_frame_address(1);
+	state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked tasks saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+					 struct task_struct *task)
+{
+	unwind_init_common(state, task);
+
+	state->fp = thread_saved_fp(task);
+	state->pc = thread_saved_pc(task);
+}
 
 /*
  * Unwind from one frame record (A) to the next frame record (B).
···
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct task_struct *tsk,
-			       struct unwind_state *state)
+static int notrace unwind_next(struct unwind_state *state)
 {
+	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
 	struct stack_info info;
···
 		if (fp <= state->prev_fp)
 			return -EINVAL;
 	} else {
-		set_bit(state->prev_type, state->stacks_done);
+		__set_bit(state->prev_type, state->stacks_done);
 	}
 
 	/*
 	 * Record this frame record's values and location. The prev_fp and
 	 * prev_type are only meaningful to the next unwind_next() invocation.
 	 */
-	state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-	state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	state->fp = READ_ONCE(*(unsigned long *)(fp));
+	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
 	state->prev_fp = fp;
 	state->prev_type = info.type;
···
 }
 NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct task_struct *tsk,
-			   struct unwind_state *state,
+static void notrace unwind(struct unwind_state *state,
 			   stack_trace_consume_fn consume_entry, void *cookie)
 {
 	while (1) {
···
 		if (!consume_entry(cookie, state->pc))
 			break;
-		ret = unwind_next(tsk, state);
+		ret = unwind_next(state);
 		if (ret < 0)
 			break;
 	}
···
 {
 	struct unwind_state state;
 
-	if (regs)
-		unwind_init(&state, regs->regs[29], regs->pc);
-	else if (task == current)
-		unwind_init(&state,
-			    (unsigned long)__builtin_frame_address(1),
-			    (unsigned long)__builtin_return_address(0));
-	else
-		unwind_init(&state, thread_saved_fp(task),
-			    thread_saved_pc(task));
+	if (regs) {
+		if (task != current)
+			return;
+		unwind_init_from_regs(&state, regs);
+	} else if (task == current) {
+		unwind_init_from_caller(&state);
+	} else {
+		unwind_init_from_task(&state, task);
+	}
 
-	unwind(task, &state, consume_entry, cookie);
+	unwind(&state, consume_entry, cookie);
 }