Merge branch 'for-next/stacktrace' into for-next/core
* for-next/stacktrace:
    arm64: Copy the task argument to unwind_state
    arm64: Split unwind_init()
    arm64: stacktrace: use non-atomic __set_bit
    arm64: kasan: do not instrument stacktrace.c
arch/arm64/kernel/Makefile
@@
 CFLAGS_REMOVE_syscall.o	 = -fstack-protector -fstack-protector-strong
 CFLAGS_syscall.o	+= -fno-stack-protector
 
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
 # It's not safe to invoke KCOV when portions of the kernel environment aren't
 # available or are out-of-sync with HW state. Since `noinstr` doesn't always
 # inhibit KCOV instrumentation, disable it for the entire compilation unit.
arch/arm64/kernel/stacktrace.c | 74 insertions(+), 23 deletions(-)
@@
  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
+ *
+ * @task:        The task being unwound.
  */
 struct unwind_state {
 	unsigned long fp;
@@
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
+	struct task_struct *task;
 };
 
-static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
-				unsigned long pc)
+static void unwind_init_common(struct unwind_state *state,
+			       struct task_struct *task)
 {
-	state->fp = fp;
-	state->pc = pc;
+	state->task = task;
 #ifdef CONFIG_KRETPROBES
 	state->kr_cur = NULL;
 #endif
@@
 	state->prev_fp = 0;
 	state->prev_type = STACK_TYPE_UNKNOWN;
 }
-NOKPROBE_SYMBOL(unwind_init);
+
+/*
+ * Start an unwind from a pt_regs.
+ *
+ * The unwind will begin at the PC within the regs.
+ *
+ * The regs must be on a stack currently owned by the calling task.
+ */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+					 struct pt_regs *regs)
+{
+	unwind_init_common(state, current);
+
+	state->fp = regs->regs[29];
+	state->pc = regs->pc;
+}
+
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+{
+	unwind_init_common(state, current);
+
+	state->fp = (unsigned long)__builtin_frame_address(1);
+	state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked tasks saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+					 struct task_struct *task)
+{
+	unwind_init_common(state, task);
+
+	state->fp = thread_saved_fp(task);
+	state->pc = thread_saved_pc(task);
+}
 
 /*
  * Unwind from one frame record (A) to the next frame record (B).
@@
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct task_struct *tsk,
-			       struct unwind_state *state)
+static int notrace unwind_next(struct unwind_state *state)
 {
+	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
 	struct stack_info info;
@@
 		if (fp <= state->prev_fp)
 			return -EINVAL;
 	} else {
-		set_bit(state->prev_type, state->stacks_done);
+		__set_bit(state->prev_type, state->stacks_done);
 	}
 
 	/*
 	 * Record this frame record's values and location. The prev_fp and
 	 * prev_type are only meaningful to the next unwind_next() invocation.
 	 */
-	state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-	state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	state->fp = READ_ONCE(*(unsigned long *)(fp));
+	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
 	state->prev_fp = fp;
 	state->prev_type = info.type;
@@
 }
 NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct task_struct *tsk,
-			   struct unwind_state *state,
+static void notrace unwind(struct unwind_state *state,
 			   stack_trace_consume_fn consume_entry, void *cookie)
 {
 	while (1) {
@@
 		if (!consume_entry(cookie, state->pc))
 			break;
-		ret = unwind_next(tsk, state);
+		ret = unwind_next(state);
 		if (ret < 0)
 			break;
 	}
@@
 {
 	struct unwind_state state;
 
-	if (regs)
-		unwind_init(&state, regs->regs[29], regs->pc);
-	else if (task == current)
-		unwind_init(&state,
-				(unsigned long)__builtin_frame_address(1),
-				(unsigned long)__builtin_return_address(0));
-	else
-		unwind_init(&state, thread_saved_fp(task),
-				thread_saved_pc(task));
+	if (regs) {
+		if (task != current)
+			return;
+		unwind_init_from_regs(&state, regs);
+	} else if (task == current) {
+		unwind_init_from_caller(&state);
+	} else {
+		unwind_init_from_task(&state, task);
+	}
 
-	unwind(task, &state, consume_entry, cookie);
+	unwind(&state, consume_entry, cookie);
 }