// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:	Common unwind state.
 * @task:	The task being unwound.
 * @kr_cur:	When KRETPROBES is selected, holds the kretprobe instance
 *		associated with the most recently encountered replacement lr
 *		value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}
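
/*
 * Illustrative sketch (not part of this file): since
 * kunwind_init_from_caller() reads __builtin_frame_address(1) and
 * __builtin_return_address(0), the function it is inlined into must itself
 * stay out of line so that "the caller" is well defined, e.g.:
 *
 *	noinline void example_walk_from_here(void)
 *	{
 *		struct kunwind_state state;
 *
 *		kunwind_init_from_caller(&state);
 *		...
 *	}
 *
 * where example_walk_from_here() is a hypothetical caller, not a function in
 * this file.
 */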

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

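/*
 * The function graph tracer and kretprobes both work by replacing the saved
 * LR in a frame record with the address of a trampoline (return_to_handler
 * or the kretprobe trampoline respectively). When the unwinder encounters
 * one of those trampoline addresses it must recover the original return
 * address, using the frame's FP as the key to find the saved entry.
 */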
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
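
/*
 * For reference, an AAPCS64 frame record is a pair of 64-bit values stored at
 * the address the frame pointer points to:
 *
 *	fp[0]: the caller's frame pointer (link to the previous record)
 *	fp[1]: the saved LR (the return address into the caller)
 *
 * unwind_next_frame_record() follows fp[0] to step from one record to the
 * next, and kunwind_next() strips any PAC bits from the loaded return address
 * before it is reported.
 */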

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

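/*
 * The stacks[] array below tells the common unwinder which stack ranges are
 * valid for this unwind. Every frame record must fall on one of these stacks,
 * and a stack cannot be revisited once the unwind has left it, which is what
 * allows a malformed or cyclic chain of frame records to be detected.
 */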
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
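
/*
 * Illustrative usage via the generic stacktrace API (a minimal sketch, not
 * code from this file): arch_stack_walk() is normally reached through
 * helpers such as stack_trace_save(), e.g.:
 *
 *	unsigned long entries[16];
 *	unsigned int nr;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	stack_trace_print(entries, nr, 0);
 */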

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

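	/* The arm64 unwinder does not track SP, so report sp as 0. */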
	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * The frame record layout used for userspace stack frames in AArch64 mode.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));
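
/*
 * This mirrors the in-kernel AAPCS64 frame record: fp points at the caller's
 * frame record and lr holds the return address into the caller. Userspace LR
 * values may carry pointer authentication bits, which unwind_user_frame()
 * strips before reporting the address.
 */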

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
		  stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

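	/*
	 * This may run in contexts that cannot sleep (e.g. when sampling from
	 * an interrupt), so page faults are disabled around a non-sleeping
	 * copy; if the copy faults, the unwind simply stops.
	 */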
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
	u32 sp;
	u32 lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
			 stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}
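
/*
 * Illustrative usage (a minimal sketch, not code from this file):
 * arch_stack_walk_user() is normally reached via stack_trace_save_user(),
 * which captures the current task's user stack, e.g.:
 *
 *	unsigned long entries[16];
 *	unsigned int nr;
 *
 *	nr = stack_trace_save_user(entries, ARRAY_SIZE(entries));
 */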