Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
        struct unwind_state common;
        struct task_struct *task;
        int graph_idx;
#ifdef CONFIG_KRETPROBES
        struct llist_node *kr_cur;
#endif
};

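/*
 * Initialise the common unwind state and record the task being unwound.
 */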
static __always_inline void
kunwind_init(struct kunwind_state *state,
             struct task_struct *task)
{
        unwind_init_common(&state->common);
        state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
                       struct pt_regs *regs)
{
        kunwind_init(state, current);

        state->common.fp = regs->regs[29];
        state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
        kunwind_init(state, current);

        state->common.fp = (unsigned long)__builtin_frame_address(1);
        state->common.pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
                       struct task_struct *task)
{
        kunwind_init(state, task);

        state->common.fp = thread_saved_fp(task);
        state->common.pc = thread_saved_pc(task);
}

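/*
 * Recover the original return address when the unwound PC points at a
 * trampoline: the ftrace function-graph return handler or a kretprobe
 * trampoline. Returns 0 on success, or -EINVAL if the original address
 * cannot be recovered.
 */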
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (state->task->ret_stack &&
            (state->common.pc == (unsigned long)return_to_handler)) {
                unsigned long orig_pc;
                orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
                                                state->common.pc,
                                                (void *)state->common.fp);
                if (WARN_ON_ONCE(state->common.pc == orig_pc))
                        return -EINVAL;
                state->common.pc = orig_pc;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
        if (is_kretprobe_trampoline(state->common.pc)) {
                unsigned long orig_pc;
                orig_pc = kretprobe_find_ret_addr(state->task,
                                                  (void *)state->common.fp,
                                                  &state->kr_cur);
                state->common.pc = orig_pc;
        }
#endif /* CONFIG_KRETPROBES */

        return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
        struct task_struct *tsk = state->task;
        unsigned long fp = state->common.fp;
        int err;

        /* Final frame; nothing to unwind */
        if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
                return -ENOENT;

        err = unwind_next_frame_record(&state->common);
        if (err)
                return err;

        state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

        return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

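/*
 * Walk the stack from the initialised state, passing each frame to
 * consume_state() until it returns false or no further frames can be
 * unwound.
 */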
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
           void *cookie)
{
        if (kunwind_recover_return_address(state))
                return;

        while (1) {
                int ret;

                if (!consume_state(state, cookie))
                        break;
                ret = kunwind_next(state);
                if (ret < 0)
                        break;
        }
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)                                     \
        ({                                                      \
                ((task == current) && !preemptible())           \
                        ? stackinfo_get_##name()                \
                        : stackinfo_get_unknown();              \
        })

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)                                    \
        ({                                                      \
                ((task == current) && in_nmi())                 \
                        ? stackinfo_get_sdei_##name()           \
                        : stackinfo_get_unknown();              \
        })

#define STACKINFO_EFI                                           \
        ({                                                      \
                ((task == current) && current_in_efi())         \
                        ? stackinfo_get_efi()                   \
                        : stackinfo_get_unknown();              \
        })

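/*
 * Core unwinder entry point: describe the stacks reachable from this
 * context, pick a starting point from regs, the caller, or a blocked
 * task, then walk the frame records.
 */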
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
                   void *cookie, struct task_struct *task,
                   struct pt_regs *regs)
{
        struct stack_info stacks[] = {
                stackinfo_get_task(task),
                STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
                STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
                STACKINFO_SDEI(normal),
                STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
                STACKINFO_EFI,
#endif
        };
        struct kunwind_state state = {
                .common = {
                        .stacks = stacks,
                        .nr_stacks = ARRAY_SIZE(stacks),
                },
        };

        if (regs) {
                if (task != current)
                        return;
                kunwind_init_from_regs(&state, regs);
        } else if (task == current) {
                kunwind_init_from_caller(&state);
        } else {
                kunwind_init_from_task(&state, task);
        }

        do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
        stack_trace_consume_fn consume_entry;
        void *cookie;
};

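/*
 * Adapt the generic stack_trace_consume_fn callback to the kunwind
 * consumer interface, reporting only the PC of each frame.
 */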
static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
        struct kunwind_consume_entry_data *data = cookie;
        return data->consume_entry(data->cookie, state->common.pc);
}

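/*
 * arch hook for the generic stacktrace code: walk the kernel stack of
 * @task (or the stack described by @regs, when given) and feed each
 * return address to @consume_entry.
 */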
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                                      void *cookie, struct task_struct *task,
                                      struct pt_regs *regs)
{
        struct kunwind_consume_entry_data data = {
                .consume_entry = consume_entry,
                .cookie = cookie,
        };

        kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

struct bpf_unwind_consume_entry_data {
        bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
        void *cookie;
};

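/*
 * Adapt the BPF unwind callback to the kunwind consumer interface. The
 * stack pointer is not tracked by the unwinder, so 0 is reported for sp.
 */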
static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
        struct bpf_unwind_consume_entry_data *data = cookie;

        return data->consume_entry(data->cookie, state->common.pc, 0,
                                   state->common.fp);
}

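/*
 * Walk the current task's kernel stack on behalf of BPF, reporting the
 * PC and frame pointer of each frame to @consume_entry.
 */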
noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
                                                                u64 fp), void *cookie)
{
        struct bpf_unwind_consume_entry_data data = {
                .consume_entry = consume_entry,
                .cookie = cookie,
        };

        kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

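/* Print a single backtrace entry at the requested log level. */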
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
        char *loglvl = arg;
        printk("%s %pSb\n", loglvl, (void *)where);
        return true;
}

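/*
 * Print a kernel backtrace for @tsk (or from @regs, when given) at log
 * level @loglvl. Backtraces are not printed for exceptions taken from
 * user mode.
 */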
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                    const char *loglvl)
{
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs && user_mode(regs))
                return;

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        printk("%sCall trace:\n", loglvl);
        arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

        put_task_stack(tsk);
}

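/*
 * Print a backtrace for @tsk at @loglvl. The @sp argument is unused on
 * arm64.
 */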
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        dump_backtrace(NULL, tsk, loglvl);
        barrier();
}

/*
 * The struct defined for a userspace stack frame in AARCH64 mode.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
                  stack_trace_consume_fn consume_entry)
{
        struct frame_tail buftail;
        unsigned long err;
        unsigned long lr;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(tail, sizeof(buftail)))
                return NULL;

        pagefault_disable();
        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
        pagefault_enable();

        if (err)
                return NULL;

        lr = ptrauth_strip_user_insn_pac(buftail.lr);

        if (!consume_entry(cookie, lr))
                return NULL;

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail >= buftail.fp)
                return NULL;

        return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
        compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
        u32 sp;
        u32 lr;
} __attribute__((packed));

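/*
 * Get the return address for a single AARCH32 stackframe and return a
 * pointer to the next frame tail.
 */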
static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
                         stack_trace_consume_fn consume_entry)
{
        struct compat_frame_tail buftail;
        unsigned long err;

        /* Also check accessibility of one struct compat_frame_tail beyond */
        if (!access_ok(tail, sizeof(buftail)))
                return NULL;

        pagefault_disable();
        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
        pagefault_enable();

        if (err)
                return NULL;

        if (!consume_entry(cookie, buftail.lr))
                return NULL;

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= (struct compat_frame_tail __user *)
                        compat_ptr(buftail.fp))
                return NULL;

        return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

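/*
 * arch hook for the generic stacktrace code: walk the userspace stack of
 * the task that took the exception described by @regs, feeding each
 * return address to @consume_entry.
 */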
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
                          const struct pt_regs *regs)
{
        if (!consume_entry(cookie, regs->pc))
                return;

        if (!compat_user_mode(regs)) {
                /* AARCH64 mode */
                struct frame_tail __user *tail;

                tail = (struct frame_tail __user *)regs->regs[29];
                while (tail && !((unsigned long)tail & 0x7))
                        tail = unwind_user_frame(tail, cookie, consume_entry);
        } else {
#ifdef CONFIG_COMPAT
                /* AARCH32 compat mode */
                struct compat_frame_tail __user *tail;

                tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
                while (tail && !((unsigned long)tail & 0x3))
                        tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
        }
}