Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Fixup calltrace panic

The implementation of show_stack will panic when given a bad fp:

addr = *fp++;

because the fp isn't checked properly.

The current implementations of show_stack, wchan and stack_trace
haven't been designed properly, so just deprecate them.

This patch follows riscv's approach, with the code adapted from
arm's implementation. The patch has been tested with:

- cat /proc/<pid>/stack
- cat /proc/<pid>/wchan
- echo c > /proc/sysrq-trigger

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>

Guo Ren 18c07d23 229a0dde

+167 -127
+2
arch/csky/Kconfig
··· 8 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 9 9 select ARCH_USE_BUILTIN_BSWAP 10 10 select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 11 + select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 11 12 select COMMON_CLK 12 13 select CLKSRC_MMIO 13 14 select CSKY_MPINTC if CPU_CK860 ··· 39 38 select HAVE_ARCH_TRACEHOOK 40 39 select HAVE_ARCH_AUDITSYSCALL 41 40 select HAVE_COPY_THREAD_TLS 41 + select HAVE_DEBUG_BUGVERBOSE 42 42 select HAVE_DYNAMIC_FTRACE 43 43 select HAVE_DYNAMIC_FTRACE_WITH_REGS 44 44 select HAVE_FUNCTION_TRACER
+1 -1
arch/csky/Makefile
··· 47 47 KBUILD_CFLAGS += -mno-stack-size 48 48 endif 49 49 50 - ifdef CONFIG_STACKTRACE 50 + ifdef CONFIG_FRAME_POINTER 51 51 KBUILD_CFLAGS += -mbacktrace 52 52 endif 53 53
+10
arch/csky/include/asm/ptrace.h
··· 58 58 return regs->usp; 59 59 } 60 60 61 + static inline unsigned long frame_pointer(struct pt_regs *regs) 62 + { 63 + return regs->regs[4]; 64 + } 65 + static inline void frame_pointer_set(struct pt_regs *regs, 66 + unsigned long val) 67 + { 68 + regs->regs[4] = val; 69 + } 70 + 61 71 extern int regs_query_register_offset(const char *name); 62 72 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, 63 73 unsigned int n);
+6
arch/csky/include/asm/thread_info.h
··· 40 40 #define thread_saved_fp(tsk) \ 41 41 ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8)) 42 42 43 + #define thread_saved_sp(tsk) \ 44 + ((unsigned long)(tsk->thread.ksp)) 45 + 46 + #define thread_saved_lr(tsk) \ 47 + ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r15)) 48 + 43 49 static inline struct thread_info *current_thread_info(void) 44 50 { 45 51 unsigned long sp;
+1 -1
arch/csky/kernel/Makefile
··· 3 3 4 4 obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o 5 5 obj-y += power.o syscall.o syscall_table.o setup.o 6 - obj-y += process.o cpu-probe.o ptrace.o dumpstack.o 6 + obj-y += process.o cpu-probe.o ptrace.o stacktrace.o 7 7 obj-y += probes/ 8 8 9 9 obj-$(CONFIG_MODULES) += module.o
-49
arch/csky/kernel/dumpstack.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 - 4 - #include <linux/ptrace.h> 5 - 6 - int kstack_depth_to_print = 48; 7 - 8 - void show_trace(unsigned long *stack) 9 - { 10 - unsigned long *stack_end; 11 - unsigned long *stack_start; 12 - unsigned long *fp; 13 - unsigned long addr; 14 - 15 - addr = (unsigned long) stack & THREAD_MASK; 16 - stack_start = (unsigned long *) addr; 17 - stack_end = (unsigned long *) (addr + THREAD_SIZE); 18 - 19 - fp = stack; 20 - pr_info("\nCall Trace:"); 21 - 22 - while (fp > stack_start && fp < stack_end) { 23 - #ifdef CONFIG_STACKTRACE 24 - addr = fp[1]; 25 - fp = (unsigned long *) fp[0]; 26 - #else 27 - addr = *fp++; 28 - #endif 29 - if (__kernel_text_address(addr)) 30 - pr_cont("\n[<%08lx>] %pS", addr, (void *)addr); 31 - } 32 - pr_cont("\n"); 33 - } 34 - 35 - void show_stack(struct task_struct *task, unsigned long *stack) 36 - { 37 - if (!stack) { 38 - if (task) 39 - stack = (unsigned long *)thread_saved_fp(task); 40 - else 41 - #ifdef CONFIG_STACKTRACE 42 - asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); 43 - #else 44 - stack = (unsigned long *)&stack; 45 - #endif 46 - } 47 - 48 - show_trace(stack); 49 - }
-31
arch/csky/kernel/process.c
··· 98 98 return 1; 99 99 } 100 100 101 - unsigned long get_wchan(struct task_struct *p) 102 - { 103 - unsigned long lr; 104 - unsigned long *fp, *stack_start, *stack_end; 105 - int count = 0; 106 - 107 - if (!p || p == current || p->state == TASK_RUNNING) 108 - return 0; 109 - 110 - stack_start = (unsigned long *)end_of_stack(p); 111 - stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE); 112 - 113 - fp = (unsigned long *) thread_saved_fp(p); 114 - do { 115 - if (fp < stack_start || fp > stack_end) 116 - return 0; 117 - #ifdef CONFIG_STACKTRACE 118 - lr = fp[1]; 119 - fp = (unsigned long *)fp[0]; 120 - #else 121 - lr = *fp++; 122 - #endif 123 - if (!in_sched_functions(lr) && 124 - __kernel_text_address(lr)) 125 - return lr; 126 - } while (count++ < 16); 127 - 128 - return 0; 129 - } 130 - EXPORT_SYMBOL(get_wchan); 131 - 132 101 #ifndef CONFIG_CPU_PM_NONE 133 102 void arch_cpu_idle(void) 134 103 {
+147 -45
arch/csky/kernel/stacktrace.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */ 3 2 4 3 #include <linux/sched/debug.h> 5 4 #include <linux/sched/task_stack.h> 6 5 #include <linux/stacktrace.h> 7 6 #include <linux/ftrace.h> 7 + #include <linux/ptrace.h> 8 + 9 + #ifdef CONFIG_FRAME_POINTER 10 + 11 + struct stackframe { 12 + unsigned long fp; 13 + unsigned long ra; 14 + }; 15 + 16 + void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, 17 + bool (*fn)(unsigned long, void *), void *arg) 18 + { 19 + unsigned long fp, sp, pc; 20 + 21 + if (regs) { 22 + fp = frame_pointer(regs); 23 + sp = user_stack_pointer(regs); 24 + pc = instruction_pointer(regs); 25 + } else if (task == NULL || task == current) { 26 + const register unsigned long current_sp __asm__ ("sp"); 27 + const register unsigned long current_fp __asm__ ("r8"); 28 + fp = current_fp; 29 + sp = current_sp; 30 + pc = (unsigned long)walk_stackframe; 31 + } else { 32 + /* task blocked in __switch_to */ 33 + fp = thread_saved_fp(task); 34 + sp = thread_saved_sp(task); 35 + pc = thread_saved_lr(task); 36 + } 37 + 38 + for (;;) { 39 + unsigned long low, high; 40 + struct stackframe *frame; 41 + 42 + if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) 43 + break; 44 + 45 + /* Validate frame pointer */ 46 + low = sp; 47 + high = ALIGN(sp, THREAD_SIZE); 48 + if (unlikely(fp < low || fp > high || fp & 0x3)) 49 + break; 50 + /* Unwind stack frame */ 51 + frame = (struct stackframe *)fp; 52 + sp = fp; 53 + fp = frame->fp; 54 + pc = ftrace_graph_ret_addr(current, NULL, frame->ra, 55 + (unsigned long *)(fp - 8)); 56 + } 57 + } 58 + 59 + #else /* !CONFIG_FRAME_POINTER */ 60 + 61 + static void notrace walk_stackframe(struct task_struct *task, 62 + struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) 63 + { 64 + unsigned long sp, pc; 65 + unsigned long *ksp; 66 + 67 + if (regs) { 68 + sp = user_stack_pointer(regs); 69 + pc = instruction_pointer(regs); 70 + } 
else if (task == NULL || task == current) { 71 + const register unsigned long current_sp __asm__ ("sp"); 72 + sp = current_sp; 73 + pc = (unsigned long)walk_stackframe; 74 + } else { 75 + /* task blocked in __switch_to */ 76 + sp = thread_saved_sp(task); 77 + pc = thread_saved_lr(task); 78 + } 79 + 80 + if (unlikely(sp & 0x3)) 81 + return; 82 + 83 + ksp = (unsigned long *)sp; 84 + while (!kstack_end(ksp)) { 85 + if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) 86 + break; 87 + pc = (*ksp++) - 0x4; 88 + } 89 + } 90 + #endif /* CONFIG_FRAME_POINTER */ 91 + 92 + static bool print_trace_address(unsigned long pc, void *arg) 93 + { 94 + print_ip_sym(pc); 95 + return false; 96 + } 97 + 98 + void show_stack(struct task_struct *task, unsigned long *sp) 99 + { 100 + pr_cont("Call Trace:\n"); 101 + walk_stackframe(task, NULL, print_trace_address, NULL); 102 + } 103 + 104 + static bool save_wchan(unsigned long pc, void *arg) 105 + { 106 + if (!in_sched_functions(pc)) { 107 + unsigned long *p = arg; 108 + *p = pc; 109 + return true; 110 + } 111 + return false; 112 + } 113 + 114 + unsigned long get_wchan(struct task_struct *task) 115 + { 116 + unsigned long pc = 0; 117 + 118 + if (likely(task && task != current && task->state != TASK_RUNNING)) 119 + walk_stackframe(task, NULL, save_wchan, &pc); 120 + return pc; 121 + } 122 + 123 + #ifdef CONFIG_STACKTRACE 124 + static bool __save_trace(unsigned long pc, void *arg, bool nosched) 125 + { 126 + struct stack_trace *trace = arg; 127 + 128 + if (unlikely(nosched && in_sched_functions(pc))) 129 + return false; 130 + if (unlikely(trace->skip > 0)) { 131 + trace->skip--; 132 + return false; 133 + } 134 + 135 + trace->entries[trace->nr_entries++] = pc; 136 + return (trace->nr_entries >= trace->max_entries); 137 + } 138 + 139 + static bool save_trace(unsigned long pc, void *arg) 140 + { 141 + return __save_trace(pc, arg, false); 142 + } 143 + 144 + /* 145 + * Save stack-backtrace addresses into a stack_trace buffer. 
146 + */ 147 + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 148 + { 149 + walk_stackframe(tsk, NULL, save_trace, trace); 150 + } 151 + EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 8 152 9 153 void save_stack_trace(struct stack_trace *trace) 10 154 { 11 - save_stack_trace_tsk(current, trace); 155 + save_stack_trace_tsk(NULL, trace); 12 156 } 13 157 EXPORT_SYMBOL_GPL(save_stack_trace); 14 158 15 - void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 16 - { 17 - unsigned long *fp, *stack_start, *stack_end; 18 - unsigned long addr; 19 - int skip = trace->skip; 20 - int savesched; 21 - int graph_idx = 0; 22 - 23 - if (tsk == current) { 24 - asm volatile("mov %0, r8\n":"=r"(fp)); 25 - savesched = 1; 26 - } else { 27 - fp = (unsigned long *)thread_saved_fp(tsk); 28 - savesched = 0; 29 - } 30 - 31 - addr = (unsigned long) fp & THREAD_MASK; 32 - stack_start = (unsigned long *) addr; 33 - stack_end = (unsigned long *) (addr + THREAD_SIZE); 34 - 35 - while (fp > stack_start && fp < stack_end) { 36 - unsigned long lpp, fpp; 37 - 38 - fpp = fp[0]; 39 - lpp = fp[1]; 40 - if (!__kernel_text_address(lpp)) 41 - break; 42 - else 43 - lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL); 44 - 45 - if (savesched || !in_sched_functions(lpp)) { 46 - if (skip) { 47 - skip--; 48 - } else { 49 - trace->entries[trace->nr_entries++] = lpp; 50 - if (trace->nr_entries >= trace->max_entries) 51 - break; 52 - } 53 - } 54 - fp = (unsigned long *)fpp; 55 - } 56 - } 57 - EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 159 + #endif /* CONFIG_STACKTRACE */