Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: Use the generalized stacktrace ops

Copy the stacktrace ops code from x86 and provide a central function for
use by functions that need to dump a callstack.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Authored by Matt Fleming and committed by Paul Mundt.
4e14dfc7 922b0dc5

+251 -71
+25
arch/sh/include/asm/stacktrace.h
··· 1 + /* 2 + * Copyright (C) 2009 Matt Fleming 3 + * 4 + * Based on: 5 + * The x86 implementation - arch/x86/include/asm/stacktrace.h 6 + */ 7 + #ifndef _ASM_SH_STACKTRACE_H 8 + #define _ASM_SH_STACKTRACE_H 9 + 10 + /* Generic stack tracer with callbacks */ 11 + 12 + struct stacktrace_ops { 13 + void (*warning)(void *data, char *msg); 14 + /* msg must contain %s for the symbol */ 15 + void (*warning_symbol)(void *data, char *msg, unsigned long symbol); 16 + void (*address)(void *data, unsigned long address, int reliable); 17 + /* On negative return stop dumping */ 18 + int (*stack)(void *data, char *name); 19 + }; 20 + 21 + void dump_trace(struct task_struct *tsk, struct pt_regs *regs, 22 + unsigned long *stack, 23 + const struct stacktrace_ops *ops, void *data); 24 + 25 + #endif /* _ASM_SH_STACKTRACE_H */
+1 -1
arch/sh/kernel/Makefile_32
··· 9 9 CFLAGS_REMOVE_ftrace.o = -pg 10 10 endif 11 11 12 - obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \ 12 + obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ 13 13 machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ 14 14 sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ 15 15 traps.o traps_32.o
+128
arch/sh/kernel/dumpstack.c
··· 1 + /* 2 + * Copyright (C) 1991, 1992 Linus Torvalds 3 + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs 4 + * Copyright (C) 2009 Matt Fleming 5 + */ 6 + #include <linux/kallsyms.h> 7 + #include <linux/ftrace.h> 8 + #include <linux/debug_locks.h> 9 + 10 + #include <asm/stacktrace.h> 11 + 12 + void printk_address(unsigned long address, int reliable) 13 + { 14 + printk(" [<%p>] %s%pS\n", (void *) address, 15 + reliable ? "" : "? ", (void *) address); 16 + } 17 + 18 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 19 + static void 20 + print_ftrace_graph_addr(unsigned long addr, void *data, 21 + const struct stacktrace_ops *ops, 22 + struct thread_info *tinfo, int *graph) 23 + { 24 + struct task_struct *task = tinfo->task; 25 + unsigned long ret_addr; 26 + int index = task->curr_ret_stack; 27 + 28 + if (addr != (unsigned long)return_to_handler) 29 + return; 30 + 31 + if (!task->ret_stack || index < *graph) 32 + return; 33 + 34 + index -= *graph; 35 + ret_addr = task->ret_stack[index].ret; 36 + 37 + ops->address(data, ret_addr, 1); 38 + 39 + (*graph)++; 40 + } 41 + #else 42 + static inline void 43 + print_ftrace_graph_addr(unsigned long addr, void *data, 44 + const struct stacktrace_ops *ops, 45 + struct thread_info *tinfo, int *graph) 46 + { } 47 + #endif 48 + 49 + /* 50 + * Unwind the call stack and pass information to the stacktrace_ops 51 + * functions. 
52 + */ 53 + void dump_trace(struct task_struct *task, struct pt_regs *regs, 54 + unsigned long *sp, const struct stacktrace_ops *ops, 55 + void *data) 56 + { 57 + struct thread_info *context; 58 + int graph = 0; 59 + 60 + context = (struct thread_info *) 61 + ((unsigned long)sp & (~(THREAD_SIZE - 1))); 62 + 63 + while (!kstack_end(sp)) { 64 + unsigned long addr = *sp++; 65 + 66 + if (__kernel_text_address(addr)) { 67 + ops->address(data, addr, 0); 68 + 69 + print_ftrace_graph_addr(addr, data, ops, 70 + context, &graph); 71 + } 72 + } 73 + } 74 + EXPORT_SYMBOL(dump_trace); 75 + 76 + 77 + static void 78 + print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) 79 + { 80 + printk(data); 81 + print_symbol(msg, symbol); 82 + printk("\n"); 83 + } 84 + 85 + static void print_trace_warning(void *data, char *msg) 86 + { 87 + printk("%s%s\n", (char *)data, msg); 88 + } 89 + 90 + static int print_trace_stack(void *data, char *name) 91 + { 92 + printk("%s <%s> ", (char *)data, name); 93 + return 0; 94 + } 95 + 96 + /* 97 + * Print one address/symbol entries per line. 98 + */ 99 + static void print_trace_address(void *data, unsigned long addr, int reliable) 100 + { 101 + printk(data); 102 + printk_address(addr, reliable); 103 + } 104 + 105 + static const struct stacktrace_ops print_trace_ops = { 106 + .warning = print_trace_warning, 107 + .warning_symbol = print_trace_warning_symbol, 108 + .stack = print_trace_stack, 109 + .address = print_trace_address, 110 + }; 111 + 112 + void show_trace(struct task_struct *tsk, unsigned long *sp, 113 + struct pt_regs *regs) 114 + { 115 + if (regs && user_mode(regs)) 116 + return; 117 + 118 + printk("\nCall trace:\n"); 119 + 120 + dump_trace(tsk, regs, sp, &print_trace_ops, ""); 121 + 122 + printk("\n"); 123 + 124 + if (!tsk) 125 + tsk = current; 126 + 127 + debug_show_held_locks(tsk); 128 + }
+61 -26
arch/sh/kernel/stacktrace.c
··· 14 14 #include <linux/thread_info.h> 15 15 #include <linux/module.h> 16 16 #include <asm/ptrace.h> 17 + #include <asm/stacktrace.h> 18 + 19 + static void save_stack_warning(void *data, char *msg) 20 + { 21 + } 22 + 23 + static void 24 + save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) 25 + { 26 + } 27 + 28 + static int save_stack_stack(void *data, char *name) 29 + { 30 + return 0; 31 + } 17 32 18 33 /* 19 34 * Save stack-backtrace addresses into a stack_trace buffer. 20 35 */ 36 + static void save_stack_address(void *data, unsigned long addr, int reliable) 37 + { 38 + struct stack_trace *trace = data; 39 + 40 + if (trace->skip > 0) { 41 + trace->skip--; 42 + return; 43 + } 44 + 45 + if (trace->nr_entries < trace->max_entries) 46 + trace->entries[trace->nr_entries++] = addr; 47 + } 48 + 49 + static const struct stacktrace_ops save_stack_ops = { 50 + .warning = save_stack_warning, 51 + .warning_symbol = save_stack_warning_symbol, 52 + .stack = save_stack_stack, 53 + .address = save_stack_address, 54 + }; 55 + 21 56 void save_stack_trace(struct stack_trace *trace) 22 57 { 23 58 unsigned long *sp = (unsigned long *)current_stack_pointer; 24 59 25 - while (!kstack_end(sp)) { 26 - unsigned long addr = *sp++; 27 - 28 - if (__kernel_text_address(addr)) { 29 - if (trace->skip > 0) 30 - trace->skip--; 31 - else 32 - trace->entries[trace->nr_entries++] = addr; 33 - if (trace->nr_entries >= trace->max_entries) 34 - break; 35 - } 36 - } 60 + dump_trace(current, NULL, sp, &save_stack_ops, trace); 37 61 } 38 62 EXPORT_SYMBOL_GPL(save_stack_trace); 63 + 64 + static void 65 + save_stack_address_nosched(void *data, unsigned long addr, int reliable) 66 + { 67 + struct stack_trace *trace = (struct stack_trace *)data; 68 + 69 + if (in_sched_functions(addr)) 70 + return; 71 + 72 + if (trace->skip > 0) { 73 + trace->skip--; 74 + return; 75 + } 76 + 77 + if (trace->nr_entries < trace->max_entries) 78 + trace->entries[trace->nr_entries++] = addr; 79 + } 80 + 81 + 
static const struct stacktrace_ops save_stack_ops_nosched = { 82 + .warning = save_stack_warning, 83 + .warning_symbol = save_stack_warning_symbol, 84 + .stack = save_stack_stack, 85 + .address = save_stack_address_nosched, 86 + }; 39 87 40 88 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 41 89 { 42 90 unsigned long *sp = (unsigned long *)tsk->thread.sp; 43 91 44 - while (!kstack_end(sp)) { 45 - unsigned long addr = *sp++; 46 - 47 - if (__kernel_text_address(addr)) { 48 - if (in_sched_functions(addr)) 49 - break; 50 - if (trace->skip > 0) 51 - trace->skip--; 52 - else 53 - trace->entries[trace->nr_entries++] = addr; 54 - if (trace->nr_entries >= trace->max_entries) 55 - break; 56 - } 57 - } 92 + dump_trace(current, NULL, sp, &save_stack_ops_nosched, trace); 58 93 } 59 94 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-24
arch/sh/kernel/traps_32.c
··· 858 858 per_cpu_trap_init(); 859 859 } 860 860 861 - void show_trace(struct task_struct *tsk, unsigned long *sp, 862 - struct pt_regs *regs) 863 - { 864 - unsigned long addr; 865 - 866 - if (regs && user_mode(regs)) 867 - return; 868 - 869 - printk("\nCall trace:\n"); 870 - 871 - while (!kstack_end(sp)) { 872 - addr = *sp++; 873 - if (kernel_text_address(addr)) 874 - print_ip_sym(addr); 875 - } 876 - 877 - printk("\n"); 878 - 879 - if (!tsk) 880 - tsk = current; 881 - 882 - debug_show_held_locks(tsk); 883 - } 884 - 885 861 void show_stack(struct task_struct *tsk, unsigned long *sp) 886 862 { 887 863 unsigned long stack;
+36 -20
arch/sh/oprofile/backtrace.c
··· 20 20 #include <asm/ptrace.h> 21 21 #include <asm/uaccess.h> 22 22 #include <asm/sections.h> 23 + #include <asm/stacktrace.h> 24 + 25 + static void backtrace_warning_symbol(void *data, char *msg, 26 + unsigned long symbol) 27 + { 28 + /* Ignore warnings */ 29 + } 30 + 31 + static void backtrace_warning(void *data, char *msg) 32 + { 33 + /* Ignore warnings */ 34 + } 35 + 36 + static int backtrace_stack(void *data, char *name) 37 + { 38 + /* Yes, we want all stacks */ 39 + return 0; 40 + } 41 + 42 + static void backtrace_address(void *data, unsigned long addr, int reliable) 43 + { 44 + unsigned int *depth = data; 45 + 46 + if ((*depth)--) 47 + oprofile_add_trace(addr); 48 + } 49 + 50 + static struct stacktrace_ops backtrace_ops = { 51 + .warning = backtrace_warning, 52 + .warning_symbol = backtrace_warning_symbol, 53 + .stack = backtrace_stack, 54 + .address = backtrace_address, 55 + }; 23 56 24 57 /* Limit to stop backtracing too far. */ 25 58 static int backtrace_limit = 20; ··· 107 74 return ((unsigned long)stackaddr > stack) && ((unsigned long)stackaddr < stack_base); 108 75 } 109 76 110 - static unsigned long * 111 - kernel_backtrace(unsigned long *stackaddr, struct pt_regs *regs) 112 - { 113 - unsigned long addr; 114 - 115 - /* 116 - * If not a valid kernel address, keep going till we find one 117 - * or the SP stops being a valid address. 118 - */ 119 - do { 120 - addr = *stackaddr++; 121 - oprofile_add_trace(addr); 122 - } while (valid_kernel_stack(stackaddr, regs)); 123 - 124 - return stackaddr; 125 - } 126 - 127 77 void sh_backtrace(struct pt_regs * const regs, unsigned int depth) 128 78 { 129 79 unsigned long *stackaddr; ··· 119 103 120 104 stackaddr = (unsigned long *)regs->regs[15]; 121 105 if (!user_mode(regs)) { 122 - while (depth-- && valid_kernel_stack(stackaddr, regs)) 123 - stackaddr = kernel_backtrace(stackaddr, regs); 124 - 106 + if (depth) 107 + dump_trace(NULL, regs, stackaddr, 108 + &backtrace_ops, &depth); 125 109 return; 126 110 } 127 111