Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: add support for dynamic ftrace

Signed-off-by: Mike Frysinger <vapier@gentoo.org>

+182 -10
+2
arch/blackfin/Kconfig
··· 25 25 def_bool y 26 26 select HAVE_ARCH_KGDB 27 27 select HAVE_ARCH_TRACEHOOK 28 + select HAVE_DYNAMIC_FTRACE 29 + select HAVE_FTRACE_MCOUNT_RECORD 28 30 select HAVE_FUNCTION_GRAPH_TRACER 29 31 select HAVE_FUNCTION_TRACER 30 32 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+16
arch/blackfin/include/asm/ftrace.h
··· 12 12 13 13 #ifndef __ASSEMBLY__ 14 14 15 + #ifdef CONFIG_DYNAMIC_FTRACE 16 + 17 + extern void _mcount(void); 18 + #define MCOUNT_ADDR ((unsigned long)_mcount) 19 + 20 + static inline unsigned long ftrace_call_adjust(unsigned long addr) 21 + { 22 + return addr; 23 + } 24 + 25 + struct dyn_arch_ftrace { 26 + /* No extra data needed for Blackfin */ 27 + }; 28 + 29 + #endif 30 + 15 31 #ifdef CONFIG_FRAME_POINTER 16 32 #include <linux/mm.h> 17 33
+1
arch/blackfin/kernel/Makefile
··· 16 16 obj-y += time.o 17 17 endif 18 18 19 + obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 19 20 obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o 20 21 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 21 22 CFLAGS_REMOVE_ftrace.o = -pg
+78 -9
arch/blackfin/kernel/ftrace-entry.S
··· 10 10 11 11 .text 12 12 13 + #ifdef CONFIG_DYNAMIC_FTRACE 14 + 15 + /* Simple stub so we can boot the kernel until runtime patching has 16 + * disabled all calls to this. Then it'll be unused. 17 + */ 18 + ENTRY(__mcount) 19 + # if ANOMALY_05000371 20 + nop; nop; nop; nop; 21 + # endif 22 + rts; 23 + ENDPROC(__mcount) 24 + 13 25 /* GCC will have called us before setting up the function prologue, so we 14 26 * can clobber the normal scratch registers, but we need to make sure to 15 27 * save/restore the registers used for argument passing (R0-R2) in case ··· 32 20 * function. And since GCC pushed the previous RETS for us, the previous 33 21 * function will be waiting there. mmmm pie. 34 22 */ 35 - ENTRY(__mcount) 36 - #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST 23 + ENTRY(_ftrace_caller) 24 + # ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST 37 25 /* optional micro optimization: return if stopped */ 38 26 p1.l = _function_trace_stop; 39 27 p1.h = _function_trace_stop; 40 28 r3 = [p1]; 41 29 cc = r3 == 0; 42 30 if ! cc jump _ftrace_stub (bp); 43 - #endif 31 + # endif 32 + 33 + /* save first/second/third function arg and the return register */ 34 + [--sp] = r2; 35 + [--sp] = r0; 36 + [--sp] = r1; 37 + [--sp] = rets; 38 + 39 + /* function_trace_call(unsigned long ip, unsigned long parent_ip): 40 + * ip: this point was called by ... 41 + * parent_ip: ... 
this function 42 + * the ip itself will need adjusting for the mcount call 43 + */ 44 + r0 = rets; 45 + r1 = [sp + 16]; /* skip the 4 local regs on stack */ 46 + r0 += -MCOUNT_INSN_SIZE; 47 + 48 + .globl _ftrace_call 49 + _ftrace_call: 50 + call _ftrace_stub 51 + 52 + # ifdef CONFIG_FUNCTION_GRAPH_TRACER 53 + .globl _ftrace_graph_call 54 + _ftrace_graph_call: 55 + nop; /* jump _ftrace_graph_caller; */ 56 + # endif 57 + 58 + /* restore state and get out of dodge */ 59 + .Lfinish_trace: 60 + rets = [sp++]; 61 + r1 = [sp++]; 62 + r0 = [sp++]; 63 + r2 = [sp++]; 64 + 65 + .globl _ftrace_stub 66 + _ftrace_stub: 67 + rts; 68 + ENDPROC(_ftrace_caller) 69 + 70 + #else 71 + 72 + /* See documentation for _ftrace_caller */ 73 + ENTRY(__mcount) 74 + # ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST 75 + /* optional micro optimization: return if stopped */ 76 + p1.l = _function_trace_stop; 77 + p1.h = _function_trace_stop; 78 + r3 = [p1]; 79 + cc = r3 == 0; 80 + if ! cc jump _ftrace_stub (bp); 81 + # endif 44 82 45 83 /* save third function arg early so we can do testing below */ 46 84 [--sp] = r2; ··· 106 44 cc = r2 == r3; 107 45 if ! cc jump .Ldo_trace; 108 46 109 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 47 + # ifdef CONFIG_FUNCTION_GRAPH_TRACER 110 48 /* if the ftrace_graph_return function pointer is not set to 111 49 * the ftrace_stub entry, call prepare_ftrace_return(). 112 50 */ ··· 126 64 r3 = [p0]; 127 65 cc = r2 == r3; 128 66 if ! cc jump _ftrace_graph_caller; 129 - #endif 67 + # endif 130 68 131 69 r2 = [sp++]; 132 70 rts; ··· 165 103 rts; 166 104 ENDPROC(__mcount) 167 105 106 + #endif 107 + 168 108 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 169 109 /* The prepare_ftrace_return() function is similar to the trace function 170 110 * except it takes a pointer to the location of the frompc. This is so ··· 174 110 * purposes. 
175 111 */ 176 112 ENTRY(_ftrace_graph_caller) 113 + # ifndef CONFIG_DYNAMIC_FTRACE 177 114 /* save first/second function arg and the return register */ 178 115 [--sp] = r0; 179 116 [--sp] = r1; ··· 183 118 /* prepare_ftrace_return(parent, self_addr, frame_pointer) */ 184 119 r0 = sp; /* unsigned long *parent */ 185 120 r1 = rets; /* unsigned long self_addr */ 186 - #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST 121 + # else 122 + r0 = sp; /* unsigned long *parent */ 123 + r1 = [sp]; /* unsigned long self_addr */ 124 + # endif 125 + # ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST 187 126 r2 = fp; /* unsigned long frame_pointer */ 188 - #endif 127 + # endif 189 128 r0 += 16; /* skip the 4 local regs on stack */ 190 129 r1 += -MCOUNT_INSN_SIZE; 191 130 call _prepare_ftrace_return; ··· 208 139 [--sp] = r1; 209 140 210 141 /* get original return address */ 211 - #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST 142 + # ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST 212 143 r0 = fp; /* Blackfin is sane, so omit this */ 213 - #endif 144 + # endif 214 145 call _ftrace_return_to_handler; 215 146 rets = r0; 216 147
+85 -1
arch/blackfin/kernel/ftrace.c
··· 1 1 /* 2 2 * ftrace graph code 3 3 * 4 - * Copyright (C) 2009 Analog Devices Inc. 4 + * Copyright (C) 2009-2010 Analog Devices Inc. 5 5 * Licensed under the GPL-2 or later. 6 6 */ 7 7 8 8 #include <linux/ftrace.h> 9 9 #include <linux/kernel.h> 10 10 #include <linux/sched.h> 11 + #include <linux/uaccess.h> 11 12 #include <asm/atomic.h> 13 + #include <asm/cacheflush.h> 14 + 15 + #ifdef CONFIG_DYNAMIC_FTRACE 16 + 17 + static const unsigned char mnop[] = { 18 + 0x03, 0xc0, 0x00, 0x18, /* MNOP; */ 19 + 0x03, 0xc0, 0x00, 0x18, /* MNOP; */ 20 + }; 21 + 22 + static void bfin_make_pcrel24(unsigned char *insn, unsigned long src, 23 + unsigned long dst) 24 + { 25 + uint32_t pcrel = (dst - src) >> 1; 26 + insn[0] = pcrel >> 16; 27 + insn[1] = 0xe3; 28 + insn[2] = pcrel; 29 + insn[3] = pcrel >> 8; 30 + } 31 + #define bfin_make_pcrel24(insn, src, dst) bfin_make_pcrel24(insn, src, (unsigned long)(dst)) 32 + 33 + static int ftrace_modify_code(unsigned long ip, const unsigned char *code, 34 + unsigned long len) 35 + { 36 + int ret = probe_kernel_write((void *)ip, (void *)code, len); 37 + flush_icache_range(ip, ip + len); 38 + return ret; 39 + } 40 + 41 + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 42 + unsigned long addr) 43 + { 44 + /* Turn the mcount call site into two MNOPs as those are 32bit insns */ 45 + return ftrace_modify_code(rec->ip, mnop, sizeof(mnop)); 46 + } 47 + 48 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 49 + { 50 + /* Restore the mcount call site */ 51 + unsigned char call[8]; 52 + call[0] = 0x67; /* [--SP] = RETS; */ 53 + call[1] = 0x01; 54 + bfin_make_pcrel24(&call[2], rec->ip + 2, addr); 55 + call[6] = 0x27; /* RETS = [SP++]; */ 56 + call[7] = 0x01; 57 + return ftrace_modify_code(rec->ip, call, sizeof(call)); 58 + } 59 + 60 + int ftrace_update_ftrace_func(ftrace_func_t func) 61 + { 62 + unsigned char call[4]; 63 + unsigned long ip = (unsigned long)&ftrace_call; 64 + bfin_make_pcrel24(call, ip, func); 65 + return ftrace_modify_code(ip, call, sizeof(call));
66 + } 67 + 68 + int __init ftrace_dyn_arch_init(void *data) 69 + { 70 + /* return value is done indirectly via data */ 71 + *(unsigned long *)data = 0; 72 + 73 + return 0; 74 + } 75 + 76 + #endif 12 77 13 78 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 79 + 80 + # ifdef CONFIG_DYNAMIC_FTRACE 81 + 82 + extern void ftrace_graph_call(void); 83 + 84 + int ftrace_enable_ftrace_graph_caller(void) 85 + { 86 + unsigned long ip = (unsigned long)&ftrace_graph_call; 87 + uint16_t jump_pcrel12 = ((unsigned long)&ftrace_graph_caller - ip) >> 1; 88 + jump_pcrel12 |= 0x2000; 89 + return ftrace_modify_code(ip, (void *)&jump_pcrel12, sizeof(jump_pcrel12)); 90 + } 91 + 92 + int ftrace_disable_ftrace_graph_caller(void) 93 + { 94 + return ftrace_modify_code((unsigned long)&ftrace_graph_call, empty_zero_page, 2); 95 + } 96 + 97 + # endif 14 98 15 99 /* 16 100 * Hook the return address and push it in the stack of return addrs