Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tile: support ftrace on tilegx

This commit adds support for static ftrace, the function graph tracer,
and dynamic ftrace (runtime patching of mcount call sites).

Signed-off-by: Tony Lu <zlu@tilera.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

authored by

Tony Lu and committed by
Chris Metcalf
a61fd5e3 9ae09838

+514 -2
+6 -1
arch/tile/Kconfig
··· 119 119 def_bool y 120 120 121 121 config TILEGX 122 - bool "Building with TILE-Gx (64-bit) compiler and toolchain" 122 + bool "Building for TILE-Gx (64-bit) processor" 123 + select HAVE_FUNCTION_TRACER 124 + select HAVE_FUNCTION_TRACE_MCOUNT_TEST 125 + select HAVE_FUNCTION_GRAPH_TRACER 126 + select HAVE_DYNAMIC_FTRACE 127 + select HAVE_FTRACE_MCOUNT_RECORD 123 128 124 129 config TILEPRO 125 130 def_bool !TILEGX
+21 -1
arch/tile/include/asm/ftrace.h
··· 15 15 #ifndef _ASM_TILE_FTRACE_H 16 16 #define _ASM_TILE_FTRACE_H 17 17 18 - /* empty */ 18 + #ifdef CONFIG_FUNCTION_TRACER 19 + 20 + #define MCOUNT_ADDR ((unsigned long)(__mcount)) 21 + #define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */ 22 + 23 + #ifndef __ASSEMBLY__ 24 + extern void __mcount(void); 25 + 26 + #ifdef CONFIG_DYNAMIC_FTRACE 27 + static inline unsigned long ftrace_call_adjust(unsigned long addr) 28 + { 29 + return addr; 30 + } 31 + 32 + struct dyn_arch_ftrace { 33 + }; 34 + #endif /* CONFIG_DYNAMIC_FTRACE */ 35 + 36 + #endif /* __ASSEMBLY__ */ 37 + 38 + #endif /* CONFIG_FUNCTION_TRACER */ 19 39 20 40 #endif /* _ASM_TILE_FTRACE_H */
+6
arch/tile/kernel/Makefile
··· 9 9 sysfs.o time.o traps.o unaligned.o vdso.o \ 10 10 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o 11 11 12 + ifdef CONFIG_FUNCTION_TRACER 13 + CFLAGS_REMOVE_ftrace.o = -pg 14 + CFLAGS_REMOVE_early_printk.o = -pg 15 + endif 16 + 12 17 obj-$(CONFIG_HARDWALL) += hardwall.o 13 18 obj-$(CONFIG_COMPAT) += compat.o compat_signal.o 14 19 obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o ··· 27 22 endif 28 23 obj-$(CONFIG_TILE_USB) += usb.o 29 24 obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o 25 + obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o 30 26 31 27 obj-y += vdso/
+246
arch/tile/kernel/ftrace.c
··· 1 + /* 2 + * Copyright 2012 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + * 14 + * TILE-Gx specific ftrace support 15 + */ 16 + 17 + #include <linux/ftrace.h> 18 + #include <linux/uaccess.h> 19 + 20 + #include <asm/cacheflush.h> 21 + #include <asm/ftrace.h> 22 + #include <asm/sections.h> 23 + 24 + #include <arch/opcode.h> 25 + 26 + #ifdef CONFIG_DYNAMIC_FTRACE 27 + 28 + static inline tilegx_bundle_bits NOP(void) 29 + { 30 + return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) | 31 + create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) | 32 + create_Opcode_X0(RRR_0_OPCODE_X0) | 33 + create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) | 34 + create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) | 35 + create_Opcode_X1(RRR_0_OPCODE_X1); 36 + } 37 + 38 + static int machine_stopped __read_mostly; 39 + 40 + int ftrace_arch_code_modify_prepare(void) 41 + { 42 + machine_stopped = 1; 43 + return 0; 44 + } 45 + 46 + int ftrace_arch_code_modify_post_process(void) 47 + { 48 + flush_icache_range(0, CHIP_L1I_CACHE_SIZE()); 49 + machine_stopped = 0; 50 + return 0; 51 + } 52 + 53 + /* 54 + * Put { move r10, lr; jal ftrace_caller } in a bundle, this lets dynamic 55 + * tracer just add one cycle overhead to every kernel function when disabled. 
56 + */ 57 + static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, 58 + bool link) 59 + { 60 + tilegx_bundle_bits opcode_x0, opcode_x1; 61 + long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES; 62 + 63 + if (link) { 64 + /* opcode: jal addr */ 65 + opcode_x1 = 66 + create_Opcode_X1(JUMP_OPCODE_X1) | 67 + create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) | 68 + create_JumpOff_X1(pcrel_by_instr); 69 + } else { 70 + /* opcode: j addr */ 71 + opcode_x1 = 72 + create_Opcode_X1(JUMP_OPCODE_X1) | 73 + create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) | 74 + create_JumpOff_X1(pcrel_by_instr); 75 + } 76 + 77 + if (addr == FTRACE_ADDR) { 78 + /* opcode: or r10, lr, zero */ 79 + opcode_x0 = 80 + create_Dest_X0(10) | 81 + create_SrcA_X0(TREG_LR) | 82 + create_SrcB_X0(TREG_ZERO) | 83 + create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) | 84 + create_Opcode_X0(RRR_0_OPCODE_X0); 85 + } else { 86 + /* opcode: fnop */ 87 + opcode_x0 = 88 + create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) | 89 + create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) | 90 + create_Opcode_X0(RRR_0_OPCODE_X0); 91 + } 92 + 93 + return opcode_x1 | opcode_x0; 94 + } 95 + 96 + static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 97 + { 98 + return NOP(); 99 + } 100 + 101 + static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) 102 + { 103 + return ftrace_gen_branch(pc, addr, true); 104 + } 105 + 106 + static int ftrace_modify_code(unsigned long pc, unsigned long old, 107 + unsigned long new) 108 + { 109 + unsigned long pc_wr; 110 + 111 + /* Check if the address is in kernel text space and module space. */ 112 + if (!kernel_text_address(pc)) 113 + return -EINVAL; 114 + 115 + /* Operate on writable kernel text mapping. 
*/ 116 + pc_wr = pc - MEM_SV_START + PAGE_OFFSET; 117 + 118 + if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE)) 119 + return -EPERM; 120 + 121 + smp_wmb(); 122 + 123 + if (!machine_stopped && num_online_cpus() > 1) 124 + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); 125 + 126 + return 0; 127 + } 128 + 129 + int ftrace_update_ftrace_func(ftrace_func_t func) 130 + { 131 + unsigned long pc, old; 132 + unsigned long new; 133 + int ret; 134 + 135 + pc = (unsigned long)&ftrace_call; 136 + memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); 137 + new = ftrace_call_replace(pc, (unsigned long)func); 138 + 139 + ret = ftrace_modify_code(pc, old, new); 140 + 141 + return ret; 142 + } 143 + 144 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 145 + { 146 + unsigned long new, old; 147 + unsigned long ip = rec->ip; 148 + 149 + old = ftrace_nop_replace(rec); 150 + new = ftrace_call_replace(ip, addr); 151 + 152 + return ftrace_modify_code(rec->ip, old, new); 153 + } 154 + 155 + int ftrace_make_nop(struct module *mod, 156 + struct dyn_ftrace *rec, unsigned long addr) 157 + { 158 + unsigned long ip = rec->ip; 159 + unsigned long old; 160 + unsigned long new; 161 + int ret; 162 + 163 + old = ftrace_call_replace(ip, addr); 164 + new = ftrace_nop_replace(rec); 165 + ret = ftrace_modify_code(ip, old, new); 166 + 167 + return ret; 168 + } 169 + 170 + int __init ftrace_dyn_arch_init(void *data) 171 + { 172 + *(unsigned long *)data = 0; 173 + 174 + return 0; 175 + } 176 + #endif /* CONFIG_DYNAMIC_FTRACE */ 177 + 178 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 179 + void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 180 + unsigned long frame_pointer) 181 + { 182 + unsigned long return_hooker = (unsigned long) &return_to_handler; 183 + struct ftrace_graph_ent trace; 184 + unsigned long old; 185 + int err; 186 + 187 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 188 + return; 189 + 190 + old = *parent; 191 + *parent = return_hooker; 192 
+ 193 + err = ftrace_push_return_trace(old, self_addr, &trace.depth, 194 + frame_pointer); 195 + if (err == -EBUSY) { 196 + *parent = old; 197 + return; 198 + } 199 + 200 + trace.func = self_addr; 201 + 202 + /* Only trace if the calling function expects to */ 203 + if (!ftrace_graph_entry(&trace)) { 204 + current->curr_ret_stack--; 205 + *parent = old; 206 + } 207 + } 208 + 209 + #ifdef CONFIG_DYNAMIC_FTRACE 210 + extern unsigned long ftrace_graph_call; 211 + 212 + static int __ftrace_modify_caller(unsigned long *callsite, 213 + void (*func) (void), bool enable) 214 + { 215 + unsigned long caller_fn = (unsigned long) func; 216 + unsigned long pc = (unsigned long) callsite; 217 + unsigned long branch = ftrace_gen_branch(pc, caller_fn, false); 218 + unsigned long nop = NOP(); 219 + unsigned long old = enable ? nop : branch; 220 + unsigned long new = enable ? branch : nop; 221 + 222 + return ftrace_modify_code(pc, old, new); 223 + } 224 + 225 + static int ftrace_modify_graph_caller(bool enable) 226 + { 227 + int ret; 228 + 229 + ret = __ftrace_modify_caller(&ftrace_graph_call, 230 + ftrace_graph_caller, 231 + enable); 232 + 233 + return ret; 234 + } 235 + 236 + int ftrace_enable_ftrace_graph_caller(void) 237 + { 238 + return ftrace_modify_graph_caller(true); 239 + } 240 + 241 + int ftrace_disable_ftrace_graph_caller(void) 242 + { 243 + return ftrace_modify_graph_caller(false); 244 + } 245 + #endif /* CONFIG_DYNAMIC_FTRACE */ 246 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+224
arch/tile/kernel/mcount_64.S
··· 1 + /* 2 + * Copyright 2012 Tilera Corporation. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation, version 2. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 11 + * NON INFRINGEMENT. See the GNU General Public License for 12 + * more details. 13 + * 14 + * TILE-Gx specific __mcount support 15 + */ 16 + 17 + #include <linux/linkage.h> 18 + #include <asm/ftrace.h> 19 + 20 + #define REGSIZE 8 21 + 22 + .text 23 + .global __mcount 24 + 25 + .macro MCOUNT_SAVE_REGS 26 + addli sp, sp, -REGSIZE 27 + { 28 + st sp, lr 29 + addli r29, sp, - (12 * REGSIZE) 30 + } 31 + { 32 + addli sp, sp, - (13 * REGSIZE) 33 + st r29, sp 34 + } 35 + addli r29, r29, REGSIZE 36 + { st r29, r0; addli r29, r29, REGSIZE } 37 + { st r29, r1; addli r29, r29, REGSIZE } 38 + { st r29, r2; addli r29, r29, REGSIZE } 39 + { st r29, r3; addli r29, r29, REGSIZE } 40 + { st r29, r4; addli r29, r29, REGSIZE } 41 + { st r29, r5; addli r29, r29, REGSIZE } 42 + { st r29, r6; addli r29, r29, REGSIZE } 43 + { st r29, r7; addli r29, r29, REGSIZE } 44 + { st r29, r8; addli r29, r29, REGSIZE } 45 + { st r29, r9; addli r29, r29, REGSIZE } 46 + { st r29, r10; addli r29, r29, REGSIZE } 47 + .endm 48 + 49 + .macro MCOUNT_RESTORE_REGS 50 + addli r29, sp, (2 * REGSIZE) 51 + { ld r0, r29; addli r29, r29, REGSIZE } 52 + { ld r1, r29; addli r29, r29, REGSIZE } 53 + { ld r2, r29; addli r29, r29, REGSIZE } 54 + { ld r3, r29; addli r29, r29, REGSIZE } 55 + { ld r4, r29; addli r29, r29, REGSIZE } 56 + { ld r5, r29; addli r29, r29, REGSIZE } 57 + { ld r6, r29; addli r29, r29, REGSIZE } 58 + { ld r7, r29; addli r29, r29, REGSIZE } 59 + { ld r8, r29; addli r29, r29, REGSIZE } 60 + { ld r9, r29; addli 
r29, r29, REGSIZE } 61 + { ld r10, r29; addli lr, sp, (13 * REGSIZE) } 62 + { ld lr, lr; addli sp, sp, (14 * REGSIZE) } 63 + .endm 64 + 65 + .macro RETURN_BACK 66 + { move r12, lr; move lr, r10 } 67 + jrp r12 68 + .endm 69 + 70 + #ifdef CONFIG_DYNAMIC_FTRACE 71 + 72 + .align 64 73 + STD_ENTRY(__mcount) 74 + __mcount: 75 + j ftrace_stub 76 + STD_ENDPROC(__mcount) 77 + 78 + .align 64 79 + STD_ENTRY(ftrace_caller) 80 + moveli r11, hw2_last(function_trace_stop) 81 + { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr } 82 + { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 } 83 + ld r11, r11 84 + beqz r11, 1f 85 + jrp r12 86 + 87 + 1: 88 + { move r10, lr; move lr, r12 } 89 + MCOUNT_SAVE_REGS 90 + 91 + /* arg1: self return address */ 92 + /* arg2: parent's return address */ 93 + { move r0, lr; move r1, r10 } 94 + 95 + .global ftrace_call 96 + ftrace_call: 97 + /* 98 + * a placeholder for the call to a real tracing function, i.e. 99 + * ftrace_trace_function() 100 + */ 101 + nop 102 + 103 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 104 + .global ftrace_graph_call 105 + ftrace_graph_call: 106 + /* 107 + * a placeholder for the call to a real tracing function, i.e. 108 + * ftrace_graph_caller() 109 + */ 110 + nop 111 + #endif 112 + MCOUNT_RESTORE_REGS 113 + .global ftrace_stub 114 + ftrace_stub: 115 + RETURN_BACK 116 + STD_ENDPROC(ftrace_caller) 117 + 118 + #else /* ! 
CONFIG_DYNAMIC_FTRACE */ 119 + 120 + .align 64 121 + STD_ENTRY(__mcount) 122 + moveli r11, hw2_last(function_trace_stop) 123 + { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr } 124 + { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 } 125 + ld r11, r11 126 + beqz r11, 1f 127 + jrp r12 128 + 129 + 1: 130 + { move r10, lr; move lr, r12 } 131 + { 132 + moveli r11, hw2_last(ftrace_trace_function) 133 + moveli r13, hw2_last(ftrace_stub) 134 + } 135 + { 136 + shl16insli r11, r11, hw1(ftrace_trace_function) 137 + shl16insli r13, r13, hw1(ftrace_stub) 138 + } 139 + { 140 + shl16insli r11, r11, hw0(ftrace_trace_function) 141 + shl16insli r13, r13, hw0(ftrace_stub) 142 + } 143 + 144 + ld r11, r11 145 + sub r14, r13, r11 146 + bnez r14, static_trace 147 + 148 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 149 + moveli r15, hw2_last(ftrace_graph_return) 150 + shl16insli r15, r15, hw1(ftrace_graph_return) 151 + shl16insli r15, r15, hw0(ftrace_graph_return) 152 + ld r15, r15 153 + sub r15, r15, r13 154 + bnez r15, ftrace_graph_caller 155 + 156 + { 157 + moveli r16, hw2_last(ftrace_graph_entry) 158 + moveli r17, hw2_last(ftrace_graph_entry_stub) 159 + } 160 + { 161 + shl16insli r16, r16, hw1(ftrace_graph_entry) 162 + shl16insli r17, r17, hw1(ftrace_graph_entry_stub) 163 + } 164 + { 165 + shl16insli r16, r16, hw0(ftrace_graph_entry) 166 + shl16insli r17, r17, hw0(ftrace_graph_entry_stub) 167 + } 168 + ld r16, r16 169 + sub r17, r16, r17 170 + bnez r17, ftrace_graph_caller 171 + 172 + #endif 173 + RETURN_BACK 174 + 175 + static_trace: 176 + MCOUNT_SAVE_REGS 177 + 178 + /* arg1: self return address */ 179 + /* arg2: parent's return address */ 180 + { move r0, lr; move r1, r10 } 181 + 182 + /* call ftrace_trace_function() */ 183 + jalr r11 184 + 185 + MCOUNT_RESTORE_REGS 186 + 187 + .global ftrace_stub 188 + ftrace_stub: 189 + RETURN_BACK 190 + STD_ENDPROC(__mcount) 191 + 192 + #endif /* ! 
CONFIG_DYNAMIC_FTRACE */ 193 + 194 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 195 + 196 + STD_ENTRY(ftrace_graph_caller) 197 + ftrace_graph_caller: 198 + #ifndef CONFIG_DYNAMIC_FTRACE 199 + MCOUNT_SAVE_REGS 200 + #endif 201 + 202 + /* arg1: Get the location of the parent's return address */ 203 + addi r0, sp, 12 * REGSIZE 204 + /* arg2: Get self return address */ 205 + move r1, lr 206 + 207 + jal prepare_ftrace_return 208 + 209 + MCOUNT_RESTORE_REGS 210 + RETURN_BACK 211 + STD_ENDPROC(ftrace_graph_caller) 212 + 213 + .global return_to_handler 214 + return_to_handler: 215 + MCOUNT_SAVE_REGS 216 + 217 + jal ftrace_return_to_handler 218 + /* restore the real parent address */ 219 + move r11, r0 220 + 221 + MCOUNT_RESTORE_REGS 222 + jr r11 223 + 224 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+1
arch/tile/kernel/vmlinux.lds.S
··· 43 43 HEAD_TEXT 44 44 SCHED_TEXT 45 45 LOCK_TEXT 46 + IRQENTRY_TEXT 46 47 __fix_text_end = .; /* tile-cpack won't rearrange before this */ 47 48 TEXT_TEXT 48 49 *(.text.*)
+6
arch/tile/lib/exports.c
··· 33 33 /* arch/tile/kernel/head.S */ 34 34 EXPORT_SYMBOL(empty_zero_page); 35 35 36 + #ifdef CONFIG_FUNCTION_TRACER 37 + /* arch/tile/kernel/mcount_64.S */ 38 + #include <asm/ftrace.h> 39 + EXPORT_SYMBOL(__mcount); 40 + #endif /* CONFIG_FUNCTION_TRACER */ 41 + 36 42 /* arch/tile/lib/, various memcpy files */ 37 43 EXPORT_SYMBOL(memcpy); 38 44 EXPORT_SYMBOL(__copy_to_user_inatomic);
+4
scripts/recordmcount.pl
··· 364 364 } elsif ($arch eq "blackfin") { 365 365 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; 366 366 $mcount_adjust = -4; 367 + } elsif ($arch eq "tilegx") { 368 + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s__mcount\$"; 369 + $type = ".quad"; 370 + $alignment = 8; 367 371 } else { 368 372 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; 369 373 }