Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ftrace' of git://github.com/rabinv/linux-2.6 into devel-stable

+284 -88
+1
arch/arm/Kconfig
··· 14 14 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) 15 15 select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) 16 16 select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) 17 + select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL) 17 18 select HAVE_GENERIC_DMA_COHERENT 18 19 select HAVE_KERNEL_GZIP 19 20 select HAVE_KERNEL_LZO
+1 -1
arch/arm/Kconfig.debug
··· 23 23 config FRAME_POINTER 24 24 bool 25 25 depends on !THUMB2_KERNEL 26 - default y if !ARM_UNWIND 26 + default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER 27 27 help 28 28 If you say N here, the resulting kernel will be slightly smaller and 29 29 faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
+5
arch/arm/include/asm/system.h
··· 63 63 #include <asm/outercache.h> 64 64 65 65 #define __exception __attribute__((section(".exception.text"))) 66 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 67 + #define __exception_irq_entry __irq_entry 68 + #else 69 + #define __exception_irq_entry __exception 70 + #endif 66 71 67 72 struct thread_info; 68 73 struct task_struct;
+21 -2
arch/arm/include/asm/traps.h
··· 15 15 void register_undef_hook(struct undef_hook *hook); 16 16 void unregister_undef_hook(struct undef_hook *hook); 17 17 18 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 19 + static inline int __in_irqentry_text(unsigned long ptr) 20 + { 21 + extern char __irqentry_text_start[]; 22 + extern char __irqentry_text_end[]; 23 + 24 + return ptr >= (unsigned long)&__irqentry_text_start && 25 + ptr < (unsigned long)&__irqentry_text_end; 26 + } 27 + #else 28 + static inline int __in_irqentry_text(unsigned long ptr) 29 + { 30 + return 0; 31 + } 32 + #endif 33 + 18 34 static inline int in_exception_text(unsigned long ptr) 19 35 { 20 36 extern char __exception_text_start[]; 21 37 extern char __exception_text_end[]; 38 + int in; 22 39 23 - return ptr >= (unsigned long)&__exception_text_start && 24 - ptr < (unsigned long)&__exception_text_end; 40 + in = ptr >= (unsigned long)&__exception_text_start && 41 + ptr < (unsigned long)&__exception_text_end; 42 + 43 + return in ? : __in_irqentry_text(ptr); 25 44 } 26 45 27 46 extern void __init early_trap_init(void);
+2 -1
arch/arm/kernel/Makefile
··· 5 5 CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) 6 6 AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) 7 7 8 - ifdef CONFIG_DYNAMIC_FTRACE 8 + ifdef CONFIG_FUNCTION_TRACER 9 9 CFLAGS_REMOVE_ftrace.o = -pg 10 10 endif 11 11 ··· 33 33 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 34 34 obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o 35 35 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 36 + obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 36 37 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 37 38 obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o 38 39 obj-$(CONFIG_ATAGS_PROC) += atags.o
+148 -76
arch/arm/kernel/entry-common.S
··· 141 141 #endif
142 142 #endif
143 143 
144 - #ifdef CONFIG_DYNAMIC_FTRACE
145 - ENTRY(__gnu_mcount_nc)
146 - mov ip, lr
147 - ldmia sp!, {lr}
148 - mov pc, ip
149 - ENDPROC(__gnu_mcount_nc)
150 - 
151 - ENTRY(ftrace_caller)
152 - stmdb sp!, {r0-r3, lr}
153 - mov r0, lr
154 - sub r0, r0, #MCOUNT_INSN_SIZE
155 - ldr r1, [sp, #20]
156 - 
157 - .global ftrace_call
158 - ftrace_call:
159 - bl ftrace_stub
160 - ldmia sp!, {r0-r3, ip, lr}
161 - mov pc, ip
162 - ENDPROC(ftrace_caller)
163 - 
164 - #ifdef CONFIG_OLD_MCOUNT
165 - ENTRY(mcount)
166 - stmdb sp!, {lr}
167 - ldr lr, [fp, #-4]
168 - ldmia sp!, {pc}
169 - ENDPROC(mcount)
170 - 
171 - ENTRY(ftrace_caller_old)
172 - stmdb sp!, {r0-r3, lr}
173 - ldr r1, [fp, #-4]
174 - mov r0, lr
175 - sub r0, r0, #MCOUNT_INSN_SIZE
176 - 
177 - .globl ftrace_call_old
178 - ftrace_call_old:
179 - bl ftrace_stub
180 - ldr lr, [fp, #-4] @ restore lr
181 - ldmia sp!, {r0-r3, pc}
182 - ENDPROC(ftrace_caller_old)
183 - #endif
184 - 
185 - #else
186 - 
187 - ENTRY(__gnu_mcount_nc)
188 - stmdb sp!, {r0-r3, lr}
144 + .macro __mcount suffix
145 + mcount_enter
189 146 ldr r0, =ftrace_trace_function
190 147 ldr r2, [r0]
191 148 adr r0, .Lftrace_stub
192 149 cmp r0, r2
193 - bne gnu_trace
194 - ldmia sp!, {r0-r3, ip, lr}
195 - mov pc, ip
150 + bne 1f
196 151 
197 - gnu_trace:
198 - ldr r1, [sp, #20] @ lr of instrumented routine
199 - mov r0, lr
152 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
153 + ldr r1, =ftrace_graph_return
154 + ldr r2, [r1]
155 + cmp r0, r2
156 + bne ftrace_graph_caller\suffix
157 + 
158 + ldr r1, =ftrace_graph_entry
159 + ldr r2, [r1]
160 + ldr r0, =ftrace_graph_entry_stub
161 + cmp r0, r2
162 + bne ftrace_graph_caller\suffix
163 + #endif
164 + 
165 + mcount_exit
166 + 
167 + 1: mcount_get_lr r1 @ lr of instrumented func
168 + mov r0, lr @ instrumented function
200 169 sub r0, r0, #MCOUNT_INSN_SIZE
201 - adr lr, BSYM(1f)
170 + adr lr, BSYM(2f)
202 171 mov pc, r2
203 - 1:
204 - ldmia sp!, {r0-r3, ip, lr}
205 - mov pc, ip
206 - ENDPROC(__gnu_mcount_nc)
172 + 2: mcount_exit
173 + .endm
174 + 
175 + .macro __ftrace_caller suffix
176 + mcount_enter
177 + 
178 + mcount_get_lr r1 @ lr of instrumented func
179 + mov r0, lr @ instrumented function
180 + sub r0, r0, #MCOUNT_INSN_SIZE
181 + 
182 + .globl ftrace_call\suffix
183 + ftrace_call\suffix:
184 + bl ftrace_stub
185 + 
186 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
187 + .globl ftrace_graph_call\suffix
188 + ftrace_graph_call\suffix:
189 + mov r0, r0
190 + #endif
191 + 
192 + mcount_exit
193 + .endm
194 + 
195 + .macro __ftrace_graph_caller
196 + sub r0, fp, #4 @ &lr of instrumented routine (&parent)
197 + #ifdef CONFIG_DYNAMIC_FTRACE
198 + @ called from __ftrace_caller, saved in mcount_enter
199 + ldr r1, [sp, #16] @ instrumented routine (func)
200 + #else
201 + @ called from __mcount, untouched in lr
202 + mov r1, lr @ instrumented routine (func)
203 + #endif
204 + sub r1, r1, #MCOUNT_INSN_SIZE
205 + mov r2, fp @ frame pointer
206 + bl prepare_ftrace_return
207 + mcount_exit
208 + .endm
207 209 
208 210 #ifdef CONFIG_OLD_MCOUNT
209 211 /*
210 - * This is under an ifdef in order to force link-time errors for people trying
211 - * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
212 - * mcount.
212 + * mcount
213 213 */
214 - ENTRY(mcount)
215 - stmdb sp!, {r0-r3, lr}
216 - ldr r0, =ftrace_trace_function
217 - ldr r2, [r0]
218 - adr r0, ftrace_stub
219 - cmp r0, r2
220 - bne trace
221 - ldr lr, [fp, #-4] @ restore lr
222 - ldmia sp!, {r0-r3, pc}
223 214 
224 - trace:
225 - ldr r1, [fp, #-4] @ lr of instrumented routine
226 - mov r0, lr
227 - sub r0, r0, #MCOUNT_INSN_SIZE
228 - mov lr, pc
229 - mov pc, r2
230 - ldr lr, [fp, #-4] @ restore lr
215 + .macro mcount_enter
216 + stmdb sp!, {r0-r3, lr}
217 + .endm
218 + 
219 + .macro mcount_get_lr reg
220 + ldr \reg, [fp, #-4]
221 + .endm
222 + 
223 + .macro mcount_exit
224 + ldr lr, [fp, #-4]
231 225 ldmia sp!, {r0-r3, pc}
226 + .endm
227 + 
228 + ENTRY(mcount)
229 + #ifdef CONFIG_DYNAMIC_FTRACE
230 + stmdb sp!, {lr}
231 + ldr lr, [fp, #-4]
232 + ldmia sp!, {pc}
233 + #else
234 + __mcount _old
235 + #endif
232 236 ENDPROC(mcount)
237 + 
238 + #ifdef CONFIG_DYNAMIC_FTRACE
239 + ENTRY(ftrace_caller_old)
240 + __ftrace_caller _old
241 + ENDPROC(ftrace_caller_old)
233 242 #endif
234 243 
235 - #endif /* CONFIG_DYNAMIC_FTRACE */
244 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
245 + ENTRY(ftrace_graph_caller_old)
246 + __ftrace_graph_caller
247 + ENDPROC(ftrace_graph_caller_old)
248 + #endif
249 + 
250 + .purgem mcount_enter
251 + .purgem mcount_get_lr
252 + .purgem mcount_exit
253 + #endif
254 + 
255 + /*
256 + * __gnu_mcount_nc
257 + */
258 + 
259 + .macro mcount_enter
260 + stmdb sp!, {r0-r3, lr}
261 + .endm
262 + 
263 + .macro mcount_get_lr reg
264 + ldr \reg, [sp, #20]
265 + .endm
266 + 
267 + .macro mcount_exit
268 + ldmia sp!, {r0-r3, ip, lr}
269 + mov pc, ip
270 + .endm
271 + 
272 + ENTRY(__gnu_mcount_nc)
273 + #ifdef CONFIG_DYNAMIC_FTRACE
274 + mov ip, lr
275 + ldmia sp!, {lr}
276 + mov pc, ip
277 + #else
278 + __mcount
279 + #endif
280 + ENDPROC(__gnu_mcount_nc)
281 + 
282 + #ifdef CONFIG_DYNAMIC_FTRACE
283 + ENTRY(ftrace_caller)
284 + __ftrace_caller
285 + ENDPROC(ftrace_caller)
286 + #endif
287 + 
288 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
289 + ENTRY(ftrace_graph_caller)
290 + __ftrace_graph_caller
291 + ENDPROC(ftrace_graph_caller)
292 + #endif
293 + 
294 + .purgem mcount_enter
295 + .purgem mcount_get_lr
296 + .purgem mcount_exit
297 + 
298 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
299 + .globl return_to_handler
300 + return_to_handler:
301 + stmdb sp!, {r0-r3}
302 + mov r0, fp @ frame pointer
303 + bl ftrace_return_to_handler
304 + mov lr, r0 @ r0 has real ret addr
305 + ldmia sp!, {r0-r3}
306 + mov pc, lr
307 + #endif
236 308 
237 309 ENTRY(ftrace_stub)
238 310 .Lftrace_stub:
+98 -5
arch/arm/kernel/ftrace.c
··· 24 24 #define NOP 0xe8bd4000 /* pop {lr} */
25 25 #endif
26 26 
27 + #ifdef CONFIG_DYNAMIC_FTRACE
27 28 #ifdef CONFIG_OLD_MCOUNT
28 29 #define OLD_MCOUNT_ADDR ((unsigned long) mcount)
29 30 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
··· 60 59 }
61 60 #endif
62 61 
63 - /* construct a branch (BL) instruction to addr */
64 62 #ifdef CONFIG_THUMB2_KERNEL
65 - static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
63 + static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
64 + bool link)
66 65 {
67 66 unsigned long s, j1, j2, i1, i2, imm10, imm11;
68 67 unsigned long first, second;
··· 84 83 j2 = (!i2) ^ s;
85 84 
86 85 first = 0xf000 | (s << 10) | imm10;
87 - second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
86 + second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
87 + if (link)
88 + second |= 1 << 14;
88 89 
89 90 return (second << 16) | first;
90 91 }
91 92 #else
92 - static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
93 + static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
94 + bool link)
93 95 {
96 + unsigned long opcode = 0xea000000;
94 97 long offset;
98 + 
99 + if (link)
100 + opcode |= 1 << 24;
95 101 
96 102 offset = (long)addr - (long)(pc + 8);
97 103 if (unlikely(offset < -33554432 || offset > 33554428)) {
··· 111 103 
112 104 offset = (offset >> 2) & 0x00ffffff;
113 105 
114 - return 0xeb000000 | offset;
106 + return opcode | offset;
115 107 }
116 108 #endif
109 + 
110 + static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
111 + {
112 + return ftrace_gen_branch(pc, addr, true);
113 + }
117 114 
118 115 static int ftrace_modify_code(unsigned long pc, unsigned long old,
119 116 unsigned long new)
··· 206 193 
207 194 return 0;
208 195 }
196 + #endif /* CONFIG_DYNAMIC_FTRACE */
197 + 
198 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER
199 + void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
200 + unsigned long frame_pointer)
201 + {
202 + unsigned long return_hooker = (unsigned long) &return_to_handler;
203 + struct ftrace_graph_ent trace;
204 + unsigned long old;
205 + int err;
206 + 
207 + if (unlikely(atomic_read(&current->tracing_graph_pause)))
208 + return;
209 + 
210 + old = *parent;
211 + *parent = return_hooker;
212 + 
213 + err = ftrace_push_return_trace(old, self_addr, &trace.depth,
214 + frame_pointer);
215 + if (err == -EBUSY) {
216 + *parent = old;
217 + return;
218 + }
219 + 
220 + trace.func = self_addr;
221 + 
222 + /* Only trace if the calling function expects to */
223 + if (!ftrace_graph_entry(&trace)) {
224 + current->curr_ret_stack--;
225 + *parent = old;
226 + }
227 + }
228 + 
229 + #ifdef CONFIG_DYNAMIC_FTRACE
230 + extern unsigned long ftrace_graph_call;
231 + extern unsigned long ftrace_graph_call_old;
232 + extern void ftrace_graph_caller_old(void);
233 + 
234 + static int __ftrace_modify_caller(unsigned long *callsite,
235 + void (*func) (void), bool enable)
236 + {
237 + unsigned long caller_fn = (unsigned long) func;
238 + unsigned long pc = (unsigned long) callsite;
239 + unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
240 + unsigned long nop = 0xe1a00000; /* mov r0, r0 */
241 + unsigned long old = enable ? nop : branch;
242 + unsigned long new = enable ? branch : nop;
243 + 
244 + return ftrace_modify_code(pc, old, new);
245 + }
246 + 
247 + static int ftrace_modify_graph_caller(bool enable)
248 + {
249 + int ret;
250 + 
251 + ret = __ftrace_modify_caller(&ftrace_graph_call,
252 + ftrace_graph_caller,
253 + enable);
254 + 
255 + #ifdef CONFIG_OLD_MCOUNT
256 + if (!ret)
257 + ret = __ftrace_modify_caller(&ftrace_graph_call_old,
258 + ftrace_graph_caller_old,
259 + enable);
260 + #endif
261 + 
262 + return ret;
263 + }
264 + 
265 + int ftrace_enable_ftrace_graph_caller(void)
266 + {
267 + return ftrace_modify_graph_caller(true);
268 + }
269 + 
270 + int ftrace_disable_ftrace_graph_caller(void)
271 + {
272 + return ftrace_modify_graph_caller(false);
273 + }
274 + #endif /* CONFIG_DYNAMIC_FTRACE */
275 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+3 -1
arch/arm/kernel/irq.c
··· 35 35 #include <linux/list.h> 36 36 #include <linux/kallsyms.h> 37 37 #include <linux/proc_fs.h> 38 + #include <linux/ftrace.h> 38 39 39 40 #include <asm/system.h> 40 41 #include <asm/mach/irq.h> ··· 106 105 * come via this function. Instead, they should provide their 107 106 * own 'handler' 108 107 */ 109 - asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) 108 + asmlinkage void __exception_irq_entry 109 + asm_do_IRQ(unsigned int irq, struct pt_regs *regs) 110 110 { 111 111 struct pt_regs *old_regs = set_irq_regs(regs); 112 112
+3 -2
arch/arm/kernel/smp.c
··· 16 16 #include <linux/cache.h> 17 17 #include <linux/profile.h> 18 18 #include <linux/errno.h> 19 + #include <linux/ftrace.h> 19 20 #include <linux/mm.h> 20 21 #include <linux/err.h> 21 22 #include <linux/cpu.h> ··· 458 457 } 459 458 460 459 #ifdef CONFIG_LOCAL_TIMERS 461 - asmlinkage void __exception do_local_timer(struct pt_regs *regs) 460 + asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs) 462 461 { 463 462 struct pt_regs *old_regs = set_irq_regs(regs); 464 463 int cpu = smp_processor_id(); ··· 545 544 * 546 545 * Bit 0 - Inter-processor function call 547 546 */ 548 - asmlinkage void __exception do_IPI(struct pt_regs *regs) 547 + asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs) 549 548 { 550 549 unsigned int cpu = smp_processor_id(); 551 550 struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+1
arch/arm/kernel/vmlinux.lds.S
··· 101 101 __exception_text_start = .; 102 102 *(.exception.text) 103 103 __exception_text_end = .; 104 + IRQENTRY_TEXT 104 105 TEXT_TEXT 105 106 SCHED_TEXT 106 107 LOCK_TEXT
+1
arch/arm/plat-versatile/sched-clock.c
··· 20 20 */ 21 21 #include <linux/cnt32_to_63.h> 22 22 #include <linux/io.h> 23 + #include <linux/sched.h> 23 24 #include <asm/div64.h> 24 25 25 26 #include <mach/hardware.h>