Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
ftrace: fix current_tracer error return
tracing: fix a build error on alpha
ftrace: use a real variable for ftrace_nop in x86
tracing/ftrace: make boot tracer select the sched_switch tracer
tracepoint: check if the probe has been registered
asm-generic: define DIE_OOPS in asm-generic
trace: fix printk warning for u64
ftrace: warning in kernel/trace/ftrace.c
ftrace: fix build failure
ftrace, powerpc, sparc64, x86: remove notrace from arch ftrace file
ftrace: remove ftrace hash
ftrace: remove mcount set
ftrace: remove daemon
ftrace: disable dynamic ftrace for all archs that use daemon
ftrace: add ftrace warn on to disable ftrace
ftrace: only have ftrace_kill atomic
ftrace: use probe_kernel
ftrace: comment arch ftrace code
ftrace: return error on failed modified text.
ftrace: dynamic ftrace process only text section
...

+284 -716
+1 -1
Makefile
··· 536 536 KBUILD_AFLAGS += -gdwarf-2 537 537 endif 538 538 539 - ifdef CONFIG_FTRACE 539 + ifdef CONFIG_FUNCTION_TRACER 540 540 KBUILD_CFLAGS += -pg 541 541 endif 542 542
+1 -2
arch/arm/Kconfig
··· 16 16 select HAVE_ARCH_KGDB 17 17 select HAVE_KPROBES if (!XIP_KERNEL) 18 18 select HAVE_KRETPROBES if (HAVE_KPROBES) 19 - select HAVE_FTRACE if (!XIP_KERNEL) 20 - select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE) 19 + select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) 21 20 select HAVE_GENERIC_DMA_COHERENT 22 21 help 23 22 The ARM series is a line of low-power-consumption RISC chip designs
+1 -1
arch/arm/boot/compressed/Makefile
··· 70 70 targets := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \ 71 71 head.o misc.o $(OBJS) 72 72 73 - ifeq ($(CONFIG_FTRACE),y) 73 + ifeq ($(CONFIG_FUNCTION_TRACER),y) 74 74 ORIG_CFLAGS := $(KBUILD_CFLAGS) 75 75 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 76 76 endif
+1 -1
arch/arm/include/asm/ftrace.h
··· 1 1 #ifndef _ASM_ARM_FTRACE 2 2 #define _ASM_ARM_FTRACE 3 3 4 - #ifdef CONFIG_FTRACE 4 + #ifdef CONFIG_FUNCTION_TRACER 5 5 #define MCOUNT_ADDR ((long)(mcount)) 6 6 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 7 7
+1 -1
arch/arm/kernel/armksyms.c
··· 183 183 184 184 EXPORT_SYMBOL(copy_page); 185 185 186 - #ifdef CONFIG_FTRACE 186 + #ifdef CONFIG_FUNCTION_TRACER 187 187 EXPORT_SYMBOL(mcount); 188 188 #endif
+2 -2
arch/arm/kernel/entry-common.S
··· 101 101 #undef CALL 102 102 #define CALL(x) .long x 103 103 104 - #ifdef CONFIG_FTRACE 104 + #ifdef CONFIG_FUNCTION_TRACER 105 105 #ifdef CONFIG_DYNAMIC_FTRACE 106 106 ENTRY(mcount) 107 107 stmdb sp!, {r0-r3, lr} ··· 149 149 ftrace_stub: 150 150 mov pc, lr 151 151 152 - #endif /* CONFIG_FTRACE */ 152 + #endif /* CONFIG_FUNCTION_TRACER */ 153 153 154 154 /*============================================================================= 155 155 * SWI handler
-13
arch/arm/kernel/ftrace.c
··· 95 95 return ret; 96 96 } 97 97 98 - int ftrace_mcount_set(unsigned long *data) 99 - { 100 - unsigned long pc, old; 101 - unsigned long *addr = data; 102 - unsigned char *new; 103 - 104 - pc = (unsigned long)&mcount_call; 105 - memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE); 106 - new = ftrace_call_replace(pc, *addr); 107 - *addr = ftrace_modify_code(pc, (unsigned char *)&old, new); 108 - return 0; 109 - } 110 - 111 98 /* run from kstop_machine */ 112 99 int __init ftrace_dyn_arch_init(void *data) 113 100 {
+1 -2
arch/powerpc/Kconfig
··· 108 108 config PPC 109 109 bool 110 110 default y 111 - select HAVE_DYNAMIC_FTRACE 112 - select HAVE_FTRACE 111 + select HAVE_FUNCTION_TRACER 113 112 select ARCH_WANT_OPTIONAL_GPIOLIB 114 113 select HAVE_IDE 115 114 select HAVE_IOREMAP_PROT
+1 -1
arch/powerpc/Makefile
··· 122 122 endif 123 123 124 124 # Work around a gcc code-gen bug with -fno-omit-frame-pointer. 125 - ifeq ($(CONFIG_FTRACE),y) 125 + ifeq ($(CONFIG_FUNCTION_TRACER),y) 126 126 KBUILD_CFLAGS += -mno-sched-epilog 127 127 endif 128 128
+1 -1
arch/powerpc/include/asm/ftrace.h
··· 1 1 #ifndef _ASM_POWERPC_FTRACE 2 2 #define _ASM_POWERPC_FTRACE 3 3 4 - #ifdef CONFIG_FTRACE 4 + #ifdef CONFIG_FUNCTION_TRACER 5 5 #define MCOUNT_ADDR ((long)(_mcount)) 6 6 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 7 7
+1 -1
arch/powerpc/kernel/Makefile
··· 12 12 CFLAGS_btext.o += -fPIC 13 13 endif 14 14 15 - ifdef CONFIG_FTRACE 15 + ifdef CONFIG_FUNCTION_TRACER 16 16 # Do not trace early boot code 17 17 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog 18 18 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
+1 -1
arch/powerpc/kernel/entry_32.S
··· 1158 1158 1159 1159 #endif /* CONFIG_PPC_RTAS */ 1160 1160 1161 - #ifdef CONFIG_FTRACE 1161 + #ifdef CONFIG_FUNCTION_TRACER 1162 1162 #ifdef CONFIG_DYNAMIC_FTRACE 1163 1163 _GLOBAL(mcount) 1164 1164 _GLOBAL(_mcount)
+1 -1
arch/powerpc/kernel/entry_64.S
··· 884 884 mtlr r0 885 885 blr 886 886 887 - #ifdef CONFIG_FTRACE 887 + #ifdef CONFIG_FUNCTION_TRACER 888 888 #ifdef CONFIG_DYNAMIC_FTRACE 889 889 _GLOBAL(mcount) 890 890 _GLOBAL(_mcount)
+5 -22
arch/powerpc/kernel/ftrace.c
··· 28 28 #endif 29 29 30 30 31 - static unsigned int notrace ftrace_calc_offset(long ip, long addr) 31 + static unsigned int ftrace_calc_offset(long ip, long addr) 32 32 { 33 33 return (int)(addr - ip); 34 34 } 35 35 36 - notrace unsigned char *ftrace_nop_replace(void) 36 + unsigned char *ftrace_nop_replace(void) 37 37 { 38 38 return (char *)&ftrace_nop; 39 39 } 40 40 41 - notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 41 + unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 42 42 { 43 43 static unsigned int op; 44 44 ··· 68 68 # define _ASM_PTR " .long " 69 69 #endif 70 70 71 - notrace int 71 + int 72 72 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 73 73 unsigned char *new_code) 74 74 { ··· 113 113 return faulted; 114 114 } 115 115 116 - notrace int ftrace_update_ftrace_func(ftrace_func_t func) 116 + int ftrace_update_ftrace_func(ftrace_func_t func) 117 117 { 118 118 unsigned long ip = (unsigned long)(&ftrace_call); 119 119 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 124 124 ret = ftrace_modify_code(ip, old, new); 125 125 126 126 return ret; 127 - } 128 - 129 - notrace int ftrace_mcount_set(unsigned long *data) 130 - { 131 - unsigned long ip = (long)(&mcount_call); 132 - unsigned long *addr = data; 133 - unsigned char old[MCOUNT_INSN_SIZE], *new; 134 - 135 - /* 136 - * Replace the mcount stub with a pointer to the 137 - * ip recorder function. 138 - */ 139 - memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); 140 - new = ftrace_call_replace(ip, *addr); 141 - *addr = ftrace_modify_code(ip, old, new); 142 - 143 - return 0; 144 127 } 145 128 146 129 int __init ftrace_dyn_arch_init(void *data)
+1 -1
arch/powerpc/kernel/ppc_ksyms.c
··· 68 68 EXPORT_SYMBOL(sys_sigreturn); 69 69 #endif 70 70 71 - #ifdef CONFIG_FTRACE 71 + #ifdef CONFIG_FUNCTION_TRACER 72 72 EXPORT_SYMBOL(_mcount); 73 73 #endif 74 74
+1 -1
arch/powerpc/platforms/powermac/Makefile
··· 1 1 CFLAGS_bootx_init.o += -fPIC 2 2 3 - ifdef CONFIG_FTRACE 3 + ifdef CONFIG_FUNCTION_TRACER 4 4 # Do not trace early boot code 5 5 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog 6 6 endif
+1 -2
arch/sparc64/Kconfig
··· 11 11 config SPARC64 12 12 bool 13 13 default y 14 - select HAVE_DYNAMIC_FTRACE 15 - select HAVE_FTRACE 14 + select HAVE_FUNCTION_TRACER 16 15 select HAVE_IDE 17 16 select HAVE_LMB 18 17 select HAVE_ARCH_KGDB
+1 -1
arch/sparc64/Kconfig.debug
··· 33 33 34 34 config MCOUNT 35 35 bool 36 - depends on STACK_DEBUG || FTRACE 36 + depends on STACK_DEBUG || FUNCTION_TRACER 37 37 default y 38 38 39 39 config FRAME_POINTER
+2
arch/sparc64/kernel/Makefile
··· 5 5 EXTRA_AFLAGS := -ansi 6 6 EXTRA_CFLAGS := -Werror 7 7 8 + CFLAGS_REMOVE_ftrace.o = -pg 9 + 8 10 extra-y := head.o init_task.o vmlinux.lds 9 11 10 12 obj-y := process.o setup.o cpu.o idprom.o reboot.o \
+4 -22
arch/sparc64/kernel/ftrace.c
··· 9 9 10 10 static const u32 ftrace_nop = 0x01000000; 11 11 12 - notrace unsigned char *ftrace_nop_replace(void) 12 + unsigned char *ftrace_nop_replace(void) 13 13 { 14 14 return (char *)&ftrace_nop; 15 15 } 16 16 17 - notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 17 + unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 18 18 { 19 19 static u32 call; 20 20 s32 off; ··· 25 25 return (unsigned char *) &call; 26 26 } 27 27 28 - notrace int 28 + int 29 29 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 30 30 unsigned char *new_code) 31 31 { ··· 59 59 return faulted; 60 60 } 61 61 62 - notrace int ftrace_update_ftrace_func(ftrace_func_t func) 62 + int ftrace_update_ftrace_func(ftrace_func_t func) 63 63 { 64 64 unsigned long ip = (unsigned long)(&ftrace_call); 65 65 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 68 68 new = ftrace_call_replace(ip, (unsigned long)func); 69 69 return ftrace_modify_code(ip, old, new); 70 70 } 71 - 72 - notrace int ftrace_mcount_set(unsigned long *data) 73 - { 74 - unsigned long ip = (long)(&mcount_call); 75 - unsigned long *addr = data; 76 - unsigned char old[MCOUNT_INSN_SIZE], *new; 77 - 78 - /* 79 - * Replace the mcount stub with a pointer to the 80 - * ip recorder function. 81 - */ 82 - memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); 83 - new = ftrace_call_replace(ip, *addr); 84 - *addr = ftrace_modify_code(ip, old, new); 85 - 86 - return 0; 87 - } 88 - 89 71 90 72 int __init ftrace_dyn_arch_init(void *data) 91 73 {
+2 -2
arch/sparc64/lib/mcount.S
··· 93 93 nop 94 94 1: 95 95 #endif 96 - #ifdef CONFIG_FTRACE 96 + #ifdef CONFIG_FUNCTION_TRACER 97 97 #ifdef CONFIG_DYNAMIC_FTRACE 98 98 mov %o7, %o0 99 99 .globl mcount_call ··· 119 119 .size _mcount,.-_mcount 120 120 .size mcount,.-mcount 121 121 122 - #ifdef CONFIG_FTRACE 122 + #ifdef CONFIG_FUNCTION_TRACER 123 123 .globl ftrace_stub 124 124 .type ftrace_stub,#function 125 125 ftrace_stub:
+1 -1
arch/x86/Kconfig
··· 28 28 select HAVE_KRETPROBES 29 29 select HAVE_FTRACE_MCOUNT_RECORD 30 30 select HAVE_DYNAMIC_FTRACE 31 - select HAVE_FTRACE 31 + select HAVE_FUNCTION_TRACER 32 32 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 33 33 select HAVE_ARCH_KGDB if !X86_VOYAGER 34 34 select HAVE_ARCH_TRACEHOOK
+2 -2
arch/x86/include/asm/ftrace.h
··· 1 1 #ifndef _ASM_X86_FTRACE_H 2 2 #define _ASM_X86_FTRACE_H 3 3 4 - #ifdef CONFIG_FTRACE 4 + #ifdef CONFIG_FUNCTION_TRACER 5 5 #define MCOUNT_ADDR ((long)(mcount)) 6 6 #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 7 7 ··· 19 19 } 20 20 #endif 21 21 22 - #endif /* CONFIG_FTRACE */ 22 + #endif /* CONFIG_FUNCTION_TRACER */ 23 23 24 24 #endif /* _ASM_X86_FTRACE_H */
+2 -1
arch/x86/kernel/Makefile
··· 6 6 7 7 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) 8 8 9 - ifdef CONFIG_FTRACE 9 + ifdef CONFIG_FUNCTION_TRACER 10 10 # Do not profile debug and lowlevel utilities 11 11 CFLAGS_REMOVE_tsc.o = -pg 12 12 CFLAGS_REMOVE_rtc.o = -pg 13 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 + CFLAGS_REMOVE_ftrace.o = -pg 14 15 endif 15 16 16 17 #
+2 -2
arch/x86/kernel/entry_32.S
··· 1149 1149 1150 1150 #endif /* CONFIG_XEN */ 1151 1151 1152 - #ifdef CONFIG_FTRACE 1152 + #ifdef CONFIG_FUNCTION_TRACER 1153 1153 #ifdef CONFIG_DYNAMIC_FTRACE 1154 1154 1155 1155 ENTRY(mcount) ··· 1204 1204 jmp ftrace_stub 1205 1205 END(mcount) 1206 1206 #endif /* CONFIG_DYNAMIC_FTRACE */ 1207 - #endif /* CONFIG_FTRACE */ 1207 + #endif /* CONFIG_FUNCTION_TRACER */ 1208 1208 1209 1209 .section .rodata,"a" 1210 1210 #include "syscall_table_32.S"
+2 -2
arch/x86/kernel/entry_64.S
··· 61 61 62 62 .code64 63 63 64 - #ifdef CONFIG_FTRACE 64 + #ifdef CONFIG_FUNCTION_TRACER 65 65 #ifdef CONFIG_DYNAMIC_FTRACE 66 66 ENTRY(mcount) 67 67 retq ··· 138 138 jmp ftrace_stub 139 139 END(mcount) 140 140 #endif /* CONFIG_DYNAMIC_FTRACE */ 141 - #endif /* CONFIG_FTRACE */ 141 + #endif /* CONFIG_FUNCTION_TRACER */ 142 142 143 143 #ifndef CONFIG_PREEMPT 144 144 #define retint_kernel retint_restore_args
+21 -29
arch/x86/kernel/ftrace.c
··· 21 21 #include <asm/nops.h> 22 22 23 23 24 - /* Long is fine, even if it is only 4 bytes ;-) */ 25 - static unsigned long *ftrace_nop; 24 + static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; 26 25 27 26 union ftrace_code_union { 28 27 char code[MCOUNT_INSN_SIZE]; ··· 32 33 }; 33 34 34 35 35 - static int notrace ftrace_calc_offset(long ip, long addr) 36 + static int ftrace_calc_offset(long ip, long addr) 36 37 { 37 38 return (int)(addr - ip); 38 39 } 39 40 40 - notrace unsigned char *ftrace_nop_replace(void) 41 + unsigned char *ftrace_nop_replace(void) 41 42 { 42 - return (char *)ftrace_nop; 43 + return ftrace_nop; 43 44 } 44 45 45 - notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 46 + unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 46 47 { 47 48 static union ftrace_code_union calc; 48 49 ··· 56 57 return calc.code; 57 58 } 58 59 59 - notrace int 60 + int 60 61 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 61 62 unsigned char *new_code) 62 63 { ··· 65 66 /* 66 67 * Note: Due to modules and __init, code can 67 68 * disappear and change, we need to protect against faulting 68 - * as well as code changing. 69 + * as well as code changing. We do this by using the 70 + * probe_kernel_* functions. 69 71 * 70 72 * No real locking needed, this code is run through 71 73 * kstop_machine, or before SMP starts. 72 74 */ 73 - if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE)) 74 - return 1; 75 75 76 + /* read the text we want to modify */ 77 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) 78 + return -EFAULT; 79 + 80 + /* Make sure it is what we expect it to be */ 76 81 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) 77 - return 2; 82 + return -EINVAL; 78 83 79 - WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code, 80 - MCOUNT_INSN_SIZE)); 84 + /* replace the text with the new text */ 85 + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) 86 + return -EPERM; 81 87 82 88 sync_core(); 83 89 84 90 return 0; 85 91 } 86 92 87 - notrace int ftrace_update_ftrace_func(ftrace_func_t func) 93 + int ftrace_update_ftrace_func(ftrace_func_t func) 88 94 { 89 95 unsigned long ip = (unsigned long)(&ftrace_call); 90 96 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 100 96 ret = ftrace_modify_code(ip, old, new); 101 97 102 98 return ret; 103 - } 104 - 105 - notrace int ftrace_mcount_set(unsigned long *data) 106 - { 107 - /* mcount is initialized as a nop */ 108 - *data = 0; 109 - return 0; 110 99 } 111 100 112 101 int __init ftrace_dyn_arch_init(void *data) ··· 124 127 * TODO: check the cpuid to determine the best nop. 
125 128 */ 126 129 asm volatile ( 127 - "jmp ftrace_test_jmp\n" 128 - /* This code needs to stay around */ 129 - ".section .text, \"ax\"\n" 130 130 "ftrace_test_jmp:" 131 131 "jmp ftrace_test_p6nop\n" 132 132 "nop\n" ··· 134 140 "jmp 1f\n" 135 141 "ftrace_test_nop5:" 136 142 ".byte 0x66,0x66,0x66,0x66,0x90\n" 137 - "jmp 1f\n" 138 - ".previous\n" 139 143 "1:" 140 144 ".section .fixup, \"ax\"\n" 141 145 "2: movl $1, %0\n" ··· 148 156 switch (faulted) { 149 157 case 0: 150 158 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); 151 - ftrace_nop = (unsigned long *)ftrace_test_p6nop; 159 + memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); 152 160 break; 153 161 case 1: 154 162 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); 155 - ftrace_nop = (unsigned long *)ftrace_test_nop5; 163 + memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); 156 164 break; 157 165 case 2: 158 166 pr_info("ftrace: converting mcount calls to jmp . + 5\n"); 159 - ftrace_nop = (unsigned long *)ftrace_test_jmp; 167 + memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); 160 168 break; 161 169 } 162 170
+1 -1
arch/x86/kernel/i386_ksyms_32.c
··· 5 5 #include <asm/desc.h> 6 6 #include <asm/ftrace.h> 7 7 8 - #ifdef CONFIG_FTRACE 8 + #ifdef CONFIG_FUNCTION_TRACER 9 9 /* mcount is defined in assembly */ 10 10 EXPORT_SYMBOL(mcount); 11 11 #endif
+1 -1
arch/x86/kernel/x8664_ksyms_64.c
··· 12 12 #include <asm/desc.h> 13 13 #include <asm/ftrace.h> 14 14 15 - #ifdef CONFIG_FTRACE 15 + #ifdef CONFIG_FUNCTION_TRACER 16 16 /* mcount is defined in assembly */ 17 17 EXPORT_SYMBOL(mcount); 18 18 #endif
+1 -1
arch/x86/xen/Makefile
··· 1 - ifdef CONFIG_FTRACE 1 + ifdef CONFIG_FUNCTION_TRACER 2 2 # Do not profile debug and lowlevel utilities 3 3 CFLAGS_REMOVE_spinlock.o = -pg 4 4 CFLAGS_REMOVE_time.o = -pg
+1
include/asm-generic/kdebug.h
··· 3 3 4 4 enum die_val { 5 5 DIE_UNUSED, 6 + DIE_OOPS=1 6 7 }; 7 8 8 9 #endif /* _ASM_GENERIC_KDEBUG_H */
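A note on the DIE_OOPS addition above: it gives architectures that only carry the asm-generic kdebug.h a usable DIE_OOPS value, so generic code can hook oops events through the die-notifier chain on every arch. A minimal sketch of such a consumer (hypothetical handler name; not code from this merge):

    #include <linux/kernel.h>
    #include <linux/kdebug.h>
    #include <linux/notifier.h>

    /* react when an oops is reported via the die chain */
    static int sketch_die_handler(struct notifier_block *self,
                                  unsigned long val, void *data)
    {
            if (val == DIE_OOPS)
                    pr_info("oops reported; dumping tracer state\n");
            return NOTIFY_OK;
    }

    static struct notifier_block sketch_die_nb = {
            .notifier_call = sketch_die_handler,
    };

    /* from an __init function: register_die_notifier(&sketch_die_nb); */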
+32 -16
include/linux/ftrace.h
··· 8 8 #include <linux/types.h> 9 9 #include <linux/kallsyms.h> 10 10 11 - #ifdef CONFIG_FTRACE 11 + #ifdef CONFIG_FUNCTION_TRACER 12 12 13 13 extern int ftrace_enabled; 14 14 extern int ··· 36 36 37 37 extern void ftrace_stub(unsigned long a0, unsigned long a1); 38 38 39 - #else /* !CONFIG_FTRACE */ 39 + #else /* !CONFIG_FUNCTION_TRACER */ 40 40 # define register_ftrace_function(ops) do { } while (0) 41 41 # define unregister_ftrace_function(ops) do { } while (0) 42 42 # define clear_ftrace_function(ops) do { } while (0) 43 - static inline void ftrace_kill_atomic(void) { } 44 - #endif /* CONFIG_FTRACE */ 43 + static inline void ftrace_kill(void) { } 44 + #endif /* CONFIG_FUNCTION_TRACER */ 45 45 46 46 #ifdef CONFIG_DYNAMIC_FTRACE 47 - # define FTRACE_HASHBITS 10 48 - # define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS) 49 47 50 48 enum { 51 49 FTRACE_FL_FREE = (1 << 0), ··· 56 58 }; 57 59 58 60 struct dyn_ftrace { 59 - struct hlist_node node; 60 - unsigned long ip; /* address of mcount call-site */ 61 - unsigned long flags; 61 + struct list_head list; 62 + unsigned long ip; /* address of mcount call-site */ 63 + unsigned long flags; 62 64 }; 63 65 64 66 int ftrace_force_update(void); ··· 69 71 extern unsigned char *ftrace_nop_replace(void); 70 72 extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); 71 73 extern int ftrace_dyn_arch_init(void *data); 72 - extern int ftrace_mcount_set(unsigned long *data); 73 - extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, 74 - unsigned char *new_code); 75 74 extern int ftrace_update_ftrace_func(ftrace_func_t func); 76 75 extern void ftrace_caller(void); 77 76 extern void ftrace_call(void); 78 77 extern void mcount_call(void); 78 + 79 + /** 80 + * ftrace_modify_code - modify code segment 81 + * @ip: the address of the code segment 82 + * @old_code: the contents of what is expected to be there 83 + * @new_code: the code to patch in 84 + * 85 + * This is a very sensitive operation and great care needs 86 + * to be taken by the arch. The operation should carefully 87 + * read the location, check to see if what is read is indeed 88 + * what we expect it to be, and then on success of the compare, 89 + * it should write to the location. 90 + * 91 + * Return must be: 92 + * 0 on success 93 + * -EFAULT on error reading the location 94 + * -EINVAL on a failed compare of the contents 95 + * -EPERM on error writing to the location 96 + * Any other value will be considered a failure. 97 + */ 98 + extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, 99 + unsigned char *new_code); 79 100 80 101 extern int skip_trace(unsigned long ip); 81 102 ··· 114 97 115 98 /* totally disable ftrace - can not re-enable after this */ 116 99 void ftrace_kill(void); 117 - void ftrace_kill_atomic(void); 118 100 119 101 static inline void tracer_disable(void) 120 102 { 121 - #ifdef CONFIG_FTRACE 103 + #ifdef CONFIG_FUNCTION_TRACER 122 104 ftrace_enabled = 0; 123 105 #endif 124 106 } ··· 129 113 */ 130 114 static inline int __ftrace_enabled_save(void) 131 115 { 132 - #ifdef CONFIG_FTRACE 116 + #ifdef CONFIG_FUNCTION_TRACER 133 117 int saved_ftrace_enabled = ftrace_enabled; 134 118 ftrace_enabled = 0; 135 119 return saved_ftrace_enabled; ··· 140 124 141 125 static inline void __ftrace_enabled_restore(int enabled) 142 126 { 143 - #ifdef CONFIG_FTRACE 127 + #ifdef CONFIG_FUNCTION_TRACER 144 128 ftrace_enabled = enabled; 145 129 #endif 146 130 }
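The kernel-doc added above pins down the ftrace_modify_code() contract. As a rough sketch, an arch implementation that follows it to the letter mirrors the x86 version in this merge (illustrative function name; real arches differ in detail):

    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/uaccess.h>      /* probe_kernel_read/probe_kernel_write */
    #include <asm/ftrace.h>         /* MCOUNT_INSN_SIZE */

    static int sketch_modify_code(unsigned long ip, unsigned char *old_code,
                                  unsigned char *new_code)
    {
            unsigned char replaced[MCOUNT_INSN_SIZE];

            /* read the text we want to modify */
            if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                    return -EFAULT;

            /* make sure it is what we expect it to be */
            if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                    return -EINVAL;

            /* replace the text with the new text */
            if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
                    return -EPERM;

            return 0;
    }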
+2 -2
kernel/Makefile
··· 13 13 14 14 CFLAGS_REMOVE_sched.o = -mno-spe 15 15 16 - ifdef CONFIG_FTRACE 16 + ifdef CONFIG_FUNCTION_TRACER 17 17 # Do not trace debug files and internal ftrace files 18 18 CFLAGS_REMOVE_lockdep.o = -pg 19 19 CFLAGS_REMOVE_lockdep_proc.o = -pg ··· 88 88 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o 89 89 obj-$(CONFIG_LATENCYTOP) += latencytop.o 90 90 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 91 - obj-$(CONFIG_FTRACE) += trace/ 91 + obj-$(CONFIG_FUNCTION_TRACER) += trace/ 92 92 obj-$(CONFIG_TRACING) += trace/ 93 93 obj-$(CONFIG_SMP) += sched_cpupri.o 94 94
+1 -1
kernel/sysctl.c
··· 474 474 .mode = 0644, 475 475 .proc_handler = &proc_dointvec, 476 476 }, 477 - #ifdef CONFIG_FTRACE 477 + #ifdef CONFIG_FUNCTION_TRACER 478 478 { 479 479 .ctl_name = CTL_UNNUMBERED, 480 480 .procname = "ftrace_enabled",
+14 -13
kernel/trace/Kconfig
··· 1 1 # 2 - # Architectures that offer an FTRACE implementation should select HAVE_FTRACE: 2 + # Architectures that offer an FUNCTION_TRACER implementation should 3 + # select HAVE_FUNCTION_TRACER: 3 4 # 4 5 5 6 config NOP_TRACER 6 7 bool 7 8 8 - config HAVE_FTRACE 9 + config HAVE_FUNCTION_TRACER 9 10 bool 10 11 select NOP_TRACER 11 12 ··· 29 28 select STACKTRACE 30 29 select TRACEPOINTS 31 30 32 - config FTRACE 31 + menu "Tracers" 32 + 33 + config FUNCTION_TRACER 33 34 bool "Kernel Function Tracer" 34 - depends on HAVE_FTRACE 35 + depends on HAVE_FUNCTION_TRACER 35 36 depends on DEBUG_KERNEL 36 37 select FRAME_POINTER 37 38 select TRACING ··· 52 49 default n 53 50 depends on TRACE_IRQFLAGS_SUPPORT 54 51 depends on GENERIC_TIME 55 - depends on HAVE_FTRACE 56 52 depends on DEBUG_KERNEL 57 53 select TRACE_IRQFLAGS 58 54 select TRACING ··· 75 73 default n 76 74 depends on GENERIC_TIME 77 75 depends on PREEMPT 78 - depends on HAVE_FTRACE 79 76 depends on DEBUG_KERNEL 80 77 select TRACING 81 78 select TRACER_MAX_TRACE ··· 102 101 103 102 config SCHED_TRACER 104 103 bool "Scheduling Latency Tracer" 105 - depends on HAVE_FTRACE 106 104 depends on DEBUG_KERNEL 107 105 select TRACING 108 106 select CONTEXT_SWITCH_TRACER ··· 112 112 113 113 config CONTEXT_SWITCH_TRACER 114 114 bool "Trace process context switches" 115 - depends on HAVE_FTRACE 116 115 depends on DEBUG_KERNEL 117 116 select TRACING 118 117 select MARKERS ··· 121 122 122 123 config BOOT_TRACER 123 124 bool "Trace boot initcalls" 124 - depends on HAVE_FTRACE 125 125 depends on DEBUG_KERNEL 126 126 select TRACING 127 + select CONTEXT_SWITCH_TRACER 127 128 help 128 129 This tracer helps developers to optimize boot times: it records 129 130 the timings of the initcalls and traces key events and the identity ··· 140 141 141 142 config STACK_TRACER 142 143 bool "Trace max stack" 143 - depends on HAVE_FTRACE 144 + depends on HAVE_FUNCTION_TRACER 144 145 depends on DEBUG_KERNEL 145 - select FTRACE 146 + select FUNCTION_TRACER 146 147 select STACKTRACE 147 148 help 148 149 This special tracer records the maximum stack footprint of the ··· 159 160 160 161 config DYNAMIC_FTRACE 161 162 bool "enable/disable ftrace tracepoints dynamically" 162 - depends on FTRACE 163 + depends on FUNCTION_TRACER 163 164 depends on HAVE_DYNAMIC_FTRACE 164 165 depends on DEBUG_KERNEL 165 166 default y ··· 169 170 with a No-Op instruction) as they are called. A table is 170 171 created to dynamically enable them again. 171 172 172 - This way a CONFIG_FTRACE kernel is slightly larger, but otherwise 173 + This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise 173 174 has native performance as long as no tracing is active. 174 175 175 176 The changes to the code are done by a kernel thread that ··· 194 195 a series of tests are made to verify that the tracer is 195 196 functioning properly. It will do tests on all the configured 196 197 tracers of ftrace. 198 + 199 + endmenu
+3 -3
kernel/trace/Makefile
··· 1 1 2 2 # Do not instrument the tracer itself: 3 3 4 - ifdef CONFIG_FTRACE 4 + ifdef CONFIG_FUNCTION_TRACER 5 5 ORIG_CFLAGS := $(KBUILD_CFLAGS) 6 6 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 7 7 ··· 10 10 obj-y += trace_selftest_dynamic.o 11 11 endif 12 12 13 - obj-$(CONFIG_FTRACE) += libftrace.o 13 + obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o 14 14 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o 15 15 16 16 obj-$(CONFIG_TRACING) += trace.o 17 17 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 18 18 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o 19 - obj-$(CONFIG_FTRACE) += trace_functions.o 19 + obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o 20 20 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 21 21 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 22 22 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+92 -516
kernel/trace/ftrace.c
··· 25 25 #include <linux/ftrace.h> 26 26 #include <linux/sysctl.h> 27 27 #include <linux/ctype.h> 28 - #include <linux/hash.h> 29 28 #include <linux/list.h> 30 29 31 30 #include <asm/ftrace.h> 32 31 33 32 #include "trace.h" 33 + 34 + #define FTRACE_WARN_ON(cond) \ 35 + do { \ 36 + if (WARN_ON(cond)) \ 37 + ftrace_kill(); \ 38 + } while (0) 39 + 40 + #define FTRACE_WARN_ON_ONCE(cond) \ 41 + do { \ 42 + if (WARN_ON_ONCE(cond)) \ 43 + ftrace_kill(); \ 44 + } while (0) 34 45 35 46 /* ftrace_enabled is a method to turn ftrace on or off */ 36 47 int ftrace_enabled __read_mostly;
··· 164 153 } 165 154 166 155 #ifdef CONFIG_DYNAMIC_FTRACE 167 - 168 156 #ifndef CONFIG_FTRACE_MCOUNT_RECORD 169 - /* 170 - * The hash lock is only needed when the recording of the mcount 171 - * callers are dynamic. That is, by the caller themselves and 172 - * not recorded via the compilation. 173 - */ 174 - static DEFINE_SPINLOCK(ftrace_hash_lock); 175 - #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) 176 - #define ftrace_hash_unlock(flags) \ 177 - spin_unlock_irqrestore(&ftrace_hash_lock, flags) 178 - #else 179 - /* This is protected via the ftrace_lock with MCOUNT_RECORD. */ 180 - #define ftrace_hash_lock(flags) do { (void)(flags); } while (0) 181 - #define ftrace_hash_unlock(flags) do { } while(0) 157 + # error Dynamic ftrace depends on MCOUNT_RECORD 182 158 #endif 183 159 184 160 /*
··· 175 177 * it instead. 176 178 */ 177 179 static unsigned long mcount_addr = MCOUNT_ADDR; 178 - 179 - static struct task_struct *ftraced_task; 180 180 181 181 enum { 182 182 FTRACE_ENABLE_CALLS = (1 << 0),
··· 186 190 187 191 static int ftrace_filtered; 188 192 static int tracing_on; 189 - static int frozen_record_count; 190 193 191 - static struct hlist_head ftrace_hash[FTRACE_HASHSIZE]; 194 + static LIST_HEAD(ftrace_new_addrs); 192 195 193 - static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); 194 - 195 - static DEFINE_MUTEX(ftraced_lock); 196 196 static DEFINE_MUTEX(ftrace_regex_lock); 197 197 198 198 struct ftrace_page {
··· 206 214 static struct ftrace_page *ftrace_pages_start; 207 215 static struct ftrace_page *ftrace_pages; 208 216 209 - static int ftraced_trigger; 210 - static int ftraced_suspend; 211 - static int ftraced_stop; 212 - 213 - static int ftrace_record_suspend; 214 - 215 217 static struct dyn_ftrace *ftrace_free_records; 216 218 217 219 218 220 #ifdef CONFIG_KPROBES 221 + 222 + static int frozen_record_count; 223 + 219 224 static inline void freeze_record(struct dyn_ftrace *rec) 220 225 { 221 226 if (!(rec->flags & FTRACE_FL_FROZEN)) {
··· 238 249 # define unfreeze_record(rec) ({ 0; }) 239 250 # define record_frozen(rec) ({ 0; }) 240 251 #endif /* CONFIG_KPROBES */ 241 - 242 - int skip_trace(unsigned long ip) 243 - { 244 - unsigned long fl; 245 - struct dyn_ftrace *rec; 246 - struct hlist_node *t; 247 - struct hlist_head *head; 248 - 249 - if (frozen_record_count == 0) 250 - return 0; 251 - 252 - head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)]; 253 - hlist_for_each_entry_rcu(rec, t, head, node) { 254 - if (rec->ip == ip) { 255 - if (record_frozen(rec)) { 256 - if (rec->flags & FTRACE_FL_FAILED) 257 - return 1; 258 - 259 - if (!(rec->flags & FTRACE_FL_CONVERTED)) 260 - return 1; 261 - 262 - if (!tracing_on || !ftrace_enabled) 263 - return 1; 264 - 265 - if (ftrace_filtered) { 266 - fl = rec->flags & (FTRACE_FL_FILTER | 267 - FTRACE_FL_NOTRACE); 268 - if (!fl || (fl & FTRACE_FL_NOTRACE)) 269 - return 1; 270 - } 271 - } 272 - break; 273 - } 274 - } 275 - 276 - return 0; 277 - } 278 - 279 - static inline int 280 - ftrace_ip_in_hash(unsigned long ip, unsigned long key) 281 - { 282 - struct dyn_ftrace *p; 283 - struct hlist_node *t; 284 - int found = 0; 285 - 286 - hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) { 287 - if (p->ip == ip) { 288 - found = 1; 289 - break; 290 - } 291 - } 292 - 293 - return found; 294 - } 295 - 296 - static inline void 297 - ftrace_add_hash(struct dyn_ftrace *node, unsigned long key) 298 - { 299 - hlist_add_head_rcu(&node->node, &ftrace_hash[key]); 300 - } 301 - 302 - /* called from kstop_machine */ 303 - static inline void ftrace_del_hash(struct dyn_ftrace *node) 304 - { 305 - hlist_del(&node->node); 306 - } 307 252 308 253 static void ftrace_free_rec(struct dyn_ftrace *rec) 309 254 {
··· 269 346 } 270 347 } 271 348 spin_unlock(&ftrace_lock); 272 - 273 349 } 274 350 275 351 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
··· 280 358 rec = ftrace_free_records; 281 359 282 360 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { 283 - WARN_ON_ONCE(1); 361 + FTRACE_WARN_ON_ONCE(1); 284 362 ftrace_free_records = NULL; 285 - ftrace_disabled = 1; 286 - ftrace_enabled = 0; 287 363 return NULL; 288 364 } 289 365
··· 291 371 } 292 372 293 373 if (ftrace_pages->index == ENTRIES_PER_PAGE) { 294 - if (!ftrace_pages->next) 295 - return NULL; 374 + if (!ftrace_pages->next) { 375 + /* allocate another page */ 376 + ftrace_pages->next = 377 + (void *)get_zeroed_page(GFP_KERNEL); 378 + if (!ftrace_pages->next) 379 + return NULL; 380 + } 296 381 ftrace_pages = ftrace_pages->next; 297 382 } 298 383 299 384 return &ftrace_pages->records[ftrace_pages->index++]; 300 385 } 301 386 302 - static void 387 + static struct dyn_ftrace * 303 388 ftrace_record_ip(unsigned long ip) 304 389 { 305 - struct dyn_ftrace *node; 306 - unsigned long flags; 307 - unsigned long key; 308 - int resched; 309 - int cpu; 390 + struct dyn_ftrace *rec; 310 391 311 392 if (!ftrace_enabled || ftrace_disabled) 312 - return; 393 + return NULL; 313 394 314 - resched = need_resched(); 315 - preempt_disable_notrace(); 395 + rec = ftrace_alloc_dyn_node(ip); 396 + if (!rec) 397 + return NULL; 316 398 317 - /* 318 - * We simply need to protect against recursion. 319 - * Use the the raw version of smp_processor_id and not 320 - * __get_cpu_var which can call debug hooks that can 321 - * cause a recursive crash here. 322 - */ 323 - cpu = raw_smp_processor_id(); 324 - per_cpu(ftrace_shutdown_disable_cpu, cpu)++; 325 - if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1) 326 - goto out; 399 + rec->ip = ip; 327 400 328 - if (unlikely(ftrace_record_suspend)) 329 - goto out; 401 + list_add(&rec->list, &ftrace_new_addrs); 330 402 331 - key = hash_long(ip, FTRACE_HASHBITS); 332 - 333 - WARN_ON_ONCE(key >= FTRACE_HASHSIZE); 334 - 335 - if (ftrace_ip_in_hash(ip, key)) 336 - goto out; 337 - 338 - ftrace_hash_lock(flags); 339 - 340 - /* This ip may have hit the hash before the lock */ 341 - if (ftrace_ip_in_hash(ip, key)) 342 - goto out_unlock; 343 - 344 - node = ftrace_alloc_dyn_node(ip); 345 - if (!node) 346 - goto out_unlock; 347 - 348 - node->ip = ip; 349 - 350 - ftrace_add_hash(node, key); 351 - 352 - ftraced_trigger = 1; 353 - 354 - out_unlock: 355 - ftrace_hash_unlock(flags); 356 - out: 357 - per_cpu(ftrace_shutdown_disable_cpu, cpu)--; 358 - 359 - /* prevent recursion with scheduler */ 360 - if (resched) 361 - preempt_enable_no_resched_notrace(); 362 - else 363 - preempt_enable_notrace(); 403 + return rec; 364 404 } 365 405 366 406 #define FTRACE_ADDR ((long)(ftrace_caller))
··· 439 559 rec->flags |= FTRACE_FL_FAILED; 440 560 if ((system_state == SYSTEM_BOOTING) || 441 561 !core_kernel_text(rec->ip)) { 442 - ftrace_del_hash(rec); 443 562 ftrace_free_rec(rec); 444 563 } 445 564 } 446 565 } 447 566 } 448 567 } 449 - } 450 - 451 - static void ftrace_shutdown_replenish(void) 452 - { 453 - if (ftrace_pages->next) 454 - return; 455 - 456 - /* allocate another page */ 457 - ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); 458 568 } 459 569 460 570 static void print_ip_ins(const char *fmt, unsigned char *p)
··· 461 591 { 462 592 unsigned long ip; 463 593 unsigned char *nop, *call; 464 - int failed; 594 + int ret; 465 595 466 596 ip = rec->ip; 467 597 468 598 nop = ftrace_nop_replace(); 469 599 call = ftrace_call_replace(ip, mcount_addr); 470 600 471 - failed = ftrace_modify_code(ip, call, nop); 472 - if (failed) { 473 - switch (failed) { 474 - case 1: 475 - WARN_ON_ONCE(1); 601 + ret = ftrace_modify_code(ip, call, nop); 602 + if (ret) { 603 + switch (ret) { 604 + case -EFAULT: 605 + FTRACE_WARN_ON_ONCE(1); 476 606 pr_info("ftrace faulted on modifying "); 477 607 print_ip_sym(ip); 478 608 break; 479 - case 2: 480 - WARN_ON_ONCE(1); 609 + case -EINVAL: 610 + FTRACE_WARN_ON_ONCE(1); 481 611 pr_info("ftrace failed to modify "); 482 612 print_ip_sym(ip); 483 613 print_ip_ins(" expected: ", call);
··· 485 615 print_ip_ins(" replace: ", nop); 486 616 printk(KERN_CONT "\n"); 487 617 break; 618 + case -EPERM: 619 + FTRACE_WARN_ON_ONCE(1); 620 + pr_info("ftrace faulted on writing "); 621 + print_ip_sym(ip); 622 + break; 623 + default: 624 + FTRACE_WARN_ON_ONCE(1); 625 + pr_info("ftrace faulted on unknown error "); 626 + print_ip_sym(ip); 488 627 } 489 628 490 629 rec->flags |= FTRACE_FL_FAILED;
··· 502 623 return 1; 503 624 } 504 625 505 - static int __ftrace_update_code(void *ignore); 506 - 507 626 static int __ftrace_modify_code(void *data) 508 627 { 509 - unsigned long addr; 510 628 int *command = data; 511 629 512 630 if (*command & FTRACE_ENABLE_CALLS) { 513 - /* 514 - * Update any recorded ips now that we have the 515 - * machine stopped 516 - */ 517 - __ftrace_update_code(NULL); 518 631 ftrace_replace_code(1); 519 632 tracing_on = 1; 520 633 } else if (*command & FTRACE_DISABLE_CALLS) {
··· 517 646 if (*command & FTRACE_UPDATE_TRACE_FUNC) 518 647 ftrace_update_ftrace_func(ftrace_trace_function); 519 648 520 - if (*command & FTRACE_ENABLE_MCOUNT) { 521 - addr = (unsigned long)ftrace_record_ip; 522 - ftrace_mcount_set(&addr); 523 - } else if (*command & FTRACE_DISABLE_MCOUNT) { 524 - addr = (unsigned long)ftrace_stub; 525 - ftrace_mcount_set(&addr); 526 - } 527 - 528 649 return 0; 529 650 } 530 651
··· 525 662 stop_machine(__ftrace_modify_code, &command, NULL); 526 663 } 527 664 528 - void ftrace_disable_daemon(void) 529 - { 530 - /* Stop the daemon from calling kstop_machine */ 531 - mutex_lock(&ftraced_lock); 532 - ftraced_stop = 1; 533 - mutex_unlock(&ftraced_lock); 534 - 535 - ftrace_force_update(); 536 - } 537 - 538 - void ftrace_enable_daemon(void) 539 - { 540 - mutex_lock(&ftraced_lock); 541 - ftraced_stop = 0; 542 - mutex_unlock(&ftraced_lock); 543 - 544 - ftrace_force_update(); 545 - } 546 - 547 665 static ftrace_func_t saved_ftrace_func; 666 + static int ftrace_start; 667 + static DEFINE_MUTEX(ftrace_start_lock); 548 668 549 669 static void ftrace_startup(void) 550 670 {
··· 536 690 if (unlikely(ftrace_disabled)) 537 691 return; 538 692 539 - mutex_lock(&ftraced_lock); 540 - ftraced_suspend++; 541 - if (ftraced_suspend == 1) 693 + mutex_lock(&ftrace_start_lock); 694 + ftrace_start++; 695 + if (ftrace_start == 1) 542 696 command |= FTRACE_ENABLE_CALLS; 543 697 544 698 if (saved_ftrace_func != ftrace_trace_function) {
··· 551 705 552 706 ftrace_run_update_code(command); 553 707 out: 554 - mutex_unlock(&ftraced_lock); 708 + mutex_unlock(&ftrace_start_lock); 555 709 } 556 710 557 711 static void ftrace_shutdown(void)
··· 561 715 if (unlikely(ftrace_disabled)) 562 716 return; 563 717 564 - mutex_lock(&ftraced_lock); 565 - ftraced_suspend--; 566 - if (!ftraced_suspend) 718 + mutex_lock(&ftrace_start_lock); 719 + ftrace_start--; 720 + if (!ftrace_start) 567 721 command |= FTRACE_DISABLE_CALLS; 568 722 569 723 if (saved_ftrace_func != ftrace_trace_function) {
··· 576 730 577 731 ftrace_run_update_code(command); 578 732 out: 579 - mutex_unlock(&ftraced_lock); 733 + mutex_unlock(&ftrace_start_lock); 580 734 } 581 735 582 736 static void ftrace_startup_sysctl(void)
··· 586 740 if (unlikely(ftrace_disabled)) 587 741 return; 588 742 589 - mutex_lock(&ftraced_lock); 743 + mutex_lock(&ftrace_start_lock); 590 744 /* Force update next time */ 591 745 saved_ftrace_func = NULL; 592 - /* ftraced_suspend is true if we want ftrace running */ 593 - if (ftraced_suspend) 746 + /* ftrace_start is true if we want ftrace running */ 747 + if (ftrace_start) 594 748 command |= FTRACE_ENABLE_CALLS; 595 749 596 750 ftrace_run_update_code(command); 597 - mutex_unlock(&ftraced_lock); 751 + mutex_unlock(&ftrace_start_lock); 598 752 } 599 753 600 754 static void ftrace_shutdown_sysctl(void)
··· 604 758 if (unlikely(ftrace_disabled)) 605 759 return; 606 760 607 - mutex_lock(&ftraced_lock); 608 - /* ftraced_suspend is true if ftrace is running */ 609 - if (ftraced_suspend) 761 + mutex_lock(&ftrace_start_lock); 762 + /* ftrace_start is true if ftrace is running */ 763 + if (ftrace_start) 610 764 command |= FTRACE_DISABLE_CALLS; 611 765 612 766 ftrace_run_update_code(command); 613 - mutex_unlock(&ftraced_lock); 767 + mutex_unlock(&ftrace_start_lock); 614 768 } 615 769 616 770 static cycle_t ftrace_update_time; 617 771 static unsigned long ftrace_update_cnt; 618 772 unsigned long ftrace_update_tot_cnt; 619 773 620 - static int __ftrace_update_code(void *ignore) 774 + static int ftrace_update_code(void) 621 775 { 622 - int i, save_ftrace_enabled; 776 + struct dyn_ftrace *p, *t; 623 777 cycle_t start, stop; 624 - struct dyn_ftrace *p; 625 - struct hlist_node *t, *n; 626 - struct hlist_head *head, temp_list; 627 - 628 - /* Don't be recording funcs now */ 629 - ftrace_record_suspend++; 630 - save_ftrace_enabled = ftrace_enabled; 631 - ftrace_enabled = 0; 632 778 633 779 start = ftrace_now(raw_smp_processor_id()); 634 780 ftrace_update_cnt = 0; 635 781 636 - /* No locks needed, the machine is stopped! */ 637 - for (i = 0; i < FTRACE_HASHSIZE; i++) { 638 - INIT_HLIST_HEAD(&temp_list); 639 - head = &ftrace_hash[i]; 782 + list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { 640 783 641 - /* all CPUS are stopped, we are safe to modify code */ 642 - hlist_for_each_entry_safe(p, t, n, head, node) { 643 - /* Skip over failed records which have not been 644 - * freed. */ 645 - if (p->flags & FTRACE_FL_FAILED) 646 - continue; 784 + /* If something went wrong, bail without enabling anything */ 785 + if (unlikely(ftrace_disabled)) 786 + return -1; 647 787 648 - /* Unconverted records are always at the head of the 649 - * hash bucket. Once we encounter a converted record, 650 - * simply skip over to the next bucket. Saves ftraced 651 - * some processor cycles (ftrace does its bid for 652 - * global warming :-p ). */ 653 - if (p->flags & (FTRACE_FL_CONVERTED)) 654 - break; 788 + list_del_init(&p->list); 655 789 656 - /* Ignore updates to this record's mcount site. 657 - * Reintroduce this record at the head of this 658 - * bucket to attempt to "convert" it again if 659 - * the kprobe on it is unregistered before the 660 - * next run. */ 661 - if (get_kprobe((void *)p->ip)) { 662 - ftrace_del_hash(p); 663 - INIT_HLIST_NODE(&p->node); 664 - hlist_add_head(&p->node, &temp_list); 665 - freeze_record(p); 666 - continue; 667 - } else { 668 - unfreeze_record(p); 669 - } 670 - 671 - /* convert record (i.e, patch mcount-call with NOP) */ 672 - if (ftrace_code_disable(p)) { 673 - p->flags |= FTRACE_FL_CONVERTED; 674 - ftrace_update_cnt++; 675 - } else { 676 - if ((system_state == SYSTEM_BOOTING) || 677 - !core_kernel_text(p->ip)) { 678 - ftrace_del_hash(p); 679 - ftrace_free_rec(p); 680 - } 681 - } 682 - } 683 - 684 - hlist_for_each_entry_safe(p, t, n, &temp_list, node) { 685 - hlist_del(&p->node); 686 - INIT_HLIST_NODE(&p->node); 687 - hlist_add_head(&p->node, head); 688 - } 790 + /* convert record (i.e, patch mcount-call with NOP) */ 791 + if (ftrace_code_disable(p)) { 792 + p->flags |= FTRACE_FL_CONVERTED; 793 + ftrace_update_cnt++; 794 + } else 795 + ftrace_free_rec(p); 689 796 } 690 797 691 798 stop = ftrace_now(raw_smp_processor_id()); 692 799 ftrace_update_time = stop - start; 693 800 ftrace_update_tot_cnt += ftrace_update_cnt; 694 - ftraced_trigger = 0; 695 - 696 - ftrace_enabled = save_ftrace_enabled; 697 - ftrace_record_suspend--; 698 801 699 802 return 0; 700 - } 701 - 702 - static int ftrace_update_code(void) 703 - { 704 - if (unlikely(ftrace_disabled) || 705 - !ftrace_enabled || !ftraced_trigger) 706 - return 0; 707 - 708 - stop_machine(__ftrace_update_code, NULL, NULL); 709 - 710 - return 1; 711 803 } 712 804 713 805 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
··· 676 892 pg = ftrace_pages = ftrace_pages_start; 677 893 678 894 cnt = num_to_init / ENTRIES_PER_PAGE; 679 - pr_info("ftrace: allocating %ld hash entries in %d pages\n", 895 + pr_info("ftrace: allocating %ld entries in %d pages\n", 680 896 num_to_init, cnt); 681 897 682 898 for (i = 0; i < cnt; i++) {
··· 1185 1401 } 1186 1402 1187 1403 mutex_lock(&ftrace_sysctl_lock); 1188 - mutex_lock(&ftraced_lock); 1189 - if (iter->filtered && ftraced_suspend && ftrace_enabled) 1404 + mutex_lock(&ftrace_start_lock); 1405 + if (iter->filtered && ftrace_start && ftrace_enabled) 1190 1406 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1191 - mutex_unlock(&ftraced_lock); 1407 + mutex_unlock(&ftrace_start_lock); 1192 1408 mutex_unlock(&ftrace_sysctl_lock); 1193 1409 1194 1410 kfree(iter);
··· 1206 1422 ftrace_notrace_release(struct inode *inode, struct file *file) 1207 1423 { 1208 1424 return ftrace_regex_release(inode, file, 0); 1209 - } 1210 - 1211 - static ssize_t 1212 - ftraced_read(struct file *filp, char __user *ubuf, 1213 - size_t cnt, loff_t *ppos) 1214 - { 1215 - /* don't worry about races */ 1216 - char *buf = ftraced_stop ? "disabled\n" : "enabled\n"; 1217 - int r = strlen(buf); 1218 - 1219 - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 1220 - } 1221 - 1222 - static ssize_t 1223 - ftraced_write(struct file *filp, const char __user *ubuf, 1224 - size_t cnt, loff_t *ppos) 1225 - { 1226 - char buf[64]; 1227 - long val; 1228 - int ret; 1229 - 1230 - if (cnt >= sizeof(buf)) 1231 - return -EINVAL; 1232 - 1233 - if (copy_from_user(&buf, ubuf, cnt)) 1234 - return -EFAULT; 1235 - 1236 - if (strncmp(buf, "enable", 6) == 0) 1237 - val = 1; 1238 - else if (strncmp(buf, "disable", 7) == 0) 1239 - val = 0; 1240 - else { 1241 - buf[cnt] = 0; 1242 - 1243 - ret = strict_strtoul(buf, 10, &val); 1244 - if (ret < 0) 1245 - return ret; 1246 - 1247 - val = !!val; 1248 - } 1249 - 1250 - if (val) 1251 - ftrace_enable_daemon(); 1252 - else 1253 - ftrace_disable_daemon(); 1254 - 1255 - filp->f_pos += cnt; 1256 - 1257 - return cnt; 1258 1425 } 1259 1426 1260 1427 static struct file_operations ftrace_avail_fops = {
··· 1238 1503 .release = ftrace_notrace_release, 1239 1504 }; 1240 1505 1241 - static struct file_operations ftraced_fops = { 1242 - .open = tracing_open_generic, 1243 - .read = ftraced_read, 1244 - .write = ftraced_write, 1245 - }; 1246 - 1247 - /** 1248 - * ftrace_force_update - force an update to all recording ftrace functions 1249 - */ 1250 - int ftrace_force_update(void) 1251 - { 1252 - int ret = 0; 1253 - 1254 - if (unlikely(ftrace_disabled)) 1255 - return -ENODEV; 1256 - 1257 - mutex_lock(&ftrace_sysctl_lock); 1258 - mutex_lock(&ftraced_lock); 1259 - 1260 - /* 1261 - * If ftraced_trigger is not set, then there is nothing 1262 - * to update. 1263 - */ 1264 - if (ftraced_trigger && !ftrace_update_code()) 1265 - ret = -EBUSY; 1266 - 1267 - mutex_unlock(&ftraced_lock); 1268 - mutex_unlock(&ftrace_sysctl_lock); 1269 - 1270 - return ret; 1271 - } 1272 - 1273 - static void ftrace_force_shutdown(void) 1274 - { 1275 - struct task_struct *task; 1276 - int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC; 1277 - 1278 - mutex_lock(&ftraced_lock); 1279 - task = ftraced_task; 1280 - ftraced_task = NULL; 1281 - ftraced_suspend = -1; 1282 - ftrace_run_update_code(command); 1283 - mutex_unlock(&ftraced_lock); 1284 - 1285 - if (task) 1286 - kthread_stop(task); 1287 - } 1288 - 1289 1506 static __init int ftrace_init_debugfs(void) 1290 1507 { 1291 1508 struct dentry *d_tracer;
··· 1268 1581 pr_warning("Could not create debugfs " 1269 1582 "'set_ftrace_notrace' entry\n"); 1270 1583 1271 - entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer, 1272 - NULL, &ftraced_fops); 1273 - if (!entry) 1274 - pr_warning("Could not create debugfs " 1275 - "'ftraced_enabled' entry\n"); 1276 1584 return 0; 1277 1585 } 1278 1586 1279 1587 fs_initcall(ftrace_init_debugfs); 1280 1588 1281 - #ifdef CONFIG_FTRACE_MCOUNT_RECORD 1282 1589 static int ftrace_convert_nops(unsigned long *start, 1283 1590 unsigned long *end) 1284 1591 {
··· 1280 1599 unsigned long addr; 1281 1600 unsigned long flags; 1282 1601 1602 + mutex_lock(&ftrace_start_lock); 1283 1603 p = start; 1284 1604 while (p < end) { 1285 1605 addr = ftrace_call_adjust(*p++); 1286 - /* should not be called from interrupt context */ 1287 - spin_lock(&ftrace_lock); 1288 1606 ftrace_record_ip(addr); 1289 - spin_unlock(&ftrace_lock); 1290 - ftrace_shutdown_replenish(); 1291 1607 } 1292 1608 1293 - /* p is ignored */ 1609 + /* disable interrupts to prevent kstop machine */ 1294 1610 local_irq_save(flags); 1295 - __ftrace_update_code(p); 1611 + ftrace_update_code(); 1296 1612 local_irq_restore(flags); 1613 + mutex_unlock(&ftrace_start_lock); 1297 1614 1298 1615 return 0; 1299 1616 }
··· 1337 1658 failed: 1338 1659 ftrace_disabled = 1; 1339 1660 } 1340 - #else /* CONFIG_FTRACE_MCOUNT_RECORD */ 1341 - static int ftraced(void *ignore) 1342 - { 1343 - unsigned long usecs; 1344 - 1345 - while (!kthread_should_stop()) { 1346 - 1347 - set_current_state(TASK_INTERRUPTIBLE); 1348 - 1349 - /* check once a second */ 1350 - schedule_timeout(HZ); 1351 - 1352 - if (unlikely(ftrace_disabled)) 1353 - continue; 1354 - 1355 - mutex_lock(&ftrace_sysctl_lock); 1356 - mutex_lock(&ftraced_lock); 1357 - if (!ftraced_suspend && !ftraced_stop && 1358 - ftrace_update_code()) { 1359 - usecs = nsecs_to_usecs(ftrace_update_time); 1360 - if (ftrace_update_tot_cnt > 100000) { 1361 - ftrace_update_tot_cnt = 0; 1362 - pr_info("hm, dftrace overflow: %lu change%s" 1363 - " (%lu total) in %lu usec%s\n", 1364 - ftrace_update_cnt, 1365 - ftrace_update_cnt != 1 ? "s" : "", 1366 - ftrace_update_tot_cnt, 1367 - usecs, usecs != 1 ? "s" : ""); 1368 - ftrace_disabled = 1; 1369 - WARN_ON_ONCE(1); 1370 - } 1371 - } 1372 - mutex_unlock(&ftraced_lock); 1373 - mutex_unlock(&ftrace_sysctl_lock); 1374 - 1375 - ftrace_shutdown_replenish(); 1376 - } 1377 - __set_current_state(TASK_RUNNING); 1378 - return 0; 1379 - } 1380 - 1381 - static int __init ftrace_dynamic_init(void) 1382 - { 1383 - struct task_struct *p; 1384 - unsigned long addr; 1385 - int ret; 1386 - 1387 - addr = (unsigned long)ftrace_record_ip; 1388 - 1389 - stop_machine(ftrace_dyn_arch_init, &addr, NULL); 1390 - 1391 - /* ftrace_dyn_arch_init places the return code in addr */ 1392 - if (addr) { 1393 - ret = (int)addr; 1394 - goto failed; 1395 - } 1396 - 1397 - ret = ftrace_dyn_table_alloc(NR_TO_INIT); 1398 - if (ret) 1399 - goto failed; 1400 - 1401 - p = kthread_run(ftraced, NULL, "ftraced"); 1402 - if (IS_ERR(p)) { 1403 - ret = -1; 1404 - goto failed; 1405 - } 1406 - 1407 - last_ftrace_enabled = ftrace_enabled = 1; 1408 - ftraced_task = p; 1409 - 1410 - return 0; 1411 - 1412 - failed: 1413 - ftrace_disabled = 1; 1414 - return ret; 1415 - } 1416 - 1417 - core_initcall(ftrace_dynamic_init); 1418 - #endif /* CONFIG_FTRACE_MCOUNT_RECORD */ 1419 1661 1420 1662 #else 1421 1663 # define ftrace_startup() do { } while (0) 1422 1664 # define ftrace_shutdown() do { } while (0) 1423 1665 # define ftrace_startup_sysctl() do { } while (0) 1424 1666 # define ftrace_shutdown_sysctl() do { } while (0) 1425 - # define ftrace_force_shutdown() do { } while (0) 1426 1667 #endif /* CONFIG_DYNAMIC_FTRACE */ 1427 1668 1428 1669 /** 1429 - * ftrace_kill_atomic - kill ftrace from critical sections 1670 + * ftrace_kill - kill ftrace 1430 1671 * 1431 1672 * This function should be used by panic code. It stops ftrace 1432 1673 * but in a not so nice way. If you need to simply kill ftrace 1433 1674 * from a non-atomic section, use ftrace_kill. 1434 1675 */ 1435 - void ftrace_kill_atomic(void) 1436 - { 1437 - ftrace_disabled = 1; 1438 - ftrace_enabled = 0; 1439 - #ifdef CONFIG_DYNAMIC_FTRACE 1440 - ftraced_suspend = -1; 1441 - #endif 1442 - clear_ftrace_function(); 1443 - } 1444 - 1445 - /** 1446 - * ftrace_kill - totally shutdown ftrace 1447 - * 1448 - * This is a safety measure. If something was detected that seems 1449 - * wrong, calling this function will keep ftrace from doing 1450 - * any more modifications, and updates. 1451 - * used when something went wrong. 1452 - */ 1453 1676 void ftrace_kill(void) 1454 1677 { 1455 - mutex_lock(&ftrace_sysctl_lock); 1456 1678 ftrace_disabled = 1; 1457 1679 ftrace_enabled = 0; 1458 - 1459 1680 clear_ftrace_function(); 1460 - mutex_unlock(&ftrace_sysctl_lock); 1461 - 1462 - /* Try to totally disable ftrace */ 1463 - ftrace_force_shutdown(); 1464 1681 } 1465 1682 1466 1683 /**
··· 1445 1870 mutex_unlock(&ftrace_sysctl_lock); 1446 1871 return ret; 1447 1872 } 1873 +
+4 -2
kernel/trace/ring_buffer.c
··· 130 130 static inline void free_buffer_page(struct buffer_page *bpage) 131 131 { 132 132 if (bpage->page) 133 - __free_page(bpage->page); 133 + free_page((unsigned long)bpage->page); 134 134 kfree(bpage); 135 135 } 136 136 ··· 966 966 if (unlikely(*delta > (1ULL << 59) && !once++)) { 967 967 printk(KERN_WARNING "Delta way too big! %llu" 968 968 " ts=%llu write stamp = %llu\n", 969 - *delta, *ts, cpu_buffer->write_stamp); 969 + (unsigned long long)*delta, 970 + (unsigned long long)*ts, 971 + (unsigned long long)cpu_buffer->write_stamp); 970 972 WARN_ON(1); 971 973 } 972 974
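The casts above are the whole of the u64 printk fix: u64 is "unsigned long" on some 64-bit architectures and "unsigned long long" on others, so a %llu format only matches on every build after an explicit cast. A minimal illustration (hypothetical function):

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void sketch_report_delta(u64 delta)
    {
            /* without the cast, gcc warns wherever u64 is unsigned long */
            printk(KERN_WARNING "Delta way too big! %llu\n",
                   (unsigned long long)delta);
    }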
+7 -8
kernel/trace/trace.c
··· 34 34 35 35 #include <linux/stacktrace.h> 36 36 #include <linux/ring_buffer.h> 37 + #include <linux/irqflags.h> 37 38 38 39 #include "trace.h" 39 40 ··· 852 851 preempt_enable_notrace(); 853 852 } 854 853 855 - #ifdef CONFIG_FTRACE 854 + #ifdef CONFIG_FUNCTION_TRACER 856 855 static void 857 856 function_trace_call(unsigned long ip, unsigned long parent_ip) 858 857 { ··· 864 863 int pc; 865 864 866 865 if (unlikely(!ftrace_function_enabled)) 867 - return; 868 - 869 - if (skip_trace(ip)) 870 866 return; 871 867 872 868 pc = preempt_count(); ··· 2377 2379 int i; 2378 2380 size_t ret; 2379 2381 2382 + ret = cnt; 2383 + 2380 2384 if (cnt > max_tracer_type_len) 2381 2385 cnt = max_tracer_type_len; 2382 - ret = cnt; 2383 2386 2384 2387 if (copy_from_user(&buf, ubuf, cnt)) 2385 2388 return -EFAULT; ··· 2413 2414 out: 2414 2415 mutex_unlock(&trace_types_lock); 2415 2416 2416 - if (ret == cnt) 2417 - filp->f_pos += cnt; 2417 + if (ret > 0) 2418 + filp->f_pos += ret; 2418 2419 2419 2420 return ret; 2420 2421 } ··· 3096 3097 dump_ran = 1; 3097 3098 3098 3099 /* No turning back! */ 3099 - ftrace_kill_atomic(); 3100 + ftrace_kill(); 3100 3101 3101 3102 for_each_tracing_cpu(cpu) { 3102 3103 atomic_inc(&global_trace.data[cpu]->disabled);
+1 -1
kernel/trace/trace.h
··· 335 335 336 336 extern cycle_t ftrace_now(int cpu); 337 337 338 - #ifdef CONFIG_FTRACE 338 + #ifdef CONFIG_FUNCTION_TRACER 339 339 void tracing_start_function_trace(void); 340 340 void tracing_stop_function_trace(void); 341 341 #else
+1 -1
kernel/trace/trace_functions.c
··· 64 64 65 65 static struct tracer function_trace __read_mostly = 66 66 { 67 - .name = "ftrace", 67 + .name = "function", 68 68 .init = function_trace_init, 69 69 .reset = function_trace_reset, 70 70 .ctrl_update = function_trace_ctrl_update,
+2 -2
kernel/trace/trace_irqsoff.c
··· 63 63 */ 64 64 static __cacheline_aligned_in_smp unsigned long max_sequence; 65 65 66 - #ifdef CONFIG_FTRACE 66 + #ifdef CONFIG_FUNCTION_TRACER 67 67 /* 68 68 * irqsoff uses its own tracer function to keep the overhead down: 69 69 */ ··· 104 104 { 105 105 .func = irqsoff_tracer_call, 106 106 }; 107 - #endif /* CONFIG_FTRACE */ 107 + #endif /* CONFIG_FUNCTION_TRACER */ 108 108 109 109 /* 110 110 * Should this new latency be reported/recorded?
+2 -2
kernel/trace/trace_sched_wakeup.c
··· 31 31 32 32 static void __wakeup_reset(struct trace_array *tr); 33 33 34 - #ifdef CONFIG_FTRACE 34 + #ifdef CONFIG_FUNCTION_TRACER 35 35 /* 36 36 * irqsoff uses its own tracer function to keep the overhead down: 37 37 */ ··· 96 96 { 97 97 .func = wakeup_tracer_call, 98 98 }; 99 - #endif /* CONFIG_FTRACE */ 99 + #endif /* CONFIG_FUNCTION_TRACER */ 100 100 101 101 /* 102 102 * Should this new latency be reported/recorded?
+2 -16
kernel/trace/trace_selftest.c
··· 70 70 return ret; 71 71 } 72 72 73 - #ifdef CONFIG_FTRACE 73 + #ifdef CONFIG_FUNCTION_TRACER 74 74 75 75 #ifdef CONFIG_DYNAMIC_FTRACE 76 76 ··· 98 98 99 99 /* passed in by parameter to fool gcc from optimizing */ 100 100 func(); 101 - 102 - /* update the records */ 103 - ret = ftrace_force_update(); 104 - if (ret) { 105 - printk(KERN_CONT ".. ftraced failed .. "); 106 - return ret; 107 - } 108 101 109 102 /* 110 103 * Some archs *cough*PowerPC*cough* add charachters to the ··· 176 183 /* make sure msleep has been recorded */ 177 184 msleep(1); 178 185 179 - /* force the recorded functions to be traced */ 180 - ret = ftrace_force_update(); 181 - if (ret) { 182 - printk(KERN_CONT ".. ftraced failed .. "); 183 - return ret; 184 - } 185 - 186 186 /* start the tracing */ 187 187 ftrace_enabled = 1; 188 188 tracer_enabled = 1; ··· 212 226 213 227 return ret; 214 228 } 215 - #endif /* CONFIG_FTRACE */ 229 + #endif /* CONFIG_FUNCTION_TRACER */ 216 230 217 231 #ifdef CONFIG_IRQSOFF_TRACER 218 232 int
+4
kernel/trace/trace_stack.c
··· 44 44 if (this_size <= max_stack_size) 45 45 return; 46 46 47 + /* we do not handle interrupt stacks yet */ 48 + if (!object_is_on_stack(&this_size)) 49 + return; 50 + 47 51 raw_local_irq_save(flags); 48 52 __raw_spin_lock(&max_stack_lock); 49 53
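The new check above makes the stack tracer bail out when it runs on an interrupt stack, which it cannot yet account for: object_is_on_stack() only tests whether an address falls inside the current task's stack. A simplified sketch of that test (the real helper lives in linux/sched.h; this version is illustrative):

    #include <linux/sched.h>
    #include <linux/thread_info.h>

    static inline int sketch_on_task_stack(void *obj)
    {
            void *stack = task_stack_page(current);  /* base of task stack */

            return obj >= stack && obj < stack + THREAD_SIZE;
    }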
+8
kernel/tracepoint.c
··· 131 131 132 132 old = entry->funcs; 133 133 134 + if (!old) 135 + return NULL; 136 + 134 137 debug_print_probes(entry); 135 138 /* (N -> M), (N > 1, M >= 0) probes */ 136 139 for (nr_probes = 0; old[nr_probes]; nr_probes++) { ··· 391 388 if (entry->rcu_pending) 392 389 rcu_barrier_sched(); 393 390 old = tracepoint_entry_remove_probe(entry, probe); 391 + if (!old) { 392 + printk(KERN_WARNING "Warning: Trying to unregister a probe" 393 + "that doesn't exist\n"); 394 + goto end; 395 + } 394 396 mutex_unlock(&tracepoints_mutex); 395 397 tracepoint_update_probes(); /* may update entry */ 396 398 mutex_lock(&tracepoints_mutex);
+1 -1
lib/Makefile
··· 2 2 # Makefile for some libs needed in the kernel. 3 3 # 4 4 5 - ifdef CONFIG_FTRACE 5 + ifdef CONFIG_FUNCTION_TRACER 6 6 ORIG_CFLAGS := $(KBUILD_CFLAGS) 7 7 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 8 8 endif
+8 -2
scripts/Makefile.build
··· 198 198 fi; 199 199 endif 200 200 201 + ifdef CONFIG_64BIT 202 + arch_bits = 64 203 + else 204 + arch_bits = 32 205 + endif 206 + 201 207 ifdef CONFIG_FTRACE_MCOUNT_RECORD 202 208 cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \ 203 - "$(ARCH)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" \ 204 - "$(MV)" "$(@)"; 209 + "$(ARCH)" "$(arch_bits)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" \ 210 + "$(NM)" "$(RM)" "$(MV)" "$(@)"; 205 211 endif 206 212 207 213 define rule_cc_o_c
+12 -7
scripts/bootgraph.pl
··· 37 37 # dmesg | perl scripts/bootgraph.pl > output.svg 38 38 # 39 39 40 - my %start, %end; 40 + use strict; 41 + 42 + my %start; 43 + my %end; 41 44 my $done = 0; 42 45 my $maxtime = 0; 43 46 my $firsttime = 100; ··· 108 105 my $stylecounter = 0; 109 106 my %rows; 110 107 my $rowscount = 1; 111 - while (($key,$value) = each %start) { 108 + my @initcalls = sort { $start{$a} <=> $start{$b} } keys(%start); 109 + my $key; 110 + foreach $key (@initcalls) { 112 111 my $duration = $end{$key} - $start{$key}; 113 112 114 113 if ($duration >= $threshold) { 115 - my $s, $s2, $e, $y; 116 - $pid = $pids{$key}; 114 + my ($s, $s2, $e, $w, $y, $y2, $style); 115 + my $pid = $pids{$key}; 117 116 118 117 if (!defined($rows{$pid})) { 119 118 $rows{$pid} = $rowscount; 120 119 $rowscount = $rowscount + 1; 121 120 } 122 - $s = ($value - $firsttime) * $mult; 121 + $s = ($start{$key} - $firsttime) * $mult; 123 122 $s2 = $s + 6; 124 123 $e = ($end{$key} - $firsttime) * $mult; 125 124 $w = $e - $s; ··· 145 140 my $time = $firsttime; 146 141 my $step = ($maxtime - $firsttime) / 15; 147 142 while ($time < $maxtime) { 148 - my $s2 = ($time - $firsttime) * $mult; 143 + my $s3 = ($time - $firsttime) * $mult; 149 144 my $tm = int($time * 100) / 100.0; 150 - print "<text transform=\"translate($s2,89) rotate(90)\">$tm</text>\n"; 145 + print "<text transform=\"translate($s3,89) rotate(90)\">$tm</text>\n"; 151 146 $time = $time + $step; 152 147 } 153 148
+24 -4
scripts/recordmcount.pl
··· 106 106 exit(1); 107 107 } 108 108 109 - my ($arch, $objdump, $objcopy, $cc, $ld, $nm, $rm, $mv, $inputfile) = @ARGV; 109 + my ($arch, $bits, $objdump, $objcopy, $cc, 110 + $ld, $nm, $rm, $mv, $inputfile) = @ARGV; 111 + 112 + # Acceptable sections to record. 113 + my %text_sections = ( 114 + ".text" => 1, 115 + ); 110 116 111 117 $objdump = "objdump" if ((length $objdump) == 0); 112 118 $objcopy = "objcopy" if ((length $objcopy) == 0); ··· 135 129 # (return offset and func name) 136 130 my $mcount_regex; # Find the call site to mcount (return offset) 137 131 132 + if ($arch eq "x86") { 133 + if ($bits == 64) { 134 + $arch = "x86_64"; 135 + } else { 136 + $arch = "i386"; 137 + } 138 + } 139 + 138 140 if ($arch eq "x86_64") { 139 - $section_regex = "Disassembly of section"; 141 + $section_regex = "Disassembly of section\\s+(\\S+):"; 140 142 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 141 143 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; 142 144 $type = ".quad"; ··· 156 142 $cc .= " -m64"; 157 143 158 144 } elsif ($arch eq "i386") { 159 - $section_regex = "Disassembly of section"; 145 + $section_regex = "Disassembly of section\\s+(\\S+):"; 160 146 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 161 147 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 162 148 $type = ".long"; ··· 303 289 while (<IN>) { 304 290 # is it a section? 305 291 if (/$section_regex/) { 306 - $read_function = 1; 292 + 293 + # Only record text sections that we know are safe 294 + if (defined($text_sections{$1})) { 295 + $read_function = 1; 296 + } else { 297 + $read_function = 0; 298 + } 307 299 # print out any recorded offsets 308 300 update_funcs() if ($text_found); 309 301