Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (31 commits)
ftrace: fix current_tracer error return
tracing: fix a build error on alpha
ftrace: use a real variable for ftrace_nop in x86
tracing/ftrace: make boot tracer select the sched_switch tracer
tracepoint: check if the probe has been registered
asm-generic: define DIE_OOPS in asm-generic
trace: fix printk warning for u64
ftrace: warning in kernel/trace/ftrace.c
ftrace: fix build failure
ftrace, powerpc, sparc64, x86: remove notrace from arch ftrace file
ftrace: remove ftrace hash
ftrace: remove mcount set
ftrace: remove daemon
ftrace: disable dynamic ftrace for all archs that use daemon
ftrace: add ftrace warn on to disable ftrace
ftrace: only have ftrace_kill atomic
ftrace: use probe_kernel
ftrace: comment arch ftrace code
ftrace: return error on failed modified text.
ftrace: dynamic ftrace process only text section
...

+284 -716
+1 -1
Makefile
··· 536 KBUILD_AFLAGS += -gdwarf-2 537 endif 538 539 - ifdef CONFIG_FTRACE 540 KBUILD_CFLAGS += -pg 541 endif 542
··· 536 KBUILD_AFLAGS += -gdwarf-2 537 endif 538 539 + ifdef CONFIG_FUNCTION_TRACER 540 KBUILD_CFLAGS += -pg 541 endif 542
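For context: -pg is the gcc option that plants a profiling call at every function entry, and that call is the hook CONFIG_FUNCTION_TRACER is built on. A minimal userspace sketch of the same mechanism (assumes gcc and glibc's profiling runtime; in the kernel the compiler-inserted call lands in the arch mcount/_mcount stubs shown in the entry_*.S hunks below, not in libc):

    /* demo.c - build with:  gcc -pg -o demo demo.c
     * Every emitted function gets a compiler-inserted call to mcount;
     * at exit the profiling runtime writes gmon.out for gprof. */
    #include <stdio.h>

    static int square(int x)
    {
            return x * x;           /* entry instrumented because of -pg */
    }

    int main(void)
    {
            printf("%d\n", square(6));
            return 0;
    }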
+1 -2
arch/arm/Kconfig
··· 16 select HAVE_ARCH_KGDB 17 select HAVE_KPROBES if (!XIP_KERNEL) 18 select HAVE_KRETPROBES if (HAVE_KPROBES) 19 - select HAVE_FTRACE if (!XIP_KERNEL) 20 - select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE) 21 select HAVE_GENERIC_DMA_COHERENT 22 help 23 The ARM series is a line of low-power-consumption RISC chip designs
··· 16 select HAVE_ARCH_KGDB 17 select HAVE_KPROBES if (!XIP_KERNEL) 18 select HAVE_KRETPROBES if (HAVE_KPROBES) 19 + select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) 20 select HAVE_GENERIC_DMA_COHERENT 21 help 22 The ARM series is a line of low-power-consumption RISC chip designs
+1 -1
arch/arm/boot/compressed/Makefile
··· 70 targets := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \ 71 head.o misc.o $(OBJS) 72 73 - ifeq ($(CONFIG_FTRACE),y) 74 ORIG_CFLAGS := $(KBUILD_CFLAGS) 75 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 76 endif
··· 70 targets := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \ 71 head.o misc.o $(OBJS) 72 73 + ifeq ($(CONFIG_FUNCTION_TRACER),y) 74 ORIG_CFLAGS := $(KBUILD_CFLAGS) 75 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) 76 endif
+1 -1
arch/arm/include/asm/ftrace.h
··· 1 #ifndef _ASM_ARM_FTRACE 2 #define _ASM_ARM_FTRACE 3 4 - #ifdef CONFIG_FTRACE 5 #define MCOUNT_ADDR ((long)(mcount)) 6 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 7
··· 1 #ifndef _ASM_ARM_FTRACE 2 #define _ASM_ARM_FTRACE 3 4 + #ifdef CONFIG_FUNCTION_TRACER 5 #define MCOUNT_ADDR ((long)(mcount)) 6 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 7
+1 -1
arch/arm/kernel/armksyms.c
··· 183 184 EXPORT_SYMBOL(copy_page); 185 186 - #ifdef CONFIG_FTRACE 187 EXPORT_SYMBOL(mcount); 188 #endif
··· 183 184 EXPORT_SYMBOL(copy_page); 185 186 + #ifdef CONFIG_FUNCTION_TRACER 187 EXPORT_SYMBOL(mcount); 188 #endif
+2 -2
arch/arm/kernel/entry-common.S
··· 101 #undef CALL 102 #define CALL(x) .long x 103 104 - #ifdef CONFIG_FTRACE 105 #ifdef CONFIG_DYNAMIC_FTRACE 106 ENTRY(mcount) 107 stmdb sp!, {r0-r3, lr} ··· 149 ftrace_stub: 150 mov pc, lr 151 152 - #endif /* CONFIG_FTRACE */ 153 154 /*============================================================================= 155 * SWI handler
··· 101 #undef CALL 102 #define CALL(x) .long x 103 104 + #ifdef CONFIG_FUNCTION_TRACER 105 #ifdef CONFIG_DYNAMIC_FTRACE 106 ENTRY(mcount) 107 stmdb sp!, {r0-r3, lr} ··· 149 ftrace_stub: 150 mov pc, lr 151 152 + #endif /* CONFIG_FUNCTION_TRACER */ 153 154 /*============================================================================= 155 * SWI handler
-13
arch/arm/kernel/ftrace.c
··· 95 return ret; 96 } 97 98 - int ftrace_mcount_set(unsigned long *data) 99 - { 100 - unsigned long pc, old; 101 - unsigned long *addr = data; 102 - unsigned char *new; 103 - 104 - pc = (unsigned long)&mcount_call; 105 - memcpy(&old, &mcount_call, MCOUNT_INSN_SIZE); 106 - new = ftrace_call_replace(pc, *addr); 107 - *addr = ftrace_modify_code(pc, (unsigned char *)&old, new); 108 - return 0; 109 - } 110 - 111 /* run from kstop_machine */ 112 int __init ftrace_dyn_arch_init(void *data) 113 {
··· 95 return ret; 96 } 97 98 /* run from kstop_machine */ 99 int __init ftrace_dyn_arch_init(void *data) 100 {
+1 -2
arch/powerpc/Kconfig
··· 108 config PPC 109 bool 110 default y 111 - select HAVE_DYNAMIC_FTRACE 112 - select HAVE_FTRACE 113 select ARCH_WANT_OPTIONAL_GPIOLIB 114 select HAVE_IDE 115 select HAVE_IOREMAP_PROT
··· 108 config PPC 109 bool 110 default y 111 + select HAVE_FUNCTION_TRACER 112 select ARCH_WANT_OPTIONAL_GPIOLIB 113 select HAVE_IDE 114 select HAVE_IOREMAP_PROT
+1 -1
arch/powerpc/Makefile
··· 122 endif 123 124 # Work around a gcc code-gen bug with -fno-omit-frame-pointer. 125 - ifeq ($(CONFIG_FTRACE),y) 126 KBUILD_CFLAGS += -mno-sched-epilog 127 endif 128
··· 122 endif 123 124 # Work around a gcc code-gen bug with -fno-omit-frame-pointer. 125 + ifeq ($(CONFIG_FUNCTION_TRACER),y) 126 KBUILD_CFLAGS += -mno-sched-epilog 127 endif 128
+1 -1
arch/powerpc/include/asm/ftrace.h
··· 1 #ifndef _ASM_POWERPC_FTRACE 2 #define _ASM_POWERPC_FTRACE 3 4 - #ifdef CONFIG_FTRACE 5 #define MCOUNT_ADDR ((long)(_mcount)) 6 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 7
··· 1 #ifndef _ASM_POWERPC_FTRACE 2 #define _ASM_POWERPC_FTRACE 3 4 + #ifdef CONFIG_FUNCTION_TRACER 5 #define MCOUNT_ADDR ((long)(_mcount)) 6 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 7
+1 -1
arch/powerpc/kernel/Makefile
··· 12 CFLAGS_btext.o += -fPIC 13 endif 14 15 - ifdef CONFIG_FTRACE 16 # Do not trace early boot code 17 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog 18 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
··· 12 CFLAGS_btext.o += -fPIC 13 endif 14 15 + ifdef CONFIG_FUNCTION_TRACER 16 # Do not trace early boot code 17 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog 18 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
+1 -1
arch/powerpc/kernel/entry_32.S
··· 1158 1159 #endif /* CONFIG_PPC_RTAS */ 1160 1161 - #ifdef CONFIG_FTRACE 1162 #ifdef CONFIG_DYNAMIC_FTRACE 1163 _GLOBAL(mcount) 1164 _GLOBAL(_mcount)
··· 1158 1159 #endif /* CONFIG_PPC_RTAS */ 1160 1161 + #ifdef CONFIG_FUNCTION_TRACER 1162 #ifdef CONFIG_DYNAMIC_FTRACE 1163 _GLOBAL(mcount) 1164 _GLOBAL(_mcount)
+1 -1
arch/powerpc/kernel/entry_64.S
··· 884 mtlr r0 885 blr 886 887 - #ifdef CONFIG_FTRACE 888 #ifdef CONFIG_DYNAMIC_FTRACE 889 _GLOBAL(mcount) 890 _GLOBAL(_mcount)
··· 884 mtlr r0 885 blr 886 887 + #ifdef CONFIG_FUNCTION_TRACER 888 #ifdef CONFIG_DYNAMIC_FTRACE 889 _GLOBAL(mcount) 890 _GLOBAL(_mcount)
+5 -22
arch/powerpc/kernel/ftrace.c
··· 28 #endif 29 30 31 - static unsigned int notrace ftrace_calc_offset(long ip, long addr) 32 { 33 return (int)(addr - ip); 34 } 35 36 - notrace unsigned char *ftrace_nop_replace(void) 37 { 38 return (char *)&ftrace_nop; 39 } 40 41 - notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 42 { 43 static unsigned int op; 44 ··· 68 # define _ASM_PTR " .long " 69 #endif 70 71 - notrace int 72 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 73 unsigned char *new_code) 74 { ··· 113 return faulted; 114 } 115 116 - notrace int ftrace_update_ftrace_func(ftrace_func_t func) 117 { 118 unsigned long ip = (unsigned long)(&ftrace_call); 119 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 124 ret = ftrace_modify_code(ip, old, new); 125 126 return ret; 127 - } 128 - 129 - notrace int ftrace_mcount_set(unsigned long *data) 130 - { 131 - unsigned long ip = (long)(&mcount_call); 132 - unsigned long *addr = data; 133 - unsigned char old[MCOUNT_INSN_SIZE], *new; 134 - 135 - /* 136 - * Replace the mcount stub with a pointer to the 137 - * ip recorder function. 138 - */ 139 - memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); 140 - new = ftrace_call_replace(ip, *addr); 141 - *addr = ftrace_modify_code(ip, old, new); 142 - 143 - return 0; 144 } 145 146 int __init ftrace_dyn_arch_init(void *data)
··· 28 #endif 29 30 31 + static unsigned int ftrace_calc_offset(long ip, long addr) 32 { 33 return (int)(addr - ip); 34 } 35 36 + unsigned char *ftrace_nop_replace(void) 37 { 38 return (char *)&ftrace_nop; 39 } 40 41 + unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 42 { 43 static unsigned int op; 44 ··· 68 # define _ASM_PTR " .long " 69 #endif 70 71 + int 72 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 73 unsigned char *new_code) 74 { ··· 113 return faulted; 114 } 115 116 + int ftrace_update_ftrace_func(ftrace_func_t func) 117 { 118 unsigned long ip = (unsigned long)(&ftrace_call); 119 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 124 ret = ftrace_modify_code(ip, old, new); 125 126 return ret; 127 } 128 129 int __init ftrace_dyn_arch_init(void *data)
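The notrace annotations dropped above are made redundant by a stronger per-file exclusion: the arch ftrace.c files are now built without -pg entirely (see the CFLAGS_REMOVE_ftrace.o hunks for sparc64 and x86 below), so none of their functions can recurse into the tracer. A minimal sketch of the two mechanisms, assuming the kernel's usual definition of notrace from include/linux/compiler.h:

    /* Per function: suppress the mcount call for one symbol even
     * though the rest of the file is compiled with -pg. */
    #define notrace __attribute__((no_instrument_function))

    notrace static int patch_helper(void)   /* hypothetical example */
    {
            return 0;                       /* never shows up in a trace */
    }

    /* Per file (Makefile syntax, not C): strip -pg from the whole
     * object, which is what this series switches the arch ftrace.c
     * files to:
     *
     *      CFLAGS_REMOVE_ftrace.o = -pg
     */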
+1 -1
arch/powerpc/kernel/ppc_ksyms.c
··· 68 EXPORT_SYMBOL(sys_sigreturn); 69 #endif 70 71 - #ifdef CONFIG_FTRACE 72 EXPORT_SYMBOL(_mcount); 73 #endif 74
··· 68 EXPORT_SYMBOL(sys_sigreturn); 69 #endif 70 71 + #ifdef CONFIG_FUNCTION_TRACER 72 EXPORT_SYMBOL(_mcount); 73 #endif 74
+1 -1
arch/powerpc/platforms/powermac/Makefile
··· 1 CFLAGS_bootx_init.o += -fPIC 2 3 - ifdef CONFIG_FTRACE 4 # Do not trace early boot code 5 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog 6 endif
··· 1 CFLAGS_bootx_init.o += -fPIC 2 3 + ifdef CONFIG_FUNCTION_TRACER 4 # Do not trace early boot code 5 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog 6 endif
+1 -2
arch/sparc64/Kconfig
··· 11 config SPARC64 12 bool 13 default y 14 - select HAVE_DYNAMIC_FTRACE 15 - select HAVE_FTRACE 16 select HAVE_IDE 17 select HAVE_LMB 18 select HAVE_ARCH_KGDB
··· 11 config SPARC64 12 bool 13 default y 14 + select HAVE_FUNCTION_TRACER 15 select HAVE_IDE 16 select HAVE_LMB 17 select HAVE_ARCH_KGDB
+1 -1
arch/sparc64/Kconfig.debug
··· 33 34 config MCOUNT 35 bool 36 - depends on STACK_DEBUG || FTRACE 37 default y 38 39 config FRAME_POINTER
··· 33 34 config MCOUNT 35 bool 36 + depends on STACK_DEBUG || FUNCTION_TRACER 37 default y 38 39 config FRAME_POINTER
+2
arch/sparc64/kernel/Makefile
··· 5 EXTRA_AFLAGS := -ansi 6 EXTRA_CFLAGS := -Werror 7 8 extra-y := head.o init_task.o vmlinux.lds 9 10 obj-y := process.o setup.o cpu.o idprom.o reboot.o \
··· 5 EXTRA_AFLAGS := -ansi 6 EXTRA_CFLAGS := -Werror 7 8 + CFLAGS_REMOVE_ftrace.o = -pg 9 + 10 extra-y := head.o init_task.o vmlinux.lds 11 12 obj-y := process.o setup.o cpu.o idprom.o reboot.o \
+4 -22
arch/sparc64/kernel/ftrace.c
··· 9 10 static const u32 ftrace_nop = 0x01000000; 11 12 - notrace unsigned char *ftrace_nop_replace(void) 13 { 14 return (char *)&ftrace_nop; 15 } 16 17 - notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 18 { 19 static u32 call; 20 s32 off; ··· 25 return (unsigned char *) &call; 26 } 27 28 - notrace int 29 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 30 unsigned char *new_code) 31 { ··· 59 return faulted; 60 } 61 62 - notrace int ftrace_update_ftrace_func(ftrace_func_t func) 63 { 64 unsigned long ip = (unsigned long)(&ftrace_call); 65 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 68 new = ftrace_call_replace(ip, (unsigned long)func); 69 return ftrace_modify_code(ip, old, new); 70 } 71 - 72 - notrace int ftrace_mcount_set(unsigned long *data) 73 - { 74 - unsigned long ip = (long)(&mcount_call); 75 - unsigned long *addr = data; 76 - unsigned char old[MCOUNT_INSN_SIZE], *new; 77 - 78 - /* 79 - * Replace the mcount stub with a pointer to the 80 - * ip recorder function. 81 - */ 82 - memcpy(old, &mcount_call, MCOUNT_INSN_SIZE); 83 - new = ftrace_call_replace(ip, *addr); 84 - *addr = ftrace_modify_code(ip, old, new); 85 - 86 - return 0; 87 - } 88 - 89 90 int __init ftrace_dyn_arch_init(void *data) 91 {
··· 9 10 static const u32 ftrace_nop = 0x01000000; 11 12 + unsigned char *ftrace_nop_replace(void) 13 { 14 return (char *)&ftrace_nop; 15 } 16 17 + unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 18 { 19 static u32 call; 20 s32 off; ··· 25 return (unsigned char *) &call; 26 } 27 28 + int 29 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 30 unsigned char *new_code) 31 { ··· 59 return faulted; 60 } 61 62 + int ftrace_update_ftrace_func(ftrace_func_t func) 63 { 64 unsigned long ip = (unsigned long)(&ftrace_call); 65 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 68 new = ftrace_call_replace(ip, (unsigned long)func); 69 return ftrace_modify_code(ip, old, new); 70 } 71 72 int __init ftrace_dyn_arch_init(void *data) 73 {
+2 -2
arch/sparc64/lib/mcount.S
··· 93 nop 94 1: 95 #endif 96 - #ifdef CONFIG_FTRACE 97 #ifdef CONFIG_DYNAMIC_FTRACE 98 mov %o7, %o0 99 .globl mcount_call ··· 119 .size _mcount,.-_mcount 120 .size mcount,.-mcount 121 122 - #ifdef CONFIG_FTRACE 123 .globl ftrace_stub 124 .type ftrace_stub,#function 125 ftrace_stub:
··· 93 nop 94 1: 95 #endif 96 + #ifdef CONFIG_FUNCTION_TRACER 97 #ifdef CONFIG_DYNAMIC_FTRACE 98 mov %o7, %o0 99 .globl mcount_call ··· 119 .size _mcount,.-_mcount 120 .size mcount,.-mcount 121 122 + #ifdef CONFIG_FUNCTION_TRACER 123 .globl ftrace_stub 124 .type ftrace_stub,#function 125 ftrace_stub:
+1 -1
arch/x86/Kconfig
··· 28 select HAVE_KRETPROBES 29 select HAVE_FTRACE_MCOUNT_RECORD 30 select HAVE_DYNAMIC_FTRACE 31 - select HAVE_FTRACE 32 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 33 select HAVE_ARCH_KGDB if !X86_VOYAGER 34 select HAVE_ARCH_TRACEHOOK
··· 28 select HAVE_KRETPROBES 29 select HAVE_FTRACE_MCOUNT_RECORD 30 select HAVE_DYNAMIC_FTRACE 31 + select HAVE_FUNCTION_TRACER 32 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 33 select HAVE_ARCH_KGDB if !X86_VOYAGER 34 select HAVE_ARCH_TRACEHOOK
+2 -2
arch/x86/include/asm/ftrace.h
··· 1 #ifndef _ASM_X86_FTRACE_H 2 #define _ASM_X86_FTRACE_H 3 4 - #ifdef CONFIG_FTRACE 5 #define MCOUNT_ADDR ((long)(mcount)) 6 #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 7 ··· 19 } 20 #endif 21 22 - #endif /* CONFIG_FTRACE */ 23 24 #endif /* _ASM_X86_FTRACE_H */
··· 1 #ifndef _ASM_X86_FTRACE_H 2 #define _ASM_X86_FTRACE_H 3 4 + #ifdef CONFIG_FUNCTION_TRACER 5 #define MCOUNT_ADDR ((long)(mcount)) 6 #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 7 ··· 19 } 20 #endif 21 22 + #endif /* CONFIG_FUNCTION_TRACER */ 23 24 #endif /* _ASM_X86_FTRACE_H */
+2 -1
arch/x86/kernel/Makefile
··· 6 7 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) 8 9 - ifdef CONFIG_FTRACE 10 # Do not profile debug and lowlevel utilities 11 CFLAGS_REMOVE_tsc.o = -pg 12 CFLAGS_REMOVE_rtc.o = -pg 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 endif 15 16 #
··· 6 7 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) 8 9 + ifdef CONFIG_FUNCTION_TRACER 10 # Do not profile debug and lowlevel utilities 11 CFLAGS_REMOVE_tsc.o = -pg 12 CFLAGS_REMOVE_rtc.o = -pg 13 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 14 + CFLAGS_REMOVE_ftrace.o = -pg 15 endif 16 17 #
+2 -2
arch/x86/kernel/entry_32.S
··· 1149 1150 #endif /* CONFIG_XEN */ 1151 1152 - #ifdef CONFIG_FTRACE 1153 #ifdef CONFIG_DYNAMIC_FTRACE 1154 1155 ENTRY(mcount) ··· 1204 jmp ftrace_stub 1205 END(mcount) 1206 #endif /* CONFIG_DYNAMIC_FTRACE */ 1207 - #endif /* CONFIG_FTRACE */ 1208 1209 .section .rodata,"a" 1210 #include "syscall_table_32.S"
··· 1149 1150 #endif /* CONFIG_XEN */ 1151 1152 + #ifdef CONFIG_FUNCTION_TRACER 1153 #ifdef CONFIG_DYNAMIC_FTRACE 1154 1155 ENTRY(mcount) ··· 1204 jmp ftrace_stub 1205 END(mcount) 1206 #endif /* CONFIG_DYNAMIC_FTRACE */ 1207 + #endif /* CONFIG_FUNCTION_TRACER */ 1208 1209 .section .rodata,"a" 1210 #include "syscall_table_32.S"
+2 -2
arch/x86/kernel/entry_64.S
··· 61 62 .code64 63 64 - #ifdef CONFIG_FTRACE 65 #ifdef CONFIG_DYNAMIC_FTRACE 66 ENTRY(mcount) 67 retq ··· 138 jmp ftrace_stub 139 END(mcount) 140 #endif /* CONFIG_DYNAMIC_FTRACE */ 141 - #endif /* CONFIG_FTRACE */ 142 143 #ifndef CONFIG_PREEMPT 144 #define retint_kernel retint_restore_args
··· 61 62 .code64 63 64 + #ifdef CONFIG_FUNCTION_TRACER 65 #ifdef CONFIG_DYNAMIC_FTRACE 66 ENTRY(mcount) 67 retq ··· 138 jmp ftrace_stub 139 END(mcount) 140 #endif /* CONFIG_DYNAMIC_FTRACE */ 141 + #endif /* CONFIG_FUNCTION_TRACER */ 142 143 #ifndef CONFIG_PREEMPT 144 #define retint_kernel retint_restore_args
+21 -29
arch/x86/kernel/ftrace.c
··· 21 #include <asm/nops.h> 22 23 24 - /* Long is fine, even if it is only 4 bytes ;-) */ 25 - static unsigned long *ftrace_nop; 26 27 union ftrace_code_union { 28 char code[MCOUNT_INSN_SIZE]; ··· 32 }; 33 34 35 - static int notrace ftrace_calc_offset(long ip, long addr) 36 { 37 return (int)(addr - ip); 38 } 39 40 - notrace unsigned char *ftrace_nop_replace(void) 41 { 42 - return (char *)ftrace_nop; 43 } 44 45 - notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 46 { 47 static union ftrace_code_union calc; 48 ··· 56 return calc.code; 57 } 58 59 - notrace int 60 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 61 unsigned char *new_code) 62 { ··· 65 /* 66 * Note: Due to modules and __init, code can 67 * disappear and change, we need to protect against faulting 68 - * as well as code changing. 69 * 70 * No real locking needed, this code is run through 71 * kstop_machine, or before SMP starts. 72 */ 73 - if (__copy_from_user_inatomic(replaced, (char __user *)ip, MCOUNT_INSN_SIZE)) 74 - return 1; 75 76 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) 77 - return 2; 78 79 - WARN_ON_ONCE(__copy_to_user_inatomic((char __user *)ip, new_code, 80 - MCOUNT_INSN_SIZE)); 81 82 sync_core(); 83 84 return 0; 85 } 86 87 - notrace int ftrace_update_ftrace_func(ftrace_func_t func) 88 { 89 unsigned long ip = (unsigned long)(&ftrace_call); 90 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 100 ret = ftrace_modify_code(ip, old, new); 101 102 return ret; 103 - } 104 - 105 - notrace int ftrace_mcount_set(unsigned long *data) 106 - { 107 - /* mcount is initialized as a nop */ 108 - *data = 0; 109 - return 0; 110 } 111 112 int __init ftrace_dyn_arch_init(void *data) ··· 124 * TODO: check the cpuid to determine the best nop. 125 */ 126 asm volatile ( 127 - "jmp ftrace_test_jmp\n" 128 - /* This code needs to stay around */ 129 - ".section .text, \"ax\"\n" 130 "ftrace_test_jmp:" 131 "jmp ftrace_test_p6nop\n" 132 "nop\n" ··· 134 "jmp 1f\n" 135 "ftrace_test_nop5:" 136 ".byte 0x66,0x66,0x66,0x66,0x90\n" 137 - "jmp 1f\n" 138 - ".previous\n" 139 "1:" 140 ".section .fixup, \"ax\"\n" 141 "2: movl $1, %0\n" ··· 148 switch (faulted) { 149 case 0: 150 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); 151 - ftrace_nop = (unsigned long *)ftrace_test_p6nop; 152 break; 153 case 1: 154 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); 155 - ftrace_nop = (unsigned long *)ftrace_test_nop5; 156 break; 157 case 2: 158 pr_info("ftrace: converting mcount calls to jmp . + 5\n"); 159 - ftrace_nop = (unsigned long *)ftrace_test_jmp; 160 break; 161 } 162
··· 21 #include <asm/nops.h> 22 23 24 + static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; 25 26 union ftrace_code_union { 27 char code[MCOUNT_INSN_SIZE]; ··· 33 }; 34 35 36 + static int ftrace_calc_offset(long ip, long addr) 37 { 38 return (int)(addr - ip); 39 } 40 41 + unsigned char *ftrace_nop_replace(void) 42 { 43 + return ftrace_nop; 44 } 45 46 + unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) 47 { 48 static union ftrace_code_union calc; 49 ··· 57 return calc.code; 58 } 59 60 + int 61 ftrace_modify_code(unsigned long ip, unsigned char *old_code, 62 unsigned char *new_code) 63 { ··· 66 /* 67 * Note: Due to modules and __init, code can 68 * disappear and change, we need to protect against faulting 69 + * as well as code changing. We do this by using the 70 + * probe_kernel_* functions. 71 * 72 * No real locking needed, this code is run through 73 * kstop_machine, or before SMP starts. 74 */ 75 76 + /* read the text we want to modify */ 77 + if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) 78 + return -EFAULT; 79 + 80 + /* Make sure it is what we expect it to be */ 81 if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) 82 + return -EINVAL; 83 84 + /* replace the text with the new text */ 85 + if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) 86 + return -EPERM; 87 88 sync_core(); 89 90 return 0; 91 } 92 93 + int ftrace_update_ftrace_func(ftrace_func_t func) 94 { 95 unsigned long ip = (unsigned long)(&ftrace_call); 96 unsigned char old[MCOUNT_INSN_SIZE], *new; ··· 96 ret = ftrace_modify_code(ip, old, new); 97 98 return ret; 99 } 100 101 int __init ftrace_dyn_arch_init(void *data) ··· 127 * TODO: check the cpuid to determine the best nop. 128 */ 129 asm volatile ( 130 "ftrace_test_jmp:" 131 "jmp ftrace_test_p6nop\n" 132 "nop\n" ··· 140 "jmp 1f\n" 141 "ftrace_test_nop5:" 142 ".byte 0x66,0x66,0x66,0x66,0x90\n" 143 "1:" 144 ".section .fixup, \"ax\"\n" 145 "2: movl $1, %0\n" ··· 156 switch (faulted) { 157 case 0: 158 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n"); 159 + memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); 160 break; 161 case 1: 162 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n"); 163 + memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); 164 break; 165 case 2: 166 pr_info("ftrace: converting mcount calls to jmp . + 5\n"); 167 + memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); 168 break; 169 } 170
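Condensing the hunk above into one place: ftrace_modify_code() now reads, verifies, and patches the call site with the mm/maccess.c helpers instead of the old __copy_*_inatomic() user-access routines, and each failure mode gets its own errno. A kernel-style sketch (function name hypothetical; this follows the diff rather than being an independent implementation):

    static int patch_mcount_site(unsigned long ip,
                                 unsigned char *old_code,
                                 unsigned char *new_code)
    {
            unsigned char cur[MCOUNT_INSN_SIZE];

            /* the text may be gone: unloaded module, freed __init code */
            if (probe_kernel_read(cur, (void *)ip, MCOUNT_INSN_SIZE))
                    return -EFAULT;

            /* make sure nobody changed it underneath us */
            if (memcmp(cur, old_code, MCOUNT_INSN_SIZE) != 0)
                    return -EINVAL;

            /* patch in the replacement instruction */
            if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
                    return -EPERM;

            sync_core();    /* x86: serialize after modifying code */
            return 0;
    }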
+1 -1
arch/x86/kernel/i386_ksyms_32.c
··· 5 #include <asm/desc.h> 6 #include <asm/ftrace.h> 7 8 - #ifdef CONFIG_FTRACE 9 /* mcount is defined in assembly */ 10 EXPORT_SYMBOL(mcount); 11 #endif
··· 5 #include <asm/desc.h> 6 #include <asm/ftrace.h> 7 8 + #ifdef CONFIG_FUNCTION_TRACER 9 /* mcount is defined in assembly */ 10 EXPORT_SYMBOL(mcount); 11 #endif
+1 -1
arch/x86/kernel/x8664_ksyms_64.c
··· 12 #include <asm/desc.h> 13 #include <asm/ftrace.h> 14 15 - #ifdef CONFIG_FTRACE 16 /* mcount is defined in assembly */ 17 EXPORT_SYMBOL(mcount); 18 #endif
··· 12 #include <asm/desc.h> 13 #include <asm/ftrace.h> 14 15 + #ifdef CONFIG_FUNCTION_TRACER 16 /* mcount is defined in assembly */ 17 EXPORT_SYMBOL(mcount); 18 #endif
+1 -1
arch/x86/xen/Makefile
··· 1 - ifdef CONFIG_FTRACE 2 # Do not profile debug and lowlevel utilities 3 CFLAGS_REMOVE_spinlock.o = -pg 4 CFLAGS_REMOVE_time.o = -pg
··· 1 + ifdef CONFIG_FUNCTION_TRACER 2 # Do not profile debug and lowlevel utilities 3 CFLAGS_REMOVE_spinlock.o = -pg 4 CFLAGS_REMOVE_time.o = -pg
+1
include/asm-generic/kdebug.h
··· 3 4 enum die_val { 5 DIE_UNUSED, 6 }; 7 8 #endif /* _ASM_GENERIC_KDEBUG_H */
··· 3 4 enum die_val { 5 DIE_UNUSED, 6 + DIE_OOPS=1 7 }; 8 9 #endif /* _ASM_GENERIC_KDEBUG_H */
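DIE_OOPS in the generic header gives arch-independent code a value to match on the die notifier chain; the tracing tree relies on it so the trace buffer can be dumped when an oops is reported. A minimal sketch of a consumer (hypothetical handler names, assuming the standard register_die_notifier() API):

    #include <linux/kdebug.h>
    #include <linux/notifier.h>

    static int my_die_handler(struct notifier_block *self,
                              unsigned long val, void *data)
    {
            if (val == DIE_OOPS)
                    printk(KERN_INFO "oops reported on the die chain\n");
            return NOTIFY_OK;
    }

    static struct notifier_block my_die_nb = {
            .notifier_call = my_die_handler,
    };

    /* in init code:  register_die_notifier(&my_die_nb); */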
+32 -16
include/linux/ftrace.h
··· 8 #include <linux/types.h> 9 #include <linux/kallsyms.h> 10 11 - #ifdef CONFIG_FTRACE 12 13 extern int ftrace_enabled; 14 extern int ··· 36 37 extern void ftrace_stub(unsigned long a0, unsigned long a1); 38 39 - #else /* !CONFIG_FTRACE */ 40 # define register_ftrace_function(ops) do { } while (0) 41 # define unregister_ftrace_function(ops) do { } while (0) 42 # define clear_ftrace_function(ops) do { } while (0) 43 - static inline void ftrace_kill_atomic(void) { } 44 - #endif /* CONFIG_FTRACE */ 45 46 #ifdef CONFIG_DYNAMIC_FTRACE 47 - # define FTRACE_HASHBITS 10 48 - # define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS) 49 50 enum { 51 FTRACE_FL_FREE = (1 << 0), ··· 56 }; 57 58 struct dyn_ftrace { 59 - struct hlist_node node; 60 - unsigned long ip; /* address of mcount call-site */ 61 - unsigned long flags; 62 }; 63 64 int ftrace_force_update(void); ··· 69 extern unsigned char *ftrace_nop_replace(void); 70 extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); 71 extern int ftrace_dyn_arch_init(void *data); 72 - extern int ftrace_mcount_set(unsigned long *data); 73 - extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, 74 - unsigned char *new_code); 75 extern int ftrace_update_ftrace_func(ftrace_func_t func); 76 extern void ftrace_caller(void); 77 extern void ftrace_call(void); 78 extern void mcount_call(void); 79 80 extern int skip_trace(unsigned long ip); 81 ··· 114 115 /* totally disable ftrace - can not re-enable after this */ 116 void ftrace_kill(void); 117 - void ftrace_kill_atomic(void); 118 119 static inline void tracer_disable(void) 120 { 121 - #ifdef CONFIG_FTRACE 122 ftrace_enabled = 0; 123 #endif 124 } ··· 129 */ 130 static inline int __ftrace_enabled_save(void) 131 { 132 - #ifdef CONFIG_FTRACE 133 int saved_ftrace_enabled = ftrace_enabled; 134 ftrace_enabled = 0; 135 return saved_ftrace_enabled; ··· 140 141 static inline void __ftrace_enabled_restore(int enabled) 142 { 143 - #ifdef CONFIG_FTRACE 144 ftrace_enabled = enabled; 145 #endif 146 }
··· 8 #include <linux/types.h> 9 #include <linux/kallsyms.h> 10 11 + #ifdef CONFIG_FUNCTION_TRACER 12 13 extern int ftrace_enabled; 14 extern int ··· 36 37 extern void ftrace_stub(unsigned long a0, unsigned long a1); 38 39 + #else /* !CONFIG_FUNCTION_TRACER */ 40 # define register_ftrace_function(ops) do { } while (0) 41 # define unregister_ftrace_function(ops) do { } while (0) 42 # define clear_ftrace_function(ops) do { } while (0) 43 + static inline void ftrace_kill(void) { } 44 + #endif /* CONFIG_FUNCTION_TRACER */ 45 46 #ifdef CONFIG_DYNAMIC_FTRACE 47 48 enum { 49 FTRACE_FL_FREE = (1 << 0), ··· 58 }; 59 60 struct dyn_ftrace { 61 + struct list_head list; 62 + unsigned long ip; /* address of mcount call-site */ 63 + unsigned long flags; 64 }; 65 66 int ftrace_force_update(void); ··· 71 extern unsigned char *ftrace_nop_replace(void); 72 extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr); 73 extern int ftrace_dyn_arch_init(void *data); 74 extern int ftrace_update_ftrace_func(ftrace_func_t func); 75 extern void ftrace_caller(void); 76 extern void ftrace_call(void); 77 extern void mcount_call(void); 78 + 79 + /** 80 + * ftrace_modify_code - modify code segment 81 + * @ip: the address of the code segment 82 + * @old_code: the contents of what is expected to be there 83 + * @new_code: the code to patch in 84 + * 85 + * This is a very sensitive operation and great care needs 86 + * to be taken by the arch. The operation should carefully 87 + * read the location, check to see if what is read is indeed 88 + * what we expect it to be, and then on success of the compare, 89 + * it should write to the location. 90 + * 91 + * Return must be: 92 + * 0 on success 93 + * -EFAULT on error reading the location 94 + * -EINVAL on a failed compare of the contents 95 + * -EPERM on error writing to the location 96 + * Any other value will be considered a failure. 97 + */ 98 + extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code, 99 + unsigned char *new_code); 100 101 extern int skip_trace(unsigned long ip); 102 ··· 97 98 /* totally disable ftrace - can not re-enable after this */ 99 void ftrace_kill(void); 100 101 static inline void tracer_disable(void) 102 { 103 + #ifdef CONFIG_FUNCTION_TRACER 104 ftrace_enabled = 0; 105 #endif 106 } ··· 113 */ 114 static inline int __ftrace_enabled_save(void) 115 { 116 + #ifdef CONFIG_FUNCTION_TRACER 117 int saved_ftrace_enabled = ftrace_enabled; 118 ftrace_enabled = 0; 119 return saved_ftrace_enabled; ··· 124 125 static inline void __ftrace_enabled_restore(int enabled) 126 { 127 + #ifdef CONFIG_FUNCTION_TRACER 128 ftrace_enabled = enabled; 129 #endif 130 }
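For reference, the consumer side of this header is unchanged by the rename: a tracer fills in a struct ftrace_ops with an ftrace_func_t callback and registers it. A minimal sketch with hypothetical names, following the declarations above:

    #include <linux/ftrace.h>

    /* Runs on every traced function entry while registered; ip is the
     * called function, parent_ip its call site. Must not recurse into
     * traced code and must not sleep. */
    static void count_calls(unsigned long ip, unsigned long parent_ip)
    {
            /* tally, filter, or record the hit here */
    }

    static struct ftrace_ops count_ops = {
            .func = count_calls,
    };

    /* register_ftrace_function(&count_ops);    start
     * unregister_ftrace_function(&count_ops);  stop  */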
+2 -2
kernel/Makefile
··· 13 14 CFLAGS_REMOVE_sched.o = -mno-spe 15 16 - ifdef CONFIG_FTRACE 17 # Do not trace debug files and internal ftrace files 18 CFLAGS_REMOVE_lockdep.o = -pg 19 CFLAGS_REMOVE_lockdep_proc.o = -pg ··· 88 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o 89 obj-$(CONFIG_LATENCYTOP) += latencytop.o 90 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 91 - obj-$(CONFIG_FTRACE) += trace/ 92 obj-$(CONFIG_TRACING) += trace/ 93 obj-$(CONFIG_SMP) += sched_cpupri.o 94
··· 13 14 CFLAGS_REMOVE_sched.o = -mno-spe 15 16 + ifdef CONFIG_FUNCTION_TRACER 17 # Do not trace debug files and internal ftrace files 18 CFLAGS_REMOVE_lockdep.o = -pg 19 CFLAGS_REMOVE_lockdep_proc.o = -pg ··· 88 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o 89 obj-$(CONFIG_LATENCYTOP) += latencytop.o 90 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 91 + obj-$(CONFIG_FUNCTION_TRACER) += trace/ 92 obj-$(CONFIG_TRACING) += trace/ 93 obj-$(CONFIG_SMP) += sched_cpupri.o 94
+1 -1
kernel/sysctl.c
··· 474 .mode = 0644, 475 .proc_handler = &proc_dointvec, 476 }, 477 - #ifdef CONFIG_FTRACE 478 { 479 .ctl_name = CTL_UNNUMBERED, 480 .procname = "ftrace_enabled",
··· 474 .mode = 0644, 475 .proc_handler = &proc_dointvec, 476 }, 477 + #ifdef CONFIG_FUNCTION_TRACER 478 { 479 .ctl_name = CTL_UNNUMBERED, 480 .procname = "ftrace_enabled",
+14 -13
kernel/trace/Kconfig
··· 1 # 2 - # Architectures that offer an FTRACE implementation should select HAVE_FTRACE: 3 # 4 5 config NOP_TRACER 6 bool 7 8 - config HAVE_FTRACE 9 bool 10 select NOP_TRACER 11 ··· 29 select STACKTRACE 30 select TRACEPOINTS 31 32 - config FTRACE 33 bool "Kernel Function Tracer" 34 - depends on HAVE_FTRACE 35 depends on DEBUG_KERNEL 36 select FRAME_POINTER 37 select TRACING ··· 52 default n 53 depends on TRACE_IRQFLAGS_SUPPORT 54 depends on GENERIC_TIME 55 - depends on HAVE_FTRACE 56 depends on DEBUG_KERNEL 57 select TRACE_IRQFLAGS 58 select TRACING ··· 75 default n 76 depends on GENERIC_TIME 77 depends on PREEMPT 78 - depends on HAVE_FTRACE 79 depends on DEBUG_KERNEL 80 select TRACING 81 select TRACER_MAX_TRACE ··· 102 103 config SCHED_TRACER 104 bool "Scheduling Latency Tracer" 105 - depends on HAVE_FTRACE 106 depends on DEBUG_KERNEL 107 select TRACING 108 select CONTEXT_SWITCH_TRACER ··· 112 113 config CONTEXT_SWITCH_TRACER 114 bool "Trace process context switches" 115 - depends on HAVE_FTRACE 116 depends on DEBUG_KERNEL 117 select TRACING 118 select MARKERS ··· 121 122 config BOOT_TRACER 123 bool "Trace boot initcalls" 124 - depends on HAVE_FTRACE 125 depends on DEBUG_KERNEL 126 select TRACING 127 help 128 This tracer helps developers to optimize boot times: it records 129 the timings of the initcalls and traces key events and the identity ··· 140 141 config STACK_TRACER 142 bool "Trace max stack" 143 - depends on HAVE_FTRACE 144 depends on DEBUG_KERNEL 145 - select FTRACE 146 select STACKTRACE 147 help 148 This special tracer records the maximum stack footprint of the ··· 159 160 config DYNAMIC_FTRACE 161 bool "enable/disable ftrace tracepoints dynamically" 162 - depends on FTRACE 163 depends on HAVE_DYNAMIC_FTRACE 164 depends on DEBUG_KERNEL 165 default y ··· 169 with a No-Op instruction) as they are called. A table is 170 created to dynamically enable them again. 171 172 - This way a CONFIG_FTRACE kernel is slightly larger, but otherwise 173 has native performance as long as no tracing is active. 174 175 The changes to the code are done by a kernel thread that ··· 194 a series of tests are made to verify that the tracer is 195 functioning properly. It will do tests on all the configured 196 tracers of ftrace.
··· 1 # 2 + # Architectures that offer an FUNCTION_TRACER implementation should 3 + # select HAVE_FUNCTION_TRACER: 4 # 5 6 config NOP_TRACER 7 bool 8 9 + config HAVE_FUNCTION_TRACER 10 bool 11 select NOP_TRACER 12 ··· 28 select STACKTRACE 29 select TRACEPOINTS 30 31 + menu "Tracers" 32 + 33 + config FUNCTION_TRACER 34 bool "Kernel Function Tracer" 35 + depends on HAVE_FUNCTION_TRACER 36 depends on DEBUG_KERNEL 37 select FRAME_POINTER 38 select TRACING ··· 49 default n 50 depends on TRACE_IRQFLAGS_SUPPORT 51 depends on GENERIC_TIME 52 depends on DEBUG_KERNEL 53 select TRACE_IRQFLAGS 54 select TRACING ··· 73 default n 74 depends on GENERIC_TIME 75 depends on PREEMPT 76 depends on DEBUG_KERNEL 77 select TRACING 78 select TRACER_MAX_TRACE ··· 101 102 config SCHED_TRACER 103 bool "Scheduling Latency Tracer" 104 depends on DEBUG_KERNEL 105 select TRACING 106 select CONTEXT_SWITCH_TRACER ··· 112 113 config CONTEXT_SWITCH_TRACER 114 bool "Trace process context switches" 115 depends on DEBUG_KERNEL 116 select TRACING 117 select MARKERS ··· 122 123 config BOOT_TRACER 124 bool "Trace boot initcalls" 125 depends on DEBUG_KERNEL 126 select TRACING 127 + select CONTEXT_SWITCH_TRACER 128 help 129 This tracer helps developers to optimize boot times: it records 130 the timings of the initcalls and traces key events and the identity ··· 141 142 config STACK_TRACER 143 bool "Trace max stack" 144 + depends on HAVE_FUNCTION_TRACER 145 depends on DEBUG_KERNEL 146 + select FUNCTION_TRACER 147 select STACKTRACE 148 help 149 This special tracer records the maximum stack footprint of the ··· 160 161 config DYNAMIC_FTRACE 162 bool "enable/disable ftrace tracepoints dynamically" 163 + depends on FUNCTION_TRACER 164 depends on HAVE_DYNAMIC_FTRACE 165 depends on DEBUG_KERNEL 166 default y ··· 170 with a No-Op instruction) as they are called. A table is 171 created to dynamically enable them again. 172 173 + This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise 174 has native performance as long as no tracing is active. 175 176 The changes to the code are done by a kernel thread that ··· 195 a series of tests are made to verify that the tracer is 196 functioning properly. It will do tests on all the configured 197 tracers of ftrace. 198 + 199 + endmenu
+3 -3
kernel/trace/Makefile
··· 1 2 # Do not instrument the tracer itself: 3 4 - ifdef CONFIG_FTRACE 5 ORIG_CFLAGS := $(KBUILD_CFLAGS) 6 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 7 ··· 10 obj-y += trace_selftest_dynamic.o 11 endif 12 13 - obj-$(CONFIG_FTRACE) += libftrace.o 14 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o 15 16 obj-$(CONFIG_TRACING) += trace.o 17 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 18 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o 19 - obj-$(CONFIG_FTRACE) += trace_functions.o 20 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 21 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 22 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
··· 1 2 # Do not instrument the tracer itself: 3 4 + ifdef CONFIG_FUNCTION_TRACER 5 ORIG_CFLAGS := $(KBUILD_CFLAGS) 6 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 7 ··· 10 obj-y += trace_selftest_dynamic.o 11 endif 12 13 + obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o 14 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o 15 16 obj-$(CONFIG_TRACING) += trace.o 17 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 18 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o 19 + obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o 20 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 21 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 22 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+92 -516
kernel/trace/ftrace.c
··· 25 #include <linux/ftrace.h> 26 #include <linux/sysctl.h> 27 #include <linux/ctype.h> 28 - #include <linux/hash.h> 29 #include <linux/list.h> 30 31 #include <asm/ftrace.h> 32 33 #include "trace.h" 34 35 /* ftrace_enabled is a method to turn ftrace on or off */ 36 int ftrace_enabled __read_mostly; ··· 164 } 165 166 #ifdef CONFIG_DYNAMIC_FTRACE 167 - 168 #ifndef CONFIG_FTRACE_MCOUNT_RECORD 169 - /* 170 - * The hash lock is only needed when the recording of the mcount 171 - * callers are dynamic. That is, by the caller themselves and 172 - * not recorded via the compilation. 173 - */ 174 - static DEFINE_SPINLOCK(ftrace_hash_lock); 175 - #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) 176 - #define ftrace_hash_unlock(flags) \ 177 - spin_unlock_irqrestore(&ftrace_hash_lock, flags) 178 - #else 179 - /* This is protected via the ftrace_lock with MCOUNT_RECORD. */ 180 - #define ftrace_hash_lock(flags) do { (void)(flags); } while (0) 181 - #define ftrace_hash_unlock(flags) do { } while(0) 182 #endif 183 184 /* ··· 175 * it instead. 176 */ 177 static unsigned long mcount_addr = MCOUNT_ADDR; 178 - 179 - static struct task_struct *ftraced_task; 180 181 enum { 182 FTRACE_ENABLE_CALLS = (1 << 0), ··· 186 187 static int ftrace_filtered; 188 static int tracing_on; 189 - static int frozen_record_count; 190 191 - static struct hlist_head ftrace_hash[FTRACE_HASHSIZE]; 192 193 - static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); 194 - 195 - static DEFINE_MUTEX(ftraced_lock); 196 static DEFINE_MUTEX(ftrace_regex_lock); 197 198 struct ftrace_page { ··· 206 static struct ftrace_page *ftrace_pages_start; 207 static struct ftrace_page *ftrace_pages; 208 209 - static int ftraced_trigger; 210 - static int ftraced_suspend; 211 - static int ftraced_stop; 212 - 213 - static int ftrace_record_suspend; 214 - 215 static struct dyn_ftrace *ftrace_free_records; 216 217 218 #ifdef CONFIG_KPROBES 219 static inline void freeze_record(struct dyn_ftrace *rec) 220 { ··· 238 # define unfreeze_record(rec) ({ 0; }) 239 # define record_frozen(rec) ({ 0; }) 240 #endif /* CONFIG_KPROBES */ 241 - 242 - int skip_trace(unsigned long ip) 243 - { 244 - unsigned long fl; 245 - struct dyn_ftrace *rec; 246 - struct hlist_node *t; 247 - struct hlist_head *head; 248 - 249 - if (frozen_record_count == 0) 250 - return 0; 251 - 252 - head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)]; 253 - hlist_for_each_entry_rcu(rec, t, head, node) { 254 - if (rec->ip == ip) { 255 - if (record_frozen(rec)) { 256 - if (rec->flags & FTRACE_FL_FAILED) 257 - return 1; 258 - 259 - if (!(rec->flags & FTRACE_FL_CONVERTED)) 260 - return 1; 261 - 262 - if (!tracing_on || !ftrace_enabled) 263 - return 1; 264 - 265 - if (ftrace_filtered) { 266 - fl = rec->flags & (FTRACE_FL_FILTER | 267 - FTRACE_FL_NOTRACE); 268 - if (!fl || (fl & FTRACE_FL_NOTRACE)) 269 - return 1; 270 - } 271 - } 272 - break; 273 - } 274 - } 275 - 276 - return 0; 277 - } 278 - 279 - static inline int 280 - ftrace_ip_in_hash(unsigned long ip, unsigned long key) 281 - { 282 - struct dyn_ftrace *p; 283 - struct hlist_node *t; 284 - int found = 0; 285 - 286 - hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) { 287 - if (p->ip == ip) { 288 - found = 1; 289 - break; 290 - } 291 - } 292 - 293 - return found; 294 - } 295 - 296 - static inline void 297 - ftrace_add_hash(struct dyn_ftrace *node, unsigned long key) 298 - { 299 - hlist_add_head_rcu(&node->node, &ftrace_hash[key]); 300 - } 301 - 302 - /* called from kstop_machine */ 303 - static inline void ftrace_del_hash(struct dyn_ftrace *node) 304 - { 305 - hlist_del(&node->node); 306 - } 307 308 static void ftrace_free_rec(struct dyn_ftrace *rec) 309 { ··· 269 } 270 } 271 spin_unlock(&ftrace_lock); 272 - 273 } 274 275 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) ··· 280 rec = ftrace_free_records; 281 282 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { 283 - WARN_ON_ONCE(1); 284 ftrace_free_records = NULL; 285 - ftrace_disabled = 1; 286 - ftrace_enabled = 0; 287 return NULL; 288 } 289 ··· 291 } 292 293 if (ftrace_pages->index == ENTRIES_PER_PAGE) { 294 - if (!ftrace_pages->next) 295 - return NULL; 296 ftrace_pages = ftrace_pages->next; 297 } 298 299 return &ftrace_pages->records[ftrace_pages->index++]; 300 } 301 302 - static void 303 ftrace_record_ip(unsigned long ip) 304 { 305 - struct dyn_ftrace *node; 306 - unsigned long flags; 307 - unsigned long key; 308 - int resched; 309 - int cpu; 310 311 if (!ftrace_enabled || ftrace_disabled) 312 - return; 313 314 - resched = need_resched(); 315 - preempt_disable_notrace(); 316 317 - /* 318 - * We simply need to protect against recursion. 319 - * Use the the raw version of smp_processor_id and not 320 - * __get_cpu_var which can call debug hooks that can 321 - * cause a recursive crash here. 322 - */ 323 - cpu = raw_smp_processor_id(); 324 - per_cpu(ftrace_shutdown_disable_cpu, cpu)++; 325 - if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1) 326 - goto out; 327 328 - if (unlikely(ftrace_record_suspend)) 329 - goto out; 330 331 - key = hash_long(ip, FTRACE_HASHBITS); 332 - 333 - WARN_ON_ONCE(key >= FTRACE_HASHSIZE); 334 - 335 - if (ftrace_ip_in_hash(ip, key)) 336 - goto out; 337 - 338 - ftrace_hash_lock(flags); 339 - 340 - /* This ip may have hit the hash before the lock */ 341 - if (ftrace_ip_in_hash(ip, key)) 342 - goto out_unlock; 343 - 344 - node = ftrace_alloc_dyn_node(ip); 345 - if (!node) 346 - goto out_unlock; 347 - 348 - node->ip = ip; 349 - 350 - ftrace_add_hash(node, key); 351 - 352 - ftraced_trigger = 1; 353 - 354 - out_unlock: 355 - ftrace_hash_unlock(flags); 356 - out: 357 - per_cpu(ftrace_shutdown_disable_cpu, cpu)--; 358 - 359 - /* prevent recursion with scheduler */ 360 - if (resched) 361 - preempt_enable_no_resched_notrace(); 362 - else 363 - preempt_enable_notrace(); 364 } 365 366 #define FTRACE_ADDR ((long)(ftrace_caller)) ··· 439 rec->flags |= FTRACE_FL_FAILED; 440 if ((system_state == SYSTEM_BOOTING) || 441 !core_kernel_text(rec->ip)) { 442 - ftrace_del_hash(rec); 443 ftrace_free_rec(rec); 444 } 445 } 446 } 447 } 448 - } 449 - 450 - static void ftrace_shutdown_replenish(void) 451 - { 452 - if (ftrace_pages->next) 453 - return; 454 - 455 - /* allocate another page */ 456 - ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); 457 } 458 459 static void print_ip_ins(const char *fmt, unsigned char *p) ··· 461 { 462 unsigned long ip; 463 unsigned char *nop, *call; 464 - int failed; 465 466 ip = rec->ip; 467 468 nop = ftrace_nop_replace(); 469 call = ftrace_call_replace(ip, mcount_addr); 470 471 - failed = ftrace_modify_code(ip, call, nop); 472 - if (failed) { 473 - switch (failed) { 474 - case 1: 475 - WARN_ON_ONCE(1); 476 pr_info("ftrace faulted on modifying "); 477 print_ip_sym(ip); 478 break; 479 - case 2: 480 - WARN_ON_ONCE(1); 481 pr_info("ftrace failed to modify "); 482 print_ip_sym(ip); 483 print_ip_ins(" expected: ", call); ··· 485 print_ip_ins(" replace: ", nop); 486 printk(KERN_CONT "\n"); 487 break; 488 } 489 490 rec->flags |= FTRACE_FL_FAILED; ··· 502 return 1; } 504 505 - static int __ftrace_update_code(void *ignore); 506 - 507 static int __ftrace_modify_code(void *data) 508 { 509 - unsigned long addr; 510 int *command = data; 511 512 if (*command & FTRACE_ENABLE_CALLS) { 513 - /* 514 - * Update any recorded ips now that we have the 515 - * machine stopped 516 - */ 517 - __ftrace_update_code(NULL); 518 ftrace_replace_code(1); 519 tracing_on = 1; 520 } else if (*command & FTRACE_DISABLE_CALLS) { ··· 517 if (*command & FTRACE_UPDATE_TRACE_FUNC) 518 ftrace_update_ftrace_func(ftrace_trace_function); 519 520 - if (*command & FTRACE_ENABLE_MCOUNT) { 521 - addr = (unsigned long)ftrace_record_ip; 522 - ftrace_mcount_set(&addr); 523 - } else if (*command & FTRACE_DISABLE_MCOUNT) { 524 - addr = (unsigned long)ftrace_stub; 525 - ftrace_mcount_set(&addr); 526 - } 527 - 528 return 0; 529 } 530 ··· 525 stop_machine(__ftrace_modify_code, &command, NULL); 526 } 527 528 - void ftrace_disable_daemon(void) 529 - { 530 - /* Stop the daemon from calling kstop_machine */ 531 - mutex_lock(&ftraced_lock); 532 - ftraced_stop = 1; 533 - mutex_unlock(&ftraced_lock); 534 - 535 - ftrace_force_update(); 536 - } 537 - 538 - void ftrace_enable_daemon(void) 539 - { 540 - mutex_lock(&ftraced_lock); 541 - ftraced_stop = 0; 542 - mutex_unlock(&ftraced_lock); 543 - 544 - ftrace_force_update(); 545 - } 546 - 547 static ftrace_func_t saved_ftrace_func; 548 549 static void ftrace_startup(void) 550 { ··· 536 if (unlikely(ftrace_disabled)) 537 return; 538 539 - mutex_lock(&ftraced_lock); 540 - ftraced_suspend++; 541 - if (ftraced_suspend == 1) 542 command |= FTRACE_ENABLE_CALLS; 543 544 if (saved_ftrace_func != ftrace_trace_function) { ··· 551 552 ftrace_run_update_code(command); 553 out: 554 - mutex_unlock(&ftraced_lock); 555 } 556 557 static void ftrace_shutdown(void) ··· 561 if (unlikely(ftrace_disabled)) 562 return; 563 564 - mutex_lock(&ftraced_lock); 565 - ftraced_suspend--; 566 - if (!ftraced_suspend) 567 command |= FTRACE_DISABLE_CALLS; 568 569 if (saved_ftrace_func != ftrace_trace_function) { ··· 576 577 ftrace_run_update_code(command); 578 out: 579 - mutex_unlock(&ftraced_lock); 580 } 581 582 static void ftrace_startup_sysctl(void) ··· 586 if (unlikely(ftrace_disabled)) 587 return; 588 589 - mutex_lock(&ftraced_lock); 590 /* Force update next time */ 591 saved_ftrace_func = NULL; 592 - /* ftraced_suspend is true if we want ftrace running */ 593 - if (ftraced_suspend) 594 command |= FTRACE_ENABLE_CALLS; 595 596 ftrace_run_update_code(command); 597 - mutex_unlock(&ftraced_lock); 598 } 599 600 static void ftrace_shutdown_sysctl(void) ··· 604 if (unlikely(ftrace_disabled)) 605 return; 606 607 - mutex_lock(&ftraced_lock); 608 - /* ftraced_suspend is true if ftrace is running */ 609 - if (ftraced_suspend) 610 command |= FTRACE_DISABLE_CALLS; 611 612 ftrace_run_update_code(command); 613 - mutex_unlock(&ftraced_lock); 614 } 615 616 static cycle_t ftrace_update_time; 617 static unsigned long ftrace_update_cnt; 618 unsigned long ftrace_update_tot_cnt; 619 620 - static int __ftrace_update_code(void *ignore) 621 { 622 - int i, save_ftrace_enabled; 623 cycle_t start, stop; 624 - struct dyn_ftrace *p; 625 - struct hlist_node *t, *n; 626 - struct hlist_head *head, temp_list; 627 - 628 - /* Don't be recording funcs now */ 629 - ftrace_record_suspend++; 630 - save_ftrace_enabled = ftrace_enabled; 631 - ftrace_enabled = 0; 632 633 start = ftrace_now(raw_smp_processor_id()); 634 ftrace_update_cnt = 0; 635 636 - /* No locks needed, the machine is stopped! */ 637 - for (i = 0; i < FTRACE_HASHSIZE; i++) { 638 - INIT_HLIST_HEAD(&temp_list); 639 - head = &ftrace_hash[i]; 640 641 - /* all CPUS are stopped, we are safe to modify code */ 642 - hlist_for_each_entry_safe(p, t, n, head, node) { 643 - /* Skip over failed records which have not been 644 - * freed. */ 645 - if (p->flags & FTRACE_FL_FAILED) 646 - continue; 647 648 - /* Unconverted records are always at the head of the 649 - * hash bucket. Once we encounter a converted record, 650 - * simply skip over to the next bucket. Saves ftraced 651 - * some processor cycles (ftrace does its bid for 652 - * global warming :-p ). */ 653 - if (p->flags & (FTRACE_FL_CONVERTED)) 654 - break; 655 656 - /* Ignore updates to this record's mcount site. 657 - * Reintroduce this record at the head of this 658 - * bucket to attempt to "convert" it again if 659 - * the kprobe on it is unregistered before the 660 - * next run. */ 661 - if (get_kprobe((void *)p->ip)) { 662 - ftrace_del_hash(p); 663 - INIT_HLIST_NODE(&p->node); 664 - hlist_add_head(&p->node, &temp_list); 665 - freeze_record(p); 666 - continue; 667 - } else { 668 - unfreeze_record(p); 669 - } 670 - 671 - /* convert record (i.e, patch mcount-call with NOP) */ 672 - if (ftrace_code_disable(p)) { 673 - p->flags |= FTRACE_FL_CONVERTED; 674 - ftrace_update_cnt++; 675 - } else { 676 - if ((system_state == SYSTEM_BOOTING) || 677 - !core_kernel_text(p->ip)) { 678 - ftrace_del_hash(p); 679 - ftrace_free_rec(p); 680 - } 681 - } 682 - } 683 - 684 - hlist_for_each_entry_safe(p, t, n, &temp_list, node) { 685 - hlist_del(&p->node); 686 - INIT_HLIST_NODE(&p->node); 687 - hlist_add_head(&p->node, head); 688 - } 689 } 690 691 stop = ftrace_now(raw_smp_processor_id()); 692 ftrace_update_time = stop - start; 693 ftrace_update_tot_cnt += ftrace_update_cnt; 694 - ftraced_trigger = 0; 695 - 696 - ftrace_enabled = save_ftrace_enabled; 697 - ftrace_record_suspend--; 698 699 return 0; 700 - } 701 - 702 - static int ftrace_update_code(void) 703 - { 704 - if (unlikely(ftrace_disabled) || 705 - !ftrace_enabled || !ftraced_trigger) 706 - return 0; 707 - 708 - stop_machine(__ftrace_update_code, NULL, NULL); 709 - 710 - return 1; 711 } 712 713 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) ··· 676 pg = ftrace_pages = ftrace_pages_start; 677 678 cnt = num_to_init / ENTRIES_PER_PAGE; 679 - pr_info("ftrace: allocating %ld hash entries in %d pages\n", 680 num_to_init, cnt); 681 682 for (i = 0; i < cnt; i++) { ··· 1185 } 1186 1187 mutex_lock(&ftrace_sysctl_lock); 1188 - mutex_lock(&ftraced_lock); 1189 - if (iter->filtered && ftraced_suspend && ftrace_enabled) 1190 ftrace_run_update_code(FTRACE_ENABLE_CALLS); 1191 - mutex_unlock(&ftraced_lock); 1192 mutex_unlock(&ftrace_sysctl_lock); 1193 1194 kfree(iter); ··· 1206 ftrace_notrace_release(struct inode *inode, struct file *file) 1207 { 1208 return ftrace_regex_release(inode, file, 0); 1209 - } 1210 - 1211 - static ssize_t 1212 - ftraced_read(struct file *filp, char __user *ubuf, 1213 - size_t cnt, loff_t *ppos) 1214 - { 1215 - /* don't worry about races */ 1216 - char *buf = ftraced_stop ? "disabled\n" : "enabled\n"; 1217 - int r = strlen(buf); 1218 - 1219 - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 1220 - } 1221 - 1222 - static ssize_t 1223 - ftraced_write(struct file *filp, const char __user *ubuf, 1224 - size_t cnt, loff_t *ppos) 1225 - { 1226 - char buf[64]; 1227 - long val; 1228 - int ret; 1229 - 1230 - if (cnt >= sizeof(buf)) 1231 - return -EINVAL; 1232 - 1233 - if (copy_from_user(&buf, ubuf, cnt)) 1234 - return -EFAULT; 1235 - 1236 - if (strncmp(buf, "enable", 6) == 0) 1237 - val = 1; 1238 - else if (strncmp(buf, "disable", 7) == 0) 1239 - val = 0; 1240 - else { 1241 - buf[cnt] = 0; 1242 - 1243 - ret = strict_strtoul(buf, 10, &val); 1244 - if (ret < 0) 1245 - return ret; 1246 - 1247 - val = !!val; 1248 - } 1249 - 1250 - if (val) 1251 - ftrace_enable_daemon(); 1252 - else 1253 - ftrace_disable_daemon(); 1254 - 1255 - filp->f_pos += cnt; 1256 - 1257 - return cnt; 1258 } 1259 1260 static struct file_operations ftrace_avail_fops = { ··· 1238 .release = ftrace_notrace_release, 1239 }; 1240 1241 - static struct file_operations ftraced_fops = { 1242 - .open = tracing_open_generic, 1243 - .read = ftraced_read, 1244 - .write = ftraced_write, 1245 - }; 1246 - 1247 - /** 1248 - * ftrace_force_update - force an update to all recording ftrace functions 1249 - */ 1250 - int ftrace_force_update(void) 1251 - { 1252 - int ret = 0; 1253 - 1254 - if (unlikely(ftrace_disabled)) 1255 - return -ENODEV; 1256 - 1257 - mutex_lock(&ftrace_sysctl_lock); 1258 - mutex_lock(&ftraced_lock); 1259 - 1260 - /* 1261 - * If ftraced_trigger is not set, then there is nothing 1262 - * to update. 1263 - */ 1264 - if (ftraced_trigger && !ftrace_update_code()) 1265 - ret = -EBUSY; 1266 - 1267 - mutex_unlock(&ftraced_lock); 1268 - mutex_unlock(&ftrace_sysctl_lock); 1269 - 1270 - return ret; 1271 - } 1272 - 1273 - static void ftrace_force_shutdown(void) 1274 - { 1275 - struct task_struct *task; 1276 - int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC; 1277 - 1278 - mutex_lock(&ftraced_lock); 1279 - task = ftraced_task; 1280 - ftraced_task = NULL; 1281 - ftraced_suspend = -1; 1282 - ftrace_run_update_code(command); 1283 - mutex_unlock(&ftraced_lock); 1284 - 1285 - if (task) 1286 - kthread_stop(task); 1287 - } 1288 - 1289 static __init int ftrace_init_debugfs(void) 1290 { 1291 struct dentry *d_tracer; ··· 1268 pr_warning("Could not create debugfs " 1269 "'set_ftrace_notrace' entry\n"); 1270 1271 - entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer, 1272 - NULL, &ftraced_fops); 1273 - if (!entry) 1274 - pr_warning("Could not create debugfs " 1275 - "'ftraced_enabled' entry\n"); 1276 return 0; 1277 } 1278 1279 fs_initcall(ftrace_init_debugfs); 1280 1281 - #ifdef CONFIG_FTRACE_MCOUNT_RECORD 1282 static int ftrace_convert_nops(unsigned long *start, 1283 unsigned long *end) 1284 { ··· 1280 unsigned long addr; 1281 unsigned long flags; 1282 1283 p = start; 1284 while (p < end) { 1285 addr = ftrace_call_adjust(*p++); 1286 - /* should not be called from interrupt context */ 1287 - spin_lock(&ftrace_lock); 1288 ftrace_record_ip(addr); 1289 - spin_unlock(&ftrace_lock); 1290 - ftrace_shutdown_replenish(); 1291 } 1292 1293 - /* p is ignored */ 1294 local_irq_save(flags); 1295 - __ftrace_update_code(p); 1296 local_irq_restore(flags); 1297 1298 return 0; 1299 } ··· 1337 failed: 1338 ftrace_disabled = 1; 1339 } 1340 - #else /* CONFIG_FTRACE_MCOUNT_RECORD */ 1341 - static int ftraced(void *ignore) 1342 - { 1343 - unsigned long usecs; 1344 - 1345 - while (!kthread_should_stop()) { 1346 - 1347 - set_current_state(TASK_INTERRUPTIBLE); 1348 - 1349 - /* check once a second */ 1350 - schedule_timeout(HZ); 1351 - 1352 - if (unlikely(ftrace_disabled)) 1353 - continue; 1354 - 1355 - mutex_lock(&ftrace_sysctl_lock); 1356 - mutex_lock(&ftraced_lock); 1357 - if (!ftraced_suspend && !ftraced_stop && 1358 - ftrace_update_code()) { 1359 - usecs = nsecs_to_usecs(ftrace_update_time); 1360 - if (ftrace_update_tot_cnt > 100000) { 1361 - ftrace_update_tot_cnt = 0; 1362 - pr_info("hm, dftrace overflow: %lu change%s" 1363 - " (%lu total) in %lu usec%s\n", 1364 - ftrace_update_cnt, 1365 - ftrace_update_cnt != 1 ? "s" : "", 1366 - ftrace_update_tot_cnt, 1367 - usecs, usecs != 1 ? "s" : ""); 1368 - ftrace_disabled = 1; 1369 - WARN_ON_ONCE(1); 1370 - } 1371 - } 1372 - mutex_unlock(&ftraced_lock); 1373 - mutex_unlock(&ftrace_sysctl_lock); 1374 - 1375 - ftrace_shutdown_replenish(); 1376 - } 1377 - __set_current_state(TASK_RUNNING); 1378 - return 0; 1379 - } 1380 - 1381 - static int __init ftrace_dynamic_init(void) 1382 - { 1383 - struct task_struct *p; 1384 - unsigned long addr; 1385 - int ret; 1386 - 1387 - addr = (unsigned long)ftrace_record_ip; 1388 - 1389 - stop_machine(ftrace_dyn_arch_init, &addr, NULL); 1390 - 1391 - /* ftrace_dyn_arch_init places the return code in addr */ 1392 - if (addr) { 1393 - ret = (int)addr; 1394 - goto failed; 1395 - } 1396 - 1397 - ret = ftrace_dyn_table_alloc(NR_TO_INIT); 1398 - if (ret) 1399 - goto failed; 1400 - 1401 - p = kthread_run(ftraced, NULL, "ftraced"); 1402 - if (IS_ERR(p)) { 1403 - ret = -1; 1404 - goto failed; 1405 - } 1406 - 1407 - last_ftrace_enabled = ftrace_enabled = 1; 1408 - ftraced_task = p; 1409 - 1410 - return 0; 1411 - 1412 - failed: 1413 - ftrace_disabled = 1; 1414 - return ret; 1415 - } 1416 - 1417 - core_initcall(ftrace_dynamic_init); 1418 - #endif /* CONFIG_FTRACE_MCOUNT_RECORD */ 1419 1420 #else 1421 # define ftrace_startup() do { } while (0) 1422 # define ftrace_shutdown() do { } while (0) 1423 # define ftrace_startup_sysctl() do { } while (0) 1424 # define ftrace_shutdown_sysctl() do { } while (0) 1425 - # define ftrace_force_shutdown() do { } while (0) 1426 #endif /* CONFIG_DYNAMIC_FTRACE */ 1427 1428 /** 1429 - * ftrace_kill_atomic - kill ftrace from critical sections 1430 * 1431 * This function should be used by panic code. It stops ftrace 1432 * but in a not so nice way. If you need to simply kill ftrace 1433 * from a non-atomic section, use ftrace_kill. 1434 */ 1435 - void ftrace_kill_atomic(void) 1436 - { 1437 - ftrace_disabled = 1; 1438 - ftrace_enabled = 0; 1439 - #ifdef CONFIG_DYNAMIC_FTRACE 1440 - ftraced_suspend = -1; 1441 - #endif 1442 - clear_ftrace_function(); 1443 - } 1444 - 1445 - /** 1446 - * ftrace_kill - totally shutdown ftrace 1447 - * 1448 - * This is a safety measure. If something was detected that seems 1449 - * wrong, calling this function will keep ftrace from doing 1450 - * any more modifications, and updates. 1451 - * used when something went wrong. 1452 - */ 1453 void ftrace_kill(void) 1454 { 1455 - mutex_lock(&ftrace_sysctl_lock); 1456 ftrace_disabled = 1; 1457 ftrace_enabled = 0; 1458 - 1459 clear_ftrace_function(); 1460 - mutex_unlock(&ftrace_sysctl_lock); 1461 - 1462 - /* Try to totally disable ftrace */ 1463 - ftrace_force_shutdown(); 1464 } 1465 1466 /** ··· 1445 mutex_unlock(&ftrace_sysctl_lock); 1446 return ret; 1447 }
··· 25 #include <linux/ftrace.h> 26 #include <linux/sysctl.h> 27 #include <linux/ctype.h> 28 #include <linux/list.h> 29 30 #include <asm/ftrace.h> 31 32 #include "trace.h" 33 + 34 + #define FTRACE_WARN_ON(cond) \ 35 + do { \ 36 + if (WARN_ON(cond)) \ 37 + ftrace_kill(); \ 38 + } while (0) 39 + 40 + #define FTRACE_WARN_ON_ONCE(cond) \ 41 + do { \ 42 + if (WARN_ON_ONCE(cond)) \ 43 + ftrace_kill(); \ 44 + } while (0) 45 46 /* ftrace_enabled is a method to turn ftrace on or off */ 47 int ftrace_enabled __read_mostly; ··· 153 } 154 155 #ifdef CONFIG_DYNAMIC_FTRACE 156 #ifndef CONFIG_FTRACE_MCOUNT_RECORD 157 + # error Dynamic ftrace depends on MCOUNT_RECORD 158 #endif 159 160 /* ··· 177 * it instead. 178 */ 179 static unsigned long mcount_addr = MCOUNT_ADDR; 180 181 enum { 182 FTRACE_ENABLE_CALLS = (1 << 0), ··· 190 191 static int ftrace_filtered; 192 static int tracing_on; 193 194 + static LIST_HEAD(ftrace_new_addrs); 195 196 static DEFINE_MUTEX(ftrace_regex_lock); 197 198 struct ftrace_page { ··· 214 static struct ftrace_page *ftrace_pages_start; 215 static struct ftrace_page *ftrace_pages; 216 217 static struct dyn_ftrace *ftrace_free_records; 218 219 220 #ifdef CONFIG_KPROBES 221 + 222 + static int frozen_record_count; 223 + 224 static inline void freeze_record(struct dyn_ftrace *rec) 225 { ··· 249 # define unfreeze_record(rec) ({ 0; }) 250 # define record_frozen(rec) ({ 0; }) 251 #endif /* CONFIG_KPROBES */ 252 253 static void ftrace_free_rec(struct dyn_ftrace *rec) 254 { ··· 346 } 347 } 348 spin_unlock(&ftrace_lock); 349 } 350 351 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) ··· 358 rec = ftrace_free_records; 359 360 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) { 361 + FTRACE_WARN_ON_ONCE(1); 362 ftrace_free_records = NULL; 363 return NULL; 364 } 365 ··· 371 } 372 373 if (ftrace_pages->index == ENTRIES_PER_PAGE) { 374 + if (!ftrace_pages->next) { 375 + /* allocate another page */ 376 + ftrace_pages->next = 377 + (void *)get_zeroed_page(GFP_KERNEL); 378 + if (!ftrace_pages->next) 379 + return NULL; 380 + } 381 ftrace_pages = ftrace_pages->next; 382 } 383 384 return &ftrace_pages->records[ftrace_pages->index++]; 385 } 386 387 + static struct dyn_ftrace * 388 ftrace_record_ip(unsigned long ip) 389 { 390 + struct dyn_ftrace *rec; 391 392 if (!ftrace_enabled || ftrace_disabled) 393 + return NULL; 394 395 + rec = ftrace_alloc_dyn_node(ip); 396 + if (!rec) 397 + return NULL; 398 399 + rec->ip = ip; 400 401 + list_add(&rec->list, &ftrace_new_addrs); 402 403 + return rec; 404 } 405 406 #define FTRACE_ADDR ((long)(ftrace_caller)) ··· 559 rec->flags |= FTRACE_FL_FAILED; 560 if ((system_state == SYSTEM_BOOTING) || 561 !core_kernel_text(rec->ip)) { 562 ftrace_free_rec(rec); 563 } 564 } 565 } 566 } 567 } 568 569 static void print_ip_ins(const char *fmt, unsigned char *p) ··· 591 { 592 unsigned long ip; 593 unsigned char *nop, *call; 594 + int ret; 595 596 ip = rec->ip; 597 598 nop = ftrace_nop_replace(); 599 call = ftrace_call_replace(ip, mcount_addr); 600 601 + ret = ftrace_modify_code(ip, call, nop); 602 + if (ret) { 603 + switch (ret) { 604 + case -EFAULT: 605 + FTRACE_WARN_ON_ONCE(1); 606 pr_info("ftrace faulted on modifying "); 607 print_ip_sym(ip); 608 break; 609 + case -EINVAL: 610 + FTRACE_WARN_ON_ONCE(1); 611 pr_info("ftrace failed to modify "); 612 print_ip_sym(ip); 613 print_ip_ins(" expected: ", call); ··· 615 print_ip_ins(" replace: ", nop); 616 printk(KERN_CONT "\n"); 617 break; 618 + case -EPERM: 619 + FTRACE_WARN_ON_ONCE(1); 620 + pr_info("ftrace faulted on writing "); 621 + print_ip_sym(ip); 622 + break; 623 + default: 624 + FTRACE_WARN_ON_ONCE(1); 625 + pr_info("ftrace faulted on unknown error "); 626 + print_ip_sym(ip); 627 } 628 629 rec->flags |= FTRACE_FL_FAILED; ··· 623 return 1; 624 } 625 626 static int __ftrace_modify_code(void *data) 627 { 628 int *command = data; 629 630 if (*command & FTRACE_ENABLE_CALLS) { 631 ftrace_replace_code(1); 632 tracing_on = 1; 633 } else if (*command & FTRACE_DISABLE_CALLS) { ··· 646 if (*command & FTRACE_UPDATE_TRACE_FUNC) 647 ftrace_update_ftrace_func(ftrace_trace_function); 648 649 return 0; 650 } 651 ··· 662 stop_machine(__ftrace_modify_code, &command, NULL); 663 } 664 665 static ftrace_func_t saved_ftrace_func; 666 + static int ftrace_start; 667 + static DEFINE_MUTEX(ftrace_start_lock); 668 669 static void ftrace_startup(void) 670 { ··· 690 if (unlikely(ftrace_disabled)) 691 return; 692 693 + mutex_lock(&ftrace_start_lock); 694 + ftrace_start++; 695 + if (ftrace_start == 1) 696 command |= FTRACE_ENABLE_CALLS; 697 698 if (saved_ftrace_func != ftrace_trace_function) { ··· 705 706 ftrace_run_update_code(command); 707 out: 708 + mutex_unlock(&ftrace_start_lock); 709 } 710 711 static void ftrace_shutdown(void) ··· 715 if (unlikely(ftrace_disabled)) 716 return; 717 718 + mutex_lock(&ftrace_start_lock); 719 + ftrace_start--; 720 + if (!ftrace_start) 721 command |= FTRACE_DISABLE_CALLS; 722 723 if (saved_ftrace_func != ftrace_trace_function) { ··· 730 731 ftrace_run_update_code(command); 732 out: 733 + mutex_unlock(&ftrace_start_lock); 734 } 735 736 static void ftrace_startup_sysctl(void) ··· 740 if (unlikely(ftrace_disabled)) 741 return; 742 743 + mutex_lock(&ftrace_start_lock); 744 /* Force update next time */ 745 saved_ftrace_func = NULL; 746 + /* ftrace_start is true if we want ftrace running */ 747 + if (ftrace_start) 748 command |= FTRACE_ENABLE_CALLS; 749 750 ftrace_run_update_code(command); 751 + mutex_unlock(&ftrace_start_lock); 752 } 753 754 static void ftrace_shutdown_sysctl(void) ··· 758 if (unlikely(ftrace_disabled)) 759 return; 760 761 + mutex_lock(&ftrace_start_lock); 762 + /* ftrace_start is true if ftrace is running */ 763 + if (ftrace_start) 764 command |= FTRACE_DISABLE_CALLS; 765 766 ftrace_run_update_code(command); 767 + mutex_unlock(&ftrace_start_lock); 768 } 769 770 static cycle_t ftrace_update_time; 771 static unsigned long ftrace_update_cnt; 772 unsigned long ftrace_update_tot_cnt; 773 774 + static int ftrace_update_code(void) 775 { 776 + struct dyn_ftrace *p, *t; 777 cycle_t start, stop; 778 779 start = ftrace_now(raw_smp_processor_id()); 780 ftrace_update_cnt = 0; 781 782 + list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) { 783 784 + /* If something went wrong, bail without enabling anything */ 785 + if (unlikely(ftrace_disabled)) 786 + return -1; 787 788 + list_del_init(&p->list); 789 790 + /* convert record (i.e, patch mcount-call with NOP) */ 791 + if (ftrace_code_disable(p)) { 792 + p->flags |= FTRACE_FL_CONVERTED; 793 + ftrace_update_cnt++; 794 + } else 795 + ftrace_free_rec(p); 796 } 797 798 stop = ftrace_now(raw_smp_processor_id()); 799 ftrace_update_time = stop - start; 800 ftrace_update_tot_cnt += ftrace_update_cnt; 801 802 return 0; 803 } 804 805 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) ··· 892 pg = ftrace_pages = ftrace_pages_start; 893 894 cnt = num_to_init / ENTRIES_PER_PAGE; 895 + pr_info("ftrace: allocating %ld entries in %d pages\n", 896 num_to_init, cnt); 897 898 for (i = 0;
i < cnt; i++) {
···
1401 }
1402
1403 mutex_lock(&ftrace_sysctl_lock);
1404 + mutex_lock(&ftrace_start_lock);
1405 + if (iter->filtered && ftrace_start && ftrace_enabled)
1406 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1407 + mutex_unlock(&ftrace_start_lock);
1408 mutex_unlock(&ftrace_sysctl_lock);
1409
1410 kfree(iter);
···
1422 ftrace_notrace_release(struct inode *inode, struct file *file)
1423 {
1424 return ftrace_regex_release(inode, file, 0);
1425 }
1426
1427 static struct file_operations ftrace_avail_fops = {
···
1503 .release = ftrace_notrace_release,
1504 };
1505
1506 static __init int ftrace_init_debugfs(void)
1507 {
1508 struct dentry *d_tracer;
···
1581 pr_warning("Could not create debugfs "
1582 "'set_ftrace_notrace' entry\n");
1583
1584 return 0;
1585 }
1586
1587 fs_initcall(ftrace_init_debugfs);
1588
1589 static int ftrace_convert_nops(unsigned long *start,
1590 unsigned long *end)
1591 {
···
1599 unsigned long addr;
1600 unsigned long flags;
1601
1602 + mutex_lock(&ftrace_start_lock);
1603 p = start;
1604 while (p < end) {
1605 addr = ftrace_call_adjust(*p++);
1606 ftrace_record_ip(addr);
1607 }
1608
1609 + /* disable interrupts to prevent kstop machine */
1610 local_irq_save(flags);
1611 + ftrace_update_code();
1612 local_irq_restore(flags);
1613 + mutex_unlock(&ftrace_start_lock);
1614
1615 return 0;
1616 }
···
1658 failed:
1659 ftrace_disabled = 1;
1660 }
1661
1662 #else
1663 # define ftrace_startup() do { } while (0)
1664 # define ftrace_shutdown() do { } while (0)
1665 # define ftrace_startup_sysctl() do { } while (0)
1666 # define ftrace_shutdown_sysctl() do { } while (0)
1667 #endif /* CONFIG_DYNAMIC_FTRACE */
1668
1669 /**
1670 + * ftrace_kill - kill ftrace
1671 *
1672 * This function should be used by panic code. It stops ftrace
1673 * but in a not so nice way. It is safe to call from atomic
1674 * context, so there is no separate non-atomic variant anymore.
1675 */
1676 void ftrace_kill(void)
1677 {
1678 ftrace_disabled = 1;
1679 ftrace_enabled = 0;
1680 clear_ftrace_function();
1681 }
1682
1683 /**
···
1870 mutex_unlock(&ftrace_sysctl_lock);
1871 return ret;
1872 }
1873 +
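The ftrace_start counter above replaces the old daemon/suspend bookkeeping with plain reference counting under ftrace_start_lock: the first ftrace_startup() patches the mcount call sites in, the last ftrace_shutdown() patches them back out, and the sysctl paths only consult the counter. A minimal user-space sketch of the same pattern, assuming pthread locking in place of the kernel mutex and using illustrative names (tracer_startup, patch_calls_in, etc. are not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t start_lock = PTHREAD_MUTEX_INITIALIZER;
static int start_count;

/* Stand-ins for running FTRACE_ENABLE_CALLS / FTRACE_DISABLE_CALLS. */
static void patch_calls_in(void)  { puts("calls patched in"); }
static void patch_calls_out(void) { puts("calls patched out"); }

static void tracer_startup(void)
{
	pthread_mutex_lock(&start_lock);
	if (++start_count == 1)		/* first user enables tracing */
		patch_calls_in();
	pthread_mutex_unlock(&start_lock);
}

static void tracer_shutdown(void)
{
	pthread_mutex_lock(&start_lock);
	if (--start_count == 0)		/* last user disables tracing */
		patch_calls_out();
	pthread_mutex_unlock(&start_lock);
}

int main(void)
{
	tracer_startup();	/* patches in */
	tracer_startup();	/* already on, only counts */
	tracer_shutdown();	/* one user still left */
	tracer_shutdown();	/* patches out */
	return 0;
}

The counter also makes the sysctl toggles cheap: they re-run the patching only when the count says tracing should currently be live.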
+4 -2
kernel/trace/ring_buffer.c
··· 130 static inline void free_buffer_page(struct buffer_page *bpage) 131 { 132 if (bpage->page) 133 - __free_page(bpage->page); 134 kfree(bpage); 135 } 136 ··· 966 if (unlikely(*delta > (1ULL << 59) && !once++)) { 967 printk(KERN_WARNING "Delta way too big! %llu" 968 " ts=%llu write stamp = %llu\n", 969 - *delta, *ts, cpu_buffer->write_stamp); 970 WARN_ON(1); 971 } 972
··· 130 static inline void free_buffer_page(struct buffer_page *bpage) 131 { 132 if (bpage->page) 133 + free_page((unsigned long)bpage->page); 134 kfree(bpage); 135 } 136 ··· 966 if (unlikely(*delta > (1ULL << 59) && !once++)) { 967 printk(KERN_WARNING "Delta way too big! %llu" 968 " ts=%llu write stamp = %llu\n", 969 + (unsigned long long)*delta, 970 + (unsigned long long)*ts, 971 + (unsigned long long)cpu_buffer->write_stamp); 972 WARN_ON(1); 973 } 974
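The added casts are the portable way to hand a u64 to printk: u64 is unsigned long on some 64-bit architectures and unsigned long long on the rest, so only an explicit cast reliably matches the %llu specifier on both. A small user-space illustration of the same idiom (values are made up):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t delta = 1ULL << 60;	/* arbitrary oversized delta */

	/* uint64_t may be unsigned long on LP64 targets; the cast
	 * makes the %llu conversion correct everywhere. */
	printf("Delta way too big! %llu\n", (unsigned long long)delta);

	/* <inttypes.h> offers PRIu64 as the other portable route. */
	printf("Delta way too big! %" PRIu64 "\n", delta);
	return 0;
}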
+7 -8
kernel/trace/trace.c
··· 34 35 #include <linux/stacktrace.h> 36 #include <linux/ring_buffer.h> 37 38 #include "trace.h" 39 ··· 852 preempt_enable_notrace(); 853 } 854 855 - #ifdef CONFIG_FTRACE 856 static void 857 function_trace_call(unsigned long ip, unsigned long parent_ip) 858 { ··· 864 int pc; 865 866 if (unlikely(!ftrace_function_enabled)) 867 - return; 868 - 869 - if (skip_trace(ip)) 870 return; 871 872 pc = preempt_count(); ··· 2377 int i; 2378 size_t ret; 2379 2380 if (cnt > max_tracer_type_len) 2381 cnt = max_tracer_type_len; 2382 - ret = cnt; 2383 2384 if (copy_from_user(&buf, ubuf, cnt)) 2385 return -EFAULT; ··· 2413 out: 2414 mutex_unlock(&trace_types_lock); 2415 2416 - if (ret == cnt) 2417 - filp->f_pos += cnt; 2418 2419 return ret; 2420 } ··· 3096 dump_ran = 1; 3097 3098 /* No turning back! */ 3099 - ftrace_kill_atomic(); 3100 3101 for_each_tracing_cpu(cpu) { 3102 atomic_inc(&global_trace.data[cpu]->disabled);
··· 34 35 #include <linux/stacktrace.h> 36 #include <linux/ring_buffer.h> 37 + #include <linux/irqflags.h> 38 39 #include "trace.h" 40 ··· 851 preempt_enable_notrace(); 852 } 853 854 + #ifdef CONFIG_FUNCTION_TRACER 855 static void 856 function_trace_call(unsigned long ip, unsigned long parent_ip) 857 { ··· 863 int pc; 864 865 if (unlikely(!ftrace_function_enabled)) 866 return; 867 868 pc = preempt_count(); ··· 2379 int i; 2380 size_t ret; 2381 2382 + ret = cnt; 2383 + 2384 if (cnt > max_tracer_type_len) 2385 cnt = max_tracer_type_len; 2386 2387 if (copy_from_user(&buf, ubuf, cnt)) 2388 return -EFAULT; ··· 2414 out: 2415 mutex_unlock(&trace_types_lock); 2416 2417 + if (ret > 0) 2418 + filp->f_pos += ret; 2419 2420 return ret; 2421 } ··· 3097 dump_ran = 1; 3098 3099 /* No turning back! */ 3100 + ftrace_kill(); 3101 3102 for_each_tracing_cpu(cpu) { 3103 atomic_inc(&global_trace.data[cpu]->disabled);
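The reordered ret/cnt logic restores the usual write(2) contract: report the whole buffer as consumed even when the tracer name gets truncated to max_tracer_type_len, then advance f_pos by exactly the value returned so userspace never loops resubmitting the tail. A hedged sketch of that contract, with illustrative names (tracer_name_write and MAX_TRACER_NAME are not the kernel's):

#include <stddef.h>
#include <string.h>

#define MAX_TRACER_NAME 12	/* assumed cap, like max_tracer_type_len */

static long tracer_name_write(char *dst, const char *buf, size_t cnt,
			      long *ppos)
{
	long ret = (long)cnt;		/* everything counts as consumed */

	if (cnt > MAX_TRACER_NAME)
		cnt = MAX_TRACER_NAME;	/* but only this much is copied */

	memcpy(dst, buf, cnt);
	dst[cnt] = '\0';

	if (ret > 0)
		*ppos += ret;		/* mirror the filp->f_pos update */
	return ret;
}

int main(void)
{
	char name[MAX_TRACER_NAME + 1];
	long pos = 0;

	return tracer_name_write(name, "function", 8, &pos) == 8 ? 0 : 1;
}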
+1 -1
kernel/trace/trace.h
··· 335 336 extern cycle_t ftrace_now(int cpu); 337 338 - #ifdef CONFIG_FTRACE 339 void tracing_start_function_trace(void); 340 void tracing_stop_function_trace(void); 341 #else
··· 335 336 extern cycle_t ftrace_now(int cpu); 337 338 + #ifdef CONFIG_FUNCTION_TRACER 339 void tracing_start_function_trace(void); 340 void tracing_stop_function_trace(void); 341 #else
+1 -1
kernel/trace/trace_functions.c
··· 64 65 static struct tracer function_trace __read_mostly = 66 { 67 - .name = "ftrace", 68 .init = function_trace_init, 69 .reset = function_trace_reset, 70 .ctrl_update = function_trace_ctrl_update,
··· 64 65 static struct tracer function_trace __read_mostly = 66 { 67 + .name = "function", 68 .init = function_trace_init, 69 .reset = function_trace_reset, 70 .ctrl_update = function_trace_ctrl_update,
+2 -2
kernel/trace/trace_irqsoff.c
··· 63 */ 64 static __cacheline_aligned_in_smp unsigned long max_sequence; 65 66 - #ifdef CONFIG_FTRACE 67 /* 68 * irqsoff uses its own tracer function to keep the overhead down: 69 */ ··· 104 { 105 .func = irqsoff_tracer_call, 106 }; 107 - #endif /* CONFIG_FTRACE */ 108 109 /* 110 * Should this new latency be reported/recorded?
··· 63 */ 64 static __cacheline_aligned_in_smp unsigned long max_sequence; 65 66 + #ifdef CONFIG_FUNCTION_TRACER 67 /* 68 * irqsoff uses its own tracer function to keep the overhead down: 69 */ ··· 104 { 105 .func = irqsoff_tracer_call, 106 }; 107 + #endif /* CONFIG_FUNCTION_TRACER */ 108 109 /* 110 * Should this new latency be reported/recorded?
+2 -2
kernel/trace/trace_sched_wakeup.c
···
31
32 static void __wakeup_reset(struct trace_array *tr);
33
34 - #ifdef CONFIG_FTRACE
35 /*
36 * wakeup uses its own tracer function to keep the overhead down:
37 */
···
96 {
97 .func = wakeup_tracer_call,
98 };
99 - #endif /* CONFIG_FTRACE */
100
101 /*
102 * Should this new latency be reported/recorded?
···
31
32 static void __wakeup_reset(struct trace_array *tr);
33
34 + #ifdef CONFIG_FUNCTION_TRACER
35 /*
36 * wakeup uses its own tracer function to keep the overhead down:
37 */
···
96 {
97 .func = wakeup_tracer_call,
98 };
99 + #endif /* CONFIG_FUNCTION_TRACER */
100
101 /*
102 * Should this new latency be reported/recorded?
+2 -16
kernel/trace/trace_selftest.c
···
70 return ret;
71 }
72
73 - #ifdef CONFIG_FTRACE
74
75 #ifdef CONFIG_DYNAMIC_FTRACE
76
···
98
99 /* passed in by parameter to keep gcc from optimizing */
100 func();
101 -
102 - /* update the records */
103 - ret = ftrace_force_update();
104 - if (ret) {
105 - printk(KERN_CONT ".. ftraced failed .. ");
106 - return ret;
107 - }
108
109 /*
110 * Some archs *cough*PowerPC*cough* add characters to the
···
176 /* make sure msleep has been recorded */
177 msleep(1);
178
179 - /* force the recorded functions to be traced */
180 - ret = ftrace_force_update();
181 - if (ret) {
182 - printk(KERN_CONT ".. ftraced failed .. ");
183 - return ret;
184 - }
185 -
186 /* start the tracing */
187 ftrace_enabled = 1;
188 tracer_enabled = 1;
···
212
213 return ret;
214 }
215 - #endif /* CONFIG_FTRACE */
216
217 #ifdef CONFIG_IRQSOFF_TRACER
218 int
···
70 return ret;
71 }
72
73 + #ifdef CONFIG_FUNCTION_TRACER
74
75 #ifdef CONFIG_DYNAMIC_FTRACE
76
···
98
99 /* passed in by parameter to keep gcc from optimizing */
100 func();
101
102 /*
103 * Some archs *cough*PowerPC*cough* add characters to the
···
183 /* make sure msleep has been recorded */
184 msleep(1);
185
186 /* start the tracing */
187 ftrace_enabled = 1;
188 tracer_enabled = 1;
···
226
227 return ret;
228 }
229 + #endif /* CONFIG_FUNCTION_TRACER */
230
231 #ifdef CONFIG_IRQSOFF_TRACER
232 int
+4
kernel/trace/trace_stack.c
··· 44 if (this_size <= max_stack_size) 45 return; 46 47 raw_local_irq_save(flags); 48 __raw_spin_lock(&max_stack_lock); 49
··· 44 if (this_size <= max_stack_size) 45 return; 46 47 + /* we do not handle interrupt stacks yet */ 48 + if (!object_is_on_stack(&this_size)) 49 + return; 50 + 51 raw_local_irq_save(flags); 52 __raw_spin_lock(&max_stack_lock); 53
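The new test skips the max-stack bookkeeping whenever the handler is running on something other than the task's own stack (an interrupt stack, for instance); object_is_on_stack() decides that by checking the address of a local variable against the task's stack bounds. A hedged sketch of that bounds check, where base and size are assumed inputs (the kernel derives them from the task's thread_info):

#include <stdint.h>

/* Return nonzero if addr lies inside [base, base + size). */
static int on_stack(const void *addr, const void *base, unsigned long size)
{
	uintptr_t a = (uintptr_t)addr;
	uintptr_t b = (uintptr_t)base;

	return a >= b && a - b < size;
}

int main(void)
{
	char stack_area[256];	/* a region we control, for the demo */

	return on_stack(&stack_area[10], stack_area, sizeof(stack_area))
		? 0 : 1;
}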
+8
kernel/tracepoint.c
··· 131 132 old = entry->funcs; 133 134 debug_print_probes(entry); 135 /* (N -> M), (N > 1, M >= 0) probes */ 136 for (nr_probes = 0; old[nr_probes]; nr_probes++) { ··· 391 if (entry->rcu_pending) 392 rcu_barrier_sched(); 393 old = tracepoint_entry_remove_probe(entry, probe); 394 mutex_unlock(&tracepoints_mutex); 395 tracepoint_update_probes(); /* may update entry */ 396 mutex_lock(&tracepoints_mutex);
···
131
132 old = entry->funcs;
133
134 + if (!old)
135 + return NULL;
136 +
137 debug_print_probes(entry);
138 /* (N -> M), (N > 1, M >= 0) probes */
139 for (nr_probes = 0; old[nr_probes]; nr_probes++) {
···
388 if (entry->rcu_pending)
389 rcu_barrier_sched();
390 old = tracepoint_entry_remove_probe(entry, probe);
391 + if (!old) {
392 + printk(KERN_WARNING "Warning: Trying to unregister a probe "
393 + "that doesn't exist\n");
394 + goto end;
395 + }
396 mutex_unlock(&tracepoints_mutex);
397 tracepoint_update_probes(); /* may update entry */
398 mutex_lock(&tracepoints_mutex);
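With the NULL check above, unregistering a probe that was never registered now produces a warning instead of dereferencing a missing entry. A toy illustration of the same defensive lookup (the flat probes[] table is an assumption for brevity; the kernel keeps per-tracepoint probe arrays):

#include <stdio.h>
#include <string.h>

#define NPROBES 8

static const char *probes[NPROBES];	/* toy registration table */

static int probe_unregister(const char *name)
{
	int i;

	for (i = 0; i < NPROBES; i++) {
		if (probes[i] && strcmp(probes[i], name) == 0) {
			probes[i] = NULL;	/* found: drop it */
			return 0;
		}
	}
	/* Same spirit as the new warning path above. */
	fprintf(stderr,
		"Warning: trying to unregister a probe that doesn't exist\n");
	return -1;
}

int main(void)
{
	probes[0] = "sched_switch";
	probe_unregister("sched_switch");		/* succeeds */
	return probe_unregister("sched_switch") ? 0 : 1; /* warns */
}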
+1 -1
lib/Makefile
··· 2 # Makefile for some libs needed in the kernel. 3 # 4 5 - ifdef CONFIG_FTRACE 6 ORIG_CFLAGS := $(KBUILD_CFLAGS) 7 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 8 endif
··· 2 # Makefile for some libs needed in the kernel. 3 # 4 5 + ifdef CONFIG_FUNCTION_TRACER 6 ORIG_CFLAGS := $(KBUILD_CFLAGS) 7 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) 8 endif
+8 -2
scripts/Makefile.build
··· 198 fi; 199 endif 200 201 ifdef CONFIG_FTRACE_MCOUNT_RECORD 202 cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \ 203 - "$(ARCH)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" "$(NM)" "$(RM)" \ 204 - "$(MV)" "$(@)"; 205 endif 206 207 define rule_cc_o_c
··· 198 fi; 199 endif 200 201 + ifdef CONFIG_64BIT 202 + arch_bits = 64 203 + else 204 + arch_bits = 32 205 + endif 206 + 207 ifdef CONFIG_FTRACE_MCOUNT_RECORD 208 cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl \ 209 + "$(ARCH)" "$(arch_bits)" "$(OBJDUMP)" "$(OBJCOPY)" "$(CC)" "$(LD)" \ 210 + "$(NM)" "$(RM)" "$(MV)" "$(@)"; 211 endif 212 213 define rule_cc_o_c
+12 -7
scripts/bootgraph.pl
··· 37 # dmesg | perl scripts/bootgraph.pl > output.svg 38 # 39 40 - my %start, %end; 41 my $done = 0; 42 my $maxtime = 0; 43 my $firsttime = 100; ··· 108 my $stylecounter = 0; 109 my %rows; 110 my $rowscount = 1; 111 - while (($key,$value) = each %start) { 112 my $duration = $end{$key} - $start{$key}; 113 114 if ($duration >= $threshold) { 115 - my $s, $s2, $e, $y; 116 - $pid = $pids{$key}; 117 118 if (!defined($rows{$pid})) { 119 $rows{$pid} = $rowscount; 120 $rowscount = $rowscount + 1; 121 } 122 - $s = ($value - $firsttime) * $mult; 123 $s2 = $s + 6; 124 $e = ($end{$key} - $firsttime) * $mult; 125 $w = $e - $s; ··· 145 my $time = $firsttime; 146 my $step = ($maxtime - $firsttime) / 15; 147 while ($time < $maxtime) { 148 - my $s2 = ($time - $firsttime) * $mult; 149 my $tm = int($time * 100) / 100.0; 150 - print "<text transform=\"translate($s2,89) rotate(90)\">$tm</text>\n"; 151 $time = $time + $step; 152 } 153
··· 37 # dmesg | perl scripts/bootgraph.pl > output.svg 38 # 39 40 + use strict; 41 + 42 + my %start; 43 + my %end; 44 my $done = 0; 45 my $maxtime = 0; 46 my $firsttime = 100; ··· 105 my $stylecounter = 0; 106 my %rows; 107 my $rowscount = 1; 108 + my @initcalls = sort { $start{$a} <=> $start{$b} } keys(%start); 109 + my $key; 110 + foreach $key (@initcalls) { 111 my $duration = $end{$key} - $start{$key}; 112 113 if ($duration >= $threshold) { 114 + my ($s, $s2, $e, $w, $y, $y2, $style); 115 + my $pid = $pids{$key}; 116 117 if (!defined($rows{$pid})) { 118 $rows{$pid} = $rowscount; 119 $rowscount = $rowscount + 1; 120 } 121 + $s = ($start{$key} - $firsttime) * $mult; 122 $s2 = $s + 6; 123 $e = ($end{$key} - $firsttime) * $mult; 124 $w = $e - $s; ··· 140 my $time = $firsttime; 141 my $step = ($maxtime - $firsttime) / 15; 142 while ($time < $maxtime) { 143 + my $s3 = ($time - $firsttime) * $mult; 144 my $tm = int($time * 100) / 100.0; 145 + print "<text transform=\"translate($s3,89) rotate(90)\">$tm</text>\n"; 146 $time = $time + $step; 147 } 148
+24 -4
scripts/recordmcount.pl
··· 106 exit(1); 107 } 108 109 - my ($arch, $objdump, $objcopy, $cc, $ld, $nm, $rm, $mv, $inputfile) = @ARGV; 110 111 $objdump = "objdump" if ((length $objdump) == 0); 112 $objcopy = "objcopy" if ((length $objcopy) == 0); ··· 135 # (return offset and func name) 136 my $mcount_regex; # Find the call site to mcount (return offset) 137 138 if ($arch eq "x86_64") { 139 - $section_regex = "Disassembly of section"; 140 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 141 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; 142 $type = ".quad"; ··· 156 $cc .= " -m64"; 157 158 } elsif ($arch eq "i386") { 159 - $section_regex = "Disassembly of section"; 160 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 161 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 162 $type = ".long"; ··· 303 while (<IN>) { 304 # is it a section? 305 if (/$section_regex/) { 306 - $read_function = 1; 307 # print out any recorded offsets 308 update_funcs() if ($text_found); 309
··· 106 exit(1); 107 } 108 109 + my ($arch, $bits, $objdump, $objcopy, $cc, 110 + $ld, $nm, $rm, $mv, $inputfile) = @ARGV; 111 + 112 + # Acceptable sections to record. 113 + my %text_sections = ( 114 + ".text" => 1, 115 + ); 116 117 $objdump = "objdump" if ((length $objdump) == 0); 118 $objcopy = "objcopy" if ((length $objcopy) == 0); ··· 129 # (return offset and func name) 130 my $mcount_regex; # Find the call site to mcount (return offset) 131 132 + if ($arch eq "x86") { 133 + if ($bits == 64) { 134 + $arch = "x86_64"; 135 + } else { 136 + $arch = "i386"; 137 + } 138 + } 139 + 140 if ($arch eq "x86_64") { 141 + $section_regex = "Disassembly of section\\s+(\\S+):"; 142 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 143 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; 144 $type = ".quad"; ··· 142 $cc .= " -m64"; 143 144 } elsif ($arch eq "i386") { 145 + $section_regex = "Disassembly of section\\s+(\\S+):"; 146 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 147 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 148 $type = ".long"; ··· 289 while (<IN>) { 290 # is it a section? 291 if (/$section_regex/) { 292 + 293 + # Only record text sections that we know are safe 294 + if (defined($text_sections{$1})) { 295 + $read_function = 1; 296 + } else { 297 + $read_function = 0; 298 + } 299 # print out any recorded offsets 300 update_funcs() if ($text_found); 301
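The %text_sections hash restricts mcount recording to sections that are known to stay resident and executable, so call sites in code that may be discarded (init or exit text, say) are never patched at runtime. The same whitelist check expressed in C terms, as a hedged sketch (in the real script the section name is captured from objdump output by $section_regex):

#include <string.h>

/* Sections whose mcount call sites are safe to record. */
static const char *const text_sections[] = { ".text", NULL };

static int is_recordable_section(const char *name)
{
	int i;

	for (i = 0; text_sections[i]; i++)
		if (strcmp(text_sections[i], name) == 0)
			return 1;
	return 0;
}

int main(void)
{
	return (is_recordable_section(".text") &&
		!is_recordable_section(".init.text")) ? 0 : 1;
}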