Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
"The major changes in this tracing update include:

- Removal of non-DYNAMIC_FTRACE from 32bit x86

- Removal of mcount support from x86

- Emulating a call from int3 on x86_64, fixes live kernel patching

- Consolidated Tracing Error logs file

Minor updates:

- Removal of klp_check_compiler_support()

- kdb ftrace dumping output changes

- Accessing and creating ftrace instances from inside the kernel

- Clean up of #define if macro

- Introduction of TRACE_EVENT_NOP() to disable trace events based on
config options

And other minor fixes and clean ups"

* tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
x86: Hide the int3_emulate_call/jmp functions from UML
livepatch: Remove klp_check_compiler_support()
ftrace/x86: Remove mcount support
ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
tracing: Simplify "if" macro code
tracing: Fix documentation about disabling options using trace_options
tracing: Replace kzalloc with kcalloc
tracing: Fix partial reading of trace event's id file
tracing: Allow RCU to run between postponed startup tests
tracing: Fix white space issues in parse_pred() function
tracing: Eliminate const char[] auto variables
ring-buffer: Fix mispelling of Calculate
tracing: probeevent: Fix to make the type of $comm string
tracing: probeevent: Do not accumulate on ret variable
tracing: uprobes: Re-enable $comm support for uprobe events
ftrace/x86_64: Emulate call function while updating in breakpoint handler
x86_64: Allow breakpoints to emulate call instructions
x86_64: Add gap to int3 to allow for call emulation
tracing: kdb: Allow ftdump to skip all but the last few entries
tracing: Add trace_total_entries() / trace_total_entries_cpu()
...

+1350 -656
+31
Documentation/trace/ftrace.rst
··· 765 765 tracers from tracing simply echo "nop" into 766 766 current_tracer. 767 767 768 + Error conditions 769 + ---------------- 770 + 771 + For most ftrace commands, failure modes are obvious and communicated 772 + using standard return codes. 773 + 774 + For other more involved commands, extended error information may be 775 + available via the tracing/error_log file. For the commands that 776 + support it, reading the tracing/error_log file after an error will 777 + display more detailed information about what went wrong, if 778 + information is available. The tracing/error_log file is a circular 779 + error log displaying a small number (currently, 8) of ftrace errors 780 + for the last (8) failed commands. 781 + 782 + The extended error information and usage takes the form shown in 783 + this example:: 784 + 785 + # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger 786 + echo: write error: Invalid argument 787 + 788 + # cat /sys/kernel/debug/tracing/error_log 789 + [ 5348.887237] location: error: Couldn't yyy: zzz 790 + Command: xxx 791 + ^ 792 + [ 7517.023364] location: error: Bad rrr: sss 793 + Command: ppp qqq 794 + ^ 795 + 796 + To clear the error log, echo the empty string into it:: 797 + 798 + # echo > /sys/kernel/debug/tracing/error_log 768 799 769 800 Examples of using the tracer 770 801 ----------------------------
+2 -14
Documentation/trace/histogram.rst
··· 199 199 200 200 For some error conditions encountered when invoking a hist trigger 201 201 command, extended error information is available via the 202 - corresponding event's 'hist' file. Reading the hist file after an 203 - error will display more detailed information about what went wrong, 204 - if information is available. This extended error information will 205 - be available until the next hist trigger command for that event. 206 - 207 - If available for a given error condition, the extended error 208 - information and usage takes the following form:: 209 - 210 - # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger 211 - echo: write error: Invalid argument 212 - 213 - # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/hist 214 - ERROR: Couldn't yyy: zzz 215 - Last command: xxx 202 + tracing/error_log file. See Error Conditions in 203 + :file:`Documentation/trace/ftrace.rst` for details. 216 204 217 205 6.2 'hist' trigger examples 218 206 ---------------------------
-1
arch/nds32/kernel/ftrace.c
··· 7 7 #ifndef CONFIG_DYNAMIC_FTRACE 8 8 extern void (*ftrace_trace_function)(unsigned long, unsigned long, 9 9 struct ftrace_ops*, struct pt_regs*); 10 - extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); 11 10 extern void ftrace_graph_caller(void); 12 11 13 12 noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
-1
arch/parisc/kernel/ftrace.c
··· 51 51 unsigned long org_sp_gr3) 52 52 { 53 53 extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */ 54 - extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); 55 54 56 55 if (ftrace_trace_function != ftrace_stub) { 57 56 /* struct ftrace_ops *op, struct pt_regs *regs); */
-5
arch/powerpc/include/asm/livepatch.h
··· 24 24 #include <linux/sched/task_stack.h> 25 25 26 26 #ifdef CONFIG_LIVEPATCH 27 - static inline int klp_check_compiler_support(void) 28 - { 29 - return 0; 30 - } 31 - 32 27 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 33 28 { 34 29 regs->nip = ip;
-5
arch/s390/include/asm/livepatch.h
··· 13 13 14 14 #include <asm/ptrace.h> 15 15 16 - static inline int klp_check_compiler_support(void) 17 - { 18 - return 0; 19 - } 20 - 21 16 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 22 17 { 23 18 regs->psw.addr = ip;
+11
arch/x86/Kconfig
··· 31 31 select SWIOTLB 32 32 select ARCH_HAS_SYSCALL_WRAPPER 33 33 34 + config FORCE_DYNAMIC_FTRACE 35 + def_bool y 36 + depends on X86_32 37 + depends on FUNCTION_TRACER 38 + select DYNAMIC_FTRACE 39 + help 40 + We keep the static function tracing (!DYNAMIC_FTRACE) around 41 + in order to test the non static function tracing in the 42 + generic code, as other architectures still use it. But we 43 + only need to keep it around for x86_64. No need to keep it 44 + for x86_32. For x86_32, force DYNAMIC_FTRACE. 34 45 # 35 46 # Arch settings 36 47 #
+16 -2
arch/x86/entry/entry_64.S
··· 878 878 * @paranoid == 2 is special: the stub will never switch stacks. This is for 879 879 * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS. 880 880 */ 881 - .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 881 + .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 882 882 ENTRY(\sym) 883 883 UNWIND_HINT_IRET_REGS offset=\has_error_code*8 884 884 ··· 896 896 .if \paranoid == 1 897 897 testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ 898 898 jnz .Lfrom_usermode_switch_stack_\@ 899 + .endif 900 + 901 + .if \create_gap == 1 902 + /* 903 + * If coming from kernel space, create a 6-word gap to allow the 904 + * int3 handler to emulate a call instruction. 905 + */ 906 + testb $3, CS-ORIG_RAX(%rsp) 907 + jnz .Lfrom_usermode_no_gap_\@ 908 + .rept 6 909 + pushq 5*8(%rsp) 910 + .endr 911 + UNWIND_HINT_IRET_REGS offset=8 912 + .Lfrom_usermode_no_gap_\@: 899 913 .endif 900 914 901 915 .if \paranoid ··· 1143 1129 #endif /* CONFIG_HYPERV */ 1144 1130 1145 1131 idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET 1146 - idtentry int3 do_int3 has_error_code=0 1132 + idtentry int3 do_int3 has_error_code=0 create_gap=1 1147 1133 idtentry stack_segment do_stack_segment has_error_code=1 1148 1134 1149 1135 #ifdef CONFIG_XEN_PV
+3 -5
arch/x86/include/asm/ftrace.h
··· 3 3 #define _ASM_X86_FTRACE_H 4 4 5 5 #ifdef CONFIG_FUNCTION_TRACER 6 - #ifdef CC_USING_FENTRY 7 - # define MCOUNT_ADDR ((unsigned long)(__fentry__)) 8 - #else 9 - # define MCOUNT_ADDR ((unsigned long)(mcount)) 10 - # define HAVE_FUNCTION_GRAPH_FP_TEST 6 + #ifndef CC_USING_FENTRY 7 + # error Compiler does not support fentry? 11 8 #endif 9 + # define MCOUNT_ADDR ((unsigned long)(__fentry__)) 12 10 #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 13 11 14 12 #ifdef CONFIG_DYNAMIC_FTRACE
-8
arch/x86/include/asm/livepatch.h
··· 24 24 #include <asm/setup.h> 25 25 #include <linux/ftrace.h> 26 26 27 - static inline int klp_check_compiler_support(void) 28 - { 29 - #ifndef CC_USING_FENTRY 30 - return 1; 31 - #endif 32 - return 0; 33 - } 34 - 35 27 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) 36 28 { 37 29 regs->ip = ip;
+30
arch/x86/include/asm/text-patching.h
··· 42 42 extern __ro_after_init struct mm_struct *poking_mm; 43 43 extern __ro_after_init unsigned long poking_addr; 44 44 45 + #ifndef CONFIG_UML_X86 46 + static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) 47 + { 48 + regs->ip = ip; 49 + } 50 + 51 + #define INT3_INSN_SIZE 1 52 + #define CALL_INSN_SIZE 5 53 + 54 + #ifdef CONFIG_X86_64 55 + static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) 56 + { 57 + /* 58 + * The int3 handler in entry_64.S adds a gap between the 59 + * stack where the break point happened, and the saving of 60 + * pt_regs. We can extend the original stack because of 61 + * this gap. See the idtentry macro's create_gap option. 62 + */ 63 + regs->sp -= sizeof(unsigned long); 64 + *(unsigned long *)regs->sp = val; 65 + } 66 + 67 + static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) 68 + { 69 + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); 70 + int3_emulate_jmp(regs, func); 71 + } 72 + #endif /* CONFIG_X86_64 */ 73 + #endif /* !CONFIG_UML_X86 */ 74 + 45 75 #endif /* _ASM_X86_TEXT_PATCHING_H */
+27 -5
arch/x86/kernel/ftrace.c
··· 29 29 #include <asm/kprobes.h> 30 30 #include <asm/ftrace.h> 31 31 #include <asm/nops.h> 32 + #include <asm/text-patching.h> 32 33 33 34 #ifdef CONFIG_DYNAMIC_FTRACE 34 35 ··· 232 231 } 233 232 234 233 static unsigned long ftrace_update_func; 234 + static unsigned long ftrace_update_func_call; 235 235 236 236 static int update_ftrace_func(unsigned long ip, void *new) 237 237 { ··· 260 258 unsigned long ip = (unsigned long)(&ftrace_call); 261 259 unsigned char *new; 262 260 int ret; 261 + 262 + ftrace_update_func_call = (unsigned long)func; 263 263 264 264 new = ftrace_call_replace(ip, (unsigned long)func); 265 265 ret = update_ftrace_func(ip, new); ··· 298 294 if (WARN_ON_ONCE(!regs)) 299 295 return 0; 300 296 301 - ip = regs->ip - 1; 302 - if (!ftrace_location(ip) && !is_ftrace_caller(ip)) 303 - return 0; 297 + ip = regs->ip - INT3_INSN_SIZE; 304 298 305 - regs->ip += MCOUNT_INSN_SIZE - 1; 299 + #ifdef CONFIG_X86_64 300 + if (ftrace_location(ip)) { 301 + int3_emulate_call(regs, (unsigned long)ftrace_regs_caller); 302 + return 1; 303 + } else if (is_ftrace_caller(ip)) { 304 + if (!ftrace_update_func_call) { 305 + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); 306 + return 1; 307 + } 308 + int3_emulate_call(regs, ftrace_update_func_call); 309 + return 1; 310 + } 311 + #else 312 + if (ftrace_location(ip) || is_ftrace_caller(ip)) { 313 + int3_emulate_jmp(regs, ip + CALL_INSN_SIZE); 314 + return 1; 315 + } 316 + #endif 306 317 307 - return 1; 318 + return 0; 308 319 } 309 320 NOKPROBE_SYMBOL(ftrace_int3_handler); 310 321 ··· 884 865 885 866 func = ftrace_ops_get_func(ops); 886 867 868 + ftrace_update_func_call = (unsigned long)func; 869 + 887 870 /* Do a safe modify in case the trampoline is executing */ 888 871 new = ftrace_call_replace(ip, (unsigned long)func); 889 872 ret = update_ftrace_func(ip, new); ··· 987 966 { 988 967 unsigned char *new; 989 968 969 + ftrace_update_func_call = 0UL; 990 970 new = ftrace_jmp_replace(ip, (unsigned long)func); 991 971 992 972 
return update_ftrace_func(ip, new);
+5 -70
arch/x86/kernel/ftrace_32.S
··· 10 10 #include <asm/ftrace.h> 11 11 #include <asm/nospec-branch.h> 12 12 13 - #ifdef CC_USING_FENTRY 14 13 # define function_hook __fentry__ 15 14 EXPORT_SYMBOL(__fentry__) 16 - #else 17 - # define function_hook mcount 18 - EXPORT_SYMBOL(mcount) 19 - #endif 20 15 21 - #ifdef CONFIG_DYNAMIC_FTRACE 22 - 23 - /* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */ 24 - #if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER) 25 - # define USING_FRAME_POINTER 26 - #endif 27 - 28 - #ifdef USING_FRAME_POINTER 16 + #ifdef CONFIG_FRAME_POINTER 29 17 # define MCOUNT_FRAME 1 /* using frame = true */ 30 18 #else 31 19 # define MCOUNT_FRAME 0 /* using frame = false */ ··· 25 37 26 38 ENTRY(ftrace_caller) 27 39 28 - #ifdef USING_FRAME_POINTER 29 - # ifdef CC_USING_FENTRY 40 + #ifdef CONFIG_FRAME_POINTER 30 41 /* 31 42 * Frame pointers are of ip followed by bp. 32 43 * Since fentry is an immediate jump, we are left with ··· 36 49 pushl %ebp 37 50 movl %esp, %ebp 38 51 pushl 2*4(%esp) /* function ip */ 39 - # endif 52 + 40 53 /* For mcount, the function ip is directly above */ 41 54 pushl %ebp 42 55 movl %esp, %ebp ··· 46 59 pushl %edx 47 60 pushl $0 /* Pass NULL as regs pointer */ 48 61 49 - #ifdef USING_FRAME_POINTER 62 + #ifdef CONFIG_FRAME_POINTER 50 63 /* Load parent ebp into edx */ 51 64 movl 4*4(%esp), %edx 52 65 #else ··· 69 82 popl %edx 70 83 popl %ecx 71 84 popl %eax 72 - #ifdef USING_FRAME_POINTER 85 + #ifdef CONFIG_FRAME_POINTER 73 86 popl %ebp 74 - # ifdef CC_USING_FENTRY 75 87 addl $4,%esp /* skip function ip */ 76 88 popl %ebp /* this is the orig bp */ 77 89 addl $4, %esp /* skip parent ip */ 78 - # endif 79 90 #endif 80 91 .Lftrace_ret: 81 92 #ifdef CONFIG_FUNCTION_GRAPH_TRACER ··· 118 133 119 134 movl 12*4(%esp), %eax /* Load ip (1st parameter) */ 120 135 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ 121 - #ifdef CC_USING_FENTRY 122 136 movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */ 123 - #else 124 - movl 0x4(%ebp), %edx /* 
Load parent ip (2nd parameter) */ 125 - #endif 126 137 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ 127 138 pushl %esp /* Save pt_regs as 4th parameter */ 128 139 ··· 151 170 lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */ 152 171 153 172 jmp .Lftrace_ret 154 - #else /* ! CONFIG_DYNAMIC_FTRACE */ 155 - 156 - ENTRY(function_hook) 157 - cmpl $__PAGE_OFFSET, %esp 158 - jb ftrace_stub /* Paging not enabled yet? */ 159 - 160 - cmpl $ftrace_stub, ftrace_trace_function 161 - jnz .Ltrace 162 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 163 - cmpl $ftrace_stub, ftrace_graph_return 164 - jnz ftrace_graph_caller 165 - 166 - cmpl $ftrace_graph_entry_stub, ftrace_graph_entry 167 - jnz ftrace_graph_caller 168 - #endif 169 - .globl ftrace_stub 170 - ftrace_stub: 171 - ret 172 - 173 - /* taken from glibc */ 174 - .Ltrace: 175 - pushl %eax 176 - pushl %ecx 177 - pushl %edx 178 - movl 0xc(%esp), %eax 179 - movl 0x4(%ebp), %edx 180 - subl $MCOUNT_INSN_SIZE, %eax 181 - 182 - movl ftrace_trace_function, %ecx 183 - CALL_NOSPEC %ecx 184 - 185 - popl %edx 186 - popl %ecx 187 - popl %eax 188 - jmp ftrace_stub 189 - END(function_hook) 190 - #endif /* CONFIG_DYNAMIC_FTRACE */ 191 173 192 174 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 193 175 ENTRY(ftrace_graph_caller) ··· 159 215 pushl %edx 160 216 movl 3*4(%esp), %eax 161 217 /* Even with frame pointers, fentry doesn't have one here */ 162 - #ifdef CC_USING_FENTRY 163 218 lea 4*4(%esp), %edx 164 219 movl $0, %ecx 165 - #else 166 - lea 0x4(%ebp), %edx 167 - movl (%ebp), %ecx 168 - #endif 169 220 subl $MCOUNT_INSN_SIZE, %eax 170 221 call prepare_ftrace_return 171 222 popl %edx ··· 173 234 return_to_handler: 174 235 pushl %eax 175 236 pushl %edx 176 - #ifdef CC_USING_FENTRY 177 237 movl $0, %eax 178 - #else 179 - movl %ebp, %eax 180 - #endif 181 238 call ftrace_return_to_handler 182 239 movl %eax, %ecx 183 240 popl %edx
+1 -27
arch/x86/kernel/ftrace_64.S
··· 13 13 .code64 14 14 .section .entry.text, "ax" 15 15 16 - #ifdef CC_USING_FENTRY 17 16 # define function_hook __fentry__ 18 17 EXPORT_SYMBOL(__fentry__) 19 - #else 20 - # define function_hook mcount 21 - EXPORT_SYMBOL(mcount) 22 - #endif 23 18 24 19 #ifdef CONFIG_FRAME_POINTER 25 - # ifdef CC_USING_FENTRY 26 20 /* Save parent and function stack frames (rip and rbp) */ 27 21 # define MCOUNT_FRAME_SIZE (8+16*2) 28 - # else 29 - /* Save just function stack frame (rip and rbp) */ 30 - # define MCOUNT_FRAME_SIZE (8+16) 31 - # endif 32 22 #else 33 23 /* No need to save a stack frame */ 34 24 # define MCOUNT_FRAME_SIZE 0 ··· 65 75 * fentry is called before the stack frame is set up, where as mcount 66 76 * is called afterward. 67 77 */ 68 - #ifdef CC_USING_FENTRY 78 + 69 79 /* Save the parent pointer (skip orig rbp and our return address) */ 70 80 pushq \added+8*2(%rsp) 71 81 pushq %rbp 72 82 movq %rsp, %rbp 73 83 /* Save the return address (now skip orig rbp, rbp and parent) */ 74 84 pushq \added+8*3(%rsp) 75 - #else 76 - /* Can't assume that rip is before this (unless added was zero) */ 77 - pushq \added+8(%rsp) 78 - #endif 79 85 pushq %rbp 80 86 movq %rsp, %rbp 81 87 #endif /* CONFIG_FRAME_POINTER */ ··· 99 113 movq %rdx, RBP(%rsp) 100 114 101 115 /* Copy the parent address into %rsi (second parameter) */ 102 - #ifdef CC_USING_FENTRY 103 116 movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi 104 - #else 105 - /* %rdx contains original %rbp */ 106 - movq 8(%rdx), %rsi 107 - #endif 108 117 109 118 /* Move RIP to its proper location */ 110 119 movq MCOUNT_REG_SIZE+\added(%rsp), %rdi ··· 284 303 /* Saves rbp into %rdx and fills first parameter */ 285 304 save_mcount_regs 286 305 287 - #ifdef CC_USING_FENTRY 288 306 leaq MCOUNT_REG_SIZE+8(%rsp), %rsi 289 307 movq $0, %rdx /* No framepointers needed */ 290 - #else 291 - /* Save address of the return address of traced function */ 292 - leaq 8(%rdx), %rsi 293 - /* ftrace does sanity checks against frame pointers */ 294 - movq 
(%rdx), %rdx 295 - #endif 296 308 call prepare_ftrace_return 297 309 298 310 restore_mcount_regs
+18 -17
include/linux/compiler.h
··· 53 53 * "Define 'is'", Bill Clinton 54 54 * "Define 'if'", Steven Rostedt 55 55 */ 56 - #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) 57 - #define __trace_if(cond) \ 58 - if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ 59 - ({ \ 60 - int ______r; \ 61 - static struct ftrace_branch_data \ 62 - __aligned(4) \ 63 - __section("_ftrace_branch") \ 64 - ______f = { \ 65 - .func = __func__, \ 66 - .file = __FILE__, \ 67 - .line = __LINE__, \ 68 - }; \ 69 - ______r = !!(cond); \ 70 - ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\ 71 - ______r; \ 72 - })) 56 + #define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) ) 57 + 58 + #define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond)) 59 + 60 + #define __trace_if_value(cond) ({ \ 61 + static struct ftrace_branch_data \ 62 + __aligned(4) \ 63 + __section("_ftrace_branch") \ 64 + __if_trace = { \ 65 + .func = __func__, \ 66 + .file = __FILE__, \ 67 + .line = __LINE__, \ 68 + }; \ 69 + (cond) ? \ 70 + (__if_trace.miss_hit[1]++,1) : \ 71 + (__if_trace.miss_hit[0]++,0); \ 72 + }) 73 + 73 74 #endif /* CONFIG_PROFILE_ALL_BRANCHES */ 74 75 75 76 #else
+2
include/linux/ftrace.h
··· 741 741 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ 742 742 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ 743 743 744 + extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); 745 + 744 746 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 745 747 746 748 struct fgraph_ops {
+15
include/linux/tracepoint.h
··· 548 548 549 549 #define TRACE_EVENT_PERF_PERM(event, expr...) 550 550 551 + #define DECLARE_EVENT_NOP(name, proto, args) \ 552 + static inline void trace_##name(proto) \ 553 + { } \ 554 + static inline bool trace_##name##_enabled(void) \ 555 + { \ 556 + return false; \ 557 + } 558 + 559 + #define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) \ 560 + DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args)) 561 + 562 + #define DECLARE_EVENT_CLASS_NOP(name, proto, args, tstruct, assign, print) 563 + #define DEFINE_EVENT_NOP(template, name, proto, args) \ 564 + DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args)) 565 + 551 566 #endif /* ifdef TRACE_EVENT (see note above) */
+8
include/trace/define_trace.h
··· 46 46 assign, print, reg, unreg) \ 47 47 DEFINE_TRACE_FN(name, reg, unreg) 48 48 49 + #undef TRACE_EVENT_NOP 50 + #define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) 51 + 52 + #undef DEFINE_EVENT_NOP 53 + #define DEFINE_EVENT_NOP(template, name, proto, args) 54 + 49 55 #undef DEFINE_EVENT 50 56 #define DEFINE_EVENT(template, name, proto, args) \ 51 57 DEFINE_TRACE(name) ··· 108 102 #undef TRACE_EVENT_FN 109 103 #undef TRACE_EVENT_FN_COND 110 104 #undef TRACE_EVENT_CONDITION 105 + #undef TRACE_EVENT_NOP 106 + #undef DEFINE_EVENT_NOP 111 107 #undef DECLARE_EVENT_CLASS 112 108 #undef DEFINE_EVENT 113 109 #undef DEFINE_EVENT_FN
+25 -56
include/trace/events/rcu.h
··· 7 7 8 8 #include <linux/tracepoint.h> 9 9 10 + #ifdef CONFIG_RCU_TRACE 11 + #define TRACE_EVENT_RCU TRACE_EVENT 12 + #else 13 + #define TRACE_EVENT_RCU TRACE_EVENT_NOP 14 + #endif 15 + 10 16 /* 11 17 * Tracepoint for start/end markers used for utilization calculations. 12 18 * By convention, the string is of the following forms: ··· 41 35 TP_printk("%s", __entry->s) 42 36 ); 43 37 44 - #ifdef CONFIG_RCU_TRACE 45 - 46 38 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) 47 39 48 40 /* ··· 66 62 * "end": End a grace period. 67 63 * "cpuend": CPU first notices a grace-period end. 68 64 */ 69 - TRACE_EVENT(rcu_grace_period, 65 + TRACE_EVENT_RCU(rcu_grace_period, 70 66 71 67 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent), 72 68 ··· 105 101 * "Cleanup": Clean up rcu_node structure after previous GP. 106 102 * "CleanupMore": Clean up, and another GP is needed. 107 103 */ 108 - TRACE_EVENT(rcu_future_grace_period, 104 + TRACE_EVENT_RCU(rcu_future_grace_period, 109 105 110 106 TP_PROTO(const char *rcuname, unsigned long gp_seq, 111 107 unsigned long gp_seq_req, u8 level, int grplo, int grphi, ··· 145 141 * rcu_node structure, and the mask of CPUs that will be waited for. 146 142 * All but the type of RCU are extracted from the rcu_node structure. 147 143 */ 148 - TRACE_EVENT(rcu_grace_period_init, 144 + TRACE_EVENT_RCU(rcu_grace_period_init, 149 145 150 146 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level, 151 147 int grplo, int grphi, unsigned long qsmask), ··· 190 186 * "endwake": Woke piggybackers up. 191 187 * "done": Someone else did the expedited grace period for us. 
192 188 */ 193 - TRACE_EVENT(rcu_exp_grace_period, 189 + TRACE_EVENT_RCU(rcu_exp_grace_period, 194 190 195 191 TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent), 196 192 ··· 222 218 * "nxtlvl": Advance to next level of rcu_node funnel 223 219 * "wait": Wait for someone else to do expedited GP 224 220 */ 225 - TRACE_EVENT(rcu_exp_funnel_lock, 221 + TRACE_EVENT_RCU(rcu_exp_funnel_lock, 226 222 227 223 TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi, 228 224 const char *gpevent), ··· 273 269 * "WaitQueue": Enqueue partially done, timed wait for it to complete. 274 270 * "WokeQueue": Partial enqueue now complete. 275 271 */ 276 - TRACE_EVENT(rcu_nocb_wake, 272 + TRACE_EVENT_RCU(rcu_nocb_wake, 277 273 278 274 TP_PROTO(const char *rcuname, int cpu, const char *reason), 279 275 ··· 301 297 * include SRCU), the grace-period number that the task is blocking 302 298 * (the current or the next), and the task's PID. 303 299 */ 304 - TRACE_EVENT(rcu_preempt_task, 300 + TRACE_EVENT_RCU(rcu_preempt_task, 305 301 306 302 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq), 307 303 ··· 328 324 * read-side critical section exiting that critical section. Track the 329 325 * type of RCU (which one day might include SRCU) and the task's PID. 330 326 */ 331 - TRACE_EVENT(rcu_unlock_preempted_task, 327 + TRACE_EVENT_RCU(rcu_unlock_preempted_task, 332 328 333 329 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid), 334 330 ··· 357 353 * whether there are any blocked tasks blocking the current grace period. 358 354 * All but the type of RCU are extracted from the rcu_node structure. 
359 355 */ 360 - TRACE_EVENT(rcu_quiescent_state_report, 356 + TRACE_EVENT_RCU(rcu_quiescent_state_report, 361 357 362 358 TP_PROTO(const char *rcuname, unsigned long gp_seq, 363 359 unsigned long mask, unsigned long qsmask, ··· 400 396 * state, which can be "dti" for dyntick-idle mode or "kick" when kicking 401 397 * a CPU that has been in dyntick-idle mode for too long. 402 398 */ 403 - TRACE_EVENT(rcu_fqs, 399 + TRACE_EVENT_RCU(rcu_fqs, 404 400 405 401 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent), 406 402 ··· 440 436 * events use two separate counters, and that the "++=" and "--=" events 441 437 * for irq/NMI will change the counter by two, otherwise by one. 442 438 */ 443 - TRACE_EVENT(rcu_dyntick, 439 + TRACE_EVENT_RCU(rcu_dyntick, 444 440 445 441 TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks), 446 442 ··· 472 468 * number of lazy callbacks queued, and the fourth element is the 473 469 * total number of callbacks queued. 474 470 */ 475 - TRACE_EVENT(rcu_callback, 471 + TRACE_EVENT_RCU(rcu_callback, 476 472 477 473 TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy, 478 474 long qlen), ··· 508 504 * the fourth argument is the number of lazy callbacks queued, and the 509 505 * fifth argument is the total number of callbacks queued. 510 506 */ 511 - TRACE_EVENT(rcu_kfree_callback, 507 + TRACE_EVENT_RCU(rcu_kfree_callback, 512 508 513 509 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset, 514 510 long qlen_lazy, long qlen), ··· 543 539 * the total number of callbacks queued, and the fourth argument is 544 540 * the current RCU-callback batch limit. 545 541 */ 546 - TRACE_EVENT(rcu_batch_start, 542 + TRACE_EVENT_RCU(rcu_batch_start, 547 543 548 544 TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit), 549 545 ··· 573 569 * The first argument is the type of RCU, and the second argument is 574 570 * a pointer to the RCU callback itself. 
575 571 */ 576 - TRACE_EVENT(rcu_invoke_callback, 572 + TRACE_EVENT_RCU(rcu_invoke_callback, 577 573 578 574 TP_PROTO(const char *rcuname, struct rcu_head *rhp), 579 575 ··· 602 598 * is the offset of the callback within the enclosing RCU-protected 603 599 * data structure. 604 600 */ 605 - TRACE_EVENT(rcu_invoke_kfree_callback, 601 + TRACE_EVENT_RCU(rcu_invoke_kfree_callback, 606 602 607 603 TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset), 608 604 ··· 635 631 * and the sixth argument (risk) is the return value from 636 632 * rcu_is_callbacks_kthread(). 637 633 */ 638 - TRACE_EVENT(rcu_batch_end, 634 + TRACE_EVENT_RCU(rcu_batch_end, 639 635 640 636 TP_PROTO(const char *rcuname, int callbacks_invoked, 641 637 char cb, char nr, char iit, char risk), ··· 677 673 * callback address can be NULL. 678 674 */ 679 675 #define RCUTORTURENAME_LEN 8 680 - TRACE_EVENT(rcu_torture_read, 676 + TRACE_EVENT_RCU(rcu_torture_read, 681 677 682 678 TP_PROTO(const char *rcutorturename, struct rcu_head *rhp, 683 679 unsigned long secs, unsigned long c_old, unsigned long c), ··· 725 721 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument 726 722 * is the count of remaining callbacks, and "done" is the piggybacking count. 
727 723 */ 728 - TRACE_EVENT(rcu_barrier, 724 + TRACE_EVENT_RCU(rcu_barrier, 729 725 730 726 TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done), 731 727 ··· 751 747 __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt, 752 748 __entry->done) 753 749 ); 754 - 755 - #else /* #ifdef CONFIG_RCU_TRACE */ 756 - 757 - #define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0) 758 - #define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \ 759 - level, grplo, grphi, event) \ 760 - do { } while (0) 761 - #define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \ 762 - qsmask) do { } while (0) 763 - #define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \ 764 - do { } while (0) 765 - #define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \ 766 - do { } while (0) 767 - #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) 768 - #define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0) 769 - #define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0) 770 - #define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \ 771 - grplo, grphi, gp_tasks) do { } \ 772 - while (0) 773 - #define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0) 774 - #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0) 775 - #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) 776 - #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ 777 - do { } while (0) 778 - #define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \ 779 - do { } while (0) 780 - #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0) 781 - #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0) 782 - #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \ 783 - do { } while (0) 784 - #define 
trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ 785 - do { } while (0) 786 - #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0) 787 - 788 - #endif /* #else #ifdef CONFIG_RCU_TRACE */ 789 750 790 751 #endif /* _TRACE_RCU_H */ 791 752
+14 -7
include/trace/events/sched.h
··· 241 241 DEFINE_EVENT(sched_process_template, sched_process_free, 242 242 TP_PROTO(struct task_struct *p), 243 243 TP_ARGS(p)); 244 - 245 244 246 245 /* 247 246 * Tracepoint for a task exiting: ··· 335 336 __entry->pid, __entry->old_pid) 336 337 ); 337 338 339 + 340 + #ifdef CONFIG_SCHEDSTATS 341 + #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT 342 + #define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS 343 + #else 344 + #define DEFINE_EVENT_SCHEDSTAT DEFINE_EVENT_NOP 345 + #define DECLARE_EVENT_CLASS_SCHEDSTAT DECLARE_EVENT_CLASS_NOP 346 + #endif 347 + 338 348 /* 339 349 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE 340 350 * adding sched_stat support to SCHED_FIFO/RR would be welcome. 341 351 */ 342 - DECLARE_EVENT_CLASS(sched_stat_template, 352 + DECLARE_EVENT_CLASS_SCHEDSTAT(sched_stat_template, 343 353 344 354 TP_PROTO(struct task_struct *tsk, u64 delay), 345 355 ··· 371 363 (unsigned long long)__entry->delay) 372 364 ); 373 365 374 - 375 366 /* 376 367 * Tracepoint for accounting wait time (time the task is runnable 377 368 * but not actually running due to scheduler contention). 378 369 */ 379 - DEFINE_EVENT(sched_stat_template, sched_stat_wait, 370 + DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_wait, 380 371 TP_PROTO(struct task_struct *tsk, u64 delay), 381 372 TP_ARGS(tsk, delay)); 382 373 ··· 383 376 * Tracepoint for accounting sleep time (time the task is not runnable, 384 377 * including iowait, see below). 385 378 */ 386 - DEFINE_EVENT(sched_stat_template, sched_stat_sleep, 379 + DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_sleep, 387 380 TP_PROTO(struct task_struct *tsk, u64 delay), 388 381 TP_ARGS(tsk, delay)); 389 382 ··· 391 384 * Tracepoint for accounting iowait time (time the task is not runnable 392 385 * due to waiting on IO to complete). 
393 386 */ 394 - DEFINE_EVENT(sched_stat_template, sched_stat_iowait, 387 + DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_iowait, 395 388 TP_PROTO(struct task_struct *tsk, u64 delay), 396 389 TP_ARGS(tsk, delay)); 397 390 398 391 /* 399 392 * Tracepoint for accounting blocked time (time the task is in uninterruptible). 400 393 */ 401 - DEFINE_EVENT(sched_stat_template, sched_stat_blocked, 394 + DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked, 402 395 TP_PROTO(struct task_struct *tsk, u64 delay), 403 396 TP_ARGS(tsk, delay)); 404 397
-8
kernel/livepatch/core.c
··· 1208 1208 1209 1209 static int __init klp_init(void) 1210 1210 { 1211 - int ret; 1212 - 1213 - ret = klp_check_compiler_support(); 1214 - if (ret) { 1215 - pr_info("Your compiler is too old; turning off.\n"); 1216 - return -EINVAL; 1217 - } 1218 - 1219 1211 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj); 1220 1212 if (!klp_root_kobj) 1221 1213 return -ENOMEM;
+2 -7
kernel/rcu/rcu.h
··· 11 11 #define __LINUX_RCU_H 12 12 13 13 #include <trace/events/rcu.h> 14 - #ifdef CONFIG_RCU_TRACE 15 - #define RCU_TRACE(stmt) stmt 16 - #else /* #ifdef CONFIG_RCU_TRACE */ 17 - #define RCU_TRACE(stmt) 18 - #endif /* #else #ifdef CONFIG_RCU_TRACE */ 19 14 20 15 /* Offset to allow distinguishing irq vs. task-based idle entry/exit. */ 21 16 #define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) ··· 211 216 212 217 rcu_lock_acquire(&rcu_callback_map); 213 218 if (__is_kfree_rcu_offset(offset)) { 214 - RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);) 219 + trace_rcu_invoke_kfree_callback(rn, head, offset); 215 220 kfree((void *)head - offset); 216 221 rcu_lock_release(&rcu_callback_map); 217 222 return true; 218 223 } else { 219 - RCU_TRACE(trace_rcu_invoke_callback(rn, head);) 224 + trace_rcu_invoke_callback(rn, head); 220 225 f = head->func; 221 226 WRITE_ONCE(head->func, (rcu_callback_t)0L); 222 227 f(head);
+4 -4
kernel/rcu/tree.c
··· 1969 1969 */ 1970 1970 int rcutree_dying_cpu(unsigned int cpu) 1971 1971 { 1972 - RCU_TRACE(bool blkd;) 1973 - RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(&rcu_data);) 1974 - RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) 1972 + bool blkd; 1973 + struct rcu_data *rdp = this_cpu_ptr(&rcu_data); 1974 + struct rcu_node *rnp = rdp->mynode; 1975 1975 1976 1976 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 1977 1977 return 0; 1978 1978 1979 - RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);) 1979 + blkd = !!(rnp->qsmask & rdp->grpmask); 1980 1980 trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, 1981 1981 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp")); 1982 1982 return 0;
+4 -5
kernel/trace/ftrace.c
··· 70 70 #define INIT_OPS_HASH(opsname) \ 71 71 .func_hash = &opsname.local_hash, \ 72 72 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 73 - #define ASSIGN_OPS_HASH(opsname, val) \ 74 - .func_hash = val, \ 75 - .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 76 73 #else 77 74 #define INIT_OPS_HASH(opsname) 78 - #define ASSIGN_OPS_HASH(opsname, val) 79 75 #endif 80 76 81 77 enum { ··· 3876 3880 static bool module_exists(const char *module) 3877 3881 { 3878 3882 /* All modules have the symbol __this_module */ 3879 - const char this_mod[] = "__this_module"; 3883 + static const char this_mod[] = "__this_module"; 3880 3884 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 3881 3885 unsigned long val; 3882 3886 int n; ··· 6261 6265 preempt_disable_notrace(); 6262 6266 6263 6267 do_for_each_ftrace_op(op, ftrace_ops_list) { 6268 + /* Stub functions don't need to be called nor tested */ 6269 + if (op->flags & FTRACE_OPS_FL_STUB) 6270 + continue; 6264 6271 /* 6265 6272 * Check the following for each ops before calling their func: 6266 6273 * if RCU flag is set, then rcu_is_watching() must be true
+1 -1
kernel/trace/ring_buffer.c
··· 4979 4979 cnt = data->cnt + (nested ? 27 : 0); 4980 4980 4981 4981 /* Multiply cnt by ~e, to make some unique increment */ 4982 - size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1); 4982 + size = (cnt * 68 / 25) % (sizeof(rb_string) - 1); 4983 4983 4984 4984 len = size + sizeof(struct rb_item); 4985 4985
+1 -1
kernel/trace/ring_buffer_benchmark.c
··· 362 362 hit--; /* make it non zero */ 363 363 } 364 364 365 - /* Caculate the average time in nanosecs */ 365 + /* Calculate the average time in nanosecs */ 366 366 avg = NSEC_PER_MSEC / (hit + missed); 367 367 trace_printk("%ld ns per entry\n", avg); 368 368 }
+356 -61
kernel/trace/trace.c
··· 1727 1727 pr_info("Running postponed tracer tests:\n"); 1728 1728 1729 1729 list_for_each_entry_safe(p, n, &postponed_selftests, list) { 1730 + /* This loop can take minutes when sanitizers are enabled, so 1731 + * lets make sure we allow RCU processing. 1732 + */ 1733 + cond_resched(); 1730 1734 ret = run_tracer_selftest(p->type); 1731 1735 /* If the test fails, then warn and remove from available_tracers */ 1732 1736 if (ret < 0) { ··· 3049 3045 if (global_trace.trace_buffer.buffer) 3050 3046 tracing_start_cmdline_record(); 3051 3047 } 3048 + EXPORT_SYMBOL_GPL(trace_printk_init_buffers); 3052 3049 3053 3050 void trace_printk_start_comm(void) 3054 3051 { ··· 3210 3205 va_end(ap); 3211 3206 return ret; 3212 3207 } 3208 + EXPORT_SYMBOL_GPL(trace_array_printk); 3213 3209 3214 3210 __printf(3, 4) 3215 3211 int trace_array_printk_buf(struct ring_buffer *buffer, ··· 3489 3483 } 3490 3484 3491 3485 static void 3486 + get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total, 3487 + unsigned long *entries, int cpu) 3488 + { 3489 + unsigned long count; 3490 + 3491 + count = ring_buffer_entries_cpu(buf->buffer, cpu); 3492 + /* 3493 + * If this buffer has skipped entries, then we hold all 3494 + * entries for the trace and we need to ignore the 3495 + * ones before the time stamp. 
3496 + */ 3497 + if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 3498 + count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 3499 + /* total is the same as the entries */ 3500 + *total = count; 3501 + } else 3502 + *total = count + 3503 + ring_buffer_overrun_cpu(buf->buffer, cpu); 3504 + *entries = count; 3505 + } 3506 + 3507 + static void 3492 3508 get_total_entries(struct trace_buffer *buf, 3493 3509 unsigned long *total, unsigned long *entries) 3494 3510 { 3495 - unsigned long count; 3511 + unsigned long t, e; 3496 3512 int cpu; 3497 3513 3498 3514 *total = 0; 3499 3515 *entries = 0; 3500 3516 3501 3517 for_each_tracing_cpu(cpu) { 3502 - count = ring_buffer_entries_cpu(buf->buffer, cpu); 3503 - /* 3504 - * If this buffer has skipped entries, then we hold all 3505 - * entries for the trace and we need to ignore the 3506 - * ones before the time stamp. 3507 - */ 3508 - if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { 3509 - count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; 3510 - /* total is the same as the entries */ 3511 - *total += count; 3512 - } else 3513 - *total += count + 3514 - ring_buffer_overrun_cpu(buf->buffer, cpu); 3515 - *entries += count; 3518 + get_total_entries_cpu(buf, &t, &e, cpu); 3519 + *total += t; 3520 + *entries += e; 3516 3521 } 3522 + } 3523 + 3524 + unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) 3525 + { 3526 + unsigned long total, entries; 3527 + 3528 + if (!tr) 3529 + tr = &global_trace; 3530 + 3531 + get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu); 3532 + 3533 + return entries; 3534 + } 3535 + 3536 + unsigned long trace_total_entries(struct trace_array *tr) 3537 + { 3538 + unsigned long total, entries; 3539 + 3540 + if (!tr) 3541 + tr = &global_trace; 3542 + 3543 + get_total_entries(&tr->trace_buffer, &total, &entries); 3544 + 3545 + return entries; 3517 3546 } 3518 3547 3519 3548 static void print_lat_help_header(struct seq_file *m) ··· 3589 3548 unsigned int flags) 3590 3549 { 
3591 3550 bool tgid = flags & TRACE_ITER_RECORD_TGID; 3592 - const char tgid_space[] = " "; 3593 - const char space[] = " "; 3551 + const char *space = " "; 3552 + int prec = tgid ? 10 : 2; 3594 3553 3595 3554 print_event_info(buf, m); 3596 3555 3597 - seq_printf(m, "# %s _-----=> irqs-off\n", 3598 - tgid ? tgid_space : space); 3599 - seq_printf(m, "# %s / _----=> need-resched\n", 3600 - tgid ? tgid_space : space); 3601 - seq_printf(m, "# %s| / _---=> hardirq/softirq\n", 3602 - tgid ? tgid_space : space); 3603 - seq_printf(m, "# %s|| / _--=> preempt-depth\n", 3604 - tgid ? tgid_space : space); 3605 - seq_printf(m, "# %s||| / delay\n", 3606 - tgid ? tgid_space : space); 3607 - seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n", 3608 - tgid ? " TGID " : space); 3609 - seq_printf(m, "# | | %s | |||| | |\n", 3610 - tgid ? " | " : space); 3556 + seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); 3557 + seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); 3558 + seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); 3559 + seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); 3560 + seq_printf(m, "# %.*s||| / delay\n", prec, space); 3561 + seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); 3562 + seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); 3611 3563 } 3612 3564 3613 3565 void ··· 4726 4692 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" 4727 4693 " current_tracer\t- function and latency tracers\n" 4728 4694 " available_tracers\t- list of configured tracers for current_tracer\n" 4695 + " error_log\t- error log for failed commands (that support it)\n" 4729 4696 " buffer_size_kb\t- view and modify size of per cpu buffer\n" 4730 4697 " buffer_total_size_kb - view total size of all cpu buffers\n\n" 4731 4698 " trace_clock\t\t-change the clock used to order events\n" ··· 4747 4712 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" 4748 4713 "\t\t\t Remove 
sub-buffer with rmdir\n" 4749 4714 " trace_options\t\t- Set format or modify how tracing happens\n" 4750 - "\t\t\t Disable an option by adding a suffix 'no' to the\n" 4715 + "\t\t\t Disable an option by prefixing 'no' to the\n" 4751 4716 "\t\t\t option name\n" 4752 4717 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" 4753 4718 #ifdef CONFIG_DYNAMIC_FTRACE ··· 6331 6296 struct ring_buffer *buffer; 6332 6297 struct print_entry *entry; 6333 6298 unsigned long irq_flags; 6334 - const char faulted[] = "<faulted>"; 6335 6299 ssize_t written; 6336 6300 int size; 6337 6301 int len; 6338 6302 6339 6303 /* Used in tracing_mark_raw_write() as well */ 6340 - #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */ 6304 + #define FAULTED_STR "<faulted>" 6305 + #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ 6341 6306 6342 6307 if (tracing_disabled) 6343 6308 return -EINVAL; ··· 6369 6334 6370 6335 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); 6371 6336 if (len) { 6372 - memcpy(&entry->buf, faulted, FAULTED_SIZE); 6337 + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 6373 6338 cnt = FAULTED_SIZE; 6374 6339 written = -EFAULT; 6375 6340 } else ··· 6410 6375 struct ring_buffer_event *event; 6411 6376 struct ring_buffer *buffer; 6412 6377 struct raw_data_entry *entry; 6413 - const char faulted[] = "<faulted>"; 6414 6378 unsigned long irq_flags; 6415 6379 ssize_t written; 6416 6380 int size; ··· 6449 6415 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); 6450 6416 if (len) { 6451 6417 entry->id = -1; 6452 - memcpy(&entry->buf, faulted, FAULTED_SIZE); 6418 + memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); 6453 6419 written = -EFAULT; 6454 6420 } else 6455 6421 written = cnt; ··· 6901 6867 }; 6902 6868 6903 6869 #endif /* CONFIG_TRACER_SNAPSHOT */ 6870 + 6871 + #define TRACING_LOG_ERRS_MAX 8 6872 + #define TRACING_LOG_LOC_MAX 128 6873 + 6874 + #define CMD_PREFIX " Command: " 6875 + 
6876 + struct err_info { 6877 + const char **errs; /* ptr to loc-specific array of err strings */ 6878 + u8 type; /* index into errs -> specific err string */ 6879 + u8 pos; /* MAX_FILTER_STR_VAL = 256 */ 6880 + u64 ts; 6881 + }; 6882 + 6883 + struct tracing_log_err { 6884 + struct list_head list; 6885 + struct err_info info; 6886 + char loc[TRACING_LOG_LOC_MAX]; /* err location */ 6887 + char cmd[MAX_FILTER_STR_VAL]; /* what caused err */ 6888 + }; 6889 + 6890 + static DEFINE_MUTEX(tracing_err_log_lock); 6891 + 6892 + struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) 6893 + { 6894 + struct tracing_log_err *err; 6895 + 6896 + if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { 6897 + err = kzalloc(sizeof(*err), GFP_KERNEL); 6898 + if (!err) 6899 + err = ERR_PTR(-ENOMEM); 6900 + tr->n_err_log_entries++; 6901 + 6902 + return err; 6903 + } 6904 + 6905 + err = list_first_entry(&tr->err_log, struct tracing_log_err, list); 6906 + list_del(&err->list); 6907 + 6908 + return err; 6909 + } 6910 + 6911 + /** 6912 + * err_pos - find the position of a string within a command for error careting 6913 + * @cmd: The tracing command that caused the error 6914 + * @str: The string to position the caret at within @cmd 6915 + * 6916 + * Finds the position of the first occurence of @str within @cmd. The 6917 + * return value can be passed to tracing_log_err() for caret placement 6918 + * within @cmd. 6919 + * 6920 + * Returns the index within @cmd of the first occurence of @str or 0 6921 + * if @str was not found. 
6922 + */ 6923 + unsigned int err_pos(char *cmd, const char *str) 6924 + { 6925 + char *found; 6926 + 6927 + if (WARN_ON(!strlen(cmd))) 6928 + return 0; 6929 + 6930 + found = strstr(cmd, str); 6931 + if (found) 6932 + return found - cmd; 6933 + 6934 + return 0; 6935 + } 6936 + 6937 + /** 6938 + * tracing_log_err - write an error to the tracing error log 6939 + * @tr: The associated trace array for the error (NULL for top level array) 6940 + * @loc: A string describing where the error occurred 6941 + * @cmd: The tracing command that caused the error 6942 + * @errs: The array of loc-specific static error strings 6943 + * @type: The index into errs[], which produces the specific static err string 6944 + * @pos: The position the caret should be placed in the cmd 6945 + * 6946 + * Writes an error into tracing/error_log of the form: 6947 + * 6948 + * <loc>: error: <text> 6949 + * Command: <cmd> 6950 + * ^ 6951 + * 6952 + * tracing/error_log is a small log file containing the last 6953 + * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated 6954 + * unless there has been a tracing error, and the error log can be 6955 + * cleared and have its memory freed by writing the empty string in 6956 + * truncation mode to it i.e. echo > tracing/error_log. 6957 + * 6958 + * NOTE: the @errs array along with the @type param are used to 6959 + * produce a static error string - this string is not copied and saved 6960 + * when the error is logged - only a pointer to it is saved. See 6961 + * existing callers for examples of how static strings are typically 6962 + * defined for use with tracing_log_err(). 
6963 + */ 6964 + void tracing_log_err(struct trace_array *tr, 6965 + const char *loc, const char *cmd, 6966 + const char **errs, u8 type, u8 pos) 6967 + { 6968 + struct tracing_log_err *err; 6969 + 6970 + if (!tr) 6971 + tr = &global_trace; 6972 + 6973 + mutex_lock(&tracing_err_log_lock); 6974 + err = get_tracing_log_err(tr); 6975 + if (PTR_ERR(err) == -ENOMEM) { 6976 + mutex_unlock(&tracing_err_log_lock); 6977 + return; 6978 + } 6979 + 6980 + snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); 6981 + snprintf(err->cmd, MAX_FILTER_STR_VAL,"\n" CMD_PREFIX "%s\n", cmd); 6982 + 6983 + err->info.errs = errs; 6984 + err->info.type = type; 6985 + err->info.pos = pos; 6986 + err->info.ts = local_clock(); 6987 + 6988 + list_add_tail(&err->list, &tr->err_log); 6989 + mutex_unlock(&tracing_err_log_lock); 6990 + } 6991 + 6992 + static void clear_tracing_err_log(struct trace_array *tr) 6993 + { 6994 + struct tracing_log_err *err, *next; 6995 + 6996 + mutex_lock(&tracing_err_log_lock); 6997 + list_for_each_entry_safe(err, next, &tr->err_log, list) { 6998 + list_del(&err->list); 6999 + kfree(err); 7000 + } 7001 + 7002 + tr->n_err_log_entries = 0; 7003 + mutex_unlock(&tracing_err_log_lock); 7004 + } 7005 + 7006 + static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) 7007 + { 7008 + struct trace_array *tr = m->private; 7009 + 7010 + mutex_lock(&tracing_err_log_lock); 7011 + 7012 + return seq_list_start(&tr->err_log, *pos); 7013 + } 7014 + 7015 + static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) 7016 + { 7017 + struct trace_array *tr = m->private; 7018 + 7019 + return seq_list_next(v, &tr->err_log, pos); 7020 + } 7021 + 7022 + static void tracing_err_log_seq_stop(struct seq_file *m, void *v) 7023 + { 7024 + mutex_unlock(&tracing_err_log_lock); 7025 + } 7026 + 7027 + static void tracing_err_log_show_pos(struct seq_file *m, u8 pos) 7028 + { 7029 + u8 i; 7030 + 7031 + for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) 7032 + 
seq_putc(m, ' '); 7033 + for (i = 0; i < pos; i++) 7034 + seq_putc(m, ' '); 7035 + seq_puts(m, "^\n"); 7036 + } 7037 + 7038 + static int tracing_err_log_seq_show(struct seq_file *m, void *v) 7039 + { 7040 + struct tracing_log_err *err = v; 7041 + 7042 + if (err) { 7043 + const char *err_text = err->info.errs[err->info.type]; 7044 + u64 sec = err->info.ts; 7045 + u32 nsec; 7046 + 7047 + nsec = do_div(sec, NSEC_PER_SEC); 7048 + seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000, 7049 + err->loc, err_text); 7050 + seq_printf(m, "%s", err->cmd); 7051 + tracing_err_log_show_pos(m, err->info.pos); 7052 + } 7053 + 7054 + return 0; 7055 + } 7056 + 7057 + static const struct seq_operations tracing_err_log_seq_ops = { 7058 + .start = tracing_err_log_seq_start, 7059 + .next = tracing_err_log_seq_next, 7060 + .stop = tracing_err_log_seq_stop, 7061 + .show = tracing_err_log_seq_show 7062 + }; 7063 + 7064 + static int tracing_err_log_open(struct inode *inode, struct file *file) 7065 + { 7066 + struct trace_array *tr = inode->i_private; 7067 + int ret = 0; 7068 + 7069 + if (trace_array_get(tr) < 0) 7070 + return -ENODEV; 7071 + 7072 + /* If this file was opened for write, then erase contents */ 7073 + if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) 7074 + clear_tracing_err_log(tr); 7075 + 7076 + if (file->f_mode & FMODE_READ) { 7077 + ret = seq_open(file, &tracing_err_log_seq_ops); 7078 + if (!ret) { 7079 + struct seq_file *m = file->private_data; 7080 + m->private = tr; 7081 + } else { 7082 + trace_array_put(tr); 7083 + } 7084 + } 7085 + return ret; 7086 + } 7087 + 7088 + static ssize_t tracing_err_log_write(struct file *file, 7089 + const char __user *buffer, 7090 + size_t count, loff_t *ppos) 7091 + { 7092 + return count; 7093 + } 7094 + 7095 + static const struct file_operations tracing_err_log_fops = { 7096 + .open = tracing_err_log_open, 7097 + .write = tracing_err_log_write, 7098 + .read = seq_read, 7099 + .llseek = seq_lseek, 7100 + .release = 
tracing_release_generic_tr, 7101 + }; 6904 7102 6905 7103 static int tracing_buffers_open(struct inode *inode, struct file *filp) 6906 7104 { ··· 8299 8033 mutex_unlock(&trace_types_lock); 8300 8034 } 8301 8035 8302 - static int instance_mkdir(const char *name) 8036 + struct trace_array *trace_array_create(const char *name) 8303 8037 { 8304 8038 struct trace_array *tr; 8305 8039 int ret; ··· 8338 8072 INIT_LIST_HEAD(&tr->systems); 8339 8073 INIT_LIST_HEAD(&tr->events); 8340 8074 INIT_LIST_HEAD(&tr->hist_vars); 8075 + INIT_LIST_HEAD(&tr->err_log); 8341 8076 8342 8077 if (allocate_trace_buffers(tr, trace_buf_size) < 0) 8343 8078 goto out_free_tr; ··· 8364 8097 mutex_unlock(&trace_types_lock); 8365 8098 mutex_unlock(&event_mutex); 8366 8099 8367 - return 0; 8100 + return tr; 8368 8101 8369 8102 out_free_tr: 8370 8103 free_trace_buffers(tr); ··· 8376 8109 mutex_unlock(&trace_types_lock); 8377 8110 mutex_unlock(&event_mutex); 8378 8111 8379 - return ret; 8112 + return ERR_PTR(ret); 8113 + } 8114 + EXPORT_SYMBOL_GPL(trace_array_create); 8380 8115 8116 + static int instance_mkdir(const char *name) 8117 + { 8118 + return PTR_ERR_OR_ZERO(trace_array_create(name)); 8381 8119 } 8382 8120 8383 - static int instance_rmdir(const char *name) 8121 + static int __remove_instance(struct trace_array *tr) 8384 8122 { 8385 - struct trace_array *tr; 8386 - int found = 0; 8387 - int ret; 8388 8123 int i; 8389 8124 8390 - mutex_lock(&event_mutex); 8391 - mutex_lock(&trace_types_lock); 8392 - 8393 - ret = -ENODEV; 8394 - list_for_each_entry(tr, &ftrace_trace_arrays, list) { 8395 - if (tr->name && strcmp(tr->name, name) == 0) { 8396 - found = 1; 8397 - break; 8398 - } 8399 - } 8400 - if (!found) 8401 - goto out_unlock; 8402 - 8403 - ret = -EBUSY; 8404 8125 if (tr->ref || (tr->current_trace && tr->current_trace->ref)) 8405 - goto out_unlock; 8126 + return -EBUSY; 8406 8127 8407 8128 list_del(&tr->list); 8408 8129 ··· 8416 8161 free_cpumask_var(tr->tracing_cpumask); 8417 8162 kfree(tr->name); 
8418 8163 kfree(tr); 8164 + tr = NULL; 8419 8165 8420 - ret = 0; 8166 + return 0; 8167 + } 8421 8168 8422 - out_unlock: 8169 + int trace_array_destroy(struct trace_array *tr) 8170 + { 8171 + int ret; 8172 + 8173 + if (!tr) 8174 + return -EINVAL; 8175 + 8176 + mutex_lock(&event_mutex); 8177 + mutex_lock(&trace_types_lock); 8178 + 8179 + ret = __remove_instance(tr); 8180 + 8181 + mutex_unlock(&trace_types_lock); 8182 + mutex_unlock(&event_mutex); 8183 + 8184 + return ret; 8185 + } 8186 + EXPORT_SYMBOL_GPL(trace_array_destroy); 8187 + 8188 + static int instance_rmdir(const char *name) 8189 + { 8190 + struct trace_array *tr; 8191 + int ret; 8192 + 8193 + mutex_lock(&event_mutex); 8194 + mutex_lock(&trace_types_lock); 8195 + 8196 + ret = -ENODEV; 8197 + list_for_each_entry(tr, &ftrace_trace_arrays, list) { 8198 + if (tr->name && strcmp(tr->name, name) == 0) { 8199 + ret = __remove_instance(tr); 8200 + break; 8201 + } 8202 + } 8203 + 8423 8204 mutex_unlock(&trace_types_lock); 8424 8205 mutex_unlock(&event_mutex); 8425 8206 ··· 8544 8253 trace_create_file("snapshot", 0644, d_tracer, 8545 8254 tr, &snapshot_fops); 8546 8255 #endif 8256 + 8257 + trace_create_file("error_log", 0644, d_tracer, 8258 + tr, &tracing_err_log_fops); 8547 8259 8548 8260 for_each_tracing_cpu(cpu) 8549 8261 tracing_init_tracefs_percpu(tr, cpu); ··· 9133 8839 INIT_LIST_HEAD(&global_trace.systems); 9134 8840 INIT_LIST_HEAD(&global_trace.events); 9135 8841 INIT_LIST_HEAD(&global_trace.hist_vars); 8842 + INIT_LIST_HEAD(&global_trace.err_log); 9136 8843 list_add(&global_trace.list, &ftrace_trace_arrays); 9137 8844 9138 8845 apply_trace_boot_options();
+12 -1
kernel/trace/trace.h
··· 293 293 int nr_topts; 294 294 bool clear_trace; 295 295 int buffer_percent; 296 + unsigned int n_err_log_entries; 296 297 struct tracer *current_trace; 297 298 unsigned int trace_flags; 298 299 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; 299 300 unsigned int flags; 300 301 raw_spinlock_t start_lock; 302 + struct list_head err_log; 301 303 struct dentry *dir; 302 304 struct dentry *options; 303 305 struct dentry *percpu_dir; ··· 720 718 void trace_init_global_iter(struct trace_iterator *iter); 721 719 722 720 void tracing_iter_reset(struct trace_iterator *iter, int cpu); 721 + 722 + unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu); 723 + unsigned long trace_total_entries(struct trace_array *tr); 723 724 724 725 void trace_function(struct trace_array *tr, 725 726 unsigned long ip, ··· 1550 1545 extern void print_subsystem_event_filter(struct event_subsystem *system, 1551 1546 struct trace_seq *s); 1552 1547 extern int filter_assign_type(const char *type); 1553 - extern int create_event_filter(struct trace_event_call *call, 1548 + extern int create_event_filter(struct trace_array *tr, 1549 + struct trace_event_call *call, 1554 1550 char *filter_str, bool set_str, 1555 1551 struct event_filter **filterp); 1556 1552 extern void free_event_filter(struct event_filter *filter); ··· 1881 1875 extern ssize_t trace_parse_run_command(struct file *file, 1882 1876 const char __user *buffer, size_t count, loff_t *ppos, 1883 1877 int (*createfn)(int, char**)); 1878 + 1879 + extern unsigned int err_pos(char *cmd, const char *str); 1880 + extern void tracing_log_err(struct trace_array *tr, 1881 + const char *loc, const char *cmd, 1882 + const char **errs, u8 type, u8 pos); 1884 1883 1885 1884 /* 1886 1885 * Normal trace_printk() and friends allocates special buffers
+1 -3
kernel/trace/trace_events.c
··· 832 832 833 833 return ret; 834 834 } 835 + EXPORT_SYMBOL_GPL(ftrace_set_clr_event); 835 836 836 837 /** 837 838 * trace_set_clr_event - enable or disable an event ··· 1318 1317 int id = (long)event_file_data(filp); 1319 1318 char buf[32]; 1320 1319 int len; 1321 - 1322 - if (*ppos) 1323 - return 0; 1324 1320 1325 1321 if (unlikely(!id)) 1326 1322 return -ENODEV;
+45 -35
kernel/trace/trace_events_filter.c
··· 66 66 C(INVALID_FILTER, "Meaningless filter expression"), \ 67 67 C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \ 68 68 C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \ 69 - C(NO_FILTER, "No filter found"), 69 + C(ERRNO, "Error"), \ 70 + C(NO_FILTER, "No filter found") 70 71 71 72 #undef C 72 73 #define C(a, b) FILT_ERR_##a ··· 77 76 #undef C 78 77 #define C(a, b) b 79 78 80 - static char *err_text[] = { ERRORS }; 79 + static const char *err_text[] = { ERRORS }; 81 80 82 81 /* Called after a '!' character but "!=" and "!~" are not "not"s */ 83 82 static bool is_not(const char *str) ··· 920 919 filter->filter_string = NULL; 921 920 } 922 921 923 - static void append_filter_err(struct filter_parse_error *pe, 922 + static void append_filter_err(struct trace_array *tr, 923 + struct filter_parse_error *pe, 924 924 struct event_filter *filter) 925 925 { 926 926 struct trace_seq *s; ··· 949 947 if (pe->lasterr > 0) { 950 948 trace_seq_printf(s, "\n%*s", pos, "^"); 951 949 trace_seq_printf(s, "\nparse_error: %s\n", err_text[pe->lasterr]); 950 + tracing_log_err(tr, "event filter parse error", 951 + filter->filter_string, err_text, 952 + pe->lasterr, pe->lasterr_pos); 952 953 } else { 953 954 trace_seq_printf(s, "\nError: (%d)\n", pe->lasterr); 955 + tracing_log_err(tr, "event filter parse error", 956 + filter->filter_string, err_text, 957 + FILT_ERR_ERRNO, 0); 954 958 } 955 959 trace_seq_putc(s, 0); 956 960 buf = kmemdup_nul(s->buffer, s->seq.len, GFP_KERNEL); ··· 1222 1214 * (perf doesn't use it) and grab everything. 
1223 1215 */ 1224 1216 if (strcmp(field->name, "ip") != 0) { 1225 - parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i); 1226 - goto err_free; 1227 - } 1228 - pred->fn = filter_pred_none; 1217 + parse_error(pe, FILT_ERR_IP_FIELD_ONLY, pos + i); 1218 + goto err_free; 1219 + } 1220 + pred->fn = filter_pred_none; 1229 1221 1230 - /* 1231 - * Quotes are not required, but if they exist then we need 1232 - * to read them till we hit a matching one. 1233 - */ 1234 - if (str[i] == '\'' || str[i] == '"') 1235 - q = str[i]; 1236 - else 1237 - q = 0; 1222 + /* 1223 + * Quotes are not required, but if they exist then we need 1224 + * to read them till we hit a matching one. 1225 + */ 1226 + if (str[i] == '\'' || str[i] == '"') 1227 + q = str[i]; 1228 + else 1229 + q = 0; 1238 1230 1239 - for (i++; str[i]; i++) { 1240 - if (q && str[i] == q) 1241 - break; 1242 - if (!q && (str[i] == ')' || str[i] == '&' || 1243 - str[i] == '|')) 1244 - break; 1245 - } 1246 - /* Skip quotes */ 1247 - if (q) 1248 - s++; 1231 + for (i++; str[i]; i++) { 1232 + if (q && str[i] == q) 1233 + break; 1234 + if (!q && (str[i] == ')' || str[i] == '&' || 1235 + str[i] == '|')) 1236 + break; 1237 + } 1238 + /* Skip quotes */ 1239 + if (q) 1240 + s++; 1249 1241 len = i - s; 1250 1242 if (len >= MAX_FILTER_STR_VAL) { 1251 1243 parse_error(pe, FILT_ERR_OPERAND_TOO_LONG, pos + i); ··· 1608 1600 if (err) { 1609 1601 filter_disable(file); 1610 1602 parse_error(pe, FILT_ERR_BAD_SUBSYS_FILTER, 0); 1611 - append_filter_err(pe, filter); 1603 + append_filter_err(tr, pe, filter); 1612 1604 } else 1613 1605 event_set_filtered_flag(file); 1614 1606 ··· 1720 1712 * information if @set_str is %true and the caller is responsible for 1721 1713 * freeing it. 
1722 1714 */ 1723 - static int create_filter(struct trace_event_call *call, 1715 + static int create_filter(struct trace_array *tr, 1716 + struct trace_event_call *call, 1724 1717 char *filter_string, bool set_str, 1725 1718 struct event_filter **filterp) 1726 1719 { ··· 1738 1729 1739 1730 err = process_preds(call, filter_string, *filterp, pe); 1740 1731 if (err && set_str) 1741 - append_filter_err(pe, *filterp); 1732 + append_filter_err(tr, pe, *filterp); 1742 1733 create_filter_finish(pe); 1743 1734 1744 1735 return err; 1745 1736 } 1746 1737 1747 - int create_event_filter(struct trace_event_call *call, 1738 + int create_event_filter(struct trace_array *tr, 1739 + struct trace_event_call *call, 1748 1740 char *filter_str, bool set_str, 1749 1741 struct event_filter **filterp) 1750 1742 { 1751 - return create_filter(call, filter_str, set_str, filterp); 1743 + return create_filter(tr, call, filter_str, set_str, filterp); 1752 1744 } 1753 1745 1754 1746 /** ··· 1776 1766 kfree((*filterp)->filter_string); 1777 1767 (*filterp)->filter_string = NULL; 1778 1768 } else { 1779 - append_filter_err(pe, *filterp); 1769 + append_filter_err(tr, pe, *filterp); 1780 1770 } 1781 1771 } 1782 1772 create_filter_finish(pe); ··· 1807 1797 return 0; 1808 1798 } 1809 1799 1810 - err = create_filter(call, filter_string, true, &filter); 1800 + err = create_filter(file->tr, call, filter_string, true, &filter); 1811 1801 1812 1802 /* 1813 1803 * Always swap the call filter with the new filter ··· 2063 2053 if (event->filter) 2064 2054 goto out_unlock; 2065 2055 2066 - err = create_filter(call, filter_str, false, &filter); 2056 + err = create_filter(NULL, call, filter_str, false, &filter); 2067 2057 if (err) 2068 2058 goto free_filter; 2069 2059 ··· 2212 2202 struct test_filter_data_t *d = &test_filter_data[i]; 2213 2203 int err; 2214 2204 2215 - err = create_filter(&event_ftrace_test_filter, d->filter, 2216 - false, &filter); 2205 + err = create_filter(NULL, &event_ftrace_test_filter, 
2206 + d->filter, false, &filter); 2217 2207 if (err) { 2218 2208 printk(KERN_INFO 2219 2209 "Failed to get filter for '%s', err %d\n",
+162 -120
kernel/trace/trace_events_hist.c
··· 22 22 23 23 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */ 24 24 25 + #define ERRORS \ 26 + C(NONE, "No error"), \ 27 + C(DUPLICATE_VAR, "Variable already defined"), \ 28 + C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \ 29 + C(TOO_MANY_VARS, "Too many variables defined"), \ 30 + C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \ 31 + C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \ 32 + C(TRIGGER_EEXIST, "Hist trigger already exists"), \ 33 + C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \ 34 + C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \ 35 + C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \ 36 + C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \ 37 + C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \ 38 + C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \ 39 + C(EVENT_FILE_NOT_FOUND, "Event file not found"), \ 40 + C(HIST_NOT_FOUND, "Matching event histogram not found"), \ 41 + C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \ 42 + C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \ 43 + C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \ 44 + C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \ 45 + C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \ 46 + C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \ 47 + C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \ 48 + C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \ 49 + C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \ 50 + C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \ 51 + C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \ 52 + C(TOO_MANY_PARAMS, "Too many action params"), \ 53 + C(PARAM_NOT_FOUND, "Couldn't find 
param"), \ 54 + C(INVALID_PARAM, "Invalid action param"), \ 55 + C(ACTION_NOT_FOUND, "No action found"), \ 56 + C(NO_SAVE_PARAMS, "No params found for save()"), \ 57 + C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \ 58 + C(ACTION_MISMATCH, "Handler doesn't support action"), \ 59 + C(NO_CLOSING_PAREN, "No closing paren found"), \ 60 + C(SUBSYS_NOT_FOUND, "Missing subsystem"), \ 61 + C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \ 62 + C(INVALID_REF_KEY, "Using variable references as keys not supported"), \ 63 + C(VAR_NOT_FOUND, "Couldn't find variable"), \ 64 + C(FIELD_NOT_FOUND, "Couldn't find field"), 65 + 66 + #undef C 67 + #define C(a, b) HIST_ERR_##a 68 + 69 + enum { ERRORS }; 70 + 71 + #undef C 72 + #define C(a, b) b 73 + 74 + static const char *err_text[] = { ERRORS }; 75 + 25 76 struct hist_field; 26 77 27 78 typedef u64 (*hist_field_fn_t) (struct hist_field *field, ··· 586 535 return data; 587 536 } 588 537 589 - static char last_hist_cmd[MAX_FILTER_STR_VAL]; 590 - static char hist_err_str[MAX_FILTER_STR_VAL]; 538 + static char last_cmd[MAX_FILTER_STR_VAL]; 539 + static char last_cmd_loc[MAX_FILTER_STR_VAL]; 591 540 592 - static void last_cmd_set(char *str) 541 + static int errpos(char *str) 593 542 { 543 + return err_pos(last_cmd, str); 544 + } 545 + 546 + static void last_cmd_set(struct trace_event_file *file, char *str) 547 + { 548 + const char *system = NULL, *name = NULL; 549 + struct trace_event_call *call; 550 + 594 551 if (!str) 595 552 return; 596 553 597 - strncpy(last_hist_cmd, str, MAX_FILTER_STR_VAL - 1); 554 + strncpy(last_cmd, str, MAX_FILTER_STR_VAL - 1); 555 + 556 + if (file) { 557 + call = file->event_call; 558 + 559 + system = call->class->system; 560 + if (system) { 561 + name = trace_event_name(call); 562 + if (!name) 563 + system = NULL; 564 + } 565 + } 566 + 567 + if (system) 568 + snprintf(last_cmd_loc, MAX_FILTER_STR_VAL, "hist:%s:%s", system, name); 598 569 } 599 570 600 - static void 
hist_err(char *str, char *var) 571 + static void hist_err(struct trace_array *tr, u8 err_type, u8 err_pos) 601 572 { 602 - int maxlen = MAX_FILTER_STR_VAL - 1; 603 - 604 - if (!str) 605 - return; 606 - 607 - if (strlen(hist_err_str)) 608 - return; 609 - 610 - if (!var) 611 - var = ""; 612 - 613 - if (strlen(hist_err_str) + strlen(str) + strlen(var) > maxlen) 614 - return; 615 - 616 - strcat(hist_err_str, str); 617 - strcat(hist_err_str, var); 618 - } 619 - 620 - static void hist_err_event(char *str, char *system, char *event, char *var) 621 - { 622 - char err[MAX_FILTER_STR_VAL]; 623 - 624 - if (system && var) 625 - snprintf(err, MAX_FILTER_STR_VAL, "%s.%s.%s", system, event, var); 626 - else if (system) 627 - snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event); 628 - else 629 - strscpy(err, var, MAX_FILTER_STR_VAL); 630 - 631 - hist_err(str, err); 573 + tracing_log_err(tr, last_cmd_loc, last_cmd, err_text, 574 + err_type, err_pos); 632 575 } 633 576 634 577 static void hist_err_clear(void) 635 578 { 636 - hist_err_str[0] = '\0'; 637 - } 638 - 639 - static bool have_hist_err(void) 640 - { 641 - if (strlen(hist_err_str)) 642 - return true; 643 - 644 - return false; 579 + last_cmd[0] = '\0'; 580 + last_cmd_loc[0] = '\0'; 645 581 } 646 582 647 583 struct synth_trace_event { ··· 1757 1719 1758 1720 if (find_var_field(var_hist_data, var_name)) { 1759 1721 if (found) { 1760 - hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name); 1722 + hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, errpos(var_name)); 1761 1723 return NULL; 1762 1724 } 1763 1725 ··· 1808 1770 hist_field = find_file_var(file, var_name); 1809 1771 if (hist_field) { 1810 1772 if (found) { 1811 - hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name); 1773 + hist_err(tr, HIST_ERR_VAR_NOT_UNIQUE, 1774 + errpos(var_name)); 1812 1775 return 
ERR_PTR(-EINVAL); 1813 1776 } 1814 1777 ··· 2041 2002 attrs->n_actions++; 2042 2003 ret = 0; 2043 2004 } 2044 - 2045 2005 return ret; 2046 2006 } 2047 2007 2048 - static int parse_assignment(char *str, struct hist_trigger_attrs *attrs) 2008 + static int parse_assignment(struct trace_array *tr, 2009 + char *str, struct hist_trigger_attrs *attrs) 2049 2010 { 2050 2011 int ret = 0; 2051 2012 ··· 2101 2062 char *assignment; 2102 2063 2103 2064 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) { 2104 - hist_err("Too many variables defined: ", str); 2065 + hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(str)); 2105 2066 ret = -EINVAL; 2106 2067 goto out; 2107 2068 } ··· 2118 2079 return ret; 2119 2080 } 2120 2081 2121 - static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str) 2082 + static struct hist_trigger_attrs * 2083 + parse_hist_trigger_attrs(struct trace_array *tr, char *trigger_str) 2122 2084 { 2123 2085 struct hist_trigger_attrs *attrs; 2124 2086 int ret = 0; ··· 2132 2092 char *str = strsep(&trigger_str, ":"); 2133 2093 2134 2094 if (strchr(str, '=')) { 2135 - ret = parse_assignment(str, attrs); 2095 + ret = parse_assignment(tr, str, attrs); 2136 2096 if (ret) 2137 2097 goto free; 2138 2098 } else if (strcmp(str, "pause") == 0) ··· 2688 2648 char *var_name) 2689 2649 { 2690 2650 struct hist_field *var_field = NULL, *ref_field = NULL; 2651 + struct trace_array *tr = hist_data->event_file->tr; 2691 2652 2692 2653 if (!is_var_ref(var_name)) 2693 2654 return NULL; ··· 2701 2660 system, event_name); 2702 2661 2703 2662 if (!ref_field) 2704 - hist_err_event("Couldn't find variable: $", 2705 - system, event_name, var_name); 2663 + hist_err(tr, HIST_ERR_VAR_NOT_FOUND, errpos(var_name)); 2706 2664 2707 2665 return ref_field; 2708 2666 } ··· 2712 2672 { 2713 2673 struct ftrace_event_field *field = NULL; 2714 2674 char *field_name, *modifier, *str; 2675 + struct trace_array *tr = file->tr; 2715 2676 2716 2677 modifier = str = kstrdup(field_str, 
GFP_KERNEL); 2717 2678 if (!modifier) ··· 2736 2695 else if (strcmp(modifier, "usecs") == 0) 2737 2696 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2738 2697 else { 2739 - hist_err("Invalid field modifier: ", modifier); 2698 + hist_err(tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(modifier)); 2740 2699 field = ERR_PTR(-EINVAL); 2741 2700 goto out; 2742 2701 } ··· 2752 2711 else { 2753 2712 field = trace_find_event_field(file->event_call, field_name); 2754 2713 if (!field || !field->size) { 2755 - hist_err("Couldn't find field: ", field_name); 2714 + hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); 2756 2715 field = ERR_PTR(-EINVAL); 2757 2716 goto out; 2758 2717 } ··· 2814 2773 2815 2774 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var); 2816 2775 if (!s) { 2817 - hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var); 2776 + hist_field = parse_var_ref(hist_data, ref_system, 2777 + ref_event, ref_var); 2818 2778 if (hist_field) { 2819 2779 if (var_name) { 2820 2780 hist_field = create_alias(hist_data, hist_field, var_name); ··· 2864 2822 /* we support only -(xxx) i.e. 
explicit parens required */ 2865 2823 2866 2824 if (level > 3) { 2867 - hist_err("Too many subexpressions (3 max): ", str); 2825 + hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2868 2826 ret = -EINVAL; 2869 2827 goto free; 2870 2828 } ··· 2919 2877 return ERR_PTR(ret); 2920 2878 } 2921 2879 2922 - static int check_expr_operands(struct hist_field *operand1, 2880 + static int check_expr_operands(struct trace_array *tr, 2881 + struct hist_field *operand1, 2923 2882 struct hist_field *operand2) 2924 2883 { 2925 2884 unsigned long operand1_flags = operand1->flags; ··· 2948 2905 2949 2906 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) != 2950 2907 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) { 2951 - hist_err("Timestamp units in expression don't match", NULL); 2908 + hist_err(tr, HIST_ERR_TIMESTAMP_MISMATCH, 0); 2952 2909 return -EINVAL; 2953 2910 } 2954 2911 ··· 2966 2923 char *sep, *operand1_str; 2967 2924 2968 2925 if (level > 3) { 2969 - hist_err("Too many subexpressions (3 max): ", str); 2926 + hist_err(file->tr, HIST_ERR_TOO_MANY_SUBEXPR, errpos(str)); 2970 2927 return ERR_PTR(-EINVAL); 2971 2928 } 2972 2929 ··· 3011 2968 goto free; 3012 2969 } 3013 2970 3014 - ret = check_expr_operands(operand1, operand2); 2971 + ret = check_expr_operands(file->tr, operand1, operand2); 3015 2972 if (ret) 3016 2973 goto free; 3017 2974 ··· 3204 3161 int ret; 3205 3162 3206 3163 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) { 3207 - hist_err_event("trace action: Too many field variables defined: ", 3208 - subsys_name, event_name, field_name); 3164 + hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 3209 3165 return ERR_PTR(-EINVAL); 3210 3166 } 3211 3167 3212 3168 file = event_file(tr, subsys_name, event_name); 3213 3169 3214 3170 if (IS_ERR(file)) { 3215 - hist_err_event("trace action: Event file not found: ", 3216 - subsys_name, event_name, field_name); 3171 + hist_err(tr, HIST_ERR_EVENT_FILE_NOT_FOUND, errpos(field_name)); 
3217 3172 ret = PTR_ERR(file); 3218 3173 return ERR_PTR(ret); 3219 3174 } ··· 3224 3183 */ 3225 3184 hist_data = find_compatible_hist(target_hist_data, file); 3226 3185 if (!hist_data) { 3227 - hist_err_event("trace action: Matching event histogram not found: ", 3228 - subsys_name, event_name, field_name); 3186 + hist_err(tr, HIST_ERR_HIST_NOT_FOUND, errpos(field_name)); 3229 3187 return ERR_PTR(-EINVAL); 3230 3188 } 3231 3189 ··· 3285 3245 kfree(cmd); 3286 3246 kfree(var_hist->cmd); 3287 3247 kfree(var_hist); 3288 - hist_err_event("trace action: Couldn't create histogram for field: ", 3289 - subsys_name, event_name, field_name); 3248 + hist_err(tr, HIST_ERR_HIST_CREATE_FAIL, errpos(field_name)); 3290 3249 return ERR_PTR(ret); 3291 3250 } 3292 3251 ··· 3297 3258 if (IS_ERR_OR_NULL(event_var)) { 3298 3259 kfree(var_hist->cmd); 3299 3260 kfree(var_hist); 3300 - hist_err_event("trace action: Couldn't find synthetic variable: ", 3301 - subsys_name, event_name, field_name); 3261 + hist_err(tr, HIST_ERR_SYNTH_VAR_NOT_FOUND, errpos(field_name)); 3302 3262 return ERR_PTR(-EINVAL); 3303 3263 } 3304 3264 ··· 3430 3392 { 3431 3393 struct hist_field *val = NULL, *var = NULL; 3432 3394 unsigned long flags = HIST_FIELD_FL_VAR; 3395 + struct trace_array *tr = file->tr; 3433 3396 struct field_var *field_var; 3434 3397 int ret = 0; 3435 3398 3436 3399 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) { 3437 - hist_err("Too many field variables defined: ", field_name); 3400 + hist_err(tr, HIST_ERR_TOO_MANY_FIELD_VARS, errpos(field_name)); 3438 3401 ret = -EINVAL; 3439 3402 goto err; 3440 3403 } 3441 3404 3442 3405 val = parse_atom(hist_data, file, field_name, &flags, NULL); 3443 3406 if (IS_ERR(val)) { 3444 - hist_err("Couldn't parse field variable: ", field_name); 3407 + hist_err(tr, HIST_ERR_FIELD_VAR_PARSE_FAIL, errpos(field_name)); 3445 3408 ret = PTR_ERR(val); 3446 3409 goto err; 3447 3410 } 3448 3411 3449 3412 var = create_var(hist_data, file, field_name, val->size, val->type); 
3450 3413 if (IS_ERR(var)) { 3451 - hist_err("Couldn't create or find variable: ", field_name); 3414 + hist_err(tr, HIST_ERR_VAR_CREATE_FIND_FAIL, errpos(field_name)); 3452 3415 kfree(val); 3453 3416 ret = PTR_ERR(var); 3454 3417 goto err; ··· 3776 3737 { 3777 3738 struct hist_field *var_field, *ref_field, *track_var = NULL; 3778 3739 struct trace_event_file *file = hist_data->event_file; 3740 + struct trace_array *tr = file->tr; 3779 3741 char *track_data_var_str; 3780 3742 int ret = 0; 3781 3743 3782 3744 track_data_var_str = data->track_data.var_str; 3783 3745 if (track_data_var_str[0] != '$') { 3784 - hist_err("For onmax(x) or onchange(x), x must be a variable: ", track_data_var_str); 3746 + hist_err(tr, HIST_ERR_ONX_NOT_VAR, errpos(track_data_var_str)); 3785 3747 return -EINVAL; 3786 3748 } 3787 3749 track_data_var_str++; 3788 3750 3789 3751 var_field = find_target_event_var(hist_data, NULL, NULL, track_data_var_str); 3790 3752 if (!var_field) { 3791 - hist_err("Couldn't find onmax or onchange variable: ", track_data_var_str); 3753 + hist_err(tr, HIST_ERR_ONX_VAR_NOT_FOUND, errpos(track_data_var_str)); 3792 3754 return -EINVAL; 3793 3755 } 3794 3756 ··· 3802 3762 if (data->handler == HANDLER_ONMAX) 3803 3763 track_var = create_var(hist_data, file, "__max", sizeof(u64), "u64"); 3804 3764 if (IS_ERR(track_var)) { 3805 - hist_err("Couldn't create onmax variable: ", "__max"); 3765 + hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3806 3766 ret = PTR_ERR(track_var); 3807 3767 goto out; 3808 3768 } ··· 3810 3770 if (data->handler == HANDLER_ONCHANGE) 3811 3771 track_var = create_var(hist_data, file, "__change", sizeof(u64), "u64"); 3812 3772 if (IS_ERR(track_var)) { 3813 - hist_err("Couldn't create onchange variable: ", "__change"); 3773 + hist_err(tr, HIST_ERR_ONX_VAR_CREATE_FAIL, 0); 3814 3774 ret = PTR_ERR(track_var); 3815 3775 goto out; 3816 3776 } ··· 3821 3781 return ret; 3822 3782 } 3823 3783 3824 - static int parse_action_params(char *params, struct 
action_data *data) 3784 + static int parse_action_params(struct trace_array *tr, char *params, 3785 + struct action_data *data) 3825 3786 { 3826 3787 char *param, *saved_param; 3827 3788 bool first_param = true; ··· 3830 3789 3831 3790 while (params) { 3832 3791 if (data->n_params >= SYNTH_FIELDS_MAX) { 3833 - hist_err("Too many action params", ""); 3792 + hist_err(tr, HIST_ERR_TOO_MANY_PARAMS, 0); 3834 3793 goto out; 3835 3794 } 3836 3795 3837 3796 param = strsep(&params, ","); 3838 3797 if (!param) { 3839 - hist_err("No action param found", ""); 3798 + hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, 0); 3840 3799 ret = -EINVAL; 3841 3800 goto out; 3842 3801 } 3843 3802 3844 3803 param = strstrip(param); 3845 3804 if (strlen(param) < 2) { 3846 - hist_err("Invalid action param: ", param); 3805 + hist_err(tr, HIST_ERR_INVALID_PARAM, errpos(param)); 3847 3806 ret = -EINVAL; 3848 3807 goto out; 3849 3808 } ··· 3867 3826 return ret; 3868 3827 } 3869 3828 3870 - static int action_parse(char *str, struct action_data *data, 3829 + static int action_parse(struct trace_array *tr, char *str, struct action_data *data, 3871 3830 enum handler_id handler) 3872 3831 { 3873 3832 char *action_name; ··· 3875 3834 3876 3835 strsep(&str, "."); 3877 3836 if (!str) { 3878 - hist_err("action parsing: No action found", ""); 3837 + hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3879 3838 ret = -EINVAL; 3880 3839 goto out; 3881 3840 } 3882 3841 3883 3842 action_name = strsep(&str, "("); 3884 3843 if (!action_name || !str) { 3885 - hist_err("action parsing: No action found", ""); 3844 + hist_err(tr, HIST_ERR_ACTION_NOT_FOUND, 0); 3886 3845 ret = -EINVAL; 3887 3846 goto out; 3888 3847 } ··· 3891 3850 char *params = strsep(&str, ")"); 3892 3851 3893 3852 if (!params) { 3894 - hist_err("action parsing: No params found for %s", "save"); 3853 + hist_err(tr, HIST_ERR_NO_SAVE_PARAMS, 0); 3895 3854 ret = -EINVAL; 3896 3855 goto out; 3897 3856 } 3898 3857 3899 - ret = parse_action_params(params, data); 3858 + 
ret = parse_action_params(tr, params, data); 3900 3859 if (ret) 3901 3860 goto out; 3902 3861 ··· 3905 3864 else if (handler == HANDLER_ONCHANGE) 3906 3865 data->track_data.check_val = check_track_val_changed; 3907 3866 else { 3908 - hist_err("action parsing: Handler doesn't support action: ", action_name); 3867 + hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3909 3868 ret = -EINVAL; 3910 3869 goto out; 3911 3870 } ··· 3917 3876 char *params = strsep(&str, ")"); 3918 3877 3919 3878 if (!str) { 3920 - hist_err("action parsing: No closing paren found: %s", params); 3879 + hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(params)); 3921 3880 ret = -EINVAL; 3922 3881 goto out; 3923 3882 } ··· 3927 3886 else if (handler == HANDLER_ONCHANGE) 3928 3887 data->track_data.check_val = check_track_val_changed; 3929 3888 else { 3930 - hist_err("action parsing: Handler doesn't support action: ", action_name); 3889 + hist_err(tr, HIST_ERR_ACTION_MISMATCH, errpos(action_name)); 3931 3890 ret = -EINVAL; 3932 3891 goto out; 3933 3892 } ··· 3942 3901 data->use_trace_keyword = true; 3943 3902 3944 3903 if (params) { 3945 - ret = parse_action_params(params, data); 3904 + ret = parse_action_params(tr, params, data); 3946 3905 if (ret) 3947 3906 goto out; 3948 3907 } ··· 3995 3954 goto free; 3996 3955 } 3997 3956 3998 - ret = action_parse(str, data, handler); 3957 + ret = action_parse(hist_data->event_file->tr, str, data, handler); 3999 3958 if (ret) 4000 3959 goto free; 4001 3960 out: ··· 4065 4024 struct action_data *data, 4066 4025 char *system, char *event, char *var) 4067 4026 { 4027 + struct trace_array *tr = hist_data->event_file->tr; 4068 4028 struct hist_field *hist_field; 4069 4029 4070 4030 var++; /* skip '$' */ ··· 4081 4039 } 4082 4040 4083 4041 if (!hist_field) 4084 - hist_err_event("trace action: Couldn't find param: $", system, event, var); 4042 + hist_err(tr, HIST_ERR_PARAM_NOT_FOUND, errpos(var)); 4085 4043 4086 4044 return hist_field; 4087 4045 } ··· 4139 
4097 static int trace_action_create(struct hist_trigger_data *hist_data, 4140 4098 struct action_data *data) 4141 4099 { 4100 + struct trace_array *tr = hist_data->event_file->tr; 4142 4101 char *event_name, *param, *system = NULL; 4143 4102 struct hist_field *hist_field, *var_ref; 4144 4103 unsigned int i, var_ref_idx; ··· 4157 4114 4158 4115 event = find_synth_event(synth_event_name); 4159 4116 if (!event) { 4160 - hist_err("trace action: Couldn't find synthetic event: ", synth_event_name); 4117 + hist_err(tr, HIST_ERR_SYNTH_EVENT_NOT_FOUND, errpos(synth_event_name)); 4161 4118 return -EINVAL; 4162 4119 } 4163 4120 ··· 4218 4175 continue; 4219 4176 } 4220 4177 4221 - hist_err_event("trace action: Param type doesn't match synthetic event field type: ", 4222 - system, event_name, param); 4178 + hist_err(tr, HIST_ERR_SYNTH_TYPE_MISMATCH, errpos(param)); 4223 4179 kfree(p); 4224 4180 ret = -EINVAL; 4225 4181 goto err; 4226 4182 } 4227 4183 4228 4184 if (field_pos != event->n_fields) { 4229 - hist_err("trace action: Param count doesn't match synthetic event field count: ", event->name); 4185 + hist_err(tr, HIST_ERR_SYNTH_COUNT_MISMATCH, errpos(event->name)); 4230 4186 ret = -EINVAL; 4231 4187 goto err; 4232 4188 } ··· 4244 4202 struct action_data *data) 4245 4203 { 4246 4204 struct trace_event_file *file = hist_data->event_file; 4205 + struct trace_array *tr = file->tr; 4247 4206 struct track_data *track_data; 4248 4207 struct field_var *field_var; 4249 4208 unsigned int i; ··· 4272 4229 if (data->action == ACTION_SAVE) { 4273 4230 if (hist_data->n_save_vars) { 4274 4231 ret = -EEXIST; 4275 - hist_err("save action: Can't have more than one save() action per hist", ""); 4232 + hist_err(tr, HIST_ERR_TOO_MANY_SAVE_ACTIONS, 0); 4276 4233 goto out; 4277 4234 } 4278 4235 ··· 4285 4242 4286 4243 field_var = create_target_field_var(hist_data, NULL, NULL, param); 4287 4244 if (IS_ERR(field_var)) { 4288 - hist_err("save action: Couldn't create field variable: ", param); 4245 + 
hist_err(tr, HIST_ERR_FIELD_VAR_CREATE_FAIL, 4246 + errpos(param)); 4289 4247 ret = PTR_ERR(field_var); 4290 4248 kfree(param); 4291 4249 goto out; ··· 4320 4276 4321 4277 match_event = strsep(&str, ")"); 4322 4278 if (!match_event || !str) { 4323 - hist_err("onmatch: Missing closing paren: ", match_event); 4279 + hist_err(tr, HIST_ERR_NO_CLOSING_PAREN, errpos(match_event)); 4324 4280 goto free; 4325 4281 } 4326 4282 4327 4283 match_event_system = strsep(&match_event, "."); 4328 4284 if (!match_event) { 4329 - hist_err("onmatch: Missing subsystem for match event: ", match_event_system); 4285 + hist_err(tr, HIST_ERR_SUBSYS_NOT_FOUND, errpos(match_event_system)); 4330 4286 goto free; 4331 4287 } 4332 4288 4333 4289 if (IS_ERR(event_file(tr, match_event_system, match_event))) { 4334 - hist_err_event("onmatch: Invalid subsystem or event name: ", 4335 - match_event_system, match_event, NULL); 4290 + hist_err(tr, HIST_ERR_INVALID_SUBSYS_EVENT, errpos(match_event)); 4336 4291 goto free; 4337 4292 } 4338 4293 ··· 4347 4304 goto free; 4348 4305 } 4349 4306 4350 - ret = action_parse(str, data, HANDLER_ONMATCH); 4307 + ret = action_parse(tr, str, data, HANDLER_ONMATCH); 4351 4308 if (ret) 4352 4309 goto free; 4353 4310 out: ··· 4416 4373 struct trace_event_file *file, 4417 4374 char *var_name, char *expr_str) 4418 4375 { 4376 + struct trace_array *tr = hist_data->event_file->tr; 4419 4377 unsigned long flags = 0; 4420 4378 4421 4379 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX)) 4422 4380 return -EINVAL; 4423 4381 4424 4382 if (find_var(hist_data, file, var_name) && !hist_data->remove) { 4425 - hist_err("Variable already defined: ", var_name); 4383 + hist_err(tr, HIST_ERR_DUPLICATE_VAR, errpos(var_name)); 4426 4384 return -EINVAL; 4427 4385 } 4428 4386 ··· 4480 4436 struct trace_event_file *file, 4481 4437 char *field_str) 4482 4438 { 4439 + struct trace_array *tr = hist_data->event_file->tr; 4483 4440 struct hist_field *hist_field = NULL; 4484 - 4485 
4441 unsigned long flags = 0; 4486 4442 unsigned int key_size; 4487 4443 int ret = 0; ··· 4504 4460 } 4505 4461 4506 4462 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) { 4507 - hist_err("Using variable references as keys not supported: ", field_str); 4463 + hist_err(tr, HIST_ERR_INVALID_REF_KEY, errpos(field_str)); 4508 4464 destroy_hist_field(hist_field, 0); 4509 4465 ret = -EINVAL; 4510 4466 goto out; ··· 4605 4561 4606 4562 static int parse_var_defs(struct hist_trigger_data *hist_data) 4607 4563 { 4564 + struct trace_array *tr = hist_data->event_file->tr; 4608 4565 char *s, *str, *var_name, *field_str; 4609 4566 unsigned int i, j, n_vars = 0; 4610 4567 int ret = 0; ··· 4619 4574 4620 4575 var_name = strsep(&field_str, "="); 4621 4576 if (!var_name || !field_str) { 4622 - hist_err("Malformed assignment: ", var_name); 4577 + hist_err(tr, HIST_ERR_MALFORMED_ASSIGNMENT, 4578 + errpos(var_name)); 4623 4579 ret = -EINVAL; 4624 4580 goto free; 4625 4581 } 4626 4582 4627 4583 if (n_vars == TRACING_MAP_VARS_MAX) { 4628 - hist_err("Too many variables defined: ", var_name); 4584 + hist_err(tr, HIST_ERR_TOO_MANY_VARS, errpos(var_name)); 4629 4585 ret = -EINVAL; 4630 4586 goto free; 4631 4587 } ··· 5477 5431 hist_trigger_show(m, data, n++); 5478 5432 } 5479 5433 5480 - if (have_hist_err()) { 5481 - seq_printf(m, "\nERROR: %s\n", hist_err_str); 5482 - seq_printf(m, " Last command: %s\n", last_hist_cmd); 5483 - } 5484 - 5485 5434 out_unlock: 5486 5435 mutex_unlock(&event_mutex); 5487 5436 ··· 5841 5800 { 5842 5801 struct hist_trigger_data *hist_data = data->private_data; 5843 5802 struct event_trigger_data *test, *named_data = NULL; 5803 + struct trace_array *tr = file->tr; 5844 5804 int ret = 0; 5845 5805 5846 5806 if (hist_data->attrs->name) { ··· 5849 5807 if (named_data) { 5850 5808 if (!hist_trigger_match(data, named_data, named_data, 5851 5809 true)) { 5852 - hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", 
hist_data->attrs->name); 5810 + hist_err(tr, HIST_ERR_NAMED_MISMATCH, errpos(hist_data->attrs->name)); 5853 5811 ret = -EINVAL; 5854 5812 goto out; 5855 5813 } ··· 5870 5828 else if (hist_data->attrs->clear) 5871 5829 hist_clear(test); 5872 5830 else { 5873 - hist_err("Hist trigger already exists", NULL); 5831 + hist_err(tr, HIST_ERR_TRIGGER_EEXIST, 0); 5874 5832 ret = -EEXIST; 5875 5833 } 5876 5834 goto out; ··· 5878 5836 } 5879 5837 new: 5880 5838 if (hist_data->attrs->cont || hist_data->attrs->clear) { 5881 - hist_err("Can't clear or continue a nonexistent hist trigger", NULL); 5839 + hist_err(tr, HIST_ERR_TRIGGER_ENOENT_CLEAR, 0); 5882 5840 ret = -ENOENT; 5883 5841 goto out; 5884 5842 } ··· 5903 5861 5904 5862 ret = tracing_set_clock(file->tr, hist_data->attrs->clock); 5905 5863 if (ret) { 5906 - hist_err("Couldn't set trace_clock: ", clock); 5864 + hist_err(tr, HIST_ERR_SET_CLOCK_FAIL, errpos(clock)); 5907 5865 goto out; 5908 5866 } 5909 5867 ··· 6079 6037 lockdep_assert_held(&event_mutex); 6080 6038 6081 6039 if (glob && strlen(glob)) { 6082 - last_cmd_set(param); 6083 6040 hist_err_clear(); 6041 + last_cmd_set(file, param); 6084 6042 } 6085 6043 6086 6044 if (!param) ··· 6121 6079 trigger = strstrip(trigger); 6122 6080 } 6123 6081 6124 - attrs = parse_hist_trigger_attrs(trigger); 6082 + attrs = parse_hist_trigger_attrs(file->tr, trigger); 6125 6083 if (IS_ERR(attrs)) 6126 6084 return PTR_ERR(attrs); 6127 6085
+2 -1
kernel/trace/trace_events_trigger.c
··· 731 731 goto out; 732 732 733 733 /* The filter is for the 'trigger' event, not the triggered event */ 734 - ret = create_event_filter(file->event_call, filter_str, false, &filter); 734 + ret = create_event_filter(file->tr, file->event_call, 735 + filter_str, false, &filter); 735 736 /* 736 737 * If create_event_filter() fails, filter still needs to be freed. 737 738 * Which the calling code will do with data->filter.
+39 -22
kernel/trace/trace_kdb.c
··· 17 17 #include "trace.h" 18 18 #include "trace_output.h" 19 19 20 - static void ftrace_dump_buf(int skip_lines, long cpu_file) 20 + static struct trace_iterator iter; 21 + static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS]; 22 + 23 + static void ftrace_dump_buf(int skip_entries, long cpu_file) 21 24 { 22 - /* use static because iter can be a bit big for the stack */ 23 - static struct trace_iterator iter; 24 - static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS]; 25 25 struct trace_array *tr; 26 26 unsigned int old_userobj; 27 27 int cnt = 0, cpu; 28 28 29 - trace_init_global_iter(&iter); 30 - iter.buffer_iter = buffer_iter; 31 29 tr = iter.tr; 32 - 33 - for_each_tracing_cpu(cpu) { 34 - atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); 35 - } 36 30 37 31 old_userobj = tr->trace_flags; 38 32 ··· 34 40 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 35 41 36 42 kdb_printf("Dumping ftrace buffer:\n"); 43 + if (skip_entries) 44 + kdb_printf("(skipping %d entries)\n", skip_entries); 37 45 38 46 /* reset all but tr, trace, and overruns */ 39 47 memset(&iter.seq, 0, ··· 66 70 kdb_printf("---------------------------------\n"); 67 71 cnt++; 68 72 69 - if (!skip_lines) { 73 + if (!skip_entries) { 70 74 print_trace_line(&iter); 71 75 trace_printk_seq(&iter.seq); 72 76 } else { 73 - skip_lines--; 77 + skip_entries--; 74 78 } 75 79 76 80 if (KDB_FLAG(CMD_INTERRUPT)) ··· 86 90 tr->trace_flags = old_userobj; 87 91 88 92 for_each_tracing_cpu(cpu) { 89 - atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); 90 - } 91 - 92 - for_each_tracing_cpu(cpu) { 93 93 if (iter.buffer_iter[cpu]) { 94 94 ring_buffer_read_finish(iter.buffer_iter[cpu]); 95 95 iter.buffer_iter[cpu] = NULL; ··· 98 106 */ 99 107 static int kdb_ftdump(int argc, const char **argv) 100 108 { 101 - int skip_lines = 0; 109 + int skip_entries = 0; 102 110 long cpu_file; 103 111 char *cp; 112 + int cnt; 113 + int cpu; 104 114 105 115 if (argc > 2) 106 116 return KDB_ARGCOUNT; 107 
117 108 118 if (argc) { 109 - skip_lines = simple_strtol(argv[1], &cp, 0); 119 + skip_entries = simple_strtol(argv[1], &cp, 0); 110 120 if (*cp) 111 - skip_lines = 0; 121 + skip_entries = 0; 112 122 } 113 123 114 124 if (argc == 2) { ··· 123 129 } 124 130 125 131 kdb_trap_printk++; 126 - ftrace_dump_buf(skip_lines, cpu_file); 132 + 133 + trace_init_global_iter(&iter); 134 + iter.buffer_iter = buffer_iter; 135 + 136 + for_each_tracing_cpu(cpu) { 137 + atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); 138 + } 139 + 140 + /* A negative skip_entries means skip all but the last entries */ 141 + if (skip_entries < 0) { 142 + if (cpu_file == RING_BUFFER_ALL_CPUS) 143 + cnt = trace_total_entries(NULL); 144 + else 145 + cnt = trace_total_entries_cpu(NULL, cpu_file); 146 + skip_entries = max(cnt + skip_entries, 0); 147 + } 148 + 149 + ftrace_dump_buf(skip_entries, cpu_file); 150 + 151 + for_each_tracing_cpu(cpu) { 152 + atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); 153 + } 154 + 127 155 kdb_trap_printk--; 128 156 129 157 return 0; ··· 153 137 154 138 static __init int kdb_ftrace_register(void) 155 139 { 156 - kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", 157 - "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); 140 + kdb_register_flags("ftdump", kdb_ftdump, "[skip_#entries] [cpu]", 141 + "Dump ftrace log; -skip dumps last #entries", 0, 142 + KDB_ENABLE_ALWAYS_SAFE); 158 143 return 0; 159 144 } 160 145
+46 -31
kernel/trace/trace_kprobe.c
··· 441 441 else 442 442 ret = register_kprobe(&tk->rp.kp); 443 443 444 - if (ret == 0) { 444 + if (ret == 0) 445 445 tk->tp.flags |= TP_FLAG_REGISTERED; 446 - } else if (ret == -EILSEQ) { 447 - pr_warn("Probing address(0x%p) is not an instruction boundary.\n", 448 - tk->rp.kp.addr); 449 - ret = -EINVAL; 450 - } 451 446 return ret; 452 447 } 453 448 ··· 586 591 * Type of args: 587 592 * FETCHARG:TYPE : use TYPE instead of unsigned long. 588 593 */ 589 - struct trace_kprobe *tk; 594 + struct trace_kprobe *tk = NULL; 590 595 int i, len, ret = 0; 591 596 bool is_return = false; 592 597 char *symbol = NULL, *tmp = NULL; ··· 610 615 if (argc < 2) 611 616 return -ECANCELED; 612 617 618 + trace_probe_log_init("trace_kprobe", argc, argv); 619 + 613 620 event = strchr(&argv[0][1], ':'); 614 621 if (event) 615 622 event++; 616 623 617 624 if (isdigit(argv[0][1])) { 618 625 if (!is_return) { 619 - pr_info("Maxactive is not for kprobe"); 620 - return -EINVAL; 626 + trace_probe_log_err(1, MAXACT_NO_KPROBE); 627 + goto parse_error; 621 628 } 622 629 if (event) 623 630 len = event - &argv[0][1] - 1; 624 631 else 625 632 len = strlen(&argv[0][1]); 626 - if (len > MAX_EVENT_NAME_LEN - 1) 627 - return -E2BIG; 633 + if (len > MAX_EVENT_NAME_LEN - 1) { 634 + trace_probe_log_err(1, BAD_MAXACT); 635 + goto parse_error; 636 + } 628 637 memcpy(buf, &argv[0][1], len); 629 638 buf[len] = '\0'; 630 639 ret = kstrtouint(buf, 0, &maxactive); 631 640 if (ret || !maxactive) { 632 - pr_info("Invalid maxactive number\n"); 633 - return ret; 641 + trace_probe_log_err(1, BAD_MAXACT); 642 + goto parse_error; 634 643 } 635 644 /* kretprobes instances are iterated over via a list. The 636 645 * maximum should stay reasonable. 
637 646 */ 638 647 if (maxactive > KRETPROBE_MAXACTIVE_MAX) { 639 - pr_info("Maxactive is too big (%d > %d).\n", 640 - maxactive, KRETPROBE_MAXACTIVE_MAX); 641 - return -E2BIG; 648 + trace_probe_log_err(1, MAXACT_TOO_BIG); 649 + goto parse_error; 642 650 } 643 651 } 644 652 645 653 /* try to parse an address. if that fails, try to read the 646 654 * input as a symbol. */ 647 655 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) { 656 + trace_probe_log_set_index(1); 648 657 /* Check whether uprobe event specified */ 649 - if (strchr(argv[1], '/') && strchr(argv[1], ':')) 650 - return -ECANCELED; 658 + if (strchr(argv[1], '/') && strchr(argv[1], ':')) { 659 + ret = -ECANCELED; 660 + goto error; 661 + } 651 662 /* a symbol specified */ 652 663 symbol = kstrdup(argv[1], GFP_KERNEL); 653 664 if (!symbol) ··· 661 660 /* TODO: support .init module functions */ 662 661 ret = traceprobe_split_symbol_offset(symbol, &offset); 663 662 if (ret || offset < 0 || offset > UINT_MAX) { 664 - pr_info("Failed to parse either an address or a symbol.\n"); 665 - goto out; 663 + trace_probe_log_err(0, BAD_PROBE_ADDR); 664 + goto parse_error; 666 665 } 667 666 if (kprobe_on_func_entry(NULL, symbol, offset)) 668 667 flags |= TPARG_FL_FENTRY; 669 668 if (offset && is_return && !(flags & TPARG_FL_FENTRY)) { 670 - pr_info("Given offset is not valid for return probe.\n"); 671 - ret = -EINVAL; 672 - goto out; 669 + trace_probe_log_err(0, BAD_RETPROBE); 670 + goto parse_error; 673 671 } 674 672 } 675 - argc -= 2; argv += 2; 676 673 674 + trace_probe_log_set_index(0); 677 675 if (event) { 678 - ret = traceprobe_parse_event_name(&event, &group, buf); 676 + ret = traceprobe_parse_event_name(&event, &group, buf, 677 + event - argv[0]); 679 678 if (ret) 680 - goto out; 679 + goto parse_error; 681 680 } else { 682 681 /* Make a new event name */ 683 682 if (symbol) ··· 692 691 693 692 /* setup a probe */ 694 693 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive, 695 - argc, 
is_return); 694 + argc - 2, is_return); 696 695 if (IS_ERR(tk)) { 697 696 ret = PTR_ERR(tk); 698 - /* This must return -ENOMEM otherwise there is a bug */ 697 + /* This must return -ENOMEM, else there is a bug */ 699 698 WARN_ON_ONCE(ret != -ENOMEM); 700 - goto out; 699 + goto out; /* We know tk is not allocated */ 701 700 } 701 + argc -= 2; argv += 2; 702 702 703 703 /* parse arguments */ 704 704 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { ··· 709 707 goto error; 710 708 } 711 709 710 + trace_probe_log_set_index(i + 2); 712 711 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags); 713 712 kfree(tmp); 714 713 if (ret) 715 - goto error; 714 + goto error; /* This can be -ENOMEM */ 716 715 } 717 716 718 717 ret = register_trace_kprobe(tk); 719 - if (ret) 718 + if (ret) { 719 + trace_probe_log_set_index(1); 720 + if (ret == -EILSEQ) 721 + trace_probe_log_err(0, BAD_INSN_BNDRY); 722 + else if (ret == -ENOENT) 723 + trace_probe_log_err(0, BAD_PROBE_ADDR); 724 + else if (ret != -ENOMEM) 725 + trace_probe_log_err(0, FAIL_REG_PROBE); 720 726 goto error; 727 + } 728 + 721 729 out: 730 + trace_probe_log_clear(); 722 731 kfree(symbol); 723 732 return ret; 724 733 734 + parse_error: 735 + ret = -EINVAL; 725 736 error: 726 737 free_trace_kprobe(tk); 727 738 goto out;
+205 -86
kernel/trace/trace_probe.c
··· 13 13 14 14 #include "trace_probe.h" 15 15 16 + #undef C 17 + #define C(a, b) b 18 + 19 + static const char *trace_probe_err_text[] = { ERRORS }; 20 + 16 21 static const char *reserved_field_names[] = { 17 22 "common_type", 18 23 "common_flags", ··· 138 133 return NULL; 139 134 } 140 135 136 + static struct trace_probe_log trace_probe_log; 137 + 138 + void trace_probe_log_init(const char *subsystem, int argc, const char **argv) 139 + { 140 + trace_probe_log.subsystem = subsystem; 141 + trace_probe_log.argc = argc; 142 + trace_probe_log.argv = argv; 143 + trace_probe_log.index = 0; 144 + } 145 + 146 + void trace_probe_log_clear(void) 147 + { 148 + memset(&trace_probe_log, 0, sizeof(trace_probe_log)); 149 + } 150 + 151 + void trace_probe_log_set_index(int index) 152 + { 153 + trace_probe_log.index = index; 154 + } 155 + 156 + void __trace_probe_log_err(int offset, int err_type) 157 + { 158 + char *command, *p; 159 + int i, len = 0, pos = 0; 160 + 161 + if (!trace_probe_log.argv) 162 + return; 163 + 164 + /* Recalcurate the length and allocate buffer */ 165 + for (i = 0; i < trace_probe_log.argc; i++) { 166 + if (i == trace_probe_log.index) 167 + pos = len; 168 + len += strlen(trace_probe_log.argv[i]) + 1; 169 + } 170 + command = kzalloc(len, GFP_KERNEL); 171 + if (!command) 172 + return; 173 + 174 + /* And make a command string from argv array */ 175 + p = command; 176 + for (i = 0; i < trace_probe_log.argc; i++) { 177 + len = strlen(trace_probe_log.argv[i]); 178 + strcpy(p, trace_probe_log.argv[i]); 179 + p[len] = ' '; 180 + p += len + 1; 181 + } 182 + *(p - 1) = '\0'; 183 + 184 + tracing_log_err(NULL, trace_probe_log.subsystem, command, 185 + trace_probe_err_text, err_type, pos + offset); 186 + 187 + kfree(command); 188 + } 189 + 141 190 /* Split symbol and offset. 
*/ 142 191 int traceprobe_split_symbol_offset(char *symbol, long *offset) 143 192 { ··· 215 156 216 157 /* @buf must has MAX_EVENT_NAME_LEN size */ 217 158 int traceprobe_parse_event_name(const char **pevent, const char **pgroup, 218 - char *buf) 159 + char *buf, int offset) 219 160 { 220 161 const char *slash, *event = *pevent; 221 162 int len; ··· 223 164 slash = strchr(event, '/'); 224 165 if (slash) { 225 166 if (slash == event) { 226 - pr_info("Group name is not specified\n"); 167 + trace_probe_log_err(offset, NO_GROUP_NAME); 227 168 return -EINVAL; 228 169 } 229 170 if (slash - event + 1 > MAX_EVENT_NAME_LEN) { 230 - pr_info("Group name is too long\n"); 231 - return -E2BIG; 171 + trace_probe_log_err(offset, GROUP_TOO_LONG); 172 + return -EINVAL; 232 173 } 233 174 strlcpy(buf, event, slash - event + 1); 234 175 if (!is_good_name(buf)) { 235 - pr_info("Group name must follow the same rules as C identifiers\n"); 176 + trace_probe_log_err(offset, BAD_GROUP_NAME); 236 177 return -EINVAL; 237 178 } 238 179 *pgroup = buf; 239 180 *pevent = slash + 1; 181 + offset += slash - event + 1; 240 182 event = *pevent; 241 183 } 242 184 len = strlen(event); 243 185 if (len == 0) { 244 - pr_info("Event name is not specified\n"); 186 + trace_probe_log_err(offset, NO_EVENT_NAME); 245 187 return -EINVAL; 246 188 } else if (len > MAX_EVENT_NAME_LEN) { 247 - pr_info("Event name is too long\n"); 248 - return -E2BIG; 189 + trace_probe_log_err(offset, EVENT_TOO_LONG); 190 + return -EINVAL; 249 191 } 250 192 if (!is_good_name(event)) { 251 - pr_info("Event name must follow the same rules as C identifiers\n"); 193 + trace_probe_log_err(offset, BAD_EVENT_NAME); 252 194 return -EINVAL; 253 195 } 254 196 return 0; ··· 258 198 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) 259 199 260 200 static int parse_probe_vars(char *arg, const struct fetch_type *t, 261 - struct fetch_insn *code, unsigned int flags) 201 + struct fetch_insn *code, unsigned int flags, int offs) 262 202 { 
263 203 unsigned long param; 264 204 int ret = 0; 265 205 int len; 266 206 267 207 if (strcmp(arg, "retval") == 0) { 268 - if (flags & TPARG_FL_RETURN) 208 + if (flags & TPARG_FL_RETURN) { 269 209 code->op = FETCH_OP_RETVAL; 270 - else 210 + } else { 211 + trace_probe_log_err(offs, RETVAL_ON_PROBE); 271 212 ret = -EINVAL; 213 + } 272 214 } else if ((len = str_has_prefix(arg, "stack"))) { 273 215 if (arg[len] == '\0') { 274 216 code->op = FETCH_OP_STACKP; 275 217 } else if (isdigit(arg[len])) { 276 218 ret = kstrtoul(arg + len, 10, &param); 277 - if (ret || ((flags & TPARG_FL_KERNEL) && 278 - param > PARAM_MAX_STACK)) 219 + if (ret) { 220 + goto inval_var; 221 + } else if ((flags & TPARG_FL_KERNEL) && 222 + param > PARAM_MAX_STACK) { 223 + trace_probe_log_err(offs, BAD_STACK_NUM); 279 224 ret = -EINVAL; 280 - else { 225 + } else { 281 226 code->op = FETCH_OP_STACK; 282 227 code->param = (unsigned int)param; 283 228 } 284 229 } else 285 - ret = -EINVAL; 230 + goto inval_var; 286 231 } else if (strcmp(arg, "comm") == 0) { 287 232 code->op = FETCH_OP_COMM; 288 233 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 289 234 } else if (((flags & TPARG_FL_MASK) == 290 235 (TPARG_FL_KERNEL | TPARG_FL_FENTRY)) && 291 236 (len = str_has_prefix(arg, "arg"))) { 292 - if (!isdigit(arg[len])) 293 - return -EINVAL; 294 237 ret = kstrtoul(arg + len, 10, &param); 295 - if (ret || !param || param > PARAM_MAX_STACK) 238 + if (ret) { 239 + goto inval_var; 240 + } else if (!param || param > PARAM_MAX_STACK) { 241 + trace_probe_log_err(offs, BAD_ARG_NUM); 296 242 return -EINVAL; 243 + } 297 244 code->op = FETCH_OP_ARG; 298 245 code->param = (unsigned int)param - 1; 299 246 #endif 300 247 } else 301 - ret = -EINVAL; 248 + goto inval_var; 302 249 303 250 return ret; 251 + 252 + inval_var: 253 + trace_probe_log_err(offs, BAD_VAR); 254 + return -EINVAL; 304 255 } 305 256 306 257 /* Recursive argument parser */ 307 258 static int 308 259 parse_probe_arg(char *arg, const struct fetch_type *type, 309 
260 struct fetch_insn **pcode, struct fetch_insn *end, 310 - unsigned int flags) 261 + unsigned int flags, int offs) 311 262 { 312 263 struct fetch_insn *code = *pcode; 313 264 unsigned long param; ··· 328 257 329 258 switch (arg[0]) { 330 259 case '$': 331 - ret = parse_probe_vars(arg + 1, type, code, flags); 260 + ret = parse_probe_vars(arg + 1, type, code, flags, offs); 332 261 break; 333 262 334 263 case '%': /* named register */ ··· 337 266 code->op = FETCH_OP_REG; 338 267 code->param = (unsigned int)ret; 339 268 ret = 0; 340 - } 269 + } else 270 + trace_probe_log_err(offs, BAD_REG_NAME); 341 271 break; 342 272 343 273 case '@': /* memory, file-offset or symbol */ 344 274 if (isdigit(arg[1])) { 345 275 ret = kstrtoul(arg + 1, 0, &param); 346 - if (ret) 276 + if (ret) { 277 + trace_probe_log_err(offs, BAD_MEM_ADDR); 347 278 break; 279 + } 348 280 /* load address */ 349 281 code->op = FETCH_OP_IMM; 350 282 code->immediate = param; 351 283 } else if (arg[1] == '+') { 352 284 /* kprobes don't support file offsets */ 353 - if (flags & TPARG_FL_KERNEL) 285 + if (flags & TPARG_FL_KERNEL) { 286 + trace_probe_log_err(offs, FILE_ON_KPROBE); 354 287 return -EINVAL; 355 - 288 + } 356 289 ret = kstrtol(arg + 2, 0, &offset); 357 - if (ret) 290 + if (ret) { 291 + trace_probe_log_err(offs, BAD_FILE_OFFS); 358 292 break; 293 + } 359 294 360 295 code->op = FETCH_OP_FOFFS; 361 296 code->immediate = (unsigned long)offset; // imm64? 
362 297 } else { 363 298 /* uprobes don't support symbols */ 364 - if (!(flags & TPARG_FL_KERNEL)) 299 + if (!(flags & TPARG_FL_KERNEL)) { 300 + trace_probe_log_err(offs, SYM_ON_UPROBE); 365 301 return -EINVAL; 366 - 302 + } 367 303 /* Preserve symbol for updating */ 368 304 code->op = FETCH_NOP_SYMBOL; 369 305 code->data = kstrdup(arg + 1, GFP_KERNEL); 370 306 if (!code->data) 371 307 return -ENOMEM; 372 - if (++code == end) 373 - return -E2BIG; 374 - 308 + if (++code == end) { 309 + trace_probe_log_err(offs, TOO_MANY_OPS); 310 + return -EINVAL; 311 + } 375 312 code->op = FETCH_OP_IMM; 376 313 code->immediate = 0; 377 314 } 378 315 /* These are fetching from memory */ 379 - if (++code == end) 380 - return -E2BIG; 316 + if (++code == end) { 317 + trace_probe_log_err(offs, TOO_MANY_OPS); 318 + return -EINVAL; 319 + } 381 320 *pcode = code; 382 321 code->op = FETCH_OP_DEREF; 383 322 code->offset = offset; ··· 398 317 /* fall through */ 399 318 case '-': 400 319 tmp = strchr(arg, '('); 401 - if (!tmp) 320 + if (!tmp) { 321 + trace_probe_log_err(offs, DEREF_NEED_BRACE); 402 322 return -EINVAL; 403 - 323 + } 404 324 *tmp = '\0'; 405 325 ret = kstrtol(arg, 0, &offset); 406 - if (ret) 326 + if (ret) { 327 + trace_probe_log_err(offs, BAD_DEREF_OFFS); 407 328 break; 408 - 329 + } 330 + offs += (tmp + 1 - arg) + (arg[0] != '-' ? 
1 : 0); 409 331 arg = tmp + 1; 410 332 tmp = strrchr(arg, ')'); 411 - 412 - if (tmp) { 333 + if (!tmp) { 334 + trace_probe_log_err(offs + strlen(arg), 335 + DEREF_OPEN_BRACE); 336 + return -EINVAL; 337 + } else { 413 338 const struct fetch_type *t2 = find_fetch_type(NULL); 414 339 415 340 *tmp = '\0'; 416 - ret = parse_probe_arg(arg, t2, &code, end, flags); 341 + ret = parse_probe_arg(arg, t2, &code, end, flags, offs); 417 342 if (ret) 418 343 break; 419 - if (code->op == FETCH_OP_COMM) 344 + if (code->op == FETCH_OP_COMM) { 345 + trace_probe_log_err(offs, COMM_CANT_DEREF); 420 346 return -EINVAL; 421 - if (++code == end) 422 - return -E2BIG; 347 + } 348 + if (++code == end) { 349 + trace_probe_log_err(offs, TOO_MANY_OPS); 350 + return -EINVAL; 351 + } 423 352 *pcode = code; 424 353 425 354 code->op = FETCH_OP_DEREF; ··· 439 348 } 440 349 if (!ret && code->op == FETCH_OP_NOP) { 441 350 /* Parsed, but do not find fetch method */ 351 + trace_probe_log_err(offs, BAD_FETCH_ARG); 442 352 ret = -EINVAL; 443 353 } 444 354 return ret; ··· 471 379 return -EINVAL; 472 380 code++; 473 381 if (code->op != FETCH_OP_NOP) 474 - return -E2BIG; 382 + return -EINVAL; 475 383 *pcode = code; 476 384 477 385 code->op = FETCH_OP_MOD_BF; ··· 484 392 485 393 /* String length checking wrapper */ 486 394 static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size, 487 - struct probe_arg *parg, unsigned int flags) 395 + struct probe_arg *parg, unsigned int flags, int offset) 488 396 { 489 397 struct fetch_insn *code, *scode, *tmp = NULL; 490 - char *t, *t2; 398 + char *t, *t2, *t3; 491 399 int ret, len; 492 400 493 - if (strlen(arg) > MAX_ARGSTR_LEN) { 494 - pr_info("Argument is too long.: %s\n", arg); 495 - return -ENOSPC; 401 + len = strlen(arg); 402 + if (len > MAX_ARGSTR_LEN) { 403 + trace_probe_log_err(offset, ARG_TOO_LONG); 404 + return -EINVAL; 405 + } else if (len == 0) { 406 + trace_probe_log_err(offset, NO_ARG_BODY); 407 + return -EINVAL; 496 408 } 409 + 497 410 parg->comm 
= kstrdup(arg, GFP_KERNEL); 498 - if (!parg->comm) { 499 - pr_info("Failed to allocate memory for command '%s'.\n", arg); 411 + if (!parg->comm) 500 412 return -ENOMEM; 501 - } 413 + 502 414 t = strchr(arg, ':'); 503 415 if (t) { 504 416 *t = '\0'; 505 417 t2 = strchr(++t, '['); 506 418 if (t2) { 507 - *t2 = '\0'; 508 - parg->count = simple_strtoul(t2 + 1, &t2, 0); 509 - if (strcmp(t2, "]") || parg->count == 0) 419 + *t2++ = '\0'; 420 + t3 = strchr(t2, ']'); 421 + if (!t3) { 422 + offset += t2 + strlen(t2) - arg; 423 + trace_probe_log_err(offset, 424 + ARRAY_NO_CLOSE); 510 425 return -EINVAL; 511 - if (parg->count > MAX_ARRAY_LEN) 512 - return -E2BIG; 426 + } else if (t3[1] != '\0') { 427 + trace_probe_log_err(offset + t3 + 1 - arg, 428 + BAD_ARRAY_SUFFIX); 429 + return -EINVAL; 430 + } 431 + *t3 = '\0'; 432 + if (kstrtouint(t2, 0, &parg->count) || !parg->count) { 433 + trace_probe_log_err(offset + t2 - arg, 434 + BAD_ARRAY_NUM); 435 + return -EINVAL; 436 + } 437 + if (parg->count > MAX_ARRAY_LEN) { 438 + trace_probe_log_err(offset + t2 - arg, 439 + ARRAY_TOO_BIG); 440 + return -EINVAL; 441 + } 513 442 } 514 443 } 515 - /* 516 - * The default type of $comm should be "string", and it can't be 517 - * dereferenced. 518 - */ 519 - if (!t && strcmp(arg, "$comm") == 0) 444 + 445 + /* Since $comm can not be dereferred, we can find $comm by strcmp */ 446 + if (strcmp(arg, "$comm") == 0) { 447 + /* The type of $comm must be "string", and not an array. */ 448 + if (parg->count || (t && strcmp(t, "string"))) 449 + return -EINVAL; 520 450 parg->type = find_fetch_type("string"); 521 - else 451 + } else 522 452 parg->type = find_fetch_type(t); 523 453 if (!parg->type) { 524 - pr_info("Unsupported type: %s\n", t); 454 + trace_probe_log_err(offset + (t ? 
(t - arg) : 0), BAD_TYPE); 525 455 return -EINVAL; 526 456 } 527 457 parg->offset = *size; ··· 558 444 parg->count); 559 445 } 560 446 561 - code = tmp = kzalloc(sizeof(*code) * FETCH_INSN_MAX, GFP_KERNEL); 447 + code = tmp = kcalloc(FETCH_INSN_MAX, sizeof(*code), GFP_KERNEL); 562 448 if (!code) 563 449 return -ENOMEM; 564 450 code[FETCH_INSN_MAX - 1].op = FETCH_OP_END; 565 451 566 452 ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1], 567 - flags); 453 + flags, offset); 568 454 if (ret) 569 455 goto fail; 570 456 ··· 572 458 if (!strcmp(parg->type->name, "string")) { 573 459 if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_IMM && 574 460 code->op != FETCH_OP_COMM) { 575 - pr_info("string only accepts memory or address.\n"); 461 + trace_probe_log_err(offset + (t ? (t - arg) : 0), 462 + BAD_STRING); 576 463 ret = -EINVAL; 577 464 goto fail; 578 465 } ··· 585 470 */ 586 471 code++; 587 472 if (code->op != FETCH_OP_NOP) { 588 - ret = -E2BIG; 473 + trace_probe_log_err(offset, TOO_MANY_OPS); 474 + ret = -EINVAL; 589 475 goto fail; 590 476 } 591 477 } ··· 599 483 } else { 600 484 code++; 601 485 if (code->op != FETCH_OP_NOP) { 602 - ret = -E2BIG; 486 + trace_probe_log_err(offset, TOO_MANY_OPS); 487 + ret = -EINVAL; 603 488 goto fail; 604 489 } 605 490 code->op = FETCH_OP_ST_RAW; ··· 610 493 /* Modify operation */ 611 494 if (t != NULL) { 612 495 ret = __parse_bitfield_probe_arg(t, parg->type, &code); 613 - if (ret) 496 + if (ret) { 497 + trace_probe_log_err(offset + t - arg, BAD_BITFIELD); 614 498 goto fail; 499 + } 615 500 } 616 501 /* Loop(Array) operation */ 617 502 if (parg->count) { 618 503 if (scode->op != FETCH_OP_ST_MEM && 619 504 scode->op != FETCH_OP_ST_STRING) { 620 - pr_info("array only accepts memory or address\n"); 505 + trace_probe_log_err(offset + (t ? 
(t - arg) : 0), 506 + BAD_STRING); 621 507 ret = -EINVAL; 622 508 goto fail; 623 509 } 624 510 code++; 625 511 if (code->op != FETCH_OP_NOP) { 626 - ret = -E2BIG; 512 + trace_probe_log_err(offset, TOO_MANY_OPS); 513 + ret = -EINVAL; 627 514 goto fail; 628 515 } 629 516 code->op = FETCH_OP_LP_ARRAY; ··· 637 516 code->op = FETCH_OP_END; 638 517 639 518 /* Shrink down the code buffer */ 640 - parg->code = kzalloc(sizeof(*code) * (code - tmp + 1), GFP_KERNEL); 519 + parg->code = kcalloc(code - tmp + 1, sizeof(*code), GFP_KERNEL); 641 520 if (!parg->code) 642 521 ret = -ENOMEM; 643 522 else ··· 676 555 { 677 556 struct probe_arg *parg = &tp->args[i]; 678 557 char *body; 679 - int ret; 680 558 681 559 /* Increment count for freeing args in error case */ 682 560 tp->nr_args++; 683 561 684 562 body = strchr(arg, '='); 685 563 if (body) { 686 - if (body - arg > MAX_ARG_NAME_LEN || body == arg) 564 + if (body - arg > MAX_ARG_NAME_LEN) { 565 + trace_probe_log_err(0, ARG_NAME_TOO_LONG); 687 566 return -EINVAL; 567 + } else if (body == arg) { 568 + trace_probe_log_err(0, NO_ARG_NAME); 569 + return -EINVAL; 570 + } 688 571 parg->name = kmemdup_nul(arg, body - arg, GFP_KERNEL); 689 572 body++; 690 573 } else { ··· 700 575 return -ENOMEM; 701 576 702 577 if (!is_good_name(parg->name)) { 703 - pr_info("Invalid argument[%d] name: %s\n", 704 - i, parg->name); 578 + trace_probe_log_err(0, BAD_ARG_NAME); 705 579 return -EINVAL; 706 580 } 707 - 708 581 if (traceprobe_conflict_field_name(parg->name, tp->args, i)) { 709 - pr_info("Argument[%d]: '%s' conflicts with another field.\n", 710 - i, parg->name); 582 + trace_probe_log_err(0, USED_ARG_NAME); 711 583 return -EINVAL; 712 584 } 713 - 714 585 /* Parse fetch argument */ 715 - ret = traceprobe_parse_probe_arg_body(body, &tp->size, parg, flags); 716 - if (ret) 717 - pr_info("Parse error at argument[%d]. 
(%d)\n", i, ret); 718 - return ret; 586 + return traceprobe_parse_probe_arg_body(body, &tp->size, parg, flags, 587 + body - arg); 719 588 } 720 589 721 590 void traceprobe_free_probe_arg(struct probe_arg *arg)
+76 -2
kernel/trace/trace_probe.h
··· 124 124 125 125 /* fetch + deref*N + store + mod + end <= 16, this allows N=12, enough */ 126 126 #define FETCH_INSN_MAX 16 127 + #define FETCH_TOKEN_COMM (-ECOMM) 127 128 128 129 /* Fetch type information table */ 129 130 struct fetch_type { ··· 281 280 extern void traceprobe_free_probe_arg(struct probe_arg *arg); 282 281 283 282 extern int traceprobe_split_symbol_offset(char *symbol, long *offset); 284 - extern int traceprobe_parse_event_name(const char **pevent, 285 - const char **pgroup, char *buf); 283 + int traceprobe_parse_event_name(const char **pevent, const char **pgroup, 284 + char *buf, int offset); 286 285 287 286 extern int traceprobe_set_print_fmt(struct trace_probe *tp, bool is_return); 288 287 ··· 299 298 #endif 300 299 extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, 301 300 size_t offset, struct trace_probe *tp); 301 + 302 + #undef ERRORS 303 + #define ERRORS \ 304 + C(FILE_NOT_FOUND, "Failed to find the given file"), \ 305 + C(NO_REGULAR_FILE, "Not a regular file"), \ 306 + C(BAD_REFCNT, "Invalid reference counter offset"), \ 307 + C(REFCNT_OPEN_BRACE, "Reference counter brace is not closed"), \ 308 + C(BAD_REFCNT_SUFFIX, "Reference counter has wrong suffix"), \ 309 + C(BAD_UPROBE_OFFS, "Invalid uprobe offset"), \ 310 + C(MAXACT_NO_KPROBE, "Maxactive is not for kprobe"), \ 311 + C(BAD_MAXACT, "Invalid maxactive number"), \ 312 + C(MAXACT_TOO_BIG, "Maxactive is too big"), \ 313 + C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \ 314 + C(BAD_RETPROBE, "Retprobe address must be an function entry"), \ 315 + C(NO_GROUP_NAME, "Group name is not specified"), \ 316 + C(GROUP_TOO_LONG, "Group name is too long"), \ 317 + C(BAD_GROUP_NAME, "Group name must follow the same rules as C identifiers"), \ 318 + C(NO_EVENT_NAME, "Event name is not specified"), \ 319 + C(EVENT_TOO_LONG, "Event name is too long"), \ 320 + C(BAD_EVENT_NAME, "Event name must follow the same rules as C identifiers"), \ 321 + C(RETVAL_ON_PROBE, 
"$retval is not available on probe"), \ 322 + C(BAD_STACK_NUM, "Invalid stack number"), \ 323 + C(BAD_ARG_NUM, "Invalid argument number"), \ 324 + C(BAD_VAR, "Invalid $-valiable specified"), \ 325 + C(BAD_REG_NAME, "Invalid register name"), \ 326 + C(BAD_MEM_ADDR, "Invalid memory address"), \ 327 + C(FILE_ON_KPROBE, "File offset is not available with kprobe"), \ 328 + C(BAD_FILE_OFFS, "Invalid file offset value"), \ 329 + C(SYM_ON_UPROBE, "Symbol is not available with uprobe"), \ 330 + C(TOO_MANY_OPS, "Dereference is too much nested"), \ 331 + C(DEREF_NEED_BRACE, "Dereference needs a brace"), \ 332 + C(BAD_DEREF_OFFS, "Invalid dereference offset"), \ 333 + C(DEREF_OPEN_BRACE, "Dereference brace is not closed"), \ 334 + C(COMM_CANT_DEREF, "$comm can not be dereferenced"), \ 335 + C(BAD_FETCH_ARG, "Invalid fetch argument"), \ 336 + C(ARRAY_NO_CLOSE, "Array is not closed"), \ 337 + C(BAD_ARRAY_SUFFIX, "Array has wrong suffix"), \ 338 + C(BAD_ARRAY_NUM, "Invalid array size"), \ 339 + C(ARRAY_TOO_BIG, "Array number is too big"), \ 340 + C(BAD_TYPE, "Unknown type is specified"), \ 341 + C(BAD_STRING, "String accepts only memory argument"), \ 342 + C(BAD_BITFIELD, "Invalid bitfield"), \ 343 + C(ARG_NAME_TOO_LONG, "Argument name is too long"), \ 344 + C(NO_ARG_NAME, "Argument name is not specified"), \ 345 + C(BAD_ARG_NAME, "Argument name must follow the same rules as C identifiers"), \ 346 + C(USED_ARG_NAME, "This argument name is already used"), \ 347 + C(ARG_TOO_LONG, "Argument expression is too long"), \ 348 + C(NO_ARG_BODY, "No argument expression"), \ 349 + C(BAD_INSN_BNDRY, "Probe point is not an instruction boundary"),\ 350 + C(FAIL_REG_PROBE, "Failed to register probe event"), 351 + 352 + #undef C 353 + #define C(a, b) TP_ERR_##a 354 + 355 + /* Define TP_ERR_ */ 356 + enum { ERRORS }; 357 + 358 + /* Error text is defined in trace_probe.c */ 359 + 360 + struct trace_probe_log { 361 + const char *subsystem; 362 + const char **argv; 363 + int argc; 364 + int index; 
365 + }; 366 + 367 + void trace_probe_log_init(const char *subsystem, int argc, const char **argv); 368 + void trace_probe_log_set_index(int index); 369 + void trace_probe_log_clear(void); 370 + void __trace_probe_log_err(int offset, int err); 371 + 372 + #define trace_probe_log_err(offs, err) \ 373 + __trace_probe_log_err(offs, TP_ERR_##err)
+1 -1
kernel/trace/trace_probe_tmpl.h
··· 88 88 /* 3rd stage: store value to buffer */ 89 89 if (unlikely(!dest)) { 90 90 if (code->op == FETCH_OP_ST_STRING) { 91 - ret += fetch_store_strlen(val + code->offset); 91 + ret = fetch_store_strlen(val + code->offset); 92 92 code++; 93 93 goto array; 94 94 } else
+4 -1
kernel/trace/trace_selftest.c
··· 792 792 /* check the trace buffer */ 793 793 ret = trace_test_buffer(&tr->trace_buffer, &count); 794 794 795 - trace->reset(tr); 795 + /* Need to also simulate the tr->reset to remove this fgraph_ops */ 796 + tracing_stop_cmdline_record(); 797 + unregister_ftrace_graph(&fgraph_ops); 798 + 796 799 tracing_start(); 797 800 798 801 if (!ret && !count) {
+42 -15
kernel/trace/trace_uprobe.c
··· 156 156 if (unlikely(!maxlen)) 157 157 return -ENOMEM; 158 158 159 - ret = strncpy_from_user(dst, src, maxlen); 159 + if (addr == FETCH_TOKEN_COMM) 160 + ret = strlcpy(dst, current->comm, maxlen); 161 + else 162 + ret = strncpy_from_user(dst, src, maxlen); 160 163 if (ret >= 0) { 161 164 if (ret == maxlen) 162 165 dst[ret - 1] = '\0'; ··· 183 180 int len; 184 181 void __user *vaddr = (void __force __user *) addr; 185 182 186 - len = strnlen_user(vaddr, MAX_STRING_SIZE); 183 + if (addr == FETCH_TOKEN_COMM) 184 + len = strlen(current->comm) + 1; 185 + else 186 + len = strnlen_user(vaddr, MAX_STRING_SIZE); 187 187 188 188 return (len > MAX_STRING_SIZE) ? 0 : len; 189 189 } ··· 225 219 break; 226 220 case FETCH_OP_IMM: 227 221 val = code->immediate; 222 + break; 223 + case FETCH_OP_COMM: 224 + val = FETCH_TOKEN_COMM; 228 225 break; 229 226 case FETCH_OP_FOFFS: 230 227 val = translate_user_vaddr(code->immediate); ··· 466 457 return -ECANCELED; 467 458 } 468 459 460 + trace_probe_log_init("trace_uprobe", argc, argv); 461 + trace_probe_log_set_index(1); /* filename is the 2nd argument */ 462 + 469 463 *arg++ = '\0'; 470 464 ret = kern_path(filename, LOOKUP_FOLLOW, &path); 471 465 if (ret) { 466 + trace_probe_log_err(0, FILE_NOT_FOUND); 472 467 kfree(filename); 468 + trace_probe_log_clear(); 473 469 return ret; 474 470 } 475 471 if (!d_is_reg(path.dentry)) { 472 + trace_probe_log_err(0, NO_REGULAR_FILE); 476 473 ret = -EINVAL; 477 474 goto fail_address_parse; 478 475 } ··· 487 472 rctr = strchr(arg, '('); 488 473 if (rctr) { 489 474 rctr_end = strchr(rctr, ')'); 490 - if (rctr > rctr_end || *(rctr_end + 1) != 0) { 475 + if (!rctr_end) { 491 476 ret = -EINVAL; 492 - pr_info("Invalid reference counter offset.\n"); 477 + rctr_end = rctr + strlen(rctr); 478 + trace_probe_log_err(rctr_end - filename, 479 + REFCNT_OPEN_BRACE); 480 + goto fail_address_parse; 481 + } else if (rctr_end[1] != '\0') { 482 + ret = -EINVAL; 483 + trace_probe_log_err(rctr_end + 1 - filename, 484 + 
BAD_REFCNT_SUFFIX); 493 485 goto fail_address_parse; 494 486 } 495 487 ··· 504 482 *rctr_end = '\0'; 505 483 ret = kstrtoul(rctr, 0, &ref_ctr_offset); 506 484 if (ret) { 507 - pr_info("Invalid reference counter offset.\n"); 485 + trace_probe_log_err(rctr - filename, BAD_REFCNT); 508 486 goto fail_address_parse; 509 487 } 510 488 } 511 489 512 490 /* Parse uprobe offset. */ 513 491 ret = kstrtoul(arg, 0, &offset); 514 - if (ret) 492 + if (ret) { 493 + trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS); 515 494 goto fail_address_parse; 516 - 517 - argc -= 2; 518 - argv += 2; 495 + } 519 496 520 497 /* setup a probe */ 498 + trace_probe_log_set_index(0); 521 499 if (event) { 522 - ret = traceprobe_parse_event_name(&event, &group, buf); 500 + ret = traceprobe_parse_event_name(&event, &group, buf, 501 + event - argv[0]); 523 502 if (ret) 524 503 goto fail_address_parse; 525 504 } else { ··· 542 519 kfree(tail); 543 520 } 544 521 522 + argc -= 2; 523 + argv += 2; 524 + 545 525 tu = alloc_trace_uprobe(group, event, argc, is_return); 546 526 if (IS_ERR(tu)) { 547 527 ret = PTR_ERR(tu); ··· 565 539 goto error; 566 540 } 567 541 542 + trace_probe_log_set_index(i + 2); 568 543 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp, 569 544 is_return ? TPARG_FL_RETURN : 0); 570 545 kfree(tmp); ··· 574 547 } 575 548 576 549 ret = register_trace_uprobe(tu); 577 - if (ret) 578 - goto error; 579 - return 0; 550 + if (!ret) 551 + goto out; 580 552 581 553 error: 582 554 free_trace_uprobe(tu); 555 + out: 556 + trace_probe_log_clear(); 583 557 return ret; 584 558 585 559 fail_address_parse: 560 + trace_probe_log_clear(); 586 561 path_put(&path); 587 562 kfree(filename); 588 - 589 - pr_info("Failed to parse address or file.\n"); 590 563 591 564 return ret; 592 565 }
+19
tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # description: ftrace - test tracing error log support 4 + 5 + fail() { #msg 6 + echo $1 7 + exit_fail 8 + } 9 + 10 + # event tracing is currently the only ftrace tracer that uses the 11 + # tracing error_log, hence this check 12 + if [ ! -f set_event ]; then 13 + echo "event tracing is not supported" 14 + exit_unsupported 15 + fi 16 + 17 + ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter' 18 + 19 + exit 0
+12
tools/testing/selftests/ftrace/test.d/functions
··· 109 109 yield() { 110 110 ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1 111 111 } 112 + 113 + ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file 114 + pos=$(echo -n "${2%^*}" | wc -c) # error position 115 + command=$(echo "$2" | tr -d ^) 116 + echo "Test command: $command" 117 + echo > error_log 118 + (! echo "$command" > "$3" ) 2> /dev/null 119 + grep "$1: error:" -A 3 error_log 120 + N=$(tail -n 1 error_log | wc -c) 121 + # " Command: " and "^\n" => 13 122 + test $(expr 13 + $pos) -eq $N 123 + }
+85
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # description: Kprobe event parser error log check 4 + 5 + [ -f kprobe_events ] || exit_unsupported # this is configurable 6 + 7 + [ -f error_log ] || exit_unsupported 8 + 9 + check_error() { # command-with-error-pos-by-^ 10 + ftrace_errlog_check 'trace_kprobe' "$1" 'kprobe_events' 11 + } 12 + 13 + if grep -q 'r\[maxactive\]' README; then 14 + check_error 'p^100 vfs_read' # MAXACT_NO_KPROBE 15 + check_error 'r^1a111 vfs_read' # BAD_MAXACT 16 + check_error 'r^100000 vfs_read' # MAXACT_TOO_BIG 17 + fi 18 + 19 + check_error 'p ^non_exist_func' # BAD_PROBE_ADDR (enoent) 20 + check_error 'p ^hoge-fuga' # BAD_PROBE_ADDR (bad syntax) 21 + check_error 'p ^hoge+1000-1000' # BAD_PROBE_ADDR (bad syntax) 22 + check_error 'r ^vfs_read+10' # BAD_RETPROBE 23 + check_error 'p:^/bar vfs_read' # NO_GROUP_NAME 24 + check_error 'p:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG 25 + 26 + check_error 'p:^foo.1/bar vfs_read' # BAD_GROUP_NAME 27 + check_error 'p:foo/^ vfs_read' # NO_EVENT_NAME 28 + check_error 'p:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG 29 + check_error 'p:foo/^bar.1 vfs_read' # BAD_EVENT_NAME 30 + 31 + check_error 'p vfs_read ^$retval' # RETVAL_ON_PROBE 32 + check_error 'p vfs_read ^$stack10000' # BAD_STACK_NUM 33 + 34 + if grep -q '$arg<N>' README; then 35 + check_error 'p vfs_read ^$arg10000' # BAD_ARG_NUM 36 + fi 37 + 38 + check_error 'p vfs_read ^$none_var' # BAD_VAR 39 + 40 + check_error 'p vfs_read ^%none_reg' # BAD_REG_NAME 41 + check_error 'p vfs_read ^@12345678abcde' # BAD_MEM_ADDR 42 + check_error 'p vfs_read ^@+10' # FILE_ON_KPROBE 43 + 44 + check_error 'p vfs_read ^+0@0)' # DEREF_NEED_BRACE 45 + check_error 'p vfs_read ^+0ab1(@0)' # BAD_DEREF_OFFS 46 + check_error 'p vfs_read +0(+0(@0^)' # DEREF_OPEN_BRACE 47 + 48 + if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then 49 + check_error 'p 
vfs_read +0(^$comm)' # COMM_CANT_DEREF 50 + fi 51 + 52 + check_error 'p vfs_read ^&1' # BAD_FETCH_ARG 53 + 54 + 55 + # We've introduced this limitation with array support 56 + if grep -q ' <type>\\\[<array-size>\\\]' README; then 57 + check_error 'p vfs_read +0(^+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(@0))))))))))))))' # TOO_MANY_OPS? 58 + check_error 'p vfs_read +0(@11):u8[10^' # ARRAY_NO_CLOSE 59 + check_error 'p vfs_read +0(@11):u8[10]^a' # BAD_ARRAY_SUFFIX 60 + check_error 'p vfs_read +0(@11):u8[^10a]' # BAD_ARRAY_NUM 61 + check_error 'p vfs_read +0(@11):u8[^256]' # ARRAY_TOO_BIG 62 + fi 63 + 64 + check_error 'p vfs_read @11:^unknown_type' # BAD_TYPE 65 + check_error 'p vfs_read $stack0:^string' # BAD_STRING 66 + check_error 'p vfs_read @11:^b10@a/16' # BAD_BITFIELD 67 + 68 + check_error 'p vfs_read ^arg123456789012345678901234567890=@11' # ARG_NAME_TOO_LOG 69 + check_error 'p vfs_read ^=@11' # NO_ARG_NAME 70 + check_error 'p vfs_read ^var.1=@11' # BAD_ARG_NAME 71 + check_error 'p vfs_read var1=@11 ^var1=@12' # USED_ARG_NAME 72 + check_error 'p vfs_read ^+1234567(+1234567(+1234567(+1234567(+1234567(+1234567(@1234))))))' # ARG_TOO_LONG 73 + check_error 'p vfs_read arg1=^' # NO_ARG_BODY 74 + 75 + # instruction boundary check is valid on x86 (at this moment) 76 + case $(uname -m) in 77 + x86_64|i[3456]86) 78 + echo 'p vfs_read' > kprobe_events 79 + if grep -q FTRACE ../kprobes/list ; then 80 + check_error 'p ^vfs_read+3' # BAD_INSN_BNDRY (only if function-tracer is enabled) 81 + fi 82 + ;; 83 + esac 84 + 85 + exit 0
+23
tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # description: Uprobe event parser error log check 4 + 5 + [ -f uprobe_events ] || exit_unsupported # this is configurable 6 + 7 + [ -f error_log ] || exit_unsupported 8 + 9 + check_error() { # command-with-error-pos-by-^ 10 + ftrace_errlog_check 'trace_uprobe' "$1" 'uprobe_events' 11 + } 12 + 13 + check_error 'p ^/non_exist_file:100' # FILE_NOT_FOUND 14 + check_error 'p ^/sys:100' # NO_REGULAR_FILE 15 + check_error 'p /bin/sh:^10a' # BAD_UPROBE_OFFS 16 + check_error 'p /bin/sh:10(^1a)' # BAD_REFCNT 17 + check_error 'p /bin/sh:10(10^' # REFCNT_OPEN_BRACE 18 + check_error 'p /bin/sh:10(10)^a' # BAD_REFCNT_SUFFIX 19 + 20 + check_error 'p /bin/sh:10 ^@+ab' # BAD_FILE_OFFS 21 + check_error 'p /bin/sh:10 ^@symbol' # SYM_ON_UPROBE 22 + 23 + exit 0
-28
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
··· 1 - #!/bin/sh 2 - # SPDX-License-Identifier: GPL-2.0 3 - # description: event trigger - test extended error support 4 - 5 - 6 - fail() { #msg 7 - echo $1 8 - exit_fail 9 - } 10 - 11 - if [ ! -f set_event ]; then 12 - echo "event tracing is not supported" 13 - exit_unsupported 14 - fi 15 - 16 - if [ ! -f synthetic_events ]; then 17 - echo "synthetic event is not supported" 18 - exit_unsupported 19 - fi 20 - 21 - echo "Test extended error support" 22 - echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger 23 - ! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null 24 - if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then 25 - fail "Failed to generate extended error in histogram" 26 - fi 27 - 28 - exit 0