Merge tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing update from Steven Rostedt:
"This batch of changes is mostly clean ups and small bug fixes. The
only real feature that was added this release is from Namhyung Kim,
who introduced "set_graph_notrace" filter that lets you run the
function graph tracer and not trace particular functions and their
call chain.

Tom Zanussi added some updates to the ftrace multibuffer tracing that
made it more consistent with the top-level tracing.

One of the fixes for perf function tracing required an API change in
RCU: the addition of "rcu_is_watching()". As Paul McKenney is pushing
that change in this release too, he gave me a branch that included all
the changes to get that working, and I pulled that into my tree in
order to complete the perf function tracing fix"
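
For reference, the new filter is used like set_graph_function: write a
function name into it before enabling the tracer, and that function and
everything it calls drop out of the graph output. A typical session looks
like this (the path assumes the usual debugfs mount point, and the
function name is only an illustration):

  # cd /sys/kernel/debug/tracing
  # echo kmem_cache_free > set_graph_notrace
  # echo function_graph > current_tracer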
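
On the multibuffer side, the filter updates move event filters from the
shared ftrace_event_call onto the per-instance ftrace_event_file (ftrace
internal events keep the old behavior via TRACE_EVENT_FL_USE_CALL_FILTER),
so separate trace instances can now filter the same event independently.
For example (the instance name and filter expression here are
illustrative only):

  # cd /sys/kernel/debug/tracing
  # mkdir instances/foo
  # echo 'prev_pid > 100' > instances/foo/events/sched/sched_switch/filter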
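
The rcu_is_watching() API answers whether RCU is currently watching this
CPU, which matters because perf's function-trace callbacks use RCU and can
fire in contexts (the idle loop, for instance) where RCU is inactive. A
minimal sketch of the guard pattern that ftrace_ops_control_func() adopts
in the diffs below; the callback itself is hypothetical:

    #include <linux/rcupdate.h>

    static void my_callback(unsigned long ip, unsigned long parent_ip)
    {
        /* Bail out when RCU is not watching this CPU (e.g. from idle). */
        if (!rcu_is_watching())
            return;

        rcu_read_lock();
        /* ... RCU-protected accesses are safe here ... */
        rcu_read_unlock();
    }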

* tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Add rcu annotation for syscall trace descriptors
tracing: Do not use signed enums with unsigned long long in fgraph output
tracing: Remove unused function ftrace_off_permanent()
tracing: Do not assign filp->private_data to freed memory
tracing: Add helper function tracing_is_disabled()
tracing: Open tracer when ftrace_dump_on_oops is used
tracing: Add support for SOFT_DISABLE to syscall events
tracing: Make register/unregister_ftrace_command __init
tracing: Update event filters for multibuffer
recordmcount.pl: Add support for __fentry__
ftrace: Have control op function callback only trace when RCU is watching
rcu: Do not trace rcu_is_watching() functions
ftrace/x86: skip over the breakpoint for ftrace caller
trace/trace_stat: use rbtree postorder iteration helper instead of opencoding
ftrace: Add set_graph_notrace filter
ftrace: Narrow down the protected area of graph_lock
ftrace: Introduce struct ftrace_graph_data
ftrace: Get rid of ftrace_graph_filter_enabled
tracing: Fix potential out-of-bounds in trace_get_user()
tracing: Show more exact help information about snapshot

568 insertions(+), 234 deletions(-)

arch/x86/kernel/ftrace.c (+13 -1)

···
     return ret;
 }
 
+static int is_ftrace_caller(unsigned long ip)
+{
+    if (ip == (unsigned long)(&ftrace_call) ||
+        ip == (unsigned long)(&ftrace_regs_call))
+        return 1;
+
+    return 0;
+}
+
 /*
  * A breakpoint was added to the code address we are about to
  * modify, and this is the handle that will just skip over it.
···
  */
 int ftrace_int3_handler(struct pt_regs *regs)
 {
+    unsigned long ip;
+
     if (WARN_ON_ONCE(!regs))
         return 0;
 
-    if (!ftrace_location(regs->ip - 1))
+    ip = regs->ip - 1;
+    if (!ftrace_location(ip) && !is_ftrace_caller(ip))
         return 0;
 
     regs->ip += MCOUNT_INSN_SIZE - 1;

include/linux/ftrace.h (+3 -2)

···
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
-static inline int register_ftrace_command(struct ftrace_func_command *cmd)
+static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
     return -EINVAL;
 }
-static inline int unregister_ftrace_command(char *cmd_name)
+static inline __init int unregister_ftrace_command(char *cmd_name)
 {
     return -EINVAL;
 }
···
 extern char __irqentry_text_start[];
 extern char __irqentry_text_end[];
 
+#define FTRACE_NOTRACE_DEPTH 65536
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,

include/linux/ftrace_event.h (+20 -5)

···
     TRACE_EVENT_FL_NO_SET_FILTER_BIT,
     TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
     TRACE_EVENT_FL_WAS_ENABLED_BIT,
+    TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 };
 
 /*
···
  *  WAS_ENABLED   - Set and stays set when an event was ever enabled
  *                  (used for module unloading, if a module event is enabled,
  *                  it is best to clear the buffers that used it).
+ *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
  */
 enum {
     TRACE_EVENT_FL_FILTERED       = (1 << TRACE_EVENT_FL_FILTERED_BIT),
···
     TRACE_EVENT_FL_NO_SET_FILTER  = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
     TRACE_EVENT_FL_IGNORE_ENABLE  = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
     TRACE_EVENT_FL_WAS_ENABLED    = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+    TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 };
 
 struct ftrace_event_call {
···
  *   bit 2:  failed to apply filter
  *   bit 3:  ftrace internal event (do not enable)
  *   bit 4:  Event was enabled by module
+ *   bit 5:  use call filter rather than file filter
  */
     int flags; /* static flags of different events */
 
···
 enum {
     FTRACE_EVENT_FL_ENABLED_BIT,
     FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+    FTRACE_EVENT_FL_FILTERED_BIT,
+    FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
     FTRACE_EVENT_FL_SOFT_MODE_BIT,
     FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 };
···
  * Ftrace event file flags:
  *  ENABLED       - The event is enabled
  *  RECORDED_CMD  - The comms should be recorded at sched_switch
+ *  FILTERED      - The event has a filter attached
+ *  NO_SET_FILTER - Set when filter has error and is to be ignored
  *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
  *  SOFT_DISABLED - When set, do not trace the event (even though its
  *                  tracepoint may be enabled)
···
 enum {
     FTRACE_EVENT_FL_ENABLED       = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
     FTRACE_EVENT_FL_RECORDED_CMD  = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+    FTRACE_EVENT_FL_FILTERED      = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+    FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
     FTRACE_EVENT_FL_SOFT_MODE     = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
     FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
 };
···
 struct ftrace_event_file {
     struct list_head		list;
     struct ftrace_event_call	*event_call;
+    struct event_filter		*filter;
     struct dentry		*dir;
     struct trace_array		*tr;
     struct ftrace_subsystem_dir	*system;
···
 
 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
 
-extern void destroy_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_file *file);
+extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
-extern int filter_current_check_discard(struct ring_buffer *buffer,
-                    struct ftrace_event_call *call,
-                    void *rec,
-                    struct ring_buffer_event *event);
+
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+                struct ring_buffer *buffer,
+                struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+                struct ring_buffer *buffer,
+                struct ring_buffer_event *event);
 
 enum {
     FILTER_OTHER = 0,

include/linux/kernel.h (-2)

···
 
 extern void tracing_start(void);
 extern void tracing_stop(void);
-extern void ftrace_off_permanent(void);
 
 static inline __printf(1, 2)
 void ____trace_printk_check_format(const char *fmt, ...)
···
 #else
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
-static inline void ftrace_off_permanent(void) { }
 static inline void trace_dump_stack(int skip) { }
 
 static inline void tracing_on(void) { }

include/linux/syscalls.h (+2 -2)

···
     .class = &event_class_syscall_enter, \
     .event.funcs = &enter_syscall_print_funcs, \
     .data = (void *)&__syscall_meta_##sname,\
-    .flags = TRACE_EVENT_FL_CAP_ANY, \
+    .flags = TRACE_EVENT_FL_CAP_ANY, \
     }; \
     static struct ftrace_event_call __used \
     __attribute__((section("_ftrace_events"))) \
···
     .class = &event_class_syscall_exit, \
     .event.funcs = &exit_syscall_print_funcs, \
     .data = (void *)&__syscall_meta_##sname,\
-    .flags = TRACE_EVENT_FL_CAP_ANY, \
+    .flags = TRACE_EVENT_FL_CAP_ANY, \
     }; \
     static struct ftrace_event_call __used \
     __attribute__((section("_ftrace_events"))) \

include/trace/ftrace.h (+3 -4)

···
  *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *	                   __array macros.
  *
- *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_nowake_buffer_unlock_commit(buffer,
- *		                                  event, irq_flags, pc);
+ *	if (!filter_check_discard(ftrace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
  * }
  *
  * static struct trace_event ftrace_event_type_<call> = {
···
     \
     { assign; } \
     \
-    if (!filter_current_check_discard(buffer, event_call, entry, event)) \
+    if (!filter_check_discard(ftrace_file, entry, buffer, event)) \
         trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*

kernel/rcu/tiny.c (+1 -1)

···
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-bool __rcu_is_watching(void)
+bool notrace __rcu_is_watching(void)
 {
     return rcu_dynticks_nesting;
 }

kernel/rcu/tree.c (+2 -2)

···
  * rcu_is_watching(), the caller of __rcu_is_watching() must have at
  * least disabled preemption.
  */
-bool __rcu_is_watching(void)
+bool notrace __rcu_is_watching(void)
 {
     return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 }
···
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-bool rcu_is_watching(void)
+bool notrace rcu_is_watching(void)
 {
     int ret;
 

kernel/trace/ftrace.c (+123 -38)

···
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);
 
-int register_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only register ftrace commands from __init, so mark this
+ * __init too.
+ */
+__init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
     struct ftrace_func_command *p;
     int ret = 0;
···
     return ret;
 }
 
-int unregister_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only unregister ftrace commands from __init, so mark
+ * this __init too.
+ */
+__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
     struct ftrace_func_command *p, *n;
     int ret = -ENODEV;
···
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
 {
···
     func = strsep(&buf, ",");
     /* we allow only one expression at a time */
     ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                  func);
+                  FTRACE_GRAPH_MAX_FUNCS, func);
     if (ret)
         printk(KERN_DEBUG "ftrace: function %s not "
             "traceable\n", func);
···
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
+int ftrace_graph_notrace_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+    unsigned long *table;
+    size_t size;
+    int *count;
+    const struct seq_operations *seq_ops;
+};
 
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-    if (*pos >= ftrace_graph_count)
+    struct ftrace_graph_data *fgd = m->private;
+
+    if (*pos >= *fgd->count)
         return NULL;
-    return &ftrace_graph_funcs[*pos];
+    return &fgd->table[*pos];
 }
 
 static void *
···
 static void *g_start(struct seq_file *m, loff_t *pos)
 {
+    struct ftrace_graph_data *fgd = m->private;
+
     mutex_lock(&graph_lock);
 
     /* Nothing, tell g_show to print all functions are enabled */
-    if (!ftrace_graph_filter_enabled && !*pos)
+    if (!*fgd->count && !*pos)
         return (void *)1;
 
     return __g_next(m, pos);
···
 };
 
 static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+            struct ftrace_graph_data *fgd)
 {
     int ret = 0;
-
-    if (unlikely(ftrace_disabled))
-        return -ENODEV;
 
     mutex_lock(&graph_lock);
     if ((file->f_mode & FMODE_WRITE) &&
         (file->f_flags & O_TRUNC)) {
-        ftrace_graph_filter_enabled = 0;
-        ftrace_graph_count = 0;
-        memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+        *fgd->count = 0;
+        memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
     }
     mutex_unlock(&graph_lock);
 
-    if (file->f_mode & FMODE_READ)
-        ret = seq_open(file, &ftrace_graph_seq_ops);
+    if (file->f_mode & FMODE_READ) {
+        ret = seq_open(file, fgd->seq_ops);
+        if (!ret) {
+            struct seq_file *m = file->private_data;
+            m->private = fgd;
+        }
+    } else
+        file->private_data = fgd;
 
     return ret;
 }
 
 static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+    struct ftrace_graph_data *fgd;
+
+    if (unlikely(ftrace_disabled))
+        return -ENODEV;
+
+    fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+    if (fgd == NULL)
+        return -ENOMEM;
+
+    fgd->table = ftrace_graph_funcs;
+    fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+    fgd->count = &ftrace_graph_count;
+    fgd->seq_ops = &ftrace_graph_seq_ops;
+
+    return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+    struct ftrace_graph_data *fgd;
+
+    if (unlikely(ftrace_disabled))
+        return -ENODEV;
+
+    fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+    if (fgd == NULL)
+        return -ENOMEM;
+
+    fgd->table = ftrace_graph_notrace_funcs;
+    fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+    fgd->count = &ftrace_graph_notrace_count;
+    fgd->seq_ops = &ftrace_graph_seq_ops;
+
+    return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
-    if (file->f_mode & FMODE_READ)
+    if (file->f_mode & FMODE_READ) {
+        struct seq_file *m = file->private_data;
+
+        kfree(m->private);
         seq_release(inode, file);
+    } else {
+        kfree(file->private_data);
+    }
+
     return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 {
     struct dyn_ftrace *rec;
     struct ftrace_page *pg;
···
 
     /* decode regex */
     type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-    if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+    if (!not && *idx >= size)
         return -EBUSY;
 
     search_len = strlen(search);
···
         fail = 0;
         if (!exists) {
             array[(*idx)++] = rec->ip;
-            if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+            if (*idx >= size)
                 goto out;
         }
     } else {
···
     if (fail)
         return -EINVAL;
 
-    ftrace_graph_filter_enabled = !!(*idx);
-
     return 0;
 }
···
            size_t cnt, loff_t *ppos)
 {
     struct trace_parser parser;
-    ssize_t read, ret;
+    ssize_t read, ret = 0;
+    struct ftrace_graph_data *fgd = file->private_data;
 
     if (!cnt)
         return 0;
 
-    mutex_lock(&graph_lock);
-
-    if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
-        ret = -ENOMEM;
-        goto out_unlock;
-    }
+    if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+        return -ENOMEM;
 
     read = trace_get_user(&parser, ubuf, cnt, ppos);
 
     if (read >= 0 && trace_parser_loaded((&parser))) {
         parser.buffer[parser.idx] = 0;
 
+        mutex_lock(&graph_lock);
+
         /* we allow only one expression at a time */
-        ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                    parser.buffer);
-        if (ret)
-            goto out_free;
+        ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+                      parser.buffer);
+
+        mutex_unlock(&graph_lock);
     }
 
-    ret = read;
+    if (!ret)
+        ret = read;
 
-out_free:
     trace_parser_put(&parser);
-out_unlock:
-    mutex_unlock(&graph_lock);
 
     return ret;
 }
 
 static const struct file_operations ftrace_graph_fops = {
     .open   = ftrace_graph_open,
+    .read   = seq_read,
+    .write  = ftrace_graph_write,
+    .llseek = ftrace_filter_lseek,
+    .release = ftrace_graph_release,
+};
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+    .open   = ftrace_graph_notrace_open,
     .read   = seq_read,
     .write  = ftrace_graph_write,
     .llseek = ftrace_filter_lseek,
···
     trace_create_file("set_graph_function", 0444, d_tracer,
                 NULL,
                 &ftrace_graph_fops);
+    trace_create_file("set_graph_notrace", 0444, d_tracer,
+                NULL,
+                &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
     return 0;
···
      */
     preempt_disable_notrace();
     trace_recursion_set(TRACE_CONTROL_BIT);
+
+    /*
+     * Control funcs (perf) uses RCU. Only trace if
+     * RCU is currently active.
+     */
+    if (!rcu_is_watching())
+        goto out;
+
     do_for_each_ftrace_op(op, ftrace_control_list) {
         if (!(op->flags & FTRACE_OPS_FL_STUB) &&
             !ftrace_function_local_disabled(op) &&
             ftrace_ops_test(op, ip, regs))
             op->func(ip, parent_ip, op, regs);
     } while_for_each_ftrace_op(op);
+ out:
     trace_recursion_clear(TRACE_CONTROL_BIT);
     preempt_enable_notrace();
 }

kernel/trace/trace.c (+53 -29)

···
     mutex_unlock(&trace_types_lock);
 }
 
-int filter_current_check_discard(struct ring_buffer *buffer,
-                 struct ftrace_event_call *call, void *rec,
-                 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+             struct ring_buffer *buffer,
+             struct ring_buffer_event *event)
 {
-    return filter_check_discard(call, rec, buffer, event);
+    if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+        !filter_match_preds(file->filter, rec)) {
+        ring_buffer_discard_commit(buffer, event);
+        return 1;
+    }
+
+    return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+                  struct ring_buffer *buffer,
+                  struct ring_buffer_event *event)
+{
+    if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+        !filter_match_preds(call->filter, rec)) {
+        ring_buffer_discard_commit(buffer, event);
+        return 1;
+    }
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(call_filter_check_discard);
 
 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 {
···
     if (isspace(ch)) {
         parser->buffer[parser->idx] = 0;
         parser->cont = false;
-    } else {
+    } else if (parser->idx < parser->size - 1) {
         parser->cont = true;
         parser->buffer[parser->idx++] = ch;
+    } else {
+        ret = -EINVAL;
+        goto out;
     }
 
     *ppos += read;
···
 }
 
 /**
- * ftrace_off_permanent - disable all ftrace code permanently
- *
- * This should only be called when a serious anomally has
- * been detected. This will turn off the function tracing,
- * ring buffers, and other tracing utilites. It takes no
- * locks and can be called from any context.
- */
-void ftrace_off_permanent(void)
-{
-    tracing_disabled = 1;
-    ftrace_stop();
-    tracing_off_permanent();
-}
-
-/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
···
     entry->ip = ip;
     entry->parent_ip = parent_ip;
 
-    if (!filter_check_discard(call, entry, buffer, event))
+    if (!call_filter_check_discard(call, entry, buffer, event))
         __buffer_unlock_commit(buffer, event);
 }
···
 
     entry->size = trace.nr_entries;
 
-    if (!filter_check_discard(call, entry, buffer, event))
+    if (!call_filter_check_discard(call, entry, buffer, event))
         __buffer_unlock_commit(buffer, event);
 
  out:
···
     trace.entries = entry->caller;
 
     save_stack_trace_user(&trace);
-    if (!filter_check_discard(call, entry, buffer, event))
+    if (!call_filter_check_discard(call, entry, buffer, event))
         __buffer_unlock_commit(buffer, event);
 
  out_drop_count:
···
     entry->fmt = fmt;
 
     memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-    if (!filter_check_discard(call, entry, buffer, event)) {
+    if (!call_filter_check_discard(call, entry, buffer, event)) {
         __buffer_unlock_commit(buffer, event);
         ftrace_trace_stack(buffer, flags, 6, pc);
     }
···
 
     memcpy(&entry->buf, tbuffer, len);
     entry->buf[len] = '\0';
-    if (!filter_check_discard(call, entry, buffer, event)) {
+    if (!call_filter_check_discard(call, entry, buffer, event)) {
         __buffer_unlock_commit(buffer, event);
         ftrace_trace_stack(buffer, flags, 6, pc);
     }
···
     seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
     seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
     seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-    seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+    seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
     seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
     seq_printf(m, "#                       is not a '0' or '1')\n");
 }
···
 
     filp->private_data = inode->i_private;
     return 0;
+}
+
+bool tracing_is_disabled(void)
+{
+    return (tracing_disabled) ? true: false;
 }
 
 /*
···
     .func = ftrace_trace_snapshot_callback,
 };
 
-static int register_snapshot_cmd(void)
+static __init int register_snapshot_cmd(void)
 {
     return register_ftrace_command(&ftrace_snapshot_cmd);
 }
 #else
-static inline int register_snapshot_cmd(void) { return 0; }
+static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 
 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
···
     iter->trace = iter->tr->current_trace;
     iter->cpu_file = RING_BUFFER_ALL_CPUS;
     iter->trace_buffer = &global_trace.trace_buffer;
+
+    if (iter->trace && iter->trace->open)
+        iter->trace->open(iter);
+
+    /* Annotate start of buffers if we had overruns */
+    if (ring_buffer_overruns(iter->trace_buffer->buffer))
+        iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+    /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+    if (trace_clocks[iter->tr->clock_id].in_ns)
+        iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 }
 
 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)

kernel/trace/trace.h (+30 -20)

···
 #ifdef CONFIG_FTRACE_SYSCALLS
     int sys_refcount_enter;
     int sys_refcount_exit;
-    DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-    DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+    struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
+    struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
 #endif
     int stop_count;
     int clock_id;
···
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+bool tracing_is_disabled(void);
 struct dentry *trace_create_file(const char *name,
                  umode_t mode,
                  struct dentry *parent,
···
 #define TRACE_GRAPH_PRINT_PROC     0x8
 #define TRACE_GRAPH_PRINT_DURATION 0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20
+#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
+#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 
 extern enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
···
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
 #define FTRACE_GRAPH_MAX_FUNCS 32
-extern int ftrace_graph_filter_enabled;
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern int ftrace_graph_notrace_count;
+extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 
 static inline int ftrace_graph_addr(unsigned long addr)
 {
     int i;
 
-    if (!ftrace_graph_filter_enabled)
+    if (!ftrace_graph_count)
         return 1;
 
     for (i = 0; i < ftrace_graph_count; i++) {
···
 
     return 0;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+    int i;
+
+    if (!ftrace_graph_notrace_count)
+        return 0;
+
+    for (i = 0; i < ftrace_graph_notrace_count; i++) {
+        if (addr == ftrace_graph_notrace_funcs[i])
+            return 1;
+    }
+
+    return 0;
+}
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
 {
     return 1;
+}
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+    return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
···
 
 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_call *call,
+extern void print_event_filter(struct ftrace_event_file *file,
                struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_call *call,
+extern int apply_event_filter(struct ftrace_event_file *file,
               char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
                     char *filter_string);
···
 
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);
-
-static inline int
-filter_check_discard(struct ftrace_event_call *call, void *rec,
-             struct ring_buffer *buffer,
-             struct ring_buffer_event *event)
-{
-    if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-        !filter_match_preds(call->filter, rec)) {
-        ring_buffer_discard_commit(buffer, event);
-        return 1;
-    }
-
-    return 0;
-}
 
 extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);

kernel/trace/trace_branch.c (+1 -1)

···
     entry->line = f->line;
     entry->correct = val == expect;
 
-    if (!filter_check_discard(call, entry, buffer, event))
+    if (!call_filter_check_discard(call, entry, buffer, event))
         __buffer_unlock_commit(buffer, event);
 
  out:

kernel/trace/trace_events.c (+20 -12)

···
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
           loff_t *ppos)
 {
-    struct ftrace_event_call *call;
+    struct ftrace_event_file *file;
     struct trace_seq *s;
     int r = -ENODEV;
···
     trace_seq_init(s);
 
     mutex_lock(&event_mutex);
-    call = event_file_data(filp);
-    if (call)
-        print_event_filter(call, s);
+    file = event_file_data(filp);
+    if (file)
+        print_event_filter(file, s);
     mutex_unlock(&event_mutex);
 
-    if (call)
+    if (file)
         r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
 
     kfree(s);
···
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
            loff_t *ppos)
 {
-    struct ftrace_event_call *call;
+    struct ftrace_event_file *file;
     char *buf;
     int err = -ENODEV;
···
     buf[cnt] = '\0';
 
     mutex_lock(&event_mutex);
-    call = event_file_data(filp);
-    if (call)
-        err = apply_event_filter(call, buf);
+    file = event_file_data(filp);
+    if (file)
+        err = apply_event_filter(file, buf);
     mutex_unlock(&event_mutex);
 
     free_page((unsigned long) buf);
···
     struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */
     struct trace_array *tr;
     int ret;
+
+    if (tracing_is_disabled())
+        return -ENODEV;
 
     /* Make sure the system still exists */
     mutex_lock(&trace_types_lock);
···
     struct trace_array *tr = inode->i_private;
     int ret;
 
+    if (tracing_is_disabled())
+        return -ENODEV;
+
     if (trace_array_get(tr) < 0)
         return -ENODEV;
 
···
     if (ret < 0) {
         trace_array_put(tr);
         kfree(dir);
+        return ret;
     }
 
     filp->private_data = dir;
 
-    return ret;
+    return 0;
 }
 
 static int subsystem_release(struct inode *inode, struct file *file)
···
             return -1;
         }
     }
-    trace_create_file("filter", 0644, file->dir, call,
+    trace_create_file("filter", 0644, file->dir, file,
               &ftrace_event_filter_fops);
 
     trace_create_file("format", 0444, file->dir, call,
···
         if (file->event_call != call)
             continue;
         ftrace_event_enable_disable(file, 0);
+        destroy_preds(file);
         /*
          * The do_for_each_event_file() is
          * a double loop. After finding the call for this
···
 {
     event_remove(call);
     trace_destroy_fields(call);
-    destroy_preds(call);
+    destroy_call_preds(call);
 }
 
 static int probe_remove_event_call(struct ftrace_event_call *call)

kernel/trace/trace_events_filter.c (+182 -42)

···
     free_page((unsigned long) buf);
 }
 
+static inline struct event_filter *event_filter(struct ftrace_event_file *file)
+{
+    if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        return file->event_call->filter;
+    else
+        return file->filter;
+}
+
 /* caller must hold event_mutex */
-void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
+void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
 {
-    struct event_filter *filter = call->filter;
+    struct event_filter *filter = event_filter(file);
 
     if (filter && filter->filter_string)
         trace_seq_printf(s, "%s\n", filter->filter_string);
···
     filter->n_preds = 0;
 }
 
-static void filter_disable(struct ftrace_event_call *call)
+static void call_filter_disable(struct ftrace_event_call *call)
 {
     call->flags &= ~TRACE_EVENT_FL_FILTERED;
+}
+
+static void filter_disable(struct ftrace_event_file *file)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        call_filter_disable(call);
+    else
+        file->flags &= ~FTRACE_EVENT_FL_FILTERED;
 }
 
 static void __free_filter(struct event_filter *filter)
···
     kfree(filter);
 }
 
-/*
- * Called when destroying the ftrace_event_call.
- * The call is being freed, so we do not need to worry about
- * the call being currently used. This is for module code removing
- * the tracepoints from within it.
- */
-void destroy_preds(struct ftrace_event_call *call)
+void destroy_call_preds(struct ftrace_event_call *call)
 {
     __free_filter(call->filter);
     call->filter = NULL;
+}
+
+static void destroy_file_preds(struct ftrace_event_file *file)
+{
+    __free_filter(file->filter);
+    file->filter = NULL;
+}
+
+/*
+ * Called when destroying the ftrace_event_file.
+ * The file is being freed, so we do not need to worry about
+ * the file being currently used. This is for module code removing
+ * the tracepoints from within it.
+ */
+void destroy_preds(struct ftrace_event_file *file)
+{
+    if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        destroy_call_preds(file->event_call);
+    else
+        destroy_file_preds(file);
 }
 
 static struct event_filter *__alloc_filter(void)
···
     return 0;
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static inline void __remove_filter(struct ftrace_event_file *file)
 {
+    struct ftrace_event_call *call = file->event_call;
+
+    filter_disable(file);
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        remove_filter_string(call->filter);
+    else
+        remove_filter_string(file->filter);
+}
+
+static void filter_free_subsystem_preds(struct event_subsystem *system,
+                    struct trace_array *tr)
+{
+    struct ftrace_event_file *file;
     struct ftrace_event_call *call;
 
-    list_for_each_entry(call, &ftrace_events, list) {
+    list_for_each_entry(file, &tr->events, list) {
+        call = file->event_call;
         if (strcmp(call->class->system, system->name) != 0)
             continue;
 
-        filter_disable(call);
-        remove_filter_string(call->filter);
+        __remove_filter(file);
     }
 }
 
-static void filter_free_subsystem_filters(struct event_subsystem *system)
+static inline void __free_subsystem_filter(struct ftrace_event_file *file)
 {
-    struct ftrace_event_call *call;
+    struct ftrace_event_call *call = file->event_call;
 
-    list_for_each_entry(call, &ftrace_events, list) {
-        if (strcmp(call->class->system, system->name) != 0)
-            continue;
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
         __free_filter(call->filter);
         call->filter = NULL;
+    } else {
+        __free_filter(file->filter);
+        file->filter = NULL;
+    }
+}
+
+static void filter_free_subsystem_filters(struct event_subsystem *system,
+                      struct trace_array *tr)
+{
+    struct ftrace_event_file *file;
+    struct ftrace_event_call *call;
+
+    list_for_each_entry(file, &tr->events, list) {
+        call = file->event_call;
+        if (strcmp(call->class->system, system->name) != 0)
+            continue;
+        __free_subsystem_filter(file);
     }
 }
···
     return err;
 }
 
+static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        call->flags |= TRACE_EVENT_FL_FILTERED;
+    else
+        file->flags |= FTRACE_EVENT_FL_FILTERED;
+}
+
+static inline void event_set_filter(struct ftrace_event_file *file,
+                    struct event_filter *filter)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        rcu_assign_pointer(call->filter, filter);
+    else
+        rcu_assign_pointer(file->filter, filter);
+}
+
+static inline void event_clear_filter(struct ftrace_event_file *file)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        RCU_INIT_POINTER(call->filter, NULL);
+    else
+        RCU_INIT_POINTER(file->filter, NULL);
+}
+
+static inline void
+event_set_no_set_filter_flag(struct ftrace_event_file *file)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+    else
+        file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline void
+event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+        call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+    else
+        file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline bool
+event_no_set_filter_flag(struct ftrace_event_file *file)
+{
+    struct ftrace_event_call *call = file->event_call;
+
+    if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
+        return true;
+
+    if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
+        (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
+        return true;
+
+    return false;
+}
+
 struct filter_list {
     struct list_head list;
     struct event_filter *filter;
 };
 
 static int replace_system_preds(struct event_subsystem *system,
+                struct trace_array *tr,
                 struct filter_parse_state *ps,
                 char *filter_string)
 {
+    struct ftrace_event_file *file;
     struct ftrace_event_call *call;
     struct filter_list *filter_item;
     struct filter_list *tmp;
···
     bool fail = true;
     int err;
 
-    list_for_each_entry(call, &ftrace_events, list) {
-
+    list_for_each_entry(file, &tr->events, list) {
+        call = file->event_call;
         if (strcmp(call->class->system, system->name) != 0)
             continue;
 
···
          */
         err = replace_preds(call, NULL, ps, filter_string, true);
         if (err)
-            call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+            event_set_no_set_filter_flag(file);
         else
-            call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+            event_clear_no_set_filter_flag(file);
     }
 
-    list_for_each_entry(call, &ftrace_events, list) {
+    list_for_each_entry(file, &tr->events, list) {
         struct event_filter *filter;
+
+        call = file->event_call;
 
         if (strcmp(call->class->system, system->name) != 0)
             continue;
 
-        if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+        if (event_no_set_filter_flag(file))
             continue;
 
         filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
···
 
         err = replace_preds(call, filter, ps, filter_string, false);
         if (err) {
-            filter_disable(call);
+            filter_disable(file);
             parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
             append_filter_err(ps, filter);
         } else
-            call->flags |= TRACE_EVENT_FL_FILTERED;
+            event_set_filtered_flag(file);
         /*
          * Regardless of if this returned an error, we still
          * replace the filter for the call.
          */
-        filter = call->filter;
-        rcu_assign_pointer(call->filter, filter_item->filter);
+        filter = event_filter(file);
+        event_set_filter(file, filter_item->filter);
         filter_item->filter = filter;
 
         fail = false;
···
  * and always remembers @filter_str.
  */
 static int create_system_filter(struct event_subsystem *system,
+                struct trace_array *tr,
                 char *filter_str, struct event_filter **filterp)
 {
     struct event_filter *filter = NULL;
···
 
     err = create_filter_start(filter_str, true, &ps, &filter);
     if (!err) {
-        err = replace_system_preds(system, ps, filter_str);
+        err = replace_system_preds(system, tr, ps, filter_str);
         if (!err) {
             /* System filters just show a default message */
             kfree(filter->filter_string);
···
 }
 
 /* caller must hold event_mutex */
-int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
+int apply_event_filter(struct ftrace_event_file *file, char *filter_string)
 {
+    struct ftrace_event_call *call = file->event_call;
     struct event_filter *filter;
     int err;
 
     if (!strcmp(strstrip(filter_string), "0")) {
-        filter_disable(call);
-        filter = call->filter;
+        filter_disable(file);
+        filter = event_filter(file);
+
         if (!filter)
             return 0;
-        RCU_INIT_POINTER(call->filter, NULL);
+
+        event_clear_filter(file);
+
         /* Make sure the filter is not being used */
         synchronize_sched();
         __free_filter(filter);
+
         return 0;
     }
 
···
      * string
      */
     if (filter) {
-        struct event_filter *tmp = call->filter;
+        struct event_filter *tmp;
 
+        tmp = event_filter(file);
         if (!err)
-            call->flags |= TRACE_EVENT_FL_FILTERED;
+            event_set_filtered_flag(file);
         else
-            filter_disable(call);
+            filter_disable(file);
 
-        rcu_assign_pointer(call->filter, filter);
+        event_set_filter(file, filter);
 
         if (tmp) {
             /* Make sure the call is done with the filter */
···
              char *filter_string)
 {
     struct event_subsystem *system = dir->subsystem;
+    struct trace_array *tr = dir->tr;
     struct event_filter *filter;
     int err = 0;
 
···
     }
 
     if (!strcmp(strstrip(filter_string), "0")) {
-        filter_free_subsystem_preds(system);
+        filter_free_subsystem_preds(system, tr);
         remove_filter_string(system->filter);
         filter = system->filter;
         system->filter = NULL;
         /* Ensure all filters are no longer used */
         synchronize_sched();
-        filter_free_subsystem_filters(system);
+        filter_free_subsystem_filters(system, tr);
         __free_filter(filter);
         goto out_unlock;
     }
 
-    err = create_system_filter(system, filter_string, &filter);
+    err = create_system_filter(system, tr, filter_string, &filter);
     if (filter) {
         /*
          * No event actually uses the system filter

+1 -1
kernel/trace/trace_export.c
···
180 .event.type = etype, \
181 .class = &event_class_ftrace_##call, \
182 .print_fmt = print, \
183 - .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \
184 }; \
185 struct ftrace_event_call __used \
186 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
···
180 .event.type = etype, \
181 .class = &event_class_ftrace_##call, \
182 .print_fmt = print, \
183 + .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
184 }; \
185 struct ftrace_event_call __used \
186 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
+66 -16
kernel/trace/trace_functions_graph.c
···
82 * to fill in space into DURATION column.
83 */
84 enum {
85 - DURATION_FILL_FULL = -1,
86 - DURATION_FILL_START = -2,
87 - DURATION_FILL_END = -3,
88 };
89
90 static enum print_line_t
···
114 return -EBUSY;
115 }
116
117 calltime = trace_clock_local();
118
119 index = ++current->curr_ret_stack;
120 barrier();
121 current->ret_stack[index].ret = ret;
122 current->ret_stack[index].func = func;
123 current->ret_stack[index].calltime = calltime;
124 current->ret_stack[index].subtime = 0;
125 current->ret_stack[index].fp = frame_pointer;
126 - *depth = index;
127
128 return 0;
129 }
···
158
159 index = current->curr_ret_stack;
160
161 - if (unlikely(index < 0)) {
162 ftrace_graph_stop();
163 WARN_ON(1);
164 /* Might as well panic, otherwise we have no where to go */
···
224 trace.rettime = trace_clock_local();
225 barrier();
226 current->curr_ret_stack--;
227
228 /*
229 * The trace should run after decrementing the ret counter
···
270 return 0;
271 entry = ring_buffer_event_data(event);
272 entry->graph_ent = *trace;
273 - if (!filter_current_check_discard(buffer, call, entry, event))
274 __buffer_unlock_commit(buffer, event);
275
276 return 1;
···
299
300 /* trace it when it is-nested-in or is a function enabled. */
301 if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
302 - ftrace_graph_ignore_irqs()) ||
303 (max_depth && trace->depth >= max_depth))
304 return 0;
305
306 local_irq_save(flags);
307 cpu = raw_smp_processor_id();
···
385 return;
386 entry = ring_buffer_event_data(event);
387 entry->ret = *trace;
388 - if (!filter_current_check_discard(buffer, call, entry, event))
389 __buffer_unlock_commit(buffer, event);
390 }
···
702 }
703
704 /* No overhead */
705 - ret = print_graph_duration(DURATION_FILL_START, s, flags);
706 if (ret != TRACE_TYPE_HANDLED)
707 return ret;
···
714 if (!ret)
715 return TRACE_TYPE_PARTIAL_LINE;
716
717 - ret = print_graph_duration(DURATION_FILL_END, s, flags);
718 if (ret != TRACE_TYPE_HANDLED)
719 return ret;
···
779 return TRACE_TYPE_HANDLED;
780
781 /* No real adata, just filling the column with spaces */
782 - switch (duration) {
783 - case DURATION_FILL_FULL:
784 ret = trace_seq_puts(s, " | ");
785 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
786 - case DURATION_FILL_START:
787 ret = trace_seq_puts(s, " ");
788 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
789 - case DURATION_FILL_END:
790 ret = trace_seq_puts(s, " |");
791 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
792 }
···
902 }
903
904 /* No time */
905 - ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
906 if (ret != TRACE_TYPE_HANDLED)
907 return ret;
···
1222 return TRACE_TYPE_PARTIAL_LINE;
1223
1224 /* No time */
1225 - ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
1226 if (ret != TRACE_TYPE_HANDLED)
1227 return ret;
1228
···
82 * to fill in space into DURATION column.
83 */
84 enum {
85 + FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
86 + FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
87 + FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
88 };
89
90 static enum print_line_t
···
114 return -EBUSY;
115 }
116
117 + /*
118 + * The curr_ret_stack is an index to ftrace return stack of
119 + * current task. Its value should be in [0, FTRACE_RETFUNC_
120 + * DEPTH) when the function graph tracer is used. To support
121 + * filtering out specific functions, it makes the index
122 + * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
123 + * so when it sees a negative index the ftrace will ignore
124 + * the record. And the index gets recovered when returning
125 + * from the filtered function by adding the FTRACE_NOTRACE_
126 + * DEPTH and then it'll continue to record functions normally.
127 + *
128 + * The curr_ret_stack is initialized to -1 and get increased
129 + * in this function. So it can be less than -1 only if it was
130 + * filtered out via ftrace_graph_notrace_addr() which can be
131 + * set from set_graph_notrace file in debugfs by user.
132 + */
133 + if (current->curr_ret_stack < -1)
134 + return -EBUSY;
135 +
136 calltime = trace_clock_local();
137
138 index = ++current->curr_ret_stack;
139 + if (ftrace_graph_notrace_addr(func))
140 + current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
141 barrier();
142 current->ret_stack[index].ret = ret;
143 current->ret_stack[index].func = func;
144 current->ret_stack[index].calltime = calltime;
145 current->ret_stack[index].subtime = 0;
146 current->ret_stack[index].fp = frame_pointer;
147 + *depth = current->curr_ret_stack;
148
149 return 0;
150 }
···
137
138 index = current->curr_ret_stack;
139
140 + /*
141 + * A negative index here means that it's just returned from a
142 + * notrace'd function. Recover index to get an original
143 + * return address. See ftrace_push_return_trace().
144 + *
145 + * TODO: Need to check whether the stack gets corrupted.
146 + */
147 + if (index < 0)
148 + index += FTRACE_NOTRACE_DEPTH;
149 +
150 + if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
151 ftrace_graph_stop();
152 WARN_ON(1);
153 /* Might as well panic, otherwise we have no where to go */
···
193 trace.rettime = trace_clock_local();
194 barrier();
195 current->curr_ret_stack--;
196 + /*
197 + * The curr_ret_stack can be less than -1 only if it was
198 + * filtered out and it's about to return from the function.
199 + * Recover the index and continue to trace normal functions.
200 + */
201 + if (current->curr_ret_stack < -1) {
202 + current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
203 + return ret;
204 + }
205
206 /*
207 * The trace should run after decrementing the ret counter
···
230 return 0;
231 entry = ring_buffer_event_data(event);
232 entry->graph_ent = *trace;
233 + if (!call_filter_check_discard(call, entry, buffer, event))
234 __buffer_unlock_commit(buffer, event);
235
236 return 1;
···
259
260 /* trace it when it is-nested-in or is a function enabled. */
261 if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
262 + ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
263 (max_depth && trace->depth >= max_depth))
264 return 0;
265 +
266 + /*
267 + * Do not trace a function if it's filtered by set_graph_notrace.
268 + * Make the index of ret stack negative to indicate that it should
269 + * ignore further functions. But it needs its own ret stack entry
270 + * to recover the original index in order to continue tracing after
271 + * returning from the function.
272 + */
273 + if (ftrace_graph_notrace_addr(trace->func))
274 + return 1;
275
276 local_irq_save(flags);
277 cpu = raw_smp_processor_id();
···
335 return;
336 entry = ring_buffer_event_data(event);
337 entry->ret = *trace;
338 + if (!call_filter_check_discard(call, entry, buffer, event))
339 __buffer_unlock_commit(buffer, event);
340 }
···
652 }
653
654 /* No overhead */
655 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
656 if (ret != TRACE_TYPE_HANDLED)
657 return ret;
···
664 if (!ret)
665 return TRACE_TYPE_PARTIAL_LINE;
666
667 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
668 if (ret != TRACE_TYPE_HANDLED)
669 return ret;
···
729 return TRACE_TYPE_HANDLED;
730
731 /* No real adata, just filling the column with spaces */
732 + switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
733 + case FLAGS_FILL_FULL:
734 ret = trace_seq_puts(s, " | ");
735 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
736 + case FLAGS_FILL_START:
737 ret = trace_seq_puts(s, " ");
738 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
739 + case FLAGS_FILL_END:
740 ret = trace_seq_puts(s, " |");
741 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
742 }
···
852 }
853
854 /* No time */
855 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
856 if (ret != TRACE_TYPE_HANDLED)
857 return ret;
···
1172 return TRACE_TYPE_PARTIAL_LINE;
1173
1174 /* No time */
1175 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
1176 if (ret != TRACE_TYPE_HANDLED)
1177 return ret;
1178
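
The comments above carry the whole set_graph_notrace design: entering a filtered function still consumes a real ret_stack slot, but curr_ret_stack is then biased negative by FTRACE_NOTRACE_DEPTH so that nested pushes bail out with -EBUSY, and the pop path removes the bias on the way out. A self-contained userspace sketch of just that index arithmetic (NOTRACE_DEPTH is a stand-in for the kernel's FTRACE_NOTRACE_DEPTH; the nested-push bailout is omitted for brevity):

	#include <assert.h>

	#define NOTRACE_DEPTH 65536	/* stand-in for FTRACE_NOTRACE_DEPTH */

	static int curr_ret_stack = -1;	/* same initial value as the kernel uses */

	/* Push one frame; bias the index if this function is filtered out. */
	static int push(int notrace)
	{
		int index = ++curr_ret_stack;

		if (notrace)
			curr_ret_stack -= NOTRACE_DEPTH;	/* go (very) negative */
		return index;	/* real slot where the return state lives */
	}

	/* Pop one frame; a biased index is recovered on the way out. */
	static void pop(void)
	{
		int index = curr_ret_stack;

		if (index < 0)
			index += NOTRACE_DEPTH;	/* find the real slot again */
		assert(index >= 0);

		curr_ret_stack--;
		if (curr_ret_stack < -1)
			curr_ret_stack += NOTRACE_DEPTH;	/* left the filtered function */
	}

	int main(void)
	{
		push(0);	/* traced normally */
		push(1);	/* filtered: curr_ret_stack goes negative... */
		pop();		/* ...and is recovered right here */
		pop();
		assert(curr_ret_stack == -1);
		return 0;
	}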
+2 -2
kernel/trace/trace_kprobe.c
···
835 entry->ip = (unsigned long)tp->rp.kp.addr;
836 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
837
838 - if (!filter_current_check_discard(buffer, call, entry, event))
839 trace_buffer_unlock_commit_regs(buffer, event,
840 irq_flags, pc, regs);
841 }
···
884 entry->ret_ip = (unsigned long)ri->ret_addr;
885 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
886
887 - if (!filter_current_check_discard(buffer, call, entry, event))
888 trace_buffer_unlock_commit_regs(buffer, event,
889 irq_flags, pc, regs);
890 }
···
835 entry->ip = (unsigned long)tp->rp.kp.addr;
836 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
837
838 + if (!filter_check_discard(ftrace_file, entry, buffer, event))
839 trace_buffer_unlock_commit_regs(buffer, event,
840 irq_flags, pc, regs);
841 }
···
884 entry->ret_ip = (unsigned long)ri->ret_addr;
885 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
886
887 + if (!filter_check_discard(ftrace_file, entry, buffer, event))
888 trace_buffer_unlock_commit_regs(buffer, event,
889 irq_flags, pc, regs);
890 }
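
Note the split in helpers: filter_check_discard() now takes the ftrace_event_file so each trace instance applies its own predicates, while the renamed call_filter_check_discard() (used by mmiotrace, sched_switch and uprobes below) keeps the per-call behavior for events still marked TRACE_EVENT_FL_USE_CALL_FILTER. A rough sketch of what the per-file variant decides, with simplified types (the real helper sits in kernel/trace/trace.c):

	/* Sketch: drop the just-reserved event if this instance's filter rejects it. */
	static int filter_check_discard(struct ftrace_event_file *file, void *rec,
					struct ring_buffer *buffer,
					struct ring_buffer_event *event)
	{
		if ((file->flags & FTRACE_EVENT_FL_FILTERED) &&
		    !filter_match_preds(file->filter, rec)) {
			ring_buffer_discard_commit(buffer, event);
			return 1;	/* caller skips the commit */
		}

		return 0;
	}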
+2 -2
kernel/trace/trace_mmiotrace.c
···
323 entry = ring_buffer_event_data(event);
324 entry->rw = *rw;
325
326 - if (!filter_check_discard(call, entry, buffer, event))
327 trace_buffer_unlock_commit(buffer, event, 0, pc);
328 }
···
353 entry = ring_buffer_event_data(event);
354 entry->map = *map;
355
356 - if (!filter_check_discard(call, entry, buffer, event))
357 trace_buffer_unlock_commit(buffer, event, 0, pc);
358 }
359
···
323 entry = ring_buffer_event_data(event);
324 entry->rw = *rw;
325
326 + if (!call_filter_check_discard(call, entry, buffer, event))
327 trace_buffer_unlock_commit(buffer, event, 0, pc);
328 }
···
353 entry = ring_buffer_event_data(event);
354 entry->map = *map;
355
356 + if (!call_filter_check_discard(call, entry, buffer, event))
357 trace_buffer_unlock_commit(buffer, event, 0, pc);
358 }
359
+2 -2
kernel/trace/trace_sched_switch.c
···
45 entry->next_state = next->state;
46 entry->next_cpu = task_cpu(next);
47
48 - if (!filter_check_discard(call, entry, buffer, event))
49 trace_buffer_unlock_commit(buffer, event, flags, pc);
50 }
···
101 entry->next_state = wakee->state;
102 entry->next_cpu = task_cpu(wakee);
103
104 - if (!filter_check_discard(call, entry, buffer, event))
105 trace_buffer_unlock_commit(buffer, event, flags, pc);
106 }
107
···
45 entry->next_state = next->state;
46 entry->next_cpu = task_cpu(next);
47
48 + if (!call_filter_check_discard(call, entry, buffer, event))
49 trace_buffer_unlock_commit(buffer, event, flags, pc);
50 }
···
101 entry->next_state = wakee->state;
102 entry->next_cpu = task_cpu(wakee);
103
104 + if (!call_filter_check_discard(call, entry, buffer, event))
105 trace_buffer_unlock_commit(buffer, event, flags, pc);
106 }
107
+6 -37
kernel/trace/trace_stat.c
···
43 /* The root directory for all stat files */
44 static struct dentry *stat_dir;
45
46 - /*
47 - * Iterate through the rbtree using a post order traversal path
48 - * to release the next node.
49 - * It won't necessary release one at each iteration
50 - * but it will at least advance closer to the next one
51 - * to be released.
52 - */
53 - static struct rb_node *release_next(struct tracer_stat *ts,
54 - struct rb_node *node)
55 - {
56 - struct stat_node *snode;
57 - struct rb_node *parent = rb_parent(node);
58 -
59 - if (node->rb_left)
60 - return node->rb_left;
61 - else if (node->rb_right)
62 - return node->rb_right;
63 - else {
64 - if (!parent)
65 - ;
66 - else if (parent->rb_left == node)
67 - parent->rb_left = NULL;
68 - else
69 - parent->rb_right = NULL;
70 -
71 - snode = container_of(node, struct stat_node, node);
72 - if (ts->stat_release)
73 - ts->stat_release(snode->stat);
74 - kfree(snode);
75 -
76 - return parent;
77 - }
78 - }
79 -
80 static void __reset_stat_session(struct stat_session *session)
81 {
82 - struct rb_node *node = session->stat_root.rb_node;
83
84 - while (node)
85 - node = release_next(session->ts, node);
86
87 session->stat_root = RB_ROOT;
88 }
···
43 /* The root directory for all stat files */
44 static struct dentry *stat_dir;
45
46 static void __reset_stat_session(struct stat_session *session)
47 {
48 + struct stat_node *snode, *n;
49
50 + rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
51 + if (session->ts->stat_release)
52 + session->ts->stat_release(snode->stat);
53 + kfree(snode);
54 + }
55
56 session->stat_root = RB_ROOT;
57 }
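
rbtree_postorder_for_each_entry_safe() (from linux/rbtree.h) visits each node only after both of its children and fetches the next entry before the body runs, so every stat_node can be released in place; the open-coded parent-pointer walk above becomes dead weight. A minimal sketch of the same pattern on a trivial node type, assuming <linux/rbtree.h> and <linux/slab.h>:

	struct item {
		struct rb_node node;
		int key;
	};

	static void free_tree(struct rb_root *root)
	{
		struct item *pos, *n;

		/*
		 * Postorder: children are freed before their parent, and 'n'
		 * already points at the next entry, so kfree(pos) is safe.
		 */
		rbtree_postorder_for_each_entry_safe(pos, n, root, node)
			kfree(pos);

		*root = RB_ROOT;	/* nothing left; reset for reuse */
	}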
+32 -10
kernel/trace/trace_syscalls.c
···
302 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
303 {
304 struct trace_array *tr = data;
305 struct syscall_trace_enter *entry;
306 struct syscall_metadata *sys_data;
307 struct ring_buffer_event *event;
···
315 syscall_nr = trace_get_syscall_nr(current, regs);
316 if (syscall_nr < 0)
317 return;
318 - if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
319 return;
320
321 sys_data = syscall_nr_to_meta(syscall_nr);
···
343 entry->nr = syscall_nr;
344 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
345
346 - if (!filter_current_check_discard(buffer, sys_data->enter_event,
347 - entry, event))
348 trace_current_buffer_unlock_commit(buffer, event,
349 irq_flags, pc);
350 }
···
351 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
352 {
353 struct trace_array *tr = data;
354 struct syscall_trace_exit *entry;
355 struct syscall_metadata *sys_data;
356 struct ring_buffer_event *event;
···
363 syscall_nr = trace_get_syscall_nr(current, regs);
364 if (syscall_nr < 0)
365 return;
366 - if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
367 return;
368
369 sys_data = syscall_nr_to_meta(syscall_nr);
···
390 entry->nr = syscall_nr;
391 entry->ret = syscall_get_return_value(current, regs);
392
393 - if (!filter_current_check_discard(buffer, sys_data->exit_event,
394 - entry, event))
395 trace_current_buffer_unlock_commit(buffer, event,
396 irq_flags, pc);
397 }
···
409 if (!tr->sys_refcount_enter)
410 ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
411 if (!ret) {
412 - set_bit(num, tr->enabled_enter_syscalls);
413 tr->sys_refcount_enter++;
414 }
415 mutex_unlock(&syscall_trace_lock);
···
427 return;
428 mutex_lock(&syscall_trace_lock);
429 tr->sys_refcount_enter--;
430 - clear_bit(num, tr->enabled_enter_syscalls);
431 if (!tr->sys_refcount_enter)
432 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
433 mutex_unlock(&syscall_trace_lock);
434 }
435
436 static int reg_event_syscall_exit(struct ftrace_event_file *file,
···
452 if (!tr->sys_refcount_exit)
453 ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
454 if (!ret) {
455 - set_bit(num, tr->enabled_exit_syscalls);
456 tr->sys_refcount_exit++;
457 }
458 mutex_unlock(&syscall_trace_lock);
···
470 return;
471 mutex_lock(&syscall_trace_lock);
472 tr->sys_refcount_exit--;
473 - clear_bit(num, tr->enabled_exit_syscalls);
474 if (!tr->sys_refcount_exit)
475 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
476 mutex_unlock(&syscall_trace_lock);
477 }
478
479 static int __init init_syscall_trace(struct ftrace_event_call *call)
···
302 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
303 {
304 struct trace_array *tr = data;
305 + struct ftrace_event_file *ftrace_file;
306 struct syscall_trace_enter *entry;
307 struct syscall_metadata *sys_data;
308 struct ring_buffer_event *event;
···
314 syscall_nr = trace_get_syscall_nr(current, regs);
315 if (syscall_nr < 0)
316 return;
317 +
318 + /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
319 + ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
320 + if (!ftrace_file)
321 + return;
322 +
323 + if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
324 return;
325
326 sys_data = syscall_nr_to_meta(syscall_nr);
···
336 entry->nr = syscall_nr;
337 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
338
339 + if (!filter_check_discard(ftrace_file, entry, buffer, event))
340 trace_current_buffer_unlock_commit(buffer, event,
341 irq_flags, pc);
342 }
···
345 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
346 {
347 struct trace_array *tr = data;
348 + struct ftrace_event_file *ftrace_file;
349 struct syscall_trace_exit *entry;
350 struct syscall_metadata *sys_data;
351 struct ring_buffer_event *event;
···
356 syscall_nr = trace_get_syscall_nr(current, regs);
357 if (syscall_nr < 0)
358 return;
359 +
360 + /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
361 + ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
362 + if (!ftrace_file)
363 + return;
364 +
365 + if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
366 return;
367
368 sys_data = syscall_nr_to_meta(syscall_nr);
···
377 entry->nr = syscall_nr;
378 entry->ret = syscall_get_return_value(current, regs);
379
380 + if (!filter_check_discard(ftrace_file, entry, buffer, event))
381 trace_current_buffer_unlock_commit(buffer, event,
382 irq_flags, pc);
383 }
···
397 if (!tr->sys_refcount_enter)
398 ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
399 if (!ret) {
400 + rcu_assign_pointer(tr->enter_syscall_files[num], file);
401 tr->sys_refcount_enter++;
402 }
403 mutex_unlock(&syscall_trace_lock);
···
415 return;
416 mutex_lock(&syscall_trace_lock);
417 tr->sys_refcount_enter--;
418 + rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
419 if (!tr->sys_refcount_enter)
420 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
421 mutex_unlock(&syscall_trace_lock);
422 + /*
423 + * Callers expect the event to be completely disabled on
424 + * return, so wait for current handlers to finish.
425 + */
426 + synchronize_sched();
427 }
428
429 static int reg_event_syscall_exit(struct ftrace_event_file *file,
···
435 if (!tr->sys_refcount_exit)
436 ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
437 if (!ret) {
438 + rcu_assign_pointer(tr->exit_syscall_files[num], file);
439 tr->sys_refcount_exit++;
440 }
441 mutex_unlock(&syscall_trace_lock);
···
453 return;
454 mutex_lock(&syscall_trace_lock);
455 tr->sys_refcount_exit--;
456 + rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
457 if (!tr->sys_refcount_exit)
458 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
459 mutex_unlock(&syscall_trace_lock);
460 + /*
461 + * Callers expect the event to be completely disabled on
462 + * return, so wait for current handlers to finish.
463 + */
464 + synchronize_sched();
465 }
466
467 static int __init init_syscall_trace(struct ftrace_event_call *call)
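
The hooks now look up the per-instance ftrace_event_file under RCU-sched (the tracepoint handler already runs inside rcu_read_lock_sched() via __DO_TRACE) and bail out when the slot is NULL or the event is soft-disabled. Unregistering NULLs the slot and then blocks in synchronize_sched(), so no handler can still hold the file once the unreg path returns. A condensed sketch of that publish/retract lifecycle, with a hypothetical my_files[] array standing in for tr->enter_syscall_files / tr->exit_syscall_files:

	static struct ftrace_event_file *my_files[NR_syscalls];

	static void publish(int num, struct ftrace_event_file *file)
	{
		rcu_assign_pointer(my_files[num], file);	/* readers may see it now */
	}

	static void retract(int num)
	{
		rcu_assign_pointer(my_files[num], NULL);
		synchronize_sched();	/* all rcu_read_lock_sched() readers are done */
	}

	/* Reader: runs under the tracepoint's rcu_read_lock_sched() section. */
	static void handler(int num)
	{
		struct ftrace_event_file *file;

		file = rcu_dereference_sched(my_files[num]);
		if (!file)
			return;		/* event is fully disabled */
		/* ... record the event through 'file' ... */
	}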
+2 -1
kernel/trace/trace_uprobe.c
···
128 if (is_ret)
129 tu->consumer.ret_handler = uretprobe_dispatcher;
130 init_trace_uprobe_filter(&tu->filter);
131 return tu;
132
133 error:
···
562 for (i = 0; i < tu->nr_args; i++)
563 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
564
565 - if (!filter_current_check_discard(buffer, call, entry, event))
566 trace_buffer_unlock_commit(buffer, event, 0, 0);
567 }
568
···
128 if (is_ret)
129 tu->consumer.ret_handler = uretprobe_dispatcher;
130 init_trace_uprobe_filter(&tu->filter);
131 + tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
132 return tu;
133
134 error:
···
561 for (i = 0; i < tu->nr_args; i++)
562 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
563
564 + if (!call_filter_check_discard(call, entry, buffer, event))
565 trace_buffer_unlock_commit(buffer, event, 0, 0);
566 }
567
+2 -2
scripts/recordmcount.pl
···
214 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
215 $section_regex = "Disassembly of section\\s+(\\S+):";
216 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
217 - $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
218 $section_type = '@progbits';
219 $mcount_adjust = 0;
220 $type = ".long";
221
222 if ($arch eq "x86_64") {
223 - $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
224 $type = ".quad";
225 $alignment = 8;
226 $mcount_adjust = -1;
···
214 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
215 $section_regex = "Disassembly of section\\s+(\\S+):";
216 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
217 + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$";
218 $section_type = '@progbits';
219 $mcount_adjust = 0;
220 $type = ".long";
221
222 if ($arch eq "x86_64") {
223 + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)([+-]0x[0-9a-zA-Z]+)?\$";
224 $type = ".quad";
225 $alignment = 8;
226 $mcount_adjust = -1;
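
With gcc's -mfentry, the profiling call goes to __fentry__ at the very first instruction of a function rather than to mcount after the prologue, so recordmcount.pl must accept either symbol in the objdump -dr output it scans. Illustrative relocation lines the widened $mcount_regex is meant to match (offsets invented for the example):

	4: R_X86_64_PC32	mcount-0x4
	4: R_X86_64_PC32	__fentry__-0x4

The extra capture group shifts the optional offset to $3, which is harmless since the script only consumes the address captured in $1.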