Merge tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing update from Steven Rostedt:
"This batch of changes is mostly clean ups and small bug fixes. The
only real feature that was added this release is from Namhyung Kim,
who introduced "set_graph_notrace" filter that lets you run the
function graph tracer and not trace particular functions and their
call chain.

Tom Zanussi added some updates to the ftrace multibuffer tracing that
made it more consistent with the top level tracing.

One of the fixes for perf function tracing required an API change in
RCU: the addition of "rcu_is_watching()". As Paul McKenney is pushing
that change in this release too, he gave me a branch that included all
the changes to get that working, and I pulled that into my tree in
order to complete the perf function tracing fix"
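
As a usage illustration of the "set_graph_notrace" filter described above,
here is a minimal userspace sketch that excludes one function (and everything
it calls) from the function graph trace. The debugfs path and the chosen
function name are assumptions for the example, not part of this series.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumes debugfs is mounted at the usual location. */
            const char *path = "/sys/kernel/debug/tracing/set_graph_notrace";
            const char *func = "kmem_cache_alloc\n";   /* example function */
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, func, strlen(func)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }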

* tag 'trace-3.13' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Add rcu annotation for syscall trace descriptors
tracing: Do not use signed enums with unsigned long long in fgragh output
tracing: Remove unused function ftrace_off_permanent()
tracing: Do not assign filp->private_data to freed memory
tracing: Add helper function tracing_is_disabled()
tracing: Open tracer when ftrace_dump_on_oops is used
tracing: Add support for SOFT_DISABLE to syscall events
tracing: Make register/unregister_ftrace_command __init
tracing: Update event filters for multibuffer
recordmcount.pl: Add support for __fentry__
ftrace: Have control op function callback only trace when RCU is watching
rcu: Do not trace rcu_is_watching() functions
ftrace/x86: skip over the breakpoint for ftrace caller
trace/trace_stat: use rbtree postorder iteration helper instead of opencoding
ftrace: Add set_graph_notrace filter
ftrace: Narrow down the protected area of graph_lock
ftrace: Introduce struct ftrace_graph_data
ftrace: Get rid of ftrace_graph_filter_enabled
tracing: Fix potential out-of-bounds in trace_get_user()
tracing: Show more exact help information about snapshot

+568 -234
+13 -1
arch/x86/kernel/ftrace.c
··· 248 248 return ret; 249 249 } 250 250 251 + static int is_ftrace_caller(unsigned long ip) 252 + { 253 + if (ip == (unsigned long)(&ftrace_call) || 254 + ip == (unsigned long)(&ftrace_regs_call)) 255 + return 1; 256 + 257 + return 0; 258 + } 259 + 251 260 /* 252 261 * A breakpoint was added to the code address we are about to 253 262 * modify, and this is the handle that will just skip over it. ··· 266 257 */ 267 258 int ftrace_int3_handler(struct pt_regs *regs) 268 259 { 260 + unsigned long ip; 261 + 269 262 if (WARN_ON_ONCE(!regs)) 270 263 return 0; 271 264 272 - if (!ftrace_location(regs->ip - 1)) 265 + ip = regs->ip - 1; 266 + if (!ftrace_location(ip) && !is_ftrace_caller(ip)) 273 267 return 0; 274 268 275 269 regs->ip += MCOUNT_INSN_SIZE - 1;
+3 -2
include/linux/ftrace.h
··· 533 533 static inline void ftrace_disable_daemon(void) { } 534 534 static inline void ftrace_enable_daemon(void) { } 535 535 static inline void ftrace_release_mod(struct module *mod) {} 536 - static inline int register_ftrace_command(struct ftrace_func_command *cmd) 536 + static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) 537 537 { 538 538 return -EINVAL; 539 539 } 540 - static inline int unregister_ftrace_command(char *cmd_name) 540 + static inline __init int unregister_ftrace_command(char *cmd_name) 541 541 { 542 542 return -EINVAL; 543 543 } ··· 721 721 extern char __irqentry_text_start[]; 722 722 extern char __irqentry_text_end[]; 723 723 724 + #define FTRACE_NOTRACE_DEPTH 65536 724 725 #define FTRACE_RETFUNC_DEPTH 50 725 726 #define FTRACE_RETSTACK_ALLOC_SIZE 32 726 727 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+20 -5
include/linux/ftrace_event.h
··· 202 202 TRACE_EVENT_FL_NO_SET_FILTER_BIT, 203 203 TRACE_EVENT_FL_IGNORE_ENABLE_BIT, 204 204 TRACE_EVENT_FL_WAS_ENABLED_BIT, 205 + TRACE_EVENT_FL_USE_CALL_FILTER_BIT, 205 206 }; 206 207 207 208 /* ··· 214 213 * WAS_ENABLED - Set and stays set when an event was ever enabled 215 214 * (used for module unloading, if a module event is enabled, 216 215 * it is best to clear the buffers that used it). 216 + * USE_CALL_FILTER - For ftrace internal events, don't use file filter 217 217 */ 218 218 enum { 219 219 TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), ··· 222 220 TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), 223 221 TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), 224 222 TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), 223 + TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), 225 224 }; 226 225 227 226 struct ftrace_event_call { ··· 241 238 * bit 2: failed to apply filter 242 239 * bit 3: ftrace internal event (do not enable) 243 240 * bit 4: Event was enabled by module 241 + * bit 5: use call filter rather than file filter 244 242 */ 245 243 int flags; /* static flags of different events */ 246 244 ··· 257 253 enum { 258 254 FTRACE_EVENT_FL_ENABLED_BIT, 259 255 FTRACE_EVENT_FL_RECORDED_CMD_BIT, 256 + FTRACE_EVENT_FL_FILTERED_BIT, 257 + FTRACE_EVENT_FL_NO_SET_FILTER_BIT, 260 258 FTRACE_EVENT_FL_SOFT_MODE_BIT, 261 259 FTRACE_EVENT_FL_SOFT_DISABLED_BIT, 262 260 }; ··· 267 261 * Ftrace event file flags: 268 262 * ENABLED - The event is enabled 269 263 * RECORDED_CMD - The comms should be recorded at sched_switch 264 + * FILTERED - The event has a filter attached 265 + * NO_SET_FILTER - Set when filter has error and is to be ignored 270 266 * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED 271 267 * SOFT_DISABLED - When set, do not trace the event (even though its 272 268 * tracepoint may be enabled) ··· 276 268 enum { 277 269 FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT), 278 270 FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT), 271 + FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT), 272 + FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT), 279 273 FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT), 280 274 FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT), 281 275 }; ··· 285 275 struct ftrace_event_file { 286 276 struct list_head list; 287 277 struct ftrace_event_call *event_call; 278 + struct event_filter *filter; 288 279 struct dentry *dir; 289 280 struct trace_array *tr; 290 281 struct ftrace_subsystem_dir *system; ··· 321 310 322 311 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ 323 312 324 - extern void destroy_preds(struct ftrace_event_call *call); 313 + extern void destroy_preds(struct ftrace_event_file *file); 314 + extern void destroy_call_preds(struct ftrace_event_call *call); 325 315 extern int filter_match_preds(struct event_filter *filter, void *rec); 326 - extern int filter_current_check_discard(struct ring_buffer *buffer, 327 - struct ftrace_event_call *call, 328 - void *rec, 329 - struct ring_buffer_event *event); 316 + 317 + extern int filter_check_discard(struct ftrace_event_file *file, void *rec, 318 + struct ring_buffer *buffer, 319 + struct ring_buffer_event *event); 320 + extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec, 321 + struct ring_buffer *buffer, 322 + struct ring_buffer_event *event); 
330 323 331 324 enum { 332 325 FILTER_OTHER = 0,
-2
include/linux/kernel.h
··· 501 501 502 502 extern void tracing_start(void); 503 503 extern void tracing_stop(void); 504 - extern void ftrace_off_permanent(void); 505 504 506 505 static inline __printf(1, 2) 507 506 void ____trace_printk_check_format(const char *fmt, ...) ··· 638 639 #else 639 640 static inline void tracing_start(void) { } 640 641 static inline void tracing_stop(void) { } 641 - static inline void ftrace_off_permanent(void) { } 642 642 static inline void trace_dump_stack(int skip) { } 643 643 644 644 static inline void tracing_on(void) { }
+2 -2
include/linux/syscalls.h
··· 120 120 .class = &event_class_syscall_enter, \ 121 121 .event.funcs = &enter_syscall_print_funcs, \ 122 122 .data = (void *)&__syscall_meta_##sname,\ 123 - .flags = TRACE_EVENT_FL_CAP_ANY, \ 123 + .flags = TRACE_EVENT_FL_CAP_ANY, \ 124 124 }; \ 125 125 static struct ftrace_event_call __used \ 126 126 __attribute__((section("_ftrace_events"))) \ ··· 134 134 .class = &event_class_syscall_exit, \ 135 135 .event.funcs = &exit_syscall_print_funcs, \ 136 136 .data = (void *)&__syscall_meta_##sname,\ 137 - .flags = TRACE_EVENT_FL_CAP_ANY, \ 137 + .flags = TRACE_EVENT_FL_CAP_ANY, \ 138 138 }; \ 139 139 static struct ftrace_event_call __used \ 140 140 __attribute__((section("_ftrace_events"))) \
+3 -4
include/trace/ftrace.h
··· 437 437 * { <assign>; } <-- Here we assign the entries by the __field and 438 438 * __array macros. 439 439 * 440 - * if (!filter_current_check_discard(buffer, event_call, entry, event)) 441 - * trace_nowake_buffer_unlock_commit(buffer, 442 - * event, irq_flags, pc); 440 + * if (!filter_check_discard(ftrace_file, entry, buffer, event)) 441 + * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 443 442 * } 444 443 * 445 444 * static struct trace_event ftrace_event_type_<call> = { ··· 552 553 \ 553 554 { assign; } \ 554 555 \ 555 - if (!filter_current_check_discard(buffer, event_call, entry, event)) \ 556 + if (!filter_check_discard(ftrace_file, entry, buffer, event)) \ 556 557 trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \ 557 558 } 558 559 /*
+1 -1
kernel/rcu/tiny.c
··· 181 181 /* 182 182 * Test whether RCU thinks that the current CPU is idle. 183 183 */ 184 - bool __rcu_is_watching(void) 184 + bool notrace __rcu_is_watching(void) 185 185 { 186 186 return rcu_dynticks_nesting; 187 187 }
+2 -2
kernel/rcu/tree.c
··· 664 664 * rcu_is_watching(), the caller of __rcu_is_watching() must have at 665 665 * least disabled preemption. 666 666 */ 667 - bool __rcu_is_watching(void) 667 + bool notrace __rcu_is_watching(void) 668 668 { 669 669 return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1; 670 670 } ··· 675 675 * If the current CPU is in its idle loop and is neither in an interrupt 676 676 * or NMI handler, return true. 677 677 */ 678 - bool rcu_is_watching(void) 678 + bool notrace rcu_is_watching(void) 679 679 { 680 680 int ret; 681 681
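
The notrace annotations above exist because the function tracer itself is a
caller of rcu_is_watching(): if it were traced, the tracer could be re-entered
from inside its own callback. A hypothetical sketch of the pattern the
control-ops change below relies on (my_trace_callback is an invented name):
only touch RCU-protected state when RCU is actually watching this CPU, since
the callback can fire from idle/dynticks context.

    #include <linux/ftrace.h>
    #include <linux/rcupdate.h>

    /* Hypothetical function-trace callback, not part of this series. */
    static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs)
    {
            if (!rcu_is_watching())
                    return;                 /* e.g. idle loop: skip the event */

            rcu_read_lock();
            /* ... look up rcu_dereference()'d state keyed by ip ... */
            rcu_read_unlock();
    }
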
+123 -38
kernel/trace/ftrace.c
··· 3307 3307 static LIST_HEAD(ftrace_commands); 3308 3308 static DEFINE_MUTEX(ftrace_cmd_mutex); 3309 3309 3310 - int register_ftrace_command(struct ftrace_func_command *cmd) 3310 + /* 3311 + * Currently we only register ftrace commands from __init, so mark this 3312 + * __init too. 3313 + */ 3314 + __init int register_ftrace_command(struct ftrace_func_command *cmd) 3311 3315 { 3312 3316 struct ftrace_func_command *p; 3313 3317 int ret = 0; ··· 3330 3326 return ret; 3331 3327 } 3332 3328 3333 - int unregister_ftrace_command(struct ftrace_func_command *cmd) 3329 + /* 3330 + * Currently we only unregister ftrace commands from __init, so mark 3331 + * this __init too. 3332 + */ 3333 + __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 3334 3334 { 3335 3335 struct ftrace_func_command *p, *n; 3336 3336 int ret = -ENODEV; ··· 3649 3641 3650 3642 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 3651 3643 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 3652 - static int ftrace_set_func(unsigned long *array, int *idx, char *buffer); 3644 + static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); 3653 3645 3654 3646 static int __init set_graph_function(char *str) 3655 3647 { ··· 3667 3659 func = strsep(&buf, ","); 3668 3660 /* we allow only one expression at a time */ 3669 3661 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, 3670 - func); 3662 + FTRACE_GRAPH_MAX_FUNCS, func); 3671 3663 if (ret) 3672 3664 printk(KERN_DEBUG "ftrace: function %s not " 3673 3665 "traceable\n", func); ··· 3784 3776 static DEFINE_MUTEX(graph_lock); 3785 3777 3786 3778 int ftrace_graph_count; 3787 - int ftrace_graph_filter_enabled; 3779 + int ftrace_graph_notrace_count; 3788 3780 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; 3781 + unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; 3782 + 3783 + struct ftrace_graph_data { 3784 + unsigned long *table; 3785 + size_t size; 3786 + int *count; 3787 + const struct seq_operations *seq_ops; 3788 + }; 3789 3789 3790 3790 static void * 3791 3791 __g_next(struct seq_file *m, loff_t *pos) 3792 3792 { 3793 - if (*pos >= ftrace_graph_count) 3793 + struct ftrace_graph_data *fgd = m->private; 3794 + 3795 + if (*pos >= *fgd->count) 3794 3796 return NULL; 3795 - return &ftrace_graph_funcs[*pos]; 3797 + return &fgd->table[*pos]; 3796 3798 } 3797 3799 3798 3800 static void * ··· 3814 3796 3815 3797 static void *g_start(struct seq_file *m, loff_t *pos) 3816 3798 { 3799 + struct ftrace_graph_data *fgd = m->private; 3800 + 3817 3801 mutex_lock(&graph_lock); 3818 3802 3819 3803 /* Nothing, tell g_show to print all functions are enabled */ 3820 - if (!ftrace_graph_filter_enabled && !*pos) 3804 + if (!*fgd->count && !*pos) 3821 3805 return (void *)1; 3822 3806 3823 3807 return __g_next(m, pos); ··· 3855 3835 }; 3856 3836 3857 3837 static int 3858 - ftrace_graph_open(struct inode *inode, struct file *file) 3838 + __ftrace_graph_open(struct inode *inode, struct file *file, 3839 + struct ftrace_graph_data *fgd) 3859 3840 { 3860 3841 int ret = 0; 3861 - 3862 - if (unlikely(ftrace_disabled)) 3863 - return -ENODEV; 3864 3842 3865 3843 mutex_lock(&graph_lock); 3866 3844 if ((file->f_mode & FMODE_WRITE) && 3867 3845 (file->f_flags & O_TRUNC)) { 3868 - ftrace_graph_filter_enabled = 0; 3869 - ftrace_graph_count = 0; 3870 - memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); 3846 + *fgd->count = 0; 3847 + memset(fgd->table, 0, fgd->size * sizeof(*fgd->table)); 3871 3848 } 3872 3849 
mutex_unlock(&graph_lock); 3873 3850 3874 - if (file->f_mode & FMODE_READ) 3875 - ret = seq_open(file, &ftrace_graph_seq_ops); 3851 + if (file->f_mode & FMODE_READ) { 3852 + ret = seq_open(file, fgd->seq_ops); 3853 + if (!ret) { 3854 + struct seq_file *m = file->private_data; 3855 + m->private = fgd; 3856 + } 3857 + } else 3858 + file->private_data = fgd; 3876 3859 3877 3860 return ret; 3878 3861 } 3879 3862 3880 3863 static int 3864 + ftrace_graph_open(struct inode *inode, struct file *file) 3865 + { 3866 + struct ftrace_graph_data *fgd; 3867 + 3868 + if (unlikely(ftrace_disabled)) 3869 + return -ENODEV; 3870 + 3871 + fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 3872 + if (fgd == NULL) 3873 + return -ENOMEM; 3874 + 3875 + fgd->table = ftrace_graph_funcs; 3876 + fgd->size = FTRACE_GRAPH_MAX_FUNCS; 3877 + fgd->count = &ftrace_graph_count; 3878 + fgd->seq_ops = &ftrace_graph_seq_ops; 3879 + 3880 + return __ftrace_graph_open(inode, file, fgd); 3881 + } 3882 + 3883 + static int 3884 + ftrace_graph_notrace_open(struct inode *inode, struct file *file) 3885 + { 3886 + struct ftrace_graph_data *fgd; 3887 + 3888 + if (unlikely(ftrace_disabled)) 3889 + return -ENODEV; 3890 + 3891 + fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 3892 + if (fgd == NULL) 3893 + return -ENOMEM; 3894 + 3895 + fgd->table = ftrace_graph_notrace_funcs; 3896 + fgd->size = FTRACE_GRAPH_MAX_FUNCS; 3897 + fgd->count = &ftrace_graph_notrace_count; 3898 + fgd->seq_ops = &ftrace_graph_seq_ops; 3899 + 3900 + return __ftrace_graph_open(inode, file, fgd); 3901 + } 3902 + 3903 + static int 3881 3904 ftrace_graph_release(struct inode *inode, struct file *file) 3882 3905 { 3883 - if (file->f_mode & FMODE_READ) 3906 + if (file->f_mode & FMODE_READ) { 3907 + struct seq_file *m = file->private_data; 3908 + 3909 + kfree(m->private); 3884 3910 seq_release(inode, file); 3911 + } else { 3912 + kfree(file->private_data); 3913 + } 3914 + 3885 3915 return 0; 3886 3916 } 3887 3917 3888 3918 static int 3889 - ftrace_set_func(unsigned long *array, int *idx, char *buffer) 3919 + ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) 3890 3920 { 3891 3921 struct dyn_ftrace *rec; 3892 3922 struct ftrace_page *pg; ··· 3949 3879 3950 3880 /* decode regex */ 3951 3881 type = filter_parse_regex(buffer, strlen(buffer), &search, &not); 3952 - if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS) 3882 + if (!not && *idx >= size) 3953 3883 return -EBUSY; 3954 3884 3955 3885 search_len = strlen(search); ··· 3977 3907 fail = 0; 3978 3908 if (!exists) { 3979 3909 array[(*idx)++] = rec->ip; 3980 - if (*idx >= FTRACE_GRAPH_MAX_FUNCS) 3910 + if (*idx >= size) 3981 3911 goto out; 3982 3912 } 3983 3913 } else { ··· 3995 3925 if (fail) 3996 3926 return -EINVAL; 3997 3927 3998 - ftrace_graph_filter_enabled = !!(*idx); 3999 - 4000 3928 return 0; 4001 3929 } 4002 3930 ··· 4003 3935 size_t cnt, loff_t *ppos) 4004 3936 { 4005 3937 struct trace_parser parser; 4006 - ssize_t read, ret; 3938 + ssize_t read, ret = 0; 3939 + struct ftrace_graph_data *fgd = file->private_data; 4007 3940 4008 3941 if (!cnt) 4009 3942 return 0; 4010 3943 4011 - mutex_lock(&graph_lock); 4012 - 4013 - if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { 4014 - ret = -ENOMEM; 4015 - goto out_unlock; 4016 - } 3944 + if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) 3945 + return -ENOMEM; 4017 3946 4018 3947 read = trace_get_user(&parser, ubuf, cnt, ppos); 4019 3948 4020 3949 if (read >= 0 && trace_parser_loaded((&parser))) { 4021 3950 parser.buffer[parser.idx] = 0; 4022 3951 3952 + 
mutex_lock(&graph_lock); 3953 + 4023 3954 /* we allow only one expression at a time */ 4024 - ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, 4025 - parser.buffer); 4026 - if (ret) 4027 - goto out_free; 3955 + ret = ftrace_set_func(fgd->table, fgd->count, fgd->size, 3956 + parser.buffer); 3957 + 3958 + mutex_unlock(&graph_lock); 4028 3959 } 4029 3960 4030 - ret = read; 3961 + if (!ret) 3962 + ret = read; 4031 3963 4032 - out_free: 4033 3964 trace_parser_put(&parser); 4034 - out_unlock: 4035 - mutex_unlock(&graph_lock); 4036 3965 4037 3966 return ret; 4038 3967 } 4039 3968 4040 3969 static const struct file_operations ftrace_graph_fops = { 4041 3970 .open = ftrace_graph_open, 3971 + .read = seq_read, 3972 + .write = ftrace_graph_write, 3973 + .llseek = ftrace_filter_lseek, 3974 + .release = ftrace_graph_release, 3975 + }; 3976 + 3977 + static const struct file_operations ftrace_graph_notrace_fops = { 3978 + .open = ftrace_graph_notrace_open, 4042 3979 .read = seq_read, 4043 3980 .write = ftrace_graph_write, 4044 3981 .llseek = ftrace_filter_lseek, ··· 4070 3997 trace_create_file("set_graph_function", 0444, d_tracer, 4071 3998 NULL, 4072 3999 &ftrace_graph_fops); 4000 + trace_create_file("set_graph_notrace", 0444, d_tracer, 4001 + NULL, 4002 + &ftrace_graph_notrace_fops); 4073 4003 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4074 4004 4075 4005 return 0; ··· 4396 4320 */ 4397 4321 preempt_disable_notrace(); 4398 4322 trace_recursion_set(TRACE_CONTROL_BIT); 4323 + 4324 + /* 4325 + * Control funcs (perf) uses RCU. Only trace if 4326 + * RCU is currently active. 4327 + */ 4328 + if (!rcu_is_watching()) 4329 + goto out; 4330 + 4399 4331 do_for_each_ftrace_op(op, ftrace_control_list) { 4400 4332 if (!(op->flags & FTRACE_OPS_FL_STUB) && 4401 4333 !ftrace_function_local_disabled(op) && 4402 4334 ftrace_ops_test(op, ip, regs)) 4403 4335 op->func(ip, parent_ip, op, regs); 4404 4336 } while_for_each_ftrace_op(op); 4337 + out: 4405 4338 trace_recursion_clear(TRACE_CONTROL_BIT); 4406 4339 preempt_enable_notrace(); 4407 4340 }
+53 -29
kernel/trace/trace.c
··· 235 235 mutex_unlock(&trace_types_lock); 236 236 } 237 237 238 - int filter_current_check_discard(struct ring_buffer *buffer, 239 - struct ftrace_event_call *call, void *rec, 240 - struct ring_buffer_event *event) 238 + int filter_check_discard(struct ftrace_event_file *file, void *rec, 239 + struct ring_buffer *buffer, 240 + struct ring_buffer_event *event) 241 241 { 242 - return filter_check_discard(call, rec, buffer, event); 242 + if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) && 243 + !filter_match_preds(file->filter, rec)) { 244 + ring_buffer_discard_commit(buffer, event); 245 + return 1; 246 + } 247 + 248 + return 0; 243 249 } 244 - EXPORT_SYMBOL_GPL(filter_current_check_discard); 250 + EXPORT_SYMBOL_GPL(filter_check_discard); 251 + 252 + int call_filter_check_discard(struct ftrace_event_call *call, void *rec, 253 + struct ring_buffer *buffer, 254 + struct ring_buffer_event *event) 255 + { 256 + if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && 257 + !filter_match_preds(call->filter, rec)) { 258 + ring_buffer_discard_commit(buffer, event); 259 + return 1; 260 + } 261 + 262 + return 0; 263 + } 264 + EXPORT_SYMBOL_GPL(call_filter_check_discard); 245 265 246 266 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) 247 267 { ··· 863 843 if (isspace(ch)) { 864 844 parser->buffer[parser->idx] = 0; 865 845 parser->cont = false; 866 - } else { 846 + } else if (parser->idx < parser->size - 1) { 867 847 parser->cont = true; 868 848 parser->buffer[parser->idx++] = ch; 849 + } else { 850 + ret = -EINVAL; 851 + goto out; 869 852 } 870 853 871 854 *ppos += read; ··· 1284 1261 } 1285 1262 1286 1263 /** 1287 - * ftrace_off_permanent - disable all ftrace code permanently 1288 - * 1289 - * This should only be called when a serious anomally has 1290 - * been detected. This will turn off the function tracing, 1291 - * ring buffers, and other tracing utilites. It takes no 1292 - * locks and can be called from any context. 
1293 - */ 1294 - void ftrace_off_permanent(void) 1295 - { 1296 - tracing_disabled = 1; 1297 - ftrace_stop(); 1298 - tracing_off_permanent(); 1299 - } 1300 - 1301 - /** 1302 1264 * tracing_start - quick start of the tracer 1303 1265 * 1304 1266 * If tracing is enabled but was stopped by tracing_stop, ··· 1639 1631 entry->ip = ip; 1640 1632 entry->parent_ip = parent_ip; 1641 1633 1642 - if (!filter_check_discard(call, entry, buffer, event)) 1634 + if (!call_filter_check_discard(call, entry, buffer, event)) 1643 1635 __buffer_unlock_commit(buffer, event); 1644 1636 } 1645 1637 ··· 1723 1715 1724 1716 entry->size = trace.nr_entries; 1725 1717 1726 - if (!filter_check_discard(call, entry, buffer, event)) 1718 + if (!call_filter_check_discard(call, entry, buffer, event)) 1727 1719 __buffer_unlock_commit(buffer, event); 1728 1720 1729 1721 out: ··· 1825 1817 trace.entries = entry->caller; 1826 1818 1827 1819 save_stack_trace_user(&trace); 1828 - if (!filter_check_discard(call, entry, buffer, event)) 1820 + if (!call_filter_check_discard(call, entry, buffer, event)) 1829 1821 __buffer_unlock_commit(buffer, event); 1830 1822 1831 1823 out_drop_count: ··· 2017 2009 entry->fmt = fmt; 2018 2010 2019 2011 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 2020 - if (!filter_check_discard(call, entry, buffer, event)) { 2012 + if (!call_filter_check_discard(call, entry, buffer, event)) { 2021 2013 __buffer_unlock_commit(buffer, event); 2022 2014 ftrace_trace_stack(buffer, flags, 6, pc); 2023 2015 } ··· 2072 2064 2073 2065 memcpy(&entry->buf, tbuffer, len); 2074 2066 entry->buf[len] = '\0'; 2075 - if (!filter_check_discard(call, entry, buffer, event)) { 2067 + if (!call_filter_check_discard(call, entry, buffer, event)) { 2076 2068 __buffer_unlock_commit(buffer, event); 2077 2069 ftrace_trace_stack(buffer, flags, 6, pc); 2078 2070 } ··· 2769 2761 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); 2770 2762 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); 2771 2763 seq_printf(m, "# Takes a snapshot of the main buffer.\n"); 2772 - seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n"); 2764 + seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"); 2773 2765 seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); 2774 2766 seq_printf(m, "# is not a '0' or '1')\n"); 2775 2767 } ··· 2971 2963 2972 2964 filp->private_data = inode->i_private; 2973 2965 return 0; 2966 + } 2967 + 2968 + bool tracing_is_disabled(void) 2969 + { 2970 + return (tracing_disabled) ? 
true: false; 2974 2971 } 2975 2972 2976 2973 /* ··· 5468 5455 .func = ftrace_trace_snapshot_callback, 5469 5456 }; 5470 5457 5471 - static int register_snapshot_cmd(void) 5458 + static __init int register_snapshot_cmd(void) 5472 5459 { 5473 5460 return register_ftrace_command(&ftrace_snapshot_cmd); 5474 5461 } 5475 5462 #else 5476 - static inline int register_snapshot_cmd(void) { return 0; } 5463 + static inline __init int register_snapshot_cmd(void) { return 0; } 5477 5464 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ 5478 5465 5479 5466 struct dentry *tracing_init_dentry_tr(struct trace_array *tr) ··· 6267 6254 iter->trace = iter->tr->current_trace; 6268 6255 iter->cpu_file = RING_BUFFER_ALL_CPUS; 6269 6256 iter->trace_buffer = &global_trace.trace_buffer; 6257 + 6258 + if (iter->trace && iter->trace->open) 6259 + iter->trace->open(iter); 6260 + 6261 + /* Annotate start of buffers if we had overruns */ 6262 + if (ring_buffer_overruns(iter->trace_buffer->buffer)) 6263 + iter->iter_flags |= TRACE_FILE_ANNOTATE; 6264 + 6265 + /* Output in nanoseconds only if we are using a clock in nanoseconds. */ 6266 + if (trace_clocks[iter->tr->clock_id].in_ns) 6267 + iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 6270 6268 } 6271 6269 6272 6270 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+30 -20
kernel/trace/trace.h
··· 193 193 #ifdef CONFIG_FTRACE_SYSCALLS 194 194 int sys_refcount_enter; 195 195 int sys_refcount_exit; 196 - DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); 197 - DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); 196 + struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls]; 197 + struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls]; 198 198 #endif 199 199 int stop_count; 200 200 int clock_id; ··· 515 515 void tracing_reset_current(int cpu); 516 516 void tracing_reset_all_online_cpus(void); 517 517 int tracing_open_generic(struct inode *inode, struct file *filp); 518 + bool tracing_is_disabled(void); 518 519 struct dentry *trace_create_file(const char *name, 519 520 umode_t mode, 520 521 struct dentry *parent, ··· 713 712 #define TRACE_GRAPH_PRINT_PROC 0x8 714 713 #define TRACE_GRAPH_PRINT_DURATION 0x10 715 714 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 715 + #define TRACE_GRAPH_PRINT_FILL_SHIFT 28 716 + #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) 716 717 717 718 extern enum print_line_t 718 719 print_graph_function_flags(struct trace_iterator *iter, u32 flags); ··· 734 731 #ifdef CONFIG_DYNAMIC_FTRACE 735 732 /* TODO: make this variable */ 736 733 #define FTRACE_GRAPH_MAX_FUNCS 32 737 - extern int ftrace_graph_filter_enabled; 738 734 extern int ftrace_graph_count; 739 735 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; 736 + extern int ftrace_graph_notrace_count; 737 + extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS]; 740 738 741 739 static inline int ftrace_graph_addr(unsigned long addr) 742 740 { 743 741 int i; 744 742 745 - if (!ftrace_graph_filter_enabled) 743 + if (!ftrace_graph_count) 746 744 return 1; 747 745 748 746 for (i = 0; i < ftrace_graph_count; i++) { ··· 763 759 764 760 return 0; 765 761 } 762 + 763 + static inline int ftrace_graph_notrace_addr(unsigned long addr) 764 + { 765 + int i; 766 + 767 + if (!ftrace_graph_notrace_count) 768 + return 0; 769 + 770 + for (i = 0; i < ftrace_graph_notrace_count; i++) { 771 + if (addr == ftrace_graph_notrace_funcs[i]) 772 + return 1; 773 + } 774 + 775 + return 0; 776 + } 766 777 #else 767 778 static inline int ftrace_graph_addr(unsigned long addr) 768 779 { 769 780 return 1; 781 + } 782 + 783 + static inline int ftrace_graph_notrace_addr(unsigned long addr) 784 + { 785 + return 0; 770 786 } 771 787 #endif /* CONFIG_DYNAMIC_FTRACE */ 772 788 #else /* CONFIG_FUNCTION_GRAPH_TRACER */ ··· 1011 987 1012 988 extern enum regex_type 1013 989 filter_parse_regex(char *buff, int len, char **search, int *not); 1014 - extern void print_event_filter(struct ftrace_event_call *call, 990 + extern void print_event_filter(struct ftrace_event_file *file, 1015 991 struct trace_seq *s); 1016 - extern int apply_event_filter(struct ftrace_event_call *call, 992 + extern int apply_event_filter(struct ftrace_event_file *file, 1017 993 char *filter_string); 1018 994 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, 1019 995 char *filter_string); ··· 1023 999 1024 1000 struct ftrace_event_field * 1025 1001 trace_find_event_field(struct ftrace_event_call *call, char *name); 1026 - 1027 - static inline int 1028 - filter_check_discard(struct ftrace_event_call *call, void *rec, 1029 - struct ring_buffer *buffer, 1030 - struct ring_buffer_event *event) 1031 - { 1032 - if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) && 1033 - !filter_match_preds(call->filter, rec)) { 1034 - ring_buffer_discard_commit(buffer, event); 1035 - return 1; 1036 - } 1037 - 
1038 - return 0; 1039 - } 1040 1002 1041 1003 extern void trace_event_enable_cmd_record(bool enable); 1042 1004 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
+1 -1
kernel/trace/trace_branch.c
··· 78 78 entry->line = f->line; 79 79 entry->correct = val == expect; 80 80 81 - if (!filter_check_discard(call, entry, buffer, event)) 81 + if (!call_filter_check_discard(call, entry, buffer, event)) 82 82 __buffer_unlock_commit(buffer, event); 83 83 84 84 out:
+20 -12
kernel/trace/trace_events.c
··· 989 989 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 990 990 loff_t *ppos) 991 991 { 992 - struct ftrace_event_call *call; 992 + struct ftrace_event_file *file; 993 993 struct trace_seq *s; 994 994 int r = -ENODEV; 995 995 ··· 1004 1004 trace_seq_init(s); 1005 1005 1006 1006 mutex_lock(&event_mutex); 1007 - call = event_file_data(filp); 1008 - if (call) 1009 - print_event_filter(call, s); 1007 + file = event_file_data(filp); 1008 + if (file) 1009 + print_event_filter(file, s); 1010 1010 mutex_unlock(&event_mutex); 1011 1011 1012 - if (call) 1012 + if (file) 1013 1013 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); 1014 1014 1015 1015 kfree(s); ··· 1021 1021 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1022 1022 loff_t *ppos) 1023 1023 { 1024 - struct ftrace_event_call *call; 1024 + struct ftrace_event_file *file; 1025 1025 char *buf; 1026 1026 int err = -ENODEV; 1027 1027 ··· 1039 1039 buf[cnt] = '\0'; 1040 1040 1041 1041 mutex_lock(&event_mutex); 1042 - call = event_file_data(filp); 1043 - if (call) 1044 - err = apply_event_filter(call, buf); 1042 + file = event_file_data(filp); 1043 + if (file) 1044 + err = apply_event_filter(file, buf); 1045 1045 mutex_unlock(&event_mutex); 1046 1046 1047 1047 free_page((unsigned long) buf); ··· 1061 1061 struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ 1062 1062 struct trace_array *tr; 1063 1063 int ret; 1064 + 1065 + if (tracing_is_disabled()) 1066 + return -ENODEV; 1064 1067 1065 1068 /* Make sure the system still exists */ 1066 1069 mutex_lock(&trace_types_lock); ··· 1111 1108 struct trace_array *tr = inode->i_private; 1112 1109 int ret; 1113 1110 1111 + if (tracing_is_disabled()) 1112 + return -ENODEV; 1113 + 1114 1114 if (trace_array_get(tr) < 0) 1115 1115 return -ENODEV; 1116 1116 ··· 1130 1124 if (ret < 0) { 1131 1125 trace_array_put(tr); 1132 1126 kfree(dir); 1127 + return ret; 1133 1128 } 1134 1129 1135 1130 filp->private_data = dir; 1136 1131 1137 - return ret; 1132 + return 0; 1138 1133 } 1139 1134 1140 1135 static int subsystem_release(struct inode *inode, struct file *file) ··· 1546 1539 return -1; 1547 1540 } 1548 1541 } 1549 - trace_create_file("filter", 0644, file->dir, call, 1542 + trace_create_file("filter", 0644, file->dir, file, 1550 1543 &ftrace_event_filter_fops); 1551 1544 1552 1545 trace_create_file("format", 0444, file->dir, call, ··· 1584 1577 if (file->event_call != call) 1585 1578 continue; 1586 1579 ftrace_event_enable_disable(file, 0); 1580 + destroy_preds(file); 1587 1581 /* 1588 1582 * The do_for_each_event_file() is 1589 1583 * a double loop. After finding the call for this ··· 1708 1700 { 1709 1701 event_remove(call); 1710 1702 trace_destroy_fields(call); 1711 - destroy_preds(call); 1703 + destroy_call_preds(call); 1712 1704 } 1713 1705 1714 1706 static int probe_remove_event_call(struct ftrace_event_call *call)
+182 -42
kernel/trace/trace_events_filter.c
··· 637 637 free_page((unsigned long) buf); 638 638 } 639 639 640 - /* caller must hold event_mutex */ 641 - void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) 640 + static inline struct event_filter *event_filter(struct ftrace_event_file *file) 642 641 { 643 - struct event_filter *filter = call->filter; 642 + if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 643 + return file->event_call->filter; 644 + else 645 + return file->filter; 646 + } 647 + 648 + /* caller must hold event_mutex */ 649 + void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s) 650 + { 651 + struct event_filter *filter = event_filter(file); 644 652 645 653 if (filter && filter->filter_string) 646 654 trace_seq_printf(s, "%s\n", filter->filter_string); ··· 774 766 filter->n_preds = 0; 775 767 } 776 768 777 - static void filter_disable(struct ftrace_event_call *call) 769 + static void call_filter_disable(struct ftrace_event_call *call) 778 770 { 779 771 call->flags &= ~TRACE_EVENT_FL_FILTERED; 772 + } 773 + 774 + static void filter_disable(struct ftrace_event_file *file) 775 + { 776 + struct ftrace_event_call *call = file->event_call; 777 + 778 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 779 + call_filter_disable(call); 780 + else 781 + file->flags &= ~FTRACE_EVENT_FL_FILTERED; 780 782 } 781 783 782 784 static void __free_filter(struct event_filter *filter) ··· 799 781 kfree(filter); 800 782 } 801 783 802 - /* 803 - * Called when destroying the ftrace_event_call. 804 - * The call is being freed, so we do not need to worry about 805 - * the call being currently used. This is for module code removing 806 - * the tracepoints from within it. 807 - */ 808 - void destroy_preds(struct ftrace_event_call *call) 784 + void destroy_call_preds(struct ftrace_event_call *call) 809 785 { 810 786 __free_filter(call->filter); 811 787 call->filter = NULL; 788 + } 789 + 790 + static void destroy_file_preds(struct ftrace_event_file *file) 791 + { 792 + __free_filter(file->filter); 793 + file->filter = NULL; 794 + } 795 + 796 + /* 797 + * Called when destroying the ftrace_event_file. 798 + * The file is being freed, so we do not need to worry about 799 + * the file being currently used. This is for module code removing 800 + * the tracepoints from within it. 
801 + */ 802 + void destroy_preds(struct ftrace_event_file *file) 803 + { 804 + if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 805 + destroy_call_preds(file->event_call); 806 + else 807 + destroy_file_preds(file); 812 808 } 813 809 814 810 static struct event_filter *__alloc_filter(void) ··· 857 825 return 0; 858 826 } 859 827 860 - static void filter_free_subsystem_preds(struct event_subsystem *system) 828 + static inline void __remove_filter(struct ftrace_event_file *file) 861 829 { 830 + struct ftrace_event_call *call = file->event_call; 831 + 832 + filter_disable(file); 833 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 834 + remove_filter_string(call->filter); 835 + else 836 + remove_filter_string(file->filter); 837 + } 838 + 839 + static void filter_free_subsystem_preds(struct event_subsystem *system, 840 + struct trace_array *tr) 841 + { 842 + struct ftrace_event_file *file; 862 843 struct ftrace_event_call *call; 863 844 864 - list_for_each_entry(call, &ftrace_events, list) { 845 + list_for_each_entry(file, &tr->events, list) { 846 + call = file->event_call; 865 847 if (strcmp(call->class->system, system->name) != 0) 866 848 continue; 867 849 868 - filter_disable(call); 869 - remove_filter_string(call->filter); 850 + __remove_filter(file); 870 851 } 871 852 } 872 853 873 - static void filter_free_subsystem_filters(struct event_subsystem *system) 854 + static inline void __free_subsystem_filter(struct ftrace_event_file *file) 874 855 { 875 - struct ftrace_event_call *call; 856 + struct ftrace_event_call *call = file->event_call; 876 857 877 - list_for_each_entry(call, &ftrace_events, list) { 878 - if (strcmp(call->class->system, system->name) != 0) 879 - continue; 858 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) { 880 859 __free_filter(call->filter); 881 860 call->filter = NULL; 861 + } else { 862 + __free_filter(file->filter); 863 + file->filter = NULL; 864 + } 865 + } 866 + 867 + static void filter_free_subsystem_filters(struct event_subsystem *system, 868 + struct trace_array *tr) 869 + { 870 + struct ftrace_event_file *file; 871 + struct ftrace_event_call *call; 872 + 873 + list_for_each_entry(file, &tr->events, list) { 874 + call = file->event_call; 875 + if (strcmp(call->class->system, system->name) != 0) 876 + continue; 877 + __free_subsystem_filter(file); 882 878 } 883 879 } 884 880 ··· 1677 1617 return err; 1678 1618 } 1679 1619 1620 + static inline void event_set_filtered_flag(struct ftrace_event_file *file) 1621 + { 1622 + struct ftrace_event_call *call = file->event_call; 1623 + 1624 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1625 + call->flags |= TRACE_EVENT_FL_FILTERED; 1626 + else 1627 + file->flags |= FTRACE_EVENT_FL_FILTERED; 1628 + } 1629 + 1630 + static inline void event_set_filter(struct ftrace_event_file *file, 1631 + struct event_filter *filter) 1632 + { 1633 + struct ftrace_event_call *call = file->event_call; 1634 + 1635 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1636 + rcu_assign_pointer(call->filter, filter); 1637 + else 1638 + rcu_assign_pointer(file->filter, filter); 1639 + } 1640 + 1641 + static inline void event_clear_filter(struct ftrace_event_file *file) 1642 + { 1643 + struct ftrace_event_call *call = file->event_call; 1644 + 1645 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1646 + RCU_INIT_POINTER(call->filter, NULL); 1647 + else 1648 + RCU_INIT_POINTER(file->filter, NULL); 1649 + } 1650 + 1651 + static inline void 1652 + event_set_no_set_filter_flag(struct ftrace_event_file *file) 1653 + { 1654 + 
struct ftrace_event_call *call = file->event_call; 1655 + 1656 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1657 + call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; 1658 + else 1659 + file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER; 1660 + } 1661 + 1662 + static inline void 1663 + event_clear_no_set_filter_flag(struct ftrace_event_file *file) 1664 + { 1665 + struct ftrace_event_call *call = file->event_call; 1666 + 1667 + if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1668 + call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; 1669 + else 1670 + file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER; 1671 + } 1672 + 1673 + static inline bool 1674 + event_no_set_filter_flag(struct ftrace_event_file *file) 1675 + { 1676 + struct ftrace_event_call *call = file->event_call; 1677 + 1678 + if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER) 1679 + return true; 1680 + 1681 + if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) && 1682 + (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)) 1683 + return true; 1684 + 1685 + return false; 1686 + } 1687 + 1680 1688 struct filter_list { 1681 1689 struct list_head list; 1682 1690 struct event_filter *filter; 1683 1691 }; 1684 1692 1685 1693 static int replace_system_preds(struct event_subsystem *system, 1694 + struct trace_array *tr, 1686 1695 struct filter_parse_state *ps, 1687 1696 char *filter_string) 1688 1697 { 1698 + struct ftrace_event_file *file; 1689 1699 struct ftrace_event_call *call; 1690 1700 struct filter_list *filter_item; 1691 1701 struct filter_list *tmp; ··· 1763 1633 bool fail = true; 1764 1634 int err; 1765 1635 1766 - list_for_each_entry(call, &ftrace_events, list) { 1767 - 1636 + list_for_each_entry(file, &tr->events, list) { 1637 + call = file->event_call; 1768 1638 if (strcmp(call->class->system, system->name) != 0) 1769 1639 continue; 1770 1640 ··· 1774 1644 */ 1775 1645 err = replace_preds(call, NULL, ps, filter_string, true); 1776 1646 if (err) 1777 - call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; 1647 + event_set_no_set_filter_flag(file); 1778 1648 else 1779 - call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; 1649 + event_clear_no_set_filter_flag(file); 1780 1650 } 1781 1651 1782 - list_for_each_entry(call, &ftrace_events, list) { 1652 + list_for_each_entry(file, &tr->events, list) { 1783 1653 struct event_filter *filter; 1654 + 1655 + call = file->event_call; 1784 1656 1785 1657 if (strcmp(call->class->system, system->name) != 0) 1786 1658 continue; 1787 1659 1788 - if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER) 1660 + if (event_no_set_filter_flag(file)) 1789 1661 continue; 1790 1662 1791 1663 filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); ··· 1808 1676 1809 1677 err = replace_preds(call, filter, ps, filter_string, false); 1810 1678 if (err) { 1811 - filter_disable(call); 1679 + filter_disable(file); 1812 1680 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); 1813 1681 append_filter_err(ps, filter); 1814 1682 } else 1815 - call->flags |= TRACE_EVENT_FL_FILTERED; 1683 + event_set_filtered_flag(file); 1816 1684 /* 1817 1685 * Regardless of if this returned an error, we still 1818 1686 * replace the filter for the call. 1819 1687 */ 1820 - filter = call->filter; 1821 - rcu_assign_pointer(call->filter, filter_item->filter); 1688 + filter = event_filter(file); 1689 + event_set_filter(file, filter_item->filter); 1822 1690 filter_item->filter = filter; 1823 1691 1824 1692 fail = false; ··· 1948 1816 * and always remembers @filter_str. 
1949 1817 */ 1950 1818 static int create_system_filter(struct event_subsystem *system, 1819 + struct trace_array *tr, 1951 1820 char *filter_str, struct event_filter **filterp) 1952 1821 { 1953 1822 struct event_filter *filter = NULL; ··· 1957 1824 1958 1825 err = create_filter_start(filter_str, true, &ps, &filter); 1959 1826 if (!err) { 1960 - err = replace_system_preds(system, ps, filter_str); 1827 + err = replace_system_preds(system, tr, ps, filter_str); 1961 1828 if (!err) { 1962 1829 /* System filters just show a default message */ 1963 1830 kfree(filter->filter_string); ··· 1973 1840 } 1974 1841 1975 1842 /* caller must hold event_mutex */ 1976 - int apply_event_filter(struct ftrace_event_call *call, char *filter_string) 1843 + int apply_event_filter(struct ftrace_event_file *file, char *filter_string) 1977 1844 { 1845 + struct ftrace_event_call *call = file->event_call; 1978 1846 struct event_filter *filter; 1979 1847 int err; 1980 1848 1981 1849 if (!strcmp(strstrip(filter_string), "0")) { 1982 - filter_disable(call); 1983 - filter = call->filter; 1850 + filter_disable(file); 1851 + filter = event_filter(file); 1852 + 1984 1853 if (!filter) 1985 1854 return 0; 1986 - RCU_INIT_POINTER(call->filter, NULL); 1855 + 1856 + event_clear_filter(file); 1857 + 1987 1858 /* Make sure the filter is not being used */ 1988 1859 synchronize_sched(); 1989 1860 __free_filter(filter); 1861 + 1990 1862 return 0; 1991 1863 } 1992 1864 ··· 2004 1866 * string 2005 1867 */ 2006 1868 if (filter) { 2007 - struct event_filter *tmp = call->filter; 1869 + struct event_filter *tmp; 2008 1870 1871 + tmp = event_filter(file); 2009 1872 if (!err) 2010 - call->flags |= TRACE_EVENT_FL_FILTERED; 1873 + event_set_filtered_flag(file); 2011 1874 else 2012 - filter_disable(call); 1875 + filter_disable(file); 2013 1876 2014 - rcu_assign_pointer(call->filter, filter); 1877 + event_set_filter(file, filter); 2015 1878 2016 1879 if (tmp) { 2017 1880 /* Make sure the call is done with the filter */ ··· 2028 1889 char *filter_string) 2029 1890 { 2030 1891 struct event_subsystem *system = dir->subsystem; 1892 + struct trace_array *tr = dir->tr; 2031 1893 struct event_filter *filter; 2032 1894 int err = 0; 2033 1895 ··· 2041 1901 } 2042 1902 2043 1903 if (!strcmp(strstrip(filter_string), "0")) { 2044 - filter_free_subsystem_preds(system); 1904 + filter_free_subsystem_preds(system, tr); 2045 1905 remove_filter_string(system->filter); 2046 1906 filter = system->filter; 2047 1907 system->filter = NULL; 2048 1908 /* Ensure all filters are no longer used */ 2049 1909 synchronize_sched(); 2050 - filter_free_subsystem_filters(system); 1910 + filter_free_subsystem_filters(system, tr); 2051 1911 __free_filter(filter); 2052 1912 goto out_unlock; 2053 1913 } 2054 1914 2055 - err = create_system_filter(system, filter_string, &filter); 1915 + err = create_system_filter(system, tr, filter_string, &filter); 2056 1916 if (filter) { 2057 1917 /* 2058 1918 * No event actually uses the system filter
+1 -1
kernel/trace/trace_export.c
··· 180 180 .event.type = etype, \ 181 181 .class = &event_class_ftrace_##call, \ 182 182 .print_fmt = print, \ 183 - .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ 183 + .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ 184 184 }; \ 185 185 struct ftrace_event_call __used \ 186 186 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
+66 -16
kernel/trace/trace_functions_graph.c
··· 82 82 * to fill in space into DURATION column. 83 83 */ 84 84 enum { 85 - DURATION_FILL_FULL = -1, 86 - DURATION_FILL_START = -2, 87 - DURATION_FILL_END = -3, 85 + FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT, 86 + FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT, 87 + FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, 88 88 }; 89 89 90 90 static enum print_line_t ··· 114 114 return -EBUSY; 115 115 } 116 116 117 + /* 118 + * The curr_ret_stack is an index to ftrace return stack of 119 + * current task. Its value should be in [0, FTRACE_RETFUNC_ 120 + * DEPTH) when the function graph tracer is used. To support 121 + * filtering out specific functions, it makes the index 122 + * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH) 123 + * so when it sees a negative index the ftrace will ignore 124 + * the record. And the index gets recovered when returning 125 + * from the filtered function by adding the FTRACE_NOTRACE_ 126 + * DEPTH and then it'll continue to record functions normally. 127 + * 128 + * The curr_ret_stack is initialized to -1 and get increased 129 + * in this function. So it can be less than -1 only if it was 130 + * filtered out via ftrace_graph_notrace_addr() which can be 131 + * set from set_graph_notrace file in debugfs by user. 132 + */ 133 + if (current->curr_ret_stack < -1) 134 + return -EBUSY; 135 + 117 136 calltime = trace_clock_local(); 118 137 119 138 index = ++current->curr_ret_stack; 139 + if (ftrace_graph_notrace_addr(func)) 140 + current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH; 120 141 barrier(); 121 142 current->ret_stack[index].ret = ret; 122 143 current->ret_stack[index].func = func; 123 144 current->ret_stack[index].calltime = calltime; 124 145 current->ret_stack[index].subtime = 0; 125 146 current->ret_stack[index].fp = frame_pointer; 126 - *depth = index; 147 + *depth = current->curr_ret_stack; 127 148 128 149 return 0; 129 150 } ··· 158 137 159 138 index = current->curr_ret_stack; 160 139 161 - if (unlikely(index < 0)) { 140 + /* 141 + * A negative index here means that it's just returned from a 142 + * notrace'd function. Recover index to get an original 143 + * return address. See ftrace_push_return_trace(). 144 + * 145 + * TODO: Need to check whether the stack gets corrupted. 146 + */ 147 + if (index < 0) 148 + index += FTRACE_NOTRACE_DEPTH; 149 + 150 + if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) { 162 151 ftrace_graph_stop(); 163 152 WARN_ON(1); 164 153 /* Might as well panic, otherwise we have no where to go */ ··· 224 193 trace.rettime = trace_clock_local(); 225 194 barrier(); 226 195 current->curr_ret_stack--; 196 + /* 197 + * The curr_ret_stack can be less than -1 only if it was 198 + * filtered out and it's about to return from the function. 199 + * Recover the index and continue to trace normal functions. 200 + */ 201 + if (current->curr_ret_stack < -1) { 202 + current->curr_ret_stack += FTRACE_NOTRACE_DEPTH; 203 + return ret; 204 + } 227 205 228 206 /* 229 207 * The trace should run after decrementing the ret counter ··· 270 230 return 0; 271 231 entry = ring_buffer_event_data(event); 272 232 entry->graph_ent = *trace; 273 - if (!filter_current_check_discard(buffer, call, entry, event)) 233 + if (!call_filter_check_discard(call, entry, buffer, event)) 274 234 __buffer_unlock_commit(buffer, event); 275 235 276 236 return 1; ··· 299 259 300 260 /* trace it when it is-nested-in or is a function enabled. 
*/ 301 261 if ((!(trace->depth || ftrace_graph_addr(trace->func)) || 302 - ftrace_graph_ignore_irqs()) || 262 + ftrace_graph_ignore_irqs()) || (trace->depth < 0) || 303 263 (max_depth && trace->depth >= max_depth)) 304 264 return 0; 265 + 266 + /* 267 + * Do not trace a function if it's filtered by set_graph_notrace. 268 + * Make the index of ret stack negative to indicate that it should 269 + * ignore further functions. But it needs its own ret stack entry 270 + * to recover the original index in order to continue tracing after 271 + * returning from the function. 272 + */ 273 + if (ftrace_graph_notrace_addr(trace->func)) 274 + return 1; 305 275 306 276 local_irq_save(flags); 307 277 cpu = raw_smp_processor_id(); ··· 385 335 return; 386 336 entry = ring_buffer_event_data(event); 387 337 entry->ret = *trace; 388 - if (!filter_current_check_discard(buffer, call, entry, event)) 338 + if (!call_filter_check_discard(call, entry, buffer, event)) 389 339 __buffer_unlock_commit(buffer, event); 390 340 } 391 341 ··· 702 652 } 703 653 704 654 /* No overhead */ 705 - ret = print_graph_duration(DURATION_FILL_START, s, flags); 655 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_START); 706 656 if (ret != TRACE_TYPE_HANDLED) 707 657 return ret; 708 658 ··· 714 664 if (!ret) 715 665 return TRACE_TYPE_PARTIAL_LINE; 716 666 717 - ret = print_graph_duration(DURATION_FILL_END, s, flags); 667 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_END); 718 668 if (ret != TRACE_TYPE_HANDLED) 719 669 return ret; 720 670 ··· 779 729 return TRACE_TYPE_HANDLED; 780 730 781 731 /* No real adata, just filling the column with spaces */ 782 - switch (duration) { 783 - case DURATION_FILL_FULL: 732 + switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { 733 + case FLAGS_FILL_FULL: 784 734 ret = trace_seq_puts(s, " | "); 785 735 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 786 - case DURATION_FILL_START: 736 + case FLAGS_FILL_START: 787 737 ret = trace_seq_puts(s, " "); 788 738 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 789 - case DURATION_FILL_END: 739 + case FLAGS_FILL_END: 790 740 ret = trace_seq_puts(s, " |"); 791 741 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; 792 742 } ··· 902 852 } 903 853 904 854 /* No time */ 905 - ret = print_graph_duration(DURATION_FILL_FULL, s, flags); 855 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); 906 856 if (ret != TRACE_TYPE_HANDLED) 907 857 return ret; 908 858 ··· 1222 1172 return TRACE_TYPE_PARTIAL_LINE; 1223 1173 1224 1174 /* No time */ 1225 - ret = print_graph_duration(DURATION_FILL_FULL, s, flags); 1175 + ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); 1226 1176 if (ret != TRACE_TYPE_HANDLED) 1227 1177 return ret; 1228 1178
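
A worked example of the FTRACE_NOTRACE_DEPTH offset introduced above (plain
userspace arithmetic with illustrative numbers, not kernel code): the filtered
function keeps its real slot on the return stack, but the depth it publishes
drops far below -1, so the graph tracer records nothing until the offset is
undone on return.

    #include <assert.h>

    #define FTRACE_NOTRACE_DEPTH 65536

    int main(void)
    {
            int curr_ret_stack = 2;          /* two traced frames already pushed */
            int index = ++curr_ret_stack;    /* the filtered call gets slot 3    */

            /* entry: the function matched set_graph_notrace */
            curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
            assert(curr_ret_stack < -1);     /* tracer ignores this whole range  */

            /* exit: recover the real index and resume normal tracing */
            curr_ret_stack += FTRACE_NOTRACE_DEPTH;
            assert(curr_ret_stack == index);

            return 0;
    }
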
+2 -2
kernel/trace/trace_kprobe.c
··· 835 835 entry->ip = (unsigned long)tp->rp.kp.addr; 836 836 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 837 837 838 - if (!filter_current_check_discard(buffer, call, entry, event)) 838 + if (!filter_check_discard(ftrace_file, entry, buffer, event)) 839 839 trace_buffer_unlock_commit_regs(buffer, event, 840 840 irq_flags, pc, regs); 841 841 } ··· 884 884 entry->ret_ip = (unsigned long)ri->ret_addr; 885 885 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); 886 886 887 - if (!filter_current_check_discard(buffer, call, entry, event)) 887 + if (!filter_check_discard(ftrace_file, entry, buffer, event)) 888 888 trace_buffer_unlock_commit_regs(buffer, event, 889 889 irq_flags, pc, regs); 890 890 }
+2 -2
kernel/trace/trace_mmiotrace.c
··· 323 323 entry = ring_buffer_event_data(event); 324 324 entry->rw = *rw; 325 325 326 - if (!filter_check_discard(call, entry, buffer, event)) 326 + if (!call_filter_check_discard(call, entry, buffer, event)) 327 327 trace_buffer_unlock_commit(buffer, event, 0, pc); 328 328 } 329 329 ··· 353 353 entry = ring_buffer_event_data(event); 354 354 entry->map = *map; 355 355 356 - if (!filter_check_discard(call, entry, buffer, event)) 356 + if (!call_filter_check_discard(call, entry, buffer, event)) 357 357 trace_buffer_unlock_commit(buffer, event, 0, pc); 358 358 } 359 359
+2 -2
kernel/trace/trace_sched_switch.c
··· 45 45 entry->next_state = next->state; 46 46 entry->next_cpu = task_cpu(next); 47 47 48 - if (!filter_check_discard(call, entry, buffer, event)) 48 + if (!call_filter_check_discard(call, entry, buffer, event)) 49 49 trace_buffer_unlock_commit(buffer, event, flags, pc); 50 50 } 51 51 ··· 101 101 entry->next_state = wakee->state; 102 102 entry->next_cpu = task_cpu(wakee); 103 103 104 - if (!filter_check_discard(call, entry, buffer, event)) 104 + if (!call_filter_check_discard(call, entry, buffer, event)) 105 105 trace_buffer_unlock_commit(buffer, event, flags, pc); 106 106 } 107 107
+6 -37
kernel/trace/trace_stat.c
··· 43 43 /* The root directory for all stat files */ 44 44 static struct dentry *stat_dir; 45 45 46 - /* 47 - * Iterate through the rbtree using a post order traversal path 48 - * to release the next node. 49 - * It won't necessary release one at each iteration 50 - * but it will at least advance closer to the next one 51 - * to be released. 52 - */ 53 - static struct rb_node *release_next(struct tracer_stat *ts, 54 - struct rb_node *node) 55 - { 56 - struct stat_node *snode; 57 - struct rb_node *parent = rb_parent(node); 58 - 59 - if (node->rb_left) 60 - return node->rb_left; 61 - else if (node->rb_right) 62 - return node->rb_right; 63 - else { 64 - if (!parent) 65 - ; 66 - else if (parent->rb_left == node) 67 - parent->rb_left = NULL; 68 - else 69 - parent->rb_right = NULL; 70 - 71 - snode = container_of(node, struct stat_node, node); 72 - if (ts->stat_release) 73 - ts->stat_release(snode->stat); 74 - kfree(snode); 75 - 76 - return parent; 77 - } 78 - } 79 - 80 46 static void __reset_stat_session(struct stat_session *session) 81 47 { 82 - struct rb_node *node = session->stat_root.rb_node; 48 + struct stat_node *snode, *n; 83 49 84 - while (node) 85 - node = release_next(session->ts, node); 50 + rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { 51 + if (session->ts->stat_release) 52 + session->ts->stat_release(snode->stat); 53 + kfree(snode); 54 + } 86 55 87 56 session->stat_root = RB_ROOT; 88 57 }
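
For context on the helper used above, a self-contained sketch of the usual
pattern (struct item and free_all_items are made-up names): the _safe variant
caches the successor, so every node can be freed while the postorder walk is
still in progress.

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct item {
            struct rb_node node;
            void *payload;
    };

    /* Free every node of the tree without open-coding the traversal. */
    static void free_all_items(struct rb_root *root)
    {
            struct item *pos, *n;

            rbtree_postorder_for_each_entry_safe(pos, n, root, node)
                    kfree(pos);

            *root = RB_ROOT;
    }
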
+32 -10
kernel/trace/trace_syscalls.c
··· 302 302 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) 303 303 { 304 304 struct trace_array *tr = data; 305 + struct ftrace_event_file *ftrace_file; 305 306 struct syscall_trace_enter *entry; 306 307 struct syscall_metadata *sys_data; 307 308 struct ring_buffer_event *event; ··· 315 314 syscall_nr = trace_get_syscall_nr(current, regs); 316 315 if (syscall_nr < 0) 317 316 return; 318 - if (!test_bit(syscall_nr, tr->enabled_enter_syscalls)) 317 + 318 + /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ 319 + ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); 320 + if (!ftrace_file) 321 + return; 322 + 323 + if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) 319 324 return; 320 325 321 326 sys_data = syscall_nr_to_meta(syscall_nr); ··· 343 336 entry->nr = syscall_nr; 344 337 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); 345 338 346 - if (!filter_current_check_discard(buffer, sys_data->enter_event, 347 - entry, event)) 339 + if (!filter_check_discard(ftrace_file, entry, buffer, event)) 348 340 trace_current_buffer_unlock_commit(buffer, event, 349 341 irq_flags, pc); 350 342 } ··· 351 345 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) 352 346 { 353 347 struct trace_array *tr = data; 348 + struct ftrace_event_file *ftrace_file; 354 349 struct syscall_trace_exit *entry; 355 350 struct syscall_metadata *sys_data; 356 351 struct ring_buffer_event *event; ··· 363 356 syscall_nr = trace_get_syscall_nr(current, regs); 364 357 if (syscall_nr < 0) 365 358 return; 366 - if (!test_bit(syscall_nr, tr->enabled_exit_syscalls)) 359 + 360 + /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ 361 + ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]); 362 + if (!ftrace_file) 363 + return; 364 + 365 + if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) 367 366 return; 368 367 369 368 sys_data = syscall_nr_to_meta(syscall_nr); ··· 390 377 entry->nr = syscall_nr; 391 378 entry->ret = syscall_get_return_value(current, regs); 392 379 393 - if (!filter_current_check_discard(buffer, sys_data->exit_event, 394 - entry, event)) 380 + if (!filter_check_discard(ftrace_file, entry, buffer, event)) 395 381 trace_current_buffer_unlock_commit(buffer, event, 396 382 irq_flags, pc); 397 383 } ··· 409 397 if (!tr->sys_refcount_enter) 410 398 ret = register_trace_sys_enter(ftrace_syscall_enter, tr); 411 399 if (!ret) { 412 - set_bit(num, tr->enabled_enter_syscalls); 400 + rcu_assign_pointer(tr->enter_syscall_files[num], file); 413 401 tr->sys_refcount_enter++; 414 402 } 415 403 mutex_unlock(&syscall_trace_lock); ··· 427 415 return; 428 416 mutex_lock(&syscall_trace_lock); 429 417 tr->sys_refcount_enter--; 430 - clear_bit(num, tr->enabled_enter_syscalls); 418 + rcu_assign_pointer(tr->enter_syscall_files[num], NULL); 431 419 if (!tr->sys_refcount_enter) 432 420 unregister_trace_sys_enter(ftrace_syscall_enter, tr); 433 421 mutex_unlock(&syscall_trace_lock); 422 + /* 423 + * Callers expect the event to be completely disabled on 424 + * return, so wait for current handlers to finish. 
425 + */ 426 + synchronize_sched(); 434 427 } 435 428 436 429 static int reg_event_syscall_exit(struct ftrace_event_file *file, ··· 452 435 if (!tr->sys_refcount_exit) 453 436 ret = register_trace_sys_exit(ftrace_syscall_exit, tr); 454 437 if (!ret) { 455 - set_bit(num, tr->enabled_exit_syscalls); 438 + rcu_assign_pointer(tr->exit_syscall_files[num], file); 456 439 tr->sys_refcount_exit++; 457 440 } 458 441 mutex_unlock(&syscall_trace_lock); ··· 470 453 return; 471 454 mutex_lock(&syscall_trace_lock); 472 455 tr->sys_refcount_exit--; 473 - clear_bit(num, tr->enabled_exit_syscalls); 456 + rcu_assign_pointer(tr->exit_syscall_files[num], NULL); 474 457 if (!tr->sys_refcount_exit) 475 458 unregister_trace_sys_exit(ftrace_syscall_exit, tr); 476 459 mutex_unlock(&syscall_trace_lock); 460 + /* 461 + * Callers expect the event to be completely disabled on 462 + * return, so wait for current handlers to finish. 463 + */ 464 + synchronize_sched(); 477 465 } 478 466 479 467 static int __init init_syscall_trace(struct ftrace_event_call *call)
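
The syscall changes above follow a standard RCU publish/retract pattern; a
condensed, hypothetical sketch (active_file and the two functions are invented
names): the tracepoint probe runs inside rcu_read_lock_sched() via
__DO_TRACE(), so the disable path clears the pointer and then waits in
synchronize_sched() before the event can be treated as fully off.

    #include <linux/ftrace_event.h>
    #include <linux/rcupdate.h>

    static struct ftrace_event_file __rcu *active_file;

    /* Probe side: runs under rcu_read_lock_sched() (tracepoint handler). */
    static void my_probe(void)
    {
            struct ftrace_event_file *file;

            file = rcu_dereference_sched(active_file);
            if (!file)
                    return;
            /* ... record the event through "file" ... */
    }

    /* Disable side: unpublish, then wait for in-flight probes to finish. */
    static void my_disable(void)
    {
            rcu_assign_pointer(active_file, NULL);
            synchronize_sched();
    }
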
+2 -1
kernel/trace/trace_uprobe.c
··· 128 128 if (is_ret) 129 129 tu->consumer.ret_handler = uretprobe_dispatcher; 130 130 init_trace_uprobe_filter(&tu->filter); 131 + tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER; 131 132 return tu; 132 133 133 134 error: ··· 562 561 for (i = 0; i < tu->nr_args; i++) 563 562 call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); 564 563 565 - if (!filter_current_check_discard(buffer, call, entry, event)) 564 + if (!call_filter_check_discard(call, entry, buffer, event)) 566 565 trace_buffer_unlock_commit(buffer, event, 0, 0); 567 566 } 568 567
+2 -2
scripts/recordmcount.pl
··· 214 214 $weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)"; 215 215 $section_regex = "Disassembly of section\\s+(\\S+):"; 216 216 $function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 217 - $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 217 + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)\$"; 218 218 $section_type = '@progbits'; 219 219 $mcount_adjust = 0; 220 220 $type = ".long"; 221 221 222 222 if ($arch eq "x86_64") { 223 - $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; 223 + $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s(mcount|__fentry__)([+-]0x[0-9a-zA-Z]+)?\$"; 224 224 $type = ".quad"; 225 225 $alignment = 8; 226 226 $mcount_adjust = -1;