Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
"Most of the changes are clean ups and small fixes. Some of them have
stable tags to them. I searched through my INBOX just as the merge
window opened and found lots of patches to pull. I ran them through
all my tests and they were in linux-next for a few days.

Features added this release:
----------------------------

- Module globbing. You can now filter function tracing to several
modules. # echo '*:mod:*snd*' > set_ftrace_filter (Dmitry Safonov)

- Tracer specific options are now visible even when the tracer is not
active. It was rather annoying that you can only see and modify
tracer options after enabling the tracer. Now they are in the
options/ directory even when the tracer is not active. Although
they are still only visible when the tracer is active in the
trace_options file.

- Trace options are now per instance (although some of the tracer
specific options are global)

- New tracefs file: set_event_pid. If any pid is added to this file,
then all events in the instance will filter out events that are not
part of this pid. The sched_switch and sched_wakeup events handle the next
and the wakee pids as if they were part of this pid"

* tag 'trace-v4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (68 commits)
tracefs: Fix refcount imbalance in start_creating()
tracing: Put back comma for empty fields in boot string parsing
tracing: Apply tracer specific options from kernel command line.
tracing: Add some documentation about set_event_pid
ring_buffer: Remove unneeded smp_wmb() before wakeup of reader benchmark
tracing: Allow dumping traces without tracking trace started cpus
ring_buffer: Fix more races when terminating the producer in the benchmark
ring_buffer: Do no not complete benchmark reader too early
tracing: Remove redundant TP_ARGS redefining
tracing: Rename max_stack_lock to stack_trace_max_lock
tracing: Allow arch-specific stack tracer
recordmcount: arm64: Replace the ignored mcount call into nop
recordmcount: Fix endianness handling bug for nop_mcount
tracepoints: Fix documentation of RCU lockdep checks
tracing: ftrace_event_is_function() can return boolean
tracing: is_legal_op() can return boolean
ring-buffer: rb_event_is_commit() can return boolean
ring-buffer: rb_per_cpu_empty() can return boolean
ring_buffer: ring_buffer_empty{cpu}() can return boolean
ring-buffer: rb_is_reader_page() can return boolean
...

+1801 -957
+18
Documentation/trace/events.txt
··· 288 288 # cat sched_wakeup/filter 289 289 common_pid == 0 290 290 291 + 5.4 PID filtering 292 + ----------------- 293 + 294 + The set_event_pid file in the same directory as the top events directory 295 + exists, will filter all events from tracing any task that does not have the 296 + PID listed in the set_event_pid file. 297 + 298 + # cd /sys/kernel/debug/tracing 299 + # echo $$ > set_event_pid 300 + # echo 1 > events/enable 301 + 302 + Will only trace events for the current task. 303 + 304 + To add more PIDs without losing the PIDs already included, use '>>'. 305 + 306 + # echo 123 244 1 >> set_event_pid 307 + 308 + 291 309 6. Event triggers 292 310 ================= 293 311
+23
Documentation/trace/ftrace.txt
··· 204 204 205 205 Have the function tracer only trace a single thread. 206 206 207 + set_event_pid: 208 + 209 + Have the events only trace a task with a PID listed in this file. 210 + Note, sched_switch and sched_wakeup will also trace events 211 + listed in this file. 212 + 207 213 set_graph_function: 208 214 209 215 Set a "trigger" function where tracing should start ··· 2442 2436 '!': 2443 2437 2444 2438 echo '!writeback*:mod:ext3' >> set_ftrace_filter 2439 + 2440 + Mod command supports module globbing. Disable tracing for all 2441 + functions except a specific module: 2442 + 2443 + echo '!*:mod:!ext3' >> set_ftrace_filter 2444 + 2445 + Disable tracing for all modules, but still trace kernel: 2446 + 2447 + echo '!*:mod:*' >> set_ftrace_filter 2448 + 2449 + Enable filter only for kernel: 2450 + 2451 + echo '*write*:mod:!*' >> set_ftrace_filter 2452 + 2453 + Enable filter for module globbing: 2454 + 2455 + echo '*write*:mod:*snd*' >> set_ftrace_filter 2445 2456 2446 2457 - traceon/traceoff 2447 2458 These commands turn tracing on and off when the specified
+4
arch/x86/kernel/ftrace.c
··· 556 556 run_sync(); 557 557 558 558 report = "updating code"; 559 + count = 0; 559 560 560 561 for_ftrace_rec_iter(iter) { 561 562 rec = ftrace_rec_iter_record(iter); ··· 564 563 ret = add_update(rec, enable); 565 564 if (ret) 566 565 goto remove_breakpoints; 566 + count++; 567 567 } 568 568 569 569 run_sync(); 570 570 571 571 report = "removing breakpoints"; 572 + count = 0; 572 573 573 574 for_ftrace_rec_iter(iter) { 574 575 rec = ftrace_rec_iter_record(iter); ··· 578 575 ret = finish_update(rec, enable); 579 576 if (ret) 580 577 goto remove_breakpoints; 578 + count++; 581 579 } 582 580 583 581 run_sync();
+5 -1
fs/tracefs/inode.c
··· 340 340 dput(dentry); 341 341 dentry = ERR_PTR(-EEXIST); 342 342 } 343 - if (IS_ERR(dentry)) 343 + 344 + if (IS_ERR(dentry)) { 344 345 mutex_unlock(&parent->d_inode->i_mutex); 346 + simple_release_fs(&tracefs_mount, &tracefs_mount_count); 347 + } 348 + 345 349 return dentry; 346 350 } 347 351
+11
include/linux/ftrace.h
··· 263 263 #endif /* CONFIG_FUNCTION_TRACER */ 264 264 265 265 #ifdef CONFIG_STACK_TRACER 266 + 267 + #define STACK_TRACE_ENTRIES 500 268 + 269 + struct stack_trace; 270 + 271 + extern unsigned stack_trace_index[]; 272 + extern struct stack_trace stack_trace_max; 273 + extern unsigned long stack_trace_max_size; 274 + extern arch_spinlock_t stack_trace_max_lock; 275 + 266 276 extern int stack_tracer_enabled; 277 + void stack_trace_print(void); 267 278 int 268 279 stack_trace_sysctl(struct ctl_table *table, int write, 269 280 void __user *buffer, size_t *lenp,
+2 -2
include/linux/ring_buffer.h
··· 154 154 } 155 155 #endif 156 156 157 - int ring_buffer_empty(struct ring_buffer *buffer); 158 - int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); 157 + bool ring_buffer_empty(struct ring_buffer *buffer); 158 + bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); 159 159 160 160 void ring_buffer_record_disable(struct ring_buffer *buffer); 161 161 void ring_buffer_record_enable(struct ring_buffer *buffer);
+13 -7
include/linux/trace_events.h
··· 168 168 trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, 169 169 int type, unsigned long len, 170 170 unsigned long flags, int pc); 171 - void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, 172 - struct ring_buffer_event *event, 173 - unsigned long flags, int pc); 174 - void trace_buffer_unlock_commit(struct ring_buffer *buffer, 171 + void trace_buffer_unlock_commit(struct trace_array *tr, 172 + struct ring_buffer *buffer, 175 173 struct ring_buffer_event *event, 176 174 unsigned long flags, int pc); 177 - void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer, 175 + void trace_buffer_unlock_commit_regs(struct trace_array *tr, 176 + struct ring_buffer *buffer, 178 177 struct ring_buffer_event *event, 179 178 unsigned long flags, int pc, 180 179 struct pt_regs *regs); ··· 328 329 EVENT_FILE_FL_SOFT_DISABLED_BIT, 329 330 EVENT_FILE_FL_TRIGGER_MODE_BIT, 330 331 EVENT_FILE_FL_TRIGGER_COND_BIT, 332 + EVENT_FILE_FL_PID_FILTER_BIT, 331 333 }; 332 334 333 335 /* ··· 342 342 * tracepoint may be enabled) 343 343 * TRIGGER_MODE - When set, invoke the triggers associated with the event 344 344 * TRIGGER_COND - When set, one or more triggers has an associated filter 345 + * PID_FILTER - When set, the event is filtered based on pid 345 346 */ 346 347 enum { 347 348 EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), ··· 353 352 EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), 354 353 EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), 355 354 EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), 355 + EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), 356 356 }; 357 357 358 358 struct trace_event_file { ··· 432 430 extern void event_triggers_post_call(struct trace_event_file *file, 433 431 enum event_trigger_type tt); 434 432 433 + bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); 434 + 435 435 /** 436 436 * trace_trigger_soft_disabled - 
do triggers and test if soft disabled 437 437 * @file: The file pointer of the event to test ··· 453 449 event_triggers_call(file, NULL); 454 450 if (eflags & EVENT_FILE_FL_SOFT_DISABLED) 455 451 return true; 452 + if (eflags & EVENT_FILE_FL_PID_FILTER) 453 + return trace_event_ignore_this_pid(file); 456 454 } 457 455 return false; 458 456 } ··· 514 508 enum event_trigger_type tt = ETT_NONE; 515 509 516 510 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) 517 - trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 511 + trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); 518 512 519 513 if (tt) 520 514 event_triggers_post_call(file, tt); ··· 546 540 enum event_trigger_type tt = ETT_NONE; 547 541 548 542 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) 549 - trace_buffer_unlock_commit_regs(buffer, event, 543 + trace_buffer_unlock_commit_regs(file->tr, buffer, event, 550 544 irq_flags, pc, regs); 551 545 552 546 if (tt)
+32 -7
include/linux/tracepoint.h
··· 26 26 struct tracepoint_func { 27 27 void *func; 28 28 void *data; 29 + int prio; 29 30 }; 30 31 31 32 struct tracepoint { ··· 43 42 unsigned long enum_value; 44 43 }; 45 44 45 + #define TRACEPOINT_DEFAULT_PRIO 10 46 + 46 47 extern int 47 48 tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); 49 + extern int 50 + tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, 51 + int prio); 48 52 extern int 49 53 tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); 50 54 extern void ··· 117 111 #define TP_ARGS(args...) args 118 112 #define TP_CONDITION(args...) args 119 113 120 - #ifdef CONFIG_TRACEPOINTS 114 + /* 115 + * Individual subsystem my have a separate configuration to 116 + * enable their tracepoints. By default, this file will create 117 + * the tracepoints if CONFIG_TRACEPOINT is defined. If a subsystem 118 + * wants to be able to disable its tracepoints from being created 119 + * it can define NOTRACE before including the tracepoint headers. 120 + */ 121 + #if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE) 122 + #define TRACEPOINTS_ENABLED 123 + #endif 124 + 125 + #ifdef TRACEPOINTS_ENABLED 121 126 122 127 /* 123 128 * it_func[0] is never NULL because there is at least one element in the array ··· 184 167 * structure. Force alignment to the same alignment as the section start. 185 168 * 186 169 * When lockdep is enabled, we make sure to always do the RCU portions of 187 - * the tracepoint code, regardless of whether tracing is on or we match the 188 - * condition. This lets us find RCU issues triggered with tracepoints even 189 - * when this tracepoint is off. This code has no purpose other than poking 190 - * RCU a bit. 170 + * the tracepoint code, regardless of whether tracing is on. However, 171 + * don't check if the condition is false, due to interaction with idle 172 + * instrumentation. 
This lets us find RCU issues triggered with tracepoints 173 + * even when this tracepoint is off. This code has no purpose other than 174 + * poking RCU a bit. 191 175 */ 192 176 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 193 177 extern struct tracepoint __tracepoint_##name; \ ··· 212 194 { \ 213 195 return tracepoint_probe_register(&__tracepoint_##name, \ 214 196 (void *)probe, data); \ 197 + } \ 198 + static inline int \ 199 + register_trace_prio_##name(void (*probe)(data_proto), void *data,\ 200 + int prio) \ 201 + { \ 202 + return tracepoint_probe_register_prio(&__tracepoint_##name, \ 203 + (void *)probe, data, prio); \ 215 204 } \ 216 205 static inline int \ 217 206 unregister_trace_##name(void (*probe)(data_proto), void *data) \ ··· 259 234 #define EXPORT_TRACEPOINT_SYMBOL(name) \ 260 235 EXPORT_SYMBOL(__tracepoint_##name) 261 236 262 - #else /* !CONFIG_TRACEPOINTS */ 237 + #else /* !TRACEPOINTS_ENABLED */ 263 238 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 264 239 static inline void trace_##name(proto) \ 265 240 { } \ ··· 291 266 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) 292 267 #define EXPORT_TRACEPOINT_SYMBOL(name) 293 268 294 - #endif /* CONFIG_TRACEPOINTS */ 269 + #endif /* TRACEPOINTS_ENABLED */ 295 270 296 271 #ifdef CONFIG_TRACING 297 272 /**
+1 -1
include/trace/define_trace.h
··· 86 86 #undef DECLARE_TRACE 87 87 #define DECLARE_TRACE(name, proto, args) 88 88 89 - #ifdef CONFIG_EVENT_TRACING 89 + #ifdef TRACEPOINTS_ENABLED 90 90 #include <trace/trace_events.h> 91 91 #include <trace/perf.h> 92 92 #endif
+4
include/trace/events/gpio.h
··· 1 1 #undef TRACE_SYSTEM 2 2 #define TRACE_SYSTEM gpio 3 3 4 + #ifndef CONFIG_TRACING_EVENTS_GPIO 5 + #define NOTRACE 6 + #endif 7 + 4 8 #if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) 5 9 #define _TRACE_GPIO_H 6 10
-258
include/trace/perf.h
··· 1 - /* 2 - * Stage 4 of the trace events. 3 - * 4 - * Override the macros in <trace/trace_events.h> to include the following: 5 - * 6 - * For those macros defined with TRACE_EVENT: 7 - * 8 - * static struct trace_event_call event_<call>; 9 - * 10 - * static void trace_event_raw_event_<call>(void *__data, proto) 11 - * { 12 - * struct trace_event_file *trace_file = __data; 13 - * struct trace_event_call *event_call = trace_file->event_call; 14 - * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets; 15 - * unsigned long eflags = trace_file->flags; 16 - * enum event_trigger_type __tt = ETT_NONE; 17 - * struct ring_buffer_event *event; 18 - * struct trace_event_raw_<call> *entry; <-- defined in stage 1 19 - * struct ring_buffer *buffer; 20 - * unsigned long irq_flags; 21 - * int __data_size; 22 - * int pc; 23 - * 24 - * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { 25 - * if (eflags & EVENT_FILE_FL_TRIGGER_MODE) 26 - * event_triggers_call(trace_file, NULL); 27 - * if (eflags & EVENT_FILE_FL_SOFT_DISABLED) 28 - * return; 29 - * } 30 - * 31 - * local_save_flags(irq_flags); 32 - * pc = preempt_count(); 33 - * 34 - * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args); 35 - * 36 - * event = trace_event_buffer_lock_reserve(&buffer, trace_file, 37 - * event_<call>->event.type, 38 - * sizeof(*entry) + __data_size, 39 - * irq_flags, pc); 40 - * if (!event) 41 - * return; 42 - * entry = ring_buffer_event_data(event); 43 - * 44 - * { <assign>; } <-- Here we assign the entries by the __field and 45 - * __array macros. 
46 - * 47 - * if (eflags & EVENT_FILE_FL_TRIGGER_COND) 48 - * __tt = event_triggers_call(trace_file, entry); 49 - * 50 - * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, 51 - * &trace_file->flags)) 52 - * ring_buffer_discard_commit(buffer, event); 53 - * else if (!filter_check_discard(trace_file, entry, buffer, event)) 54 - * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 55 - * 56 - * if (__tt) 57 - * event_triggers_post_call(trace_file, __tt); 58 - * } 59 - * 60 - * static struct trace_event ftrace_event_type_<call> = { 61 - * .trace = trace_raw_output_<call>, <-- stage 2 62 - * }; 63 - * 64 - * static char print_fmt_<call>[] = <TP_printk>; 65 - * 66 - * static struct trace_event_class __used event_class_<template> = { 67 - * .system = "<system>", 68 - * .define_fields = trace_event_define_fields_<call>, 69 - * .fields = LIST_HEAD_INIT(event_class_##call.fields), 70 - * .raw_init = trace_event_raw_init, 71 - * .probe = trace_event_raw_event_##call, 72 - * .reg = trace_event_reg, 73 - * }; 74 - * 75 - * static struct trace_event_call event_<call> = { 76 - * .class = event_class_<template>, 77 - * { 78 - * .tp = &__tracepoint_<call>, 79 - * }, 80 - * .event = &ftrace_event_type_<call>, 81 - * .print_fmt = print_fmt_<call>, 82 - * .flags = TRACE_EVENT_FL_TRACEPOINT, 83 - * }; 84 - * // its only safe to use pointers when doing linker tricks to 85 - * // create an array. 
86 - * static struct trace_event_call __used 87 - * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; 88 - * 89 - */ 90 - 91 - #ifdef CONFIG_PERF_EVENTS 92 - 93 - #define _TRACE_PERF_PROTO(call, proto) \ 94 - static notrace void \ 95 - perf_trace_##call(void *__data, proto); 96 - 97 - #define _TRACE_PERF_INIT(call) \ 98 - .perf_probe = perf_trace_##call, 99 - 100 - #else 101 - #define _TRACE_PERF_PROTO(call, proto) 102 - #define _TRACE_PERF_INIT(call) 103 - #endif /* CONFIG_PERF_EVENTS */ 104 - 105 - #undef __entry 106 - #define __entry entry 107 - 108 - #undef __field 109 - #define __field(type, item) 110 - 111 - #undef __field_struct 112 - #define __field_struct(type, item) 113 - 114 - #undef __array 115 - #define __array(type, item, len) 116 - 117 - #undef __dynamic_array 118 - #define __dynamic_array(type, item, len) \ 119 - __entry->__data_loc_##item = __data_offsets.item; 120 - 121 - #undef __string 122 - #define __string(item, src) __dynamic_array(char, item, -1) 123 - 124 - #undef __assign_str 125 - #define __assign_str(dst, src) \ 126 - strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); 127 - 128 - #undef __bitmask 129 - #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) 130 - 131 - #undef __get_bitmask 132 - #define __get_bitmask(field) (char *)__get_dynamic_array(field) 133 - 134 - #undef __assign_bitmask 135 - #define __assign_bitmask(dst, src, nr_bits) \ 136 - memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) 137 - 138 - #undef TP_fast_assign 139 - #define TP_fast_assign(args...) 
args 140 - 141 - #undef __perf_addr 142 - #define __perf_addr(a) (a) 143 - 144 - #undef __perf_count 145 - #define __perf_count(c) (c) 146 - 147 - #undef __perf_task 148 - #define __perf_task(t) (t) 149 - 150 - #undef DECLARE_EVENT_CLASS 151 - #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 152 - \ 153 - static notrace void \ 154 - trace_event_raw_event_##call(void *__data, proto) \ 155 - { \ 156 - struct trace_event_file *trace_file = __data; \ 157 - struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ 158 - struct trace_event_buffer fbuffer; \ 159 - struct trace_event_raw_##call *entry; \ 160 - int __data_size; \ 161 - \ 162 - if (trace_trigger_soft_disabled(trace_file)) \ 163 - return; \ 164 - \ 165 - __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ 166 - \ 167 - entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ 168 - sizeof(*entry) + __data_size); \ 169 - \ 170 - if (!entry) \ 171 - return; \ 172 - \ 173 - tstruct \ 174 - \ 175 - { assign; } \ 176 - \ 177 - trace_event_buffer_commit(&fbuffer); \ 178 - } 179 - /* 180 - * The ftrace_test_probe is compiled out, it is only here as a build time check 181 - * to make sure that if the tracepoint handling changes, the ftrace probe will 182 - * fail to compile unless it too is updated. 
183 - */ 184 - 185 - #undef DEFINE_EVENT 186 - #define DEFINE_EVENT(template, call, proto, args) \ 187 - static inline void ftrace_test_probe_##call(void) \ 188 - { \ 189 - check_trace_callback_type_##call(trace_event_raw_event_##template); \ 190 - } 191 - 192 - #undef DEFINE_EVENT_PRINT 193 - #define DEFINE_EVENT_PRINT(template, name, proto, args, print) 194 - 195 - #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 196 - 197 - #undef __entry 198 - #define __entry REC 199 - 200 - #undef __print_flags 201 - #undef __print_symbolic 202 - #undef __print_hex 203 - #undef __get_dynamic_array 204 - #undef __get_dynamic_array_len 205 - #undef __get_str 206 - #undef __get_bitmask 207 - #undef __print_array 208 - 209 - #undef TP_printk 210 - #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) 211 - 212 - #undef DECLARE_EVENT_CLASS 213 - #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 214 - _TRACE_PERF_PROTO(call, PARAMS(proto)); \ 215 - static char print_fmt_##call[] = print; \ 216 - static struct trace_event_class __used __refdata event_class_##call = { \ 217 - .system = TRACE_SYSTEM_STRING, \ 218 - .define_fields = trace_event_define_fields_##call, \ 219 - .fields = LIST_HEAD_INIT(event_class_##call.fields),\ 220 - .raw_init = trace_event_raw_init, \ 221 - .probe = trace_event_raw_event_##call, \ 222 - .reg = trace_event_reg, \ 223 - _TRACE_PERF_INIT(call) \ 224 - }; 225 - 226 - #undef DEFINE_EVENT 227 - #define DEFINE_EVENT(template, call, proto, args) \ 228 - \ 229 - static struct trace_event_call __used event_##call = { \ 230 - .class = &event_class_##template, \ 231 - { \ 232 - .tp = &__tracepoint_##call, \ 233 - }, \ 234 - .event.funcs = &trace_event_type_funcs_##template, \ 235 - .print_fmt = print_fmt_##template, \ 236 - .flags = TRACE_EVENT_FL_TRACEPOINT, \ 237 - }; \ 238 - static struct trace_event_call __used \ 239 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 240 - 241 - #undef DEFINE_EVENT_PRINT 
242 - #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 243 - \ 244 - static char print_fmt_##call[] = print; \ 245 - \ 246 - static struct trace_event_call __used event_##call = { \ 247 - .class = &event_class_##template, \ 248 - { \ 249 - .tp = &__tracepoint_##call, \ 250 - }, \ 251 - .event.funcs = &trace_event_type_funcs_##call, \ 252 - .print_fmt = print_fmt_##call, \ 253 - .flags = TRACE_EVENT_FL_TRACEPOINT, \ 254 - }; \ 255 - static struct trace_event_call __used \ 256 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 257 - 258 - #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 259 1 260 2 #undef TRACE_SYSTEM_VAR 261 3
+258
include/trace/trace_events.h
··· 506 506 507 507 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 508 508 509 + /* 510 + * Stage 4 of the trace events. 511 + * 512 + * Override the macros in <trace/trace_events.h> to include the following: 513 + * 514 + * For those macros defined with TRACE_EVENT: 515 + * 516 + * static struct trace_event_call event_<call>; 517 + * 518 + * static void trace_event_raw_event_<call>(void *__data, proto) 519 + * { 520 + * struct trace_event_file *trace_file = __data; 521 + * struct trace_event_call *event_call = trace_file->event_call; 522 + * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets; 523 + * unsigned long eflags = trace_file->flags; 524 + * enum event_trigger_type __tt = ETT_NONE; 525 + * struct ring_buffer_event *event; 526 + * struct trace_event_raw_<call> *entry; <-- defined in stage 1 527 + * struct ring_buffer *buffer; 528 + * unsigned long irq_flags; 529 + * int __data_size; 530 + * int pc; 531 + * 532 + * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { 533 + * if (eflags & EVENT_FILE_FL_TRIGGER_MODE) 534 + * event_triggers_call(trace_file, NULL); 535 + * if (eflags & EVENT_FILE_FL_SOFT_DISABLED) 536 + * return; 537 + * } 538 + * 539 + * local_save_flags(irq_flags); 540 + * pc = preempt_count(); 541 + * 542 + * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args); 543 + * 544 + * event = trace_event_buffer_lock_reserve(&buffer, trace_file, 545 + * event_<call>->event.type, 546 + * sizeof(*entry) + __data_size, 547 + * irq_flags, pc); 548 + * if (!event) 549 + * return; 550 + * entry = ring_buffer_event_data(event); 551 + * 552 + * { <assign>; } <-- Here we assign the entries by the __field and 553 + * __array macros. 
554 + * 555 + * if (eflags & EVENT_FILE_FL_TRIGGER_COND) 556 + * __tt = event_triggers_call(trace_file, entry); 557 + * 558 + * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, 559 + * &trace_file->flags)) 560 + * ring_buffer_discard_commit(buffer, event); 561 + * else if (!filter_check_discard(trace_file, entry, buffer, event)) 562 + * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 563 + * 564 + * if (__tt) 565 + * event_triggers_post_call(trace_file, __tt); 566 + * } 567 + * 568 + * static struct trace_event ftrace_event_type_<call> = { 569 + * .trace = trace_raw_output_<call>, <-- stage 2 570 + * }; 571 + * 572 + * static char print_fmt_<call>[] = <TP_printk>; 573 + * 574 + * static struct trace_event_class __used event_class_<template> = { 575 + * .system = "<system>", 576 + * .define_fields = trace_event_define_fields_<call>, 577 + * .fields = LIST_HEAD_INIT(event_class_##call.fields), 578 + * .raw_init = trace_event_raw_init, 579 + * .probe = trace_event_raw_event_##call, 580 + * .reg = trace_event_reg, 581 + * }; 582 + * 583 + * static struct trace_event_call event_<call> = { 584 + * .class = event_class_<template>, 585 + * { 586 + * .tp = &__tracepoint_<call>, 587 + * }, 588 + * .event = &ftrace_event_type_<call>, 589 + * .print_fmt = print_fmt_<call>, 590 + * .flags = TRACE_EVENT_FL_TRACEPOINT, 591 + * }; 592 + * // its only safe to use pointers when doing linker tricks to 593 + * // create an array. 
594 + * static struct trace_event_call __used 595 + * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; 596 + * 597 + */ 598 + 599 + #ifdef CONFIG_PERF_EVENTS 600 + 601 + #define _TRACE_PERF_PROTO(call, proto) \ 602 + static notrace void \ 603 + perf_trace_##call(void *__data, proto); 604 + 605 + #define _TRACE_PERF_INIT(call) \ 606 + .perf_probe = perf_trace_##call, 607 + 608 + #else 609 + #define _TRACE_PERF_PROTO(call, proto) 610 + #define _TRACE_PERF_INIT(call) 611 + #endif /* CONFIG_PERF_EVENTS */ 612 + 613 + #undef __entry 614 + #define __entry entry 615 + 616 + #undef __field 617 + #define __field(type, item) 618 + 619 + #undef __field_struct 620 + #define __field_struct(type, item) 621 + 622 + #undef __array 623 + #define __array(type, item, len) 624 + 625 + #undef __dynamic_array 626 + #define __dynamic_array(type, item, len) \ 627 + __entry->__data_loc_##item = __data_offsets.item; 628 + 629 + #undef __string 630 + #define __string(item, src) __dynamic_array(char, item, -1) 631 + 632 + #undef __assign_str 633 + #define __assign_str(dst, src) \ 634 + strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); 635 + 636 + #undef __bitmask 637 + #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) 638 + 639 + #undef __get_bitmask 640 + #define __get_bitmask(field) (char *)__get_dynamic_array(field) 641 + 642 + #undef __assign_bitmask 643 + #define __assign_bitmask(dst, src, nr_bits) \ 644 + memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) 645 + 646 + #undef TP_fast_assign 647 + #define TP_fast_assign(args...) 
args 648 + 649 + #undef __perf_addr 650 + #define __perf_addr(a) (a) 651 + 652 + #undef __perf_count 653 + #define __perf_count(c) (c) 654 + 655 + #undef __perf_task 656 + #define __perf_task(t) (t) 657 + 658 + #undef DECLARE_EVENT_CLASS 659 + #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 660 + \ 661 + static notrace void \ 662 + trace_event_raw_event_##call(void *__data, proto) \ 663 + { \ 664 + struct trace_event_file *trace_file = __data; \ 665 + struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ 666 + struct trace_event_buffer fbuffer; \ 667 + struct trace_event_raw_##call *entry; \ 668 + int __data_size; \ 669 + \ 670 + if (trace_trigger_soft_disabled(trace_file)) \ 671 + return; \ 672 + \ 673 + __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ 674 + \ 675 + entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ 676 + sizeof(*entry) + __data_size); \ 677 + \ 678 + if (!entry) \ 679 + return; \ 680 + \ 681 + tstruct \ 682 + \ 683 + { assign; } \ 684 + \ 685 + trace_event_buffer_commit(&fbuffer); \ 686 + } 687 + /* 688 + * The ftrace_test_probe is compiled out, it is only here as a build time check 689 + * to make sure that if the tracepoint handling changes, the ftrace probe will 690 + * fail to compile unless it too is updated. 
691 + */ 692 + 693 + #undef DEFINE_EVENT 694 + #define DEFINE_EVENT(template, call, proto, args) \ 695 + static inline void ftrace_test_probe_##call(void) \ 696 + { \ 697 + check_trace_callback_type_##call(trace_event_raw_event_##template); \ 698 + } 699 + 700 + #undef DEFINE_EVENT_PRINT 701 + #define DEFINE_EVENT_PRINT(template, name, proto, args, print) 702 + 703 + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 704 + 705 + #undef __entry 706 + #define __entry REC 707 + 708 + #undef __print_flags 709 + #undef __print_symbolic 710 + #undef __print_hex 711 + #undef __get_dynamic_array 712 + #undef __get_dynamic_array_len 713 + #undef __get_str 714 + #undef __get_bitmask 715 + #undef __print_array 716 + 717 + #undef TP_printk 718 + #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) 719 + 720 + #undef DECLARE_EVENT_CLASS 721 + #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 722 + _TRACE_PERF_PROTO(call, PARAMS(proto)); \ 723 + static char print_fmt_##call[] = print; \ 724 + static struct trace_event_class __used __refdata event_class_##call = { \ 725 + .system = TRACE_SYSTEM_STRING, \ 726 + .define_fields = trace_event_define_fields_##call, \ 727 + .fields = LIST_HEAD_INIT(event_class_##call.fields),\ 728 + .raw_init = trace_event_raw_init, \ 729 + .probe = trace_event_raw_event_##call, \ 730 + .reg = trace_event_reg, \ 731 + _TRACE_PERF_INIT(call) \ 732 + }; 733 + 734 + #undef DEFINE_EVENT 735 + #define DEFINE_EVENT(template, call, proto, args) \ 736 + \ 737 + static struct trace_event_call __used event_##call = { \ 738 + .class = &event_class_##template, \ 739 + { \ 740 + .tp = &__tracepoint_##call, \ 741 + }, \ 742 + .event.funcs = &trace_event_type_funcs_##template, \ 743 + .print_fmt = print_fmt_##template, \ 744 + .flags = TRACE_EVENT_FL_TRACEPOINT, \ 745 + }; \ 746 + static struct trace_event_call __used \ 747 + __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 748 + 749 + #undef DEFINE_EVENT_PRINT 
750 + #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 751 + \ 752 + static char print_fmt_##call[] = print; \ 753 + \ 754 + static struct trace_event_call __used event_##call = { \ 755 + .class = &event_class_##template, \ 756 + { \ 757 + .tp = &__tracepoint_##call, \ 758 + }, \ 759 + .event.funcs = &trace_event_type_funcs_##call, \ 760 + .print_fmt = print_fmt_##call, \ 761 + .flags = TRACE_EVENT_FL_TRACEPOINT, \ 762 + }; \ 763 + static struct trace_event_call __used \ 764 + __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 765 + 766 + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+7
kernel/trace/Kconfig
··· 635 635 636 636 If unsure, say N 637 637 638 + config TRACING_EVENTS_GPIO 639 + bool "Trace gpio events" 640 + depends on GPIOLIB 641 + default y 642 + help 643 + Enable tracing events for gpio subsystem 644 + 638 645 endif # FTRACE 639 646 640 647 endif # TRACING_SUPPORT
+6 -5
kernel/trace/blktrace.c
··· 103 103 memcpy((void *) t + sizeof(*t), data, len); 104 104 105 105 if (blk_tracer) 106 - trace_buffer_unlock_commit(buffer, event, 0, pc); 106 + trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); 107 107 } 108 108 } 109 109 ··· 278 278 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); 279 279 280 280 if (blk_tracer) { 281 - trace_buffer_unlock_commit(buffer, event, 0, pc); 281 + trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc); 282 282 return; 283 283 } 284 284 } ··· 1340 1340 static enum print_line_t print_one_line(struct trace_iterator *iter, 1341 1341 bool classic) 1342 1342 { 1343 + struct trace_array *tr = iter->tr; 1343 1344 struct trace_seq *s = &iter->seq; 1344 1345 const struct blk_io_trace *t; 1345 1346 u16 what; ··· 1349 1348 1350 1349 t = te_blk_io_trace(iter->ent); 1351 1350 what = t->action & ((1 << BLK_TC_SHIFT) - 1); 1352 - long_act = !!(trace_flags & TRACE_ITER_VERBOSE); 1351 + long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE); 1353 1352 log_action = classic ? &blk_log_action_classic : &blk_log_action; 1354 1353 1355 1354 if (t->action == BLK_TN_MESSAGE) { ··· 1411 1410 /* don't output context-info for blk_classic output */ 1412 1411 if (bit == TRACE_BLK_OPT_CLASSIC) { 1413 1412 if (set) 1414 - trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1413 + tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO; 1415 1414 else 1416 - trace_flags |= TRACE_ITER_CONTEXT_INFO; 1415 + tr->trace_flags |= TRACE_ITER_CONTEXT_INFO; 1417 1416 } 1418 1417 return 0; 1419 1418 }
+104 -93
kernel/trace/ftrace.c
··· 243 243 244 244 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 245 245 static void update_function_graph_func(void); 246 + 247 + /* Both enabled by default (can be cleared by function_graph tracer flags */ 248 + static bool fgraph_sleep_time = true; 249 + static bool fgraph_graph_time = true; 250 + 246 251 #else 247 252 static inline void update_function_graph_func(void) { } 248 253 #endif ··· 922 917 923 918 calltime = trace->rettime - trace->calltime; 924 919 925 - if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) { 920 + if (!fgraph_graph_time) { 926 921 int index; 927 922 928 923 index = trace->depth; ··· 3425 3420 inode, file); 3426 3421 } 3427 3422 3428 - static int ftrace_match(char *str, char *regex, int len, int type) 3423 + /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 3424 + struct ftrace_glob { 3425 + char *search; 3426 + unsigned len; 3427 + int type; 3428 + }; 3429 + 3430 + static int ftrace_match(char *str, struct ftrace_glob *g) 3429 3431 { 3430 3432 int matched = 0; 3431 3433 int slen; 3432 3434 3433 - switch (type) { 3435 + switch (g->type) { 3434 3436 case MATCH_FULL: 3435 - if (strcmp(str, regex) == 0) 3437 + if (strcmp(str, g->search) == 0) 3436 3438 matched = 1; 3437 3439 break; 3438 3440 case MATCH_FRONT_ONLY: 3439 - if (strncmp(str, regex, len) == 0) 3441 + if (strncmp(str, g->search, g->len) == 0) 3440 3442 matched = 1; 3441 3443 break; 3442 3444 case MATCH_MIDDLE_ONLY: 3443 - if (strstr(str, regex)) 3445 + if (strstr(str, g->search)) 3444 3446 matched = 1; 3445 3447 break; 3446 3448 case MATCH_END_ONLY: 3447 3449 slen = strlen(str); 3448 - if (slen >= len && memcmp(str + slen - len, regex, len) == 0) 3450 + if (slen >= g->len && 3451 + memcmp(str + slen - g->len, g->search, g->len) == 0) 3449 3452 matched = 1; 3450 3453 break; 3451 3454 } ··· 3462 3449 } 3463 3450 3464 3451 static int 3465 - enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) 3452 + enter_record(struct ftrace_hash *hash, struct 
dyn_ftrace *rec, int clear_filter) 3466 3453 { 3467 3454 struct ftrace_func_entry *entry; 3468 3455 int ret = 0; 3469 3456 3470 3457 entry = ftrace_lookup_ip(hash, rec->ip); 3471 - if (not) { 3458 + if (clear_filter) { 3472 3459 /* Do nothing if it doesn't exist */ 3473 3460 if (!entry) 3474 3461 return 0; ··· 3485 3472 } 3486 3473 3487 3474 static int 3488 - ftrace_match_record(struct dyn_ftrace *rec, char *mod, 3489 - char *regex, int len, int type) 3475 + ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 3476 + struct ftrace_glob *mod_g, int exclude_mod) 3490 3477 { 3491 3478 char str[KSYM_SYMBOL_LEN]; 3492 3479 char *modname; 3493 3480 3494 3481 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); 3495 3482 3496 - if (mod) { 3497 - /* module lookup requires matching the module */ 3498 - if (!modname || strcmp(modname, mod)) 3483 + if (mod_g) { 3484 + int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 3485 + 3486 + /* blank module name to match all modules */ 3487 + if (!mod_g->len) { 3488 + /* blank module globbing: modname xor exclude_mod */ 3489 + if ((!exclude_mod) != (!modname)) 3490 + goto func_match; 3491 + return 0; 3492 + } 3493 + 3494 + /* not matching the module */ 3495 + if (!modname || !mod_matches) { 3496 + if (exclude_mod) 3497 + goto func_match; 3498 + else 3499 + return 0; 3500 + } 3501 + 3502 + if (mod_matches && exclude_mod) 3499 3503 return 0; 3500 3504 3505 + func_match: 3501 3506 /* blank search means to match all funcs in the mod */ 3502 - if (!len) 3507 + if (!func_g->len) 3503 3508 return 1; 3504 3509 } 3505 3510 3506 - return ftrace_match(str, regex, len, type); 3511 + return ftrace_match(str, func_g); 3507 3512 } 3508 3513 3509 3514 static int 3510 - match_records(struct ftrace_hash *hash, char *buff, 3511 - int len, char *mod, int not) 3515 + match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 3512 3516 { 3513 - unsigned search_len = 0; 3514 3517 struct ftrace_page *pg; 3515 3518 
struct dyn_ftrace *rec; 3516 - int type = MATCH_FULL; 3517 - char *search = buff; 3519 + struct ftrace_glob func_g = { .type = MATCH_FULL }; 3520 + struct ftrace_glob mod_g = { .type = MATCH_FULL }; 3521 + struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL; 3522 + int exclude_mod = 0; 3518 3523 int found = 0; 3519 3524 int ret; 3525 + int clear_filter; 3520 3526 3521 - if (len) { 3522 - type = filter_parse_regex(buff, len, &search, &not); 3523 - search_len = strlen(search); 3527 + if (func) { 3528 + func_g.type = filter_parse_regex(func, len, &func_g.search, 3529 + &clear_filter); 3530 + func_g.len = strlen(func_g.search); 3531 + } 3532 + 3533 + if (mod) { 3534 + mod_g.type = filter_parse_regex(mod, strlen(mod), 3535 + &mod_g.search, &exclude_mod); 3536 + mod_g.len = strlen(mod_g.search); 3524 3537 } 3525 3538 3526 3539 mutex_lock(&ftrace_lock); ··· 3555 3516 goto out_unlock; 3556 3517 3557 3518 do_for_each_ftrace_rec(pg, rec) { 3558 - if (ftrace_match_record(rec, mod, search, search_len, type)) { 3559 - ret = enter_record(hash, rec, not); 3519 + if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3520 + ret = enter_record(hash, rec, clear_filter); 3560 3521 if (ret < 0) { 3561 3522 found = ret; 3562 3523 goto out_unlock; ··· 3573 3534 static int 3574 3535 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 3575 3536 { 3576 - return match_records(hash, buff, len, NULL, 0); 3537 + return match_records(hash, buff, len, NULL); 3577 3538 } 3578 3539 3579 - static int 3580 - ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) 3581 - { 3582 - int not = 0; 3583 - 3584 - /* blank or '*' mean the same */ 3585 - if (strcmp(buff, "*") == 0) 3586 - buff[0] = 0; 3587 - 3588 - /* handle the case of 'dont filter this module' */ 3589 - if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { 3590 - buff[0] = 0; 3591 - not = 1; 3592 - } 3593 - 3594 - return match_records(hash, buff, strlen(buff), mod, not); 3595 - } 3596 3540 
3597 3541 /* 3598 3542 * We register the module command as a template to show others how ··· 3584 3562 3585 3563 static int 3586 3564 ftrace_mod_callback(struct ftrace_hash *hash, 3587 - char *func, char *cmd, char *param, int enable) 3565 + char *func, char *cmd, char *module, int enable) 3588 3566 { 3589 - char *mod; 3590 - int ret = -EINVAL; 3567 + int ret; 3591 3568 3592 3569 /* 3593 3570 * cmd == 'mod' because we only registered this func ··· 3595 3574 * you can tell which command was used by the cmd 3596 3575 * parameter. 3597 3576 */ 3598 - 3599 - /* we must have a module name */ 3600 - if (!param) 3601 - return ret; 3602 - 3603 - mod = strsep(&param, ":"); 3604 - if (!strlen(mod)) 3605 - return ret; 3606 - 3607 - ret = ftrace_match_module_records(hash, func, mod); 3577 + ret = match_records(hash, func, strlen(func), module); 3608 3578 if (!ret) 3609 - ret = -EINVAL; 3579 + return -EINVAL; 3610 3580 if (ret < 0) 3611 3581 return ret; 3612 - 3613 3582 return 0; 3614 3583 } 3615 3584 ··· 3710 3699 { 3711 3700 struct ftrace_ops_hash old_hash_ops; 3712 3701 struct ftrace_func_probe *entry; 3702 + struct ftrace_glob func_g; 3713 3703 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3714 3704 struct ftrace_hash *old_hash = *orig_hash; 3715 3705 struct ftrace_hash *hash; 3716 3706 struct ftrace_page *pg; 3717 3707 struct dyn_ftrace *rec; 3718 - int type, len, not; 3708 + int not; 3719 3709 unsigned long key; 3720 3710 int count = 0; 3721 - char *search; 3722 3711 int ret; 3723 3712 3724 - type = filter_parse_regex(glob, strlen(glob), &search, &not); 3725 - len = strlen(search); 3713 + func_g.type = filter_parse_regex(glob, strlen(glob), 3714 + &func_g.search, &not); 3715 + func_g.len = strlen(func_g.search); 3726 3716 3727 3717 /* we do not support '!' 
for function probes */ 3728 3718 if (WARN_ON(not)) ··· 3750 3738 3751 3739 do_for_each_ftrace_rec(pg, rec) { 3752 3740 3753 - if (!ftrace_match_record(rec, NULL, search, len, type)) 3741 + if (!ftrace_match_record(rec, &func_g, NULL, 0)) 3754 3742 continue; 3755 3743 3756 3744 entry = kmalloc(sizeof(*entry), GFP_KERNEL); ··· 3823 3811 struct ftrace_func_entry *rec_entry; 3824 3812 struct ftrace_func_probe *entry; 3825 3813 struct ftrace_func_probe *p; 3814 + struct ftrace_glob func_g; 3826 3815 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; 3827 3816 struct ftrace_hash *old_hash = *orig_hash; 3828 3817 struct list_head free_list; 3829 3818 struct ftrace_hash *hash; 3830 3819 struct hlist_node *tmp; 3831 3820 char str[KSYM_SYMBOL_LEN]; 3832 - int type = MATCH_FULL; 3833 - int i, len = 0; 3834 - char *search; 3835 - int ret; 3821 + int i, ret; 3836 3822 3837 3823 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3838 - glob = NULL; 3824 + func_g.search = NULL; 3839 3825 else if (glob) { 3840 3826 int not; 3841 3827 3842 - type = filter_parse_regex(glob, strlen(glob), &search, &not); 3843 - len = strlen(search); 3828 + func_g.type = filter_parse_regex(glob, strlen(glob), 3829 + &func_g.search, &not); 3830 + func_g.len = strlen(func_g.search); 3831 + func_g.search = glob; 3844 3832 3845 3833 /* we do not support '!' 
for function probes */ 3846 3834 if (WARN_ON(not)) ··· 3869 3857 continue; 3870 3858 3871 3859 /* do this last, since it is the most expensive */ 3872 - if (glob) { 3860 + if (func_g.search) { 3873 3861 kallsyms_lookup(entry->ip, NULL, NULL, 3874 3862 NULL, str); 3875 - if (!ftrace_match(str, glob, len, type)) 3863 + if (!ftrace_match(str, &func_g)) 3876 3864 continue; 3877 3865 } 3878 3866 ··· 3901 3889 ftrace_free_entry(entry); 3902 3890 } 3903 3891 mutex_unlock(&ftrace_lock); 3904 - 3892 + 3905 3893 out_unlock: 3906 3894 mutex_unlock(&trace_probe_ops.func_hash->regex_lock); 3907 3895 free_ftrace_hash(hash); ··· 4617 4605 static int 4618 4606 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) 4619 4607 { 4608 + struct ftrace_glob func_g; 4620 4609 struct dyn_ftrace *rec; 4621 4610 struct ftrace_page *pg; 4622 - int search_len; 4623 4611 int fail = 1; 4624 - int type, not; 4625 - char *search; 4612 + int not; 4626 4613 bool exists; 4627 4614 int i; 4628 4615 4629 4616 /* decode regex */ 4630 - type = filter_parse_regex(buffer, strlen(buffer), &search, &not); 4617 + func_g.type = filter_parse_regex(buffer, strlen(buffer), 4618 + &func_g.search, &not); 4631 4619 if (!not && *idx >= size) 4632 4620 return -EBUSY; 4633 4621 4634 - search_len = strlen(search); 4622 + func_g.len = strlen(func_g.search); 4635 4623 4636 4624 mutex_lock(&ftrace_lock); 4637 4625 ··· 4642 4630 4643 4631 do_for_each_ftrace_rec(pg, rec) { 4644 4632 4645 - if (ftrace_match_record(rec, NULL, search, search_len, type)) { 4633 + if (ftrace_match_record(rec, &func_g, NULL, 0)) { 4646 4634 /* if it is in the array */ 4647 4635 exists = false; 4648 4636 for (i = 0; i < *idx; i++) { ··· 4795 4783 return 0; 4796 4784 } 4797 4785 4798 - static void ftrace_swap_ips(void *a, void *b, int size) 4799 - { 4800 - unsigned long *ipa = a; 4801 - unsigned long *ipb = b; 4802 - unsigned long t; 4803 - 4804 - t = *ipa; 4805 - *ipa = *ipb; 4806 - *ipb = t; 4807 - } 4808 - 4809 4786 static int 
ftrace_process_locs(struct module *mod, 4810 4787 unsigned long *start, 4811 4788 unsigned long *end) ··· 4814 4813 return 0; 4815 4814 4816 4815 sort(start, count, sizeof(*start), 4817 - ftrace_cmp_ips, ftrace_swap_ips); 4816 + ftrace_cmp_ips, NULL); 4818 4817 4819 4818 start_pg = ftrace_allocate_pages(count); 4820 4819 if (!start_pg) ··· 5640 5639 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) 5641 5640 }; 5642 5641 5642 + void ftrace_graph_sleep_time_control(bool enable) 5643 + { 5644 + fgraph_sleep_time = enable; 5645 + } 5646 + 5647 + void ftrace_graph_graph_time_control(bool enable) 5648 + { 5649 + fgraph_graph_time = enable; 5650 + } 5651 + 5643 5652 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 5644 5653 { 5645 5654 return 0; ··· 5718 5707 * Does the user want to count the time a function was asleep. 5719 5708 * If so, do not update the time stamps. 5720 5709 */ 5721 - if (trace_flags & TRACE_ITER_SLEEP_TIME) 5710 + if (fgraph_sleep_time) 5722 5711 return; 5723 5712 5724 5713 timestamp = trace_clock_local();
+10 -10
kernel/trace/ring_buffer.c
··· 829 829 * writer is ever on it, the previous pointer never points 830 830 * back to the reader page. 831 831 */ 832 - static int rb_is_reader_page(struct buffer_page *page) 832 + static bool rb_is_reader_page(struct buffer_page *page) 833 833 { 834 834 struct list_head *list = page->list.prev; 835 835 ··· 2270 2270 return skip_time_extend(event); 2271 2271 } 2272 2272 2273 - static inline int rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 2273 + static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 2274 2274 struct ring_buffer_event *event); 2275 2275 2276 2276 /** ··· 2498 2498 event->time_delta = 1; 2499 2499 } 2500 2500 2501 - static inline int 2501 + static inline bool 2502 2502 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, 2503 2503 struct ring_buffer_event *event) 2504 2504 { ··· 3039 3039 } 3040 3040 EXPORT_SYMBOL_GPL(ring_buffer_write); 3041 3041 3042 - static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3042 + static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 3043 3043 { 3044 3044 struct buffer_page *reader = cpu_buffer->reader_page; 3045 3045 struct buffer_page *head = rb_set_head_page(cpu_buffer); ··· 3047 3047 3048 3048 /* In case of error, head will be NULL */ 3049 3049 if (unlikely(!head)) 3050 - return 1; 3050 + return true; 3051 3051 3052 3052 return reader->read == rb_page_commit(reader) && 3053 3053 (commit == reader || ··· 4267 4267 * rind_buffer_empty - is the ring buffer empty? 
4268 4268 * @buffer: The ring buffer to test 4269 4269 */ 4270 - int ring_buffer_empty(struct ring_buffer *buffer) 4270 + bool ring_buffer_empty(struct ring_buffer *buffer) 4271 4271 { 4272 4272 struct ring_buffer_per_cpu *cpu_buffer; 4273 4273 unsigned long flags; ··· 4285 4285 local_irq_restore(flags); 4286 4286 4287 4287 if (!ret) 4288 - return 0; 4288 + return false; 4289 4289 } 4290 4290 4291 - return 1; 4291 + return true; 4292 4292 } 4293 4293 EXPORT_SYMBOL_GPL(ring_buffer_empty); 4294 4294 ··· 4297 4297 * @buffer: The ring buffer 4298 4298 * @cpu: The CPU buffer to test 4299 4299 */ 4300 - int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 4300 + bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) 4301 4301 { 4302 4302 struct ring_buffer_per_cpu *cpu_buffer; 4303 4303 unsigned long flags; ··· 4305 4305 int ret; 4306 4306 4307 4307 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4308 - return 1; 4308 + return true; 4309 4309 4310 4310 cpu_buffer = buffer->buffers[cpu]; 4311 4311 local_irq_save(flags);
+45 -36
kernel/trace/ring_buffer_benchmark.c
··· 24 24 static int wakeup_interval = 100; 25 25 26 26 static int reader_finish; 27 - static struct completion read_start; 28 - static struct completion read_done; 27 + static DECLARE_COMPLETION(read_start); 28 + static DECLARE_COMPLETION(read_done); 29 29 30 30 static struct ring_buffer *buffer; 31 31 static struct task_struct *producer; ··· 60 60 61 61 static int read_events; 62 62 63 - static int kill_test; 63 + static int test_error; 64 64 65 - #define KILL_TEST() \ 65 + #define TEST_ERROR() \ 66 66 do { \ 67 - if (!kill_test) { \ 68 - kill_test = 1; \ 67 + if (!test_error) { \ 68 + test_error = 1; \ 69 69 WARN_ON(1); \ 70 70 } \ 71 71 } while (0) ··· 74 74 EVENT_FOUND, 75 75 EVENT_DROPPED, 76 76 }; 77 + 78 + static bool break_test(void) 79 + { 80 + return test_error || kthread_should_stop(); 81 + } 77 82 78 83 static enum event_status read_event(int cpu) 79 84 { ··· 92 87 93 88 entry = ring_buffer_event_data(event); 94 89 if (*entry != cpu) { 95 - KILL_TEST(); 90 + TEST_ERROR(); 96 91 return EVENT_DROPPED; 97 92 } 98 93 ··· 120 115 rpage = bpage; 121 116 /* The commit may have missed event flags set, clear them */ 122 117 commit = local_read(&rpage->commit) & 0xfffff; 123 - for (i = 0; i < commit && !kill_test; i += inc) { 118 + for (i = 0; i < commit && !test_error ; i += inc) { 124 119 125 120 if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) { 126 - KILL_TEST(); 121 + TEST_ERROR(); 127 122 break; 128 123 } 129 124 ··· 133 128 case RINGBUF_TYPE_PADDING: 134 129 /* failed writes may be discarded events */ 135 130 if (!event->time_delta) 136 - KILL_TEST(); 131 + TEST_ERROR(); 137 132 inc = event->array[0] + 4; 138 133 break; 139 134 case RINGBUF_TYPE_TIME_EXTEND: ··· 142 137 case 0: 143 138 entry = ring_buffer_event_data(event); 144 139 if (*entry != cpu) { 145 - KILL_TEST(); 140 + TEST_ERROR(); 146 141 break; 147 142 } 148 143 read++; 149 144 if (!event->array[0]) { 150 - KILL_TEST(); 145 + TEST_ERROR(); 151 146 break; 152 147 } 153 148 inc = 
event->array[0] + 4; ··· 155 150 default: 156 151 entry = ring_buffer_event_data(event); 157 152 if (*entry != cpu) { 158 - KILL_TEST(); 153 + TEST_ERROR(); 159 154 break; 160 155 } 161 156 read++; 162 157 inc = ((event->type_len + 1) * 4); 163 158 } 164 - if (kill_test) 159 + if (test_error) 165 160 break; 166 161 167 162 if (inc <= 0) { 168 - KILL_TEST(); 163 + TEST_ERROR(); 169 164 break; 170 165 } 171 166 } ··· 183 178 read_events ^= 1; 184 179 185 180 read = 0; 186 - while (!reader_finish && !kill_test) { 187 - int found; 181 + /* 182 + * Continue running until the producer specifically asks to stop 183 + * and is ready for the completion. 184 + */ 185 + while (!READ_ONCE(reader_finish)) { 186 + int found = 1; 188 187 189 - do { 188 + while (found && !test_error) { 190 189 int cpu; 191 190 192 191 found = 0; ··· 202 193 else 203 194 stat = read_page(cpu); 204 195 205 - if (kill_test) 196 + if (test_error) 206 197 break; 198 + 207 199 if (stat == EVENT_FOUND) 208 200 found = 1; 209 - } 210 - } while (found && !kill_test); 211 201 202 + } 203 + } 204 + 205 + /* Wait till the producer wakes us up when there is more data 206 + * available or when the producer wants us to finish reading. 
207 + */ 212 208 set_current_state(TASK_INTERRUPTIBLE); 213 209 if (reader_finish) 214 210 break; 215 211 216 212 schedule(); 217 213 } 214 + __set_current_state(TASK_RUNNING); 218 215 reader_finish = 0; 219 216 complete(&read_done); 220 217 } ··· 278 263 if (cnt % wakeup_interval) 279 264 cond_resched(); 280 265 #endif 281 - if (kthread_should_stop()) 282 - kill_test = 1; 283 - 284 - } while (ktime_before(end_time, timeout) && !kill_test); 266 + } while (ktime_before(end_time, timeout) && !break_test()); 285 267 trace_printk("End ring buffer hammer\n"); 286 268 287 269 if (consumer) { ··· 288 276 /* the completions must be visible before the finish var */ 289 277 smp_wmb(); 290 278 reader_finish = 1; 291 - /* finish var visible before waking up the consumer */ 292 - smp_wmb(); 293 279 wake_up_process(consumer); 294 280 wait_for_completion(&read_done); 295 281 } ··· 297 287 entries = ring_buffer_entries(buffer); 298 288 overruns = ring_buffer_overruns(buffer); 299 289 300 - if (kill_test && !kthread_should_stop()) 290 + if (test_error) 301 291 trace_printk("ERROR!\n"); 302 292 303 293 if (!disable_reader) { ··· 378 368 379 369 static int ring_buffer_consumer_thread(void *arg) 380 370 { 381 - while (!kthread_should_stop() && !kill_test) { 371 + while (!break_test()) { 382 372 complete(&read_start); 383 373 384 374 ring_buffer_consumer(); 385 375 386 376 set_current_state(TASK_INTERRUPTIBLE); 387 - if (kthread_should_stop() || kill_test) 377 + if (break_test()) 388 378 break; 389 - 390 379 schedule(); 391 380 } 392 381 __set_current_state(TASK_RUNNING); ··· 398 389 399 390 static int ring_buffer_producer_thread(void *arg) 400 391 { 401 - init_completion(&read_start); 402 - 403 - while (!kthread_should_stop() && !kill_test) { 392 + while (!break_test()) { 404 393 ring_buffer_reset(buffer); 405 394 406 395 if (consumer) { 407 - smp_wmb(); 408 396 wake_up_process(consumer); 409 397 wait_for_completion(&read_start); 410 398 } 411 399 412 400 ring_buffer_producer(); 413 - 
if (kill_test) 401 + if (break_test()) 414 402 goto out_kill; 415 403 416 404 trace_printk("Sleeping for 10 secs\n"); 417 405 set_current_state(TASK_INTERRUPTIBLE); 406 + if (break_test()) 407 + goto out_kill; 418 408 schedule_timeout(HZ * SLEEP_TIME); 419 409 } 420 410 421 411 out_kill: 412 + __set_current_state(TASK_RUNNING); 422 413 if (!kthread_should_stop()) 423 414 wait_to_die(); 424 415
+292 -166
kernel/trace/trace.c
··· 214 214 215 215 216 216 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; 217 - static char *trace_boot_options __initdata; 218 217 219 218 static int __init set_trace_boot_options(char *str) 220 219 { 221 220 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); 222 - trace_boot_options = trace_boot_options_buf; 223 221 return 0; 224 222 } 225 223 __setup("trace_options=", set_trace_boot_options); ··· 248 250 return nsec; 249 251 } 250 252 253 + /* trace_flags holds trace_options default values */ 254 + #define TRACE_DEFAULT_FLAGS \ 255 + (FUNCTION_DEFAULT_FLAGS | \ 256 + TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \ 257 + TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \ 258 + TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \ 259 + TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS) 260 + 261 + /* trace_options that are only supported by global_trace */ 262 + #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \ 263 + TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD) 264 + 265 + 251 266 /* 252 267 * The global_trace is the descriptor that holds the tracing 253 268 * buffers for the live tracing. For each CPU, it contains ··· 273 262 * pages for the buffer for that CPU. Each CPU has the same number 274 263 * of pages allocated for its buffer. 
275 264 */ 276 - static struct trace_array global_trace; 265 + static struct trace_array global_trace = { 266 + .trace_flags = TRACE_DEFAULT_FLAGS, 267 + }; 277 268 278 269 LIST_HEAD(ftrace_trace_arrays); 279 270 ··· 481 468 482 469 #endif 483 470 484 - /* trace_flags holds trace_options default values */ 485 - unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | 486 - TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | 487 - TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | 488 - TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION; 471 + #ifdef CONFIG_STACKTRACE 472 + static void __ftrace_trace_stack(struct ring_buffer *buffer, 473 + unsigned long flags, 474 + int skip, int pc, struct pt_regs *regs); 475 + static inline void ftrace_trace_stack(struct trace_array *tr, 476 + struct ring_buffer *buffer, 477 + unsigned long flags, 478 + int skip, int pc, struct pt_regs *regs); 479 + 480 + #else 481 + static inline void __ftrace_trace_stack(struct ring_buffer *buffer, 482 + unsigned long flags, 483 + int skip, int pc, struct pt_regs *regs) 484 + { 485 + } 486 + static inline void ftrace_trace_stack(struct trace_array *tr, 487 + struct ring_buffer *buffer, 488 + unsigned long flags, 489 + int skip, int pc, struct pt_regs *regs) 490 + { 491 + } 492 + 493 + #endif 489 494 490 495 static void tracer_tracing_on(struct trace_array *tr) 491 496 { ··· 549 518 int alloc; 550 519 int pc; 551 520 552 - if (!(trace_flags & TRACE_ITER_PRINTK)) 521 + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 553 522 return 0; 554 523 555 524 pc = preempt_count(); ··· 579 548 entry->buf[size] = '\0'; 580 549 581 550 __buffer_unlock_commit(buffer, event); 582 - ftrace_trace_stack(buffer, irq_flags, 4, pc); 551 + ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); 583 552 584 553 return size; 585 554 } ··· 599 568 int size = sizeof(struct bputs_entry); 600 569 int pc; 601 570 602 - if (!(trace_flags & 
TRACE_ITER_PRINTK)) 571 + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 603 572 return 0; 604 573 605 574 pc = preempt_count(); ··· 619 588 entry->str = str; 620 589 621 590 __buffer_unlock_commit(buffer, event); 622 - ftrace_trace_stack(buffer, irq_flags, 4, pc); 591 + ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL); 623 592 624 593 return 1; 625 594 } ··· 865 834 return nsecs / 1000; 866 835 } 867 836 837 + /* 838 + * TRACE_FLAGS is defined as a tuple matching bit masks with strings. 839 + * It uses C(a, b) where 'a' is the enum name and 'b' is the string that 840 + * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list 841 + * of strings in the order that the enums were defined. 842 + */ 843 + #undef C 844 + #define C(a, b) b 845 + 868 846 /* These must match the bit postions in trace_iterator_flags */ 869 847 static const char *trace_options[] = { 870 - "print-parent", 871 - "sym-offset", 872 - "sym-addr", 873 - "verbose", 874 - "raw", 875 - "hex", 876 - "bin", 877 - "block", 878 - "stacktrace", 879 - "trace_printk", 880 - "ftrace_preempt", 881 - "branch", 882 - "annotate", 883 - "userstacktrace", 884 - "sym-userobj", 885 - "printk-msg-only", 886 - "context-info", 887 - "latency-format", 888 - "sleep-time", 889 - "graph-time", 890 - "record-cmd", 891 - "overwrite", 892 - "disable_on_free", 893 - "irq-info", 894 - "markers", 895 - "function-trace", 848 + TRACE_FLAGS 896 849 NULL 897 850 }; 898 851 ··· 1219 1204 } 1220 1205 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 1221 1206 1207 + static void add_tracer_options(struct trace_array *tr, struct tracer *t); 1208 + 1209 + static void __init apply_trace_boot_options(void); 1210 + 1222 1211 /** 1223 1212 * register_tracer - register a tracer with the ftrace system. 1224 1213 * @type - the plugin for the tracer 1225 1214 * 1226 1215 * Register a new plugin tracer. 
1227 1216 */ 1228 - int register_tracer(struct tracer *type) 1217 + int __init register_tracer(struct tracer *type) 1229 1218 { 1230 1219 struct tracer *t; 1231 1220 int ret = 0; ··· 1272 1253 1273 1254 type->next = trace_types; 1274 1255 trace_types = type; 1256 + add_tracer_options(&global_trace, type); 1275 1257 1276 1258 out: 1277 1259 tracing_selftest_running = false; ··· 1288 1268 /* Do we want this tracer to start on bootup? */ 1289 1269 tracing_set_tracer(&global_trace, type->name); 1290 1270 default_bootup_tracer = NULL; 1271 + 1272 + apply_trace_boot_options(); 1273 + 1291 1274 /* disable other selftests, since this will break it. */ 1292 1275 tracing_selftest_disabled = true; 1293 1276 #ifdef CONFIG_FTRACE_STARTUP_TEST ··· 1694 1671 ring_buffer_unlock_commit(buffer, event); 1695 1672 } 1696 1673 1697 - static inline void 1698 - __trace_buffer_unlock_commit(struct ring_buffer *buffer, 1699 - struct ring_buffer_event *event, 1700 - unsigned long flags, int pc) 1701 - { 1702 - __buffer_unlock_commit(buffer, event); 1703 - 1704 - ftrace_trace_stack(buffer, flags, 6, pc); 1705 - ftrace_trace_userstack(buffer, flags, pc); 1706 - } 1707 - 1708 - void trace_buffer_unlock_commit(struct ring_buffer *buffer, 1674 + void trace_buffer_unlock_commit(struct trace_array *tr, 1675 + struct ring_buffer *buffer, 1709 1676 struct ring_buffer_event *event, 1710 1677 unsigned long flags, int pc) 1711 1678 { 1712 - __trace_buffer_unlock_commit(buffer, event, flags, pc); 1679 + __buffer_unlock_commit(buffer, event); 1680 + 1681 + ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); 1682 + ftrace_trace_userstack(buffer, flags, pc); 1713 1683 } 1714 1684 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); 1715 1685 ··· 1745 1729 } 1746 1730 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); 1747 1731 1748 - void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, 1749 - struct ring_buffer_event *event, 1750 - unsigned long flags, int pc) 1751 - { 1752 - 
__trace_buffer_unlock_commit(buffer, event, flags, pc); 1753 - } 1754 - EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit); 1755 - 1756 - void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer, 1732 + void trace_buffer_unlock_commit_regs(struct trace_array *tr, 1733 + struct ring_buffer *buffer, 1757 1734 struct ring_buffer_event *event, 1758 1735 unsigned long flags, int pc, 1759 1736 struct pt_regs *regs) 1760 1737 { 1761 1738 __buffer_unlock_commit(buffer, event); 1762 1739 1763 - ftrace_trace_stack_regs(buffer, flags, 0, pc, regs); 1740 + ftrace_trace_stack(tr, buffer, flags, 6, pc, regs); 1764 1741 ftrace_trace_userstack(buffer, flags, pc); 1765 1742 } 1766 1743 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs); ··· 1882 1873 1883 1874 } 1884 1875 1885 - void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, 1886 - int skip, int pc, struct pt_regs *regs) 1876 + static inline void ftrace_trace_stack(struct trace_array *tr, 1877 + struct ring_buffer *buffer, 1878 + unsigned long flags, 1879 + int skip, int pc, struct pt_regs *regs) 1887 1880 { 1888 - if (!(trace_flags & TRACE_ITER_STACKTRACE)) 1881 + if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) 1889 1882 return; 1890 1883 1891 1884 __ftrace_trace_stack(buffer, flags, skip, pc, regs); 1892 - } 1893 - 1894 - void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, 1895 - int skip, int pc) 1896 - { 1897 - if (!(trace_flags & TRACE_ITER_STACKTRACE)) 1898 - return; 1899 - 1900 - __ftrace_trace_stack(buffer, flags, skip, pc, NULL); 1901 1885 } 1902 1886 1903 1887 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, ··· 1931 1929 struct userstack_entry *entry; 1932 1930 struct stack_trace trace; 1933 1931 1934 - if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) 1932 + if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE)) 1935 1933 return; 1936 1934 1937 1935 /* ··· 2175 2173 memcpy(entry->buf, tbuffer, sizeof(u32) * len); 2176 2174 if 
(!call_filter_check_discard(call, entry, buffer, event)) { 2177 2175 __buffer_unlock_commit(buffer, event); 2178 - ftrace_trace_stack(buffer, flags, 6, pc); 2176 + ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL); 2179 2177 } 2180 2178 2181 2179 out: ··· 2227 2225 memcpy(&entry->buf, tbuffer, len + 1); 2228 2226 if (!call_filter_check_discard(call, entry, buffer, event)) { 2229 2227 __buffer_unlock_commit(buffer, event); 2230 - ftrace_trace_stack(buffer, flags, 6, pc); 2228 + ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL); 2231 2229 } 2232 2230 out: 2233 2231 preempt_enable_notrace(); ··· 2248 2246 int ret; 2249 2247 va_list ap; 2250 2248 2251 - if (!(trace_flags & TRACE_ITER_PRINTK)) 2249 + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 2252 2250 return 0; 2253 2251 2254 2252 va_start(ap, fmt); ··· 2263 2261 int ret; 2264 2262 va_list ap; 2265 2263 2266 - if (!(trace_flags & TRACE_ITER_PRINTK)) 2264 + if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) 2267 2265 return 0; 2268 2266 2269 2267 va_start(ap, fmt); ··· 2604 2602 void 2605 2603 print_trace_header(struct seq_file *m, struct trace_iterator *iter) 2606 2604 { 2607 - unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2605 + unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); 2608 2606 struct trace_buffer *buf = iter->trace_buffer; 2609 2607 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); 2610 2608 struct tracer *type = iter->trace; ··· 2666 2664 static void test_cpu_buff_start(struct trace_iterator *iter) 2667 2665 { 2668 2666 struct trace_seq *s = &iter->seq; 2667 + struct trace_array *tr = iter->tr; 2669 2668 2670 - if (!(trace_flags & TRACE_ITER_ANNOTATE)) 2669 + if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) 2671 2670 return; 2672 2671 2673 2672 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 2674 2673 return; 2675 2674 2676 - if (cpumask_test_cpu(iter->cpu, iter->started)) 2675 + if (iter->started && cpumask_test_cpu(iter->cpu, 
iter->started)) 2677 2676 return; 2678 2677 2679 2678 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) 2680 2679 return; 2681 2680 2682 - cpumask_set_cpu(iter->cpu, iter->started); 2681 + if (iter->started) 2682 + cpumask_set_cpu(iter->cpu, iter->started); 2683 2683 2684 2684 /* Don't print started cpu buffer for the first entry of the trace */ 2685 2685 if (iter->idx > 1) ··· 2691 2687 2692 2688 static enum print_line_t print_trace_fmt(struct trace_iterator *iter) 2693 2689 { 2690 + struct trace_array *tr = iter->tr; 2694 2691 struct trace_seq *s = &iter->seq; 2695 - unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 2692 + unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 2696 2693 struct trace_entry *entry; 2697 2694 struct trace_event *event; 2698 2695 ··· 2703 2698 2704 2699 event = ftrace_find_event(entry->type); 2705 2700 2706 - if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2701 + if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 2707 2702 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 2708 2703 trace_print_lat_context(iter); 2709 2704 else ··· 2723 2718 2724 2719 static enum print_line_t print_raw_fmt(struct trace_iterator *iter) 2725 2720 { 2721 + struct trace_array *tr = iter->tr; 2726 2722 struct trace_seq *s = &iter->seq; 2727 2723 struct trace_entry *entry; 2728 2724 struct trace_event *event; 2729 2725 2730 2726 entry = iter->ent; 2731 2727 2732 - if (trace_flags & TRACE_ITER_CONTEXT_INFO) 2728 + if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) 2733 2729 trace_seq_printf(s, "%d %d %llu ", 2734 2730 entry->pid, iter->cpu, iter->ts); 2735 2731 ··· 2748 2742 2749 2743 static enum print_line_t print_hex_fmt(struct trace_iterator *iter) 2750 2744 { 2745 + struct trace_array *tr = iter->tr; 2751 2746 struct trace_seq *s = &iter->seq; 2752 2747 unsigned char newline = '\n'; 2753 2748 struct trace_entry *entry; ··· 2756 2749 2757 2750 entry = iter->ent; 2758 2751 2759 - if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 
2752 + if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 2760 2753 SEQ_PUT_HEX_FIELD(s, entry->pid); 2761 2754 SEQ_PUT_HEX_FIELD(s, iter->cpu); 2762 2755 SEQ_PUT_HEX_FIELD(s, iter->ts); ··· 2778 2771 2779 2772 static enum print_line_t print_bin_fmt(struct trace_iterator *iter) 2780 2773 { 2774 + struct trace_array *tr = iter->tr; 2781 2775 struct trace_seq *s = &iter->seq; 2782 2776 struct trace_entry *entry; 2783 2777 struct trace_event *event; 2784 2778 2785 2779 entry = iter->ent; 2786 2780 2787 - if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 2781 + if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 2788 2782 SEQ_PUT_FIELD(s, entry->pid); 2789 2783 SEQ_PUT_FIELD(s, iter->cpu); 2790 2784 SEQ_PUT_FIELD(s, iter->ts); ··· 2834 2826 /* Called with trace_event_read_lock() held. */ 2835 2827 enum print_line_t print_trace_line(struct trace_iterator *iter) 2836 2828 { 2829 + struct trace_array *tr = iter->tr; 2830 + unsigned long trace_flags = tr->trace_flags; 2837 2831 enum print_line_t ret; 2838 2832 2839 2833 if (iter->lost_events) { ··· 2881 2871 void trace_latency_header(struct seq_file *m) 2882 2872 { 2883 2873 struct trace_iterator *iter = m->private; 2874 + struct trace_array *tr = iter->tr; 2884 2875 2885 2876 /* print nothing if the buffers are empty */ 2886 2877 if (trace_empty(iter)) ··· 2890 2879 if (iter->iter_flags & TRACE_FILE_LAT_FMT) 2891 2880 print_trace_header(m, iter); 2892 2881 2893 - if (!(trace_flags & TRACE_ITER_VERBOSE)) 2882 + if (!(tr->trace_flags & TRACE_ITER_VERBOSE)) 2894 2883 print_lat_help_header(m); 2895 2884 } 2896 2885 2897 2886 void trace_default_header(struct seq_file *m) 2898 2887 { 2899 2888 struct trace_iterator *iter = m->private; 2889 + struct trace_array *tr = iter->tr; 2890 + unsigned long trace_flags = tr->trace_flags; 2900 2891 2901 2892 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 2902 2893 return; ··· 3243 3230 iter = __tracing_open(inode, file, false); 3244 3231 if (IS_ERR(iter)) 3245 3232 ret = PTR_ERR(iter); 3246 - else if 
(trace_flags & TRACE_ITER_LATENCY_FMT) 3233 + else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 3247 3234 iter->iter_flags |= TRACE_FILE_LAT_FMT; 3248 3235 } 3249 3236 ··· 3490 3477 trace_opts = tr->current_trace->flags->opts; 3491 3478 3492 3479 for (i = 0; trace_options[i]; i++) { 3493 - if (trace_flags & (1 << i)) 3480 + if (tr->trace_flags & (1 << i)) 3494 3481 seq_printf(m, "%s\n", trace_options[i]); 3495 3482 else 3496 3483 seq_printf(m, "no%s\n", trace_options[i]); ··· 3555 3542 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) 3556 3543 { 3557 3544 /* do nothing if flag is already set */ 3558 - if (!!(trace_flags & mask) == !!enabled) 3545 + if (!!(tr->trace_flags & mask) == !!enabled) 3559 3546 return 0; 3560 3547 3561 3548 /* Give the tracer a chance to approve the change */ ··· 3564 3551 return -EINVAL; 3565 3552 3566 3553 if (enabled) 3567 - trace_flags |= mask; 3554 + tr->trace_flags |= mask; 3568 3555 else 3569 - trace_flags &= ~mask; 3556 + tr->trace_flags &= ~mask; 3570 3557 3571 3558 if (mask == TRACE_ITER_RECORD_CMD) 3572 3559 trace_event_enable_cmd_record(enabled); ··· 3578 3565 #endif 3579 3566 } 3580 3567 3581 - if (mask == TRACE_ITER_PRINTK) 3568 + if (mask == TRACE_ITER_PRINTK) { 3582 3569 trace_printk_start_stop_comm(enabled); 3570 + trace_printk_control(enabled); 3571 + } 3583 3572 3584 3573 return 0; 3585 3574 } ··· 3592 3577 int neg = 0; 3593 3578 int ret = -ENODEV; 3594 3579 int i; 3580 + size_t orig_len = strlen(option); 3595 3581 3596 3582 cmp = strstrip(option); 3597 3583 ··· 3616 3600 3617 3601 mutex_unlock(&trace_types_lock); 3618 3602 3603 + /* 3604 + * If the first trailing whitespace is replaced with '\0' by strstrip, 3605 + * turn it back into a space. 
3606 + */ 3607 + if (orig_len > strlen(option)) 3608 + option[strlen(option)] = ' '; 3609 + 3619 3610 return ret; 3611 + } 3612 + 3613 + static void __init apply_trace_boot_options(void) 3614 + { 3615 + char *buf = trace_boot_options_buf; 3616 + char *option; 3617 + 3618 + while (true) { 3619 + option = strsep(&buf, ","); 3620 + 3621 + if (!option) 3622 + break; 3623 + 3624 + if (*option) 3625 + trace_set_options(&global_trace, option); 3626 + 3627 + /* Put back the comma to allow this to be called again */ 3628 + if (buf) 3629 + *(buf - 1) = ','; 3630 + } 3620 3631 } 3621 3632 3622 3633 static ssize_t ··· 4340 4297 4341 4298 struct trace_option_dentry; 4342 4299 4343 - static struct trace_option_dentry * 4344 - create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 4345 - 4346 4300 static void 4347 - destroy_trace_option_files(struct trace_option_dentry *topts); 4301 + create_trace_option_files(struct trace_array *tr, struct tracer *tracer); 4348 4302 4349 4303 /* 4350 4304 * Used to clear out the tracer before deletion of an instance. ··· 4360 4320 tr->current_trace = &nop_trace; 4361 4321 } 4362 4322 4363 - static void update_tracer_options(struct trace_array *tr, struct tracer *t) 4323 + static void add_tracer_options(struct trace_array *tr, struct tracer *t) 4364 4324 { 4365 - static struct trace_option_dentry *topts; 4366 - 4367 4325 /* Only enable if the directory has been created already. 
*/ 4368 4326 if (!tr->dir) 4369 4327 return; 4370 4328 4371 - /* Currently, only the top instance has options */ 4372 - if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) 4373 - return; 4374 - 4375 - destroy_trace_option_files(topts); 4376 - topts = create_trace_option_files(tr, t); 4329 + create_trace_option_files(tr, t); 4377 4330 } 4378 4331 4379 4332 static int tracing_set_tracer(struct trace_array *tr, const char *buf) ··· 4435 4402 free_snapshot(tr); 4436 4403 } 4437 4404 #endif 4438 - update_tracer_options(tr, t); 4439 4405 4440 4406 #ifdef CONFIG_TRACER_MAX_TRACE 4441 4407 if (t->use_max_tr && !had_max_tr) { ··· 4601 4569 /* trace pipe does not show start of buffer */ 4602 4570 cpumask_setall(iter->started); 4603 4571 4604 - if (trace_flags & TRACE_ITER_LATENCY_FMT) 4572 + if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 4605 4573 iter->iter_flags |= TRACE_FILE_LAT_FMT; 4606 4574 4607 4575 /* Output in nanoseconds only if we are using a clock in nanoseconds. */ ··· 4658 4626 static unsigned int 4659 4627 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) 4660 4628 { 4629 + struct trace_array *tr = iter->tr; 4630 + 4661 4631 /* Iterators are static, they should be filled or empty */ 4662 4632 if (trace_buffer_iter(iter, iter->cpu_file)) 4663 4633 return POLLIN | POLLRDNORM; 4664 4634 4665 - if (trace_flags & TRACE_ITER_BLOCK) 4635 + if (tr->trace_flags & TRACE_ITER_BLOCK) 4666 4636 /* 4667 4637 * Always select as readable when in blocking mode 4668 4638 */ ··· 5081 5047 struct trace_array *tr = inode->i_private; 5082 5048 5083 5049 /* disable tracing ? 
*/ 5084 - if (trace_flags & TRACE_ITER_STOP_ON_FREE) 5050 + if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) 5085 5051 tracer_tracing_off(tr); 5086 5052 /* resize the ring buffer to 0 */ 5087 5053 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); ··· 5114 5080 if (tracing_disabled) 5115 5081 return -EINVAL; 5116 5082 5117 - if (!(trace_flags & TRACE_ITER_MARKERS)) 5083 + if (!(tr->trace_flags & TRACE_ITER_MARKERS)) 5118 5084 return -EINVAL; 5119 5085 5120 5086 if (cnt > TRACE_BUF_SIZE) ··· 6166 6132 #include "trace_selftest.c" 6167 6133 #endif 6168 6134 6169 - struct trace_option_dentry { 6170 - struct tracer_opt *opt; 6171 - struct tracer_flags *flags; 6172 - struct trace_array *tr; 6173 - struct dentry *entry; 6174 - }; 6175 - 6176 6135 static ssize_t 6177 6136 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, 6178 6137 loff_t *ppos) ··· 6218 6191 .llseek = generic_file_llseek, 6219 6192 }; 6220 6193 6194 + /* 6195 + * In order to pass in both the trace_array descriptor as well as the index 6196 + * to the flag that the trace option file represents, the trace_array 6197 + * has a character array of trace_flags_index[], which holds the index 6198 + * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. 6199 + * The address of this character array is passed to the flag option file 6200 + * read/write callbacks. 6201 + * 6202 + * In order to extract both the index and the trace_array descriptor, 6203 + * get_tr_index() uses the following algorithm. 6204 + * 6205 + * idx = *ptr; 6206 + * 6207 + * As the pointer itself contains the address of the index (remember 6208 + * index[1] == 1). 6209 + * 6210 + * Then to get the trace_array descriptor, by subtracting that index 6211 + * from the ptr, we get to the start of the index itself. 6212 + * 6213 + * ptr - idx == &index[0] 6214 + * 6215 + * Then a simple container_of() from that pointer gets us to the 6216 + * trace_array descriptor. 
6217 + */ 6218 + static void get_tr_index(void *data, struct trace_array **ptr, 6219 + unsigned int *pindex) 6220 + { 6221 + *pindex = *(unsigned char *)data; 6222 + 6223 + *ptr = container_of(data - *pindex, struct trace_array, 6224 + trace_flags_index); 6225 + } 6226 + 6221 6227 static ssize_t 6222 6228 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, 6223 6229 loff_t *ppos) 6224 6230 { 6225 - long index = (long)filp->private_data; 6231 + void *tr_index = filp->private_data; 6232 + struct trace_array *tr; 6233 + unsigned int index; 6226 6234 char *buf; 6227 6235 6228 - if (trace_flags & (1 << index)) 6236 + get_tr_index(tr_index, &tr, &index); 6237 + 6238 + if (tr->trace_flags & (1 << index)) 6229 6239 buf = "1\n"; 6230 6240 else 6231 6241 buf = "0\n"; ··· 6274 6210 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, 6275 6211 loff_t *ppos) 6276 6212 { 6277 - struct trace_array *tr = &global_trace; 6278 - long index = (long)filp->private_data; 6213 + void *tr_index = filp->private_data; 6214 + struct trace_array *tr; 6215 + unsigned int index; 6279 6216 unsigned long val; 6280 6217 int ret; 6218 + 6219 + get_tr_index(tr_index, &tr, &index); 6281 6220 6282 6221 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 6283 6222 if (ret) ··· 6365 6298 6366 6299 } 6367 6300 6368 - static struct trace_option_dentry * 6301 + static void 6369 6302 create_trace_option_files(struct trace_array *tr, struct tracer *tracer) 6370 6303 { 6371 6304 struct trace_option_dentry *topts; 6305 + struct trace_options *tr_topts; 6372 6306 struct tracer_flags *flags; 6373 6307 struct tracer_opt *opts; 6374 6308 int cnt; 6309 + int i; 6375 6310 6376 6311 if (!tracer) 6377 - return NULL; 6312 + return; 6378 6313 6379 6314 flags = tracer->flags; 6380 6315 6381 6316 if (!flags || !flags->opts) 6382 - return NULL; 6317 + return; 6318 + 6319 + /* 6320 + * If this is an instance, only create flags for tracers 6321 + * the instance may have. 
6322 + */ 6323 + if (!trace_ok_for_array(tracer, tr)) 6324 + return; 6325 + 6326 + for (i = 0; i < tr->nr_topts; i++) { 6327 + /* 6328 + * Check if these flags have already been added. 6329 + * Some tracers share flags. 6330 + */ 6331 + if (tr->topts[i].tracer->flags == tracer->flags) 6332 + return; 6333 + } 6383 6334 6384 6335 opts = flags->opts; 6385 6336 ··· 6406 6321 6407 6322 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); 6408 6323 if (!topts) 6409 - return NULL; 6410 - 6411 - for (cnt = 0; opts[cnt].name; cnt++) 6412 - create_trace_option_file(tr, &topts[cnt], flags, 6413 - &opts[cnt]); 6414 - 6415 - return topts; 6416 - } 6417 - 6418 - static void 6419 - destroy_trace_option_files(struct trace_option_dentry *topts) 6420 - { 6421 - int cnt; 6422 - 6423 - if (!topts) 6424 6324 return; 6425 6325 6426 - for (cnt = 0; topts[cnt].opt; cnt++) 6427 - tracefs_remove(topts[cnt].entry); 6326 + tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), 6327 + GFP_KERNEL); 6328 + if (!tr_topts) { 6329 + kfree(topts); 6330 + return; 6331 + } 6428 6332 6429 - kfree(topts); 6333 + tr->topts = tr_topts; 6334 + tr->topts[tr->nr_topts].tracer = tracer; 6335 + tr->topts[tr->nr_topts].topts = topts; 6336 + tr->nr_topts++; 6337 + 6338 + for (cnt = 0; opts[cnt].name; cnt++) { 6339 + create_trace_option_file(tr, &topts[cnt], flags, 6340 + &opts[cnt]); 6341 + WARN_ONCE(topts[cnt].entry == NULL, 6342 + "Failed to create trace option: %s", 6343 + opts[cnt].name); 6344 + } 6430 6345 } 6431 6346 6432 6347 static struct dentry * ··· 6439 6354 if (!t_options) 6440 6355 return NULL; 6441 6356 6442 - return trace_create_file(option, 0644, t_options, (void *)index, 6443 - &trace_options_core_fops); 6357 + return trace_create_file(option, 0644, t_options, 6358 + (void *)&tr->trace_flags_index[index], 6359 + &trace_options_core_fops); 6444 6360 } 6445 6361 6446 - static __init void create_trace_options_dir(struct trace_array *tr) 6362 + static void 
create_trace_options_dir(struct trace_array *tr) 6447 6363 { 6448 6364 struct dentry *t_options; 6365 + bool top_level = tr == &global_trace; 6449 6366 int i; 6450 6367 6451 6368 t_options = trace_options_init_dentry(tr); 6452 6369 if (!t_options) 6453 6370 return; 6454 6371 6455 - for (i = 0; trace_options[i]; i++) 6456 - create_trace_option_core_file(tr, trace_options[i], i); 6372 + for (i = 0; trace_options[i]; i++) { 6373 + if (top_level || 6374 + !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) 6375 + create_trace_option_core_file(tr, trace_options[i], i); 6376 + } 6457 6377 } 6458 6378 6459 6379 static ssize_t ··· 6525 6435 { 6526 6436 enum ring_buffer_flags rb_flags; 6527 6437 6528 - rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 6438 + rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0; 6529 6439 6530 6440 buf->tr = tr; 6531 6441 ··· 6595 6505 #endif 6596 6506 } 6597 6507 6508 + static void init_trace_flags_index(struct trace_array *tr) 6509 + { 6510 + int i; 6511 + 6512 + /* Used by the trace options files */ 6513 + for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) 6514 + tr->trace_flags_index[i] = i; 6515 + } 6516 + 6517 + static void __update_tracer_options(struct trace_array *tr) 6518 + { 6519 + struct tracer *t; 6520 + 6521 + for (t = trace_types; t; t = t->next) 6522 + add_tracer_options(tr, t); 6523 + } 6524 + 6525 + static void update_tracer_options(struct trace_array *tr) 6526 + { 6527 + mutex_lock(&trace_types_lock); 6528 + __update_tracer_options(tr); 6529 + mutex_unlock(&trace_types_lock); 6530 + } 6531 + 6598 6532 static int instance_mkdir(const char *name) 6599 6533 { 6600 6534 struct trace_array *tr; ··· 6643 6529 6644 6530 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 6645 6531 goto out_free_tr; 6532 + 6533 + tr->trace_flags = global_trace.trace_flags; 6646 6534 6647 6535 cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 6648 6536 ··· 6671 6555 } 6672 6556 6673 6557 init_tracer_tracefs(tr, tr->dir); 6558 + 
init_trace_flags_index(tr); 6559 + __update_tracer_options(tr); 6674 6560 6675 6561 list_add(&tr->list, &ftrace_trace_arrays); 6676 6562 ··· 6698 6580 struct trace_array *tr; 6699 6581 int found = 0; 6700 6582 int ret; 6583 + int i; 6701 6584 6702 6585 mutex_lock(&trace_types_lock); 6703 6586 ··· 6721 6602 tracing_set_nop(tr); 6722 6603 event_trace_del_tracer(tr); 6723 6604 ftrace_destroy_function_files(tr); 6724 - debugfs_remove_recursive(tr->dir); 6605 + tracefs_remove_recursive(tr->dir); 6725 6606 free_trace_buffers(tr); 6607 + 6608 + for (i = 0; i < tr->nr_topts; i++) { 6609 + kfree(tr->topts[i].topts); 6610 + } 6611 + kfree(tr->topts); 6726 6612 6727 6613 kfree(tr->name); 6728 6614 kfree(tr); ··· 6789 6665 6790 6666 trace_create_file("tracing_on", 0644, d_tracer, 6791 6667 tr, &rb_simple_fops); 6668 + 6669 + create_trace_options_dir(tr); 6792 6670 6793 6671 #ifdef CONFIG_TRACER_MAX_TRACE 6794 6672 trace_create_file("tracing_max_latency", 0644, d_tracer, ··· 6987 6861 6988 6862 create_trace_instances(d_tracer); 6989 6863 6990 - create_trace_options_dir(&global_trace); 6991 - 6992 - /* If the tracer was started via cmdline, create options for it here */ 6993 - if (global_trace.current_trace != &nop_trace) 6994 - update_tracer_options(&global_trace, global_trace.current_trace); 6864 + update_tracer_options(&global_trace); 6995 6865 6996 6866 return 0; 6997 6867 } ··· 7086 6964 /* use static because iter can be a bit big for the stack */ 7087 6965 static struct trace_iterator iter; 7088 6966 static atomic_t dump_running; 6967 + struct trace_array *tr = &global_trace; 7089 6968 unsigned int old_userobj; 7090 6969 unsigned long flags; 7091 6970 int cnt = 0, cpu; ··· 7116 6993 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); 7117 6994 } 7118 6995 7119 - old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; 6996 + old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; 7120 6997 7121 6998 /* don't look at user memory in panic mode */ 7122 - trace_flags 
&= ~TRACE_ITER_SYM_USEROBJ; 6999 + tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 7123 7000 7124 7001 switch (oops_dump_mode) { 7125 7002 case DUMP_ALL: ··· 7182 7059 printk(KERN_TRACE "---------------------------------\n"); 7183 7060 7184 7061 out_enable: 7185 - trace_flags |= old_userobj; 7062 + tr->trace_flags |= old_userobj; 7186 7063 7187 7064 for_each_tracing_cpu(cpu) { 7188 7065 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); ··· 7196 7073 { 7197 7074 int ring_buf_size; 7198 7075 int ret = -ENOMEM; 7076 + 7077 + /* 7078 + * Make sure we don't accidently add more trace options 7079 + * than we have bits for. 7080 + */ 7081 + BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); 7199 7082 7200 7083 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 7201 7084 goto out; ··· 7261 7132 7262 7133 ftrace_init_global_array_ops(&global_trace); 7263 7134 7135 + init_trace_flags_index(&global_trace); 7136 + 7264 7137 register_tracer(&nop_trace); 7265 7138 7266 7139 /* All seems OK, enable tracing */ ··· 7279 7148 INIT_LIST_HEAD(&global_trace.events); 7280 7149 list_add(&global_trace.list, &ftrace_trace_arrays); 7281 7150 7282 - while (trace_boot_options) { 7283 - char *option; 7284 - 7285 - option = strsep(&trace_boot_options, ","); 7286 - trace_set_options(&global_trace, option); 7287 - } 7151 + apply_trace_boot_options(); 7288 7152 7289 7153 register_snapshot_cmd(); 7290 7154
+116 -52
kernel/trace/trace.h
··· 71 71 tstruct \ 72 72 } 73 73 74 - #undef TP_ARGS 75 - #define TP_ARGS(args...) args 76 - 77 74 #undef FTRACE_ENTRY_DUP 78 75 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter) 79 76 ··· 153 156 pid_t pid; 154 157 kuid_t uid; 155 158 char comm[TASK_COMM_LEN]; 159 + 160 + bool ignore_pid; 156 161 }; 157 162 158 163 struct tracer; 164 + struct trace_option_dentry; 159 165 160 166 struct trace_buffer { 161 167 struct trace_array *tr; ··· 166 166 struct trace_array_cpu __percpu *data; 167 167 cycle_t time_start; 168 168 int cpu; 169 + }; 170 + 171 + #define TRACE_FLAGS_MAX_SIZE 32 172 + 173 + struct trace_options { 174 + struct tracer *tracer; 175 + struct trace_option_dentry *topts; 176 + }; 177 + 178 + struct trace_pid_list { 179 + unsigned int nr_pids; 180 + int order; 181 + pid_t *pids; 169 182 }; 170 183 171 184 /* ··· 206 193 bool allocated_snapshot; 207 194 unsigned long max_latency; 208 195 #endif 196 + struct trace_pid_list __rcu *filtered_pids; 209 197 /* 210 198 * max_lock is used to protect the swapping of buffers 211 199 * when taking a max snapshot. 
The buffers themselves are ··· 230 216 #endif 231 217 int stop_count; 232 218 int clock_id; 219 + int nr_topts; 233 220 struct tracer *current_trace; 221 + unsigned int trace_flags; 222 + unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; 234 223 unsigned int flags; 235 224 raw_spinlock_t start_lock; 236 225 struct dentry *dir; 237 226 struct dentry *options; 238 227 struct dentry *percpu_dir; 239 228 struct dentry *event_dir; 229 + struct trace_options *topts; 240 230 struct list_head systems; 241 231 struct list_head events; 242 232 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ ··· 350 332 /* Makes more easy to define a tracer opt */ 351 333 #define TRACER_OPT(s, b) .name = #s, .bit = b 352 334 335 + 336 + struct trace_option_dentry { 337 + struct tracer_opt *opt; 338 + struct tracer_flags *flags; 339 + struct trace_array *tr; 340 + struct dentry *entry; 341 + }; 353 342 354 343 /** 355 344 * struct tracer - a specific tracer and its callbacks to interact with tracefs ··· 636 611 #endif /* CONFIG_TRACER_MAX_TRACE */ 637 612 638 613 #ifdef CONFIG_STACKTRACE 639 - void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, 640 - int skip, int pc); 641 - 642 - void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, 643 - int skip, int pc, struct pt_regs *regs); 644 - 645 614 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, 646 615 int pc); 647 616 648 617 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 649 618 int pc); 650 619 #else 651 - static inline void ftrace_trace_stack(struct ring_buffer *buffer, 652 - unsigned long flags, int skip, int pc) 653 - { 654 - } 655 - 656 - static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer, 657 - unsigned long flags, int skip, 658 - int pc, struct pt_regs *regs) 659 - { 660 - } 661 - 662 620 static inline void ftrace_trace_userstack(struct ring_buffer *buffer, 663 621 unsigned long flags, int pc) 664 622 { 
··· 715 707 void trace_printk_seq(struct trace_seq *s); 716 708 enum print_line_t print_trace_line(struct trace_iterator *iter); 717 709 718 - extern unsigned long trace_flags; 719 - 720 710 extern char trace_find_mark(unsigned long long duration); 721 711 722 712 /* Standard output formatting function used for function return traces */ ··· 729 723 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 730 724 #define TRACE_GRAPH_PRINT_IRQS 0x40 731 725 #define TRACE_GRAPH_PRINT_TAIL 0x80 726 + #define TRACE_GRAPH_SLEEP_TIME 0x100 727 + #define TRACE_GRAPH_GRAPH_TIME 0x200 732 728 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28 733 729 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) 730 + 731 + extern void ftrace_graph_sleep_time_control(bool enable); 732 + extern void ftrace_graph_graph_time_control(bool enable); 734 733 735 734 extern enum print_line_t 736 735 print_graph_function_flags(struct trace_iterator *iter, u32 flags); ··· 870 859 #define ftrace_destroy_filter_files(ops) do { } while (0) 871 860 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ 872 861 873 - int ftrace_event_is_function(struct trace_event_call *call); 862 + bool ftrace_event_is_function(struct trace_event_call *call); 874 863 875 864 /* 876 865 * struct trace_parser - servers for reading the user input separated by spaces ··· 908 897 size_t cnt, loff_t *ppos); 909 898 910 899 /* 900 + * Only create function graph options if function graph is configured. 
901 + */ 902 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 903 + # define FGRAPH_FLAGS \ 904 + C(DISPLAY_GRAPH, "display-graph"), 905 + #else 906 + # define FGRAPH_FLAGS 907 + #endif 908 + 909 + #ifdef CONFIG_BRANCH_TRACER 910 + # define BRANCH_FLAGS \ 911 + C(BRANCH, "branch"), 912 + #else 913 + # define BRANCH_FLAGS 914 + #endif 915 + 916 + #ifdef CONFIG_FUNCTION_TRACER 917 + # define FUNCTION_FLAGS \ 918 + C(FUNCTION, "function-trace"), 919 + # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION 920 + #else 921 + # define FUNCTION_FLAGS 922 + # define FUNCTION_DEFAULT_FLAGS 0UL 923 + #endif 924 + 925 + #ifdef CONFIG_STACKTRACE 926 + # define STACK_FLAGS \ 927 + C(STACKTRACE, "stacktrace"), 928 + #else 929 + # define STACK_FLAGS 930 + #endif 931 + 932 + /* 911 933 * trace_iterator_flags is an enumeration that defines bit 912 934 * positions into trace_flags that controls the output. 913 935 * 914 936 * NOTE: These bits must match the trace_options array in 915 - * trace.c. 937 + * trace.c (this macro guarantees it). 
916 938 */ 917 - enum trace_iterator_flags { 918 - TRACE_ITER_PRINT_PARENT = 0x01, 919 - TRACE_ITER_SYM_OFFSET = 0x02, 920 - TRACE_ITER_SYM_ADDR = 0x04, 921 - TRACE_ITER_VERBOSE = 0x08, 922 - TRACE_ITER_RAW = 0x10, 923 - TRACE_ITER_HEX = 0x20, 924 - TRACE_ITER_BIN = 0x40, 925 - TRACE_ITER_BLOCK = 0x80, 926 - TRACE_ITER_STACKTRACE = 0x100, 927 - TRACE_ITER_PRINTK = 0x200, 928 - TRACE_ITER_PREEMPTONLY = 0x400, 929 - TRACE_ITER_BRANCH = 0x800, 930 - TRACE_ITER_ANNOTATE = 0x1000, 931 - TRACE_ITER_USERSTACKTRACE = 0x2000, 932 - TRACE_ITER_SYM_USEROBJ = 0x4000, 933 - TRACE_ITER_PRINTK_MSGONLY = 0x8000, 934 - TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ 935 - TRACE_ITER_LATENCY_FMT = 0x20000, 936 - TRACE_ITER_SLEEP_TIME = 0x40000, 937 - TRACE_ITER_GRAPH_TIME = 0x80000, 938 - TRACE_ITER_RECORD_CMD = 0x100000, 939 - TRACE_ITER_OVERWRITE = 0x200000, 940 - TRACE_ITER_STOP_ON_FREE = 0x400000, 941 - TRACE_ITER_IRQ_INFO = 0x800000, 942 - TRACE_ITER_MARKERS = 0x1000000, 943 - TRACE_ITER_FUNCTION = 0x2000000, 939 + #define TRACE_FLAGS \ 940 + C(PRINT_PARENT, "print-parent"), \ 941 + C(SYM_OFFSET, "sym-offset"), \ 942 + C(SYM_ADDR, "sym-addr"), \ 943 + C(VERBOSE, "verbose"), \ 944 + C(RAW, "raw"), \ 945 + C(HEX, "hex"), \ 946 + C(BIN, "bin"), \ 947 + C(BLOCK, "block"), \ 948 + C(PRINTK, "trace_printk"), \ 949 + C(ANNOTATE, "annotate"), \ 950 + C(USERSTACKTRACE, "userstacktrace"), \ 951 + C(SYM_USEROBJ, "sym-userobj"), \ 952 + C(PRINTK_MSGONLY, "printk-msg-only"), \ 953 + C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \ 954 + C(LATENCY_FMT, "latency-format"), \ 955 + C(RECORD_CMD, "record-cmd"), \ 956 + C(OVERWRITE, "overwrite"), \ 957 + C(STOP_ON_FREE, "disable_on_free"), \ 958 + C(IRQ_INFO, "irq-info"), \ 959 + C(MARKERS, "markers"), \ 960 + FUNCTION_FLAGS \ 961 + FGRAPH_FLAGS \ 962 + STACK_FLAGS \ 963 + BRANCH_FLAGS 964 + 965 + /* 966 + * By defining C, we can make TRACE_FLAGS a list of bit names 967 + * that will define the bits for the flag masks. 
968 + */ 969 + #undef C 970 + #define C(a, b) TRACE_ITER_##a##_BIT 971 + 972 + enum trace_iterator_bits { 973 + TRACE_FLAGS 974 + /* Make sure we don't go more than we have bits for */ 975 + TRACE_ITER_LAST_BIT 944 976 }; 977 + 978 + /* 979 + * By redefining C, we can make TRACE_FLAGS a list of masks that 980 + * use the bits as defined above. 981 + */ 982 + #undef C 983 + #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT) 984 + 985 + enum trace_iterator_flags { TRACE_FLAGS }; 945 986 946 987 /* 947 988 * TRACE_ITER_SYM_MASK masks the options in trace_flags that ··· 1009 946 extern void disable_branch_tracing(void); 1010 947 static inline int trace_branch_enable(struct trace_array *tr) 1011 948 { 1012 - if (trace_flags & TRACE_ITER_BRANCH) 949 + if (tr->trace_flags & TRACE_ITER_BRANCH) 1013 950 return enable_branch_tracing(tr); 1014 951 return 0; 1015 952 } ··· 1332 1269 extern const char *__start___tracepoint_str[]; 1333 1270 extern const char *__stop___tracepoint_str[]; 1334 1271 1272 + void trace_printk_control(bool enabled); 1335 1273 void trace_printk_init_buffers(void); 1336 1274 void trace_printk_start_comm(void); 1337 1275 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+1 -1
kernel/trace/trace_benchmark.c
··· 43 43 unsigned int std = 0; 44 44 45 45 /* Only run if the tracepoint is actually active */ 46 - if (!trace_benchmark_event_enabled()) 46 + if (!trace_benchmark_event_enabled() || !tracing_is_on()) 47 47 return; 48 48 49 49 local_irq_disable();
+2 -13
kernel/trace/trace_branch.c
··· 125 125 mutex_unlock(&branch_tracing_mutex); 126 126 } 127 127 128 - static void start_branch_trace(struct trace_array *tr) 129 - { 130 - enable_branch_tracing(tr); 131 - } 132 - 133 - static void stop_branch_trace(struct trace_array *tr) 134 - { 135 - disable_branch_tracing(); 136 - } 137 - 138 128 static int branch_trace_init(struct trace_array *tr) 139 129 { 140 - start_branch_trace(tr); 141 - return 0; 130 + return enable_branch_tracing(tr); 142 131 } 143 132 144 133 static void branch_trace_reset(struct trace_array *tr) 145 134 { 146 - stop_branch_trace(tr); 135 + disable_branch_tracing(); 147 136 } 148 137 149 138 static enum print_line_t trace_branch_print(struct trace_iterator *iter,
+479 -27
kernel/trace/trace_events.c
··· 15 15 #include <linux/kthread.h> 16 16 #include <linux/tracefs.h> 17 17 #include <linux/uaccess.h> 18 + #include <linux/bsearch.h> 18 19 #include <linux/module.h> 19 20 #include <linux/ctype.h> 21 + #include <linux/sort.h> 20 22 #include <linux/slab.h> 21 23 #include <linux/delay.h> 24 + 25 + #include <trace/events/sched.h> 22 26 23 27 #include <asm/setup.h> 24 28 ··· 42 38 static struct kmem_cache *field_cachep; 43 39 static struct kmem_cache *file_cachep; 44 40 45 - #define SYSTEM_FL_FREE_NAME (1 << 31) 46 - 47 41 static inline int system_refcount(struct event_subsystem *system) 48 42 { 49 - return system->ref_count & ~SYSTEM_FL_FREE_NAME; 43 + return system->ref_count; 50 44 } 51 45 52 46 static int system_refcount_inc(struct event_subsystem *system) 53 47 { 54 - return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME; 48 + return system->ref_count++; 55 49 } 56 50 57 51 static int system_refcount_dec(struct event_subsystem *system) 58 52 { 59 - return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME; 53 + return --system->ref_count; 60 54 } 61 55 62 56 /* Double loops, do not use break, only goto's work */ ··· 214 212 } 215 213 EXPORT_SYMBOL_GPL(trace_event_raw_init); 216 214 215 + bool trace_event_ignore_this_pid(struct trace_event_file *trace_file) 216 + { 217 + struct trace_array *tr = trace_file->tr; 218 + struct trace_array_cpu *data; 219 + struct trace_pid_list *pid_list; 220 + 221 + pid_list = rcu_dereference_sched(tr->filtered_pids); 222 + if (!pid_list) 223 + return false; 224 + 225 + data = this_cpu_ptr(tr->trace_buffer.data); 226 + 227 + return data->ignore_pid; 228 + } 229 + EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid); 230 + 217 231 void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, 218 232 struct trace_event_file *trace_file, 219 233 unsigned long len) 220 234 { 221 235 struct trace_event_call *event_call = trace_file->event_call; 236 + 237 + if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) && 238 + 
trace_event_ignore_this_pid(trace_file)) 239 + return NULL; 222 240 223 241 local_save_flags(fbuffer->flags); 224 242 fbuffer->pc = preempt_count(); ··· 360 338 int enable, int soft_disable) 361 339 { 362 340 struct trace_event_call *call = file->event_call; 341 + struct trace_array *tr = file->tr; 363 342 int ret = 0; 364 343 int disable; 365 344 ··· 424 401 if (soft_disable) 425 402 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 426 403 427 - if (trace_flags & TRACE_ITER_RECORD_CMD) { 404 + if (tr->trace_flags & TRACE_ITER_RECORD_CMD) { 428 405 tracing_start_cmdline_record(); 429 406 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 430 407 } ··· 469 446 mutex_unlock(&event_mutex); 470 447 } 471 448 449 + static int cmp_pid(const void *key, const void *elt) 450 + { 451 + const pid_t *search_pid = key; 452 + const pid_t *pid = elt; 453 + 454 + if (*search_pid == *pid) 455 + return 0; 456 + if (*search_pid < *pid) 457 + return -1; 458 + return 1; 459 + } 460 + 461 + static bool 462 + check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task) 463 + { 464 + pid_t search_pid; 465 + pid_t *pid; 466 + 467 + /* 468 + * Return false, because if filtered_pids does not exist, 469 + * all pids are good to trace. 
470 + */ 471 + if (!filtered_pids) 472 + return false; 473 + 474 + search_pid = task->pid; 475 + 476 + pid = bsearch(&search_pid, filtered_pids->pids, 477 + filtered_pids->nr_pids, sizeof(pid_t), 478 + cmp_pid); 479 + if (!pid) 480 + return true; 481 + 482 + return false; 483 + } 484 + 485 + static void 486 + event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, 487 + struct task_struct *prev, struct task_struct *next) 488 + { 489 + struct trace_array *tr = data; 490 + struct trace_pid_list *pid_list; 491 + 492 + pid_list = rcu_dereference_sched(tr->filtered_pids); 493 + 494 + this_cpu_write(tr->trace_buffer.data->ignore_pid, 495 + check_ignore_pid(pid_list, prev) && 496 + check_ignore_pid(pid_list, next)); 497 + } 498 + 499 + static void 500 + event_filter_pid_sched_switch_probe_post(void *data, bool preempt, 501 + struct task_struct *prev, struct task_struct *next) 502 + { 503 + struct trace_array *tr = data; 504 + struct trace_pid_list *pid_list; 505 + 506 + pid_list = rcu_dereference_sched(tr->filtered_pids); 507 + 508 + this_cpu_write(tr->trace_buffer.data->ignore_pid, 509 + check_ignore_pid(pid_list, next)); 510 + } 511 + 512 + static void 513 + event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) 514 + { 515 + struct trace_array *tr = data; 516 + struct trace_pid_list *pid_list; 517 + 518 + /* Nothing to do if we are already tracing */ 519 + if (!this_cpu_read(tr->trace_buffer.data->ignore_pid)) 520 + return; 521 + 522 + pid_list = rcu_dereference_sched(tr->filtered_pids); 523 + 524 + this_cpu_write(tr->trace_buffer.data->ignore_pid, 525 + check_ignore_pid(pid_list, task)); 526 + } 527 + 528 + static void 529 + event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) 530 + { 531 + struct trace_array *tr = data; 532 + struct trace_pid_list *pid_list; 533 + 534 + /* Nothing to do if we are not tracing */ 535 + if (this_cpu_read(tr->trace_buffer.data->ignore_pid)) 536 + return; 537 + 538 + pid_list = 
rcu_dereference_sched(tr->filtered_pids); 539 + 540 + /* Set tracing if current is enabled */ 541 + this_cpu_write(tr->trace_buffer.data->ignore_pid, 542 + check_ignore_pid(pid_list, current)); 543 + } 544 + 545 + static void __ftrace_clear_event_pids(struct trace_array *tr) 546 + { 547 + struct trace_pid_list *pid_list; 548 + struct trace_event_file *file; 549 + int cpu; 550 + 551 + pid_list = rcu_dereference_protected(tr->filtered_pids, 552 + lockdep_is_held(&event_mutex)); 553 + if (!pid_list) 554 + return; 555 + 556 + unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr); 557 + unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr); 558 + 559 + unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); 560 + unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); 561 + 562 + list_for_each_entry(file, &tr->events, list) { 563 + clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 564 + } 565 + 566 + for_each_possible_cpu(cpu) 567 + per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false; 568 + 569 + rcu_assign_pointer(tr->filtered_pids, NULL); 570 + 571 + /* Wait till all users are no longer using pid filtering */ 572 + synchronize_sched(); 573 + 574 + free_pages((unsigned long)pid_list->pids, pid_list->order); 575 + kfree(pid_list); 576 + } 577 + 578 + static void ftrace_clear_event_pids(struct trace_array *tr) 579 + { 580 + mutex_lock(&event_mutex); 581 + __ftrace_clear_event_pids(tr); 582 + mutex_unlock(&event_mutex); 583 + } 584 + 472 585 static void __put_system(struct event_subsystem *system) 473 586 { 474 587 struct event_filter *filter = system->filter; ··· 619 460 kfree(filter->filter_string); 620 461 kfree(filter); 621 462 } 622 - if (system->ref_count & SYSTEM_FL_FREE_NAME) 623 - kfree(system->name); 463 + kfree_const(system->name); 624 464 kfree(system); 625 465 } 626 466 ··· 935 777 static void t_stop(struct seq_file *m, void *p) 936 778 { 937 779 
mutex_unlock(&event_mutex); 780 + } 781 + 782 + static void *p_start(struct seq_file *m, loff_t *pos) 783 + __acquires(RCU) 784 + { 785 + struct trace_pid_list *pid_list; 786 + struct trace_array *tr = m->private; 787 + 788 + /* 789 + * Grab the mutex, to keep calls to p_next() having the same 790 + * tr->filtered_pids as p_start() has. 791 + * If we just passed the tr->filtered_pids around, then RCU would 792 + * have been enough, but doing that makes things more complex. 793 + */ 794 + mutex_lock(&event_mutex); 795 + rcu_read_lock_sched(); 796 + 797 + pid_list = rcu_dereference_sched(tr->filtered_pids); 798 + 799 + if (!pid_list || *pos >= pid_list->nr_pids) 800 + return NULL; 801 + 802 + return (void *)&pid_list->pids[*pos]; 803 + } 804 + 805 + static void p_stop(struct seq_file *m, void *p) 806 + __releases(RCU) 807 + { 808 + rcu_read_unlock_sched(); 809 + mutex_unlock(&event_mutex); 810 + } 811 + 812 + static void * 813 + p_next(struct seq_file *m, void *v, loff_t *pos) 814 + { 815 + struct trace_array *tr = m->private; 816 + struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids); 817 + 818 + (*pos)++; 819 + 820 + if (*pos >= pid_list->nr_pids) 821 + return NULL; 822 + 823 + return (void *)&pid_list->pids[*pos]; 824 + } 825 + 826 + static int p_show(struct seq_file *m, void *v) 827 + { 828 + pid_t *pid = v; 829 + 830 + seq_printf(m, "%d\n", *pid); 831 + return 0; 938 832 } 939 833 940 834 static ssize_t ··· 1546 1336 return r; 1547 1337 } 1548 1338 1339 + static int max_pids(struct trace_pid_list *pid_list) 1340 + { 1341 + return (PAGE_SIZE << pid_list->order) / sizeof(pid_t); 1342 + } 1343 + 1344 + static void ignore_task_cpu(void *data) 1345 + { 1346 + struct trace_array *tr = data; 1347 + struct trace_pid_list *pid_list; 1348 + 1349 + /* 1350 + * This function is called by on_each_cpu() while the 1351 + * event_mutex is held. 
1352 + */ 1353 + pid_list = rcu_dereference_protected(tr->filtered_pids, 1354 + mutex_is_locked(&event_mutex)); 1355 + 1356 + this_cpu_write(tr->trace_buffer.data->ignore_pid, 1357 + check_ignore_pid(pid_list, current)); 1358 + } 1359 + 1360 + static ssize_t 1361 + ftrace_event_pid_write(struct file *filp, const char __user *ubuf, 1362 + size_t cnt, loff_t *ppos) 1363 + { 1364 + struct seq_file *m = filp->private_data; 1365 + struct trace_array *tr = m->private; 1366 + struct trace_pid_list *filtered_pids = NULL; 1367 + struct trace_pid_list *pid_list = NULL; 1368 + struct trace_event_file *file; 1369 + struct trace_parser parser; 1370 + unsigned long val; 1371 + loff_t this_pos; 1372 + ssize_t read = 0; 1373 + ssize_t ret = 0; 1374 + pid_t pid; 1375 + int i; 1376 + 1377 + if (!cnt) 1378 + return 0; 1379 + 1380 + ret = tracing_update_buffers(); 1381 + if (ret < 0) 1382 + return ret; 1383 + 1384 + if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) 1385 + return -ENOMEM; 1386 + 1387 + mutex_lock(&event_mutex); 1388 + /* 1389 + * Load as many pids into the array before doing a 1390 + * swap from the tr->filtered_pids to the new list. 
1391 + */ 1392 + while (cnt > 0) { 1393 + 1394 + this_pos = 0; 1395 + 1396 + ret = trace_get_user(&parser, ubuf, cnt, &this_pos); 1397 + if (ret < 0 || !trace_parser_loaded(&parser)) 1398 + break; 1399 + 1400 + read += ret; 1401 + ubuf += ret; 1402 + cnt -= ret; 1403 + 1404 + parser.buffer[parser.idx] = 0; 1405 + 1406 + ret = -EINVAL; 1407 + if (kstrtoul(parser.buffer, 0, &val)) 1408 + break; 1409 + if (val > INT_MAX) 1410 + break; 1411 + 1412 + pid = (pid_t)val; 1413 + 1414 + ret = -ENOMEM; 1415 + if (!pid_list) { 1416 + pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); 1417 + if (!pid_list) 1418 + break; 1419 + 1420 + filtered_pids = rcu_dereference_protected(tr->filtered_pids, 1421 + lockdep_is_held(&event_mutex)); 1422 + if (filtered_pids) 1423 + pid_list->order = filtered_pids->order; 1424 + else 1425 + pid_list->order = 0; 1426 + 1427 + pid_list->pids = (void *)__get_free_pages(GFP_KERNEL, 1428 + pid_list->order); 1429 + if (!pid_list->pids) 1430 + break; 1431 + 1432 + if (filtered_pids) { 1433 + pid_list->nr_pids = filtered_pids->nr_pids; 1434 + memcpy(pid_list->pids, filtered_pids->pids, 1435 + pid_list->nr_pids * sizeof(pid_t)); 1436 + } else 1437 + pid_list->nr_pids = 0; 1438 + } 1439 + 1440 + if (pid_list->nr_pids >= max_pids(pid_list)) { 1441 + pid_t *pid_page; 1442 + 1443 + pid_page = (void *)__get_free_pages(GFP_KERNEL, 1444 + pid_list->order + 1); 1445 + if (!pid_page) 1446 + break; 1447 + memcpy(pid_page, pid_list->pids, 1448 + pid_list->nr_pids * sizeof(pid_t)); 1449 + free_pages((unsigned long)pid_list->pids, pid_list->order); 1450 + 1451 + pid_list->order++; 1452 + pid_list->pids = pid_page; 1453 + } 1454 + 1455 + pid_list->pids[pid_list->nr_pids++] = pid; 1456 + trace_parser_clear(&parser); 1457 + ret = 0; 1458 + } 1459 + trace_parser_put(&parser); 1460 + 1461 + if (ret < 0) { 1462 + if (pid_list) 1463 + free_pages((unsigned long)pid_list->pids, pid_list->order); 1464 + kfree(pid_list); 1465 + mutex_unlock(&event_mutex); 1466 + return ret; 1467 
+ } 1468 + 1469 + if (!pid_list) { 1470 + mutex_unlock(&event_mutex); 1471 + return ret; 1472 + } 1473 + 1474 + sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL); 1475 + 1476 + /* Remove duplicates */ 1477 + for (i = 1; i < pid_list->nr_pids; i++) { 1478 + int start = i; 1479 + 1480 + while (i < pid_list->nr_pids && 1481 + pid_list->pids[i - 1] == pid_list->pids[i]) 1482 + i++; 1483 + 1484 + if (start != i) { 1485 + if (i < pid_list->nr_pids) { 1486 + memmove(&pid_list->pids[start], &pid_list->pids[i], 1487 + (pid_list->nr_pids - i) * sizeof(pid_t)); 1488 + pid_list->nr_pids -= i - start; 1489 + i = start; 1490 + } else 1491 + pid_list->nr_pids = start; 1492 + } 1493 + } 1494 + 1495 + rcu_assign_pointer(tr->filtered_pids, pid_list); 1496 + 1497 + list_for_each_entry(file, &tr->events, list) { 1498 + set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 1499 + } 1500 + 1501 + if (filtered_pids) { 1502 + synchronize_sched(); 1503 + 1504 + free_pages((unsigned long)filtered_pids->pids, filtered_pids->order); 1505 + kfree(filtered_pids); 1506 + } else { 1507 + /* 1508 + * Register a probe that is called before all other probes 1509 + * to set ignore_pid if next or prev do not match. 1510 + * Register a probe this is called after all other probes 1511 + * to only keep ignore_pid set if next pid matches. 1512 + */ 1513 + register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre, 1514 + tr, INT_MAX); 1515 + register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post, 1516 + tr, 0); 1517 + 1518 + register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, 1519 + tr, INT_MAX); 1520 + register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, 1521 + tr, 0); 1522 + } 1523 + 1524 + /* 1525 + * Ignoring of pids is done at task switch. But we have to 1526 + * check for those tasks that are currently running. 1527 + * Always do this in case a pid was appended or removed. 
1528 + */ 1529 + on_each_cpu(ignore_task_cpu, tr, 1); 1530 + 1531 + mutex_unlock(&event_mutex); 1532 + 1533 + ret = read; 1534 + *ppos += read; 1535 + 1536 + return ret; 1537 + } 1538 + 1549 1539 static int ftrace_event_avail_open(struct inode *inode, struct file *file); 1550 1540 static int ftrace_event_set_open(struct inode *inode, struct file *file); 1541 + static int ftrace_event_set_pid_open(struct inode *inode, struct file *file); 1551 1542 static int ftrace_event_release(struct inode *inode, struct file *file); 1552 1543 1553 1544 static const struct seq_operations show_event_seq_ops = { ··· 1765 1354 .stop = t_stop, 1766 1355 }; 1767 1356 1357 + static const struct seq_operations show_set_pid_seq_ops = { 1358 + .start = p_start, 1359 + .next = p_next, 1360 + .show = p_show, 1361 + .stop = p_stop, 1362 + }; 1363 + 1768 1364 static const struct file_operations ftrace_avail_fops = { 1769 1365 .open = ftrace_event_avail_open, 1770 1366 .read = seq_read, ··· 1783 1365 .open = ftrace_event_set_open, 1784 1366 .read = seq_read, 1785 1367 .write = ftrace_event_write, 1368 + .llseek = seq_lseek, 1369 + .release = ftrace_event_release, 1370 + }; 1371 + 1372 + static const struct file_operations ftrace_set_event_pid_fops = { 1373 + .open = ftrace_event_set_pid_open, 1374 + .read = seq_read, 1375 + .write = ftrace_event_pid_write, 1786 1376 .llseek = seq_lseek, 1787 1377 .release = ftrace_event_release, 1788 1378 }; ··· 1905 1479 return ret; 1906 1480 } 1907 1481 1482 + static int 1483 + ftrace_event_set_pid_open(struct inode *inode, struct file *file) 1484 + { 1485 + const struct seq_operations *seq_ops = &show_set_pid_seq_ops; 1486 + struct trace_array *tr = inode->i_private; 1487 + int ret; 1488 + 1489 + if (trace_array_get(tr) < 0) 1490 + return -ENODEV; 1491 + 1492 + if ((file->f_mode & FMODE_WRITE) && 1493 + (file->f_flags & O_TRUNC)) 1494 + ftrace_clear_event_pids(tr); 1495 + 1496 + ret = ftrace_event_open(inode, file, seq_ops); 1497 + if (ret < 0) 1498 + 
trace_array_put(tr); 1499 + return ret; 1500 + } 1501 + 1908 1502 static struct event_subsystem * 1909 1503 create_new_subsystem(const char *name) 1910 1504 { ··· 1938 1492 system->ref_count = 1; 1939 1493 1940 1494 /* Only allocate if dynamic (kprobes and modules) */ 1941 - if (!core_kernel_data((unsigned long)name)) { 1942 - system->ref_count |= SYSTEM_FL_FREE_NAME; 1943 - system->name = kstrdup(name, GFP_KERNEL); 1944 - if (!system->name) 1945 - goto out_free; 1946 - } else 1947 - system->name = name; 1495 + system->name = kstrdup_const(name, GFP_KERNEL); 1496 + if (!system->name) 1497 + goto out_free; 1948 1498 1949 1499 system->filter = NULL; 1950 1500 ··· 1953 1511 return system; 1954 1512 1955 1513 out_free: 1956 - if (system->ref_count & SYSTEM_FL_FREE_NAME) 1957 - kfree(system->name); 1514 + kfree_const(system->name); 1958 1515 kfree(system); 1959 1516 return NULL; 1960 1517 } ··· 2919 2478 return -ENOMEM; 2920 2479 } 2921 2480 2481 + entry = tracefs_create_file("set_event_pid", 0644, parent, 2482 + tr, &ftrace_set_event_pid_fops); 2483 + 2922 2484 /* ring buffer internal formats */ 2923 2485 trace_create_file("header_page", 0444, d_events, 2924 2486 ring_buffer_print_page_header, ··· 3002 2558 /* Disable any event triggers and associated soft-disabled events */ 3003 2559 clear_event_triggers(tr); 3004 2560 2561 + /* Clear the pid list */ 2562 + __ftrace_clear_event_pids(tr); 2563 + 3005 2564 /* Disable any running events */ 3006 2565 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0); 3007 2566 ··· 3042 2595 3043 2596 if (!token) 3044 2597 break; 3045 - if (!*token) 3046 - continue; 3047 2598 3048 - /* Restarting syscalls requires that we stop them first */ 3049 - if (disable_first) 3050 - ftrace_set_clr_event(tr, token, 0); 2599 + if (*token) { 2600 + /* Restarting syscalls requires that we stop them first */ 2601 + if (disable_first) 2602 + ftrace_set_clr_event(tr, token, 0); 3051 2603 3052 - ret = ftrace_set_clr_event(tr, token, 1); 3053 - if (ret) 
3054 - pr_warn("Failed to enable trace event: %s\n", token); 2604 + ret = ftrace_set_clr_event(tr, token, 1); 2605 + if (ret) 2606 + pr_warn("Failed to enable trace event: %s\n", token); 2607 + } 3055 2608 3056 2609 /* Put back the comma to allow this to be called again */ 3057 2610 if (buf) ··· 3338 2891 3339 2892 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); 3340 2893 3341 - static void 2894 + static struct trace_array *event_tr; 2895 + 2896 + static void __init 3342 2897 function_test_events_call(unsigned long ip, unsigned long parent_ip, 3343 2898 struct ftrace_ops *op, struct pt_regs *pt_regs) 3344 2899 { ··· 3371 2922 entry->ip = ip; 3372 2923 entry->parent_ip = parent_ip; 3373 2924 3374 - trace_buffer_unlock_commit(buffer, event, flags, pc); 2925 + trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc); 3375 2926 3376 2927 out: 3377 2928 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); ··· 3387 2938 static __init void event_trace_self_test_with_function(void) 3388 2939 { 3389 2940 int ret; 2941 + event_tr = top_trace_array(); 2942 + if (WARN_ON(!event_tr)) 2943 + return; 3390 2944 ret = register_ftrace_function(&trace_ops); 3391 2945 if (WARN_ON(ret < 0)) { 3392 2946 pr_info("Failed to enable function tracer for event tests\n");
+4 -4
kernel/trace/trace_events_filter.c
··· 973 973 field->filter_type == FILTER_PTR_STRING; 974 974 } 975 975 976 - static int is_legal_op(struct ftrace_event_field *field, int op) 976 + static bool is_legal_op(struct ftrace_event_field *field, int op) 977 977 { 978 978 if (is_string_field(field) && 979 979 (op != OP_EQ && op != OP_NE && op != OP_GLOB)) 980 - return 0; 980 + return false; 981 981 if (!is_string_field(field) && op == OP_GLOB) 982 - return 0; 982 + return false; 983 983 984 - return 1; 984 + return true; 985 985 } 986 986 987 987 static filter_pred_fn_t select_comparison_fn(int op, int field_size,
+1 -1
kernel/trace/trace_export.c
··· 187 187 FTRACE_ENTRY_REG(call, struct_name, etype, \ 188 188 PARAMS(tstruct), PARAMS(print), filter, NULL) 189 189 190 - int ftrace_event_is_function(struct trace_event_call *call) 190 + bool ftrace_event_is_function(struct trace_event_call *call) 191 191 { 192 192 return call == &event_function; 193 193 }
+41 -22
kernel/trace/trace_functions_graph.c
··· 83 83 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) }, 84 84 /* Display function name after trailing } */ 85 85 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) }, 86 + /* Include sleep time (scheduled out) between entry and return */ 87 + { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) }, 88 + /* Include time within nested functions */ 89 + { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) }, 86 90 { } /* Empty entry */ 87 91 }; 88 92 89 93 static struct tracer_flags tracer_flags = { 90 94 /* Don't display overruns, proc, or tail by default */ 91 95 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | 92 - TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS, 96 + TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS | 97 + TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME, 93 98 .opts = trace_opts 94 99 }; 95 100 ··· 112 107 }; 113 108 114 109 static void 115 - print_graph_duration(unsigned long long duration, struct trace_seq *s, 116 - u32 flags); 110 + print_graph_duration(struct trace_array *tr, unsigned long long duration, 111 + struct trace_seq *s, u32 flags); 117 112 118 113 /* Add a function return address to the trace stack on thread info.*/ 119 114 int ··· 658 653 print_graph_irq(struct trace_iterator *iter, unsigned long addr, 659 654 enum trace_type type, int cpu, pid_t pid, u32 flags) 660 655 { 656 + struct trace_array *tr = iter->tr; 661 657 struct trace_seq *s = &iter->seq; 662 658 struct trace_entry *ent = iter->ent; 663 659 ··· 666 660 addr >= (unsigned long)__irqentry_text_end) 667 661 return; 668 662 669 - if (trace_flags & TRACE_ITER_CONTEXT_INFO) { 663 + if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { 670 664 /* Absolute time */ 671 665 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) 672 666 print_graph_abs_time(iter->ts, s); ··· 682 676 } 683 677 684 678 /* Latency format */ 685 - if (trace_flags & TRACE_ITER_LATENCY_FMT) 679 + if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 686 680 print_graph_lat_fmt(s, ent); 687 681 } 688 682 689 
683 /* No overhead */ 690 - print_graph_duration(0, s, flags | FLAGS_FILL_START); 684 + print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START); 691 685 692 686 if (type == TRACE_GRAPH_ENT) 693 687 trace_seq_puts(s, "==========>"); 694 688 else 695 689 trace_seq_puts(s, "<=========="); 696 690 697 - print_graph_duration(0, s, flags | FLAGS_FILL_END); 691 + print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END); 698 692 trace_seq_putc(s, '\n'); 699 693 } 700 694 ··· 732 726 } 733 727 734 728 static void 735 - print_graph_duration(unsigned long long duration, struct trace_seq *s, 736 - u32 flags) 729 + print_graph_duration(struct trace_array *tr, unsigned long long duration, 730 + struct trace_seq *s, u32 flags) 737 731 { 738 732 if (!(flags & TRACE_GRAPH_PRINT_DURATION) || 739 - !(trace_flags & TRACE_ITER_CONTEXT_INFO)) 733 + !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) 740 734 return; 741 735 742 736 /* No real adata, just filling the column with spaces */ ··· 770 764 struct trace_seq *s, u32 flags) 771 765 { 772 766 struct fgraph_data *data = iter->private; 767 + struct trace_array *tr = iter->tr; 773 768 struct ftrace_graph_ret *graph_ret; 774 769 struct ftrace_graph_ent *call; 775 770 unsigned long long duration; ··· 799 792 } 800 793 801 794 /* Overhead and duration */ 802 - print_graph_duration(duration, s, flags); 795 + print_graph_duration(tr, duration, s, flags); 803 796 804 797 /* Function */ 805 798 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) ··· 817 810 { 818 811 struct ftrace_graph_ent *call = &entry->graph_ent; 819 812 struct fgraph_data *data = iter->private; 813 + struct trace_array *tr = iter->tr; 820 814 int i; 821 815 822 816 if (data) { ··· 833 825 } 834 826 835 827 /* No time */ 836 - print_graph_duration(0, s, flags | FLAGS_FILL_FULL); 828 + print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL); 837 829 838 830 /* Function */ 839 831 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) ··· 857 849 { 858 850 struct fgraph_data 
*data = iter->private; 859 851 struct trace_entry *ent = iter->ent; 852 + struct trace_array *tr = iter->tr; 860 853 int cpu = iter->cpu; 861 854 862 855 /* Pid */ ··· 867 858 /* Interrupt */ 868 859 print_graph_irq(iter, addr, type, cpu, ent->pid, flags); 869 860 870 - if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 861 + if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) 871 862 return; 872 863 873 864 /* Absolute time */ ··· 885 876 } 886 877 887 878 /* Latency format */ 888 - if (trace_flags & TRACE_ITER_LATENCY_FMT) 879 + if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) 889 880 print_graph_lat_fmt(s, ent); 890 881 891 882 return; ··· 1036 1027 { 1037 1028 unsigned long long duration = trace->rettime - trace->calltime; 1038 1029 struct fgraph_data *data = iter->private; 1030 + struct trace_array *tr = iter->tr; 1039 1031 pid_t pid = ent->pid; 1040 1032 int cpu = iter->cpu; 1041 1033 int func_match = 1; ··· 1068 1058 print_graph_prologue(iter, s, 0, 0, flags); 1069 1059 1070 1060 /* Overhead and duration */ 1071 - print_graph_duration(duration, s, flags); 1061 + print_graph_duration(tr, duration, s, flags); 1072 1062 1073 1063 /* Closing brace */ 1074 1064 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) ··· 1101 1091 print_graph_comment(struct trace_seq *s, struct trace_entry *ent, 1102 1092 struct trace_iterator *iter, u32 flags) 1103 1093 { 1104 - unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1094 + struct trace_array *tr = iter->tr; 1095 + unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); 1105 1096 struct fgraph_data *data = iter->private; 1106 1097 struct trace_event *event; 1107 1098 int depth = 0; ··· 1115 1104 print_graph_prologue(iter, s, 0, 0, flags); 1116 1105 1117 1106 /* No time */ 1118 - print_graph_duration(0, s, flags | FLAGS_FILL_FULL); 1107 + print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL); 1119 1108 1120 1109 /* Indentation */ 1121 1110 if (depth > 0) ··· 1256 1245 seq_printf(s, "#%.*s||| / \n", size, 
spaces); 1257 1246 } 1258 1247 1259 - static void __print_graph_headers_flags(struct seq_file *s, u32 flags) 1248 + static void __print_graph_headers_flags(struct trace_array *tr, 1249 + struct seq_file *s, u32 flags) 1260 1250 { 1261 - int lat = trace_flags & TRACE_ITER_LATENCY_FMT; 1251 + int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT; 1262 1252 1263 1253 if (lat) 1264 1254 print_lat_header(s, flags); ··· 1301 1289 void print_graph_headers_flags(struct seq_file *s, u32 flags) 1302 1290 { 1303 1291 struct trace_iterator *iter = s->private; 1292 + struct trace_array *tr = iter->tr; 1304 1293 1305 - if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) 1294 + if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO)) 1306 1295 return; 1307 1296 1308 - if (trace_flags & TRACE_ITER_LATENCY_FMT) { 1297 + if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) { 1309 1298 /* print nothing if the buffers are empty */ 1310 1299 if (trace_empty(iter)) 1311 1300 return; ··· 1314 1301 print_trace_header(s, iter); 1315 1302 } 1316 1303 1317 - __print_graph_headers_flags(s, flags); 1304 + __print_graph_headers_flags(tr, s, flags); 1318 1305 } 1319 1306 1320 1307 void graph_trace_open(struct trace_iterator *iter) ··· 1374 1361 { 1375 1362 if (bit == TRACE_GRAPH_PRINT_IRQS) 1376 1363 ftrace_graph_skip_irqs = !set; 1364 + 1365 + if (bit == TRACE_GRAPH_SLEEP_TIME) 1366 + ftrace_graph_sleep_time_control(set); 1367 + 1368 + if (bit == TRACE_GRAPH_GRAPH_TIME) 1369 + ftrace_graph_graph_time_control(set); 1377 1370 1378 1371 return 0; 1379 1372 }
+55 -51
kernel/trace/trace_irqsoff.c
··· 31 31 static int trace_type __read_mostly; 32 32 33 33 static int save_flags; 34 - static bool function_enabled; 35 34 36 35 static void stop_irqsoff_tracer(struct trace_array *tr, int graph); 37 36 static int start_irqsoff_tracer(struct trace_array *tr, int graph); ··· 56 57 # define irq_trace() (0) 57 58 #endif 58 59 59 - #define TRACE_DISPLAY_GRAPH 1 60 - 61 - static struct tracer_opt trace_opts[] = { 62 60 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 63 - /* display latency trace as call graph */ 64 - { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, 61 + static int irqsoff_display_graph(struct trace_array *tr, int set); 62 + # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH) 63 + #else 64 + static inline int irqsoff_display_graph(struct trace_array *tr, int set) 65 + { 66 + return -EINVAL; 67 + } 68 + # define is_graph(tr) false 65 69 #endif 66 - { } /* Empty entry */ 67 - }; 68 - 69 - static struct tracer_flags tracer_flags = { 70 - .val = 0, 71 - .opts = trace_opts, 72 - }; 73 - 74 - #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) 75 70 76 71 /* 77 72 * Sequence count - we record it when starting a measurement and ··· 145 152 #endif /* CONFIG_FUNCTION_TRACER */ 146 153 147 154 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 148 - static int 149 - irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 155 + static int irqsoff_display_graph(struct trace_array *tr, int set) 150 156 { 151 157 int cpu; 152 158 153 - if (!(bit & TRACE_DISPLAY_GRAPH)) 154 - return -EINVAL; 155 - 156 - if (!(is_graph() ^ set)) 159 + if (!(is_graph(tr) ^ set)) 157 160 return 0; 158 161 159 162 stop_irqsoff_tracer(irqsoff_trace, !set); ··· 198 209 199 210 static void irqsoff_trace_open(struct trace_iterator *iter) 200 211 { 201 - if (is_graph()) 212 + if (is_graph(iter->tr)) 202 213 graph_trace_open(iter); 203 214 204 215 } ··· 220 231 * In graph mode call the graph tracer output function, 221 232 * otherwise go with the TRACE_FN event handler 222 233 
*/ 223 - if (is_graph()) 234 + if (is_graph(iter->tr)) 224 235 return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); 225 236 226 237 return TRACE_TYPE_UNHANDLED; ··· 228 239 229 240 static void irqsoff_print_header(struct seq_file *s) 230 241 { 231 - if (is_graph()) 242 + struct trace_array *tr = irqsoff_trace; 243 + 244 + if (is_graph(tr)) 232 245 print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); 233 246 else 234 247 trace_default_header(s); ··· 241 250 unsigned long ip, unsigned long parent_ip, 242 251 unsigned long flags, int pc) 243 252 { 244 - if (is_graph()) 253 + if (is_graph(tr)) 245 254 trace_graph_function(tr, ip, parent_ip, flags, pc); 246 255 else 247 256 trace_function(tr, ip, parent_ip, flags, pc); ··· 250 259 #else 251 260 #define __trace_function trace_function 252 261 253 - static int 254 - irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 255 - { 256 - return -EINVAL; 257 - } 258 - 262 + #ifdef CONFIG_FUNCTION_TRACER 259 263 static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) 260 264 { 261 265 return -1; 262 266 } 267 + #endif 263 268 264 269 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) 265 270 { 266 271 return TRACE_TYPE_UNHANDLED; 267 272 } 268 273 269 - static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } 270 274 static void irqsoff_trace_open(struct trace_iterator *iter) { } 271 275 static void irqsoff_trace_close(struct trace_iterator *iter) { } 272 276 273 277 #ifdef CONFIG_FUNCTION_TRACER 278 + static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { } 274 279 static void irqsoff_print_header(struct seq_file *s) 275 280 { 276 281 trace_default_header(s); ··· 282 295 /* 283 296 * Should this new latency be reported/recorded? 
284 297 */ 285 - static int report_latency(struct trace_array *tr, cycle_t delta) 298 + static bool report_latency(struct trace_array *tr, cycle_t delta) 286 299 { 287 300 if (tracing_thresh) { 288 301 if (delta < tracing_thresh) 289 - return 0; 302 + return false; 290 303 } else { 291 304 if (delta <= tr->max_latency) 292 - return 0; 305 + return false; 293 306 } 294 - return 1; 307 + return true; 295 308 } 296 309 297 310 static void ··· 510 523 } 511 524 #endif /* CONFIG_PREEMPT_TRACER */ 512 525 526 + #ifdef CONFIG_FUNCTION_TRACER 527 + static bool function_enabled; 528 + 513 529 static int register_irqsoff_function(struct trace_array *tr, int graph, int set) 514 530 { 515 531 int ret; 516 532 517 533 /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */ 518 - if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION))) 534 + if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION))) 519 535 return 0; 520 536 521 537 if (graph) ··· 546 556 function_enabled = false; 547 557 } 548 558 549 - static void irqsoff_function_set(struct trace_array *tr, int set) 559 + static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set) 550 560 { 561 + if (!(mask & TRACE_ITER_FUNCTION)) 562 + return 0; 563 + 551 564 if (set) 552 - register_irqsoff_function(tr, is_graph(), 1); 565 + register_irqsoff_function(tr, is_graph(tr), 1); 553 566 else 554 - unregister_irqsoff_function(tr, is_graph()); 567 + unregister_irqsoff_function(tr, is_graph(tr)); 568 + return 1; 555 569 } 570 + #else 571 + static int register_irqsoff_function(struct trace_array *tr, int graph, int set) 572 + { 573 + return 0; 574 + } 575 + static void unregister_irqsoff_function(struct trace_array *tr, int graph) { } 576 + static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set) 577 + { 578 + return 0; 579 + } 580 + #endif /* CONFIG_FUNCTION_TRACER */ 556 581 557 582 static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) 
558 583 { 559 584 struct tracer *tracer = tr->current_trace; 560 585 561 - if (mask & TRACE_ITER_FUNCTION) 562 - irqsoff_function_set(tr, set); 586 + if (irqsoff_function_set(tr, mask, set)) 587 + return 0; 588 + 589 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 590 + if (mask & TRACE_ITER_DISPLAY_GRAPH) 591 + return irqsoff_display_graph(tr, set); 592 + #endif 563 593 564 594 return trace_keep_overwrite(tracer, mask, set); 565 595 } ··· 612 602 if (irqsoff_busy) 613 603 return -EBUSY; 614 604 615 - save_flags = trace_flags; 605 + save_flags = tr->trace_flags; 616 606 617 607 /* non overwrite screws up the latency tracers */ 618 608 set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); ··· 628 618 629 619 /* Only toplevel instance supports graph tracing */ 630 620 if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL && 631 - is_graph()))) 621 + is_graph(tr)))) 632 622 printk(KERN_ERR "failed to start irqsoff tracer\n"); 633 623 634 624 irqsoff_busy = true; ··· 640 630 int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; 641 631 int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; 642 632 643 - stop_irqsoff_tracer(tr, is_graph()); 633 + stop_irqsoff_tracer(tr, is_graph(tr)); 644 634 645 635 set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); 646 636 set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); ··· 676 666 .print_max = true, 677 667 .print_header = irqsoff_print_header, 678 668 .print_line = irqsoff_print_line, 679 - .flags = &tracer_flags, 680 - .set_flag = irqsoff_set_flag, 681 669 .flag_changed = irqsoff_flag_changed, 682 670 #ifdef CONFIG_FTRACE_SELFTEST 683 671 .selftest = trace_selftest_startup_irqsoff, ··· 708 700 .print_max = true, 709 701 .print_header = irqsoff_print_header, 710 702 .print_line = irqsoff_print_line, 711 - .flags = &tracer_flags, 712 - .set_flag = irqsoff_set_flag, 713 703 .flag_changed = irqsoff_flag_changed, 714 704 #ifdef CONFIG_FTRACE_SELFTEST 715 705 .selftest = trace_selftest_startup_preemptoff, ··· 742 736 .print_max = 
true, 743 737 .print_header = irqsoff_print_header, 744 738 .print_line = irqsoff_print_line, 745 - .flags = &tracer_flags, 746 - .set_flag = irqsoff_set_flag, 747 739 .flag_changed = irqsoff_flag_changed, 748 740 #ifdef CONFIG_FTRACE_SELFTEST 749 741 .selftest = trace_selftest_startup_preemptirqsoff,
+5 -3
kernel/trace/trace_kdb.c
··· 21 21 /* use static because iter can be a bit big for the stack */ 22 22 static struct trace_iterator iter; 23 23 static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS]; 24 + struct trace_array *tr; 24 25 unsigned int old_userobj; 25 26 int cnt = 0, cpu; 26 27 27 28 trace_init_global_iter(&iter); 28 29 iter.buffer_iter = buffer_iter; 30 + tr = iter.tr; 29 31 30 32 for_each_tracing_cpu(cpu) { 31 33 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); 32 34 } 33 35 34 - old_userobj = trace_flags; 36 + old_userobj = tr->trace_flags; 35 37 36 38 /* don't look at user memory in panic mode */ 37 - trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 39 + tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 38 40 39 41 kdb_printf("Dumping ftrace buffer:\n"); 40 42 ··· 84 82 kdb_printf("---------------------------------\n"); 85 83 86 84 out: 87 - trace_flags = old_userobj; 85 + tr->trace_flags = old_userobj; 88 86 89 87 for_each_tracing_cpu(cpu) { 90 88 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
+2 -2
kernel/trace/trace_mmiotrace.c
··· 314 314 entry->rw = *rw; 315 315 316 316 if (!call_filter_check_discard(call, entry, buffer, event)) 317 - trace_buffer_unlock_commit(buffer, event, 0, pc); 317 + trace_buffer_unlock_commit(tr, buffer, event, 0, pc); 318 318 } 319 319 320 320 void mmio_trace_rw(struct mmiotrace_rw *rw) ··· 344 344 entry->map = *map; 345 345 346 346 if (!call_filter_check_discard(call, entry, buffer, event)) 347 - trace_buffer_unlock_commit(buffer, event, 0, pc); 347 + trace_buffer_unlock_commit(tr, buffer, event, 0, pc); 348 348 } 349 349 350 350 void mmio_trace_mapping(struct mmiotrace_map *map)
+46 -51
kernel/trace/trace_output.c
··· 322 322 # define IP_FMT "%016lx" 323 323 #endif 324 324 325 - int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, 326 - unsigned long ip, unsigned long sym_flags) 325 + static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, 326 + unsigned long ip, unsigned long sym_flags) 327 327 { 328 328 struct file *file = NULL; 329 329 unsigned long vmstart = 0; ··· 351 351 } 352 352 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) 353 353 trace_seq_printf(s, " <" IP_FMT ">", ip); 354 - return !trace_seq_has_overflowed(s); 355 - } 356 - 357 - int 358 - seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, 359 - unsigned long sym_flags) 360 - { 361 - struct mm_struct *mm = NULL; 362 - unsigned int i; 363 - 364 - if (trace_flags & TRACE_ITER_SYM_USEROBJ) { 365 - struct task_struct *task; 366 - /* 367 - * we do the lookup on the thread group leader, 368 - * since individual threads might have already quit! 369 - */ 370 - rcu_read_lock(); 371 - task = find_task_by_vpid(entry->tgid); 372 - if (task) 373 - mm = get_task_mm(task); 374 - rcu_read_unlock(); 375 - } 376 - 377 - for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 378 - unsigned long ip = entry->caller[i]; 379 - 380 - if (ip == ULONG_MAX || trace_seq_has_overflowed(s)) 381 - break; 382 - 383 - trace_seq_puts(s, " => "); 384 - 385 - if (!ip) { 386 - trace_seq_puts(s, "??"); 387 - trace_seq_putc(s, '\n'); 388 - continue; 389 - } 390 - 391 - seq_print_user_ip(s, mm, ip, sym_flags); 392 - trace_seq_putc(s, '\n'); 393 - } 394 - 395 - if (mm) 396 - mmput(mm); 397 - 398 354 return !trace_seq_has_overflowed(s); 399 355 } 400 356 ··· 476 520 static int 477 521 lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) 478 522 { 479 - unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE; 523 + struct trace_array *tr = iter->tr; 524 + unsigned long verbose = tr->trace_flags & TRACE_ITER_VERBOSE; 480 525 unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS; 
481 526 unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start; 482 527 unsigned long long rel_ts = next_ts - iter->ts; ··· 520 563 521 564 int trace_print_context(struct trace_iterator *iter) 522 565 { 566 + struct trace_array *tr = iter->tr; 523 567 struct trace_seq *s = &iter->seq; 524 568 struct trace_entry *entry = iter->ent; 525 569 unsigned long long t; ··· 532 574 trace_seq_printf(s, "%16s-%-5d [%03d] ", 533 575 comm, entry->pid, iter->cpu); 534 576 535 - if (trace_flags & TRACE_ITER_IRQ_INFO) 577 + if (tr->trace_flags & TRACE_ITER_IRQ_INFO) 536 578 trace_print_lat_fmt(s, entry); 537 579 538 580 if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) { ··· 548 590 549 591 int trace_print_lat_context(struct trace_iterator *iter) 550 592 { 551 - u64 next_ts; 593 + struct trace_array *tr = iter->tr; 552 594 /* trace_find_next_entry will reset ent_size */ 553 595 int ent_size = iter->ent_size; 554 596 struct trace_seq *s = &iter->seq; 597 + u64 next_ts; 555 598 struct trace_entry *entry = iter->ent, 556 599 *next_entry = trace_find_next_entry(iter, NULL, 557 600 &next_ts); 558 - unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); 601 + unsigned long verbose = (tr->trace_flags & TRACE_ITER_VERBOSE); 559 602 560 603 /* Restore the original ent_size */ 561 604 iter->ent_size = ent_size; ··· 1038 1079 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, 1039 1080 int flags, struct trace_event *event) 1040 1081 { 1082 + struct trace_array *tr = iter->tr; 1041 1083 struct userstack_entry *field; 1042 1084 struct trace_seq *s = &iter->seq; 1085 + struct mm_struct *mm = NULL; 1086 + unsigned int i; 1043 1087 1044 1088 trace_assign_type(field, iter->ent); 1045 1089 1046 1090 trace_seq_puts(s, "<user stack trace>\n"); 1047 - seq_print_userip_objs(field, s, flags); 1091 + 1092 + if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) { 1093 + struct task_struct *task; 1094 + /* 1095 + * we do the lookup on the thread group leader, 1096 + * since 
individual threads might have already quit! 1097 + */ 1098 + rcu_read_lock(); 1099 + task = find_task_by_vpid(field->tgid); 1100 + if (task) 1101 + mm = get_task_mm(task); 1102 + rcu_read_unlock(); 1103 + } 1104 + 1105 + for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1106 + unsigned long ip = field->caller[i]; 1107 + 1108 + if (ip == ULONG_MAX || trace_seq_has_overflowed(s)) 1109 + break; 1110 + 1111 + trace_seq_puts(s, " => "); 1112 + 1113 + if (!ip) { 1114 + trace_seq_puts(s, "??"); 1115 + trace_seq_putc(s, '\n'); 1116 + continue; 1117 + } 1118 + 1119 + seq_print_user_ip(s, mm, ip, flags); 1120 + trace_seq_putc(s, '\n'); 1121 + } 1122 + 1123 + if (mm) 1124 + mmput(mm); 1048 1125 1049 1126 return trace_handle_return(s); 1050 1127 }
-4
kernel/trace/trace_output.h
··· 14 14 extern int 15 15 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, 16 16 unsigned long sym_flags); 17 - extern int seq_print_userip_objs(const struct userstack_entry *entry, 18 - struct trace_seq *s, unsigned long sym_flags); 19 - extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, 20 - unsigned long ip, unsigned long sym_flags); 21 17 22 18 extern int trace_print_context(struct trace_iterator *iter); 23 19 extern int trace_print_lat_context(struct trace_iterator *iter);
+10 -4
kernel/trace/trace_printk.c
··· 178 178 static inline void format_mod_stop(void) { } 179 179 #endif /* CONFIG_MODULES */ 180 180 181 + static bool __read_mostly trace_printk_enabled = true; 182 + 183 + void trace_printk_control(bool enabled) 184 + { 185 + trace_printk_enabled = enabled; 186 + } 181 187 182 188 __initdata_or_module static 183 189 struct notifier_block module_trace_bprintk_format_nb = { ··· 198 192 if (unlikely(!fmt)) 199 193 return 0; 200 194 201 - if (!(trace_flags & TRACE_ITER_PRINTK)) 195 + if (!trace_printk_enabled) 202 196 return 0; 203 197 204 198 va_start(ap, fmt); ··· 213 207 if (unlikely(!fmt)) 214 208 return 0; 215 209 216 - if (!(trace_flags & TRACE_ITER_PRINTK)) 210 + if (!trace_printk_enabled) 217 211 return 0; 218 212 219 213 return trace_vbprintk(ip, fmt, ap); ··· 225 219 int ret; 226 220 va_list ap; 227 221 228 - if (!(trace_flags & TRACE_ITER_PRINTK)) 222 + if (!trace_printk_enabled) 229 223 return 0; 230 224 231 225 va_start(ap, fmt); ··· 237 231 238 232 int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) 239 233 { 240 - if (!(trace_flags & TRACE_ITER_PRINTK)) 234 + if (!trace_printk_enabled) 241 235 return 0; 242 236 243 237 return trace_vprintk(ip, fmt, ap);
+4 -4
kernel/trace/trace_probe.h
··· 302 302 } 303 303 304 304 /* Check the name is good for event/group/fields */ 305 - static inline int is_good_name(const char *name) 305 + static inline bool is_good_name(const char *name) 306 306 { 307 307 if (!isalpha(*name) && *name != '_') 308 - return 0; 308 + return false; 309 309 while (*++name != '\0') { 310 310 if (!isalpha(*name) && !isdigit(*name) && *name != '_') 311 - return 0; 311 + return false; 312 312 } 313 - return 1; 313 + return true; 314 314 } 315 315 316 316 static inline struct event_file_link *
+63 -65
kernel/trace/trace_sched_wakeup.c
··· 34 34 35 35 static void wakeup_reset(struct trace_array *tr); 36 36 static void __wakeup_reset(struct trace_array *tr); 37 + 38 + static int save_flags; 39 + 40 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 41 + static int wakeup_display_graph(struct trace_array *tr, int set); 42 + # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH) 43 + #else 44 + static inline int wakeup_display_graph(struct trace_array *tr, int set) 45 + { 46 + return 0; 47 + } 48 + # define is_graph(tr) false 49 + #endif 50 + 51 + 52 + #ifdef CONFIG_FUNCTION_TRACER 53 + 37 54 static int wakeup_graph_entry(struct ftrace_graph_ent *trace); 38 55 static void wakeup_graph_return(struct ftrace_graph_ret *trace); 39 56 40 - static int save_flags; 41 57 static bool function_enabled; 42 - 43 - #define TRACE_DISPLAY_GRAPH 1 44 - 45 - static struct tracer_opt trace_opts[] = { 46 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 47 - /* display latency trace as call graph */ 48 - { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) }, 49 - #endif 50 - { } /* Empty entry */ 51 - }; 52 - 53 - static struct tracer_flags tracer_flags = { 54 - .val = 0, 55 - .opts = trace_opts, 56 - }; 57 - 58 - #define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH) 59 - 60 - #ifdef CONFIG_FUNCTION_TRACER 61 58 62 59 /* 63 60 * Prologue for the wakeup function tracers. 
··· 125 128 atomic_dec(&data->disabled); 126 129 preempt_enable_notrace(); 127 130 } 128 - #endif /* CONFIG_FUNCTION_TRACER */ 129 131 130 132 static int register_wakeup_function(struct trace_array *tr, int graph, int set) 131 133 { 132 134 int ret; 133 135 134 136 /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */ 135 - if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION))) 137 + if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION))) 136 138 return 0; 137 139 138 140 if (graph) ··· 159 163 function_enabled = false; 160 164 } 161 165 162 - static void wakeup_function_set(struct trace_array *tr, int set) 166 + static int wakeup_function_set(struct trace_array *tr, u32 mask, int set) 163 167 { 168 + if (!(mask & TRACE_ITER_FUNCTION)) 169 + return 0; 170 + 164 171 if (set) 165 - register_wakeup_function(tr, is_graph(), 1); 172 + register_wakeup_function(tr, is_graph(tr), 1); 166 173 else 167 - unregister_wakeup_function(tr, is_graph()); 174 + unregister_wakeup_function(tr, is_graph(tr)); 175 + return 1; 168 176 } 177 + #else 178 + static int register_wakeup_function(struct trace_array *tr, int graph, int set) 179 + { 180 + return 0; 181 + } 182 + static void unregister_wakeup_function(struct trace_array *tr, int graph) { } 183 + static int wakeup_function_set(struct trace_array *tr, u32 mask, int set) 184 + { 185 + return 0; 186 + } 187 + #endif /* CONFIG_FUNCTION_TRACER */ 169 188 170 189 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) 171 190 { 172 191 struct tracer *tracer = tr->current_trace; 173 192 174 - if (mask & TRACE_ITER_FUNCTION) 175 - wakeup_function_set(tr, set); 193 + if (wakeup_function_set(tr, mask, set)) 194 + return 0; 195 + 196 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 197 + if (mask & TRACE_ITER_DISPLAY_GRAPH) 198 + return wakeup_display_graph(tr, set); 199 + #endif 176 200 177 201 return trace_keep_overwrite(tracer, mask, set); 178 202 } ··· 219 203 } 220 204 221 205 #ifdef 
CONFIG_FUNCTION_GRAPH_TRACER 222 - static int 223 - wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 206 + static int wakeup_display_graph(struct trace_array *tr, int set) 224 207 { 225 - 226 - if (!(bit & TRACE_DISPLAY_GRAPH)) 227 - return -EINVAL; 228 - 229 - if (!(is_graph() ^ set)) 208 + if (!(is_graph(tr) ^ set)) 230 209 return 0; 231 210 232 211 stop_func_tracer(tr, !set); ··· 270 259 271 260 static void wakeup_trace_open(struct trace_iterator *iter) 272 261 { 273 - if (is_graph()) 262 + if (is_graph(iter->tr)) 274 263 graph_trace_open(iter); 275 264 } 276 265 ··· 290 279 * In graph mode call the graph tracer output function, 291 280 * otherwise go with the TRACE_FN event handler 292 281 */ 293 - if (is_graph()) 282 + if (is_graph(iter->tr)) 294 283 return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); 295 284 296 285 return TRACE_TYPE_UNHANDLED; ··· 298 287 299 288 static void wakeup_print_header(struct seq_file *s) 300 289 { 301 - if (is_graph()) 290 + if (is_graph(wakeup_trace)) 302 291 print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); 303 292 else 304 293 trace_default_header(s); ··· 309 298 unsigned long ip, unsigned long parent_ip, 310 299 unsigned long flags, int pc) 311 300 { 312 - if (is_graph()) 301 + if (is_graph(tr)) 313 302 trace_graph_function(tr, ip, parent_ip, flags, pc); 314 303 else 315 304 trace_function(tr, ip, parent_ip, flags, pc); ··· 317 306 #else 318 307 #define __trace_function trace_function 319 308 320 - static int 321 - wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) 322 - { 323 - return -EINVAL; 324 - } 325 - 326 - static int wakeup_graph_entry(struct ftrace_graph_ent *trace) 327 - { 328 - return -1; 329 - } 330 - 331 309 static enum print_line_t wakeup_print_line(struct trace_iterator *iter) 332 310 { 333 311 return TRACE_TYPE_UNHANDLED; 334 312 } 335 313 336 - static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } 337 314 static void wakeup_trace_open(struct 
trace_iterator *iter) { } 338 315 static void wakeup_trace_close(struct trace_iterator *iter) { } 339 316 340 317 #ifdef CONFIG_FUNCTION_TRACER 318 + static int wakeup_graph_entry(struct ftrace_graph_ent *trace) 319 + { 320 + return -1; 321 + } 322 + static void wakeup_graph_return(struct ftrace_graph_ret *trace) { } 341 323 static void wakeup_print_header(struct seq_file *s) 342 324 { 343 325 trace_default_header(s); ··· 346 342 /* 347 343 * Should this new latency be reported/recorded? 348 344 */ 349 - static int report_latency(struct trace_array *tr, cycle_t delta) 345 + static bool report_latency(struct trace_array *tr, cycle_t delta) 350 346 { 351 347 if (tracing_thresh) { 352 348 if (delta < tracing_thresh) 353 - return 0; 349 + return false; 354 350 } else { 355 351 if (delta <= tr->max_latency) 356 - return 0; 352 + return false; 357 353 } 358 - return 1; 354 + return true; 359 355 } 360 356 361 357 static void ··· 392 388 entry->next_cpu = task_cpu(next); 393 389 394 390 if (!call_filter_check_discard(call, entry, buffer, event)) 395 - trace_buffer_unlock_commit(buffer, event, flags, pc); 391 + trace_buffer_unlock_commit(tr, buffer, event, flags, pc); 396 392 } 397 393 398 394 static void ··· 420 416 entry->next_cpu = task_cpu(wakee); 421 417 422 418 if (!call_filter_check_discard(call, entry, buffer, event)) 423 - trace_buffer_unlock_commit(buffer, event, flags, pc); 419 + trace_buffer_unlock_commit(tr, buffer, event, flags, pc); 424 420 } 425 421 426 422 static void notrace ··· 639 635 */ 640 636 smp_wmb(); 641 637 642 - if (start_func_tracer(tr, is_graph())) 638 + if (start_func_tracer(tr, is_graph(tr))) 643 639 printk(KERN_ERR "failed to start wakeup tracer\n"); 644 640 645 641 return; ··· 652 648 static void stop_wakeup_tracer(struct trace_array *tr) 653 649 { 654 650 tracer_enabled = 0; 655 - stop_func_tracer(tr, is_graph()); 651 + stop_func_tracer(tr, is_graph(tr)); 656 652 unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL); 657 653 
unregister_trace_sched_wakeup_new(probe_wakeup, NULL); 658 654 unregister_trace_sched_wakeup(probe_wakeup, NULL); ··· 663 659 664 660 static int __wakeup_tracer_init(struct trace_array *tr) 665 661 { 666 - save_flags = trace_flags; 662 + save_flags = tr->trace_flags; 667 663 668 664 /* non overwrite screws up the latency tracers */ 669 665 set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); ··· 744 740 .print_max = true, 745 741 .print_header = wakeup_print_header, 746 742 .print_line = wakeup_print_line, 747 - .flags = &tracer_flags, 748 - .set_flag = wakeup_set_flag, 749 743 .flag_changed = wakeup_flag_changed, 750 744 #ifdef CONFIG_FTRACE_SELFTEST 751 745 .selftest = trace_selftest_startup_wakeup, ··· 764 762 .print_max = true, 765 763 .print_header = wakeup_print_header, 766 764 .print_line = wakeup_print_line, 767 - .flags = &tracer_flags, 768 - .set_flag = wakeup_set_flag, 769 765 .flag_changed = wakeup_flag_changed, 770 766 #ifdef CONFIG_FTRACE_SELFTEST 771 767 .selftest = trace_selftest_startup_wakeup, ··· 784 784 .print_max = true, 785 785 .print_header = wakeup_print_header, 786 786 .print_line = wakeup_print_line, 787 - .flags = &tracer_flags, 788 - .set_flag = wakeup_set_flag, 789 787 .flag_changed = wakeup_flag_changed, 790 788 #ifdef CONFIG_FTRACE_SELFTEST 791 789 .selftest = trace_selftest_startup_wakeup,
+49 -43
kernel/trace/trace_stack.c
··· 16 16 17 17 #include "trace.h" 18 18 19 - #define STACK_TRACE_ENTRIES 500 20 - 21 19 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = 22 20 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; 23 - static unsigned stack_dump_index[STACK_TRACE_ENTRIES]; 21 + unsigned stack_trace_index[STACK_TRACE_ENTRIES]; 24 22 25 23 /* 26 24 * Reserve one entry for the passed in ip. This will allow 27 25 * us to remove most or all of the stack size overhead 28 26 * added by the stack tracer itself. 29 27 */ 30 - static struct stack_trace max_stack_trace = { 28 + struct stack_trace stack_trace_max = { 31 29 .max_entries = STACK_TRACE_ENTRIES - 1, 32 30 .entries = &stack_dump_trace[0], 33 31 }; 34 32 35 - static unsigned long max_stack_size; 36 - static arch_spinlock_t max_stack_lock = 33 + unsigned long stack_trace_max_size; 34 + arch_spinlock_t stack_trace_max_lock = 37 35 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 38 36 39 37 static DEFINE_PER_CPU(int, trace_active); ··· 40 42 int stack_tracer_enabled; 41 43 static int last_stack_tracer_enabled; 42 44 43 - static inline void print_max_stack(void) 45 + void stack_trace_print(void) 44 46 { 45 47 long i; 46 48 int size; 47 49 48 50 pr_emerg(" Depth Size Location (%d entries)\n" 49 51 " ----- ---- --------\n", 50 - max_stack_trace.nr_entries); 52 + stack_trace_max.nr_entries); 51 53 52 - for (i = 0; i < max_stack_trace.nr_entries; i++) { 54 + for (i = 0; i < stack_trace_max.nr_entries; i++) { 53 55 if (stack_dump_trace[i] == ULONG_MAX) 54 56 break; 55 - if (i+1 == max_stack_trace.nr_entries || 57 + if (i+1 == stack_trace_max.nr_entries || 56 58 stack_dump_trace[i+1] == ULONG_MAX) 57 - size = stack_dump_index[i]; 59 + size = stack_trace_index[i]; 58 60 else 59 - size = stack_dump_index[i] - stack_dump_index[i+1]; 61 + size = stack_trace_index[i] - stack_trace_index[i+1]; 60 62 61 - pr_emerg("%3ld) %8d %5d %pS\n", i, stack_dump_index[i], 63 + pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i], 62 64 size, (void 
*)stack_dump_trace[i]); 63 65 } 64 66 } 65 67 66 - static inline void 68 + /* 69 + * When arch-specific code overides this function, the following 70 + * data should be filled up, assuming stack_trace_max_lock is held to 71 + * prevent concurrent updates. 72 + * stack_trace_index[] 73 + * stack_trace_max 74 + * stack_trace_max_size 75 + */ 76 + void __weak 67 77 check_stack(unsigned long ip, unsigned long *stack) 68 78 { 69 79 unsigned long this_size, flags; unsigned long *p, *top, *start; ··· 84 78 /* Remove the frame of the tracer */ 85 79 this_size -= frame_size; 86 80 87 - if (this_size <= max_stack_size) 81 + if (this_size <= stack_trace_max_size) 88 82 return; 89 83 90 84 /* we do not handle interrupt stacks yet */ ··· 96 90 return; 97 91 98 92 local_irq_save(flags); 99 - arch_spin_lock(&max_stack_lock); 93 + arch_spin_lock(&stack_trace_max_lock); 100 94 101 95 /* 102 96 * RCU may not be watching, make it see us. ··· 109 103 this_size -= tracer_frame; 110 104 111 105 /* a race could have already updated it */ 112 - if (this_size <= max_stack_size) 106 + if (this_size <= stack_trace_max_size) 113 107 goto out; 114 108 115 - max_stack_size = this_size; 109 + stack_trace_max_size = this_size; 116 110 117 - max_stack_trace.nr_entries = 0; 118 - max_stack_trace.skip = 3; 111 + stack_trace_max.nr_entries = 0; 112 + stack_trace_max.skip = 3; 119 113 120 - save_stack_trace(&max_stack_trace); 114 + save_stack_trace(&stack_trace_max); 121 115 122 116 /* Skip over the overhead of the stack tracer itself */ 123 - for (i = 0; i < max_stack_trace.nr_entries; i++) { 117 + for (i = 0; i < stack_trace_max.nr_entries; i++) { 124 118 if (stack_dump_trace[i] == ip) 125 119 break; 126 120 } ··· 140 134 * loop will only happen once. This code only takes place 141 135 * on a new max, so it is far from a fast path. 
142 136 */ 143 - while (i < max_stack_trace.nr_entries) { 137 + while (i < stack_trace_max.nr_entries) { 144 138 int found = 0; 145 139 146 - stack_dump_index[x] = this_size; 140 + stack_trace_index[x] = this_size; 147 141 p = start; 148 142 149 - for (; p < top && i < max_stack_trace.nr_entries; p++) { 143 + for (; p < top && i < stack_trace_max.nr_entries; p++) { 150 144 if (stack_dump_trace[i] == ULONG_MAX) 151 145 break; 152 146 if (*p == stack_dump_trace[i]) { 153 147 stack_dump_trace[x] = stack_dump_trace[i++]; 154 - this_size = stack_dump_index[x++] = 148 + this_size = stack_trace_index[x++] = 155 149 (top - p) * sizeof(unsigned long); 156 150 found = 1; 157 151 /* Start the search from here */ ··· 166 160 if (unlikely(!tracer_frame)) { 167 161 tracer_frame = (p - stack) * 168 162 sizeof(unsigned long); 169 - max_stack_size -= tracer_frame; 163 + stack_trace_max_size -= tracer_frame; 170 164 } 171 165 } 172 166 } ··· 175 169 i++; 176 170 } 177 171 178 - max_stack_trace.nr_entries = x; 172 + stack_trace_max.nr_entries = x; 179 173 for (; x < i; x++) 180 174 stack_dump_trace[x] = ULONG_MAX; 181 175 182 176 if (task_stack_end_corrupted(current)) { 183 - print_max_stack(); 177 + stack_trace_print(); 184 178 BUG(); 185 179 } 186 180 187 181 out: 188 182 rcu_irq_exit(); 189 - arch_spin_unlock(&max_stack_lock); 183 + arch_spin_unlock(&stack_trace_max_lock); 190 184 local_irq_restore(flags); 191 185 } 192 186 ··· 257 251 cpu = smp_processor_id(); 258 252 per_cpu(trace_active, cpu)++; 259 253 260 - arch_spin_lock(&max_stack_lock); 254 + arch_spin_lock(&stack_trace_max_lock); 261 255 *ptr = val; 262 - arch_spin_unlock(&max_stack_lock); 256 + arch_spin_unlock(&stack_trace_max_lock); 263 257 264 258 per_cpu(trace_active, cpu)--; 265 259 local_irq_restore(flags); ··· 279 273 { 280 274 long n = *pos - 1; 281 275 282 - if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX) 276 + if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX) 283 
277 return NULL; 284 278 285 279 m->private = (void *)n; ··· 302 296 cpu = smp_processor_id(); 303 297 per_cpu(trace_active, cpu)++; 304 298 305 - arch_spin_lock(&max_stack_lock); 299 + arch_spin_lock(&stack_trace_max_lock); 306 300 307 301 if (*pos == 0) 308 302 return SEQ_START_TOKEN; ··· 314 308 { 315 309 int cpu; 316 310 317 - arch_spin_unlock(&max_stack_lock); 311 + arch_spin_unlock(&stack_trace_max_lock); 318 312 319 313 cpu = smp_processor_id(); 320 314 per_cpu(trace_active, cpu)--; ··· 349 343 seq_printf(m, " Depth Size Location" 350 344 " (%d entries)\n" 351 345 " ----- ---- --------\n", 352 - max_stack_trace.nr_entries); 346 + stack_trace_max.nr_entries); 353 347 354 - if (!stack_tracer_enabled && !max_stack_size) 348 + if (!stack_tracer_enabled && !stack_trace_max_size) 355 349 print_disabled(m); 356 350 357 351 return 0; ··· 359 353 360 354 i = *(long *)v; 361 355 362 - if (i >= max_stack_trace.nr_entries || 356 + if (i >= stack_trace_max.nr_entries || 363 357 stack_dump_trace[i] == ULONG_MAX) 364 358 return 0; 365 359 366 - if (i+1 == max_stack_trace.nr_entries || 360 + if (i+1 == stack_trace_max.nr_entries || 367 361 stack_dump_trace[i+1] == ULONG_MAX) 368 - size = stack_dump_index[i]; 362 + size = stack_trace_index[i]; 369 363 else 370 - size = stack_dump_index[i] - stack_dump_index[i+1]; 364 + size = stack_trace_index[i] - stack_trace_index[i+1]; 371 365 372 - seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size); 366 + seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size); 373 367 374 368 trace_lookup_stack(m, i); 375 369 ··· 459 453 return 0; 460 454 461 455 trace_create_file("stack_max_size", 0644, d_tracer, 462 - &max_stack_size, &stack_max_size_fops); 456 + &stack_trace_max_size, &stack_max_size_fops); 463 457 464 458 trace_create_file("stack_trace", 0444, d_tracer, 465 459 NULL, &stack_trace_fops);
+2 -1
kernel/trace/trace_syscalls.c
··· 110 110 print_syscall_enter(struct trace_iterator *iter, int flags, 111 111 struct trace_event *event) 112 112 { 113 + struct trace_array *tr = iter->tr; 113 114 struct trace_seq *s = &iter->seq; 114 115 struct trace_entry *ent = iter->ent; 115 116 struct syscall_trace_enter *trace; ··· 137 136 goto end; 138 137 139 138 /* parameter types */ 140 - if (trace_flags & TRACE_ITER_VERBOSE) 139 + if (tr->trace_flags & TRACE_ITER_VERBOSE) 141 140 trace_seq_printf(s, "%s ", entry->types[i]); 142 141 143 142 /* parameter values */
+57 -18
kernel/tracepoint.c
··· 91 91 printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); 92 92 } 93 93 94 - static struct tracepoint_func *func_add(struct tracepoint_func **funcs, 95 - struct tracepoint_func *tp_func) 94 + static struct tracepoint_func * 95 + func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func, 96 + int prio) 96 97 { 97 - int nr_probes = 0; 98 98 struct tracepoint_func *old, *new; 99 + int nr_probes = 0; 100 + int pos = -1; 99 101 100 102 if (WARN_ON(!tp_func->func)) 101 103 return ERR_PTR(-EINVAL); ··· 106 104 old = *funcs; 107 105 if (old) { 108 106 /* (N -> N+1), (N != 0, 1) probes */ 109 - for (nr_probes = 0; old[nr_probes].func; nr_probes++) 107 + for (nr_probes = 0; old[nr_probes].func; nr_probes++) { 108 + /* Insert before probes of lower priority */ 109 + if (pos < 0 && old[nr_probes].prio < prio) 110 + pos = nr_probes; 110 111 if (old[nr_probes].func == tp_func->func && 111 112 old[nr_probes].data == tp_func->data) 112 113 return ERR_PTR(-EEXIST); 114 + } 113 115 } 114 116 /* + 2 : one for new probe, one for NULL func */ 115 117 new = allocate_probes(nr_probes + 2); 116 118 if (new == NULL) 117 119 return ERR_PTR(-ENOMEM); 118 - if (old) 119 - memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); 120 - new[nr_probes] = *tp_func; 120 + if (old) { 121 + if (pos < 0) { 122 + pos = nr_probes; 123 + memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); 124 + } else { 125 + /* Copy higher priority probes ahead of the new probe */ 126 + memcpy(new, old, pos * sizeof(struct tracepoint_func)); 127 + /* Copy the rest after it. */ 128 + memcpy(new + pos + 1, old + pos, 129 + (nr_probes - pos) * sizeof(struct tracepoint_func)); 130 + } 131 + } else 132 + pos = 0; 133 + new[pos] = *tp_func; 121 134 new[nr_probes + 1].func = NULL; 122 135 *funcs = new; 123 136 debug_print_probes(*funcs); ··· 191 174 * Add the probe function to a tracepoint. 
192 175 */ 193 176 static int tracepoint_add_func(struct tracepoint *tp, 194 - struct tracepoint_func *func) 177 + struct tracepoint_func *func, int prio) 195 178 { 196 179 struct tracepoint_func *old, *tp_funcs; 197 180 ··· 200 183 201 184 tp_funcs = rcu_dereference_protected(tp->funcs, 202 185 lockdep_is_held(&tracepoints_mutex)); 203 - old = func_add(&tp_funcs, func); 186 + old = func_add(&tp_funcs, func, prio); 204 187 if (IS_ERR(old)) { 205 188 WARN_ON_ONCE(1); 206 189 return PTR_ERR(old); ··· 257 240 * @tp: tracepoint 258 241 * @probe: probe handler 259 242 * @data: tracepoint data 243 + * @prio: priority of this function over other registered functions 244 + * 245 + * Returns 0 if ok, error value on error. 246 + * Note: if @tp is within a module, the caller is responsible for 247 + * unregistering the probe before the module is gone. This can be 248 + * performed either with a tracepoint module going notifier, or from 249 + * within module exit functions. 250 + */ 251 + int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, 252 + void *data, int prio) 253 + { 254 + struct tracepoint_func tp_func; 255 + int ret; 256 + 257 + mutex_lock(&tracepoints_mutex); 258 + tp_func.func = probe; 259 + tp_func.data = data; 260 + tp_func.prio = prio; 261 + ret = tracepoint_add_func(tp, &tp_func, prio); 262 + mutex_unlock(&tracepoints_mutex); 263 + return ret; 264 + } 265 + EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio); 266 + 267 + /** 268 + * tracepoint_probe_register - Connect a probe to a tracepoint 269 + * @tp: tracepoint 270 + * @probe: probe handler 271 + * @data: tracepoint data 272 + * @prio: priority of this function over other registered functions 260 273 * 261 274 * Returns 0 if ok, error value on error. 
262 275 * Note: if @tp is within a module, the caller is responsible for ··· 296 249 */ 297 250 int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) 298 251 { 299 - struct tracepoint_func tp_func; 300 - int ret; 301 - 302 - mutex_lock(&tracepoints_mutex); 303 - tp_func.func = probe; 304 - tp_func.data = data; 305 - ret = tracepoint_add_func(tp, &tp_func); 306 - mutex_unlock(&tracepoints_mutex); 307 - return ret; 252 + return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO); 308 253 } 309 254 EXPORT_SYMBOL_GPL(tracepoint_probe_register); 310 255
+3 -3
samples/trace_events/trace-events-sample.h
··· 4 4 * 5 5 * The define_trace.h below will also look for a file name of 6 6 * TRACE_SYSTEM.h where TRACE_SYSTEM is what is defined here. 7 - * In this case, it would look for sample.h 7 + * In this case, it would look for sample-trace.h 8 8 * 9 9 * If the header name will be different than the system name 10 10 * (as in this case), then you can override the header name that 11 11 * define_trace.h will look up by defining TRACE_INCLUDE_FILE 12 12 * 13 13 * This file is called trace-events-sample.h but we want the system 14 - * to be called "sample". Therefore we must define the name of this 14 + * to be called "sample-trace". Therefore we must define the name of this 15 15 * file: 16 16 * 17 17 * #define TRACE_INCLUDE_FILE trace-events-sample ··· 106 106 * 107 107 * memcpy(__entry->foo, bar, 10); 108 108 * 109 - * __dynamic_array: This is similar to array, but can vary is size from 109 + * __dynamic_array: This is similar to array, but can vary its size from 110 110 * instance to instance of the tracepoint being called. 111 111 * Like __array, this too has three elements (type, name, size); 112 112 * type is the type of the element, name is the name of the array.
+25 -1
scripts/recordmcount.c
··· 42 42 43 43 #ifndef EM_AARCH64 44 44 #define EM_AARCH64 183 45 + #define R_AARCH64_NONE 0 45 46 #define R_AARCH64_ABS64 257 46 47 #endif 47 48 ··· 158 157 /* convert to nop */ 159 158 ulseek(fd_map, offset - 1, SEEK_SET); 160 159 uwrite(fd_map, ideal_nop, 5); 160 + return 0; 161 + } 162 + 163 + static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5}; 164 + static int make_nop_arm64(void *map, size_t const offset) 165 + { 166 + uint32_t *ptr; 167 + 168 + ptr = map + offset; 169 + /* bl <_mcount> is 0x94000000 before relocation */ 170 + if (*ptr != 0x94000000) 171 + return -1; 172 + 173 + /* Convert to nop */ 174 + ulseek(fd_map, offset, SEEK_SET); 175 + uwrite(fd_map, ideal_nop, 4); 161 176 return 0; 162 177 } 163 178 ··· 362 345 break; 363 346 case EM_386: 364 347 reltype = R_386_32; 348 + rel_type_nop = R_386_NONE; 365 349 make_nop = make_nop_x86; 366 350 ideal_nop = ideal_nop5_x86_32; 367 351 mcount_adjust_32 = -1; ··· 371 353 altmcount = "__gnu_mcount_nc"; 372 354 break; 373 355 case EM_AARCH64: 374 - reltype = R_AARCH64_ABS64; gpfx = '_'; break; 356 + reltype = R_AARCH64_ABS64; 357 + make_nop = make_nop_arm64; 358 + rel_type_nop = R_AARCH64_NONE; 359 + ideal_nop = ideal_nop4_arm64; 360 + gpfx = '_'; 361 + break; 375 362 case EM_IA_64: reltype = R_IA64_IMM64; gpfx = '_'; break; 376 363 case EM_METAG: reltype = R_METAG_ADDR32; 377 364 altmcount = "_mcount_wrapper"; ··· 394 371 make_nop = make_nop_x86; 395 372 ideal_nop = ideal_nop5_x86_64; 396 373 reltype = R_X86_64_64; 374 + rel_type_nop = R_X86_64_NONE; 397 375 mcount_adjust_64 = -1; 398 376 break; 399 377 } /* end switch */
+1 -1
scripts/recordmcount.h
··· 377 377 378 378 if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) { 379 379 if (make_nop) 380 - ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset); 380 + ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset)); 381 381 if (warn_on_notrace_sect && !once) { 382 382 printf("Section %s has mcount callers being ignored\n", 383 383 txtname);