Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
"This patch series contains several clean ups and even a new trace
clock "monitonic raw". Also some enhancements to make the ring buffer
even faster. But the biggest and most noticeable change is the
renaming of the ftrace* files, structures and variables that have to
deal with trace events.

Over the years I've had several developers tell me about their
confusion over what ftrace is compared to trace events. Technically,
"ftrace" is the infrastructure that does the function hooks, which
includes tracing and also helps with live kernel patching. But the trace
events are a separate entity altogether, and the files that affect the
trace events should not be named "ftrace". These include:

include/trace/ftrace.h -> include/trace/trace_events.h
include/linux/ftrace_event.h -> include/linux/trace_events.h

Also, functions that are specific for trace events have also been renamed:

ftrace_print_*() -> trace_print_*()
(un)register_ftrace_event() -> (un)register_trace_event()
ftrace_event_name() -> trace_event_name()
ftrace_trigger_soft_disabled() -> trace_trigger_soft_disabled()
ftrace_define_fields_##call() -> trace_event_define_fields_##call()
ftrace_get_offsets_##call() -> trace_event_get_offsets_##call()

Structures have been renamed:

ftrace_event_file -> trace_event_file
ftrace_event_{call,class} -> trace_event_{call,class}
ftrace_event_buffer -> trace_event_buffer
ftrace_subsystem_dir -> trace_subsystem_dir
ftrace_raw_##call -> trace_event_raw_##call
ftrace_data_offsets_##call -> trace_event_data_offsets_##call
ftrace_event_type_funcs_##call -> trace_event_type_funcs_##call

Various other variables and flags have also been updated.

This has been sitting in linux-next for some time, and I have not
heard a single complaint about this rename breaking anything, mostly
because these functions, variables and structures are internal to
the tracing system and are seldom (if ever) used by anything external
to it"

* tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits)
ring_buffer: Allow to exit the ring buffer benchmark immediately
ring-buffer-benchmark: Fix the wrong type
ring-buffer-benchmark: Fix the wrong param in module_param
ring-buffer: Add enum names for the context levels
ring-buffer: Remove useless unused tracing_off_permanent()
ring-buffer: Give NMIs a chance to lock the reader_lock
ring-buffer: Add trace_recursive checks to ring_buffer_write()
ring-buffer: Always do the trace_recursive checks
ring-buffer: Move recursive check to per_cpu descriptor
ring-buffer: Add unlikelys to make fast path the default
tracing: Rename ftrace_get_offsets_##call() to trace_event_get_offsets_##call()
tracing: Rename ftrace_define_fields_##call() to trace_event_define_fields_##call()
tracing: Rename ftrace_event_type_funcs_##call to trace_event_type_funcs_##call
tracing: Rename ftrace_data_offset_##call to trace_event_data_offset_##call
tracing: Rename ftrace_raw_##call event structures to trace_event_raw_##call
tracing: Rename ftrace_trigger_soft_disabled() to trace_trigger_soft_disabled()
tracing: Rename FTRACE_EVENT_FL_* flags to EVENT_FILE_FL_*
tracing: Rename struct ftrace_subsystem_dir to trace_subsystem_dir
tracing: Rename ftrace_event_name() to trace_event_name()
tracing: Rename FTRACE_MAX_EVENT to TRACE_EVENT_TYPE_MAX
...
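
The recursion-fix commits above move the write-recursion bookkeeping from
a per-cpu variable into the per-cpu buffer descriptor, giving each context
(NMI, IRQ, softirq, normal) its own bit. A stand-alone user-space sketch of
the same bit trick, with names shortened (the kernel version is in the
kernel/trace/ring_buffer.c hunks below):

    #include <assert.h>

    enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

    /* One bit per context: a second write from the same context is recursion. */
    static int recursive_lock(unsigned int *ctx, int bit)
    {
    	if (*ctx & (1u << bit))
    		return 1;	/* already inside this context: reject the write */
    	*ctx |= 1u << bit;
    	return 0;
    }

    static void recursive_unlock(unsigned int *ctx)
    {
    	*ctx &= *ctx - 1;	/* clear the lowest set bit: the innermost context */
    }

    int main(void)
    {
    	unsigned int ctx = 0;

    	assert(!recursive_lock(&ctx, CTX_NORMAL));	/* normal write is fine */
    	assert(!recursive_lock(&ctx, CTX_IRQ));		/* nested IRQ write too */
    	assert(recursive_lock(&ctx, CTX_IRQ));		/* IRQ recursion caught */
    	recursive_unlock(&ctx);				/* back to normal level */
    	assert(ctx == 1u << CTX_NORMAL);
    	return 0;
    }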

+1024 -1075
+1 -1
arch/x86/kvm/mmutrace.h
···
 #define _TRACE_KVMMMU_H
 
 #include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
+1 -1
arch/x86/kvm/svm.c
···
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/slab.h>
 
 #include <asm/perf_event.h>
+1 -1
arch/x86/kvm/vmx.c
···
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/mod_devicetable.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/hrtimer.h>
+83 -94
include/linux/ftrace_event.h include/linux/trace_events.h
···
 
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
 
 #include <linux/ring_buffer.h>
 #include <linux/trace_seq.h>
···
 	const char *name;
 };
 
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
-				   unsigned long flags,
-				   const struct trace_print_flags *flag_array);
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
+				  unsigned long flags,
+				  const struct trace_print_flags *flag_array);
 
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
-				     const struct trace_print_flags *symbol_array);
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+				    const struct trace_print_flags *symbol_array);
 
 #if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
-					 unsigned long long val,
-					 const struct trace_print_flags_u64
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
+					unsigned long long val,
+					const struct trace_print_flags_u64
 		 *symbol_array);
 #endif
 
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
-				     unsigned int bitmask_size);
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+				    unsigned int bitmask_size);
 
-const char *ftrace_print_hex_seq(struct trace_seq *p,
-				 const unsigned char *buf, int len);
+const char *trace_print_hex_seq(struct trace_seq *p,
+				const unsigned char *buf, int len);
 
-const char *ftrace_print_array_seq(struct trace_seq *p,
+const char *trace_print_array_seq(struct trace_seq *p,
 				   const void *buf, int count,
 				   size_t el_size);
 
 struct trace_iterator;
 struct trace_event;
 
-int ftrace_raw_output_prep(struct trace_iterator *iter,
-			   struct trace_event *event);
+int trace_raw_output_prep(struct trace_iterator *iter,
+			  struct trace_event *event);
 
 /*
  * The trace entry - the most basic unit of tracing. This is what
···
 	int pid;
 };
 
-#define FTRACE_MAX_EVENT \
+#define TRACE_EVENT_TYPE_MAX \
 	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
 
 /*
···
 	struct trace_event_functions *funcs;
 };
 
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
 
 /* Return values for print_line callback */
 enum print_line_t {
···
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
-struct ftrace_event_file;
+struct trace_event_file;
 
 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
-				struct ftrace_event_file *ftrace_file,
+				struct trace_event_file *trace_file,
 				int type, unsigned long len,
 				unsigned long flags, int pc);
 struct ring_buffer_event *
···
 
 void tracing_record_cmdline(struct task_struct *tsk);
 
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
 
 struct event_filter;
 
···
 #endif
 };
 
-struct ftrace_event_call;
+struct trace_event_call;
 
-struct ftrace_event_class {
+struct trace_event_class {
 	const char *system;
 	void *probe;
 #ifdef CONFIG_PERF_EVENTS
 	void *perf_probe;
 #endif
-	int (*reg)(struct ftrace_event_call *event,
+	int (*reg)(struct trace_event_call *event,
 		   enum trace_reg type, void *data);
-	int (*define_fields)(struct ftrace_event_call *);
-	struct list_head *(*get_fields)(struct ftrace_event_call *);
+	int (*define_fields)(struct trace_event_call *);
+	struct list_head *(*get_fields)(struct trace_event_call *);
 	struct list_head fields;
-	int (*raw_init)(struct ftrace_event_call *);
+	int (*raw_init)(struct trace_event_call *);
 };
 
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
 			    enum trace_reg type, void *data);
 
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
-			char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
-			      char *type, int len, char *item, int offset,
-			      int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
 	struct ring_buffer *buffer;
 	struct ring_buffer_event *event;
-	struct ftrace_event_file *ftrace_file;
+	struct trace_event_file *trace_file;
 	void *entry;
 	unsigned long flags;
 	int pc;
 };
 
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
-				  struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+				 struct trace_event_file *trace_file,
 				 unsigned long len);
 
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
-			      char *type, int len, char *item, int offset,
-			      int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
 
 enum {
 	TRACE_EVENT_FL_FILTERED_BIT,
···
  *  FILTERED      - The event has a filter attached
  *  CAP_ANY       - Any user can enable for perf
  *  NO_SET_FILTER - Set when filter has error and is to be ignored
- *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
  *  WAS_ENABLED   - Set and stays set when an event was ever enabled
  *                  (used for module unloading, if a module event is enabled,
  *                  it is best to clear the buffers that used it).
- *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ *  USE_CALL_FILTER - For trace internal events, don't use file filter
  *  TRACEPOINT    - Event is a tracepoint
  *  KPROBE        - Event is a kprobe
  */
···
 	TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
 };
 
-struct ftrace_event_call {
+struct trace_event_call {
 	struct list_head list;
-	struct ftrace_event_class *class;
+	struct trace_event_class *class;
 	union {
 		char *name;
 		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
···
 	 *   bit 0: filter_active
 	 *   bit 1: allow trace by non root (cap any)
 	 *   bit 2: failed to apply filter
-	 *   bit 3: ftrace internal event (do not enable)
+	 *   bit 3: trace internal event (do not enable)
 	 *   bit 4: Event was enabled by module
 	 *   bit 5: use call filter rather than file filter
 	 *   bit 6: Event is a tracepoint
···
 	struct hlist_head __percpu *perf_events;
 	struct bpf_prog *prog;
 
-	int (*perf_perm)(struct ftrace_event_call *,
+	int (*perf_perm)(struct trace_event_call *,
 			 struct perf_event *);
 #endif
 };
 
 static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
 {
 	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
 		return call->tp ? call->tp->name : NULL;
···
 }
 
 struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
 
 enum {
-	FTRACE_EVENT_FL_ENABLED_BIT,
-	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
-	FTRACE_EVENT_FL_FILTERED_BIT,
-	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
-	FTRACE_EVENT_FL_SOFT_MODE_BIT,
-	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
-	FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
-	FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+	EVENT_FILE_FL_ENABLED_BIT,
+	EVENT_FILE_FL_RECORDED_CMD_BIT,
+	EVENT_FILE_FL_FILTERED_BIT,
+	EVENT_FILE_FL_NO_SET_FILTER_BIT,
+	EVENT_FILE_FL_SOFT_MODE_BIT,
+	EVENT_FILE_FL_SOFT_DISABLED_BIT,
+	EVENT_FILE_FL_TRIGGER_MODE_BIT,
+	EVENT_FILE_FL_TRIGGER_COND_BIT,
 };
 
 /*
- * Ftrace event file flags:
+ * Event file flags:
  *  ENABLED       - The event is enabled
  *  RECORDED_CMD  - The comms should be recorded at sched_switch
  *  FILTERED      - The event has a filter attached
···
  *  TRIGGER_COND  - When set, one or more triggers has an associated filter
  */
 enum {
-	FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
-	FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
-	FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
-	FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
-	FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
-	FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
-	FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
-	FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+	EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
+	EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+	EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
+	EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+	EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+	EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+	EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+	EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
 };
 
-struct ftrace_event_file {
+struct trace_event_file {
 	struct list_head list;
-	struct ftrace_event_call *event_call;
+	struct trace_event_call *event_call;
 	struct event_filter *filter;
 	struct dentry *dir;
 	struct trace_array *tr;
-	struct ftrace_subsystem_dir *system;
+	struct trace_subsystem_dir *system;
 	struct list_head triggers;
 
 	/*
···
 	early_initcall(trace_init_flags_##name);
 
 #define __TRACE_EVENT_PERF_PERM(name, expr...) \
-	static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+	static int perf_perm_##name(struct trace_event_call *tp_event, \
 				    struct perf_event *p_event) \
 	{ \
 		return ({ expr; }); \
···
 
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
 				struct ring_buffer *buffer,
 				struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
 						   void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
 				     enum event_trigger_type tt);
 
 /**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
  * @file: The file pointer of the event to test
  *
  * If any triggers without filters are attached to this event, they
···
  * otherwise false.
  */
 static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
 {
 	unsigned long eflags = file->flags;
 
-	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
-		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 			event_triggers_call(file, NULL);
-		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 			return true;
 	}
 	return false;
···
  * Returns true if the event is discarded, false otherwise.
  */
 static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
 			     struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     void *entry,
···
 {
 	unsigned long eflags = file->flags;
 
-	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 		*tt = event_triggers_call(file, entry);
 
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
 		ring_buffer_discard_commit(buffer, event);
 	else if (!filter_check_discard(file, entry, buffer, event))
 		return false;
···
  * if the event is soft disabled and should be discarded.
  */
 static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct ring_buffer *buffer,
 			    struct ring_buffer_event *event,
 			    void *entry, unsigned long irq_flags, int pc)
···
  * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
  */
 static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
 				 struct ring_buffer *buffer,
 				 struct ring_buffer_event *event,
 				 void *entry, unsigned long irq_flags, int pc,
···
 	FILTER_TRACE_FN,
 };
 
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
 			      const char *name, int offset, int size,
 			      int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
 
 #define is_signed_type(type) (((type)(-1)) < (type)1)
 
···
 }
 #endif
 
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
-6
include/linux/kernel.h
···
  *
  * Most likely, you want to use tracing_on/tracing_off.
  */
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
 
 enum ftrace_dump_mode {
 	DUMP_NONE,
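
With the permanent kill switch gone, an old tracing_off_permanent() call
site is left with the documented pair that the comment above points to. A
hypothetical replacement sketch (assuming only tracing_on()/tracing_off()):

    #include <linux/kernel.h>

    static void my_stop_tracing_on_anomaly(void)
    {
    	/* Unlike the removed tracing_off_permanent(), this can be
    	 * undone later with tracing_on(). */
    	tracing_off();
    }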
+1 -1
include/linux/module.h
···
 	const char **trace_bprintk_fmt_start;
 #endif
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call **trace_events;
+	struct trace_event_call **trace_events;
 	unsigned int num_trace_events;
 	struct trace_enum_map **trace_enums;
 	unsigned int num_trace_enums;
+1 -1
include/linux/perf_event.h
···
 	void *overflow_handler_context;
 
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call *tp_event;
+	struct trace_event_call *tp_event;
 	struct event_filter *filter;
 #ifdef CONFIG_FUNCTION_TRACER
 	struct ftrace_ops ftrace_ops;
+6 -6
include/linux/syscalls.h
···
 #define __SC_STR_ADECL(t, a) #a
 #define __SC_STR_TDECL(t, a) #t
 
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
 extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname) \
 	static struct syscall_metadata __syscall_meta_##sname; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	  event_enter_##sname = { \
 		.class = &event_class_syscall_enter, \
 		{ \
···
 		.data = (void *)&__syscall_meta_##sname,\
 		.flags = TRACE_EVENT_FL_CAP_ANY, \
 	}; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	  __attribute__((section("_ftrace_events"))) \
 	 *__event_enter_##sname = &event_enter_##sname;
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname) \
 	static struct syscall_metadata __syscall_meta_##sname; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	  event_exit_##sname = { \
 		.class = &event_class_syscall_exit, \
 		{ \
···
 		.data = (void *)&__syscall_meta_##sname,\
 		.flags = TRACE_EVENT_FL_CAP_ANY, \
 	}; \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	  __attribute__((section("_ftrace_events"))) \
 	 *__event_exit_##sname = &event_exit_##sname;
 
+2 -1
include/trace/define_trace.h
···
 #define DECLARE_TRACE(name, proto, args)
 
 #ifdef CONFIG_EVENT_TRACING
-#include <trace/ftrace.h>
+#include <trace/trace_events.h>
+#include <trace/perf.h>
 #endif
 
 #undef TRACE_EVENT
+1 -1
include/trace/events/power.h
···
 #include <linux/ktime.h>
 #include <linux/pm_qos.h>
 #include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #define TPS(x)  tracepoint_string(x)
 
+31 -382
include/trace/ftrace.h include/trace/trace_events.h
···
  *
  * Override the macros in <trace/trace_events.h> to include the following:
  *
- * struct ftrace_raw_<call> {
+ * struct trace_event_raw_<call> {
  *	struct trace_entry ent;
  *	<type> <item>;
  *	<type2> <item2>[<len>];
···
  * in the structure.
  */
 
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #ifndef TRACE_SYSTEM_VAR
 #define TRACE_SYSTEM_VAR TRACE_SYSTEM
···
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
-	struct ftrace_raw_##name { \
+	struct trace_event_raw_##name { \
 		struct trace_entry ent; \
 		tstruct \
 		char __data[0]; \
 	}; \
 	\
-	static struct ftrace_event_class event_class_##name;
+	static struct trace_event_class event_class_##name;
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args) \
-	static struct ftrace_event_call __used \
+	static struct trace_event_call __used \
 	__attribute__((__aligned__(4))) event_##name
 
 #undef DEFINE_EVENT_FN
···
  *
  * Include the following:
  *
- * struct ftrace_data_offsets_<call> {
+ * struct trace_event_data_offsets_<call> {
  *	u32 <item1>;
  *	u32 <item2>;
  *	[...]
···
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-	struct ftrace_data_offsets_##call { \
+	struct trace_event_data_offsets_##call { \
 		tstruct; \
 	};
 
···
  * Override the macros in <trace/trace_events.h> to include the following:
  *
  * enum print_line_t
- * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
  * {
  *	struct trace_seq *s = &iter->seq;
- *	struct ftrace_raw_<call> *field; <-- defined in stage 1
+ *	struct trace_event_raw_<call> *field; <-- defined in stage 1
  *	struct trace_entry *entry;
  *	struct trace_seq *p = &iter->tmp_seq;
  *	int ret;
···
 		void *__bitmask = __get_dynamic_array(field); \
 		unsigned int __bitmask_size; \
 		__bitmask_size = __get_dynamic_array_len(field); \
-		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+		trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
 	})
 
 #undef __print_flags
···
 	({ \
 		static const struct trace_print_flags __flags[] = \
 			{ flag_array, { -1, NULL }}; \
-		ftrace_print_flags_seq(p, delim, flag, __flags); \
+		trace_print_flags_seq(p, delim, flag, __flags); \
 	})
 
 #undef __print_symbolic
···
 	({ \
 		static const struct trace_print_flags symbols[] = \
 			{ symbol_array, { -1, NULL }}; \
-		ftrace_print_symbols_seq(p, value, symbols); \
+		trace_print_symbols_seq(p, value, symbols); \
 	})
 
 #undef __print_symbolic_u64
···
 	({ \
 		static const struct trace_print_flags_u64 symbols[] = \
 			{ symbol_array, { -1, NULL } }; \
-		ftrace_print_symbols_seq_u64(p, value, symbols); \
+		trace_print_symbols_seq_u64(p, value, symbols); \
 	})
 #else
 #define __print_symbolic_u64(value, symbol_array...) \
···
 #endif
 
 #undef __print_hex
-#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
 
 #undef __print_array
 #define __print_array(array, count, el_size) \
 	({ \
 		BUILD_BUG_ON(el_size != 1 && el_size != 2 && \
 			     el_size != 4 && el_size != 8); \
-		ftrace_print_array_seq(p, array, count, el_size); \
+		trace_print_array_seq(p, array, count, el_size); \
 	})
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
-			 struct trace_event *trace_event) \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
+			struct trace_event *trace_event) \
 { \
 	struct trace_seq *s = &iter->seq; \
 	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
-	struct ftrace_raw_##call *field; \
+	struct trace_event_raw_##call *field; \
 	int ret; \
 	\
 	field = (typeof(field))iter->ent; \
 	\
-	ret = ftrace_raw_output_prep(iter, trace_event); \
+	ret = trace_raw_output_prep(iter, trace_event); \
 	if (ret != TRACE_TYPE_HANDLED) \
 		return ret; \
 	\
···
 	\
 	return trace_handle_return(s); \
 } \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
-	.trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+	.trace = trace_raw_output_##call, \
 };
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
 static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
+trace_raw_output_##call(struct trace_iterator *iter, int flags, \
 			 struct trace_event *event) \
 { \
-	struct ftrace_raw_##template *field; \
+	struct trace_event_raw_##template *field; \
 	struct trace_entry *entry; \
 	struct trace_seq *p = &iter->tmp_seq; \
 	\
···
 	field = (typeof(field))entry; \
 	\
 	trace_seq_init(p); \
-	return ftrace_output_call(iter, #call, print); \
+	return trace_output_call(iter, #call, print); \
 } \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
-	.trace = ftrace_raw_output_##call, \
+static struct trace_event_functions trace_event_type_funcs_##call = { \
+	.trace = trace_raw_output_##call, \
 };
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
···
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
 static int notrace __init \
-ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
+trace_event_define_fields_##call(struct trace_event_call *event_call) \
 { \
-	struct ftrace_raw_##call field; \
+	struct trace_event_raw_##call field; \
 	int ret; \
 	\
 	tstruct; \
···
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static inline notrace int ftrace_get_offsets_##call( \
-	struct ftrace_data_offsets_##call *__data_offsets, proto) \
+static inline notrace int trace_event_get_offsets_##call( \
+	struct trace_event_data_offsets_##call *__data_offsets, proto) \
 { \
 	int __data_size = 0; \
 	int __maybe_unused __item_length; \
-	struct ftrace_raw_##call __maybe_unused *entry; \
+	struct trace_event_raw_##call __maybe_unused *entry; \
 	\
 	tstruct; \
 	\
···
 	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-/*
- * Stage 4 of the trace events.
- *
- * Override the macros in <trace/trace_events.h> to include the following:
- *
- * For those macros defined with TRACE_EVENT:
- *
- * static struct ftrace_event_call event_<call>;
- *
- * static void ftrace_raw_event_<call>(void *__data, proto)
- * {
- *	struct ftrace_event_file *ftrace_file = __data;
- *	struct ftrace_event_call *event_call = ftrace_file->event_call;
- *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- *	unsigned long eflags = ftrace_file->flags;
- *	enum event_trigger_type __tt = ETT_NONE;
- *	struct ring_buffer_event *event;
- *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
- *	struct ring_buffer *buffer;
- *	unsigned long irq_flags;
- *	int __data_size;
- *	int pc;
- *
- *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
- *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
- *			event_triggers_call(ftrace_file, NULL);
- *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
- *			return;
- *	}
- *
- *	local_save_flags(irq_flags);
- *	pc = preempt_count();
- *
- *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
- *				  event_<call>->event.type,
- *				  sizeof(*entry) + __data_size,
- *				  irq_flags, pc);
- *	if (!event)
- *		return;
- *	entry = ring_buffer_event_data(event);
- *
- *	{ <assign>; } <-- Here we assign the entries by the __field and
- *			  __array macros.
- *
- *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
- *		__tt = event_triggers_call(ftrace_file, entry);
- *
- *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
- *		     &ftrace_file->flags))
- *		ring_buffer_discard_commit(buffer, event);
- *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
- *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
- *
- *	if (__tt)
- *		event_triggers_post_call(ftrace_file, __tt);
- * }
- *
- * static struct trace_event ftrace_event_type_<call> = {
- *	.trace = ftrace_raw_output_<call>, <-- stage 2
- * };
- *
- * static char print_fmt_<call>[] = <TP_printk>;
- *
- * static struct ftrace_event_class __used event_class_<template> = {
- *	.system = "<system>",
- *	.define_fields = ftrace_define_fields_<call>,
- *	.fields = LIST_HEAD_INIT(event_class_##call.fields),
- *	.raw_init = trace_event_raw_init,
- *	.probe = ftrace_raw_event_##call,
- *	.reg = ftrace_event_reg,
- * };
- *
- * static struct ftrace_event_call event_<call> = {
- *	.class = event_class_<template>,
- *	{
- *		.tp = &__tracepoint_<call>,
- *	},
- *	.event = &ftrace_event_type_<call>,
- *	.print_fmt = print_fmt_<call>,
- *	.flags = TRACE_EVENT_FL_TRACEPOINT,
- * };
- * // its only safe to use pointers when doing linker tricks to
- * // create an array.
- * static struct ftrace_event_call __used
- * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
- *
- */
-
-#ifdef CONFIG_PERF_EVENTS
-
-#define _TRACE_PERF_PROTO(call, proto) \
-	static notrace void \
-	perf_trace_##call(void *__data, proto);
-
-#define _TRACE_PERF_INIT(call) \
-	.perf_probe = perf_trace_##call,
-
-#else
-#define _TRACE_PERF_PROTO(call, proto)
-#define _TRACE_PERF_INIT(call)
-#endif /* CONFIG_PERF_EVENTS */
-
-#undef __entry
-#define __entry entry
-
-#undef __field
-#define __field(type, item)
-
-#undef __field_struct
-#define __field_struct(type, item)
-
-#undef __array
-#define __array(type, item, len)
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
-	__entry->__data_loc_##item = __data_offsets.item;
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef __assign_str
-#define __assign_str(dst, src) \
-	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
-
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __assign_bitmask
-#define __assign_bitmask(dst, src, nr_bits) \
-	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
-
-#undef TP_fast_assign
-#define TP_fast_assign(args...) args
-
-#undef __perf_addr
-#define __perf_addr(a) (a)
-
-#undef __perf_count
-#define __perf_count(c) (c)
-
-#undef __perf_task
-#define __perf_task(t) (t)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-	\
-static notrace void \
-ftrace_raw_event_##call(void *__data, proto) \
-{ \
-	struct ftrace_event_file *ftrace_file = __data; \
-	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ftrace_event_buffer fbuffer; \
-	struct ftrace_raw_##call *entry; \
-	int __data_size; \
-	\
-	if (ftrace_trigger_soft_disabled(ftrace_file)) \
-		return; \
-	\
-	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
-	\
-	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file, \
-				 sizeof(*entry) + __data_size); \
-	\
-	if (!entry) \
-		return; \
-	\
-	tstruct \
-	\
-	{ assign; } \
-	\
-	ftrace_event_buffer_commit(&fbuffer); \
-}
-/*
- * The ftrace_test_probe is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the ftrace probe will
- * fail to compile unless it too is updated.
- */
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void ftrace_test_probe_##call(void) \
-{ \
-	check_trace_callback_type_##call(ftrace_raw_event_##template); \
-}
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __entry
-#define __entry REC
-
-#undef __print_flags
-#undef __print_symbolic
-#undef __print_hex
-#undef __get_dynamic_array
-#undef __get_dynamic_array_len
-#undef __get_str
-#undef __get_bitmask
-#undef __print_array
-
-#undef TP_printk
-#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-_TRACE_PERF_PROTO(call, PARAMS(proto)); \
-static char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used __refdata event_class_##call = { \
-	.system = TRACE_SYSTEM_STRING, \
-	.define_fields = ftrace_define_fields_##call, \
-	.fields = LIST_HEAD_INIT(event_class_##call.fields),\
-	.raw_init = trace_event_raw_init, \
-	.probe = ftrace_raw_event_##call, \
-	.reg = ftrace_event_reg, \
-	_TRACE_PERF_INIT(call) \
-};
-
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-	\
-static struct ftrace_event_call __used event_##call = { \
-	.class = &event_class_##template, \
-	{ \
-		.tp = &__tracepoint_##call, \
-	}, \
-	.event.funcs = &ftrace_event_type_funcs_##template, \
-	.print_fmt = print_fmt_##template, \
-	.flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
-	\
-static char print_fmt_##call[] = print; \
-	\
-static struct ftrace_event_call __used event_##call = { \
-	.class = &event_class_##template, \
-	{ \
-		.tp = &__tracepoint_##call, \
-	}, \
-	.event.funcs = &ftrace_event_type_funcs_##call, \
-	.print_fmt = print_fmt_##call, \
-	.flags = TRACE_EVENT_FL_TRACEPOINT, \
-}; \
-static struct ftrace_event_call __used \
-__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef TRACE_SYSTEM_VAR
-
-#ifdef CONFIG_PERF_EVENTS
-
-#undef __entry
-#define __entry entry
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
-		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_dynamic_array_len
-#define __get_dynamic_array_len(field) \
-		((__entry->__data_loc_##field >> 16) & 0xffff)
-
-#undef __get_str
-#define __get_str(field) (char *)__get_dynamic_array(field)
-
-#undef __get_bitmask
-#define __get_bitmask(field) (char *)__get_dynamic_array(field)
-
-#undef __perf_addr
-#define __perf_addr(a) (__addr = (a))
-
-#undef __perf_count
-#define __perf_count(c) (__count = (c))
-
-#undef __perf_task
-#define __perf_task(t) (__task = (t))
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static notrace void \
-perf_trace_##call(void *__data, proto) \
-{ \
-	struct ftrace_event_call *event_call = __data; \
-	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
-	struct ftrace_raw_##call *entry; \
-	struct pt_regs *__regs; \
-	u64 __addr = 0, __count = 1; \
-	struct task_struct *__task = NULL; \
-	struct hlist_head *head; \
-	int __entry_size; \
-	int __data_size; \
-	int rctx; \
-	\
-	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
-	\
-	head = this_cpu_ptr(event_call->perf_events); \
-	if (__builtin_constant_p(!__task) && !__task && \
-	    hlist_empty(head)) \
-		return; \
-	\
-	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
-			     sizeof(u64)); \
-	__entry_size -= sizeof(u32); \
-	\
-	entry = perf_trace_buf_prepare(__entry_size, \
-			event_call->event.type, &__regs, &rctx); \
-	if (!entry) \
-		return; \
-	\
-	perf_fetch_caller_regs(__regs); \
-	\
-	tstruct \
-	\
-	{ assign; } \
-	\
-	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
-		__count, __regs, head, __task); \
-}
-
-/*
- * This part is compiled out, it is only here as a build time check
- * to make sure that if the tracepoint handling changes, the
- * perf probe will fail to compile unless it too is updated.
- */
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
-static inline void perf_test_probe_##call(void) \
-{ \
-	check_trace_callback_type_##call(perf_trace_##template); \
-}
-
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
-	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_PERF_EVENTS */
 
+350
include/trace/perf.h
···
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ *	struct trace_event_file *trace_file = __data;
+ *	struct trace_event_call *event_call = trace_file->event_call;
+ *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = trace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
+ *	struct ring_buffer_event *event;
+ *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
+ *	unsigned long irq_flags;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ *			event_triggers_call(trace_file, NULL);
+ *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ *			return;
+ *	}
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ *				  event_<call>->event.type,
+ *				  sizeof(*entry) + __data_size,
+ *				  irq_flags, pc);
+ *	if (!event)
+ *		return;
+ *	entry = ring_buffer_event_data(event);
+ *
+ *	{ <assign>; } <-- Here we assign the entries by the __field and
+ *			  __array macros.
+ *
+ *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(trace_file, entry);
+ *
+ *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ *		     &trace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(trace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ *	.trace = trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ *	.system = "<system>",
+ *	.define_fields = trace_event_define_fields_<call>,
+ *	.fields = LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init = trace_event_raw_init,
+ *	.probe = trace_event_raw_event_##call,
+ *	.reg = trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ *	.class = event_class_<template>,
+ *	{
+ *		.tp = &__tracepoint_<call>,
+ *	},
+ *	.event = &ftrace_event_type_<call>,
+ *	.print_fmt = print_fmt_<call>,
+ *	.flags = TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // its only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto) \
+	static notrace void \
+	perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call) \
+	.perf_probe = perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+	__entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits) \
+	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+	\
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+	struct trace_event_file *trace_file = __data; \
+	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+	struct trace_event_buffer fbuffer; \
+	struct trace_event_raw_##call *entry; \
+	int __data_size; \
+	\
+	if (trace_trigger_soft_disabled(trace_file)) \
+		return; \
+	\
+	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+	\
+	entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+				 sizeof(*entry) + __data_size); \
+	\
+	if (!entry) \
+		return; \
+	\
+	tstruct \
+	\
+	{ assign; } \
+	\
+	trace_event_buffer_commit(&fbuffer); \
+}
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void ftrace_test_probe_##call(void) \
+{ \
+	check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+_TRACE_PERF_PROTO(call, PARAMS(proto)); \
+static char print_fmt_##call[] = print; \
+static struct trace_event_class __used __refdata event_class_##call = { \
+	.system = TRACE_SYSTEM_STRING, \
+	.define_fields = trace_event_define_fields_##call, \
+	.fields = LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init = trace_event_raw_init, \
+	.probe = trace_event_raw_event_##call, \
+	.reg = trace_event_reg, \
+	_TRACE_PERF_INIT(call) \
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+	\
+static struct trace_event_call __used event_##call = { \
+	.class = &event_class_##template, \
+	{ \
+		.tp = &__tracepoint_##call, \
+	}, \
+	.event.funcs = &trace_event_type_funcs_##template, \
+	.print_fmt = print_fmt_##template, \
+	.flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
+	\
+static char print_fmt_##call[] = print; \
+	\
+static struct trace_event_call __used event_##call = { \
+	.class = &event_class_##template, \
+	{ \
+		.tp = &__tracepoint_##call, \
+	}, \
+	.event.funcs = &trace_event_type_funcs_##call, \
+	.print_fmt = print_fmt_##call, \
+	.flags = TRACE_EVENT_FL_TRACEPOINT, \
+}; \
+static struct trace_event_call __used \
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field) \
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a) (__addr = (a))
+
+#undef __perf_count
+#define __perf_count(c) (__count = (c))
+
+#undef __perf_task
+#define __perf_task(t) (__task = (t))
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace void \
+perf_trace_##call(void *__data, proto) \
+{ \
+	struct trace_event_call *event_call = __data; \
+	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+	struct trace_event_raw_##call *entry; \
+	struct pt_regs *__regs; \
+	u64 __addr = 0, __count = 1; \
+	struct task_struct *__task = NULL; \
+	struct hlist_head *head; \
+	int __entry_size; \
+	int __data_size; \
+	int rctx; \
+	\
+	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+	\
+	head = this_cpu_ptr(event_call->perf_events); \
+	if (__builtin_constant_p(!__task) && !__task && \
+	    hlist_empty(head)) \
+		return; \
+	\
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64)); \
+	__entry_size -= sizeof(u32); \
+	\
+	entry = perf_trace_buf_prepare(__entry_size, \
+			event_call->event.type, &__regs, &rctx); \
+	if (!entry) \
+		return; \
+	\
+	perf_fetch_caller_regs(__regs); \
+	\
+	tstruct \
+	\
+	{ assign; } \
+	\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
+		__count, __regs, head, __task); \
+}
+
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+static inline void perf_test_probe_##call(void) \
+{ \
+	check_trace_callback_type_##call(perf_trace_##template); \
+}
+
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
+3 -3
include/trace/syscall.h
···
 
 #include <linux/tracepoint.h>
 #include <linux/unistd.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
···
 	const char **args;
 	struct list_head enter_fields;
 
-	struct ftrace_event_call *enter_event;
-	struct ftrace_event_call *exit_event;
+	struct trace_event_call *enter_event;
+	struct trace_event_call *exit_event;
 };
 
 #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+1 -1
kernel/events/core.c
···
 #include <linux/kernel_stat.h>
 #include <linux/cgroup.h>
 #include <linux/perf_event.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/mm_types.h>
 #include <linux/module.h>
+1 -1
kernel/module.c
···
  */
 #include <linux/export.h>
 #include <linux/moduleloader.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/file.h>
+1 -1
kernel/rcu/tiny.c
···
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #include "rcu.h"
 
+1 -1
kernel/rcu/tree.c
···
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 #include <linux/random.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/suspend.h>
 
 #include "tree.h"
+2 -2
kernel/trace/blktrace.c
···
 
 static int __init init_blk_tracer(void)
 {
-	if (!register_ftrace_event(&trace_blk_event)) {
+	if (!register_trace_event(&trace_blk_event)) {
 		pr_warning("Warning: could not register block events\n");
 		return 1;
 	}
 
 	if (register_tracer(&blk_tracer) != 0) {
 		pr_warning("Warning: could not register the block tracer\n");
-		unregister_ftrace_event(&trace_blk_event);
+		unregister_trace_event(&trace_blk_event);
 		return 1;
 	}
 
+89 -132
kernel/trace/ring_buffer.c
···
  *
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
 #include <linux/trace_seq.h>
···
  *
  */
 
-/*
- * A fast way to enable or disable all ring buffers is to
- * call tracing_on or tracing_off. Turning off the ring buffers
- * prevents all ring buffers from being recorded to.
- * Turning this switch on, makes it OK to write to the
- * ring buffer, if the ring buffer is enabled itself.
- *
- * There's three layers that must be on in order to write
- * to the ring buffer.
- *
- * 1) This global flag must be set.
- * 2) The ring buffer must be enabled for recording.
- * 3) The per cpu buffer must be enabled for recording.
- *
- * In case of an anomaly, this global flag has a bit set that
- * will permantly disable all ring buffers.
- */
-
-/*
- * Global flag to disable all recording to ring buffers
- * This has two bits: ON, DISABLED
- *
- *  ON   DISABLED
- * ---- ----------
- *   0      0        : ring buffers are off
- *   1      0        : ring buffers are on
- *   X      1        : ring buffers are permanently disabled
- */
-
-enum {
-	RB_BUFFERS_ON_BIT = 0,
-	RB_BUFFERS_DISABLED_BIT = 1,
-};
-
-enum {
-	RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
-	RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
-};
-
-static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
-
 /* Used for individual buffers (after the counter) */
 #define RB_BUFFER_OFF (1 << 20)
 
 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-/**
- * tracing_off_permanent - permanently disable ring buffers
- *
- * This function, once called, will disable all ring buffers
- * permanently.
- */
-void tracing_off_permanent(void)
-{
-	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
-}
 
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT 4U
···
 };
 
 /*
+ * Used for which event context the event is in.
+ *  NMI     = 0
+ *  IRQ     = 1
+ *  SOFTIRQ = 2
+ *  NORMAL  = 3
+ *
+ * See trace_recursive_lock() comment below for more details.
+ */
+enum {
+	RB_CTX_NMI,
+	RB_CTX_IRQ,
+	RB_CTX_SOFTIRQ,
+	RB_CTX_NORMAL,
+	RB_CTX_MAX
+};
+
+/*
  * head_page == tail_page && head == tail then buffer is empty.
  */
 struct ring_buffer_per_cpu {
···
 	arch_spinlock_t lock;
 	struct lock_class_key lock_key;
 	unsigned int nr_pages;
+	unsigned int current_context;
 	struct list_head *pages;
 	struct buffer_page *head_page;	/* read from head */
 	struct buffer_page *tail_page;	/* write to tail */
···
 
 	/* zero length can cause confusions */
 	if (!length)
-		length = 1;
+		length++;
 
 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
···
 	return NULL;
 }
 
-#ifdef CONFIG_TRACING
-
 /*
  * The lock and unlock are done within a preempt disable section.
  * The current_context per_cpu variable can only be modified
···
  * just so happens that it is the same bit corresponding to
  * the current context.
  */
-static DEFINE_PER_CPU(unsigned int, current_context);
 
-static __always_inline int trace_recursive_lock(void)
+static __always_inline int
+trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	unsigned int val = __this_cpu_read(current_context);
+	unsigned int val = cpu_buffer->current_context;
 	int bit;
 
 	if (in_interrupt()) {
 		if (in_nmi())
-			bit = 0;
+			bit = RB_CTX_NMI;
 		else if (in_irq())
-			bit = 1;
+			bit = RB_CTX_IRQ;
 		else
-			bit = 2;
+			bit = RB_CTX_SOFTIRQ;
 	} else
-		bit = 3;
+		bit = RB_CTX_NORMAL;
 
 	if (unlikely(val & (1 << bit)))
 		return 1;
 
 	val |= (1 << bit);
-	__this_cpu_write(current_context, val);
+	cpu_buffer->current_context = val;
 
 	return 0;
 }
 
-static __always_inline void trace_recursive_unlock(void)
+static __always_inline void
+trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	__this_cpu_and(current_context, __this_cpu_read(current_context) - 1);
+	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
 }
-
-#else
-
-#define trace_recursive_lock() (0)
-#define trace_recursive_unlock() do { } while (0)
-
-#endif
 
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
···
 	struct ring_buffer_event *event;
 	int cpu;
 
-	if (ring_buffer_flags != RB_BUFFERS_ON)
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	preempt_disable_notrace();
 
-	if (atomic_read(&buffer->record_disabled))
-		goto out_nocheck;
-
-	if (trace_recursive_lock())
-		goto out_nocheck;
+	if (unlikely(atomic_read(&buffer->record_disabled)))
+		goto out;
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
 
-	if (atomic_read(&cpu_buffer->record_disabled))
+	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
 		goto out;
 
-	if (length > BUF_MAX_DATA_SIZE)
+	if (unlikely(length > BUF_MAX_DATA_SIZE))
+		goto out;
+
+	if (unlikely(trace_recursive_lock(cpu_buffer)))
 		goto out;
 
 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
-		goto out;
+		goto out_unlock;
 
 	return event;
 
+ out_unlock:
+	trace_recursive_unlock(cpu_buffer);
  out:
-	trace_recursive_unlock();
-
- out_nocheck:
 	preempt_enable_notrace();
 	return NULL;
 }
···
 
 	rb_wakeups(buffer, cpu_buffer);
 
-	trace_recursive_unlock();
+	trace_recursive_unlock(cpu_buffer);
 
 	preempt_enable_notrace();
 
···
  out:
 	rb_end_commit(cpu_buffer);
 
-	trace_recursive_unlock();
+	trace_recursive_unlock(cpu_buffer);
 
 	preempt_enable_notrace();
 
···
 	int ret = -EBUSY;
 	int cpu;
 
-	if (ring_buffer_flags != RB_BUFFERS_ON)
-		return -EBUSY;
-
 	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
···
 	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
+	if (unlikely(trace_recursive_lock(cpu_buffer)))
+		goto out;
+
 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
-		goto out;
+		goto out_unlock;
 
 	body = rb_event_data(event);
 
···
 	rb_wakeups(buffer, cpu_buffer);
 
 	ret = 0;
+
+ out_unlock:
+	trace_recursive_unlock(cpu_buffer);
+
  out:
 	preempt_enable_notrace();
 
···
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
-static inline int rb_ok_to_lock(void)
+static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
+	if (likely(!in_nmi())) {
+		raw_spin_lock(&cpu_buffer->reader_lock);
+		return true;
+	}
+
 	/*
 	 * If an NMI die dumps out the content of the ring buffer
-	 * do not grab locks. We also permanently disable the ring
-	 * buffer too. A one time deal is all you get from reading
-	 * the ring buffer from an NMI.
+	 * trylock must be used to prevent a deadlock if the NMI
+	 * preempted a task that holds the ring buffer locks. If
+	 * we get the lock then all is fine, if not, then continue
+	 * to do the read, but this can corrupt the ring buffer,
+	 * so it must be permanently disabled from future writes.
+	 * Reading from NMI is a oneshot deal.
 	 */
-	if (likely(!in_nmi()))
-		return 1;
+	if (raw_spin_trylock(&cpu_buffer->reader_lock))
+		return true;
 
-	tracing_off_permanent();
-	return 0;
+	/* Continue without locking, but disable the ring buffer */
+	atomic_inc(&cpu_buffer->record_disabled);
+	return false;
+}
+
+static inline void
+rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
+{
+	if (likely(locked))
+		raw_spin_unlock(&cpu_buffer->reader_lock);
+	return;
 }
 
 /**
···
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
-	int dolock;
+	bool dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
-	dolock = rb_ok_to_lock();
  again:
 	local_irq_save(flags);
-	if (dolock)
-		raw_spin_lock(&cpu_buffer->reader_lock);
+	dolock = rb_reader_lock(cpu_buffer);
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
-	if (dolock)
-		raw_spin_unlock(&cpu_buffer->reader_lock);
+	rb_reader_unlock(cpu_buffer, dolock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
···
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
-	int dolock;
-
-	dolock = rb_ok_to_lock();
+	bool dolock;
 
  again:
 	/* might be called in atomic */
···
 
 	cpu_buffer =
buffer->buffers[cpu]; 3944 3974 local_irq_save(flags); 3945 - if (dolock) 3946 - raw_spin_lock(&cpu_buffer->reader_lock); 3975 + dolock = rb_reader_lock(cpu_buffer); 3947 3976 3948 3977 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3949 3978 if (event) { ··· 3950 3981 rb_advance_reader(cpu_buffer); 3951 3982 } 3952 3983 3953 - if (dolock) 3954 - raw_spin_unlock(&cpu_buffer->reader_lock); 3984 + rb_reader_unlock(cpu_buffer, dolock); 3955 3985 local_irq_restore(flags); 3956 3986 3957 3987 out: ··· 4231 4263 { 4232 4264 struct ring_buffer_per_cpu *cpu_buffer; 4233 4265 unsigned long flags; 4234 - int dolock; 4266 + bool dolock; 4235 4267 int cpu; 4236 4268 int ret; 4237 - 4238 - dolock = rb_ok_to_lock(); 4239 4269 4240 4270 /* yes this is racy, but if you don't like the race, lock the buffer */ 4241 4271 for_each_buffer_cpu(buffer, cpu) { 4242 4272 cpu_buffer = buffer->buffers[cpu]; 4243 4273 local_irq_save(flags); 4244 - if (dolock) 4245 - raw_spin_lock(&cpu_buffer->reader_lock); 4274 + dolock = rb_reader_lock(cpu_buffer); 4246 4275 ret = rb_per_cpu_empty(cpu_buffer); 4247 - if (dolock) 4248 - raw_spin_unlock(&cpu_buffer->reader_lock); 4276 + rb_reader_unlock(cpu_buffer, dolock); 4249 4277 local_irq_restore(flags); 4250 4278 4251 4279 if (!ret) ··· 4261 4297 { 4262 4298 struct ring_buffer_per_cpu *cpu_buffer; 4263 4299 unsigned long flags; 4264 - int dolock; 4300 + bool dolock; 4265 4301 int ret; 4266 4302 4267 4303 if (!cpumask_test_cpu(cpu, buffer->cpumask)) 4268 4304 return 1; 4269 4305 4270 - dolock = rb_ok_to_lock(); 4271 - 4272 4306 cpu_buffer = buffer->buffers[cpu]; 4273 4307 local_irq_save(flags); 4274 - if (dolock) 4275 - raw_spin_lock(&cpu_buffer->reader_lock); 4308 + dolock = rb_reader_lock(cpu_buffer); 4276 4309 ret = rb_per_cpu_empty(cpu_buffer); 4277 - if (dolock) 4278 - raw_spin_unlock(&cpu_buffer->reader_lock); 4310 + rb_reader_unlock(cpu_buffer, dolock); 4279 4311 local_irq_restore(flags); 4280 4312 4281 4313 return ret; ··· 4308 4348 goto out; 4309 4349 4310 4350 ret = -EAGAIN; 4311 - 4312 - if (ring_buffer_flags != RB_BUFFERS_ON) 4313 - goto out; 4314 4351 4315 4352 if (atomic_read(&buffer_a->record_disabled)) 4316 4353 goto out;
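Two mechanisms replace the old global ring_buffer_flags in this file. Writers now take a per-cpu-buffer recursion lock: one bit of cpu_buffer->current_context per context level, with NMI at bit 0 and normal context at bit 3, so the most recently entered context always owns the lowest set bit and trace_recursive_unlock() can drop it with val &= val - 1. Readers running in NMI context now trylock reader_lock and, on failure, disable only that CPU's buffer instead of permanently killing all ring buffers. A standalone user-space sketch of just the recursion bitmask (the kernel keeps this state in struct ring_buffer_per_cpu; this demo is illustrative only):

#include <stdio.h>

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;

static int recursive_lock(int bit)
{
	if (current_context & (1 << bit))
		return 1;			/* recursion: reject the write */
	current_context |= 1 << bit;
	return 0;
}

static void recursive_unlock(void)
{
	/* clear the lowest set bit == the deepest context entered */
	current_context &= current_context - 1;
}

int main(void)
{
	recursive_lock(CTX_NORMAL);		 /* ok */
	recursive_lock(CTX_IRQ);		 /* ok: interrupt nested on top */
	printf("%d\n", recursive_lock(CTX_IRQ)); /* 1: same level recursed */
	recursive_unlock();			 /* drops CTX_IRQ */
	recursive_unlock();			 /* drops CTX_NORMAL */
	return 0;
}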
+14 -9
kernel/trace/ring_buffer_benchmark.c
··· 32 32 static struct task_struct *consumer; 33 33 static unsigned long read; 34 34 35 - static int disable_reader; 35 + static unsigned int disable_reader; 36 36 module_param(disable_reader, uint, 0644); 37 37 MODULE_PARM_DESC(disable_reader, "only run producer"); 38 38 39 - static int write_iteration = 50; 39 + static unsigned int write_iteration = 50; 40 40 module_param(write_iteration, uint, 0644); 41 41 MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings"); 42 42 ··· 46 46 static int producer_fifo = -1; 47 47 static int consumer_fifo = -1; 48 48 49 - module_param(producer_nice, uint, 0644); 49 + module_param(producer_nice, int, 0644); 50 50 MODULE_PARM_DESC(producer_nice, "nice prio for producer"); 51 51 52 - module_param(consumer_nice, uint, 0644); 52 + module_param(consumer_nice, int, 0644); 53 53 MODULE_PARM_DESC(consumer_nice, "nice prio for consumer"); 54 54 55 - module_param(producer_fifo, uint, 0644); 55 + module_param(producer_fifo, int, 0644); 56 56 MODULE_PARM_DESC(producer_fifo, "fifo prio for producer"); 57 57 58 - module_param(consumer_fifo, uint, 0644); 58 + module_param(consumer_fifo, int, 0644); 59 59 MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer"); 60 60 61 61 static int read_events; ··· 263 263 if (cnt % wakeup_interval) 264 264 cond_resched(); 265 265 #endif 266 + if (kthread_should_stop()) 267 + kill_test = 1; 266 268 267 269 } while (ktime_before(end_time, timeout) && !kill_test); 268 270 trace_printk("End ring buffer hammer\n"); ··· 287 285 entries = ring_buffer_entries(buffer); 288 286 overruns = ring_buffer_overruns(buffer); 289 287 290 - if (kill_test) 288 + if (kill_test && !kthread_should_stop()) 291 289 trace_printk("ERROR!\n"); 292 290 293 291 if (!disable_reader) { ··· 381 379 } 382 380 __set_current_state(TASK_RUNNING); 383 381 384 - if (kill_test) 382 + if (!kthread_should_stop()) 385 383 wait_to_die(); 386 384 387 385 return 0; ··· 401 399 } 402 400 403 401 ring_buffer_producer(); 402 + if (kill_test) 403 + goto out_kill; 404 404 405 405 trace_printk("Sleeping for 10 secs\n"); 406 406 set_current_state(TASK_INTERRUPTIBLE); 407 407 schedule_timeout(HZ * SLEEP_TIME); 408 408 } 409 409 410 - if (kill_test) 410 + out_kill: 411 + if (!kthread_should_stop()) 411 412 wait_to_die(); 412 413 413 414 return 0;
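Three of the hunks above are type corrections: the token passed to module_param() has to match the C type of the variable (an unsigned variable pairs with "uint", a signed one with "int"), because the macro's hidden type check compares a pointer to the variable against the declared parameter type. The remaining hunks poll kthread_should_stop() inside the producer loop so that rmmod can stop the benchmark immediately instead of waiting out a full pass. A hedged sketch of the matching rule, with illustrative values:

#include <linux/module.h>

static unsigned int write_iteration = 50;	/* unsigned int <-> "uint" */
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = 19;			/* int <-> "int" */
module_param(producer_nice, int, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

MODULE_LICENSE("GPL");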
+12 -11
kernel/trace/trace.c
··· 297 297 mutex_unlock(&trace_types_lock); 298 298 } 299 299 300 - int filter_check_discard(struct ftrace_event_file *file, void *rec, 300 + int filter_check_discard(struct trace_event_file *file, void *rec, 301 301 struct ring_buffer *buffer, 302 302 struct ring_buffer_event *event) 303 303 { 304 - if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) && 304 + if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && 305 305 !filter_match_preds(file->filter, rec)) { 306 306 ring_buffer_discard_commit(buffer, event); 307 307 return 1; ··· 311 311 } 312 312 EXPORT_SYMBOL_GPL(filter_check_discard); 313 313 314 - int call_filter_check_discard(struct ftrace_event_call *call, void *rec, 314 + int call_filter_check_discard(struct trace_event_call *call, void *rec, 315 315 struct ring_buffer *buffer, 316 316 struct ring_buffer_event *event) 317 317 { ··· 876 876 { trace_clock_jiffies, "uptime", 0 }, 877 877 { trace_clock, "perf", 1 }, 878 878 { ktime_get_mono_fast_ns, "mono", 1 }, 879 + { ktime_get_raw_fast_ns, "mono_raw", 1 }, 879 880 ARCH_TRACE_CLOCKS 880 881 }; 881 882 ··· 1694 1693 1695 1694 struct ring_buffer_event * 1696 1695 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, 1697 - struct ftrace_event_file *ftrace_file, 1696 + struct trace_event_file *trace_file, 1698 1697 int type, unsigned long len, 1699 1698 unsigned long flags, int pc) 1700 1699 { 1701 1700 struct ring_buffer_event *entry; 1702 1701 1703 - *current_rb = ftrace_file->tr->trace_buffer.buffer; 1702 + *current_rb = trace_file->tr->trace_buffer.buffer; 1704 1703 entry = trace_buffer_lock_reserve(*current_rb, 1705 1704 type, len, flags, pc); 1706 1705 /* ··· 1709 1708 * to store the trace event for the tigger to use. It's recusive 1710 1709 * safe and will not be recorded anywhere. 
1711 1710 */ 1712 - if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) { 1711 + if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { 1713 1712 *current_rb = temp_buffer; 1714 1713 entry = trace_buffer_lock_reserve(*current_rb, 1715 1714 type, len, flags, pc); ··· 1761 1760 unsigned long ip, unsigned long parent_ip, unsigned long flags, 1762 1761 int pc) 1763 1762 { 1764 - struct ftrace_event_call *call = &event_function; 1763 + struct trace_event_call *call = &event_function; 1765 1764 struct ring_buffer *buffer = tr->trace_buffer.buffer; 1766 1765 struct ring_buffer_event *event; 1767 1766 struct ftrace_entry *entry; ··· 1796 1795 unsigned long flags, 1797 1796 int skip, int pc, struct pt_regs *regs) 1798 1797 { 1799 - struct ftrace_event_call *call = &event_kernel_stack; 1798 + struct trace_event_call *call = &event_kernel_stack; 1800 1799 struct ring_buffer_event *event; 1801 1800 struct stack_entry *entry; 1802 1801 struct stack_trace trace; ··· 1924 1923 void 1925 1924 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 1926 1925 { 1927 - struct ftrace_event_call *call = &event_user_stack; 1926 + struct trace_event_call *call = &event_user_stack; 1928 1927 struct ring_buffer_event *event; 1929 1928 struct userstack_entry *entry; 1930 1929 struct stack_trace trace; ··· 2130 2129 */ 2131 2130 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 2132 2131 { 2133 - struct ftrace_event_call *call = &event_bprint; 2132 + struct trace_event_call *call = &event_bprint; 2134 2133 struct ring_buffer_event *event; 2135 2134 struct ring_buffer *buffer; 2136 2135 struct trace_array *tr = &global_trace; ··· 2188 2187 __trace_array_vprintk(struct ring_buffer *buffer, 2189 2188 unsigned long ip, const char *fmt, va_list args) 2190 2189 { 2191 - struct ftrace_event_call *call = &event_print; 2190 + struct trace_event_call *call = &event_print; 2192 2191 struct ring_buffer_event *event; 2193 2192 int len = 0, size, pc; 2194 2193 struct print_entry *entry;
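Besides the mechanical renames, trace.c carries the pull's one user-visible feature: a "mono_raw" trace clock backed by ktime_get_raw_fast_ns(), the NMI-safe accessor for raw monotonic time (no NTP frequency correction). Each entry in the clock table pairs a callback returning nanoseconds with the name written to the tracefs trace_clock file; a trimmed sketch of the shape, with field names assumed rather than quoted from this diff:

static struct {
	u64 (*func)(void);	/* timestamp in ns */
	const char *name;	/* token written to trace_clock */
	int in_ns;		/* 1 if the counter really is in ns */
} trace_clocks_sketch[] = {
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },	/* new */
};

Selecting it at runtime should then work like any other clock, by writing mono_raw to the instance's trace_clock file.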
+21 -21
kernel/trace/trace.h
··· 12 12 #include <linux/ftrace.h> 13 13 #include <linux/hw_breakpoint.h> 14 14 #include <linux/trace_seq.h> 15 - #include <linux/ftrace_event.h> 15 + #include <linux/trace_events.h> 16 16 #include <linux/compiler.h> 17 17 #include <linux/trace_seq.h> 18 18 ··· 211 211 #ifdef CONFIG_FTRACE_SYSCALLS 212 212 int sys_refcount_enter; 213 213 int sys_refcount_exit; 214 - struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls]; 215 - struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls]; 214 + struct trace_event_file __rcu *enter_syscall_files[NR_syscalls]; 215 + struct trace_event_file __rcu *exit_syscall_files[NR_syscalls]; 216 216 #endif 217 217 int stop_count; 218 218 int clock_id; ··· 858 858 #define ftrace_destroy_filter_files(ops) do { } while (0) 859 859 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ 860 860 861 - int ftrace_event_is_function(struct ftrace_event_call *call); 861 + int ftrace_event_is_function(struct trace_event_call *call); 862 862 863 863 /* 864 864 * struct trace_parser - servers for reading the user input separated by spaces ··· 992 992 int ref_count; 993 993 }; 994 994 995 - struct ftrace_subsystem_dir { 995 + struct trace_subsystem_dir { 996 996 struct list_head list; 997 997 struct event_subsystem *subsystem; 998 998 struct trace_array *tr; ··· 1052 1052 1053 1053 extern enum regex_type 1054 1054 filter_parse_regex(char *buff, int len, char **search, int *not); 1055 - extern void print_event_filter(struct ftrace_event_file *file, 1055 + extern void print_event_filter(struct trace_event_file *file, 1056 1056 struct trace_seq *s); 1057 - extern int apply_event_filter(struct ftrace_event_file *file, 1057 + extern int apply_event_filter(struct trace_event_file *file, 1058 1058 char *filter_string); 1059 - extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, 1059 + extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, 1060 1060 char *filter_string); 1061 1061 extern void print_subsystem_event_filter(struct event_subsystem *system, 1062 1062 struct trace_seq *s); 1063 1063 extern int filter_assign_type(const char *type); 1064 - extern int create_event_filter(struct ftrace_event_call *call, 1064 + extern int create_event_filter(struct trace_event_call *call, 1065 1065 char *filter_str, bool set_str, 1066 1066 struct event_filter **filterp); 1067 1067 extern void free_event_filter(struct event_filter *filter); 1068 1068 1069 1069 struct ftrace_event_field * 1070 - trace_find_event_field(struct ftrace_event_call *call, char *name); 1070 + trace_find_event_field(struct trace_event_call *call, char *name); 1071 1071 1072 1072 extern void trace_event_enable_cmd_record(bool enable); 1073 1073 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); 1074 1074 extern int event_trace_del_tracer(struct trace_array *tr); 1075 1075 1076 - extern struct ftrace_event_file *find_event_file(struct trace_array *tr, 1077 - const char *system, 1078 - const char *event); 1076 + extern struct trace_event_file *find_event_file(struct trace_array *tr, 1077 + const char *system, 1078 + const char *event); 1079 1079 1080 1080 static inline void *event_file_data(struct file *filp) 1081 1081 { ··· 1180 1180 * commands need to do this if they themselves log to the trace 1181 1181 * buffer (see the @post_trigger() member below). @trigger_type 1182 1182 * values are defined by adding new values to the trigger_type 1183 - * enum in include/linux/ftrace_event.h. 
1183 + * enum in include/linux/trace_events.h. 1184 1184 * 1185 1185 * @post_trigger: A flag that says whether or not this command needs 1186 1186 * to have its action delayed until after the current event has ··· 1242 1242 enum event_trigger_type trigger_type; 1243 1243 bool post_trigger; 1244 1244 int (*func)(struct event_command *cmd_ops, 1245 - struct ftrace_event_file *file, 1245 + struct trace_event_file *file, 1246 1246 char *glob, char *cmd, char *params); 1247 1247 int (*reg)(char *glob, 1248 1248 struct event_trigger_ops *ops, 1249 1249 struct event_trigger_data *data, 1250 - struct ftrace_event_file *file); 1250 + struct trace_event_file *file); 1251 1251 void (*unreg)(char *glob, 1252 1252 struct event_trigger_ops *ops, 1253 1253 struct event_trigger_data *data, 1254 - struct ftrace_event_file *file); 1254 + struct trace_event_file *file); 1255 1255 int (*set_filter)(char *filter_str, 1256 1256 struct event_trigger_data *data, 1257 - struct ftrace_event_file *file); 1257 + struct trace_event_file *file); 1258 1258 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); 1259 1259 }; 1260 1260 1261 - extern int trace_event_enable_disable(struct ftrace_event_file *file, 1261 + extern int trace_event_enable_disable(struct trace_event_file *file, 1262 1262 int enable, int soft_disable); 1263 1263 extern int tracing_alloc_snapshot(void); 1264 1264 ··· 1286 1286 1287 1287 #undef FTRACE_ENTRY 1288 1288 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ 1289 - extern struct ftrace_event_call \ 1289 + extern struct trace_event_call \ 1290 1290 __aligned(4) event_##call; 1291 1291 #undef FTRACE_ENTRY_DUP 1292 1292 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ ··· 1295 1295 #include "trace_entries.h" 1296 1296 1297 1297 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) 1298 - int perf_ftrace_event_register(struct ftrace_event_call *call, 1298 + int perf_ftrace_event_register(struct trace_event_call *call, 1299 1299 enum trace_reg type, void *data); 1300 1300 #else 1301 1301 #define perf_ftrace_event_register NULL
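The FTRACE_ENTRY block at the bottom is the usual X-macro arrangement: trace_entries.h holds a single list of events, and each including site redefines FTRACE_ENTRY so that the same list expands into whatever declarations it needs — here, extern forward declarations under the new struct trace_event_call name. A standalone toy version of the pattern (event names invented), with the list expanded twice for two different purposes:

#include <stdio.h>

#define EVENT_LIST \
	EVENT(function) \
	EVENT(kernel_stack)

/* first expansion: one global per event */
#define EVENT(name) int event_##name##_id;
EVENT_LIST
#undef EVENT

/* second expansion of the same list: code instead of data */
#define EVENT(name) printf(#name "\n");
int main(void)
{
	EVENT_LIST
	return 0;
}
#undef EVENT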
+2 -2
kernel/trace/trace_branch.c
··· 29 29 static void 30 30 probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) 31 31 { 32 - struct ftrace_event_call *call = &event_branch; 32 + struct trace_event_call *call = &event_branch; 33 33 struct trace_array *tr = branch_tracer; 34 34 struct trace_array_cpu *data; 35 35 struct ring_buffer_event *event; ··· 191 191 { 192 192 int ret; 193 193 194 - ret = register_ftrace_event(&trace_branch_event); 194 + ret = register_trace_event(&trace_branch_event); 195 195 if (!ret) { 196 196 printk(KERN_WARNING "Warning: could not register " 197 197 "branch events\n");
+3
kernel/trace/trace_clock.c
··· 56 56 { 57 57 return local_clock(); 58 58 } 59 + EXPORT_SYMBOL_GPL(trace_clock); 59 60 60 61 /* 61 62 * trace_jiffy_clock(): Simply use jiffies as a clock counter. ··· 69 68 { 70 69 return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES); 71 70 } 71 + EXPORT_SYMBOL_GPL(trace_clock_jiffies); 72 72 73 73 /* 74 74 * trace_clock_global(): special globally coherent trace clock ··· 125 123 126 124 return now; 127 125 } 126 + EXPORT_SYMBOL_GPL(trace_clock_global); 128 127 129 128 static atomic64_t trace_counter; 130 129
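No behavior changes here, only visibility: three clock functions gain EXPORT_SYMBOL_GPL() so that GPL modules can call them directly — trace_clock() is the function behind the "perf" entry in the clock table above. A hedged sketch of a module consuming one of the newly exported symbols (the module and its message are invented):

#include <linux/module.h>
#include <linux/trace_clock.h>

static int __init clock_peek_init(void)
{
	pr_info("trace_clock_global(): %llu ns\n",
		(unsigned long long)trace_clock_global());
	return 0;
}

static void __exit clock_peek_exit(void)
{
}

module_init(clock_peek_init);
module_exit(clock_peek_exit);
MODULE_LICENSE("GPL");	/* _GPL exports need a GPL-compatible license */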
+10 -10
kernel/trace/trace_event_perf.c
··· 21 21 /* Count the events in use (per event id, not per instance) */ 22 22 static int total_ref_count; 23 23 24 - static int perf_trace_event_perm(struct ftrace_event_call *tp_event, 24 + static int perf_trace_event_perm(struct trace_event_call *tp_event, 25 25 struct perf_event *p_event) 26 26 { 27 27 if (tp_event->perf_perm) { ··· 83 83 return 0; 84 84 } 85 85 86 - static int perf_trace_event_reg(struct ftrace_event_call *tp_event, 86 + static int perf_trace_event_reg(struct trace_event_call *tp_event, 87 87 struct perf_event *p_event) 88 88 { 89 89 struct hlist_head __percpu *list; ··· 143 143 144 144 static void perf_trace_event_unreg(struct perf_event *p_event) 145 145 { 146 - struct ftrace_event_call *tp_event = p_event->tp_event; 146 + struct trace_event_call *tp_event = p_event->tp_event; 147 147 int i; 148 148 149 149 if (--tp_event->perf_refcount > 0) ··· 172 172 173 173 static int perf_trace_event_open(struct perf_event *p_event) 174 174 { 175 - struct ftrace_event_call *tp_event = p_event->tp_event; 175 + struct trace_event_call *tp_event = p_event->tp_event; 176 176 return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event); 177 177 } 178 178 179 179 static void perf_trace_event_close(struct perf_event *p_event) 180 180 { 181 - struct ftrace_event_call *tp_event = p_event->tp_event; 181 + struct trace_event_call *tp_event = p_event->tp_event; 182 182 tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event); 183 183 } 184 184 185 - static int perf_trace_event_init(struct ftrace_event_call *tp_event, 185 + static int perf_trace_event_init(struct trace_event_call *tp_event, 186 186 struct perf_event *p_event) 187 187 { 188 188 int ret; ··· 206 206 207 207 int perf_trace_init(struct perf_event *p_event) 208 208 { 209 - struct ftrace_event_call *tp_event; 209 + struct trace_event_call *tp_event; 210 210 u64 event_id = p_event->attr.config; 211 211 int ret = -EINVAL; 212 212 ··· 236 236 237 237 int perf_trace_add(struct perf_event *p_event, int flags) 238 238 { 239 - struct ftrace_event_call *tp_event = p_event->tp_event; 239 + struct trace_event_call *tp_event = p_event->tp_event; 240 240 struct hlist_head __percpu *pcpu_list; 241 241 struct hlist_head *list; 242 242 ··· 255 255 256 256 void perf_trace_del(struct perf_event *p_event, int flags) 257 257 { 258 - struct ftrace_event_call *tp_event = p_event->tp_event; 258 + struct trace_event_call *tp_event = p_event->tp_event; 259 259 hlist_del_rcu(&p_event->hlist_entry); 260 260 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); 261 261 } ··· 357 357 ftrace_function_local_disable(&event->ftrace_ops); 358 358 } 359 359 360 - int perf_ftrace_event_register(struct ftrace_event_call *call, 360 + int perf_ftrace_event_register(struct trace_event_call *call, 361 361 enum trace_reg type, void *data) 362 362 { 363 363 switch (type) {
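The listing cuts off inside perf_ftrace_event_register(), right at the dispatch that all of these call sites share: every consumer funnels through the event class's reg() callback, multiplexed on enum trace_reg (the perf glue above passes TRACE_REG_PERF_OPEN, TRACE_REG_PERF_CLOSE and TRACE_REG_PERF_DEL). A hedged sketch of such a callback under the renamed types — the my_* helpers are hypothetical:

static int my_event_reg(struct trace_event_call *call,
			enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:	/* enabled through tracefs */
		return my_probe_attach(call);
	case TRACE_REG_UNREGISTER:
		my_probe_detach(call);
		return 0;
	case TRACE_REG_PERF_REGISTER:	/* first perf user appears */
		return my_perf_attach(call);
	case TRACE_REG_PERF_UNREGISTER:	/* last perf user is gone */
		my_perf_detach(call);
		return 0;
	default:			/* PERF_OPEN/CLOSE/ADD/DEL */
		return 0;
	}
}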
+152 -152
kernel/trace/trace_events.c
··· 61 61 62 62 #define do_for_each_event_file_safe(tr, file) \ 63 63 list_for_each_entry(tr, &ftrace_trace_arrays, list) { \ 64 - struct ftrace_event_file *___n; \ 64 + struct trace_event_file *___n; \ 65 65 list_for_each_entry_safe(file, ___n, &tr->events, list) 66 66 67 67 #define while_for_each_event_file() \ 68 68 } 69 69 70 70 static struct list_head * 71 - trace_get_fields(struct ftrace_event_call *event_call) 71 + trace_get_fields(struct trace_event_call *event_call) 72 72 { 73 73 if (!event_call->class->get_fields) 74 74 return &event_call->class->fields; ··· 89 89 } 90 90 91 91 struct ftrace_event_field * 92 - trace_find_event_field(struct ftrace_event_call *call, char *name) 92 + trace_find_event_field(struct trace_event_call *call, char *name) 93 93 { 94 94 struct ftrace_event_field *field; 95 95 struct list_head *head; ··· 129 129 return 0; 130 130 } 131 131 132 - int trace_define_field(struct ftrace_event_call *call, const char *type, 132 + int trace_define_field(struct trace_event_call *call, const char *type, 133 133 const char *name, int offset, int size, int is_signed, 134 134 int filter_type) 135 135 { ··· 166 166 return ret; 167 167 } 168 168 169 - static void trace_destroy_fields(struct ftrace_event_call *call) 169 + static void trace_destroy_fields(struct trace_event_call *call) 170 170 { 171 171 struct ftrace_event_field *field, *next; 172 172 struct list_head *head; ··· 178 178 } 179 179 } 180 180 181 - int trace_event_raw_init(struct ftrace_event_call *call) 181 + int trace_event_raw_init(struct trace_event_call *call) 182 182 { 183 183 int id; 184 184 185 - id = register_ftrace_event(&call->event); 185 + id = register_trace_event(&call->event); 186 186 if (!id) 187 187 return -ENODEV; 188 188 ··· 190 190 } 191 191 EXPORT_SYMBOL_GPL(trace_event_raw_init); 192 192 193 - void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, 194 - struct ftrace_event_file *ftrace_file, 195 - unsigned long len) 193 + void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, 194 + struct trace_event_file *trace_file, 195 + unsigned long len) 196 196 { 197 - struct ftrace_event_call *event_call = ftrace_file->event_call; 197 + struct trace_event_call *event_call = trace_file->event_call; 198 198 199 199 local_save_flags(fbuffer->flags); 200 200 fbuffer->pc = preempt_count(); 201 - fbuffer->ftrace_file = ftrace_file; 201 + fbuffer->trace_file = trace_file; 202 202 203 203 fbuffer->event = 204 - trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, 204 + trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file, 205 205 event_call->event.type, len, 206 206 fbuffer->flags, fbuffer->pc); 207 207 if (!fbuffer->event) ··· 210 210 fbuffer->entry = ring_buffer_event_data(fbuffer->event); 211 211 return fbuffer->entry; 212 212 } 213 - EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); 213 + EXPORT_SYMBOL_GPL(trace_event_buffer_reserve); 214 214 215 215 static DEFINE_SPINLOCK(tracepoint_iter_lock); 216 216 217 - static void output_printk(struct ftrace_event_buffer *fbuffer) 217 + static void output_printk(struct trace_event_buffer *fbuffer) 218 218 { 219 - struct ftrace_event_call *event_call; 219 + struct trace_event_call *event_call; 220 220 struct trace_event *event; 221 221 unsigned long flags; 222 222 struct trace_iterator *iter = tracepoint_print_iter; ··· 224 224 if (!iter) 225 225 return; 226 226 227 - event_call = fbuffer->ftrace_file->event_call; 227 + event_call = fbuffer->trace_file->event_call; 228 228 if (!event_call || !event_call->event.funcs || 
229 229 !event_call->event.funcs->trace) 230 230 return; 231 231 232 - event = &fbuffer->ftrace_file->event_call->event; 232 + event = &fbuffer->trace_file->event_call->event; 233 233 234 234 spin_lock_irqsave(&tracepoint_iter_lock, flags); 235 235 trace_seq_init(&iter->seq); ··· 241 241 spin_unlock_irqrestore(&tracepoint_iter_lock, flags); 242 242 } 243 243 244 - void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) 244 + void trace_event_buffer_commit(struct trace_event_buffer *fbuffer) 245 245 { 246 246 if (tracepoint_printk) 247 247 output_printk(fbuffer); 248 248 249 - event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, 249 + event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer, 250 250 fbuffer->event, fbuffer->entry, 251 251 fbuffer->flags, fbuffer->pc); 252 252 } 253 - EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); 253 + EXPORT_SYMBOL_GPL(trace_event_buffer_commit); 254 254 255 - int ftrace_event_reg(struct ftrace_event_call *call, 256 - enum trace_reg type, void *data) 255 + int trace_event_reg(struct trace_event_call *call, 256 + enum trace_reg type, void *data) 257 257 { 258 - struct ftrace_event_file *file = data; 258 + struct trace_event_file *file = data; 259 259 260 260 WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); 261 261 switch (type) { ··· 288 288 } 289 289 return 0; 290 290 } 291 - EXPORT_SYMBOL_GPL(ftrace_event_reg); 291 + EXPORT_SYMBOL_GPL(trace_event_reg); 292 292 293 293 void trace_event_enable_cmd_record(bool enable) 294 294 { 295 - struct ftrace_event_file *file; 295 + struct trace_event_file *file; 296 296 struct trace_array *tr; 297 297 298 298 mutex_lock(&event_mutex); 299 299 do_for_each_event_file(tr, file) { 300 300 301 - if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) 301 + if (!(file->flags & EVENT_FILE_FL_ENABLED)) 302 302 continue; 303 303 304 304 if (enable) { 305 305 tracing_start_cmdline_record(); 306 - set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 306 + set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 307 307 } else { 308 308 tracing_stop_cmdline_record(); 309 - clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 309 + clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 310 310 } 311 311 } while_for_each_event_file(); 312 312 mutex_unlock(&event_mutex); 313 313 } 314 314 315 - static int __ftrace_event_enable_disable(struct ftrace_event_file *file, 315 + static int __ftrace_event_enable_disable(struct trace_event_file *file, 316 316 int enable, int soft_disable) 317 317 { 318 - struct ftrace_event_call *call = file->event_call; 318 + struct trace_event_call *call = file->event_call; 319 319 int ret = 0; 320 320 int disable; 321 321 ··· 337 337 if (soft_disable) { 338 338 if (atomic_dec_return(&file->sm_ref) > 0) 339 339 break; 340 - disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED; 341 - clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 340 + disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; 341 + clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 342 342 } else 343 - disable = !(file->flags & FTRACE_EVENT_FL_SOFT_MODE); 343 + disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); 344 344 345 - if (disable && (file->flags & FTRACE_EVENT_FL_ENABLED)) { 346 - clear_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 347 - if (file->flags & FTRACE_EVENT_FL_RECORDED_CMD) { 345 + if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) { 346 + clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); 347 + if (file->flags & EVENT_FILE_FL_RECORDED_CMD) { 348 348 
tracing_stop_cmdline_record(); 349 - clear_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 349 + clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 350 350 } 351 351 call->class->reg(call, TRACE_REG_UNREGISTER, file); 352 352 } 353 353 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ 354 - if (file->flags & FTRACE_EVENT_FL_SOFT_MODE) 355 - set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 354 + if (file->flags & EVENT_FILE_FL_SOFT_MODE) 355 + set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 356 356 else 357 - clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 357 + clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 358 358 break; 359 359 case 1: 360 360 /* ··· 366 366 * it still seems to be disabled. 367 367 */ 368 368 if (!soft_disable) 369 - clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 369 + clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 370 370 else { 371 371 if (atomic_inc_return(&file->sm_ref) > 1) 372 372 break; 373 - set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags); 373 + set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); 374 374 } 375 375 376 - if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) { 376 + if (!(file->flags & EVENT_FILE_FL_ENABLED)) { 377 377 378 378 /* Keep the event disabled, when going to SOFT_MODE. */ 379 379 if (soft_disable) 380 - set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags); 380 + set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); 381 381 382 382 if (trace_flags & TRACE_ITER_RECORD_CMD) { 383 383 tracing_start_cmdline_record(); 384 - set_bit(FTRACE_EVENT_FL_RECORDED_CMD_BIT, &file->flags); 384 + set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags); 385 385 } 386 386 ret = call->class->reg(call, TRACE_REG_REGISTER, file); 387 387 if (ret) { 388 388 tracing_stop_cmdline_record(); 389 389 pr_info("event trace: Could not enable event " 390 - "%s\n", ftrace_event_name(call)); 390 + "%s\n", trace_event_name(call)); 391 391 break; 392 392 } 393 - set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); 393 + set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags); 394 394 395 395 /* WAS_ENABLED gets set but never cleared. 
*/ 396 396 call->flags |= TRACE_EVENT_FL_WAS_ENABLED; ··· 401 401 return ret; 402 402 } 403 403 404 - int trace_event_enable_disable(struct ftrace_event_file *file, 404 + int trace_event_enable_disable(struct trace_event_file *file, 405 405 int enable, int soft_disable) 406 406 { 407 407 return __ftrace_event_enable_disable(file, enable, soft_disable); 408 408 } 409 409 410 - static int ftrace_event_enable_disable(struct ftrace_event_file *file, 410 + static int ftrace_event_enable_disable(struct trace_event_file *file, 411 411 int enable) 412 412 { 413 413 return __ftrace_event_enable_disable(file, enable, 0); ··· 415 415 416 416 static void ftrace_clear_events(struct trace_array *tr) 417 417 { 418 - struct ftrace_event_file *file; 418 + struct trace_event_file *file; 419 419 420 420 mutex_lock(&event_mutex); 421 421 list_for_each_entry(file, &tr->events, list) { ··· 449 449 system_refcount_inc(system); 450 450 } 451 451 452 - static void __get_system_dir(struct ftrace_subsystem_dir *dir) 452 + static void __get_system_dir(struct trace_subsystem_dir *dir) 453 453 { 454 454 WARN_ON_ONCE(dir->ref_count == 0); 455 455 dir->ref_count++; 456 456 __get_system(dir->subsystem); 457 457 } 458 458 459 - static void __put_system_dir(struct ftrace_subsystem_dir *dir) 459 + static void __put_system_dir(struct trace_subsystem_dir *dir) 460 460 { 461 461 WARN_ON_ONCE(dir->ref_count == 0); 462 462 /* If the subsystem is about to be freed, the dir must be too */ ··· 467 467 kfree(dir); 468 468 } 469 469 470 - static void put_system(struct ftrace_subsystem_dir *dir) 470 + static void put_system(struct trace_subsystem_dir *dir) 471 471 { 472 472 mutex_lock(&event_mutex); 473 473 __put_system_dir(dir); 474 474 mutex_unlock(&event_mutex); 475 475 } 476 476 477 - static void remove_subsystem(struct ftrace_subsystem_dir *dir) 477 + static void remove_subsystem(struct trace_subsystem_dir *dir) 478 478 { 479 479 if (!dir) 480 480 return; ··· 486 486 } 487 487 } 488 488 489 - static void remove_event_file_dir(struct ftrace_event_file *file) 489 + static void remove_event_file_dir(struct trace_event_file *file) 490 490 { 491 491 struct dentry *dir = file->dir; 492 492 struct dentry *child; ··· 515 515 __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, 516 516 const char *sub, const char *event, int set) 517 517 { 518 - struct ftrace_event_file *file; 519 - struct ftrace_event_call *call; 518 + struct trace_event_file *file; 519 + struct trace_event_call *call; 520 520 const char *name; 521 521 int ret = -EINVAL; 522 522 523 523 list_for_each_entry(file, &tr->events, list) { 524 524 525 525 call = file->event_call; 526 - name = ftrace_event_name(call); 526 + name = trace_event_name(call); 527 527 528 528 if (!name || !call->class || !call->class->reg) 529 529 continue; ··· 671 671 static void * 672 672 t_next(struct seq_file *m, void *v, loff_t *pos) 673 673 { 674 - struct ftrace_event_file *file = v; 675 - struct ftrace_event_call *call; 674 + struct trace_event_file *file = v; 675 + struct trace_event_call *call; 676 676 struct trace_array *tr = m->private; 677 677 678 678 (*pos)++; ··· 692 692 693 693 static void *t_start(struct seq_file *m, loff_t *pos) 694 694 { 695 - struct ftrace_event_file *file; 695 + struct trace_event_file *file; 696 696 struct trace_array *tr = m->private; 697 697 loff_t l; 698 698 699 699 mutex_lock(&event_mutex); 700 700 701 - file = list_entry(&tr->events, struct ftrace_event_file, list); 701 + file = list_entry(&tr->events, struct trace_event_file, list); 702 
702 for (l = 0; l <= *pos; ) { 703 703 file = t_next(m, file, &l); 704 704 if (!file) ··· 710 710 static void * 711 711 s_next(struct seq_file *m, void *v, loff_t *pos) 712 712 { 713 - struct ftrace_event_file *file = v; 713 + struct trace_event_file *file = v; 714 714 struct trace_array *tr = m->private; 715 715 716 716 (*pos)++; 717 717 718 718 list_for_each_entry_continue(file, &tr->events, list) { 719 - if (file->flags & FTRACE_EVENT_FL_ENABLED) 719 + if (file->flags & EVENT_FILE_FL_ENABLED) 720 720 return file; 721 721 } 722 722 ··· 725 725 726 726 static void *s_start(struct seq_file *m, loff_t *pos) 727 727 { 728 - struct ftrace_event_file *file; 728 + struct trace_event_file *file; 729 729 struct trace_array *tr = m->private; 730 730 loff_t l; 731 731 732 732 mutex_lock(&event_mutex); 733 733 734 - file = list_entry(&tr->events, struct ftrace_event_file, list); 734 + file = list_entry(&tr->events, struct trace_event_file, list); 735 735 for (l = 0; l <= *pos; ) { 736 736 file = s_next(m, file, &l); 737 737 if (!file) ··· 742 742 743 743 static int t_show(struct seq_file *m, void *v) 744 744 { 745 - struct ftrace_event_file *file = v; 746 - struct ftrace_event_call *call = file->event_call; 745 + struct trace_event_file *file = v; 746 + struct trace_event_call *call = file->event_call; 747 747 748 748 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 749 749 seq_printf(m, "%s:", call->class->system); 750 - seq_printf(m, "%s\n", ftrace_event_name(call)); 750 + seq_printf(m, "%s\n", trace_event_name(call)); 751 751 752 752 return 0; 753 753 } ··· 761 761 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, 762 762 loff_t *ppos) 763 763 { 764 - struct ftrace_event_file *file; 764 + struct trace_event_file *file; 765 765 unsigned long flags; 766 766 char buf[4] = "0"; 767 767 ··· 774 774 if (!file) 775 775 return -ENODEV; 776 776 777 - if (flags & FTRACE_EVENT_FL_ENABLED && 778 - !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 777 + if (flags & EVENT_FILE_FL_ENABLED && 778 + !(flags & EVENT_FILE_FL_SOFT_DISABLED)) 779 779 strcpy(buf, "1"); 780 780 781 - if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || 782 - flags & FTRACE_EVENT_FL_SOFT_MODE) 781 + if (flags & EVENT_FILE_FL_SOFT_DISABLED || 782 + flags & EVENT_FILE_FL_SOFT_MODE) 783 783 strcat(buf, "*"); 784 784 785 785 strcat(buf, "\n"); ··· 791 791 event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 792 792 loff_t *ppos) 793 793 { 794 - struct ftrace_event_file *file; 794 + struct trace_event_file *file; 795 795 unsigned long val; 796 796 int ret; 797 797 ··· 828 828 loff_t *ppos) 829 829 { 830 830 const char set_to_char[4] = { '?', '0', '1', 'X' }; 831 - struct ftrace_subsystem_dir *dir = filp->private_data; 831 + struct trace_subsystem_dir *dir = filp->private_data; 832 832 struct event_subsystem *system = dir->subsystem; 833 - struct ftrace_event_call *call; 834 - struct ftrace_event_file *file; 833 + struct trace_event_call *call; 834 + struct trace_event_file *file; 835 835 struct trace_array *tr = dir->tr; 836 836 char buf[2]; 837 837 int set = 0; ··· 840 840 mutex_lock(&event_mutex); 841 841 list_for_each_entry(file, &tr->events, list) { 842 842 call = file->event_call; 843 - if (!ftrace_event_name(call) || !call->class || !call->class->reg) 843 + if (!trace_event_name(call) || !call->class || !call->class->reg) 844 844 continue; 845 845 846 846 if (system && strcmp(call->class->system, system->name) != 0) ··· 851 851 * or if all events or cleared, or if we have 852 852 * a mixture. 
853 853 */ 854 - set |= (1 << !!(file->flags & FTRACE_EVENT_FL_ENABLED)); 854 + set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED)); 855 855 856 856 /* 857 857 * If we have a mixture, no need to look further. ··· 873 873 system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, 874 874 loff_t *ppos) 875 875 { 876 - struct ftrace_subsystem_dir *dir = filp->private_data; 876 + struct trace_subsystem_dir *dir = filp->private_data; 877 877 struct event_subsystem *system = dir->subsystem; 878 878 const char *name = NULL; 879 879 unsigned long val; ··· 917 917 918 918 static void *f_next(struct seq_file *m, void *v, loff_t *pos) 919 919 { 920 - struct ftrace_event_call *call = event_file_data(m->private); 920 + struct trace_event_call *call = event_file_data(m->private); 921 921 struct list_head *common_head = &ftrace_common_fields; 922 922 struct list_head *head = trace_get_fields(call); 923 923 struct list_head *node = v; ··· 949 949 950 950 static int f_show(struct seq_file *m, void *v) 951 951 { 952 - struct ftrace_event_call *call = event_file_data(m->private); 952 + struct trace_event_call *call = event_file_data(m->private); 953 953 struct ftrace_event_field *field; 954 954 const char *array_descriptor; 955 955 956 956 switch ((unsigned long)v) { 957 957 case FORMAT_HEADER: 958 - seq_printf(m, "name: %s\n", ftrace_event_name(call)); 958 + seq_printf(m, "name: %s\n", trace_event_name(call)); 959 959 seq_printf(m, "ID: %d\n", call->event.type); 960 960 seq_puts(m, "format:\n"); 961 961 return 0; ··· 1062 1062 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1063 1063 loff_t *ppos) 1064 1064 { 1065 - struct ftrace_event_file *file; 1065 + struct trace_event_file *file; 1066 1066 struct trace_seq *s; 1067 1067 int r = -ENODEV; 1068 1068 ··· 1095 1095 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1096 1096 loff_t *ppos) 1097 1097 { 1098 - struct ftrace_event_file *file; 1098 + struct trace_event_file *file; 1099 1099 char *buf; 1100 1100 int err = -ENODEV; 1101 1101 ··· 1132 1132 static int subsystem_open(struct inode *inode, struct file *filp) 1133 1133 { 1134 1134 struct event_subsystem *system = NULL; 1135 - struct ftrace_subsystem_dir *dir = NULL; /* Initialize for gcc */ 1135 + struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */ 1136 1136 struct trace_array *tr; 1137 1137 int ret; 1138 1138 ··· 1181 1181 1182 1182 static int system_tr_open(struct inode *inode, struct file *filp) 1183 1183 { 1184 - struct ftrace_subsystem_dir *dir; 1184 + struct trace_subsystem_dir *dir; 1185 1185 struct trace_array *tr = inode->i_private; 1186 1186 int ret; 1187 1187 ··· 1214 1214 1215 1215 static int subsystem_release(struct inode *inode, struct file *file) 1216 1216 { 1217 - struct ftrace_subsystem_dir *dir = file->private_data; 1217 + struct trace_subsystem_dir *dir = file->private_data; 1218 1218 1219 1219 trace_array_put(dir->tr); 1220 1220 ··· 1235 1235 subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, 1236 1236 loff_t *ppos) 1237 1237 { 1238 - struct ftrace_subsystem_dir *dir = filp->private_data; 1238 + struct trace_subsystem_dir *dir = filp->private_data; 1239 1239 struct event_subsystem *system = dir->subsystem; 1240 1240 struct trace_seq *s; 1241 1241 int r; ··· 1262 1262 subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, 1263 1263 loff_t *ppos) 1264 1264 { 1265 - struct ftrace_subsystem_dir *dir = filp->private_data; 1265 + struct trace_subsystem_dir *dir = 
filp->private_data; 1266 1266 char *buf; 1267 1267 int err; 1268 1268 ··· 1497 1497 1498 1498 static struct dentry * 1499 1499 event_subsystem_dir(struct trace_array *tr, const char *name, 1500 - struct ftrace_event_file *file, struct dentry *parent) 1500 + struct trace_event_file *file, struct dentry *parent) 1501 1501 { 1502 - struct ftrace_subsystem_dir *dir; 1502 + struct trace_subsystem_dir *dir; 1503 1503 struct event_subsystem *system; 1504 1504 struct dentry *entry; 1505 1505 ··· 1571 1571 } 1572 1572 1573 1573 static int 1574 - event_create_dir(struct dentry *parent, struct ftrace_event_file *file) 1574 + event_create_dir(struct dentry *parent, struct trace_event_file *file) 1575 1575 { 1576 - struct ftrace_event_call *call = file->event_call; 1576 + struct trace_event_call *call = file->event_call; 1577 1577 struct trace_array *tr = file->tr; 1578 1578 struct list_head *head; 1579 1579 struct dentry *d_events; ··· 1591 1591 } else 1592 1592 d_events = parent; 1593 1593 1594 - name = ftrace_event_name(call); 1594 + name = trace_event_name(call); 1595 1595 file->dir = tracefs_create_dir(name, d_events); 1596 1596 if (!file->dir) { 1597 1597 pr_warn("Could not create tracefs '%s' directory\n", name); ··· 1634 1634 return 0; 1635 1635 } 1636 1636 1637 - static void remove_event_from_tracers(struct ftrace_event_call *call) 1637 + static void remove_event_from_tracers(struct trace_event_call *call) 1638 1638 { 1639 - struct ftrace_event_file *file; 1639 + struct trace_event_file *file; 1640 1640 struct trace_array *tr; 1641 1641 1642 1642 do_for_each_event_file_safe(tr, file) { ··· 1654 1654 } while_for_each_event_file(); 1655 1655 } 1656 1656 1657 - static void event_remove(struct ftrace_event_call *call) 1657 + static void event_remove(struct trace_event_call *call) 1658 1658 { 1659 1659 struct trace_array *tr; 1660 - struct ftrace_event_file *file; 1660 + struct trace_event_file *file; 1661 1661 1662 1662 do_for_each_event_file(tr, file) { 1663 1663 if (file->event_call != call) ··· 1673 1673 } while_for_each_event_file(); 1674 1674 1675 1675 if (call->event.funcs) 1676 - __unregister_ftrace_event(&call->event); 1676 + __unregister_trace_event(&call->event); 1677 1677 remove_event_from_tracers(call); 1678 1678 list_del(&call->list); 1679 1679 } 1680 1680 1681 - static int event_init(struct ftrace_event_call *call) 1681 + static int event_init(struct trace_event_call *call) 1682 1682 { 1683 1683 int ret = 0; 1684 1684 const char *name; 1685 1685 1686 - name = ftrace_event_name(call); 1686 + name = trace_event_name(call); 1687 1687 if (WARN_ON(!name)) 1688 1688 return -EINVAL; 1689 1689 ··· 1697 1697 } 1698 1698 1699 1699 static int 1700 - __register_event(struct ftrace_event_call *call, struct module *mod) 1700 + __register_event(struct trace_event_call *call, struct module *mod) 1701 1701 { 1702 1702 int ret; 1703 1703 ··· 1733 1733 return ptr + elen; 1734 1734 } 1735 1735 1736 - static void update_event_printk(struct ftrace_event_call *call, 1736 + static void update_event_printk(struct trace_event_call *call, 1737 1737 struct trace_enum_map *map) 1738 1738 { 1739 1739 char *ptr; ··· 1811 1811 1812 1812 void trace_event_enum_update(struct trace_enum_map **map, int len) 1813 1813 { 1814 - struct ftrace_event_call *call, *p; 1814 + struct trace_event_call *call, *p; 1815 1815 const char *last_system = NULL; 1816 1816 int last_i; 1817 1817 int i; ··· 1836 1836 up_write(&trace_event_sem); 1837 1837 } 1838 1838 1839 - static struct ftrace_event_file * 1840 - 
trace_create_new_event(struct ftrace_event_call *call, 1839 + static struct trace_event_file * 1840 + trace_create_new_event(struct trace_event_call *call, 1841 1841 struct trace_array *tr) 1842 1842 { 1843 - struct ftrace_event_file *file; 1843 + struct trace_event_file *file; 1844 1844 1845 1845 file = kmem_cache_alloc(file_cachep, GFP_TRACE); 1846 1846 if (!file) ··· 1858 1858 1859 1859 /* Add an event to a trace directory */ 1860 1860 static int 1861 - __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) 1861 + __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) 1862 1862 { 1863 - struct ftrace_event_file *file; 1863 + struct trace_event_file *file; 1864 1864 1865 1865 file = trace_create_new_event(call, tr); 1866 1866 if (!file) ··· 1875 1875 * the filesystem is initialized. 1876 1876 */ 1877 1877 static __init int 1878 - __trace_early_add_new_event(struct ftrace_event_call *call, 1878 + __trace_early_add_new_event(struct trace_event_call *call, 1879 1879 struct trace_array *tr) 1880 1880 { 1881 - struct ftrace_event_file *file; 1881 + struct trace_event_file *file; 1882 1882 1883 1883 file = trace_create_new_event(call, tr); 1884 1884 if (!file) ··· 1888 1888 } 1889 1889 1890 1890 struct ftrace_module_file_ops; 1891 - static void __add_event_to_tracers(struct ftrace_event_call *call); 1891 + static void __add_event_to_tracers(struct trace_event_call *call); 1892 1892 1893 1893 /* Add an additional event_call dynamically */ 1894 - int trace_add_event_call(struct ftrace_event_call *call) 1894 + int trace_add_event_call(struct trace_event_call *call) 1895 1895 { 1896 1896 int ret; 1897 1897 mutex_lock(&trace_types_lock); ··· 1910 1910 * Must be called under locking of trace_types_lock, event_mutex and 1911 1911 * trace_event_sem. 1912 1912 */ 1913 - static void __trace_remove_event_call(struct ftrace_event_call *call) 1913 + static void __trace_remove_event_call(struct trace_event_call *call) 1914 1914 { 1915 1915 event_remove(call); 1916 1916 trace_destroy_fields(call); ··· 1918 1918 call->filter = NULL; 1919 1919 } 1920 1920 1921 - static int probe_remove_event_call(struct ftrace_event_call *call) 1921 + static int probe_remove_event_call(struct trace_event_call *call) 1922 1922 { 1923 1923 struct trace_array *tr; 1924 - struct ftrace_event_file *file; 1924 + struct trace_event_file *file; 1925 1925 1926 1926 #ifdef CONFIG_PERF_EVENTS 1927 1927 if (call->perf_refcount) ··· 1932 1932 continue; 1933 1933 /* 1934 1934 * We can't rely on ftrace_event_enable_disable(enable => 0) 1935 - * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress 1935 + * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress 1936 1936 * TRACE_REG_UNREGISTER. 
1937 1937 */ 1938 - if (file->flags & FTRACE_EVENT_FL_ENABLED) 1938 + if (file->flags & EVENT_FILE_FL_ENABLED) 1939 1939 return -EBUSY; 1940 1940 /* 1941 1941 * The do_for_each_event_file_safe() is ··· 1952 1952 } 1953 1953 1954 1954 /* Remove an event_call */ 1955 - int trace_remove_event_call(struct ftrace_event_call *call) 1955 + int trace_remove_event_call(struct trace_event_call *call) 1956 1956 { 1957 1957 int ret; 1958 1958 ··· 1976 1976 1977 1977 static void trace_module_add_events(struct module *mod) 1978 1978 { 1979 - struct ftrace_event_call **call, **start, **end; 1979 + struct trace_event_call **call, **start, **end; 1980 1980 1981 1981 if (!mod->num_trace_events) 1982 1982 return; ··· 1999 1999 2000 2000 static void trace_module_remove_events(struct module *mod) 2001 2001 { 2002 - struct ftrace_event_call *call, *p; 2002 + struct trace_event_call *call, *p; 2003 2003 bool clear_trace = false; 2004 2004 2005 2005 down_write(&trace_event_sem); ··· 2055 2055 static void 2056 2056 __trace_add_event_dirs(struct trace_array *tr) 2057 2057 { 2058 - struct ftrace_event_call *call; 2058 + struct trace_event_call *call; 2059 2059 int ret; 2060 2060 2061 2061 list_for_each_entry(call, &ftrace_events, list) { 2062 2062 ret = __trace_add_new_event(call, tr); 2063 2063 if (ret < 0) 2064 2064 pr_warn("Could not create directory for event %s\n", 2065 - ftrace_event_name(call)); 2065 + trace_event_name(call)); 2066 2066 } 2067 2067 } 2068 2068 2069 - struct ftrace_event_file * 2069 + struct trace_event_file * 2070 2070 find_event_file(struct trace_array *tr, const char *system, const char *event) 2071 2071 { 2072 - struct ftrace_event_file *file; 2073 - struct ftrace_event_call *call; 2072 + struct trace_event_file *file; 2073 + struct trace_event_call *call; 2074 2074 const char *name; 2075 2075 2076 2076 list_for_each_entry(file, &tr->events, list) { 2077 2077 2078 2078 call = file->event_call; 2079 - name = ftrace_event_name(call); 2079 + name = trace_event_name(call); 2080 2080 2081 2081 if (!name || !call->class || !call->class->reg) 2082 2082 continue; ··· 2098 2098 #define DISABLE_EVENT_STR "disable_event" 2099 2099 2100 2100 struct event_probe_data { 2101 - struct ftrace_event_file *file; 2101 + struct trace_event_file *file; 2102 2102 unsigned long count; 2103 2103 int ref; 2104 2104 bool enable; ··· 2114 2114 return; 2115 2115 2116 2116 if (data->enable) 2117 - clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); 2117 + clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 2118 2118 else 2119 - set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &data->file->flags); 2119 + set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags); 2120 2120 } 2121 2121 2122 2122 static void ··· 2132 2132 return; 2133 2133 2134 2134 /* Skip if the event is in a state we want to switch to */ 2135 - if (data->enable == !(data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 2135 + if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) 2136 2136 return; 2137 2137 2138 2138 if (data->count != -1) ··· 2152 2152 seq_printf(m, "%s:%s:%s", 2153 2153 data->enable ? 
ENABLE_EVENT_STR : DISABLE_EVENT_STR, 2154 2154 data->file->event_call->class->system, 2155 - ftrace_event_name(data->file->event_call)); 2155 + trace_event_name(data->file->event_call)); 2156 2156 2157 2157 if (data->count == -1) 2158 2158 seq_puts(m, ":unlimited\n"); ··· 2226 2226 char *glob, char *cmd, char *param, int enabled) 2227 2227 { 2228 2228 struct trace_array *tr = top_trace_array(); 2229 - struct ftrace_event_file *file; 2229 + struct trace_event_file *file; 2230 2230 struct ftrace_probe_ops *ops; 2231 2231 struct event_probe_data *data; 2232 2232 const char *system; ··· 2358 2358 #endif /* CONFIG_DYNAMIC_FTRACE */ 2359 2359 2360 2360 /* 2361 - * The top level array has already had its ftrace_event_file 2361 + * The top level array has already had its trace_event_file 2362 2362 * descriptors created in order to allow for early events to 2363 2363 * be recorded. This function is called after the tracefs has been 2364 2364 * initialized, and we now have to create the files associated ··· 2367 2367 static __init void 2368 2368 __trace_early_add_event_dirs(struct trace_array *tr) 2369 2369 { 2370 - struct ftrace_event_file *file; 2370 + struct trace_event_file *file; 2371 2371 int ret; 2372 2372 2373 2373 ··· 2375 2375 ret = event_create_dir(tr->event_dir, file); 2376 2376 if (ret < 0) 2377 2377 pr_warn("Could not create directory for event %s\n", 2378 - ftrace_event_name(file->event_call)); 2378 + trace_event_name(file->event_call)); 2379 2379 } 2380 2380 } 2381 2381 ··· 2388 2388 static __init void 2389 2389 __trace_early_add_events(struct trace_array *tr) 2390 2390 { 2391 - struct ftrace_event_call *call; 2391 + struct trace_event_call *call; 2392 2392 int ret; 2393 2393 2394 2394 list_for_each_entry(call, &ftrace_events, list) { ··· 2399 2399 ret = __trace_early_add_new_event(call, tr); 2400 2400 if (ret < 0) 2401 2401 pr_warn("Could not create early event %s\n", 2402 - ftrace_event_name(call)); 2402 + trace_event_name(call)); 2403 2403 } 2404 2404 } 2405 2405 ··· 2407 2407 static void 2408 2408 __trace_remove_event_dirs(struct trace_array *tr) 2409 2409 { 2410 - struct ftrace_event_file *file, *next; 2410 + struct trace_event_file *file, *next; 2411 2411 2412 2412 list_for_each_entry_safe(file, next, &tr->events, list) 2413 2413 remove_event_file_dir(file); 2414 2414 } 2415 2415 2416 - static void __add_event_to_tracers(struct ftrace_event_call *call) 2416 + static void __add_event_to_tracers(struct trace_event_call *call) 2417 2417 { 2418 2418 struct trace_array *tr; 2419 2419 ··· 2421 2421 __trace_add_new_event(call, tr); 2422 2422 } 2423 2423 2424 - extern struct ftrace_event_call *__start_ftrace_events[]; 2425 - extern struct ftrace_event_call *__stop_ftrace_events[]; 2424 + extern struct trace_event_call *__start_ftrace_events[]; 2425 + extern struct trace_event_call *__stop_ftrace_events[]; 2426 2426 2427 2427 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; 2428 2428 ··· 2557 2557 static __init int event_trace_memsetup(void) 2558 2558 { 2559 2559 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC); 2560 - file_cachep = KMEM_CACHE(ftrace_event_file, SLAB_PANIC); 2560 + file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC); 2561 2561 return 0; 2562 2562 } 2563 2563 ··· 2593 2593 static __init int event_trace_enable(void) 2594 2594 { 2595 2595 struct trace_array *tr = top_trace_array(); 2596 - struct ftrace_event_call **iter, *call; 2596 + struct trace_event_call **iter, *call; 2597 2597 int ret; 2598 2598 2599 2599 if (!tr) ··· 2754 2754 */ 2755 2755 
static __init void event_trace_self_tests(void) 2756 2756 { 2757 - struct ftrace_subsystem_dir *dir; 2758 - struct ftrace_event_file *file; 2759 - struct ftrace_event_call *call; 2757 + struct trace_subsystem_dir *dir; 2758 + struct trace_event_file *file; 2759 + struct trace_event_call *call; 2760 2760 struct event_subsystem *system; 2761 2761 struct trace_array *tr; 2762 2762 int ret; ··· 2787 2787 continue; 2788 2788 #endif 2789 2789 2790 - pr_info("Testing event %s: ", ftrace_event_name(call)); 2790 + pr_info("Testing event %s: ", trace_event_name(call)); 2791 2791 2792 2792 /* 2793 2793 * If an event is already enabled, someone is using 2794 2794 * it and the self test should not be on. 2795 2795 */ 2796 - if (file->flags & FTRACE_EVENT_FL_ENABLED) { 2796 + if (file->flags & EVENT_FILE_FL_ENABLED) { 2797 2797 pr_warn("Enabled event during self test!\n"); 2798 2798 WARN_ON_ONCE(1); 2799 2799 continue;
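As a reader aid, a minimal sketch of how the renamed pieces above compose: walk a trace_array's event files, name each event with trace_event_name() and test EVENT_FILE_FL_ENABLED. Illustrative only; it would live inside kernel/trace (struct trace_array comes from the private trace.h, not the public header), dump_event_states() is an invented name, and a real caller would hold event_mutex, as find_event_file() expects.

/* Sketch: assumes kernel/trace context (#include "trace.h") and that
 * the caller holds event_mutex. */
static void dump_event_states(struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct trace_event_call *call = file->event_call;

		pr_info("%s:%s is %s\n", call->class->system,
			trace_event_name(call),
			(file->flags & EVENT_FILE_FL_ENABLED) ?
				"enabled" : "disabled");
	}
}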
+43 -43
kernel/trace/trace_events_filter.c
··· 643 643 free_page((unsigned long) buf); 644 644 } 645 645 646 - static inline struct event_filter *event_filter(struct ftrace_event_file *file) 646 + static inline struct event_filter *event_filter(struct trace_event_file *file) 647 647 { 648 648 if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 649 649 return file->event_call->filter; ··· 652 652 } 653 653 654 654 /* caller must hold event_mutex */ 655 - void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s) 655 + void print_event_filter(struct trace_event_file *file, struct trace_seq *s) 656 656 { 657 657 struct event_filter *filter = event_filter(file); 658 658 ··· 780 780 filter->n_preds = 0; 781 781 } 782 782 783 - static void filter_disable(struct ftrace_event_file *file) 783 + static void filter_disable(struct trace_event_file *file) 784 784 { 785 - struct ftrace_event_call *call = file->event_call; 785 + struct trace_event_call *call = file->event_call; 786 786 787 787 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 788 788 call->flags &= ~TRACE_EVENT_FL_FILTERED; 789 789 else 790 - file->flags &= ~FTRACE_EVENT_FL_FILTERED; 790 + file->flags &= ~EVENT_FILE_FL_FILTERED; 791 791 } 792 792 793 793 static void __free_filter(struct event_filter *filter) ··· 837 837 return 0; 838 838 } 839 839 840 - static inline void __remove_filter(struct ftrace_event_file *file) 840 + static inline void __remove_filter(struct trace_event_file *file) 841 841 { 842 - struct ftrace_event_call *call = file->event_call; 842 + struct trace_event_call *call = file->event_call; 843 843 844 844 filter_disable(file); 845 845 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) ··· 848 848 remove_filter_string(file->filter); 849 849 } 850 850 851 - static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir, 851 + static void filter_free_subsystem_preds(struct trace_subsystem_dir *dir, 852 852 struct trace_array *tr) 853 853 { 854 - struct ftrace_event_file *file; 854 + struct trace_event_file *file; 855 855 856 856 list_for_each_entry(file, &tr->events, list) { 857 857 if (file->system != dir) ··· 860 860 } 861 861 } 862 862 863 - static inline void __free_subsystem_filter(struct ftrace_event_file *file) 863 + static inline void __free_subsystem_filter(struct trace_event_file *file) 864 864 { 865 - struct ftrace_event_call *call = file->event_call; 865 + struct trace_event_call *call = file->event_call; 866 866 867 867 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) { 868 868 __free_filter(call->filter); ··· 873 873 } 874 874 } 875 875 876 - static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir, 876 + static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir, 877 877 struct trace_array *tr) 878 878 { 879 - struct ftrace_event_file *file; 879 + struct trace_event_file *file; 880 880 881 881 list_for_each_entry(file, &tr->events, list) { 882 882 if (file->system != dir) ··· 1342 1342 } 1343 1343 1344 1344 static struct filter_pred *create_pred(struct filter_parse_state *ps, 1345 - struct ftrace_event_call *call, 1345 + struct trace_event_call *call, 1346 1346 int op, char *operand1, char *operand2) 1347 1347 { 1348 1348 struct ftrace_event_field *field; ··· 1564 1564 filter->preds); 1565 1565 } 1566 1566 1567 - static int replace_preds(struct ftrace_event_call *call, 1567 + static int replace_preds(struct trace_event_call *call, 1568 1568 struct event_filter *filter, 1569 1569 struct filter_parse_state *ps, 1570 1570 bool dry_run) ··· 1677 1677 return err; 1678 1678 } 1679 
1679 1680 - static inline void event_set_filtered_flag(struct ftrace_event_file *file) 1680 + static inline void event_set_filtered_flag(struct trace_event_file *file) 1681 1681 { 1682 - struct ftrace_event_call *call = file->event_call; 1682 + struct trace_event_call *call = file->event_call; 1683 1683 1684 1684 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1685 1685 call->flags |= TRACE_EVENT_FL_FILTERED; 1686 1686 else 1687 - file->flags |= FTRACE_EVENT_FL_FILTERED; 1687 + file->flags |= EVENT_FILE_FL_FILTERED; 1688 1688 } 1689 1689 1690 - static inline void event_set_filter(struct ftrace_event_file *file, 1690 + static inline void event_set_filter(struct trace_event_file *file, 1691 1691 struct event_filter *filter) 1692 1692 { 1693 - struct ftrace_event_call *call = file->event_call; 1693 + struct trace_event_call *call = file->event_call; 1694 1694 1695 1695 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1696 1696 rcu_assign_pointer(call->filter, filter); ··· 1698 1698 rcu_assign_pointer(file->filter, filter); 1699 1699 } 1700 1700 1701 - static inline void event_clear_filter(struct ftrace_event_file *file) 1701 + static inline void event_clear_filter(struct trace_event_file *file) 1702 1702 { 1703 - struct ftrace_event_call *call = file->event_call; 1703 + struct trace_event_call *call = file->event_call; 1704 1704 1705 1705 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1706 1706 RCU_INIT_POINTER(call->filter, NULL); ··· 1709 1709 } 1710 1710 1711 1711 static inline void 1712 - event_set_no_set_filter_flag(struct ftrace_event_file *file) 1712 + event_set_no_set_filter_flag(struct trace_event_file *file) 1713 1713 { 1714 - struct ftrace_event_call *call = file->event_call; 1714 + struct trace_event_call *call = file->event_call; 1715 1715 1716 1716 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1717 1717 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; 1718 1718 else 1719 - file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER; 1719 + file->flags |= EVENT_FILE_FL_NO_SET_FILTER; 1720 1720 } 1721 1721 1722 1722 static inline void 1723 - event_clear_no_set_filter_flag(struct ftrace_event_file *file) 1723 + event_clear_no_set_filter_flag(struct trace_event_file *file) 1724 1724 { 1725 - struct ftrace_event_call *call = file->event_call; 1725 + struct trace_event_call *call = file->event_call; 1726 1726 1727 1727 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1728 1728 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; 1729 1729 else 1730 - file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER; 1730 + file->flags &= ~EVENT_FILE_FL_NO_SET_FILTER; 1731 1731 } 1732 1732 1733 1733 static inline bool 1734 - event_no_set_filter_flag(struct ftrace_event_file *file) 1734 + event_no_set_filter_flag(struct trace_event_file *file) 1735 1735 { 1736 - struct ftrace_event_call *call = file->event_call; 1736 + struct trace_event_call *call = file->event_call; 1737 1737 1738 - if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER) 1738 + if (file->flags & EVENT_FILE_FL_NO_SET_FILTER) 1739 1739 return true; 1740 1740 1741 1741 if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) && ··· 1750 1750 struct event_filter *filter; 1751 1751 }; 1752 1752 1753 - static int replace_system_preds(struct ftrace_subsystem_dir *dir, 1753 + static int replace_system_preds(struct trace_subsystem_dir *dir, 1754 1754 struct trace_array *tr, 1755 1755 struct filter_parse_state *ps, 1756 1756 char *filter_string) 1757 1757 { 1758 - struct ftrace_event_file *file; 1758 + struct trace_event_file *file; 1759 1759 struct filter_list *filter_item; 
1760 1760 struct filter_list *tmp; 1761 1761 LIST_HEAD(filter_list); ··· 1899 1899 } 1900 1900 1901 1901 /** 1902 - * create_filter - create a filter for a ftrace_event_call 1903 - * @call: ftrace_event_call to create a filter for 1902 + * create_filter - create a filter for a trace_event_call 1903 + * @call: trace_event_call to create a filter for 1904 1904 * @filter_str: filter string 1905 1905 * @set_str: remember @filter_str and enable detailed error in filter 1906 1906 * @filterp: out param for created filter (always updated on return) ··· 1914 1914 * information if @set_str is %true and the caller is responsible for 1915 1915 * freeing it. 1916 1916 */ 1917 - static int create_filter(struct ftrace_event_call *call, 1917 + static int create_filter(struct trace_event_call *call, 1918 1918 char *filter_str, bool set_str, 1919 1919 struct event_filter **filterp) 1920 1920 { ··· 1934 1934 return err; 1935 1935 } 1936 1936 1937 - int create_event_filter(struct ftrace_event_call *call, 1937 + int create_event_filter(struct trace_event_call *call, 1938 1938 char *filter_str, bool set_str, 1939 1939 struct event_filter **filterp) 1940 1940 { ··· 1950 1950 * Identical to create_filter() except that it creates a subsystem filter 1951 1951 * and always remembers @filter_str. 1952 1952 */ 1953 - static int create_system_filter(struct ftrace_subsystem_dir *dir, 1953 + static int create_system_filter(struct trace_subsystem_dir *dir, 1954 1954 struct trace_array *tr, 1955 1955 char *filter_str, struct event_filter **filterp) 1956 1956 { ··· 1976 1976 } 1977 1977 1978 1978 /* caller must hold event_mutex */ 1979 - int apply_event_filter(struct ftrace_event_file *file, char *filter_string) 1979 + int apply_event_filter(struct trace_event_file *file, char *filter_string) 1980 1980 { 1981 - struct ftrace_event_call *call = file->event_call; 1981 + struct trace_event_call *call = file->event_call; 1982 1982 struct event_filter *filter; 1983 1983 int err; 1984 1984 ··· 2027 2027 return err; 2028 2028 } 2029 2029 2030 - int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, 2030 + int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, 2031 2031 char *filter_string) 2032 2032 { 2033 2033 struct event_subsystem *system = dir->subsystem; ··· 2226 2226 { 2227 2227 int err; 2228 2228 struct event_filter *filter; 2229 - struct ftrace_event_call *call; 2229 + struct trace_event_call *call; 2230 2230 2231 2231 mutex_lock(&event_mutex); 2232 2232 ··· 2282 2282 2283 2283 static struct test_filter_data_t { 2284 2284 char *filter; 2285 - struct ftrace_raw_ftrace_test_filter rec; 2285 + struct trace_event_raw_ftrace_test_filter rec; 2286 2286 int match; 2287 2287 char *not_visited; 2288 2288 } test_filter_data[] = {
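For orientation, a hedged sketch of the renamed filter entry points in use: create_event_filter() as declared in the hunk above, filter_match_preds() from include/linux/trace_events.h, plus free_event_filter() for cleanup (the destructor is not shown in this diff, so treat it as an assumption); record_matches() and the filter string are invented.

/* Sketch, not an exported API: build a one-off filter against an
 * event call and test a raw record against it. */
static bool record_matches(struct trace_event_call *call, void *rec)
{
	struct event_filter *filter = NULL;
	char str[] = "common_pid != 0";	/* any filter expression */
	bool match = false;

	if (!create_event_filter(call, str, false, &filter))
		match = filter_match_preds(filter, rec);

	free_event_filter(filter);	/* assumed NULL-safe, as __free_filter() is */
	return match;
}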
+35 -35
kernel/trace/trace_events_trigger.c
··· 40 40 41 41 /** 42 42 * event_triggers_call - Call triggers associated with a trace event 43 - * @file: The ftrace_event_file associated with the event 43 + * @file: The trace_event_file associated with the event 44 44 * @rec: The trace entry for the event, NULL for unconditional invocation 45 45 * 46 46 * For each trigger associated with an event, invoke the trigger ··· 63 63 * any trigger that should be deferred, ETT_NONE if nothing to defer. 64 64 */ 65 65 enum event_trigger_type 66 - event_triggers_call(struct ftrace_event_file *file, void *rec) 66 + event_triggers_call(struct trace_event_file *file, void *rec) 67 67 { 68 68 struct event_trigger_data *data; 69 69 enum event_trigger_type tt = ETT_NONE; ··· 92 92 93 93 /** 94 94 * event_triggers_post_call - Call 'post_triggers' for a trace event 95 - * @file: The ftrace_event_file associated with the event 95 + * @file: The trace_event_file associated with the event 96 96 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke 97 97 * 98 98 * For each trigger associated with an event, invoke the trigger ··· 103 103 * Called from tracepoint handlers (with rcu_read_lock_sched() held). 104 104 */ 105 105 void 106 - event_triggers_post_call(struct ftrace_event_file *file, 106 + event_triggers_post_call(struct trace_event_file *file, 107 107 enum event_trigger_type tt) 108 108 { 109 109 struct event_trigger_data *data; ··· 119 119 120 120 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos) 121 121 { 122 - struct ftrace_event_file *event_file = event_file_data(m->private); 122 + struct trace_event_file *event_file = event_file_data(m->private); 123 123 124 124 if (t == SHOW_AVAILABLE_TRIGGERS) 125 125 return NULL; ··· 129 129 130 130 static void *trigger_start(struct seq_file *m, loff_t *pos) 131 131 { 132 - struct ftrace_event_file *event_file; 132 + struct trace_event_file *event_file; 133 133 134 134 /* ->stop() is called even if ->start() fails */ 135 135 mutex_lock(&event_mutex); ··· 201 201 return ret; 202 202 } 203 203 204 - static int trigger_process_regex(struct ftrace_event_file *file, char *buff) 204 + static int trigger_process_regex(struct trace_event_file *file, char *buff) 205 205 { 206 206 char *command, *next = buff; 207 207 struct event_command *p; ··· 227 227 const char __user *ubuf, 228 228 size_t cnt, loff_t *ppos) 229 229 { 230 - struct ftrace_event_file *event_file; 230 + struct trace_event_file *event_file; 231 231 ssize_t ret; 232 232 char *buf; 233 233 ··· 430 430 trigger_data_free(data); 431 431 } 432 432 433 - static int trace_event_trigger_enable_disable(struct ftrace_event_file *file, 433 + static int trace_event_trigger_enable_disable(struct trace_event_file *file, 434 434 int trigger_enable) 435 435 { 436 436 int ret = 0; ··· 438 438 if (trigger_enable) { 439 439 if (atomic_inc_return(&file->tm_ref) > 1) 440 440 return ret; 441 - set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags); 441 + set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags); 442 442 ret = trace_event_enable_disable(file, 1, 1); 443 443 } else { 444 444 if (atomic_dec_return(&file->tm_ref) > 0) 445 445 return ret; 446 - clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags); 446 + clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags); 447 447 ret = trace_event_enable_disable(file, 0, 1); 448 448 } 449 449 ··· 466 466 void 467 467 clear_event_triggers(struct trace_array *tr) 468 468 { 469 - struct ftrace_event_file *file; 469 + struct trace_event_file *file; 470 470 471 471 
list_for_each_entry(file, &tr->events, list) { 472 472 struct event_trigger_data *data; ··· 480 480 481 481 /** 482 482 * update_cond_flag - Set or reset the TRIGGER_COND bit 483 - * @file: The ftrace_event_file associated with the event 483 + * @file: The trace_event_file associated with the event 484 484 * 485 485 * If an event has triggers and any of those triggers has a filter or 486 486 * a post_trigger, trigger invocation needs to be deferred until after ··· 488 488 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be 489 489 * cleared. 490 490 */ 491 - static void update_cond_flag(struct ftrace_event_file *file) 491 + static void update_cond_flag(struct trace_event_file *file) 492 492 { 493 493 struct event_trigger_data *data; 494 494 bool set_cond = false; ··· 501 501 } 502 502 503 503 if (set_cond) 504 - set_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags); 504 + set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags); 505 505 else 506 - clear_bit(FTRACE_EVENT_FL_TRIGGER_COND_BIT, &file->flags); 506 + clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags); 507 507 } 508 508 509 509 /** ··· 511 511 * @glob: The raw string used to register the trigger 512 512 * @ops: The trigger ops associated with the trigger 513 513 * @data: Trigger-specific data to associate with the trigger 514 - * @file: The ftrace_event_file associated with the event 514 + * @file: The trace_event_file associated with the event 515 515 * 516 516 * Common implementation for event trigger registration. 517 517 * ··· 522 522 */ 523 523 static int register_trigger(char *glob, struct event_trigger_ops *ops, 524 524 struct event_trigger_data *data, 525 - struct ftrace_event_file *file) 525 + struct trace_event_file *file) 526 526 { 527 527 struct event_trigger_data *test; 528 528 int ret = 0; ··· 557 557 * @glob: The raw string used to register the trigger 558 558 * @ops: The trigger ops associated with the trigger 559 559 * @test: Trigger-specific data used to find the trigger to remove 560 - * @file: The ftrace_event_file associated with the event 560 + * @file: The trace_event_file associated with the event 561 561 * 562 562 * Common implementation for event trigger unregistration. 
563 563 * ··· 566 566 */ 567 567 static void unregister_trigger(char *glob, struct event_trigger_ops *ops, 568 568 struct event_trigger_data *test, 569 - struct ftrace_event_file *file) 569 + struct trace_event_file *file) 570 570 { 571 571 struct event_trigger_data *data; 572 572 bool unregistered = false; ··· 588 588 /** 589 589 * event_trigger_callback - Generic event_command @func implementation 590 590 * @cmd_ops: The command ops, used for trigger registration 591 - * @file: The ftrace_event_file associated with the event 591 + * @file: The trace_event_file associated with the event 592 592 * @glob: The raw string used to register the trigger 593 593 * @cmd: The cmd portion of the string used to register the trigger 594 594 * @param: The params portion of the string used to register the trigger ··· 603 603 */ 604 604 static int 605 605 event_trigger_callback(struct event_command *cmd_ops, 606 - struct ftrace_event_file *file, 606 + struct trace_event_file *file, 607 607 char *glob, char *cmd, char *param) 608 608 { 609 609 struct event_trigger_data *trigger_data; ··· 688 688 * set_trigger_filter - Generic event_command @set_filter implementation 689 689 * @filter_str: The filter string for the trigger, NULL to remove filter 690 690 * @trigger_data: Trigger-specific data 691 - * @file: The ftrace_event_file associated with the event 691 + * @file: The trace_event_file associated with the event 692 692 * 693 693 * Common implementation for event command filter parsing and filter 694 694 * instantiation. ··· 702 702 */ 703 703 static int set_trigger_filter(char *filter_str, 704 704 struct event_trigger_data *trigger_data, 705 - struct ftrace_event_file *file) 705 + struct trace_event_file *file) 706 706 { 707 707 struct event_trigger_data *data = trigger_data; 708 708 struct event_filter *filter = NULL, *tmp; ··· 900 900 static int 901 901 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops, 902 902 struct event_trigger_data *data, 903 - struct ftrace_event_file *file) 903 + struct trace_event_file *file) 904 904 { 905 905 int ret = register_trigger(glob, ops, data, file); 906 906 ··· 968 968 * Skip 3: 969 969 * stacktrace_trigger() 970 970 * event_triggers_post_call() 971 - * ftrace_raw_event_xxx() 971 + * trace_event_raw_event_xxx() 972 972 */ 973 973 #define STACK_SKIP 3 974 974 ··· 1053 1053 #define DISABLE_EVENT_STR "disable_event" 1054 1054 1055 1055 struct enable_trigger_data { 1056 - struct ftrace_event_file *file; 1056 + struct trace_event_file *file; 1057 1057 bool enable; 1058 1058 }; 1059 1059 ··· 1063 1063 struct enable_trigger_data *enable_data = data->private_data; 1064 1064 1065 1065 if (enable_data->enable) 1066 - clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); 1066 + clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); 1067 1067 else 1068 - set_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); 1068 + set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags); 1069 1069 } 1070 1070 1071 1071 static void ··· 1077 1077 return; 1078 1078 1079 1079 /* Skip if the event is in a state we want to switch to */ 1080 - if (enable_data->enable == !(enable_data->file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) 1080 + if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED)) 1081 1081 return; 1082 1082 1083 1083 if (data->count != -1) ··· 1095 1095 seq_printf(m, "%s:%s:%s", 1096 1096 enable_data->enable ? 
ENABLE_EVENT_STR : DISABLE_EVENT_STR, 1097 1097 enable_data->file->event_call->class->system, 1098 - ftrace_event_name(enable_data->file->event_call)); 1098 + trace_event_name(enable_data->file->event_call)); 1099 1099 1100 1100 if (data->count == -1) 1101 1101 seq_puts(m, ":unlimited"); ··· 1159 1159 1160 1160 static int 1161 1161 event_enable_trigger_func(struct event_command *cmd_ops, 1162 - struct ftrace_event_file *file, 1162 + struct trace_event_file *file, 1163 1163 char *glob, char *cmd, char *param) 1164 1164 { 1165 - struct ftrace_event_file *event_enable_file; 1165 + struct trace_event_file *event_enable_file; 1166 1166 struct enable_trigger_data *enable_data; 1167 1167 struct event_trigger_data *trigger_data; 1168 1168 struct event_trigger_ops *trigger_ops; ··· 1294 1294 static int event_enable_register_trigger(char *glob, 1295 1295 struct event_trigger_ops *ops, 1296 1296 struct event_trigger_data *data, 1297 - struct ftrace_event_file *file) 1297 + struct trace_event_file *file) 1298 1298 { 1299 1299 struct enable_trigger_data *enable_data = data->private_data; 1300 1300 struct enable_trigger_data *test_enable_data; ··· 1331 1331 static void event_enable_unregister_trigger(char *glob, 1332 1332 struct event_trigger_ops *ops, 1333 1333 struct event_trigger_data *test, 1334 - struct ftrace_event_file *file) 1334 + struct trace_event_file *file) 1335 1335 { 1336 1336 struct enable_trigger_data *test_enable_data = test->private_data; 1337 1337 struct enable_trigger_data *enable_data;
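The enable/disable triggers above ultimately reduce to one bit flip on the target file; condensed from event_enable_probe() with the renamed flag (the helper name is made up). The user-visible syntax, echoing enable_event:<system>:<event> into an event's trigger file, is untouched by this rename.

/* Condensed from event_enable_probe(): enabling clears the
 * soft-disable bit, disabling sets it. */
static void flip_soft_state(struct trace_event_file *file, bool enable)
{
	if (enable)
		clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
	else
		set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
}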
+5 -5
kernel/trace/trace_export.c
··· 125 125 #undef FTRACE_ENTRY 126 126 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ 127 127 static int __init \ 128 - ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ 128 + ftrace_define_fields_##name(struct trace_event_call *event_call) \ 129 129 { \ 130 130 struct struct_name field; \ 131 131 int ret; \ ··· 163 163 #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ 164 164 regfn) \ 165 165 \ 166 - struct ftrace_event_class __refdata event_class_ftrace_##call = { \ 166 + struct trace_event_class __refdata event_class_ftrace_##call = { \ 167 167 .system = __stringify(TRACE_SYSTEM), \ 168 168 .define_fields = ftrace_define_fields_##call, \ 169 169 .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ 170 170 .reg = regfn, \ 171 171 }; \ 172 172 \ 173 - struct ftrace_event_call __used event_##call = { \ 173 + struct trace_event_call __used event_##call = { \ 174 174 .class = &event_class_ftrace_##call, \ 175 175 { \ 176 176 .name = #call, \ ··· 179 179 .print_fmt = print, \ 180 180 .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ 181 181 }; \ 182 - struct ftrace_event_call __used \ 182 + struct trace_event_call __used \ 183 183 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; 184 184 185 185 #undef FTRACE_ENTRY ··· 187 187 FTRACE_ENTRY_REG(call, struct_name, etype, \ 188 188 PARAMS(tstruct), PARAMS(print), filter, NULL) 189 189 190 - int ftrace_event_is_function(struct ftrace_event_call *call) 190 + int ftrace_event_is_function(struct trace_event_call *call) 191 191 { 192 192 return call == &event_function; 193 193 }
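To make the macro above concrete, FTRACE_ENTRY_REG(foo, ...) now emits roughly the following, with foo, etype, print and regfn standing in for the macro arguments and the field callbacks elided:

struct trace_event_class __refdata event_class_ftrace_foo = {
	.system		= __stringify(TRACE_SYSTEM),
	.define_fields	= ftrace_define_fields_foo,
	.fields		= LIST_HEAD_INIT(event_class_ftrace_foo.fields),
	.reg		= regfn,
};

struct trace_event_call __used event_foo = {
	.class		= &event_class_ftrace_foo,
	{
		.name	= "foo",
	},
	.event.type	= etype,
	.print_fmt	= print,
	.flags		= TRACE_EVENT_FL_IGNORE_ENABLE,
};

struct trace_event_call __used
__attribute__((section("_ftrace_events"))) *__event_foo = &event_foo;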
+4 -4
kernel/trace/trace_functions_graph.c
··· 278 278 unsigned long flags, 279 279 int pc) 280 280 { 281 - struct ftrace_event_call *call = &event_funcgraph_entry; 281 + struct trace_event_call *call = &event_funcgraph_entry; 282 282 struct ring_buffer_event *event; 283 283 struct ring_buffer *buffer = tr->trace_buffer.buffer; 284 284 struct ftrace_graph_ent_entry *entry; ··· 393 393 unsigned long flags, 394 394 int pc) 395 395 { 396 - struct ftrace_event_call *call = &event_funcgraph_exit; 396 + struct trace_event_call *call = &event_funcgraph_exit; 397 397 struct ring_buffer_event *event; 398 398 struct ring_buffer *buffer = tr->trace_buffer.buffer; 399 399 struct ftrace_graph_ret_entry *entry; ··· 1454 1454 { 1455 1455 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1); 1456 1456 1457 - if (!register_ftrace_event(&graph_trace_entry_event)) { 1457 + if (!register_trace_event(&graph_trace_entry_event)) { 1458 1458 pr_warning("Warning: could not register graph trace events\n"); 1459 1459 return 1; 1460 1460 } 1461 1461 1462 - if (!register_ftrace_event(&graph_trace_ret_event)) { 1462 + if (!register_trace_event(&graph_trace_ret_event)) { 1463 1463 pr_warning("Warning: could not register graph trace events\n"); 1464 1464 return 1; 1465 1465 }
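The two registrations above follow the generic trace_event output pattern. A hedged sketch of the same dance for a hypothetical event (all my_* names are invented): register_trace_event() assigns a type number when .type is 0 and, per the trace_output.c documentation further down, returns zero on error.

static enum print_line_t my_trace_output(struct trace_iterator *iter,
					 int flags,
					 struct trace_event *event)
{
	trace_seq_puts(&iter->seq, "my_event fired\n");
	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions my_funcs = {
	.trace	= my_trace_output,
};

static struct trace_event my_trace_event = {
	.type	= 0,	/* 0: have the core assign a type */
	.funcs	= &my_funcs,
};

static int __init my_event_init(void)
{
	if (!register_trace_event(&my_trace_event)) {
		pr_warn("could not register my_trace_event\n");
		return 1;
	}
	return 0;
}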
+35 -35
kernel/trace/trace_kprobe.c
··· 348 348 struct trace_kprobe *tk; 349 349 350 350 list_for_each_entry(tk, &probe_list, list) 351 - if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && 351 + if (strcmp(trace_event_name(&tk->tp.call), event) == 0 && 352 352 strcmp(tk->tp.call.class->system, group) == 0) 353 353 return tk; 354 354 return NULL; ··· 359 359 * if the file is NULL, enable "perf" handler, or enable "trace" handler. 360 360 */ 361 361 static int 362 - enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) 362 + enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) 363 363 { 364 364 int ret = 0; 365 365 ··· 394 394 * if the file is NULL, disable "perf" handler, or disable "trace" handler. 395 395 */ 396 396 static int 397 - disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) 397 + disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) 398 398 { 399 399 struct event_file_link *link = NULL; 400 400 int wait = 0; ··· 523 523 mutex_lock(&probe_lock); 524 524 525 525 /* Delete old (same name) event if exist */ 526 - old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), 526 + old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call), 527 527 tk->tp.call.class->system); 528 528 if (old_tk) { 529 529 ret = unregister_trace_kprobe(old_tk); ··· 572 572 if (ret) 573 573 pr_warning("Failed to re-register probe %s on" 574 574 "%s: %d\n", 575 - ftrace_event_name(&tk->tp.call), 575 + trace_event_name(&tk->tp.call), 576 576 mod->name, ret); 577 577 } 578 578 } ··· 829 829 830 830 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); 831 831 seq_printf(m, ":%s/%s", tk->tp.call.class->system, 832 - ftrace_event_name(&tk->tp.call)); 832 + trace_event_name(&tk->tp.call)); 833 833 834 834 if (!tk->symbol) 835 835 seq_printf(m, " 0x%p", tk->rp.kp.addr); ··· 888 888 struct trace_kprobe *tk = v; 889 889 890 890 seq_printf(m, " %-44s %15lu %15lu\n", 891 - ftrace_event_name(&tk->tp.call), tk->nhit, 891 + trace_event_name(&tk->tp.call), tk->nhit, 892 892 tk->rp.kp.nmissed); 893 893 894 894 return 0; ··· 917 917 /* Kprobe handler */ 918 918 static nokprobe_inline void 919 919 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, 920 - struct ftrace_event_file *ftrace_file) 920 + struct trace_event_file *trace_file) 921 921 { 922 922 struct kprobe_trace_entry_head *entry; 923 923 struct ring_buffer_event *event; 924 924 struct ring_buffer *buffer; 925 925 int size, dsize, pc; 926 926 unsigned long irq_flags; 927 - struct ftrace_event_call *call = &tk->tp.call; 927 + struct trace_event_call *call = &tk->tp.call; 928 928 929 - WARN_ON(call != ftrace_file->event_call); 929 + WARN_ON(call != trace_file->event_call); 930 930 931 - if (ftrace_trigger_soft_disabled(ftrace_file)) 931 + if (trace_trigger_soft_disabled(trace_file)) 932 932 return; 933 933 934 934 local_save_flags(irq_flags); ··· 937 937 dsize = __get_data_size(&tk->tp, regs); 938 938 size = sizeof(*entry) + tk->tp.size + dsize; 939 939 940 - event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, 940 + event = trace_event_buffer_lock_reserve(&buffer, trace_file, 941 941 call->event.type, 942 942 size, irq_flags, pc); 943 943 if (!event) ··· 947 947 entry->ip = (unsigned long)tk->rp.kp.addr; 948 948 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 949 949 950 - event_trigger_unlock_commit_regs(ftrace_file, buffer, event, 950 + event_trigger_unlock_commit_regs(trace_file, buffer, event, 951 951 entry, irq_flags, pc, regs); 952 952 } 953 953 
··· 965 965 static nokprobe_inline void 966 966 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 967 967 struct pt_regs *regs, 968 - struct ftrace_event_file *ftrace_file) 968 + struct trace_event_file *trace_file) 969 969 { 970 970 struct kretprobe_trace_entry_head *entry; 971 971 struct ring_buffer_event *event; 972 972 struct ring_buffer *buffer; 973 973 int size, pc, dsize; 974 974 unsigned long irq_flags; 975 - struct ftrace_event_call *call = &tk->tp.call; 975 + struct trace_event_call *call = &tk->tp.call; 976 976 977 - WARN_ON(call != ftrace_file->event_call); 977 + WARN_ON(call != trace_file->event_call); 978 978 979 - if (ftrace_trigger_soft_disabled(ftrace_file)) 979 + if (trace_trigger_soft_disabled(trace_file)) 980 980 return; 981 981 982 982 local_save_flags(irq_flags); ··· 985 985 dsize = __get_data_size(&tk->tp, regs); 986 986 size = sizeof(*entry) + tk->tp.size + dsize; 987 987 988 - event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, 988 + event = trace_event_buffer_lock_reserve(&buffer, trace_file, 989 989 call->event.type, 990 990 size, irq_flags, pc); 991 991 if (!event) ··· 996 996 entry->ret_ip = (unsigned long)ri->ret_addr; 997 997 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); 998 998 999 - event_trigger_unlock_commit_regs(ftrace_file, buffer, event, 999 + event_trigger_unlock_commit_regs(trace_file, buffer, event, 1000 1000 entry, irq_flags, pc, regs); 1001 1001 } 1002 1002 ··· 1025 1025 field = (struct kprobe_trace_entry_head *)iter->ent; 1026 1026 tp = container_of(event, struct trace_probe, call.event); 1027 1027 1028 - trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); 1028 + trace_seq_printf(s, "%s: (", trace_event_name(&tp->call)); 1029 1029 1030 1030 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) 1031 1031 goto out; ··· 1056 1056 field = (struct kretprobe_trace_entry_head *)iter->ent; 1057 1057 tp = container_of(event, struct trace_probe, call.event); 1058 1058 1059 - trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); 1059 + trace_seq_printf(s, "%s: (", trace_event_name(&tp->call)); 1060 1060 1061 1061 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) 1062 1062 goto out; ··· 1081 1081 } 1082 1082 1083 1083 1084 - static int kprobe_event_define_fields(struct ftrace_event_call *event_call) 1084 + static int kprobe_event_define_fields(struct trace_event_call *event_call) 1085 1085 { 1086 1086 int ret, i; 1087 1087 struct kprobe_trace_entry_head field; ··· 1104 1104 return 0; 1105 1105 } 1106 1106 1107 - static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) 1107 + static int kretprobe_event_define_fields(struct trace_event_call *event_call) 1108 1108 { 1109 1109 int ret, i; 1110 1110 struct kretprobe_trace_entry_head field; ··· 1134 1134 static void 1135 1135 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) 1136 1136 { 1137 - struct ftrace_event_call *call = &tk->tp.call; 1137 + struct trace_event_call *call = &tk->tp.call; 1138 1138 struct bpf_prog *prog = call->prog; 1139 1139 struct kprobe_trace_entry_head *entry; 1140 1140 struct hlist_head *head; ··· 1169 1169 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 1170 1170 struct pt_regs *regs) 1171 1171 { 1172 - struct ftrace_event_call *call = &tk->tp.call; 1172 + struct trace_event_call *call = &tk->tp.call; 1173 1173 struct bpf_prog *prog = call->prog; 1174 1174 struct kretprobe_trace_entry_head *entry; 1175 1175 
struct hlist_head *head; ··· 1206 1206 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe 1207 1207 * lockless, but we can't race with this __init function. 1208 1208 */ 1209 - static int kprobe_register(struct ftrace_event_call *event, 1209 + static int kprobe_register(struct trace_event_call *event, 1210 1210 enum trace_reg type, void *data) 1211 1211 { 1212 1212 struct trace_kprobe *tk = (struct trace_kprobe *)event->data; 1213 - struct ftrace_event_file *file = data; 1213 + struct trace_event_file *file = data; 1214 1214 1215 1215 switch (type) { 1216 1216 case TRACE_REG_REGISTER: ··· 1276 1276 1277 1277 static int register_kprobe_event(struct trace_kprobe *tk) 1278 1278 { 1279 - struct ftrace_event_call *call = &tk->tp.call; 1279 + struct trace_event_call *call = &tk->tp.call; 1280 1280 int ret; 1281 1281 1282 - /* Initialize ftrace_event_call */ 1282 + /* Initialize trace_event_call */ 1283 1283 INIT_LIST_HEAD(&call->class->fields); 1284 1284 if (trace_kprobe_is_return(tk)) { 1285 1285 call->event.funcs = &kretprobe_funcs; ··· 1290 1290 } 1291 1291 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) 1292 1292 return -ENOMEM; 1293 - ret = register_ftrace_event(&call->event); 1293 + ret = register_trace_event(&call->event); 1294 1294 if (!ret) { 1295 1295 kfree(call->print_fmt); 1296 1296 return -ENODEV; ··· 1301 1301 ret = trace_add_event_call(call); 1302 1302 if (ret) { 1303 1303 pr_info("Failed to register kprobe event: %s\n", 1304 - ftrace_event_name(call)); 1304 + trace_event_name(call)); 1305 1305 kfree(call->print_fmt); 1306 - unregister_ftrace_event(&call->event); 1306 + unregister_trace_event(&call->event); 1307 1307 } 1308 1308 return ret; 1309 1309 } ··· 1364 1364 return a1 + a2 + a3 + a4 + a5 + a6; 1365 1365 } 1366 1366 1367 - static struct ftrace_event_file * 1367 + static struct trace_event_file * 1368 1368 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) 1369 1369 { 1370 - struct ftrace_event_file *file; 1370 + struct trace_event_file *file; 1371 1371 1372 1372 list_for_each_entry(file, &tr->events, list) 1373 1373 if (file->event_call == &tk->tp.call) ··· 1385 1385 int ret, warn = 0; 1386 1386 int (*target)(int, int, int, int, int, int); 1387 1387 struct trace_kprobe *tk; 1388 - struct ftrace_event_file *file; 1388 + struct trace_event_file *file; 1389 1389 1390 1390 if (tracing_is_disabled()) 1391 1391 return -ENODEV;
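Stripped of the kprobe specifics, the tracing fast path above has one fixed shape under the renamed helpers; a condensed restatement, variables as in __kprobe_trace_func():

	/* Bail early if triggers have soft-disabled the event. */
	if (trace_trigger_soft_disabled(trace_file))
		return;

	/* Reserve space in the ring buffer for this event file... */
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size,
						irq_flags, pc);
	if (!event)
		return;

	/* ...fill the entry... */
	entry = ring_buffer_event_data(event);

	/* ...and commit, letting any post triggers run. */
	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);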
+2 -2
kernel/trace/trace_mmiotrace.c
··· 298 298 struct trace_array_cpu *data, 299 299 struct mmiotrace_rw *rw) 300 300 { 301 - struct ftrace_event_call *call = &event_mmiotrace_rw; 301 + struct trace_event_call *call = &event_mmiotrace_rw; 302 302 struct ring_buffer *buffer = tr->trace_buffer.buffer; 303 303 struct ring_buffer_event *event; 304 304 struct trace_mmiotrace_rw *entry; ··· 328 328 struct trace_array_cpu *data, 329 329 struct mmiotrace_map *map) 330 330 { 331 - struct ftrace_event_call *call = &event_mmiotrace_map; 331 + struct trace_event_call *call = &event_mmiotrace_map; 332 332 struct ring_buffer *buffer = tr->trace_buffer.buffer; 333 333 struct ring_buffer_event *event; 334 334 struct trace_mmiotrace_map *entry;
+39 -39
kernel/trace/trace_output.c
··· 60 60 } 61 61 62 62 const char * 63 - ftrace_print_flags_seq(struct trace_seq *p, const char *delim, 64 - unsigned long flags, 65 - const struct trace_print_flags *flag_array) 63 + trace_print_flags_seq(struct trace_seq *p, const char *delim, 64 + unsigned long flags, 65 + const struct trace_print_flags *flag_array) 66 66 { 67 67 unsigned long mask; 68 68 const char *str; ··· 95 95 96 96 return ret; 97 97 } 98 - EXPORT_SYMBOL(ftrace_print_flags_seq); 98 + EXPORT_SYMBOL(trace_print_flags_seq); 99 99 100 100 const char * 101 - ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, 102 - const struct trace_print_flags *symbol_array) 101 + trace_print_symbols_seq(struct trace_seq *p, unsigned long val, 102 + const struct trace_print_flags *symbol_array) 103 103 { 104 104 int i; 105 105 const char *ret = trace_seq_buffer_ptr(p); ··· 120 120 121 121 return ret; 122 122 } 123 - EXPORT_SYMBOL(ftrace_print_symbols_seq); 123 + EXPORT_SYMBOL(trace_print_symbols_seq); 124 124 125 125 #if BITS_PER_LONG == 32 126 126 const char * 127 - ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, 127 + trace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val, 128 128 const struct trace_print_flags_u64 *symbol_array) 129 129 { 130 130 int i; ··· 146 146 147 147 return ret; 148 148 } 149 - EXPORT_SYMBOL(ftrace_print_symbols_seq_u64); 149 + EXPORT_SYMBOL(trace_print_symbols_seq_u64); 150 150 #endif 151 151 152 152 const char * 153 - ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, 154 - unsigned int bitmask_size) 153 + trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr, 154 + unsigned int bitmask_size) 155 155 { 156 156 const char *ret = trace_seq_buffer_ptr(p); 157 157 ··· 160 160 161 161 return ret; 162 162 } 163 - EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq); 163 + EXPORT_SYMBOL_GPL(trace_print_bitmask_seq); 164 164 165 165 const char * 166 - ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) 166 + trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) 167 167 { 168 168 int i; 169 169 const char *ret = trace_seq_buffer_ptr(p); ··· 175 175 176 176 return ret; 177 177 } 178 - EXPORT_SYMBOL(ftrace_print_hex_seq); 178 + EXPORT_SYMBOL(trace_print_hex_seq); 179 179 180 180 const char * 181 - ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count, 182 - size_t el_size) 181 + trace_print_array_seq(struct trace_seq *p, const void *buf, int count, 182 + size_t el_size) 183 183 { 184 184 const char *ret = trace_seq_buffer_ptr(p); 185 185 const char *prefix = ""; ··· 220 220 221 221 return ret; 222 222 } 223 - EXPORT_SYMBOL(ftrace_print_array_seq); 223 + EXPORT_SYMBOL(trace_print_array_seq); 224 224 225 - int ftrace_raw_output_prep(struct trace_iterator *iter, 226 - struct trace_event *trace_event) 225 + int trace_raw_output_prep(struct trace_iterator *iter, 226 + struct trace_event *trace_event) 227 227 { 228 - struct ftrace_event_call *event; 228 + struct trace_event_call *event; 229 229 struct trace_seq *s = &iter->seq; 230 230 struct trace_seq *p = &iter->tmp_seq; 231 231 struct trace_entry *entry; 232 232 233 - event = container_of(trace_event, struct ftrace_event_call, event); 233 + event = container_of(trace_event, struct trace_event_call, event); 234 234 entry = iter->ent; 235 235 236 236 if (entry->type != event->event.type) { ··· 239 239 } 240 240 241 241 trace_seq_init(p); 242 - trace_seq_printf(s, "%s: ", ftrace_event_name(event)); 242 + trace_seq_printf(s, "%s: ", 
trace_event_name(event)); 243 243 244 244 return trace_handle_return(s); 245 245 } 246 - EXPORT_SYMBOL(ftrace_raw_output_prep); 246 + EXPORT_SYMBOL(trace_raw_output_prep); 247 247 248 - static int ftrace_output_raw(struct trace_iterator *iter, char *name, 249 - char *fmt, va_list ap) 248 + static int trace_output_raw(struct trace_iterator *iter, char *name, 249 + char *fmt, va_list ap) 250 250 { 251 251 struct trace_seq *s = &iter->seq; 252 252 ··· 256 256 return trace_handle_return(s); 257 257 } 258 258 259 - int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) 259 + int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) 260 260 { 261 261 va_list ap; 262 262 int ret; 263 263 264 264 va_start(ap, fmt); 265 - ret = ftrace_output_raw(iter, name, fmt, ap); 265 + ret = trace_output_raw(iter, name, fmt, ap); 266 266 va_end(ap); 267 267 268 268 return ret; 269 269 } 270 - EXPORT_SYMBOL_GPL(ftrace_output_call); 270 + EXPORT_SYMBOL_GPL(trace_output_call); 271 271 272 272 #ifdef CONFIG_KRETPROBES 273 273 static inline const char *kretprobed(const char *name) ··· 675 675 } 676 676 677 677 /* Did we used up all 65 thousand events??? */ 678 - if ((last + 1) > FTRACE_MAX_EVENT) 678 + if ((last + 1) > TRACE_EVENT_TYPE_MAX) 679 679 return 0; 680 680 681 681 *list = &e->list; ··· 693 693 } 694 694 695 695 /** 696 - * register_ftrace_event - register output for an event type 696 + * register_trace_event - register output for an event type 697 697 * @event: the event type to register 698 698 * 699 699 * Event types are stored in a hash and this hash is used to ··· 707 707 * 708 708 * Returns the event type number or zero on error. 709 709 */ 710 - int register_ftrace_event(struct trace_event *event) 710 + int register_trace_event(struct trace_event *event) 711 711 { 712 712 unsigned key; 713 713 int ret = 0; ··· 725 725 if (!event->type) { 726 726 struct list_head *list = NULL; 727 727 728 - if (next_event_type > FTRACE_MAX_EVENT) { 728 + if (next_event_type > TRACE_EVENT_TYPE_MAX) { 729 729 730 730 event->type = trace_search_list(&list); 731 731 if (!event->type) ··· 771 771 772 772 return ret; 773 773 } 774 - EXPORT_SYMBOL_GPL(register_ftrace_event); 774 + EXPORT_SYMBOL_GPL(register_trace_event); 775 775 776 776 /* 777 777 * Used by module code with the trace_event_sem held for write. 778 778 */ 779 - int __unregister_ftrace_event(struct trace_event *event) 779 + int __unregister_trace_event(struct trace_event *event) 780 780 { 781 781 hlist_del(&event->node); 782 782 list_del(&event->list); ··· 784 784 } 785 785 786 786 /** 787 - * unregister_ftrace_event - remove a no longer used event 787 + * unregister_trace_event - remove a no longer used event 788 788 * @event: the event to remove 789 789 */ 790 - int unregister_ftrace_event(struct trace_event *event) 790 + int unregister_trace_event(struct trace_event *event) 791 791 { 792 792 down_write(&trace_event_sem); 793 - __unregister_ftrace_event(event); 793 + __unregister_trace_event(event); 794 794 up_write(&trace_event_sem); 795 795 796 796 return 0; 797 797 } 798 - EXPORT_SYMBOL_GPL(unregister_ftrace_event); 798 + EXPORT_SYMBOL_GPL(unregister_trace_event); 799 799 800 800 /* 801 801 * Standard events ··· 1243 1243 for (i = 0; events[i]; i++) { 1244 1244 event = events[i]; 1245 1245 1246 - ret = register_ftrace_event(event); 1246 + ret = register_trace_event(event); 1247 1247 if (!ret) { 1248 1248 printk(KERN_WARNING "event %d failed to register\n", 1249 1249 event->type);
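These exported helpers are what the __print_flags()/__print_symbolic() macros in TP_printk() resolve to after the rename. A small usage sketch of trace_print_symbols_seq(); the table and names are invented, and p would be the iterator's tmp_seq, as in trace_raw_output_prep() above:

static const struct trace_print_flags my_state_names[] = {
	{ 1,	"RUNNING"	},
	{ 2,	"SLEEPING"	},
	{ -1,	NULL		},	/* a NULL name ends the scan */
};

static const char *my_state_str(struct trace_seq *p, unsigned long state)
{
	return trace_print_symbols_seq(p, state, my_state_names);
}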
+1 -1
kernel/trace/trace_output.h
··· 32 32 trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); 33 33 34 34 /* used by module unregistering */ 35 - extern int __unregister_ftrace_event(struct trace_event *event); 35 + extern int __unregister_trace_event(struct trace_event *event); 36 36 extern struct rw_semaphore trace_event_sem; 37 37 38 38 #define SEQ_PUT_FIELD(s, x) \
+4 -4
kernel/trace/trace_probe.h
··· 272 272 273 273 struct trace_probe { 274 274 unsigned int flags; /* For TP_FLAG_* */ 275 - struct ftrace_event_class class; 276 - struct ftrace_event_call call; 275 + struct trace_event_class class; 276 + struct trace_event_call call; 277 277 struct list_head files; 278 278 ssize_t size; /* trace entry size */ 279 279 unsigned int nr_args; ··· 281 281 }; 282 282 283 283 struct event_file_link { 284 - struct ftrace_event_file *file; 284 + struct trace_event_file *file; 285 285 struct list_head list; 286 286 }; 287 287 ··· 314 314 } 315 315 316 316 static inline struct event_file_link * 317 - find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) 317 + find_event_file_link(struct trace_probe *tp, struct trace_event_file *file) 318 318 { 319 319 struct event_file_link *link; 320 320
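Because struct trace_probe embeds its trace_event_call, and through it the struct trace_event, the print callbacks in trace_kprobe.c above can recover the probe with container_of(); spelled out as a helper (the name is invented):

static inline struct trace_probe *
trace_event_to_probe(struct trace_event *event)
{
	/* 'event' is &tp->call.event, per the embedding above. */
	return container_of(event, struct trace_probe, call.event);
}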
+2 -2
kernel/trace/trace_sched_wakeup.c
··· 369 369 struct task_struct *next, 370 370 unsigned long flags, int pc) 371 371 { 372 - struct ftrace_event_call *call = &event_context_switch; 372 + struct trace_event_call *call = &event_context_switch; 373 373 struct ring_buffer *buffer = tr->trace_buffer.buffer; 374 374 struct ring_buffer_event *event; 375 375 struct ctx_switch_entry *entry; ··· 397 397 struct task_struct *curr, 398 398 unsigned long flags, int pc) 399 399 { 400 - struct ftrace_event_call *call = &event_wakeup; 400 + struct trace_event_call *call = &event_wakeup; 401 401 struct ring_buffer_event *event; 402 402 struct ctx_switch_entry *entry; 403 403 struct ring_buffer *buffer = tr->trace_buffer.buffer;
+36 -36
kernel/trace/trace_syscalls.c
··· 13 13 14 14 static DEFINE_MUTEX(syscall_trace_lock); 15 15 16 - static int syscall_enter_register(struct ftrace_event_call *event, 16 + static int syscall_enter_register(struct trace_event_call *event, 17 17 enum trace_reg type, void *data); 18 - static int syscall_exit_register(struct ftrace_event_call *event, 18 + static int syscall_exit_register(struct trace_event_call *event, 19 19 enum trace_reg type, void *data); 20 20 21 21 static struct list_head * 22 - syscall_get_enter_fields(struct ftrace_event_call *call) 22 + syscall_get_enter_fields(struct trace_event_call *call) 23 23 { 24 24 struct syscall_metadata *entry = call->data; 25 25 ··· 219 219 return pos; 220 220 } 221 221 222 - static int __init set_syscall_print_fmt(struct ftrace_event_call *call) 222 + static int __init set_syscall_print_fmt(struct trace_event_call *call) 223 223 { 224 224 char *print_fmt; 225 225 int len; ··· 244 244 return 0; 245 245 } 246 246 247 - static void __init free_syscall_print_fmt(struct ftrace_event_call *call) 247 + static void __init free_syscall_print_fmt(struct trace_event_call *call) 248 248 { 249 249 struct syscall_metadata *entry = call->data; 250 250 ··· 252 252 kfree(call->print_fmt); 253 253 } 254 254 255 - static int __init syscall_enter_define_fields(struct ftrace_event_call *call) 255 + static int __init syscall_enter_define_fields(struct trace_event_call *call) 256 256 { 257 257 struct syscall_trace_enter trace; 258 258 struct syscall_metadata *meta = call->data; ··· 275 275 return ret; 276 276 } 277 277 278 - static int __init syscall_exit_define_fields(struct ftrace_event_call *call) 278 + static int __init syscall_exit_define_fields(struct trace_event_call *call) 279 279 { 280 280 struct syscall_trace_exit trace; 281 281 int ret; ··· 293 293 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) 294 294 { 295 295 struct trace_array *tr = data; 296 - struct ftrace_event_file *ftrace_file; 296 + struct trace_event_file *trace_file; 297 297 struct syscall_trace_enter *entry; 298 298 struct syscall_metadata *sys_data; 299 299 struct ring_buffer_event *event; ··· 308 308 return; 309 309 310 310 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ 311 - ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); 312 - if (!ftrace_file) 311 + trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); 312 + if (!trace_file) 313 313 return; 314 314 315 - if (ftrace_trigger_soft_disabled(ftrace_file)) 315 + if (trace_trigger_soft_disabled(trace_file)) 316 316 return; 317 317 318 318 sys_data = syscall_nr_to_meta(syscall_nr); ··· 334 334 entry->nr = syscall_nr; 335 335 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); 336 336 337 - event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 337 + event_trigger_unlock_commit(trace_file, buffer, event, entry, 338 338 irq_flags, pc); 339 339 } 340 340 341 341 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) 342 342 { 343 343 struct trace_array *tr = data; 344 - struct ftrace_event_file *ftrace_file; 344 + struct trace_event_file *trace_file; 345 345 struct syscall_trace_exit *entry; 346 346 struct syscall_metadata *sys_data; 347 347 struct ring_buffer_event *event; ··· 355 355 return; 356 356 357 357 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ 358 - ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]); 359 - if (!ftrace_file) 358 + trace_file = 
rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]); 359 + if (!trace_file) 360 360 return; 361 361 362 - if (ftrace_trigger_soft_disabled(ftrace_file)) 362 + if (trace_trigger_soft_disabled(trace_file)) 363 363 return; 364 364 365 365 sys_data = syscall_nr_to_meta(syscall_nr); ··· 380 380 entry->nr = syscall_nr; 381 381 entry->ret = syscall_get_return_value(current, regs); 382 382 383 - event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 383 + event_trigger_unlock_commit(trace_file, buffer, event, entry, 384 384 irq_flags, pc); 385 385 } 386 386 387 - static int reg_event_syscall_enter(struct ftrace_event_file *file, 388 - struct ftrace_event_call *call) 387 + static int reg_event_syscall_enter(struct trace_event_file *file, 388 + struct trace_event_call *call) 389 389 { 390 390 struct trace_array *tr = file->tr; 391 391 int ret = 0; ··· 405 405 return ret; 406 406 } 407 407 408 - static void unreg_event_syscall_enter(struct ftrace_event_file *file, 409 - struct ftrace_event_call *call) 408 + static void unreg_event_syscall_enter(struct trace_event_file *file, 409 + struct trace_event_call *call) 410 410 { 411 411 struct trace_array *tr = file->tr; 412 412 int num; ··· 422 422 mutex_unlock(&syscall_trace_lock); 423 423 } 424 424 425 - static int reg_event_syscall_exit(struct ftrace_event_file *file, 426 - struct ftrace_event_call *call) 425 + static int reg_event_syscall_exit(struct trace_event_file *file, 426 + struct trace_event_call *call) 427 427 { 428 428 struct trace_array *tr = file->tr; 429 429 int ret = 0; ··· 443 443 return ret; 444 444 } 445 445 446 - static void unreg_event_syscall_exit(struct ftrace_event_file *file, 447 - struct ftrace_event_call *call) 446 + static void unreg_event_syscall_exit(struct trace_event_file *file, 447 + struct trace_event_call *call) 448 448 { 449 449 struct trace_array *tr = file->tr; 450 450 int num; ··· 460 460 mutex_unlock(&syscall_trace_lock); 461 461 } 462 462 463 - static int __init init_syscall_trace(struct ftrace_event_call *call) 463 + static int __init init_syscall_trace(struct trace_event_call *call) 464 464 { 465 465 int id; 466 466 int num; ··· 493 493 .trace = print_syscall_exit, 494 494 }; 495 495 496 - struct ftrace_event_class __refdata event_class_syscall_enter = { 496 + struct trace_event_class __refdata event_class_syscall_enter = { 497 497 .system = "syscalls", 498 498 .reg = syscall_enter_register, 499 499 .define_fields = syscall_enter_define_fields, ··· 501 501 .raw_init = init_syscall_trace, 502 502 }; 503 503 504 - struct ftrace_event_class __refdata event_class_syscall_exit = { 504 + struct trace_event_class __refdata event_class_syscall_exit = { 505 505 .system = "syscalls", 506 506 .reg = syscall_exit_register, 507 507 .define_fields = syscall_exit_define_fields, ··· 584 584 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 585 585 } 586 586 587 - static int perf_sysenter_enable(struct ftrace_event_call *call) 587 + static int perf_sysenter_enable(struct trace_event_call *call) 588 588 { 589 589 int ret = 0; 590 590 int num; ··· 605 605 return ret; 606 606 } 607 607 608 - static void perf_sysenter_disable(struct ftrace_event_call *call) 608 + static void perf_sysenter_disable(struct trace_event_call *call) 609 609 { 610 610 int num; 611 611 ··· 656 656 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 657 657 } 658 658 659 - static int perf_sysexit_enable(struct ftrace_event_call *call) 659 + static int perf_sysexit_enable(struct trace_event_call *call) 660 660 { 661 661 
int ret = 0; 662 662 int num; ··· 677 677 return ret; 678 678 } 679 679 680 - static void perf_sysexit_disable(struct ftrace_event_call *call) 680 + static void perf_sysexit_disable(struct trace_event_call *call) 681 681 { 682 682 int num; 683 683 ··· 693 693 694 694 #endif /* CONFIG_PERF_EVENTS */ 695 695 696 - static int syscall_enter_register(struct ftrace_event_call *event, 696 + static int syscall_enter_register(struct trace_event_call *event, 697 697 enum trace_reg type, void *data) 698 698 { 699 - struct ftrace_event_file *file = data; 699 + struct trace_event_file *file = data; 700 700 701 701 switch (type) { 702 702 case TRACE_REG_REGISTER: ··· 721 721 return 0; 722 722 } 723 723 724 - static int syscall_exit_register(struct ftrace_event_call *event, 724 + static int syscall_exit_register(struct trace_event_call *event, 725 725 enum trace_reg type, void *data) 726 726 { 727 - struct ftrace_event_file *file = data; 727 + struct trace_event_file *file = data; 728 728 729 729 switch (type) { 730 730 case TRACE_REG_REGISTER:
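Both syscall handlers above sample the per-trace_array file pointers under the tracepoint's rcu_read_lock_sched(); the read side, condensed straight from ftrace_syscall_enter():

	/* Inside the tp handler's rcu_read_lock_sched (__DO_TRACE). */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;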
+23 -23
kernel/trace/trace_uprobe.c
··· 293 293 struct trace_uprobe *tu; 294 294 295 295 list_for_each_entry(tu, &uprobe_list, list) 296 - if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 && 296 + if (strcmp(trace_event_name(&tu->tp.call), event) == 0 && 297 297 strcmp(tu->tp.call.class->system, group) == 0) 298 298 return tu; 299 299 ··· 323 323 mutex_lock(&uprobe_lock); 324 324 325 325 /* register as an event */ 326 - old_tu = find_probe_event(ftrace_event_name(&tu->tp.call), 326 + old_tu = find_probe_event(trace_event_name(&tu->tp.call), 327 327 tu->tp.call.class->system); 328 328 if (old_tu) { 329 329 /* delete old event */ ··· 600 600 int i; 601 601 602 602 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, 603 - ftrace_event_name(&tu->tp.call)); 603 + trace_event_name(&tu->tp.call)); 604 604 seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); 605 605 606 606 for (i = 0; i < tu->tp.nr_args; i++) ··· 651 651 struct trace_uprobe *tu = v; 652 652 653 653 seq_printf(m, " %s %-44s %15lu\n", tu->filename, 654 - ftrace_event_name(&tu->tp.call), tu->nhit); 654 + trace_event_name(&tu->tp.call), tu->nhit); 655 655 return 0; 656 656 } 657 657 ··· 770 770 static void __uprobe_trace_func(struct trace_uprobe *tu, 771 771 unsigned long func, struct pt_regs *regs, 772 772 struct uprobe_cpu_buffer *ucb, int dsize, 773 - struct ftrace_event_file *ftrace_file) 773 + struct trace_event_file *trace_file) 774 774 { 775 775 struct uprobe_trace_entry_head *entry; 776 776 struct ring_buffer_event *event; 777 777 struct ring_buffer *buffer; 778 778 void *data; 779 779 int size, esize; 780 - struct ftrace_event_call *call = &tu->tp.call; 780 + struct trace_event_call *call = &tu->tp.call; 781 781 782 - WARN_ON(call != ftrace_file->event_call); 782 + WARN_ON(call != trace_file->event_call); 783 783 784 784 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) 785 785 return; 786 786 787 - if (ftrace_trigger_soft_disabled(ftrace_file)) 787 + if (trace_trigger_soft_disabled(trace_file)) 788 788 return; 789 789 790 790 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); 791 791 size = esize + tu->tp.size + dsize; 792 - event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, 792 + event = trace_event_buffer_lock_reserve(&buffer, trace_file, 793 793 call->event.type, size, 0, 0); 794 794 if (!event) 795 795 return; ··· 806 806 807 807 memcpy(data, ucb->buf, tu->tp.size + dsize); 808 808 809 - event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0); 809 + event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0); 810 810 } 811 811 812 812 /* uprobe handler */ ··· 853 853 854 854 if (is_ret_probe(tu)) { 855 855 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", 856 - ftrace_event_name(&tu->tp.call), 856 + trace_event_name(&tu->tp.call), 857 857 entry->vaddr[1], entry->vaddr[0]); 858 858 data = DATAOF_TRACE_ENTRY(entry, true); 859 859 } else { 860 860 trace_seq_printf(s, "%s: (0x%lx)", 861 - ftrace_event_name(&tu->tp.call), 861 + trace_event_name(&tu->tp.call), 862 862 entry->vaddr[0]); 863 863 data = DATAOF_TRACE_ENTRY(entry, false); 864 864 } ··· 881 881 struct mm_struct *mm); 882 882 883 883 static int 884 - probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, 884 + probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file, 885 885 filter_func_t filter) 886 886 { 887 887 bool enabled = trace_probe_is_enabled(&tu->tp); ··· 938 938 } 939 939 940 940 static void 941 - probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file) 941 + probe_event_disable(struct 
trace_uprobe *tu, struct trace_event_file *file) 942 942 { 943 943 if (!trace_probe_is_enabled(&tu->tp)) 944 944 return; ··· 967 967 uprobe_buffer_disable(); 968 968 } 969 969 970 - static int uprobe_event_define_fields(struct ftrace_event_call *event_call) 970 + static int uprobe_event_define_fields(struct trace_event_call *event_call) 971 971 { 972 972 int ret, i, size; 973 973 struct uprobe_trace_entry_head field; ··· 1093 1093 unsigned long func, struct pt_regs *regs, 1094 1094 struct uprobe_cpu_buffer *ucb, int dsize) 1095 1095 { 1096 - struct ftrace_event_call *call = &tu->tp.call; 1096 + struct trace_event_call *call = &tu->tp.call; 1097 1097 struct uprobe_trace_entry_head *entry; 1098 1098 struct hlist_head *head; 1099 1099 void *data; ··· 1159 1159 #endif /* CONFIG_PERF_EVENTS */ 1160 1160 1161 1161 static int 1162 - trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, 1162 + trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, 1163 1163 void *data) 1164 1164 { 1165 1165 struct trace_uprobe *tu = event->data; 1166 - struct ftrace_event_file *file = data; 1166 + struct trace_event_file *file = data; 1167 1167 1168 1168 switch (type) { 1169 1169 case TRACE_REG_REGISTER: ··· 1272 1272 1273 1273 static int register_uprobe_event(struct trace_uprobe *tu) 1274 1274 { 1275 - struct ftrace_event_call *call = &tu->tp.call; 1275 + struct trace_event_call *call = &tu->tp.call; 1276 1276 int ret; 1277 1277 1278 - /* Initialize ftrace_event_call */ 1278 + /* Initialize trace_event_call */ 1279 1279 INIT_LIST_HEAD(&call->class->fields); 1280 1280 call->event.funcs = &uprobe_funcs; 1281 1281 call->class->define_fields = uprobe_event_define_fields; ··· 1283 1283 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) 1284 1284 return -ENOMEM; 1285 1285 1286 - ret = register_ftrace_event(&call->event); 1286 + ret = register_trace_event(&call->event); 1287 1287 if (!ret) { 1288 1288 kfree(call->print_fmt); 1289 1289 return -ENODEV; ··· 1295 1295 1296 1296 if (ret) { 1297 1297 pr_info("Failed to register uprobe event: %s\n", 1298 - ftrace_event_name(call)); 1298 + trace_event_name(call)); 1299 1299 kfree(call->print_fmt); 1300 - unregister_ftrace_event(&call->event); 1300 + unregister_trace_event(&call->event); 1301 1301 } 1302 1302 1303 1303 return ret;
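register_uprobe_event() above and register_kprobe_event() earlier share one dynamic-event registration dance; a generic reduction (function name invented, init details elided):

static int register_my_dynamic_event(struct trace_event_call *call)
{
	int ret;

	INIT_LIST_HEAD(&call->class->fields);
	/* ...set call->event.funcs, ->define_fields, print_fmt... */

	if (!register_trace_event(&call->event))	/* 0 means failure */
		return -ENODEV;

	ret = trace_add_event_call(call);		/* expose in tracefs */
	if (ret)
		unregister_trace_event(&call->event);	/* roll back */
	return ret;
}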
+1 -1
mm/debug.c
··· 7 7 8 8 #include <linux/kernel.h> 9 9 #include <linux/mm.h> 10 - #include <linux/ftrace_event.h> 10 + #include <linux/trace_events.h> 11 11 #include <linux/memcontrol.h> 12 12 13 13 static const struct trace_print_flags pageflag_names[] = {
+2 -2
tools/perf/util/scripting-engines/trace-event-perl.c
··· 55 55 56 56 INTERP my_perl; 57 57 58 - #define FTRACE_MAX_EVENT \ 58 + #define TRACE_EVENT_TYPE_MAX \ 59 59 ((1 << (sizeof(unsigned short) * 8)) - 1) 60 60 61 - static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT); 61 + static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX); 62 62 63 63 extern struct scripting_context *scripting_context; 64 64
+2 -2
tools/perf/util/scripting-engines/trace-event-python.c
··· 44 44 45 45 PyMODINIT_FUNC initperf_trace_context(void); 46 46 47 - #define FTRACE_MAX_EVENT \ 47 + #define TRACE_EVENT_TYPE_MAX \ 48 48 ((1 << (sizeof(unsigned short) * 8)) - 1) 49 49 50 - static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT); 50 + static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX); 51 51 52 52 #define MAX_FIELDS 64 53 53 #define N_COMMON_FIELDS 7
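For reference, both scripting engines size the events_defined bitmap the same way: event type ids travel in an unsigned short, so TRACE_EVENT_TYPE_MAX evaluates to (1 << 16) - 1 = 65535, the same ceiling the "Did we used up all 65 thousand events???" check in trace_output.c above guards.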