Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing: Rename ftrace_event_{call,class} to trace_event_{call,class}

The name "ftrace" really refers to the function hook infrastructure. It
is not about the trace_events. The structures ftrace_event_call and
ftrace_event_class have nothing to do with the function hooks, and are
really trace_event structures. Rename ftrace_event_* to trace_event_*.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

+172 -172
+1 -1
include/linux/module.h
··· 336 336 const char **trace_bprintk_fmt_start; 337 337 #endif 338 338 #ifdef CONFIG_EVENT_TRACING 339 - struct ftrace_event_call **trace_events; 339 + struct trace_event_call **trace_events; 340 340 unsigned int num_trace_events; 341 341 struct trace_enum_map **trace_enums; 342 342 unsigned int num_trace_enums;
+1 -1
include/linux/perf_event.h
··· 483 483 void *overflow_handler_context; 484 484 485 485 #ifdef CONFIG_EVENT_TRACING 486 - struct ftrace_event_call *tp_event; 486 + struct trace_event_call *tp_event; 487 487 struct event_filter *filter; 488 488 #ifdef CONFIG_FUNCTION_TRACER 489 489 struct ftrace_ops ftrace_ops;
+6 -6
include/linux/syscalls.h
··· 111 111 #define __SC_STR_ADECL(t, a) #a 112 112 #define __SC_STR_TDECL(t, a) #t 113 113 114 - extern struct ftrace_event_class event_class_syscall_enter; 115 - extern struct ftrace_event_class event_class_syscall_exit; 114 + extern struct trace_event_class event_class_syscall_enter; 115 + extern struct trace_event_class event_class_syscall_exit; 116 116 extern struct trace_event_functions enter_syscall_print_funcs; 117 117 extern struct trace_event_functions exit_syscall_print_funcs; 118 118 119 119 #define SYSCALL_TRACE_ENTER_EVENT(sname) \ 120 120 static struct syscall_metadata __syscall_meta_##sname; \ 121 - static struct ftrace_event_call __used \ 121 + static struct trace_event_call __used \ 122 122 event_enter_##sname = { \ 123 123 .class = &event_class_syscall_enter, \ 124 124 { \ ··· 128 128 .data = (void *)&__syscall_meta_##sname,\ 129 129 .flags = TRACE_EVENT_FL_CAP_ANY, \ 130 130 }; \ 131 - static struct ftrace_event_call __used \ 131 + static struct trace_event_call __used \ 132 132 __attribute__((section("_ftrace_events"))) \ 133 133 *__event_enter_##sname = &event_enter_##sname; 134 134 135 135 #define SYSCALL_TRACE_EXIT_EVENT(sname) \ 136 136 static struct syscall_metadata __syscall_meta_##sname; \ 137 - static struct ftrace_event_call __used \ 137 + static struct trace_event_call __used \ 138 138 event_exit_##sname = { \ 139 139 .class = &event_class_syscall_exit, \ 140 140 { \ ··· 144 144 .data = (void *)&__syscall_meta_##sname,\ 145 145 .flags = TRACE_EVENT_FL_CAP_ANY, \ 146 146 }; \ 147 - static struct ftrace_event_call __used \ 147 + static struct trace_event_call __used \ 148 148 __attribute__((section("_ftrace_events"))) \ 149 149 *__event_exit_##sname = &event_exit_##sname; 150 150
+19 -19
include/linux/trace_events.h
··· 200 200 #endif 201 201 }; 202 202 203 - struct ftrace_event_call; 203 + struct trace_event_call; 204 204 205 - struct ftrace_event_class { 205 + struct trace_event_class { 206 206 const char *system; 207 207 void *probe; 208 208 #ifdef CONFIG_PERF_EVENTS 209 209 void *perf_probe; 210 210 #endif 211 - int (*reg)(struct ftrace_event_call *event, 211 + int (*reg)(struct trace_event_call *event, 212 212 enum trace_reg type, void *data); 213 - int (*define_fields)(struct ftrace_event_call *); 214 - struct list_head *(*get_fields)(struct ftrace_event_call *); 213 + int (*define_fields)(struct trace_event_call *); 214 + struct list_head *(*get_fields)(struct trace_event_call *); 215 215 struct list_head fields; 216 - int (*raw_init)(struct ftrace_event_call *); 216 + int (*raw_init)(struct trace_event_call *); 217 217 }; 218 218 219 - extern int trace_event_reg(struct ftrace_event_call *event, 219 + extern int trace_event_reg(struct trace_event_call *event, 220 220 enum trace_reg type, void *data); 221 221 222 222 struct ftrace_event_buffer { ··· 269 269 TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), 270 270 }; 271 271 272 - struct ftrace_event_call { 272 + struct trace_event_call { 273 273 struct list_head list; 274 - struct ftrace_event_class *class; 274 + struct trace_event_class *class; 275 275 union { 276 276 char *name; 277 277 /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ ··· 298 298 struct hlist_head __percpu *perf_events; 299 299 struct bpf_prog *prog; 300 300 301 - int (*perf_perm)(struct ftrace_event_call *, 301 + int (*perf_perm)(struct trace_event_call *, 302 302 struct perf_event *); 303 303 #endif 304 304 }; 305 305 306 306 static inline const char * 307 - ftrace_event_name(struct ftrace_event_call *call) 307 + ftrace_event_name(struct trace_event_call *call) 308 308 { 309 309 if (call->flags & TRACE_EVENT_FL_TRACEPOINT) 310 310 return call->tp ? 
call->tp->name : NULL; ··· 351 351 352 352 struct trace_event_file { 353 353 struct list_head list; 354 - struct ftrace_event_call *event_call; 354 + struct trace_event_call *event_call; 355 355 struct event_filter *filter; 356 356 struct dentry *dir; 357 357 struct trace_array *tr; ··· 388 388 early_initcall(trace_init_flags_##name); 389 389 390 390 #define __TRACE_EVENT_PERF_PERM(name, expr...) \ 391 - static int perf_perm_##name(struct ftrace_event_call *tp_event, \ 391 + static int perf_perm_##name(struct trace_event_call *tp_event, \ 392 392 struct perf_event *p_event) \ 393 393 { \ 394 394 return ({ expr; }); \ ··· 417 417 extern int filter_check_discard(struct trace_event_file *file, void *rec, 418 418 struct ring_buffer *buffer, 419 419 struct ring_buffer_event *event); 420 - extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec, 420 + extern int call_filter_check_discard(struct trace_event_call *call, void *rec, 421 421 struct ring_buffer *buffer, 422 422 struct ring_buffer_event *event); 423 423 extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, ··· 559 559 FILTER_TRACE_FN, 560 560 }; 561 561 562 - extern int trace_event_raw_init(struct ftrace_event_call *call); 563 - extern int trace_define_field(struct ftrace_event_call *call, const char *type, 562 + extern int trace_event_raw_init(struct trace_event_call *call); 563 + extern int trace_define_field(struct trace_event_call *call, const char *type, 564 564 const char *name, int offset, int size, 565 565 int is_signed, int filter_type); 566 - extern int trace_add_event_call(struct ftrace_event_call *call); 567 - extern int trace_remove_event_call(struct ftrace_event_call *call); 566 + extern int trace_add_event_call(struct trace_event_call *call); 567 + extern int trace_remove_event_call(struct trace_event_call *call); 568 568 569 569 #define is_signed_type(type) (((type)(-1)) < (type)1) 570 570 ··· 613 613 } 614 614 #endif 615 615 616 - #endif /* 
_LINUX_FTRACE_EVENT_H */ 616 + #endif /* _LINUX_TRACE_EVENT_H */
+11 -11
include/trace/perf.h
··· 5 5 * 6 6 * For those macros defined with TRACE_EVENT: 7 7 * 8 - * static struct ftrace_event_call event_<call>; 8 + * static struct trace_event_call event_<call>; 9 9 * 10 10 * static void ftrace_raw_event_<call>(void *__data, proto) 11 11 * { 12 12 * struct trace_event_file *trace_file = __data; 13 - * struct ftrace_event_call *event_call = trace_file->event_call; 13 + * struct trace_event_call *event_call = trace_file->event_call; 14 14 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; 15 15 * unsigned long eflags = trace_file->flags; 16 16 * enum event_trigger_type __tt = ETT_NONE; ··· 63 63 * 64 64 * static char print_fmt_<call>[] = <TP_printk>; 65 65 * 66 - * static struct ftrace_event_class __used event_class_<template> = { 66 + * static struct trace_event_class __used event_class_<template> = { 67 67 * .system = "<system>", 68 68 * .define_fields = ftrace_define_fields_<call>, 69 69 * .fields = LIST_HEAD_INIT(event_class_##call.fields), ··· 72 72 * .reg = trace_event_reg, 73 73 * }; 74 74 * 75 - * static struct ftrace_event_call event_<call> = { 75 + * static struct trace_event_call event_<call> = { 76 76 * .class = event_class_<template>, 77 77 * { 78 78 * .tp = &__tracepoint_<call>, ··· 83 83 * }; 84 84 * // its only safe to use pointers when doing linker tricks to 85 85 * // create an array. 
86 - * static struct ftrace_event_call __used 86 + * static struct trace_event_call __used 87 87 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; 88 88 * 89 89 */ ··· 213 213 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 214 214 _TRACE_PERF_PROTO(call, PARAMS(proto)); \ 215 215 static char print_fmt_##call[] = print; \ 216 - static struct ftrace_event_class __used __refdata event_class_##call = { \ 216 + static struct trace_event_class __used __refdata event_class_##call = { \ 217 217 .system = TRACE_SYSTEM_STRING, \ 218 218 .define_fields = ftrace_define_fields_##call, \ 219 219 .fields = LIST_HEAD_INIT(event_class_##call.fields),\ ··· 226 226 #undef DEFINE_EVENT 227 227 #define DEFINE_EVENT(template, call, proto, args) \ 228 228 \ 229 - static struct ftrace_event_call __used event_##call = { \ 229 + static struct trace_event_call __used event_##call = { \ 230 230 .class = &event_class_##template, \ 231 231 { \ 232 232 .tp = &__tracepoint_##call, \ ··· 235 235 .print_fmt = print_fmt_##template, \ 236 236 .flags = TRACE_EVENT_FL_TRACEPOINT, \ 237 237 }; \ 238 - static struct ftrace_event_call __used \ 238 + static struct trace_event_call __used \ 239 239 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 240 240 241 241 #undef DEFINE_EVENT_PRINT ··· 243 243 \ 244 244 static char print_fmt_##call[] = print; \ 245 245 \ 246 - static struct ftrace_event_call __used event_##call = { \ 246 + static struct trace_event_call __used event_##call = { \ 247 247 .class = &event_class_##template, \ 248 248 { \ 249 249 .tp = &__tracepoint_##call, \ ··· 252 252 .print_fmt = print_fmt_##call, \ 253 253 .flags = TRACE_EVENT_FL_TRACEPOINT, \ 254 254 }; \ 255 - static struct ftrace_event_call __used \ 255 + static struct trace_event_call __used \ 256 256 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 257 257 258 258 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) ··· 292 292 static 
notrace void \ 293 293 perf_trace_##call(void *__data, proto) \ 294 294 { \ 295 - struct ftrace_event_call *event_call = __data; \ 295 + struct trace_event_call *event_call = __data; \ 296 296 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 297 297 struct ftrace_raw_##call *entry; \ 298 298 struct pt_regs *__regs; \
+2 -2
include/trace/syscall.h
··· 29 29 const char **args; 30 30 struct list_head enter_fields; 31 31 32 - struct ftrace_event_call *enter_event; 33 - struct ftrace_event_call *exit_event; 32 + struct trace_event_call *enter_event; 33 + struct trace_event_call *exit_event; 34 34 }; 35 35 36 36 #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+3 -3
include/trace/trace_events.h
··· 101 101 char __data[0]; \ 102 102 }; \ 103 103 \ 104 - static struct ftrace_event_class event_class_##name; 104 + static struct trace_event_class event_class_##name; 105 105 106 106 #undef DEFINE_EVENT 107 107 #define DEFINE_EVENT(template, name, proto, args) \ 108 - static struct ftrace_event_call __used \ 108 + static struct trace_event_call __used \ 109 109 __attribute__((__aligned__(4))) event_##name 110 110 111 111 #undef DEFINE_EVENT_FN ··· 407 407 #undef DECLARE_EVENT_CLASS 408 408 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ 409 409 static int notrace __init \ 410 - ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ 410 + ftrace_define_fields_##call(struct trace_event_call *event_call) \ 411 411 { \ 412 412 struct ftrace_raw_##call field; \ 413 413 int ret; \
+6 -6
kernel/trace/trace.c
··· 311 311 } 312 312 EXPORT_SYMBOL_GPL(filter_check_discard); 313 313 314 - int call_filter_check_discard(struct ftrace_event_call *call, void *rec, 314 + int call_filter_check_discard(struct trace_event_call *call, void *rec, 315 315 struct ring_buffer *buffer, 316 316 struct ring_buffer_event *event) 317 317 { ··· 1761 1761 unsigned long ip, unsigned long parent_ip, unsigned long flags, 1762 1762 int pc) 1763 1763 { 1764 - struct ftrace_event_call *call = &event_function; 1764 + struct trace_event_call *call = &event_function; 1765 1765 struct ring_buffer *buffer = tr->trace_buffer.buffer; 1766 1766 struct ring_buffer_event *event; 1767 1767 struct ftrace_entry *entry; ··· 1796 1796 unsigned long flags, 1797 1797 int skip, int pc, struct pt_regs *regs) 1798 1798 { 1799 - struct ftrace_event_call *call = &event_kernel_stack; 1799 + struct trace_event_call *call = &event_kernel_stack; 1800 1800 struct ring_buffer_event *event; 1801 1801 struct stack_entry *entry; 1802 1802 struct stack_trace trace; ··· 1924 1924 void 1925 1925 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 1926 1926 { 1927 - struct ftrace_event_call *call = &event_user_stack; 1927 + struct trace_event_call *call = &event_user_stack; 1928 1928 struct ring_buffer_event *event; 1929 1929 struct userstack_entry *entry; 1930 1930 struct stack_trace trace; ··· 2130 2130 */ 2131 2131 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 2132 2132 { 2133 - struct ftrace_event_call *call = &event_bprint; 2133 + struct trace_event_call *call = &event_bprint; 2134 2134 struct ring_buffer_event *event; 2135 2135 struct ring_buffer *buffer; 2136 2136 struct trace_array *tr = &global_trace; ··· 2188 2188 __trace_array_vprintk(struct ring_buffer *buffer, 2189 2189 unsigned long ip, const char *fmt, va_list args) 2190 2190 { 2191 - struct ftrace_event_call *call = &event_print; 2191 + struct trace_event_call *call = &event_print; 2192 2192 struct ring_buffer_event 
*event; 2193 2193 int len = 0, size, pc; 2194 2194 struct print_entry *entry;
+5 -5
kernel/trace/trace.h
··· 858 858 #define ftrace_destroy_filter_files(ops) do { } while (0) 859 859 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ 860 860 861 - int ftrace_event_is_function(struct ftrace_event_call *call); 861 + int ftrace_event_is_function(struct trace_event_call *call); 862 862 863 863 /* 864 864 * struct trace_parser - servers for reading the user input separated by spaces ··· 1061 1061 extern void print_subsystem_event_filter(struct event_subsystem *system, 1062 1062 struct trace_seq *s); 1063 1063 extern int filter_assign_type(const char *type); 1064 - extern int create_event_filter(struct ftrace_event_call *call, 1064 + extern int create_event_filter(struct trace_event_call *call, 1065 1065 char *filter_str, bool set_str, 1066 1066 struct event_filter **filterp); 1067 1067 extern void free_event_filter(struct event_filter *filter); 1068 1068 1069 1069 struct ftrace_event_field * 1070 - trace_find_event_field(struct ftrace_event_call *call, char *name); 1070 + trace_find_event_field(struct trace_event_call *call, char *name); 1071 1071 1072 1072 extern void trace_event_enable_cmd_record(bool enable); 1073 1073 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); ··· 1286 1286 1287 1287 #undef FTRACE_ENTRY 1288 1288 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ 1289 - extern struct ftrace_event_call \ 1289 + extern struct trace_event_call \ 1290 1290 __aligned(4) event_##call; 1291 1291 #undef FTRACE_ENTRY_DUP 1292 1292 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ ··· 1295 1295 #include "trace_entries.h" 1296 1296 1297 1297 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) 1298 - int perf_ftrace_event_register(struct ftrace_event_call *call, 1298 + int perf_ftrace_event_register(struct trace_event_call *call, 1299 1299 enum trace_reg type, void *data); 1300 1300 #else 1301 1301 #define perf_ftrace_event_register NULL
+1 -1
kernel/trace/trace_branch.c
··· 29 29 static void 30 30 probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) 31 31 { 32 - struct ftrace_event_call *call = &event_branch; 32 + struct trace_event_call *call = &event_branch; 33 33 struct trace_array *tr = branch_tracer; 34 34 struct trace_array_cpu *data; 35 35 struct ring_buffer_event *event;
+10 -10
kernel/trace/trace_event_perf.c
··· 21 21 /* Count the events in use (per event id, not per instance) */ 22 22 static int total_ref_count; 23 23 24 - static int perf_trace_event_perm(struct ftrace_event_call *tp_event, 24 + static int perf_trace_event_perm(struct trace_event_call *tp_event, 25 25 struct perf_event *p_event) 26 26 { 27 27 if (tp_event->perf_perm) { ··· 83 83 return 0; 84 84 } 85 85 86 - static int perf_trace_event_reg(struct ftrace_event_call *tp_event, 86 + static int perf_trace_event_reg(struct trace_event_call *tp_event, 87 87 struct perf_event *p_event) 88 88 { 89 89 struct hlist_head __percpu *list; ··· 143 143 144 144 static void perf_trace_event_unreg(struct perf_event *p_event) 145 145 { 146 - struct ftrace_event_call *tp_event = p_event->tp_event; 146 + struct trace_event_call *tp_event = p_event->tp_event; 147 147 int i; 148 148 149 149 if (--tp_event->perf_refcount > 0) ··· 172 172 173 173 static int perf_trace_event_open(struct perf_event *p_event) 174 174 { 175 - struct ftrace_event_call *tp_event = p_event->tp_event; 175 + struct trace_event_call *tp_event = p_event->tp_event; 176 176 return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event); 177 177 } 178 178 179 179 static void perf_trace_event_close(struct perf_event *p_event) 180 180 { 181 - struct ftrace_event_call *tp_event = p_event->tp_event; 181 + struct trace_event_call *tp_event = p_event->tp_event; 182 182 tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event); 183 183 } 184 184 185 - static int perf_trace_event_init(struct ftrace_event_call *tp_event, 185 + static int perf_trace_event_init(struct trace_event_call *tp_event, 186 186 struct perf_event *p_event) 187 187 { 188 188 int ret; ··· 206 206 207 207 int perf_trace_init(struct perf_event *p_event) 208 208 { 209 - struct ftrace_event_call *tp_event; 209 + struct trace_event_call *tp_event; 210 210 u64 event_id = p_event->attr.config; 211 211 int ret = -EINVAL; 212 212 ··· 236 236 237 237 int perf_trace_add(struct perf_event *p_event, 
int flags) 238 238 { 239 - struct ftrace_event_call *tp_event = p_event->tp_event; 239 + struct trace_event_call *tp_event = p_event->tp_event; 240 240 struct hlist_head __percpu *pcpu_list; 241 241 struct hlist_head *list; 242 242 ··· 255 255 256 256 void perf_trace_del(struct perf_event *p_event, int flags) 257 257 { 258 - struct ftrace_event_call *tp_event = p_event->tp_event; 258 + struct trace_event_call *tp_event = p_event->tp_event; 259 259 hlist_del_rcu(&p_event->hlist_entry); 260 260 tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event); 261 261 } ··· 357 357 ftrace_function_local_disable(&event->ftrace_ops); 358 358 } 359 359 360 - int perf_ftrace_event_register(struct ftrace_event_call *call, 360 + int perf_ftrace_event_register(struct trace_event_call *call, 361 361 enum trace_reg type, void *data) 362 362 { 363 363 switch (type) {
+40 -40
kernel/trace/trace_events.c
··· 68 68 } 69 69 70 70 static struct list_head * 71 - trace_get_fields(struct ftrace_event_call *event_call) 71 + trace_get_fields(struct trace_event_call *event_call) 72 72 { 73 73 if (!event_call->class->get_fields) 74 74 return &event_call->class->fields; ··· 89 89 } 90 90 91 91 struct ftrace_event_field * 92 - trace_find_event_field(struct ftrace_event_call *call, char *name) 92 + trace_find_event_field(struct trace_event_call *call, char *name) 93 93 { 94 94 struct ftrace_event_field *field; 95 95 struct list_head *head; ··· 129 129 return 0; 130 130 } 131 131 132 - int trace_define_field(struct ftrace_event_call *call, const char *type, 132 + int trace_define_field(struct trace_event_call *call, const char *type, 133 133 const char *name, int offset, int size, int is_signed, 134 134 int filter_type) 135 135 { ··· 166 166 return ret; 167 167 } 168 168 169 - static void trace_destroy_fields(struct ftrace_event_call *call) 169 + static void trace_destroy_fields(struct trace_event_call *call) 170 170 { 171 171 struct ftrace_event_field *field, *next; 172 172 struct list_head *head; ··· 178 178 } 179 179 } 180 180 181 - int trace_event_raw_init(struct ftrace_event_call *call) 181 + int trace_event_raw_init(struct trace_event_call *call) 182 182 { 183 183 int id; 184 184 ··· 194 194 struct trace_event_file *trace_file, 195 195 unsigned long len) 196 196 { 197 - struct ftrace_event_call *event_call = trace_file->event_call; 197 + struct trace_event_call *event_call = trace_file->event_call; 198 198 199 199 local_save_flags(fbuffer->flags); 200 200 fbuffer->pc = preempt_count(); ··· 216 216 217 217 static void output_printk(struct ftrace_event_buffer *fbuffer) 218 218 { 219 - struct ftrace_event_call *event_call; 219 + struct trace_event_call *event_call; 220 220 struct trace_event *event; 221 221 unsigned long flags; 222 222 struct trace_iterator *iter = tracepoint_print_iter; ··· 252 252 } 253 253 EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); 254 254 255 - int 
trace_event_reg(struct ftrace_event_call *call, 255 + int trace_event_reg(struct trace_event_call *call, 256 256 enum trace_reg type, void *data) 257 257 { 258 258 struct trace_event_file *file = data; ··· 315 315 static int __ftrace_event_enable_disable(struct trace_event_file *file, 316 316 int enable, int soft_disable) 317 317 { 318 - struct ftrace_event_call *call = file->event_call; 318 + struct trace_event_call *call = file->event_call; 319 319 int ret = 0; 320 320 int disable; 321 321 ··· 516 516 const char *sub, const char *event, int set) 517 517 { 518 518 struct trace_event_file *file; 519 - struct ftrace_event_call *call; 519 + struct trace_event_call *call; 520 520 const char *name; 521 521 int ret = -EINVAL; 522 522 ··· 672 672 t_next(struct seq_file *m, void *v, loff_t *pos) 673 673 { 674 674 struct trace_event_file *file = v; 675 - struct ftrace_event_call *call; 675 + struct trace_event_call *call; 676 676 struct trace_array *tr = m->private; 677 677 678 678 (*pos)++; ··· 743 743 static int t_show(struct seq_file *m, void *v) 744 744 { 745 745 struct trace_event_file *file = v; 746 - struct ftrace_event_call *call = file->event_call; 746 + struct trace_event_call *call = file->event_call; 747 747 748 748 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) 749 749 seq_printf(m, "%s:", call->class->system); ··· 830 830 const char set_to_char[4] = { '?', '0', '1', 'X' }; 831 831 struct ftrace_subsystem_dir *dir = filp->private_data; 832 832 struct event_subsystem *system = dir->subsystem; 833 - struct ftrace_event_call *call; 833 + struct trace_event_call *call; 834 834 struct trace_event_file *file; 835 835 struct trace_array *tr = dir->tr; 836 836 char buf[2]; ··· 917 917 918 918 static void *f_next(struct seq_file *m, void *v, loff_t *pos) 919 919 { 920 - struct ftrace_event_call *call = event_file_data(m->private); 920 + struct trace_event_call *call = event_file_data(m->private); 921 921 struct list_head *common_head = &ftrace_common_fields; 922 
922 struct list_head *head = trace_get_fields(call); 923 923 struct list_head *node = v; ··· 949 949 950 950 static int f_show(struct seq_file *m, void *v) 951 951 { 952 - struct ftrace_event_call *call = event_file_data(m->private); 952 + struct trace_event_call *call = event_file_data(m->private); 953 953 struct ftrace_event_field *field; 954 954 const char *array_descriptor; 955 955 ··· 1573 1573 static int 1574 1574 event_create_dir(struct dentry *parent, struct trace_event_file *file) 1575 1575 { 1576 - struct ftrace_event_call *call = file->event_call; 1576 + struct trace_event_call *call = file->event_call; 1577 1577 struct trace_array *tr = file->tr; 1578 1578 struct list_head *head; 1579 1579 struct dentry *d_events; ··· 1634 1634 return 0; 1635 1635 } 1636 1636 1637 - static void remove_event_from_tracers(struct ftrace_event_call *call) 1637 + static void remove_event_from_tracers(struct trace_event_call *call) 1638 1638 { 1639 1639 struct trace_event_file *file; 1640 1640 struct trace_array *tr; ··· 1654 1654 } while_for_each_event_file(); 1655 1655 } 1656 1656 1657 - static void event_remove(struct ftrace_event_call *call) 1657 + static void event_remove(struct trace_event_call *call) 1658 1658 { 1659 1659 struct trace_array *tr; 1660 1660 struct trace_event_file *file; ··· 1678 1678 list_del(&call->list); 1679 1679 } 1680 1680 1681 - static int event_init(struct ftrace_event_call *call) 1681 + static int event_init(struct trace_event_call *call) 1682 1682 { 1683 1683 int ret = 0; 1684 1684 const char *name; ··· 1697 1697 } 1698 1698 1699 1699 static int 1700 - __register_event(struct ftrace_event_call *call, struct module *mod) 1700 + __register_event(struct trace_event_call *call, struct module *mod) 1701 1701 { 1702 1702 int ret; 1703 1703 ··· 1733 1733 return ptr + elen; 1734 1734 } 1735 1735 1736 - static void update_event_printk(struct ftrace_event_call *call, 1736 + static void update_event_printk(struct trace_event_call *call, 1737 1737 struct 
trace_enum_map *map) 1738 1738 { 1739 1739 char *ptr; ··· 1811 1811 1812 1812 void trace_event_enum_update(struct trace_enum_map **map, int len) 1813 1813 { 1814 - struct ftrace_event_call *call, *p; 1814 + struct trace_event_call *call, *p; 1815 1815 const char *last_system = NULL; 1816 1816 int last_i; 1817 1817 int i; ··· 1837 1837 } 1838 1838 1839 1839 static struct trace_event_file * 1840 - trace_create_new_event(struct ftrace_event_call *call, 1840 + trace_create_new_event(struct trace_event_call *call, 1841 1841 struct trace_array *tr) 1842 1842 { 1843 1843 struct trace_event_file *file; ··· 1858 1858 1859 1859 /* Add an event to a trace directory */ 1860 1860 static int 1861 - __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) 1861 + __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr) 1862 1862 { 1863 1863 struct trace_event_file *file; 1864 1864 ··· 1875 1875 * the filesystem is initialized. 1876 1876 */ 1877 1877 static __init int 1878 - __trace_early_add_new_event(struct ftrace_event_call *call, 1878 + __trace_early_add_new_event(struct trace_event_call *call, 1879 1879 struct trace_array *tr) 1880 1880 { 1881 1881 struct trace_event_file *file; ··· 1888 1888 } 1889 1889 1890 1890 struct ftrace_module_file_ops; 1891 - static void __add_event_to_tracers(struct ftrace_event_call *call); 1891 + static void __add_event_to_tracers(struct trace_event_call *call); 1892 1892 1893 1893 /* Add an additional event_call dynamically */ 1894 - int trace_add_event_call(struct ftrace_event_call *call) 1894 + int trace_add_event_call(struct trace_event_call *call) 1895 1895 { 1896 1896 int ret; 1897 1897 mutex_lock(&trace_types_lock); ··· 1910 1910 * Must be called under locking of trace_types_lock, event_mutex and 1911 1911 * trace_event_sem. 
1912 1912 */ 1913 - static void __trace_remove_event_call(struct ftrace_event_call *call) 1913 + static void __trace_remove_event_call(struct trace_event_call *call) 1914 1914 { 1915 1915 event_remove(call); 1916 1916 trace_destroy_fields(call); ··· 1918 1918 call->filter = NULL; 1919 1919 } 1920 1920 1921 - static int probe_remove_event_call(struct ftrace_event_call *call) 1921 + static int probe_remove_event_call(struct trace_event_call *call) 1922 1922 { 1923 1923 struct trace_array *tr; 1924 1924 struct trace_event_file *file; ··· 1952 1952 } 1953 1953 1954 1954 /* Remove an event_call */ 1955 - int trace_remove_event_call(struct ftrace_event_call *call) 1955 + int trace_remove_event_call(struct trace_event_call *call) 1956 1956 { 1957 1957 int ret; 1958 1958 ··· 1976 1976 1977 1977 static void trace_module_add_events(struct module *mod) 1978 1978 { 1979 - struct ftrace_event_call **call, **start, **end; 1979 + struct trace_event_call **call, **start, **end; 1980 1980 1981 1981 if (!mod->num_trace_events) 1982 1982 return; ··· 1999 1999 2000 2000 static void trace_module_remove_events(struct module *mod) 2001 2001 { 2002 - struct ftrace_event_call *call, *p; 2002 + struct trace_event_call *call, *p; 2003 2003 bool clear_trace = false; 2004 2004 2005 2005 down_write(&trace_event_sem); ··· 2055 2055 static void 2056 2056 __trace_add_event_dirs(struct trace_array *tr) 2057 2057 { 2058 - struct ftrace_event_call *call; 2058 + struct trace_event_call *call; 2059 2059 int ret; 2060 2060 2061 2061 list_for_each_entry(call, &ftrace_events, list) { ··· 2070 2070 find_event_file(struct trace_array *tr, const char *system, const char *event) 2071 2071 { 2072 2072 struct trace_event_file *file; 2073 - struct ftrace_event_call *call; 2073 + struct trace_event_call *call; 2074 2074 const char *name; 2075 2075 2076 2076 list_for_each_entry(file, &tr->events, list) { ··· 2388 2388 static __init void 2389 2389 __trace_early_add_events(struct trace_array *tr) 2390 2390 { 2391 - 
struct ftrace_event_call *call; 2391 + struct trace_event_call *call; 2392 2392 int ret; 2393 2393 2394 2394 list_for_each_entry(call, &ftrace_events, list) { ··· 2413 2413 remove_event_file_dir(file); 2414 2414 } 2415 2415 2416 - static void __add_event_to_tracers(struct ftrace_event_call *call) 2416 + static void __add_event_to_tracers(struct trace_event_call *call) 2417 2417 { 2418 2418 struct trace_array *tr; 2419 2419 ··· 2421 2421 __trace_add_new_event(call, tr); 2422 2422 } 2423 2423 2424 - extern struct ftrace_event_call *__start_ftrace_events[]; 2425 - extern struct ftrace_event_call *__stop_ftrace_events[]; 2424 + extern struct trace_event_call *__start_ftrace_events[]; 2425 + extern struct trace_event_call *__stop_ftrace_events[]; 2426 2426 2427 2427 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata; 2428 2428 ··· 2593 2593 static __init int event_trace_enable(void) 2594 2594 { 2595 2595 struct trace_array *tr = top_trace_array(); 2596 - struct ftrace_event_call **iter, *call; 2596 + struct trace_event_call **iter, *call; 2597 2597 int ret; 2598 2598 2599 2599 if (!tr) ··· 2756 2756 { 2757 2757 struct ftrace_subsystem_dir *dir; 2758 2758 struct trace_event_file *file; 2759 - struct ftrace_event_call *call; 2759 + struct trace_event_call *call; 2760 2760 struct event_subsystem *system; 2761 2761 struct trace_array *tr; 2762 2762 int ret;
+17 -17
kernel/trace/trace_events_filter.c
··· 782 782 783 783 static void filter_disable(struct trace_event_file *file) 784 784 { 785 - struct ftrace_event_call *call = file->event_call; 785 + struct trace_event_call *call = file->event_call; 786 786 787 787 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 788 788 call->flags &= ~TRACE_EVENT_FL_FILTERED; ··· 839 839 840 840 static inline void __remove_filter(struct trace_event_file *file) 841 841 { 842 - struct ftrace_event_call *call = file->event_call; 842 + struct trace_event_call *call = file->event_call; 843 843 844 844 filter_disable(file); 845 845 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) ··· 862 862 863 863 static inline void __free_subsystem_filter(struct trace_event_file *file) 864 864 { 865 - struct ftrace_event_call *call = file->event_call; 865 + struct trace_event_call *call = file->event_call; 866 866 867 867 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) { 868 868 __free_filter(call->filter); ··· 1336 1336 } 1337 1337 1338 1338 static struct filter_pred *create_pred(struct filter_parse_state *ps, 1339 - struct ftrace_event_call *call, 1339 + struct trace_event_call *call, 1340 1340 int op, char *operand1, char *operand2) 1341 1341 { 1342 1342 struct ftrace_event_field *field; ··· 1549 1549 filter->preds); 1550 1550 } 1551 1551 1552 - static int replace_preds(struct ftrace_event_call *call, 1552 + static int replace_preds(struct trace_event_call *call, 1553 1553 struct event_filter *filter, 1554 1554 struct filter_parse_state *ps, 1555 1555 bool dry_run) ··· 1664 1664 1665 1665 static inline void event_set_filtered_flag(struct trace_event_file *file) 1666 1666 { 1667 - struct ftrace_event_call *call = file->event_call; 1667 + struct trace_event_call *call = file->event_call; 1668 1668 1669 1669 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1670 1670 call->flags |= TRACE_EVENT_FL_FILTERED; ··· 1675 1675 static inline void event_set_filter(struct trace_event_file *file, 1676 1676 struct event_filter *filter) 1677 1677 { 1678 - struct ftrace_event_call *call = file->event_call; 1678 + struct trace_event_call *call = file->event_call;
1679 1679 1680 1680 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1681 1681 rcu_assign_pointer(call->filter, filter); ··· 1685 1685 1686 1686 static inline void event_clear_filter(struct trace_event_file *file) 1687 1687 { 1688 - struct ftrace_event_call *call = file->event_call; 1688 + struct trace_event_call *call = file->event_call; 1689 1689 1690 1690 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1691 1691 RCU_INIT_POINTER(call->filter, NULL); ··· 1696 1696 static inline void 1697 1697 event_set_no_set_filter_flag(struct trace_event_file *file) 1698 1698 { 1699 - struct ftrace_event_call *call = file->event_call; 1699 + struct trace_event_call *call = file->event_call; 1700 1700 1701 1701 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1702 1702 call->flags |= TRACE_EVENT_FL_NO_SET_FILTER; ··· 1707 1707 static inline void 1708 1708 event_clear_no_set_filter_flag(struct trace_event_file *file) 1709 1709 { 1710 - struct ftrace_event_call *call = file->event_call; 1710 + struct trace_event_call *call = file->event_call; 1711 1711 1712 1712 if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) 1713 1713 call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER; ··· 1718 1718 static inline bool 1719 1719 event_no_set_filter_flag(struct trace_event_file *file) 1720 1720 { 1721 - struct ftrace_event_call *call = file->event_call; 1721 + struct trace_event_call *call = file->event_call; 1722 1722 1723 1723 if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER) 1724 1724 return true; ··· 1884 1884 } 1885 1885 1886 1886 /** 1887 - * create_filter - create a filter for a ftrace_event_call 1888 - * @call: ftrace_event_call to create a filter for 1887 + * create_filter - create a filter for a trace_event_call 1888 + * @call: trace_event_call to create a filter for 1889 1889 * @filter_str: filter string 1890 1890 * @set_str: remember @filter_str and enable detailed error in filter
1891 1891 * @filterp: out param for created filter (always updated on return) ··· 1899 1899 * information if @set_str is %true and the caller is responsible for 1900 1900 * freeing it. 1901 1901 */ 1902 - static int create_filter(struct ftrace_event_call *call, 1902 + static int create_filter(struct trace_event_call *call, 1903 1903 char *filter_str, bool set_str, 1904 1904 struct event_filter **filterp) 1905 1905 { ··· 1919 1919 return err; 1920 1920 } 1921 1921 1922 - int create_event_filter(struct ftrace_event_call *call, 1922 + int create_event_filter(struct trace_event_call *call, 1923 1923 char *filter_str, bool set_str, 1924 1924 struct event_filter **filterp) 1925 1925 { ··· 1963 1963 /* caller must hold event_mutex */ 1964 1964 int apply_event_filter(struct trace_event_file *file, char *filter_string) 1965 1965 { 1966 - struct ftrace_event_call *call = file->event_call; 1966 + struct trace_event_call *call = file->event_call; 1967 1967 struct event_filter *filter; 1968 1968 int err; 1969 1969 ··· 2212 2212 { 2213 2213 int err; 2214 2214 struct event_filter *filter; 2215 - struct ftrace_event_call *call; 2215 + struct trace_event_call *call; 2216 2216 2217 2217 mutex_lock(&event_mutex); 2218 2218
+5 -5
kernel/trace/trace_export.c
··· 125 125 #undef FTRACE_ENTRY 126 126 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ 127 127 static int __init \ 128 - ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ 128 + ftrace_define_fields_##name(struct trace_event_call *event_call) \ 129 129 { \ 130 130 struct struct_name field; \ 131 131 int ret; \ ··· 163 163 #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ 164 164 regfn) \ 165 165 \ 166 - struct ftrace_event_class __refdata event_class_ftrace_##call = { \ 166 + struct trace_event_class __refdata event_class_ftrace_##call = { \ 167 167 .system = __stringify(TRACE_SYSTEM), \ 168 168 .define_fields = ftrace_define_fields_##call, \ 169 169 .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ 170 170 .reg = regfn, \ 171 171 }; \ 172 172 \ 173 - struct ftrace_event_call __used event_##call = { \ 173 + struct trace_event_call __used event_##call = { \ 174 174 .class = &event_class_ftrace_##call, \ 175 175 { \ 176 176 .name = #call, \ ··· 179 179 .print_fmt = print, \ 180 180 .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ 181 181 }; \ 182 - struct ftrace_event_call __used \ 182 + struct trace_event_call __used \ 183 183 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; 184 184 185 185 #undef FTRACE_ENTRY ··· 187 187 FTRACE_ENTRY_REG(call, struct_name, etype, \ 188 188 PARAMS(tstruct), PARAMS(print), filter, NULL) 189 189 190 - int ftrace_event_is_function(struct ftrace_event_call *call) 190 + int ftrace_event_is_function(struct trace_event_call *call) 191 191 { 192 192 return call == &event_function; 193 193 }
+2 -2
kernel/trace/trace_functions_graph.c
··· 278 278 unsigned long flags, 279 279 int pc) 280 280 { 281 - struct ftrace_event_call *call = &event_funcgraph_entry; 281 + struct trace_event_call *call = &event_funcgraph_entry; 282 282 struct ring_buffer_event *event; 283 283 struct ring_buffer *buffer = tr->trace_buffer.buffer; 284 284 struct ftrace_graph_ent_entry *entry; ··· 393 393 unsigned long flags, 394 394 int pc) 395 395 { 396 - struct ftrace_event_call *call = &event_funcgraph_exit; 396 + struct trace_event_call *call = &event_funcgraph_exit; 397 397 struct ring_buffer_event *event; 398 398 struct ring_buffer *buffer = tr->trace_buffer.buffer; 399 399 struct ftrace_graph_ret_entry *entry;
+9 -9
kernel/trace/trace_kprobe.c
··· 924 924 struct ring_buffer *buffer; 925 925 int size, dsize, pc; 926 926 unsigned long irq_flags; 927 - struct ftrace_event_call *call = &tk->tp.call; 927 + struct trace_event_call *call = &tk->tp.call; 928 928 929 929 WARN_ON(call != trace_file->event_call); 930 930 ··· 972 972 struct ring_buffer *buffer; 973 973 int size, pc, dsize; 974 974 unsigned long irq_flags; 975 - struct ftrace_event_call *call = &tk->tp.call; 975 + struct trace_event_call *call = &tk->tp.call; 976 976 977 977 WARN_ON(call != trace_file->event_call); 978 978 ··· 1081 1081 } 1082 1082 1083 1083 1084 - static int kprobe_event_define_fields(struct ftrace_event_call *event_call) 1084 + static int kprobe_event_define_fields(struct trace_event_call *event_call) 1085 1085 { 1086 1086 int ret, i; 1087 1087 struct kprobe_trace_entry_head field; ··· 1104 1104 return 0; 1105 1105 } 1106 1106 1107 - static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) 1107 + static int kretprobe_event_define_fields(struct trace_event_call *event_call) 1108 1108 { 1109 1109 int ret, i; 1110 1110 struct kretprobe_trace_entry_head field; ··· 1134 1134 static void 1135 1135 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) 1136 1136 { 1137 - struct ftrace_event_call *call = &tk->tp.call; 1137 + struct trace_event_call *call = &tk->tp.call; 1138 1138 struct bpf_prog *prog = call->prog; 1139 1139 struct kprobe_trace_entry_head *entry; 1140 1140 struct hlist_head *head; ··· 1169 1169 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, 1170 1170 struct pt_regs *regs) 1171 1171 { 1172 - struct ftrace_event_call *call = &tk->tp.call; 1172 + struct trace_event_call *call = &tk->tp.call; 1173 1173 struct bpf_prog *prog = call->prog; 1174 1174 struct kretprobe_trace_entry_head *entry; 1175 1175 struct hlist_head *head; ··· 1206 1206 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe 1207 1207 * lockless, but we can't race with this __init function. 1208 1208 */
1209 - static int kprobe_register(struct ftrace_event_call *event, 1209 + static int kprobe_register(struct trace_event_call *event, 1210 1210 enum trace_reg type, void *data) 1211 1211 { 1212 1212 struct trace_kprobe *tk = (struct trace_kprobe *)event->data; ··· 1276 1276 1277 1277 static int register_kprobe_event(struct trace_kprobe *tk) 1278 1278 { 1279 - struct ftrace_event_call *call = &tk->tp.call; 1279 + struct trace_event_call *call = &tk->tp.call; 1280 1280 int ret; 1281 1281 1282 - /* Initialize ftrace_event_call */ 1282 + /* Initialize trace_event_call */ 1283 1283 INIT_LIST_HEAD(&call->class->fields); 1284 1284 if (trace_kprobe_is_return(tk)) { 1285 1285 call->event.funcs = &kretprobe_funcs;
+2 -2
kernel/trace/trace_mmiotrace.c
··· 298 298 struct trace_array_cpu *data, 299 299 struct mmiotrace_rw *rw) 300 300 { 301 - struct ftrace_event_call *call = &event_mmiotrace_rw; 301 + struct trace_event_call *call = &event_mmiotrace_rw; 302 302 struct ring_buffer *buffer = tr->trace_buffer.buffer; 303 303 struct ring_buffer_event *event; 304 304 struct trace_mmiotrace_rw *entry; ··· 328 328 struct trace_array_cpu *data, 329 329 struct mmiotrace_map *map) 330 330 { 331 - struct ftrace_event_call *call = &event_mmiotrace_map; 331 + struct trace_event_call *call = &event_mmiotrace_map; 332 332 struct ring_buffer *buffer = tr->trace_buffer.buffer; 333 333 struct ring_buffer_event *event; 334 334 struct trace_mmiotrace_map *entry;
+2 -2
kernel/trace/trace_output.c
··· 225 225 int ftrace_raw_output_prep(struct trace_iterator *iter, 226 226 struct trace_event *trace_event) 227 227 { 228 - struct ftrace_event_call *event; 228 + struct trace_event_call *event; 229 229 struct trace_seq *s = &iter->seq; 230 230 struct trace_seq *p = &iter->tmp_seq; 231 231 struct trace_entry *entry; 232 232 233 - event = container_of(trace_event, struct ftrace_event_call, event); 233 + event = container_of(trace_event, struct trace_event_call, event); 234 234 entry = iter->ent; 235 235 236 236 if (entry->type != event->event.type) {
+2 -2
kernel/trace/trace_probe.h
··· 272 272 273 273 struct trace_probe { 274 274 unsigned int flags; /* For TP_FLAG_* */ 275 - struct ftrace_event_class class; 276 - struct ftrace_event_call call; 275 + struct trace_event_class class; 276 + struct trace_event_call call; 277 277 struct list_head files; 278 278 ssize_t size; /* trace entry size */ 279 279 unsigned int nr_args;
+2 -2
kernel/trace/trace_sched_wakeup.c
··· 369 369 struct task_struct *next, 370 370 unsigned long flags, int pc) 371 371 { 372 - struct ftrace_event_call *call = &event_context_switch; 372 + struct trace_event_call *call = &event_context_switch; 373 373 struct ring_buffer *buffer = tr->trace_buffer.buffer; 374 374 struct ring_buffer_event *event; 375 375 struct ctx_switch_entry *entry; ··· 397 397 struct task_struct *curr, 398 398 unsigned long flags, int pc) 399 399 { 400 - struct ftrace_event_call *call = &event_wakeup; 400 + struct trace_event_call *call = &event_wakeup; 401 401 struct ring_buffer_event *event; 402 402 struct ctx_switch_entry *entry; 403 403 struct ring_buffer *buffer = tr->trace_buffer.buffer;
+20 -20
kernel/trace/trace_syscalls.c
··· 13 13 14 14 static DEFINE_MUTEX(syscall_trace_lock); 15 15 16 - static int syscall_enter_register(struct ftrace_event_call *event, 16 + static int syscall_enter_register(struct trace_event_call *event, 17 17 enum trace_reg type, void *data); 18 - static int syscall_exit_register(struct ftrace_event_call *event, 18 + static int syscall_exit_register(struct trace_event_call *event, 19 19 enum trace_reg type, void *data); 20 20 21 21 static struct list_head * 22 - syscall_get_enter_fields(struct ftrace_event_call *call) 22 + syscall_get_enter_fields(struct trace_event_call *call) 23 23 { 24 24 struct syscall_metadata *entry = call->data; 25 25 ··· 219 219 return pos; 220 220 } 221 221 222 - static int __init set_syscall_print_fmt(struct ftrace_event_call *call) 222 + static int __init set_syscall_print_fmt(struct trace_event_call *call) 223 223 { 224 224 char *print_fmt; 225 225 int len; ··· 244 244 return 0; 245 245 } 246 246 247 - static void __init free_syscall_print_fmt(struct ftrace_event_call *call) 247 + static void __init free_syscall_print_fmt(struct trace_event_call *call) 248 248 { 249 249 struct syscall_metadata *entry = call->data; 250 250 ··· 252 252 kfree(call->print_fmt); 253 253 } 254 254 255 - static int __init syscall_enter_define_fields(struct ftrace_event_call *call) 255 + static int __init syscall_enter_define_fields(struct trace_event_call *call) 256 256 { 257 257 struct syscall_trace_enter trace; 258 258 struct syscall_metadata *meta = call->data; ··· 275 275 return ret; 276 276 } 277 277 278 - static int __init syscall_exit_define_fields(struct ftrace_event_call *call) 278 + static int __init syscall_exit_define_fields(struct trace_event_call *call) 279 279 { 280 280 struct syscall_trace_exit trace; 281 281 int ret; ··· 385 385 } 386 386 387 387 static int reg_event_syscall_enter(struct trace_event_file *file, 388 - struct ftrace_event_call *call) 388 + struct trace_event_call *call) 389 389 { 390 390 struct trace_array *tr = file->tr; 391 391 int ret = 0;
··· 406 406 } 407 407 408 408 static void unreg_event_syscall_enter(struct trace_event_file *file, 409 - struct ftrace_event_call *call) 409 + struct trace_event_call *call) 410 410 { 411 411 struct trace_array *tr = file->tr; 412 412 int num; ··· 423 423 } 424 424 425 425 static int reg_event_syscall_exit(struct trace_event_file *file, 426 - struct ftrace_event_call *call) 426 + struct trace_event_call *call) 427 427 { 428 428 struct trace_array *tr = file->tr; 429 429 int ret = 0; ··· 444 444 } 445 445 446 446 static void unreg_event_syscall_exit(struct trace_event_file *file, 447 - struct ftrace_event_call *call) 447 + struct trace_event_call *call) 448 448 { 449 449 struct trace_array *tr = file->tr; 450 450 int num; ··· 460 460 mutex_unlock(&syscall_trace_lock); 461 461 } 462 462 463 - static int __init init_syscall_trace(struct ftrace_event_call *call) 463 + static int __init init_syscall_trace(struct trace_event_call *call) 464 464 { 465 465 int id; 466 466 int num; ··· 493 493 .trace = print_syscall_exit, 494 494 }; 495 495 496 - struct ftrace_event_class __refdata event_class_syscall_enter = { 496 + struct trace_event_class __refdata event_class_syscall_enter = { 497 497 .system = "syscalls", 498 498 .reg = syscall_enter_register, 499 499 .define_fields = syscall_enter_define_fields, ··· 501 501 .raw_init = init_syscall_trace, 502 502 }; 503 503 504 - struct ftrace_event_class __refdata event_class_syscall_exit = { 504 + struct trace_event_class __refdata event_class_syscall_exit = { 505 505 .system = "syscalls", 506 506 .reg = syscall_exit_register, 507 507 .define_fields = syscall_exit_define_fields, ··· 584 584 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 585 585 } 586 586 587 - static int perf_sysenter_enable(struct ftrace_event_call *call) 587 + static int perf_sysenter_enable(struct trace_event_call *call) 588 588 { 589 589 int ret = 0; 590 590 int num; ··· 605 605 return ret; 606 606 } 607 607 608 - static void perf_sysenter_disable(struct ftrace_event_call *call) 608 + static void perf_sysenter_disable(struct trace_event_call *call)
609 609 { 610 610 int num; 611 611 ··· 656 656 perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); 657 657 } 658 658 659 - static int perf_sysexit_enable(struct ftrace_event_call *call) 659 + static int perf_sysexit_enable(struct trace_event_call *call) 660 660 { 661 661 int ret = 0; 662 662 int num; ··· 677 677 return ret; 678 678 } 679 679 680 - static void perf_sysexit_disable(struct ftrace_event_call *call) 680 + static void perf_sysexit_disable(struct trace_event_call *call) 681 681 { 682 682 int num; 683 683 ··· 693 693 694 694 #endif /* CONFIG_PERF_EVENTS */ 695 695 696 - static int syscall_enter_register(struct ftrace_event_call *event, 696 + static int syscall_enter_register(struct trace_event_call *event, 697 697 enum trace_reg type, void *data) 698 698 { 699 699 struct trace_event_file *file = data; ··· 721 721 return 0; 722 722 } 723 723 724 - static int syscall_exit_register(struct ftrace_event_call *event, 724 + static int syscall_exit_register(struct trace_event_call *event, 725 725 enum trace_reg type, void *data) 726 726 { 727 727 struct trace_event_file *file = data;
+6 -6
kernel/trace/trace_uprobe.c
··· 777 777 struct ring_buffer *buffer; 778 778 void *data; 779 779 int size, esize; 780 - struct ftrace_event_call *call = &tu->tp.call; 780 + struct trace_event_call *call = &tu->tp.call; 781 781 782 782 WARN_ON(call != trace_file->event_call); 783 783 ··· 967 967 uprobe_buffer_disable(); 968 968 } 969 969 970 - static int uprobe_event_define_fields(struct ftrace_event_call *event_call) 970 + static int uprobe_event_define_fields(struct trace_event_call *event_call) 971 971 { 972 972 int ret, i, size; 973 973 struct uprobe_trace_entry_head field; ··· 1093 1093 unsigned long func, struct pt_regs *regs, 1094 1094 struct uprobe_cpu_buffer *ucb, int dsize) 1095 1095 { 1096 - struct ftrace_event_call *call = &tu->tp.call; 1096 + struct trace_event_call *call = &tu->tp.call; 1097 1097 struct uprobe_trace_entry_head *entry; 1098 1098 struct hlist_head *head; 1099 1099 void *data; ··· 1159 1159 #endif /* CONFIG_PERF_EVENTS */ 1160 1160 1161 1161 static int 1162 - trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, 1162 + trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, 1163 1163 void *data) 1164 1164 { 1165 1165 struct trace_uprobe *tu = event->data; ··· 1272 1272 1273 1273 static int register_uprobe_event(struct trace_uprobe *tu) 1274 1274 { 1275 - struct ftrace_event_call *call = &tu->tp.call; 1275 + struct trace_event_call *call = &tu->tp.call; 1276 1276 int ret; 1277 1277 1278 - /* Initialize ftrace_event_call */ 1278 + /* Initialize trace_event_call */ 1279 1279 INIT_LIST_HEAD(&call->class->fields); 1280 1280 call->event.funcs = &uprobe_funcs; 1281 1281 call->class->define_fields = uprobe_event_define_fields;