/* include/trace/ftrace.h — Linux v3.9 (709 lines, 20 kB) */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

/* A dynamic array is recorded as a u32 offset/length word in the struct. */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)		\
	__TRACE_EVENT_FLAGS(name, value)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* Low 16 bits of the __data_loc word are the offset of the payload. */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)	\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Register each field with the event filter via trace_define_field();
 * ftrace_define_fields_<call> expands tstruct with these definitions.
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		mutex_lock(&event_storage_mutex);			\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		snprintf(event_storage, sizeof(event_storage),		\
			 "%s[%d]", #type, len);				\
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		mutex_unlock(&event_storage_mutex);			\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* Offset goes in the low 16 bits, byte length in the high 16 bits. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; } <-- Here we assign the entries by the __field and
 *			  __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.name			= "<call>",
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

/*
 * NOTE(review): the trailing '\' below splices the following blank line
 * into the macro; harmless (the continuation line is empty) but probably
 * unintended — confirm against upstream before removing.
 */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)	\

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)
/* Stage 4 proper: generate the tracepoint probe that records the event. */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_str

/* Re-expand TP_printk into the quoted print_fmt string exported to user space. */
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef __perf_task
#define __perf_task(t) __task = (t)

#undef TP_perf_assign
#define TP_perf_assign(args...) args

/* Perf variant of the probe: record into the perf trace buffer instead. */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}

/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT