Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.14-rc4 733 lines 21 kB view raw
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


/*
 * In this stage the field macros expand to plain struct-member
 * declarations, so TP_STRUCT__entry() becomes the body of
 * struct ftrace_raw_<call>.
 */
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

/* A dynamic array is recorded as a u32 (offset | length) descriptor. */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))	\

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)				\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)			\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

/*
 * Fixed-size fields expand to nothing here; only dynamic arrays
 * occupy a slot in the offsets structure.
 */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format.
 * Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/*
 * A dynamic array lives past the end of the fixed fields; its u32
 * descriptor keeps the offset in the low 16 bits (masked with 0xffff).
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

/* On 64-bit, a u64 symbol table is the same as the long-based one. */
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct ftrace_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = ftrace_raw_output_prep(iter, trace_event);		\
	if (ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, print);				\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * The following expansions build ftrace_define_fields_<call>(), which
 * registers every field of the event with trace_define_field().
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		mutex_lock(&event_storage_mutex);			\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		snprintf(event_storage, sizeof(event_storage),		\
			 "%s[%d]", #type, len);				\
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		mutex_unlock(&event_storage_mutex);			\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/*
 * Pack each dynamic array's descriptor: offset from the start of the
 * entry in the low 16 bits, length in bytes in the high 16 bits.
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

/* Strings reserve strlen + 1 bytes; a NULL source becomes "(null)". */
#undef __string
#define __string(item, src) __dynamic_array(char, item,			\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_file *ftrace_file = __data;
 *	struct ftrace_event_call *event_call = ftrace_file->event_call;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = ftrace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
 *			event_triggers_call(ftrace_file, NULL);
 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(ftrace_file, entry);
 *
 *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 *		     &ftrace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(ftrace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.name			= "<call>",
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 * // its only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)				\
	static notrace void					\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)					\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* Copy the descriptor computed by ftrace_get_offsets_<call>() above. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)	\

#undef __assign_str
#define __assign_str(dst, src)					\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_addr
#define __perf_addr(a)	(a)

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_file *ftrace_file = __data;			\
	struct ftrace_event_call *event_call = ftrace_file->event_call;	\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	if (ftrace_trigger_soft_disabled(ftrace_file))			\
		return;							\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,	\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	event_trigger_unlock_commit(ftrace_file, buffer, event, entry,	\
				    irq_flags, pc);			\
}
/*
 * The ftrace_test_probe is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_str

/* Stringify the TP_printk() format so it can be stored in print_fmt. */
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {			\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {			\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a)	(__addr = (a))

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	perf_fetch_caller_regs(&__regs);				\
	entry = perf_trace_buf_prepare(__entry_size,			\
			event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}

/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */