Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit 4dfd459b738cf1f65b3eac4e0a9b19bc93cc91c6 (933 lines, 26 kB)
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is useful when all events have the same
 * parameters and differ only in the tracepoint itself.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this keeps
 * the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
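
/*
 * An illustrative sketch, assuming a hypothetical event foo_bar that
 * is not defined in this file: given
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *msg),
 *		TP_ARGS(bar, msg),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(msg, msg)
 *		),
 *		TP_fast_assign(
 *			__entry->bar = bar;
 *			__assign_str(msg, msg);
 *		),
 *		TP_printk("bar=%d msg=%s", __entry->bar, __get_str(msg)));
 *
 * stage 1 above would generate:
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_msg;
 *		char			__data[0];
 *	};
 *
 * and this stage would generate:
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	msg;
 *	};
 */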

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the display format of the trace point:
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */
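
/*
 * For the hypothetical foo_bar event sketched above, the generated
 * format callbacks would emit roughly the following through debugfs
 * (tracing/events/<system>/foo_bar/format); exact offsets depend on
 * sizeof(struct trace_entry), and the signedness of char varies by
 * architecture:
 *
 *	field:int bar;	offset:12;	size:4;	signed:1;
 *	field:__data_loc char[] msg;	offset:16;	size:4;	signed:1;
 *
 *	print fmt: "bar=%d msg=%s", REC->bar, __get_str(msg)
 *
 * __entry is redefined to REC just below so that the stringified
 * print fmt line matches what userspace parsers expect.
 */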

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item)						\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t"	\
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __field_ext
#define __field_ext(type, item, filter_type)	__field(type, item)

#undef __array
#define __array(type, item, len)					\
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item),	\
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",	\
			       (unsigned int)offsetof(typeof(field),	\
					__data_loc_##item),		\
			       (unsigned int)sizeof(field.__data_loc_##item), \
			       (unsigned int)is_signed_type(type));	\
	if (!ret)							\
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_format_setup_##call(struct ftrace_event_call *unused,		\
			   struct trace_seq *s)				\
{									\
	struct ftrace_raw_##call field __attribute__((unused));	\
	int ret = 0;							\
									\
	tstruct;							\
									\
	return ret;							\
}									\
									\
static int								\
ftrace_format_##call(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##call(unused, s);			\
	if (!ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)		\
static int								\
ftrace_format_##name(struct ftrace_event_call *unused,			\
		     struct trace_seq *s)				\
{									\
	int ret = 0;							\
									\
	ret = ftrace_format_setup_##template(unused, s);		\
	if (!ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, "\nprint fmt: " print);		\
									\
	return ret;							\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */
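
/*
 * Continuing the hypothetical foo_bar example, the body of the
 * generated output function would reduce to roughly:
 *
 *	ret = trace_seq_printf(s, "foo_bar: ");
 *	if (ret)
 *		ret = trace_seq_printf(s, "bar=%d msg=%s" "\n",
 *				       field->bar, __get_str(msg));
 *
 * because __entry is redefined to "field" below and the TP_printk()
 * override appends the trailing newline to the format string.
 */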

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static enum print_line_t						\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static enum print_line_t						\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,	\
					       #name, iter, flags);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static enum print_line_t						\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
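
/*
 * __print_flags() and __print_symbolic() above are meant to be used
 * inside TP_printk() to decode numeric values at output time; a
 * hypothetical use:
 *
 *	TP_printk("state=%s",
 *		  __print_symbolic(__entry->state,
 *				   { 0, "RUNNING" },
 *				   { 1, "SLEEPING" }))
 *
 * This builds a static trace_print_flags table terminated by
 * { -1, NULL } and hands it, together with the per-cpu trace_seq "p",
 * to ftrace_print_symbols_seq().
 */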

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item),	\
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int								\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline int ftrace_get_offsets_##call(				\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
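
/*
 * A worked example of the offset/length encoding above, assuming
 * struct ftrace_raw_foo_bar places __data at offset 20: for
 * msg = "hi", __string(msg, msg) becomes
 * __dynamic_array(char, msg, strlen("hi") + 1), so
 *
 *	__data_offsets.msg = 20 | (3 << 16);	// offset 20, length 3
 *
 * and ftrace_get_offsets_foo_bar() returns __data_size == 3. The low
 * 16 bits locate the string from the start of the entry, the high
 * 16 bits carry its length.
 */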

#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static int ftrace_profile_enable_##name(struct ftrace_event_call *unused)\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>; <-- Here we assign the entries by the __field and
 *		      __array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= trace_event_raw_init,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call)					\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);
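
/*
 * With the definitions above, the TP_fast_assign() body of the
 * hypothetical foo_bar event,
 *
 *	__entry->bar = bar;
 *	__assign_str(msg, msg);
 *
 * would expand, inside the function generated just below, to roughly:
 *
 *	entry->bar = bar;
 *	strcpy((char *)((void *)entry
 *			+ (entry->__data_loc_msg & 0xffff)), msg);
 *
 * after tstruct has stored __data_offsets.msg into
 * entry->__data_loc_msg.
 */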

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static void ftrace_raw_event_id_##call(struct ftrace_event_call *event_call, \
				       proto)				\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static void ftrace_raw_event_##call(proto)				\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
{									\
	return register_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##template,		\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,		\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.show_format		= ftrace_format_##call,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
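
/*
 * The template indirection above is what makes DECLARE_EVENT_CLASS
 * pay off when several tracepoints share one layout; a hypothetical
 * pair:
 *
 *	DECLARE_EVENT_CLASS(foo_template,
 *		TP_PROTO(int bar), TP_ARGS(bar),
 *		TP_STRUCT__entry(__field(int, bar)),
 *		TP_fast_assign(__entry->bar = bar;),
 *		TP_printk("bar=%d", __entry->bar));
 *
 *	DEFINE_EVENT(foo_template, foo_begin, TP_PROTO(int bar), TP_ARGS(bar));
 *	DEFINE_EVENT(foo_template, foo_end, TP_PROTO(int bar), TP_ARGS(bar));
 *
 * generates the per-class code (raw struct, output, define_fields)
 * once, plus one ftrace_event_call and register/unregister stubs per
 * event.
 */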

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- set up the dynamic array offsets
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size); <- submit them to perf counter
 *
 * }
 */
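
/*
 * A worked instance of the size arithmetic above, assuming
 * sizeof(*entry) == 24 and __data_size == 13:
 *
 *	__entry_size = ALIGN(13 + 24 + sizeof(u32), sizeof(u64))
 *		     = ALIGN(41, 8) = 48;
 *	__entry_size -= sizeof(u32);	// 44
 *
 * so the record plus the u32 size word that perf prepends stays u64
 * aligned, and zeroing the final u64 of the buffer clears the
 * alignment padding before it can leak to userspace.
 */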

#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static void								\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	extern int perf_swevent_get_recursion_context(void);		\
	extern void perf_swevent_put_recursion_context(int rctx);	\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
	char *trace_buf;						\
	char *raw_data;							\
	int __cpu;							\
	int rctx;							\
	int pc;								\
									\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	local_irq_save(irq_flags);					\
									\
	rctx = perf_swevent_get_recursion_context();			\
	if (rctx < 0)							\
		goto end_recursion;					\
									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
	else								\
		trace_buf = rcu_dereference(perf_trace_buf);		\
									\
	if (!trace_buf)							\
		goto end;						\
									\
	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;		\
	entry = (struct ftrace_raw_##call *)raw_data;			\
	ent = &entry->ent;						\
	tracing_generic_entry_update(ent, irq_flags, pc);		\
	ent->type = event_call->id;					\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
		      __entry_size);					\
									\
end:									\
	perf_swevent_put_recursion_context(rctx);			\
end_recursion:								\
	local_irq_restore(irq_flags);					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static void ftrace_profile_##call(proto)			\
{								\
	struct ftrace_event_call *event_call = &event_##call;	\
								\
	ftrace_profile_templ_##template(event_call, args);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT