Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.34-rc1 808 lines 22 kB view raw
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


/* In stage 1 each field descriptor expands to a plain struct member. */
#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

/*
 * A dynamic array is stored in the record as a u32 "data location":
 * offset of the payload from the start of the entry in the low 16 bits,
 * payload length in the high 16 bits (see stage 2 and the
 * ftrace_get_offsets_<call>() generator further down).
 */
#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

/* A string is just a dynamic char array; -1 is a placeholder length. */
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};
/* Stage 1 only needs the event_<name> symbol declared, not defined. */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call			\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef __cpparg
#define __cpparg(arg...) arg

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, __cpparg(proto), __cpparg(args),	\
		__cpparg(tstruct), __cpparg(assign), __cpparg(print)) \

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

/* Fixed-size fields need no offset bookkeeping; expand to nothing. */
#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

/* Low 16 bits of __data_loc_<field> are the payload offset in the entry. */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_id_##call(int event_id, const char *name,		\
			    struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_id) {					\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
static notrace enum print_line_t					\
ftrace_raw_output_##name(struct trace_iterator *iter, int flags)	\
{									\
	return ftrace_raw_output_id_##template(event_##name.id,	\
					       #name, iter, flags);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags)	\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p;						\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.id) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	p = &get_cpu_var(ftrace_event_seq);				\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	put_cpu();							\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Register each field with the filter/format engine:
 * name, offset, size, signedness and filter type.
 */
#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

/*
 * NOTE(review): if tstruct expanded to nothing, ret would be returned
 * uninitialized here; in practice every event declares at least one
 * field, so ret is always assigned — confirm against event headers.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

/* Pack offset (low 16 bits) and byte length (high 16 bits) into the u32. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

/* Here the real string length replaces the -1 placeholder from stage 1. */
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_PERF_EVENTS

/*
 * Generate the functions needed for tracepoint perf_event support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later
 *
 * static int ftrace_profile_enable_<call>(void)
 * {
 *	return register_trace_<call>(ftrace_profile_<call>);
 * }
 *
 * static void ftrace_profile_disable_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)			\
									\
static void ftrace_profile_##name(proto);				\
									\
static notrace int							\
ftrace_profile_enable_##name(struct ftrace_event_call *unused)		\
{									\
	return register_trace_##name(ftrace_profile_##name);		\
}									\
									\
static notrace void							\
ftrace_profile_disable_##name(struct ftrace_event_call *unused)		\
{									\
	unregister_trace_##name(ftrace_profile_##name);			\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif /* CONFIG_PERF_EVENTS */

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	return register_trace_<call>(ftrace_event_<call>);
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= trace_event_raw_init,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 * }
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PROFILE_INIT(call)					\
	.profile_enable = ftrace_profile_enable_##call,			\
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

/* Copy the offsets computed by ftrace_get_offsets_<call>() into the record. */
#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)       \

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

/* perf-only assignments are dropped from the ftrace probe. */
#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
			   proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->id,			\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static notrace void ftrace_raw_event_##call(proto)			\
{									\
	ftrace_raw_event_id_##template(&event_##call, args);		\
}									\
									\
static notrace int							\
ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)		\
{									\
	return register_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static notrace void							\
ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)		\
{									\
	unregister_trace_##call(ftrace_raw_event_##call);		\
}									\
									\
static struct trace_event ftrace_event_type_##call = {			\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/* REC is the record name user space sees in the "print fmt:" line. */
#undef __entry
#define __entry REC
#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

/* Stringify the format so it can be exported to user space as-is. */
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static const char print_fmt_##call[] = print;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,			\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.print_fmt		= print_fmt_##template,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.system			= __stringify(TRACE_SYSTEM),		\
	.event			= &ftrace_event_type_##call,		\
	.raw_init		= trace_event_raw_init,			\
	.regfunc		= ftrace_raw_reg_event_##call,		\
	.unregfunc		= ftrace_raw_unreg_event_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.define_fields		= ftrace_define_fields_##template,	\
	_TRACE_PROFILE_INIT(call)					\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	//zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- affect our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		     __entry_size);  <- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

/* TP_perf_assign() hooks: capture the perf sample address and count. */
#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
ftrace_profile_templ_##call(struct ftrace_event_call *event_call,	\
			    proto)					\
{									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
	entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare(	\
		__entry_size, event_call->id, &rctx, &irq_flags);	\
	if (!entry)							\
		return;							\
	tstruct								\
									\
	{ assign; }							\
									\
	ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr,	\
			       __count, irq_flags);			\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)		\
static notrace void ftrace_profile_##call(proto)		\
{								\
	struct ftrace_event_call *event_call = &event_##call;	\
								\
	ftrace_profile_templ_##template(event_call, args);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT