Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing: Move non perf code out of perf.h

Commit ee53bbd17257 "tracing: Move the perf code out of trace_event.h" moved
more than just the perf code out of trace_event.h: it also removed a bit of
the tracing code. Move it back.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

+258 -258
-258
include/trace/perf.h
··· 1 - /* 2 - * Stage 4 of the trace events. 3 - * 4 - * Override the macros in <trace/trace_events.h> to include the following: 5 - * 6 - * For those macros defined with TRACE_EVENT: 7 - * 8 - * static struct trace_event_call event_<call>; 9 - * 10 - * static void trace_event_raw_event_<call>(void *__data, proto) 11 - * { 12 - * struct trace_event_file *trace_file = __data; 13 - * struct trace_event_call *event_call = trace_file->event_call; 14 - * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets; 15 - * unsigned long eflags = trace_file->flags; 16 - * enum event_trigger_type __tt = ETT_NONE; 17 - * struct ring_buffer_event *event; 18 - * struct trace_event_raw_<call> *entry; <-- defined in stage 1 19 - * struct ring_buffer *buffer; 20 - * unsigned long irq_flags; 21 - * int __data_size; 22 - * int pc; 23 - * 24 - * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { 25 - * if (eflags & EVENT_FILE_FL_TRIGGER_MODE) 26 - * event_triggers_call(trace_file, NULL); 27 - * if (eflags & EVENT_FILE_FL_SOFT_DISABLED) 28 - * return; 29 - * } 30 - * 31 - * local_save_flags(irq_flags); 32 - * pc = preempt_count(); 33 - * 34 - * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args); 35 - * 36 - * event = trace_event_buffer_lock_reserve(&buffer, trace_file, 37 - * event_<call>->event.type, 38 - * sizeof(*entry) + __data_size, 39 - * irq_flags, pc); 40 - * if (!event) 41 - * return; 42 - * entry = ring_buffer_event_data(event); 43 - * 44 - * { <assign>; } <-- Here we assign the entries by the __field and 45 - * __array macros. 
46 - * 47 - * if (eflags & EVENT_FILE_FL_TRIGGER_COND) 48 - * __tt = event_triggers_call(trace_file, entry); 49 - * 50 - * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, 51 - * &trace_file->flags)) 52 - * ring_buffer_discard_commit(buffer, event); 53 - * else if (!filter_check_discard(trace_file, entry, buffer, event)) 54 - * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 55 - * 56 - * if (__tt) 57 - * event_triggers_post_call(trace_file, __tt); 58 - * } 59 - * 60 - * static struct trace_event ftrace_event_type_<call> = { 61 - * .trace = trace_raw_output_<call>, <-- stage 2 62 - * }; 63 - * 64 - * static char print_fmt_<call>[] = <TP_printk>; 65 - * 66 - * static struct trace_event_class __used event_class_<template> = { 67 - * .system = "<system>", 68 - * .define_fields = trace_event_define_fields_<call>, 69 - * .fields = LIST_HEAD_INIT(event_class_##call.fields), 70 - * .raw_init = trace_event_raw_init, 71 - * .probe = trace_event_raw_event_##call, 72 - * .reg = trace_event_reg, 73 - * }; 74 - * 75 - * static struct trace_event_call event_<call> = { 76 - * .class = event_class_<template>, 77 - * { 78 - * .tp = &__tracepoint_<call>, 79 - * }, 80 - * .event = &ftrace_event_type_<call>, 81 - * .print_fmt = print_fmt_<call>, 82 - * .flags = TRACE_EVENT_FL_TRACEPOINT, 83 - * }; 84 - * // its only safe to use pointers when doing linker tricks to 85 - * // create an array. 
86 - * static struct trace_event_call __used 87 - * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; 88 - * 89 - */ 90 - 91 - #ifdef CONFIG_PERF_EVENTS 92 - 93 - #define _TRACE_PERF_PROTO(call, proto) \ 94 - static notrace void \ 95 - perf_trace_##call(void *__data, proto); 96 - 97 - #define _TRACE_PERF_INIT(call) \ 98 - .perf_probe = perf_trace_##call, 99 - 100 - #else 101 - #define _TRACE_PERF_PROTO(call, proto) 102 - #define _TRACE_PERF_INIT(call) 103 - #endif /* CONFIG_PERF_EVENTS */ 104 - 105 - #undef __entry 106 - #define __entry entry 107 - 108 - #undef __field 109 - #define __field(type, item) 110 - 111 - #undef __field_struct 112 - #define __field_struct(type, item) 113 - 114 - #undef __array 115 - #define __array(type, item, len) 116 - 117 - #undef __dynamic_array 118 - #define __dynamic_array(type, item, len) \ 119 - __entry->__data_loc_##item = __data_offsets.item; 120 - 121 - #undef __string 122 - #define __string(item, src) __dynamic_array(char, item, -1) 123 - 124 - #undef __assign_str 125 - #define __assign_str(dst, src) \ 126 - strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); 127 - 128 - #undef __bitmask 129 - #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) 130 - 131 - #undef __get_bitmask 132 - #define __get_bitmask(field) (char *)__get_dynamic_array(field) 133 - 134 - #undef __assign_bitmask 135 - #define __assign_bitmask(dst, src, nr_bits) \ 136 - memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) 137 - 138 - #undef TP_fast_assign 139 - #define TP_fast_assign(args...) 
args 140 - 141 - #undef __perf_addr 142 - #define __perf_addr(a) (a) 143 - 144 - #undef __perf_count 145 - #define __perf_count(c) (c) 146 - 147 - #undef __perf_task 148 - #define __perf_task(t) (t) 149 - 150 - #undef DECLARE_EVENT_CLASS 151 - #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 152 - \ 153 - static notrace void \ 154 - trace_event_raw_event_##call(void *__data, proto) \ 155 - { \ 156 - struct trace_event_file *trace_file = __data; \ 157 - struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ 158 - struct trace_event_buffer fbuffer; \ 159 - struct trace_event_raw_##call *entry; \ 160 - int __data_size; \ 161 - \ 162 - if (trace_trigger_soft_disabled(trace_file)) \ 163 - return; \ 164 - \ 165 - __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ 166 - \ 167 - entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ 168 - sizeof(*entry) + __data_size); \ 169 - \ 170 - if (!entry) \ 171 - return; \ 172 - \ 173 - tstruct \ 174 - \ 175 - { assign; } \ 176 - \ 177 - trace_event_buffer_commit(&fbuffer); \ 178 - } 179 - /* 180 - * The ftrace_test_probe is compiled out, it is only here as a build time check 181 - * to make sure that if the tracepoint handling changes, the ftrace probe will 182 - * fail to compile unless it too is updated. 
183 - */ 184 - 185 - #undef DEFINE_EVENT 186 - #define DEFINE_EVENT(template, call, proto, args) \ 187 - static inline void ftrace_test_probe_##call(void) \ 188 - { \ 189 - check_trace_callback_type_##call(trace_event_raw_event_##template); \ 190 - } 191 - 192 - #undef DEFINE_EVENT_PRINT 193 - #define DEFINE_EVENT_PRINT(template, name, proto, args, print) 194 - 195 - #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 196 - 197 - #undef __entry 198 - #define __entry REC 199 - 200 - #undef __print_flags 201 - #undef __print_symbolic 202 - #undef __print_hex 203 - #undef __get_dynamic_array 204 - #undef __get_dynamic_array_len 205 - #undef __get_str 206 - #undef __get_bitmask 207 - #undef __print_array 208 - 209 - #undef TP_printk 210 - #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) 211 - 212 - #undef DECLARE_EVENT_CLASS 213 - #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 214 - _TRACE_PERF_PROTO(call, PARAMS(proto)); \ 215 - static char print_fmt_##call[] = print; \ 216 - static struct trace_event_class __used __refdata event_class_##call = { \ 217 - .system = TRACE_SYSTEM_STRING, \ 218 - .define_fields = trace_event_define_fields_##call, \ 219 - .fields = LIST_HEAD_INIT(event_class_##call.fields),\ 220 - .raw_init = trace_event_raw_init, \ 221 - .probe = trace_event_raw_event_##call, \ 222 - .reg = trace_event_reg, \ 223 - _TRACE_PERF_INIT(call) \ 224 - }; 225 - 226 - #undef DEFINE_EVENT 227 - #define DEFINE_EVENT(template, call, proto, args) \ 228 - \ 229 - static struct trace_event_call __used event_##call = { \ 230 - .class = &event_class_##template, \ 231 - { \ 232 - .tp = &__tracepoint_##call, \ 233 - }, \ 234 - .event.funcs = &trace_event_type_funcs_##template, \ 235 - .print_fmt = print_fmt_##template, \ 236 - .flags = TRACE_EVENT_FL_TRACEPOINT, \ 237 - }; \ 238 - static struct trace_event_call __used \ 239 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 240 - 241 - #undef DEFINE_EVENT_PRINT 
242 - #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 243 - \ 244 - static char print_fmt_##call[] = print; \ 245 - \ 246 - static struct trace_event_call __used event_##call = { \ 247 - .class = &event_class_##template, \ 248 - { \ 249 - .tp = &__tracepoint_##call, \ 250 - }, \ 251 - .event.funcs = &trace_event_type_funcs_##call, \ 252 - .print_fmt = print_fmt_##call, \ 253 - .flags = TRACE_EVENT_FL_TRACEPOINT, \ 254 - }; \ 255 - static struct trace_event_call __used \ 256 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 257 - 258 - #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 259 1 260 2 #undef TRACE_SYSTEM_VAR 261 3
+258
include/trace/trace_events.h
··· 506 506 507 507 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 508 508 509 + /* 510 + * Stage 4 of the trace events. 511 + * 512 + * Override the macros in <trace/trace_events.h> to include the following: 513 + * 514 + * For those macros defined with TRACE_EVENT: 515 + * 516 + * static struct trace_event_call event_<call>; 517 + * 518 + * static void trace_event_raw_event_<call>(void *__data, proto) 519 + * { 520 + * struct trace_event_file *trace_file = __data; 521 + * struct trace_event_call *event_call = trace_file->event_call; 522 + * struct trace_event_data_offsets_<call> __maybe_unused __data_offsets; 523 + * unsigned long eflags = trace_file->flags; 524 + * enum event_trigger_type __tt = ETT_NONE; 525 + * struct ring_buffer_event *event; 526 + * struct trace_event_raw_<call> *entry; <-- defined in stage 1 527 + * struct ring_buffer *buffer; 528 + * unsigned long irq_flags; 529 + * int __data_size; 530 + * int pc; 531 + * 532 + * if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { 533 + * if (eflags & EVENT_FILE_FL_TRIGGER_MODE) 534 + * event_triggers_call(trace_file, NULL); 535 + * if (eflags & EVENT_FILE_FL_SOFT_DISABLED) 536 + * return; 537 + * } 538 + * 539 + * local_save_flags(irq_flags); 540 + * pc = preempt_count(); 541 + * 542 + * __data_size = trace_event_get_offsets_<call>(&__data_offsets, args); 543 + * 544 + * event = trace_event_buffer_lock_reserve(&buffer, trace_file, 545 + * event_<call>->event.type, 546 + * sizeof(*entry) + __data_size, 547 + * irq_flags, pc); 548 + * if (!event) 549 + * return; 550 + * entry = ring_buffer_event_data(event); 551 + * 552 + * { <assign>; } <-- Here we assign the entries by the __field and 553 + * __array macros. 
554 + * 555 + * if (eflags & EVENT_FILE_FL_TRIGGER_COND) 556 + * __tt = event_triggers_call(trace_file, entry); 557 + * 558 + * if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, 559 + * &trace_file->flags)) 560 + * ring_buffer_discard_commit(buffer, event); 561 + * else if (!filter_check_discard(trace_file, entry, buffer, event)) 562 + * trace_buffer_unlock_commit(buffer, event, irq_flags, pc); 563 + * 564 + * if (__tt) 565 + * event_triggers_post_call(trace_file, __tt); 566 + * } 567 + * 568 + * static struct trace_event ftrace_event_type_<call> = { 569 + * .trace = trace_raw_output_<call>, <-- stage 2 570 + * }; 571 + * 572 + * static char print_fmt_<call>[] = <TP_printk>; 573 + * 574 + * static struct trace_event_class __used event_class_<template> = { 575 + * .system = "<system>", 576 + * .define_fields = trace_event_define_fields_<call>, 577 + * .fields = LIST_HEAD_INIT(event_class_##call.fields), 578 + * .raw_init = trace_event_raw_init, 579 + * .probe = trace_event_raw_event_##call, 580 + * .reg = trace_event_reg, 581 + * }; 582 + * 583 + * static struct trace_event_call event_<call> = { 584 + * .class = event_class_<template>, 585 + * { 586 + * .tp = &__tracepoint_<call>, 587 + * }, 588 + * .event = &ftrace_event_type_<call>, 589 + * .print_fmt = print_fmt_<call>, 590 + * .flags = TRACE_EVENT_FL_TRACEPOINT, 591 + * }; 592 + * // its only safe to use pointers when doing linker tricks to 593 + * // create an array. 
594 + * static struct trace_event_call __used 595 + * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; 596 + * 597 + */ 598 + 599 + #ifdef CONFIG_PERF_EVENTS 600 + 601 + #define _TRACE_PERF_PROTO(call, proto) \ 602 + static notrace void \ 603 + perf_trace_##call(void *__data, proto); 604 + 605 + #define _TRACE_PERF_INIT(call) \ 606 + .perf_probe = perf_trace_##call, 607 + 608 + #else 609 + #define _TRACE_PERF_PROTO(call, proto) 610 + #define _TRACE_PERF_INIT(call) 611 + #endif /* CONFIG_PERF_EVENTS */ 612 + 613 + #undef __entry 614 + #define __entry entry 615 + 616 + #undef __field 617 + #define __field(type, item) 618 + 619 + #undef __field_struct 620 + #define __field_struct(type, item) 621 + 622 + #undef __array 623 + #define __array(type, item, len) 624 + 625 + #undef __dynamic_array 626 + #define __dynamic_array(type, item, len) \ 627 + __entry->__data_loc_##item = __data_offsets.item; 628 + 629 + #undef __string 630 + #define __string(item, src) __dynamic_array(char, item, -1) 631 + 632 + #undef __assign_str 633 + #define __assign_str(dst, src) \ 634 + strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); 635 + 636 + #undef __bitmask 637 + #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) 638 + 639 + #undef __get_bitmask 640 + #define __get_bitmask(field) (char *)__get_dynamic_array(field) 641 + 642 + #undef __assign_bitmask 643 + #define __assign_bitmask(dst, src, nr_bits) \ 644 + memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) 645 + 646 + #undef TP_fast_assign 647 + #define TP_fast_assign(args...) 
args 648 + 649 + #undef __perf_addr 650 + #define __perf_addr(a) (a) 651 + 652 + #undef __perf_count 653 + #define __perf_count(c) (c) 654 + 655 + #undef __perf_task 656 + #define __perf_task(t) (t) 657 + 658 + #undef DECLARE_EVENT_CLASS 659 + #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 660 + \ 661 + static notrace void \ 662 + trace_event_raw_event_##call(void *__data, proto) \ 663 + { \ 664 + struct trace_event_file *trace_file = __data; \ 665 + struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ 666 + struct trace_event_buffer fbuffer; \ 667 + struct trace_event_raw_##call *entry; \ 668 + int __data_size; \ 669 + \ 670 + if (trace_trigger_soft_disabled(trace_file)) \ 671 + return; \ 672 + \ 673 + __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ 674 + \ 675 + entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ 676 + sizeof(*entry) + __data_size); \ 677 + \ 678 + if (!entry) \ 679 + return; \ 680 + \ 681 + tstruct \ 682 + \ 683 + { assign; } \ 684 + \ 685 + trace_event_buffer_commit(&fbuffer); \ 686 + } 687 + /* 688 + * The ftrace_test_probe is compiled out, it is only here as a build time check 689 + * to make sure that if the tracepoint handling changes, the ftrace probe will 690 + * fail to compile unless it too is updated. 
691 + */ 692 + 693 + #undef DEFINE_EVENT 694 + #define DEFINE_EVENT(template, call, proto, args) \ 695 + static inline void ftrace_test_probe_##call(void) \ 696 + { \ 697 + check_trace_callback_type_##call(trace_event_raw_event_##template); \ 698 + } 699 + 700 + #undef DEFINE_EVENT_PRINT 701 + #define DEFINE_EVENT_PRINT(template, name, proto, args, print) 702 + 703 + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 704 + 705 + #undef __entry 706 + #define __entry REC 707 + 708 + #undef __print_flags 709 + #undef __print_symbolic 710 + #undef __print_hex 711 + #undef __get_dynamic_array 712 + #undef __get_dynamic_array_len 713 + #undef __get_str 714 + #undef __get_bitmask 715 + #undef __print_array 716 + 717 + #undef TP_printk 718 + #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) 719 + 720 + #undef DECLARE_EVENT_CLASS 721 + #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ 722 + _TRACE_PERF_PROTO(call, PARAMS(proto)); \ 723 + static char print_fmt_##call[] = print; \ 724 + static struct trace_event_class __used __refdata event_class_##call = { \ 725 + .system = TRACE_SYSTEM_STRING, \ 726 + .define_fields = trace_event_define_fields_##call, \ 727 + .fields = LIST_HEAD_INIT(event_class_##call.fields),\ 728 + .raw_init = trace_event_raw_init, \ 729 + .probe = trace_event_raw_event_##call, \ 730 + .reg = trace_event_reg, \ 731 + _TRACE_PERF_INIT(call) \ 732 + }; 733 + 734 + #undef DEFINE_EVENT 735 + #define DEFINE_EVENT(template, call, proto, args) \ 736 + \ 737 + static struct trace_event_call __used event_##call = { \ 738 + .class = &event_class_##template, \ 739 + { \ 740 + .tp = &__tracepoint_##call, \ 741 + }, \ 742 + .event.funcs = &trace_event_type_funcs_##template, \ 743 + .print_fmt = print_fmt_##template, \ 744 + .flags = TRACE_EVENT_FL_TRACEPOINT, \ 745 + }; \ 746 + static struct trace_event_call __used \ 747 + __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 748 + 749 + #undef DEFINE_EVENT_PRINT 
750 + #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ 751 + \ 752 + static char print_fmt_##call[] = print; \ 753 + \ 754 + static struct trace_event_call __used event_##call = { \ 755 + .class = &event_class_##template, \ 756 + { \ 757 + .tp = &__tracepoint_##call, \ 758 + }, \ 759 + .event.funcs = &trace_event_type_funcs_##call, \ 760 + .print_fmt = print_fmt_##call, \ 761 + .flags = TRACE_EVENT_FL_TRACEPOINT, \ 762 + }; \ 763 + static struct trace_event_call __used \ 764 + __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 765 + 766 + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)