/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in the event tracepoint header <trace/events/XXX.h>
 * to include the following:
 *
 * struct trace_event_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
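
/*
 * As an illustration (a hypothetical event, not defined in this file),
 * a tracepoint declared as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(int bar, const char *name),
 *		TP_ARGS(bar, name),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(name, name)
 *		),
 *		...);
 *
 * would roughly produce in this stage:
 *
 *	struct trace_event_raw_foo_bar {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 */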

#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()				\
	static const char TRACE_SYSTEM_STRING[] =	\
		__stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();
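
/*
 * For TRACE_SYSTEM sched, for instance, the line above expands to:
 *
 *	static const char str__sched__trace_system_name[] = "sched";
 */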

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)				\
	static struct trace_eval_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.eval_string = #a,			\
		.eval_value = a				\
	};						\
	static struct trace_eval_map __used		\
	__section("_ftrace_eval_map")			\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)				\
	static struct trace_eval_map __used __initdata	\
	__##TRACE_SYSTEM##_##a =			\
	{						\
		.system = TRACE_SYSTEM_STRING,		\
		.eval_string = "sizeof(" #a ")",	\
		.eval_value = sizeof(a)			\
	};						\
	static struct trace_eval_map __used		\
	__section("_ftrace_eval_map")			\
	*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
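
/*
 * Illustrative expansion (hypothetical value FOO_BAR, TRACE_SYSTEM foo):
 * TRACE_DEFINE_ENUM(FOO_BAR) becomes roughly
 *
 *	static struct trace_eval_map __used __initdata __foo_FOO_BAR = {
 *		.system = str__foo__trace_system_name,
 *		.eval_string = "FOO_BAR",
 *		.eval_value = FOO_BAR
 *	};
 *	static struct trace_eval_map __used
 *	__section("_ftrace_eval_map") *foo_FOO_BAR = &__foo_FOO_BAR;
 */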

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function handler
 * for events. That is, when all events have the same parameters and
 * differ only in their tracepoints. Each tracepoint can be defined
 * with DEFINE_EVENT, which will map the DECLARE_EVENT_CLASS to the
 * tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			    PARAMS(proto),			\
			    PARAMS(args),			\
			    PARAMS(tstruct),			\
			    PARAMS(assign),			\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
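
/*
 * Sketch of the class/event split (hypothetical names): two events
 * sharing one template are written as
 *
 *	DECLARE_EVENT_CLASS(foo_template, ...);
 *	DEFINE_EVENT(foo_template, foo_start, ...);
 *	DEFINE_EVENT(foo_template, foo_end, ...);
 *
 * while TRACE_EVENT(name, ...) is shorthand for a class with a single
 * event of the same name, as the definition above shows.
 */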

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __field_struct
#define __field_struct(type, item)	type	item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct trace_event_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct trace_event_call __used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
		assign, print, reg, unreg)			\
	TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)		\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)	\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>, which holds
 * the offset of the corresponding array from the beginning of the event.
 * The size of the array is also encoded, in the upper 16 bits of <item>.
 */
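
/*
 * Continuing the hypothetical foo_bar example from stage 1, this stage
 * would generate:
 *
 *	struct trace_event_data_offsets_foo_bar {
 *		u32	name;
 *	};
 */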

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef TRACE_DEFINE_SIZEOF
#define TRACE_DEFINE_SIZEOF(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct trace_event_data_offsets_##call {			\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in the event tracepoint header <trace/events/XXX.h>
 * to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct trace_event_raw_<call> *field; <-- defined in stage 1
 *	struct trace_seq *p = &iter->tmp_seq;
 *
 * -------(for event)-------
 *
 *	struct trace_entry *entry;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	return trace_output_call(iter, <call>, <TP_printk> "\n");
 *
 * ------(or, for event class)------
 *
 *	int ret;
 *
 *	field = (typeof(field))iter->ent;
 *
 *	ret = trace_raw_output_prep(iter, trace_event);
 *	if (ret != TRACE_TYPE_HANDLED)
 *		return ret;
 *
 *	trace_event_printf(iter, <TP_printk> "\n");
 *
 *	return trace_handle_return(s);
 * -------
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)
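
/*
 * Illustration of the __data_loc encoding: a 5-byte string whose payload
 * begins 40 bytes into the entry is stored as (5 << 16) | 40, so the low
 * 16 bits locate the data and the upper 16 bits give its length.
 */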

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		trace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		trace_print_flags_seq(p, delim, flag, __flags);		\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		trace_print_symbols_seq(p, value, symbols);		\
	})
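
/*
 * Typical TP_printk usage (illustrative values):
 *
 *	__print_symbolic(__entry->state, { 0, "RUNNING" }, { 1, "SLEEPING" })
 *
 * prints the matching name instead of the raw number; __print_flags()
 * does the same for OR'ed flag bits, joined by the given delimiter.
 */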

#undef __print_flags_u64
#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_flags_u64(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags_u64 __flags[] =	\
			{ flag_array, { -1, NULL } };			\
		trace_print_flags_seq_u64(p, delim, flag, __flags);	\
	})

#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		trace_print_symbols_seq_u64(p, value, symbols);		\
	})
#else
#define __print_flags_u64(flag, delim, flag_array...)			\
			__print_flags(flag, delim, flag_array)

#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len)					\
	trace_print_hex_seq(p, buf, buf_len, false)

#undef __print_hex_str
#define __print_hex_str(buf, buf_len)					\
	trace_print_hex_seq(p, buf, buf_len, true)

#undef __print_array
#define __print_array(array, count, el_size)				\
	({								\
		BUILD_BUG_ON(el_size != 1 && el_size != 2 &&		\
			     el_size != 4 && el_size != 8);		\
		trace_print_array_seq(p, array, count, el_size);	\
	})

#undef __print_hex_dump
#define __print_hex_dump(prefix_str, prefix_type,			\
			 rowsize, groupsize, buf, len, ascii)		\
	trace_print_hex_dump_seq(p, prefix_str, prefix_type,		\
				 rowsize, groupsize, buf, len, ascii)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct trace_event_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = trace_raw_output_prep(iter, trace_event);			\
	if (ret != TRACE_TYPE_HANDLED)					\
		return ret;						\
									\
	trace_event_printf(iter, print);				\
									\
	return trace_handle_return(s);					\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
			struct trace_event *event)			\
{									\
	struct trace_event_raw_##template *field;			\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return trace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions trace_event_type_funcs_##call = {	\
	.trace			= trace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(_type, _item, _filter_type) {			\
	.type = #_type, .name = #_item,					\
	.size = sizeof(_type), .align = __alignof__(_type),		\
	.is_signed = is_signed_type(_type), .filter_type = _filter_type },

#undef __field_struct_ext
#define __field_struct_ext(_type, _item, _filter_type) {		\
	.type = #_type, .name = #_item,					\
	.size = sizeof(_type), .align = __alignof__(_type),		\
	0, .filter_type = _filter_type },

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(_type, _item, _len) {					\
	.type = #_type"["__stringify(_len)"]", .name = #_item,		\
	.size = sizeof(_type[_len]), .align = __alignof__(_type),	\
	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },

#undef __dynamic_array
#define __dynamic_array(_type, _item, _len) {				\
	.type = "__data_loc " #_type "[]", .name = #_item,		\
	.size = 4, .align = 4,						\
	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static struct trace_event_fields trace_event_fields_##call[] = {	\
	tstruct								\
	{} };
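
/*
 * For the hypothetical foo_bar event this yields roughly (some
 * initializers elided):
 *
 *	static struct trace_event_fields trace_event_fields_foo_bar[] = {
 *		{ .type = "int", .name = "bar", .size = sizeof(int), ... },
 *		{ .type = "__data_loc char[]", .name = "name", .size = 4, ... },
 *		{} };
 */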

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (for a cpumask, num_possible_cpus() bits).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits, padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)				\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
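
/*
 * Worked example on a 64-bit kernel with nr_bits = 72:
 *
 *	__bitmask_size_in_bytes_raw(72) = (72 + 7) / 8 = 9
 *	__bitmask_size_in_longs(72)     = (9 + 7) / 8  = 2
 *	__bitmask_size_in_bytes(72)     = 2 * 8        = 16
 */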

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int trace_event_get_offsets_##call(		\
	struct trace_event_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct trace_event_raw_##call __maybe_unused *entry;		\
									\
	tstruct;							\
									\
	return __data_size;						\
}
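
/*
 * For the hypothetical foo_bar event, trace_event_get_offsets_foo_bar()
 * records where the "name" string will live inside __data and returns
 * strlen(name) + 1 as the total dynamic data size.
 */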

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in the event tracepoint header <trace/events/XXX.h>
 * to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *	struct trace_event_file *trace_file = __data;
 *	struct trace_event_call *event_call = trace_file->event_call;
 *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = trace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *	struct trace_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *			event_triggers_call(trace_file, NULL);
 *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(trace_file, entry);
 *
 *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *		     &trace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(trace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= trace_raw_output_<call>, <-- stage 3
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.fields_array		= trace_event_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_<call>.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= trace_event_raw_event_<call>,
 *	.reg			= trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *	.class			= &event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __section("_ftrace_events") *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)				\
	static notrace void					\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)					\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)					\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
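
/*
 * Typical TP_fast_assign usage for the hypothetical foo_bar event:
 *
 *	TP_fast_assign(
 *		__entry->bar = bar;
 *		__assign_str(name, name);
 *	),
 */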

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)				\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
trace_event_raw_event_##call(void *__data, proto)			\
{									\
	struct trace_event_file *trace_file = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_buffer fbuffer;				\
	struct trace_event_raw_##call *entry;				\
	int __data_size;						\
									\
	if (trace_trigger_soft_disabled(trace_file))			\
		return;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
				 sizeof(*entry) + __data_size);		\
									\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	trace_event_buffer_commit(&fbuffer);				\
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array
#undef __print_hex_dump

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
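
/*
 * With this definition, a TP_printk("bar=%d name=%s", __entry->bar,
 * __get_str(name)) from the hypothetical foo_bar event becomes the
 * literal string
 *
 *	"\"bar=%d name=%s\", REC->bar, __get_str(name)"
 *
 * which is what user space sees in the event's "format" file.
 */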

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static char print_fmt_##call[] = print;					\
static struct trace_event_class __used __refdata event_class_##call = { \
	.system			= TRACE_SYSTEM_STRING,			\
	.fields_array		= trace_event_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,		\
	.probe			= trace_event_raw_event_##call,		\
	.reg			= trace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__section("_ftrace_events") *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static char print_fmt_##call[] = print;					\
									\
static struct trace_event_call __used event_##call = {			\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &trace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct trace_event_call __used					\
__section("_ftrace_events") *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)