Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracepoint: Have tracepoints created with DECLARE_TRACE() have _tp suffix

Most tracepoints in the kernel are created with TRACE_EVENT(). The
TRACE_EVENT() macro (along with DECLARE_EVENT_CLASS() and DEFINE_EVENT() —
in reality, TRACE_EVENT() is just a helper macro that calls those other two
macros) will create not only a tracepoint (the function trace_<event>()
used in the kernel), it also exposes the tracepoint to user space along
with defining what fields will be saved by that tracepoint.

There are a few places where tracepoints are created in the kernel that are
not exposed to user space via tracefs. They can only be accessed from code
within the kernel. These tracepoints are created with DECLARE_TRACE().

Most of these tracepoints end with "_tp". This is useful because when a
developer sees that suffix, they know that the tracepoint is for in-kernel
use only (meaning it can only be accessed inside the kernel, either directly
by the kernel or indirectly via modules and BPF programs) and is not exposed
to user space.

Instead of relying on convention alone to add "_tp", enforce it by making
DECLARE_TRACE() append the "_tp" suffix to the tracepoint name. This
requires adding DECLARE_TRACE_EVENT() macros for the TRACE_EVENT() macro
to use that keep the original name.

Link: https://lore.kernel.org/all/20250418083351.20a60e64@gandalf.local.home/

Cc: netdev <netdev@vger.kernel.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Ahern <dsahern@kernel.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Cc: Gabriele Monaco <gmonaco@redhat.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Link: https://lore.kernel.org/20250510163730.092fad5b@gandalf.local.home
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

+83 -49
+11 -6
Documentation/trace/tracepoints.rst
··· 71 71 void somefct(void) 72 72 { 73 73 ... 74 - trace_subsys_eventname(arg, task); 74 + trace_subsys_eventname_tp(arg, task); 75 75 ... 76 76 } 77 77 ··· 129 129 for (i = 0; i < count; i++) 130 130 tot += calculate_nuggets(); 131 131 132 - trace_foo_bar(tot); 132 + trace_foo_bar_tp(tot); 133 133 } 134 134 135 - All trace_<tracepoint>() calls have a matching trace_<tracepoint>_enabled() 135 + All trace_<tracepoint>_tp() calls have a matching trace_<tracepoint>_enabled() 136 136 function defined that returns true if the tracepoint is enabled and 137 - false otherwise. The trace_<tracepoint>() should always be within the 137 + false otherwise. The trace_<tracepoint>_tp() should always be within the 138 138 block of the if (trace_<tracepoint>_enabled()) to prevent races between 139 139 the tracepoint being enabled and the check being seen. 140 140 ··· 143 143 with jump labels and avoid conditional branches. 144 144 145 145 .. note:: The convenience macro TRACE_EVENT provides an alternative way to 146 - define tracepoints. Check http://lwn.net/Articles/379903, 146 + define tracepoints. Note, DECLARE_TRACE(foo) creates a function 147 + "trace_foo_tp()" whereas TRACE_EVENT(foo) creates a function 148 + "trace_foo()", and also exposes the tracepoint as a trace event in 149 + /sys/kernel/tracing/events directory. Check http://lwn.net/Articles/379903, 147 150 http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362 148 151 for a series of articles with more details. 149 152 ··· 162 159 163 160 void do_trace_foo_bar_wrapper(args) 164 161 { 165 - trace_foo_bar(args); 162 + trace_foo_bar_tp(args); // for tracepoints created via DECLARE_TRACE 163 + // or 164 + trace_foo_bar(args); // for tracepoints created via TRACE_EVENT 166 165 } 167 166 168 167 In the header file::
+26 -12
include/linux/tracepoint.h
··· 464 464 #endif 465 465 466 466 #define DECLARE_TRACE(name, proto, args) \ 467 - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ 467 + __DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args), \ 468 468 cpu_online(raw_smp_processor_id()), \ 469 469 PARAMS(void *__data, proto)) 470 470 471 471 #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ 472 - __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ 472 + __DECLARE_TRACE(name##_tp, PARAMS(proto), PARAMS(args), \ 473 473 cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ 474 474 PARAMS(void *__data, proto)) 475 475 476 476 #define DECLARE_TRACE_SYSCALL(name, proto, args) \ 477 + __DECLARE_TRACE_SYSCALL(name##_tp, PARAMS(proto), PARAMS(args), \ 478 + PARAMS(void *__data, proto)) 479 + 480 + #define DECLARE_TRACE_EVENT(name, proto, args) \ 481 + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ 482 + cpu_online(raw_smp_processor_id()), \ 483 + PARAMS(void *__data, proto)) 484 + 485 + #define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) \ 486 + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ 487 + cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ 488 + PARAMS(void *__data, proto)) 489 + 490 + #define DECLARE_TRACE_EVENT_SYSCALL(name, proto, args) \ 477 491 __DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args), \ 478 492 PARAMS(void *__data, proto)) 479 493 ··· 605 591 606 592 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) 607 593 #define DEFINE_EVENT(template, name, proto, args) \ 608 - DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 594 + DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args)) 609 595 #define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\ 610 - DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 596 + DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args)) 611 597 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ 612 - DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 598 + DECLARE_TRACE_EVENT(name, 
PARAMS(proto), PARAMS(args)) 613 599 #define DEFINE_EVENT_CONDITION(template, name, proto, \ 614 600 args, cond) \ 615 - DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ 601 + DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \ 616 602 PARAMS(args), PARAMS(cond)) 617 603 618 604 #define TRACE_EVENT(name, proto, args, struct, assign, print) \ 619 - DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 605 + DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args)) 620 606 #define TRACE_EVENT_FN(name, proto, args, struct, \ 621 607 assign, print, reg, unreg) \ 622 - DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 623 - #define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \ 608 + DECLARE_TRACE_EVENT(name, PARAMS(proto), PARAMS(args)) 609 + #define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \ 624 610 assign, print, reg, unreg) \ 625 - DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ 611 + DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \ 626 612 PARAMS(args), PARAMS(cond)) 627 613 #define TRACE_EVENT_CONDITION(name, proto, args, cond, \ 628 614 struct, assign, print) \ 629 - DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ 615 + DECLARE_TRACE_EVENT_CONDITION(name, PARAMS(proto), \ 630 616 PARAMS(args), PARAMS(cond)) 631 617 #define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, \ 632 618 print, reg, unreg) \ 633 - DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args)) 619 + DECLARE_TRACE_EVENT_SYSCALL(name, PARAMS(proto), PARAMS(args)) 634 620 635 621 #define TRACE_EVENT_FLAGS(event, flag) 636 622
+4 -4
include/trace/bpf_probe.h
··· 119 119 120 120 #undef DECLARE_TRACE 121 121 #define DECLARE_TRACE(call, proto, args) \ 122 - __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ 123 - __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0) 122 + __BPF_DECLARE_TRACE(call##_tp, PARAMS(proto), PARAMS(args)) \ 123 + __DEFINE_EVENT(call##_tp, call##_tp, PARAMS(proto), PARAMS(args), 0) 124 124 125 125 #undef DECLARE_TRACE_WRITABLE 126 126 #define DECLARE_TRACE_WRITABLE(call, proto, args, size) \ 127 127 __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \ 128 - __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ 129 - __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size) 128 + __BPF_DECLARE_TRACE(call##_tp, PARAMS(proto), PARAMS(args)) \ 129 + __DEFINE_EVENT(call##_tp, call##_tp, PARAMS(proto), PARAMS(args), size) 130 130 131 131 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 132 132
+16 -1
include/trace/define_trace.h
··· 74 74 75 75 #undef DECLARE_TRACE 76 76 #define DECLARE_TRACE(name, proto, args) \ 77 - DEFINE_TRACE(name, PARAMS(proto), PARAMS(args)) 77 + DEFINE_TRACE(name##_tp, PARAMS(proto), PARAMS(args)) 78 78 79 79 #undef DECLARE_TRACE_CONDITION 80 80 #define DECLARE_TRACE_CONDITION(name, proto, args, cond) \ 81 + DEFINE_TRACE(name##_tp, PARAMS(proto), PARAMS(args)) 82 + 83 + #undef DECLARE_TRACE_EVENT 84 + #define DECLARE_TRACE_EVENT(name, proto, args) \ 85 + DEFINE_TRACE(name, PARAMS(proto), PARAMS(args)) 86 + 87 + #undef DECLARE_TRACE_EVENT_CONDITION 88 + #define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) \ 81 89 DEFINE_TRACE(name, PARAMS(proto), PARAMS(args)) 82 90 83 91 /* If requested, create helpers for calling these tracepoints from Rust. */ ··· 123 115 #undef DECLARE_TRACE_CONDITION 124 116 #define DECLARE_TRACE_CONDITION(name, proto, args, cond) 125 117 118 + #undef DECLARE_TRACE_EVENT 119 + #define DECLARE_TRACE_EVENT(name, proto, args) 120 + #undef DECLARE_TRACE_EVENT_CONDITION 121 + #define DECLARE_TRACE_EVENT_CONDITION(name, proto, args, cond) 122 + 126 123 #ifdef TRACEPOINTS_ENABLED 127 124 #include <trace/trace_events.h> 128 125 #include <trace/perf.h> ··· 149 136 #undef TRACE_HEADER_MULTI_READ 150 137 #undef DECLARE_TRACE 151 138 #undef DECLARE_TRACE_CONDITION 139 + #undef DECLARE_TRACE_EVENT 140 + #undef DECLARE_TRACE_EVENT_CONDITION 152 141 153 142 /* Only undef what we defined in this file */ 154 143 #ifdef UNDEF_TRACE_INCLUDE_FILE
+15 -15
include/trace/events/sched.h
··· 773 773 * 774 774 * Postfixed with _tp to make them easily identifiable in the code. 775 775 */ 776 - DECLARE_TRACE(pelt_cfs_tp, 776 + DECLARE_TRACE(pelt_cfs, 777 777 TP_PROTO(struct cfs_rq *cfs_rq), 778 778 TP_ARGS(cfs_rq)); 779 779 780 - DECLARE_TRACE(pelt_rt_tp, 780 + DECLARE_TRACE(pelt_rt, 781 781 TP_PROTO(struct rq *rq), 782 782 TP_ARGS(rq)); 783 783 784 - DECLARE_TRACE(pelt_dl_tp, 784 + DECLARE_TRACE(pelt_dl, 785 785 TP_PROTO(struct rq *rq), 786 786 TP_ARGS(rq)); 787 787 788 - DECLARE_TRACE(pelt_hw_tp, 788 + DECLARE_TRACE(pelt_hw, 789 789 TP_PROTO(struct rq *rq), 790 790 TP_ARGS(rq)); 791 791 792 - DECLARE_TRACE(pelt_irq_tp, 792 + DECLARE_TRACE(pelt_irq, 793 793 TP_PROTO(struct rq *rq), 794 794 TP_ARGS(rq)); 795 795 796 - DECLARE_TRACE(pelt_se_tp, 796 + DECLARE_TRACE(pelt_se, 797 797 TP_PROTO(struct sched_entity *se), 798 798 TP_ARGS(se)); 799 799 800 - DECLARE_TRACE(sched_cpu_capacity_tp, 800 + DECLARE_TRACE(sched_cpu_capacity, 801 801 TP_PROTO(struct rq *rq), 802 802 TP_ARGS(rq)); 803 803 804 - DECLARE_TRACE(sched_overutilized_tp, 804 + DECLARE_TRACE(sched_overutilized, 805 805 TP_PROTO(struct root_domain *rd, bool overutilized), 806 806 TP_ARGS(rd, overutilized)); 807 807 808 - DECLARE_TRACE(sched_util_est_cfs_tp, 808 + DECLARE_TRACE(sched_util_est_cfs, 809 809 TP_PROTO(struct cfs_rq *cfs_rq), 810 810 TP_ARGS(cfs_rq)); 811 811 812 - DECLARE_TRACE(sched_util_est_se_tp, 812 + DECLARE_TRACE(sched_util_est_se, 813 813 TP_PROTO(struct sched_entity *se), 814 814 TP_ARGS(se)); 815 815 816 - DECLARE_TRACE(sched_update_nr_running_tp, 816 + DECLARE_TRACE(sched_update_nr_running, 817 817 TP_PROTO(struct rq *rq, int change), 818 818 TP_ARGS(rq, change)); 819 819 820 - DECLARE_TRACE(sched_compute_energy_tp, 820 + DECLARE_TRACE(sched_compute_energy, 821 821 TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy, 822 822 unsigned long max_util, unsigned long busy_time), 823 823 TP_ARGS(p, dst_cpu, energy, max_util, busy_time)); 824 824 825 - 
DECLARE_TRACE(sched_entry_tp, 825 + DECLARE_TRACE(sched_entry, 826 826 TP_PROTO(bool preempt, unsigned long ip), 827 827 TP_ARGS(preempt, ip)); 828 828 829 - DECLARE_TRACE(sched_exit_tp, 829 + DECLARE_TRACE(sched_exit, 830 830 TP_PROTO(bool is_switch, unsigned long ip), 831 831 TP_ARGS(is_switch, ip)); 832 832 833 - DECLARE_TRACE_CONDITION(sched_set_state_tp, 833 + DECLARE_TRACE_CONDITION(sched_set_state, 834 834 TP_PROTO(struct task_struct *tsk, int state), 835 835 TP_ARGS(tsk, state), 836 836 TP_CONDITION(!!(tsk->__state) != !!state));
+1 -1
include/trace/events/tcp.h
··· 259 259 __entry->saddr_v6, __entry->daddr_v6) 260 260 ); 261 261 262 - DECLARE_TRACE(tcp_cwnd_reduction_tp, 262 + DECLARE_TRACE(tcp_cwnd_reduction, 263 263 TP_PROTO(const struct sock *sk, int newly_acked_sacked, 264 264 int newly_lost, int flag), 265 265 TP_ARGS(sk, newly_acked_sacked, newly_lost, flag)
+1 -1
tools/testing/selftests/bpf/progs/raw_tp_null.c
··· 10 10 int tid; 11 11 int i; 12 12 13 - SEC("tp_btf/bpf_testmod_test_raw_tp_null") 13 + SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp") 14 14 int BPF_PROG(test_raw_tp_null, struct sk_buff *skb) 15 15 { 16 16 struct task_struct *task = bpf_get_current_task_btf();
+1 -1
tools/testing/selftests/bpf/progs/raw_tp_null_fail.c
··· 8 8 char _license[] SEC("license") = "GPL"; 9 9 10 10 /* Ensure module parameter has PTR_MAYBE_NULL */ 11 - SEC("tp_btf/bpf_testmod_test_raw_tp_null") 11 + SEC("tp_btf/bpf_testmod_test_raw_tp_null_tp") 12 12 __failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") 13 13 int test_raw_tp_null_bpf_testmod_test_raw_tp_null_arg_1(void *ctx) { 14 14 asm volatile("r1 = *(u64 *)(r1 +0); r1 = *(u64 *)(r1 +0);" ::: __clobber_all);
+2 -2
tools/testing/selftests/bpf/progs/test_module_attach.c
··· 19 19 20 20 __u32 raw_tp_bare_write_sz = 0; 21 21 22 - SEC("raw_tp/bpf_testmod_test_write_bare") 22 + SEC("raw_tp/bpf_testmod_test_write_bare_tp") 23 23 int BPF_PROG(handle_raw_tp_bare, 24 24 struct task_struct *task, struct bpf_testmod_test_write_ctx *write_ctx) 25 25 { ··· 31 31 int raw_tp_writable_bare_early_ret = 0; 32 32 int raw_tp_writable_bare_out_val = 0; 33 33 34 - SEC("raw_tp.w/bpf_testmod_test_writable_bare") 34 + SEC("raw_tp.w/bpf_testmod_test_writable_bare_tp") 35 35 int BPF_PROG(handle_raw_tp_writable_bare, 36 36 struct bpf_testmod_test_writable_ctx *writable) 37 37 {
+2 -2
tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c
··· 6 6 #include "../test_kmods/bpf_testmod.h" 7 7 #include "bpf_misc.h" 8 8 9 - SEC("tp_btf/bpf_testmod_test_nullable_bare") 9 + SEC("tp_btf/bpf_testmod_test_nullable_bare_tp") 10 10 __failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") 11 11 int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx) 12 12 { 13 13 return nullable_ctx->len; 14 14 } 15 15 16 - SEC("tp_btf/bpf_testmod_test_nullable_bare") 16 + SEC("tp_btf/bpf_testmod_test_nullable_bare_tp") 17 17 int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx) 18 18 { 19 19 if (nullable_ctx)
+4 -4
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
··· 413 413 414 414 (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); 415 415 416 - (void)trace_bpf_testmod_test_raw_tp_null(NULL); 416 + (void)trace_bpf_testmod_test_raw_tp_null_tp(NULL); 417 417 418 418 bpf_testmod_test_struct_ops3(); 419 419 ··· 431 431 if (bpf_testmod_loop_test(101) > 100) 432 432 trace_bpf_testmod_test_read(current, &ctx); 433 433 434 - trace_bpf_testmod_test_nullable_bare(NULL); 434 + trace_bpf_testmod_test_nullable_bare_tp(NULL); 435 435 436 436 /* Magic number to enable writable tp */ 437 437 if (len == 64) { 438 438 struct bpf_testmod_test_writable_ctx writable = { 439 439 .val = 1024, 440 440 }; 441 - trace_bpf_testmod_test_writable_bare(&writable); 441 + trace_bpf_testmod_test_writable_bare_tp(&writable); 442 442 if (writable.early_ret) 443 443 return snprintf(buf, len, "%d\n", writable.val); 444 444 } ··· 470 470 .len = len, 471 471 }; 472 472 473 - trace_bpf_testmod_test_write_bare(current, &ctx); 473 + trace_bpf_testmod_test_write_bare_tp(current, &ctx); 474 474 475 475 return -EIO; /* always fail */ 476 476 }