Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing: Remove definition of trace_*_rcuidle()

The trace_*_rcuidle() variant of a tracepoint was used to handle places where a
tracepoint was located but RCU was not "watching". All those locations
have been removed, and RCU should be watching where all tracepoints are
located. We can now remove the trace_*_rcuidle() variant.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Link: https://lore.kernel.org/20241003181629.36209057@gandalf.local.home
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

+8 -78
+2 -48
include/linux/tracepoint.h
··· 197 197 #endif /* CONFIG_HAVE_STATIC_CALL */ 198 198 199 199 /* 200 - * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle 201 - * code that disallow any/all tracing/instrumentation when RCU isn't watching. 202 - */ 203 - #ifdef CONFIG_ARCH_WANTS_NO_INSTR 204 - #define RCUIDLE_COND(rcuidle) (rcuidle) 205 - #else 206 - /* srcu can't be used from NMI */ 207 - #define RCUIDLE_COND(rcuidle) (rcuidle && in_nmi()) 208 - #endif 209 - 210 - /* 211 200 * it_func[0] is never NULL because there is at least one element in the array 212 201 * when the array itself is non NULL. 213 202 */ 214 - #define __DO_TRACE(name, args, cond, rcuidle) \ 203 + #define __DO_TRACE(name, args, cond) \ 215 204 do { \ 216 205 int __maybe_unused __idx = 0; \ 217 206 \ 218 207 if (!(cond)) \ 219 208 return; \ 220 209 \ 221 - if (WARN_ONCE(RCUIDLE_COND(rcuidle), \ 222 - "Bad RCU usage for tracepoint")) \ 223 - return; \ 224 - \ 225 210 /* keep srcu and sched-rcu usage consistent */ \ 226 211 preempt_disable_notrace(); \ 227 212 \ 228 - /* \ 229 - * For rcuidle callers, use srcu since sched-rcu \ 230 - * doesn't work from the idle path. \ 231 - */ \ 232 - if (rcuidle) { \ 233 - __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ 234 - ct_irq_enter_irqson(); \ 235 - } \ 236 - \ 237 213 __DO_TRACE_CALL(name, TP_ARGS(args)); \ 238 - \ 239 - if (rcuidle) { \ 240 - ct_irq_exit_irqson(); \ 241 - srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ 242 - } \ 243 214 \ 244 215 preempt_enable_notrace(); \ 245 216 } while (0) 246 - 247 - #ifndef MODULE 248 - #define __DECLARE_TRACE_RCU(name, proto, args, cond) \ 249 - static inline void trace_##name##_rcuidle(proto) \ 250 - { \ 251 - if (static_branch_unlikely(&__tracepoint_##name.key)) \ 252 - __DO_TRACE(name, \ 253 - TP_ARGS(args), \ 254 - TP_CONDITION(cond), 1); \ 255 - } 256 - #else 257 - #define __DECLARE_TRACE_RCU(name, proto, args, cond) 258 - #endif 259 217 260 218 /* 261 219 * Make sure the alignment of the structure in the __tracepoints section will ··· 235 277 if (static_branch_unlikely(&__tracepoint_##name.key)) \ 236 278 __DO_TRACE(name, \ 237 279 TP_ARGS(args), \ 238 - TP_CONDITION(cond), 0); \ 280 + TP_CONDITION(cond)); \ 239 281 if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ 240 282 WARN_ONCE(!rcu_is_watching(), \ 241 283 "RCU not watching for tracepoint"); \ 242 284 } \ 243 285 } \ 244 - __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ 245 - PARAMS(cond)) \ 246 286 static inline int \ 247 287 register_trace_##name(void (*probe)(data_proto), void *data) \ 248 288 { \ ··· 330 374 #else /* !TRACEPOINTS_ENABLED */ 331 375 #define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ 332 376 static inline void trace_##name(proto) \ 333 - { } \ 334 - static inline void trace_##name##_rcuidle(proto) \ 335 377 { } \ 336 378 static inline int \ 337 379 register_trace_##name(void (*probe)(data_proto), \
-8
include/trace/events/preemptirq.h
··· 43 43 #else 44 44 #define trace_irq_enable(...) 45 45 #define trace_irq_disable(...) 46 - #define trace_irq_enable_rcuidle(...) 47 - #define trace_irq_disable_rcuidle(...) 48 46 #endif 49 47 50 48 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE ··· 56 58 #else 57 59 #define trace_preempt_enable(...) 58 60 #define trace_preempt_disable(...) 59 - #define trace_preempt_enable_rcuidle(...) 60 - #define trace_preempt_disable_rcuidle(...) 61 61 #endif 62 62 63 63 #endif /* _TRACE_PREEMPTIRQ_H */ ··· 65 69 #else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */ 66 70 #define trace_irq_enable(...) 67 71 #define trace_irq_disable(...) 68 - #define trace_irq_enable_rcuidle(...) 69 - #define trace_irq_disable_rcuidle(...) 70 72 #define trace_preempt_enable(...) 71 73 #define trace_preempt_disable(...) 72 - #define trace_preempt_enable_rcuidle(...) 73 - #define trace_preempt_disable_rcuidle(...) 74 74 #endif
+6 -20
kernel/trace/trace_preemptirq.c
··· 15 15 #define CREATE_TRACE_POINTS 16 16 #include <trace/events/preemptirq.h> 17 17 18 - /* 19 - * Use regular trace points on architectures that implement noinstr 20 - * tooling: these calls will only happen with RCU enabled, which can 21 - * use a regular tracepoint. 22 - * 23 - * On older architectures, use the rcuidle tracing methods (which 24 - * aren't NMI-safe - so exclude NMI contexts): 25 - */ 26 - #ifdef CONFIG_ARCH_WANTS_NO_INSTR 27 - #define trace(point) trace_##point 28 - #else 29 - #define trace(point) if (!in_nmi()) trace_##point##_rcuidle 30 - #endif 31 - 32 18 #ifdef CONFIG_TRACE_IRQFLAGS 33 19 /* Per-cpu variable to prevent redundant calls when IRQs already off */ 34 20 static DEFINE_PER_CPU(int, tracing_irq_cpu); ··· 28 42 void trace_hardirqs_on_prepare(void) 29 43 { 30 44 if (this_cpu_read(tracing_irq_cpu)) { 31 - trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1); 45 + trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1); 32 46 tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1); 33 47 this_cpu_write(tracing_irq_cpu, 0); 34 48 } ··· 39 53 void trace_hardirqs_on(void) 40 54 { 41 55 if (this_cpu_read(tracing_irq_cpu)) { 42 - trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1); 56 + trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1); 43 57 tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1); 44 58 this_cpu_write(tracing_irq_cpu, 0); 45 59 } ··· 61 75 if (!this_cpu_read(tracing_irq_cpu)) { 62 76 this_cpu_write(tracing_irq_cpu, 1); 63 77 tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1); 64 - trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1); 78 + trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1); 65 79 } 66 80 67 81 } ··· 75 89 if (!this_cpu_read(tracing_irq_cpu)) { 76 90 this_cpu_write(tracing_irq_cpu, 1); 77 91 tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1); 78 - trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1); 92 + trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1); 79 93 } 80 94 } 81 95 EXPORT_SYMBOL(trace_hardirqs_off); ··· 86 100 87 101 void trace_preempt_on(unsigned long a0, unsigned long a1) 88 102 { 89 - trace(preempt_enable)(a0, a1); 103 + trace_preempt_enable(a0, a1); 90 104 tracer_preempt_on(a0, a1); 91 105 } 92 106 93 107 void trace_preempt_off(unsigned long a0, unsigned long a1) 94 108 { 95 - trace(preempt_disable)(a0, a1); 109 + trace_preempt_disable(a0, a1); 96 110 tracer_preempt_off(a0, a1); 97 111 } 98 112 #endif
-2
scripts/tags.sh
··· 152 152 '/^BPF_CALL_[0-9]([[:space:]]*\([[:alnum:]_]*\).*/\1/' 153 153 '/^COMPAT_SYSCALL_DEFINE[0-9]([[:space:]]*\([[:alnum:]_]*\).*/compat_sys_\1/' 154 154 '/^TRACE_EVENT([[:space:]]*\([[:alnum:]_]*\).*/trace_\1/' 155 - '/^TRACE_EVENT([[:space:]]*\([[:alnum:]_]*\).*/trace_\1_rcuidle/' 156 155 '/^DEFINE_EVENT([^,)]*,[[:space:]]*\([[:alnum:]_]*\).*/trace_\1/' 157 - '/^DEFINE_EVENT([^,)]*,[[:space:]]*\([[:alnum:]_]*\).*/trace_\1_rcuidle/' 158 156 '/^DEFINE_INSN_CACHE_OPS([[:space:]]*\([[:alnum:]_]*\).*/get_\1_slot/' 159 157 '/^DEFINE_INSN_CACHE_OPS([[:space:]]*\([[:alnum:]_]*\).*/free_\1_slot/' 160 158 '/^PAGEFLAG([[:space:]]*\([[:alnum:]_]*\).*/Page\1/'