Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

context_tracking, rcu: Rename rcu_dyntick trace event into rcu_watching

The "rcu_dyntick" naming convention has now been turned into "rcu_watching" for
all helpers; align the trace event to that.

To add to the confusion, the strings passed to the trace event are now
reversed: when RCU "starts" the dyntick / EQS state, it "stops" watching.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>

Authored by Valentin Schneider and committed by Neeraj Upadhyay
4f336dc0 c3dcd90b

+14 -14
+9 -9
include/trace/events/rcu.h
··· 466 466 /* 467 467 * Tracepoint for dyntick-idle entry/exit events. These take 2 strings 468 468 * as argument: 469 - * polarity: "Start", "End", "StillNonIdle" for entering, exiting or still not 470 - * being in dyntick-idle mode. 469 + * polarity: "Start", "End", "StillWatching" for entering, exiting or still not 470 + * being in EQS mode. 471 471 * context: "USER" or "IDLE" or "IRQ". 472 472 * NMIs nested in IRQs are inferred with nesting > 1 in IRQ context. 473 473 * 474 474 * These events also take a pair of numbers, which indicate the nesting 475 475 * depth before and after the event of interest, and a third number that is 476 - * the ->dynticks counter. Note that task-related and interrupt-related 476 + * the RCU_WATCHING counter. Note that task-related and interrupt-related 477 477 * events use two separate counters, and that the "++=" and "--=" events 478 478 * for irq/NMI will change the counter by two, otherwise by one. 479 479 */ 480 - TRACE_EVENT_RCU(rcu_dyntick, 480 + TRACE_EVENT_RCU(rcu_watching, 481 481 482 - TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks), 482 + TP_PROTO(const char *polarity, long oldnesting, long newnesting, int counter), 483 483 484 - TP_ARGS(polarity, oldnesting, newnesting, dynticks), 484 + TP_ARGS(polarity, oldnesting, newnesting, counter), 485 485 486 486 TP_STRUCT__entry( 487 487 __field(const char *, polarity) 488 488 __field(long, oldnesting) 489 489 __field(long, newnesting) 490 - __field(int, dynticks) 490 + __field(int, counter) 491 491 ), 492 492 493 493 TP_fast_assign( 494 494 __entry->polarity = polarity; 495 495 __entry->oldnesting = oldnesting; 496 496 __entry->newnesting = newnesting; 497 - __entry->dynticks = dynticks; 497 + __entry->counter = counter; 498 498 ), 499 499 500 500 TP_printk("%s %lx %lx %#3x", __entry->polarity, 501 501 __entry->oldnesting, __entry->newnesting, 502 - __entry->dynticks & 0xfff) 502 + __entry->counter & 0xfff) 503 503 ); 504 504 505 505 /*
+5 -5
kernel/context_tracking.c
··· 137 137 138 138 instrumentation_begin(); 139 139 lockdep_assert_irqs_disabled(); 140 - trace_rcu_dyntick(TPS("Start"), ct_nesting(), 0, ct_rcu_watching()); 140 + trace_rcu_watching(TPS("End"), ct_nesting(), 0, ct_rcu_watching()); 141 141 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 142 142 rcu_preempt_deferred_qs(current); 143 143 ··· 182 182 // instrumentation for the noinstr ct_kernel_enter_state() 183 183 instrument_atomic_write(&ct->state, sizeof(ct->state)); 184 184 185 - trace_rcu_dyntick(TPS("End"), ct_nesting(), 1, ct_rcu_watching()); 185 + trace_rcu_watching(TPS("Start"), ct_nesting(), 1, ct_rcu_watching()); 186 186 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); 187 187 WRITE_ONCE(ct->nesting, 1); 188 188 WARN_ON_ONCE(ct_nmi_nesting()); ··· 219 219 * leave it in non-RCU-idle state. 220 220 */ 221 221 if (ct_nmi_nesting() != 1) { 222 - trace_rcu_dyntick(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2, 222 + trace_rcu_watching(TPS("--="), ct_nmi_nesting(), ct_nmi_nesting() - 2, 223 223 ct_rcu_watching()); 224 224 WRITE_ONCE(ct->nmi_nesting, /* No store tearing. */ 225 225 ct_nmi_nesting() - 2); ··· 228 228 } 229 229 230 230 /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */ 231 - trace_rcu_dyntick(TPS("Startirq"), ct_nmi_nesting(), 0, ct_rcu_watching()); 231 + trace_rcu_watching(TPS("Endirq"), ct_nmi_nesting(), 0, ct_rcu_watching()); 232 232 WRITE_ONCE(ct->nmi_nesting, 0); /* Avoid store tearing. */ 233 233 234 234 // instrumentation for the noinstr ct_kernel_exit_state() ··· 294 294 instrumentation_begin(); 295 295 } 296 296 297 - trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="), 297 + trace_rcu_watching(incby == 1 ? TPS("Startirq") : TPS("++="), 298 298 ct_nmi_nesting(), 299 299 ct_nmi_nesting() + incby, ct_rcu_watching()); 300 300 instrumentation_end();