Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

rcu: Add transitivity to remaining rcu_node ->lock acquisitions

The rule is that all acquisitions of the rcu_node structure's ->lock
must provide transitivity: the lock is not acquired all that frequently,
and sorting out exactly which acquisitions require transitivity and
which do not would be a maintenance nightmare. This commit therefore
supplies the needed transitivity to the remaining ->lock acquisitions.
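
For reference, the _rcu_node() acquisition wrappers applied by this patch
are not part of the diff itself; they were introduced by an earlier patch
in this series. A minimal sketch of their shape, following kernel/rcu/tree.h
of this era (formatting is illustrative, not verbatim):

    /*
     * Acquire the rcu_node's ->lock and enforce transitivity:
     * smp_mb__after_unlock_lock() upgrades the prior-unlock/this-lock
     * pair to a full memory barrier.
     */
    #define raw_spin_lock_rcu_node(p)					\
    do {								\
    	raw_spin_lock(&(p)->lock);					\
    	smp_mb__after_unlock_lock();					\
    } while (0)

    /* As above, but also disable interrupts and save their state. */
    #define raw_spin_lock_irqsave_rcu_node(p, flags)			\
    do {								\
    	raw_spin_lock_irqsave(&(p)->lock, flags);			\
    	smp_mb__after_unlock_lock();					\
    } while (0)

Note that the unlock paths below are unchanged: transitivity only needs to
be enforced at acquisition time, so plain raw_spin_unlock_irqrestore()
still pairs correctly with the new wrappers.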

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

Total: +14 -14

kernel/rcu/tree.c (+12 -12)
@@ -1214,7 +1214,7 @@
 	struct rcu_node *rnp;
 
 	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask != 0) {
 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
 				if (rnp->qsmask & (1UL << cpu))
@@ -1237,7 +1237,7 @@
 
 	/* Only let one CPU complain about others per time interval. */
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
 	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1256,7 +1256,7 @@
 	       rsp->name);
 	print_cpu_stall_info_begin();
 	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
 			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
@@ -1327,7 +1327,7 @@
 
 	rcu_dump_cpu_stacks(rsp);
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -2897,7 +2897,7 @@
 	/* Does this CPU require a not-yet-started grace period? */
 	local_irq_save(flags);
 	if (cpu_needs_another_gp(rsp, rdp)) {
-		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
+		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
 		needwake = rcu_start_gp(rsp);
 		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
 		if (needwake)
@@ -3718,7 +3718,7 @@
 			mask_ofl_ipi &= ~mask;
 		} else {
 			/* Failed, raced with offline. */
-			raw_spin_lock_irqsave(&rnp->lock, flags);
+			raw_spin_lock_irqsave_rcu_node(rnp, flags);
 			if (cpu_online(cpu) &&
 			    (rnp->expmask & mask)) {
 				raw_spin_unlock_irqrestore(&rnp->lock,
@@ -3727,8 +3727,8 @@
 				if (cpu_online(cpu) &&
 				    (rnp->expmask & mask))
 					goto retry_ipi;
-				raw_spin_lock_irqsave(&rnp->lock,
-						      flags);
+				raw_spin_lock_irqsave_rcu_node(rnp,
+							       flags);
 			}
 			if (!(rnp->expmask & mask))
 				mask_ofl_ipi &= ~mask;
@@ -4110,7 +4110,7 @@
 		rnp = rnp->parent;
 		if (rnp == NULL)
 			return;
-		raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
+		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
 		rnp->qsmaskinit |= mask;
 		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
 	}
@@ -4127,7 +4127,7 @@
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	/* Set up local state, ensuring consistent view of global state. */
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
@@ -4154,7 +4154,7 @@
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	/* Set up local state, ensuring consistent view of global state. */
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
@@ -4301,7 +4301,7 @@
 	t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
 	BUG_ON(IS_ERR(t));
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rsp->gp_kthread = t;
 	if (kthread_prio) {
 		sp.sched_priority = kthread_prio;

kernel/rcu/tree_plugin.h (+1 -1)
@@ -525,7 +525,7 @@
 	unsigned long flags;
 	struct task_struct *t;
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;

kernel/rcu/tree_trace.c (+1 -1)
@@ -319,7 +319,7 @@
 	unsigned long gpmax;
 	struct rcu_node *rnp = &rsp->node[0];
 
-	raw_spin_lock_irqsave(&rnp->lock, flags);
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	completed = READ_ONCE(rsp->completed);
 	gpnum = READ_ONCE(rsp->gpnum);
 	if (completed == gpnum)
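
A note on cost: smp_mb__after_unlock_lock() compiles away on architectures
where an UNLOCK+LOCK pair already acts as a full memory barrier, so these
conversions are free there. A sketch of the definitions as they stood around
this time, with PowerPC being the architecture that needs a real barrier:

    /* include/linux/spinlock.h: default, unlock+lock already orders fully. */
    #ifndef smp_mb__after_unlock_lock
    #define smp_mb__after_unlock_lock()	do { } while (0)
    #endif

    /* arch/powerpc/include/asm/spinlock.h: a genuine full barrier. */
    #define smp_mb__after_unlock_lock()	smp_mb()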