Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu: Make CPU_DYING_IDLE an explicit call

Make the RCU CPU_DYING_IDLE callback an explicit function call, so it gets
invoked at the proper place.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182341.870167933@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+42 -41
+1 -3
include/linux/cpu.h
··· 101 101 * Called on the new cpu, just before 102 102 * enabling interrupts. Must not sleep, 103 103 * must not fail */ 104 - #define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached 105 - * idle loop. */ 106 - #define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly, 104 + #define CPU_BROKEN 0x000B /* CPU (unsigned)v did not die properly, 107 105 * perhaps due to preemption. */ 108 106 109 107 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
+2
include/linux/notifier.h
··· 47 47 * runtime initialization. 48 48 */ 49 49 50 + struct notifier_block; 51 + 50 52 typedef int (*notifier_fn_t)(struct notifier_block *nb, 51 53 unsigned long action, void *data); 52 54
+1 -3
include/linux/rcupdate.h
··· 332 332 void rcu_sched_qs(void); 333 333 void rcu_bh_qs(void); 334 334 void rcu_check_callbacks(int user); 335 - struct notifier_block; 336 - int rcu_cpu_notify(struct notifier_block *self, 337 - unsigned long action, void *hcpu); 335 + void rcu_report_dead(unsigned int cpu); 338 336 339 337 #ifndef CONFIG_TINY_RCU 340 338 void rcu_end_inkernel_boot(void);
+1
kernel/cpu.c
··· 762 762 BUG_ON(st->state != CPUHP_AP_OFFLINE); 763 763 st->state = CPUHP_AP_IDLE_DEAD; 764 764 complete(&st->done); 765 + rcu_report_dead(smp_processor_id()); 765 766 } 766 767 767 768 #else
+37 -33
kernel/rcu/tree.c
··· 2607 2607 } 2608 2608 2609 2609 /* 2610 - * The CPU is exiting the idle loop into the arch_cpu_idle_dead() 2611 - * function. We now remove it from the rcu_node tree's ->qsmaskinit 2612 - * bit masks. 2613 - */ 2614 - static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) 2615 - { 2616 - unsigned long flags; 2617 - unsigned long mask; 2618 - struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 2619 - struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 2620 - 2621 - if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2622 - return; 2623 - 2624 - /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 2625 - mask = rdp->grpmask; 2626 - raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 2627 - rnp->qsmaskinitnext &= ~mask; 2628 - raw_spin_unlock_irqrestore(&rnp->lock, flags); 2629 - } 2630 - 2631 - /* 2632 2610 * The CPU has been completely removed, and some other CPU is reporting 2633 2611 * this fact from process context. Do the remainder of the cleanup, 2634 2612 * including orphaning the outgoing CPU's RCU callbacks, and also ··· 4225 4247 rcu_init_percpu_data(cpu, rsp); 4226 4248 } 4227 4249 4250 + #ifdef CONFIG_HOTPLUG_CPU 4251 + /* 4252 + * The CPU is exiting the idle loop into the arch_cpu_idle_dead() 4253 + * function. We now remove it from the rcu_node tree's ->qsmaskinit 4254 + * bit masks. 4255 + */ 4256 + static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) 4257 + { 4258 + unsigned long flags; 4259 + unsigned long mask; 4260 + struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 4261 + struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ 4262 + 4263 + if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 4264 + return; 4265 + 4266 + /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 4267 + mask = rdp->grpmask; 4268 + raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 4269 + rnp->qsmaskinitnext &= ~mask; 4270 + raw_spin_unlock_irqrestore(&rnp->lock, flags); 4271 + } 4272 + 4273 + void rcu_report_dead(unsigned int cpu) 4274 + { 4275 + struct rcu_state *rsp; 4276 + 4277 + /* QS for any half-done expedited RCU-sched GP. */ 4278 + preempt_disable(); 4279 + rcu_report_exp_rdp(&rcu_sched_state, 4280 + this_cpu_ptr(rcu_sched_state.rda), true); 4281 + preempt_enable(); 4282 + for_each_rcu_flavor(rsp) 4283 + rcu_cleanup_dying_idle_cpu(cpu, rsp); 4284 + } 4285 + #endif 4286 + 4228 4287 /* 4229 4288 * Handle CPU online/offline notification events. 4230 4289 */ ··· 4292 4277 case CPU_DYING_FROZEN: 4293 4278 for_each_rcu_flavor(rsp) 4294 4279 rcu_cleanup_dying_cpu(rsp); 4295 - break; 4296 - case CPU_DYING_IDLE: 4297 - /* QS for any half-done expedited RCU-sched GP. */ 4298 - preempt_disable(); 4299 - rcu_report_exp_rdp(&rcu_sched_state, 4300 - this_cpu_ptr(rcu_sched_state.rda), true); 4301 - preempt_enable(); 4302 - 4303 - for_each_rcu_flavor(rsp) { 4304 - rcu_cleanup_dying_idle_cpu(cpu, rsp); 4305 - } 4306 4280 break; 4307 4281 case CPU_DEAD: 4308 4282 case CPU_DEAD_FROZEN:
-2
kernel/sched/idle.c
··· 220 220 rmb(); 221 221 222 222 if (cpu_is_offline(smp_processor_id())) { 223 - rcu_cpu_notify(NULL, CPU_DYING_IDLE, 224 - (void *)(long)smp_processor_id()); 225 223 cpuhp_report_idle_dead(); 226 224 arch_cpu_idle_dead(); 227 225 }