Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kernel/cpu.c: create a CPU_STARTING cpu_chain notifier

Right now, there is no notifier that is called on a new cpu, before the new
cpu begins processing interrupts/softirqs.
Various kernel functions would need that notification; e.g. kvm works around
its absence by calling smp_call_function_single(), and rcu polls cpu_online_map.

The patch adds a CPU_STARTING notification. It also adds a helper function
that sends the message to all cpu_chain handlers.

Tested on x86-64.
All other archs are untested. Especially on sparc, I'm not sure if I got
it right.

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by

Manfred Spraul and committed by
Ingo Molnar
e545a614 7686ad56

+51 -1
+3
arch/alpha/kernel/smp.c
··· 149 149 atomic_inc(&init_mm.mm_count); 150 150 current->active_mm = &init_mm; 151 151 152 + /* inform the notifiers about the new cpu */ 153 + notify_cpu_starting(cpuid); 154 + 152 155 /* Must have completely accurate bogos. */ 153 156 local_irq_enable(); 154 157
+1
arch/arm/kernel/smp.c
··· 277 277 /* 278 278 * Enable local interrupts. 279 279 */ 280 + notify_cpu_starting(cpu); 280 281 local_irq_enable(); 281 282 local_fiq_enable(); 282 283
+1
arch/cris/arch-v32/kernel/smp.c
··· 178 178 unmask_irq(IPI_INTR_VECT); 179 179 unmask_irq(TIMER0_INTR_VECT); 180 180 preempt_disable(); 181 + notify_cpu_starting(cpu); 181 182 local_irq_enable(); 182 183 183 184 cpu_set(cpu, cpu_online_map);
+1
arch/ia64/kernel/smpboot.c
··· 401 401 spin_lock(&vector_lock); 402 402 /* Setup the per cpu irq handling data structures */ 403 403 __setup_vector_irq(cpuid); 404 + notify_cpu_starting(cpuid); 404 405 cpu_set(cpuid, cpu_online_map); 405 406 per_cpu(cpu_state, cpuid) = CPU_ONLINE; 406 407 spin_unlock(&vector_lock);
+2
arch/m32r/kernel/smpboot.c
··· 498 498 { 499 499 int cpu_id = smp_processor_id(); 500 500 501 + notify_cpu_starting(cpu_id); 502 + 501 503 local_irq_enable(); 502 504 503 505 /* Get our bogomips. */
+2
arch/mips/kernel/smp.c
··· 121 121 cpu = smp_processor_id(); 122 122 cpu_data[cpu].udelay_val = loops_per_jiffy; 123 123 124 + notify_cpu_starting(cpu); 125 + 124 126 mp_ops->smp_finish(); 125 127 set_cpu_sibling_map(cpu); 126 128
+1
arch/powerpc/kernel/smp.c
··· 453 453 secondary_cpu_time_init(); 454 454 455 455 ipi_call_lock(); 456 + notify_cpu_starting(cpu); 456 457 cpu_set(cpu, cpu_online_map); 457 458 /* Update sibling maps */ 458 459 base = cpu_first_thread_in_core(cpu);
+2
arch/s390/kernel/smp.c
··· 585 585 /* Enable pfault pseudo page faults on this cpu. */ 586 586 pfault_init(); 587 587 588 + /* call cpu notifiers */ 589 + notify_cpu_starting(smp_processor_id()); 588 590 /* Mark this cpu as online */ 589 591 spin_lock(&call_lock); 590 592 cpu_set(smp_processor_id(), cpu_online_map);
+2
arch/sh/kernel/smp.c
··· 82 82 83 83 preempt_disable(); 84 84 85 + notify_cpu_starting(smp_processor_id()); 86 + 85 87 local_irq_enable(); 86 88 87 89 calibrate_delay();
+1
arch/sparc/kernel/sun4d_smp.c
··· 88 88 local_flush_cache_all(); 89 89 local_flush_tlb_all(); 90 90 91 + notify_cpu_starting(cpuid); 91 92 /* 92 93 * Unblock the master CPU _only_ when the scheduler state 93 94 * of all secondary CPUs will be up-to-date, so after
+2
arch/sparc/kernel/sun4m_smp.c
··· 71 71 local_flush_cache_all(); 72 72 local_flush_tlb_all(); 73 73 74 + notify_cpu_starting(cpuid); 75 + 74 76 /* Get our local ticker going. */ 75 77 smp_setup_percpu_timer(); 76 78
+1
arch/um/kernel/smp.c
··· 85 85 while (!cpu_isset(cpu, smp_commenced_mask)) 86 86 cpu_relax(); 87 87 88 + notify_cpu_starting(cpu); 88 89 cpu_set(cpu, cpu_online_map); 89 90 default_idle(); 90 91 return 0;
+1
arch/x86/kernel/smpboot.c
··· 257 257 end_local_APIC_setup(); 258 258 map_cpu_to_logical_apicid(); 259 259 260 + notify_cpu_starting(cpuid); 260 261 /* 261 262 * Get our bogomips. 262 263 *
+2
arch/x86/mach-voyager/voyager_smp.c
··· 448 448 449 449 VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid)); 450 450 451 + notify_cpu_starting(cpuid); 452 + 451 453 /* enable interrupts */ 452 454 local_irq_enable(); 453 455
+1
include/linux/cpu.h
··· 69 69 #endif 70 70 71 71 int cpu_up(unsigned int cpu); 72 + void notify_cpu_starting(unsigned int cpu); 72 73 extern void cpu_hotplug_init(void); 73 74 extern void cpu_maps_update_begin(void); 74 75 extern void cpu_maps_update_done(void);
+9 -1
include/linux/notifier.h
··· 213 213 #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 214 214 #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 215 215 #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, 216 - * not handling interrupts, soon dead */ 216 + * not handling interrupts, soon dead. 217 + * Called on the dying cpu, interrupts 218 + * are already disabled. Must not 219 + * sleep, must not fail */ 217 220 #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug 218 221 * lock is dropped */ 222 + #define CPU_STARTING 0x000A /* CPU (unsigned)v soon running. 223 + * Called on the new cpu, just before 224 + * enabling interrupts. Must not sleep, 225 + * must not fail */ 219 226 220 227 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend 221 228 * operation in progress ··· 236 229 #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) 237 230 #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) 238 231 #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) 232 + #define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN) 239 233 240 234 /* Hibernation and suspend events */ 241 235 #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
+19
kernel/cpu.c
··· 453 453 } 454 454 #endif /* CONFIG_PM_SLEEP_SMP */ 455 455 456 + /** 457 + * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers 458 + * @cpu: cpu that just started 459 + * 460 + * This function calls the cpu_chain notifiers with CPU_STARTING. 461 + * It must be called by the arch code on the new cpu, before the new cpu 462 + * enables interrupts and before the "boot" cpu returns from __cpu_up(). 463 + */ 464 + void notify_cpu_starting(unsigned int cpu) 465 + { 466 + unsigned long val = CPU_STARTING; 467 + 468 + #ifdef CONFIG_PM_SLEEP_SMP 469 + if (cpu_isset(cpu, frozen_cpus)) 470 + val = CPU_STARTING_FROZEN; 471 + #endif /* CONFIG_PM_SLEEP_SMP */ 472 + raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); 473 + } 474 + 456 475 #endif /* CONFIG_SMP */ 457 476 458 477 /*