Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[ARM SMP] Add hotplug CPU infrastructure

This patch adds the infrastructure to support hotplug CPU on ARM
platforms.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Russell King and committed by Russell King
a054a811 273c2cdb

+143
+7
arch/arm/Kconfig
··· 349 349 depends on SMP 350 350 default "4" 351 351 352 + config HOTPLUG_CPU 353 + bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 354 + depends on SMP && HOTPLUG && EXPERIMENTAL 355 + help 356 + Say Y here to experiment with turning CPUs off and on. CPUs 357 + can be controlled through /sys/devices/system/cpu. 358 + 352 359 config PREEMPT 353 360 bool "Preemptible Kernel (EXPERIMENTAL)" 354 361 depends on EXPERIMENTAL
+31
arch/arm/kernel/irq.c
··· 1050 1050 } 1051 1051 1052 1052 __setup("noirqdebug", noirqdebug_setup); 1053 + 1054 + #ifdef CONFIG_HOTPLUG_CPU 1055 + /* 1056 + * The CPU has been marked offline. Migrate IRQs off this CPU. If 1057 + * the affinity settings do not allow other CPUs, force them onto any 1058 + * available CPU. 1059 + */ 1060 + void migrate_irqs(void) 1061 + { 1062 + unsigned int i, cpu = smp_processor_id(); 1063 + 1064 + for (i = 0; i < NR_IRQS; i++) { 1065 + struct irqdesc *desc = irq_desc + i; 1066 + 1067 + if (desc->cpu == cpu) { 1068 + unsigned int newcpu = any_online_cpu(desc->affinity); 1069 + 1070 + if (newcpu == NR_CPUS) { 1071 + if (printk_ratelimit()) 1072 + printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", 1073 + i, cpu); 1074 + 1075 + cpus_setall(desc->affinity); 1076 + newcpu = any_online_cpu(desc->affinity); 1077 + } 1078 + 1079 + route_irq(desc, i, newcpu); 1080 + } 1081 + } 1082 + } 1083 + #endif /* CONFIG_HOTPLUG_CPU */
+9
arch/arm/kernel/process.c
··· 26 26 #include <linux/interrupt.h> 27 27 #include <linux/kallsyms.h> 28 28 #include <linux/init.h> 29 + #include <linux/cpu.h> 29 30 30 31 #include <asm/system.h> 31 32 #include <asm/io.h> ··· 106 105 /* endless idle loop with no priority at all */ 107 106 while (1) { 108 107 void (*idle)(void) = pm_idle; 108 + 109 + #ifdef CONFIG_HOTPLUG_CPU 110 + if (cpu_is_offline(smp_processor_id())) { 111 + leds_event(led_idle_start); 112 + cpu_die(); 113 + } 114 + #endif 115 + 109 116 if (!idle) 110 117 idle = default_idle; 111 118 preempt_disable();
+85
arch/arm/kernel/smp.c
··· 159 159 return ret; 160 160 } 161 161 162 + #ifdef CONFIG_HOTPLUG_CPU 163 + /* 164 + * __cpu_disable runs on the processor to be shutdown. 165 + */ 166 + int __cpuexit __cpu_disable(void) 167 + { 168 + unsigned int cpu = smp_processor_id(); 169 + struct task_struct *p; 170 + int ret; 171 + 172 + ret = mach_cpu_disable(cpu); 173 + if (ret) 174 + return ret; 175 + 176 + /* 177 + * Take this CPU offline. Once we clear this, we can't return, 178 + * and we must not schedule until we're ready to give up the cpu. 179 + */ 180 + cpu_clear(cpu, cpu_online_map); 181 + 182 + /* 183 + * OK - migrate IRQs away from this CPU 184 + */ 185 + migrate_irqs(); 186 + 187 + /* 188 + * Flush user cache and TLB mappings, and then remove this CPU 189 + * from the vm mask set of all processes. 190 + */ 191 + flush_cache_all(); 192 + local_flush_tlb_all(); 193 + 194 + read_lock(&tasklist_lock); 195 + for_each_process(p) { 196 + if (p->mm) 197 + cpu_clear(cpu, p->mm->cpu_vm_mask); 198 + } 199 + read_unlock(&tasklist_lock); 200 + 201 + return 0; 202 + } 203 + 204 + /* 205 + * called on the thread which is asking for a CPU to be shutdown - 206 + * waits until shutdown has completed, or it is timed out. 207 + */ 208 + void __cpuexit __cpu_die(unsigned int cpu) 209 + { 210 + if (!platform_cpu_kill(cpu)) 211 + printk("CPU%u: unable to kill\n", cpu); 212 + } 213 + 214 + /* 215 + * Called from the idle thread for the CPU which has been shutdown. 216 + * 217 + * Note that we disable IRQs here, but do not re-enable them 218 + * before returning to the caller. This is also the behaviour 219 + * of the other hotplug-cpu capable cores, so presumably coming 220 + * out of idle fixes this.
221 + */ 222 + void __cpuexit cpu_die(void) 223 + { 224 + unsigned int cpu = smp_processor_id(); 225 + 226 + local_irq_disable(); 227 + idle_task_exit(); 228 + 229 + /* 230 + * actual CPU shutdown procedure is at least platform (if not 231 + * CPU) specific 232 + */ 233 + platform_cpu_die(cpu); 234 + 235 + /* 236 + * Do not return to the idle loop - jump back to the secondary 237 + * cpu initialisation. There's some initialisation which needs 238 + * to be repeated to undo the effects of taking the CPU offline. 239 + */ 240 + __asm__("mov sp, %0\n" 241 + " b secondary_start_kernel" 242 + : 243 + : "r" ((void *)current->thread_info + THREAD_SIZE - 8)); 244 + } 245 + #endif /* CONFIG_HOTPLUG_CPU */ 246 + 162 247 /* 163 248 * This is the secondary CPU boot entry. We're using this CPUs 164 249 * idle thread stack, but a set of temporary page tables.
+1
include/asm-arm/irq.h
··· 47 47 struct pt_regs; 48 48 int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); 49 49 50 + extern void migrate_irqs(void); 50 51 #endif 51 52
+10
include/asm-arm/smp.h
··· 66 66 }; 67 67 extern struct secondary_data secondary_data; 68 68 69 + extern int __cpu_disable(void); 70 + extern int mach_cpu_disable(unsigned int cpu); 71 + 72 + extern void __cpu_die(unsigned int cpu); 73 + extern void cpu_die(void); 74 + 75 + extern void platform_cpu_die(unsigned int cpu); 76 + extern int platform_cpu_kill(unsigned int cpu); 77 + extern void platform_cpu_enable(unsigned int cpu); 78 + 69 79 #endif /* ifndef __ASM_ARM_SMP_H */