@@ -349,6 +349,13 @@
 	depends on SMP
 	default "4"
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 config PREEMPT
 	bool "Preemptible Kernel (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
+31
arch/arm/kernel/irq.c
@@ -1050,3 +1050,34 @@
 }
 
 __setup("noirqdebug", noirqdebug_setup);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+	unsigned int i, cpu = smp_processor_id();
+
+	for (i = 0; i < NR_IRQS; i++) {
+		struct irqdesc *desc = irq_desc + i;
+
+		if (desc->cpu == cpu) {
+			unsigned int newcpu = any_online_cpu(desc->affinity);
+
+			if (newcpu == NR_CPUS) {
+				if (printk_ratelimit())
+					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+					       i, cpu);
+
+				cpus_setall(desc->affinity);
+				newcpu = any_online_cpu(desc->affinity);
+			}
+
+			route_irq(desc, i, newcpu);
+		}
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+9
arch/arm/kernel/process.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -105,6 +106,14 @@
 	/* endless idle loop with no priority at all */
 	while (1) {
 		void (*idle)(void) = pm_idle;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if (cpu_is_offline(smp_processor_id())) {
+			leds_event(led_idle_start);
+			cpu_die();
+		}
+#endif
+
 		if (!idle)
 			idle = default_idle;
 		preempt_disable();
+100-9
arch/arm/kernel/smp.c
@@ -80,19 +80,23 @@
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
+	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
+	struct task_struct *idle = ci->idle;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	int ret;
 
 	/*
-	 * Spawn a new process manually.  Grab a pointer to
-	 * its task struct so we can mess with it
+	 * Spawn a new process manually, if not already done.
+	 * Grab a pointer to its task struct so we can mess with it
 	 */
-	idle = fork_idle(cpu);
-	if (IS_ERR(idle)) {
-		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
-		return PTR_ERR(idle);
+	if (!idle) {
+		idle = fork_idle(cpu);
+		if (IS_ERR(idle)) {
+			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
+			return PTR_ERR(idle);
+		}
+		ci->idle = idle;
 	}
 
 	/*
@@ -154,6 +158,91 @@
 
 	return ret;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpuexit __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mach_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	cpu_clear(cpu, cpu_online_map);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->mm)
+			cpu_clear(cpu, p->mm->cpu_vm_mask);
+	}
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpuexit __cpu_die(unsigned int cpu)
+{
+	if (!platform_cpu_kill(cpu))
+		printk("CPU%u: unable to kill\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __cpuexit cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	local_irq_disable();
+	idle_task_exit();
+
+	/*
+	 * actual CPU shutdown procedure is at least platform (if not
+	 * CPU) specific
+	 */
+	platform_cpu_die(cpu);
+
+	/*
+	 * Do not return to the idle loop - jump back to the secondary
+	 * cpu initialisation.  There's some initialisation which needs
+	 * to be repeated to undo the effects of taking the CPU offline.
+	 */
+	__asm__("mov	sp, %0\n"
+	"	b	secondary_start_kernel"
+		:
+		: "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+}
+#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
@@ -236,6 +325,8 @@
 {
 	unsigned int cpu = smp_processor_id();
 
+	per_cpu(cpu_data, cpu).idle = current;
+
 	cpu_set(cpu, cpu_possible_map);
 	cpu_set(cpu, cpu_present_map);
 	cpu_set(cpu, cpu_online_map);
@@ -309,8 +400,8 @@
 		printk(KERN_CRIT
 		       "CPU%u: smp_call_function timeout for %p(%p)\n"
 		       "       callmap %lx pending %lx, %swait\n",
-		       smp_processor_id(), func, info, callmap, data.pending,
-		       wait ? "" : "no ");
+		       smp_processor_id(), func, info, *cpus_addr(callmap),
+		       *cpus_addr(data.pending), wait ? "" : "no ");
 
 	/*
 	 * TRACE
@@ -486,10 +486,16 @@
 
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
-	 * After this point, we can start to touch devices again.
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+
+	/*
+	 * Finally flush the tlb again - this ensures that we're in a
+	 * consistent state wrt the writebuffer if the writebuffer needs
+	 * draining.  After this point, we can start to touch devices
+	 * again.
+	 */
+	local_flush_tlb_all();
 }
 
 /*
@@ -66,4 +66,14 @@
 };
 extern struct secondary_data secondary_data;
 
+extern int __cpu_disable(void);
+extern int mach_cpu_disable(unsigned int cpu);
+
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
+
+extern void platform_cpu_die(unsigned int cpu);
+extern int platform_cpu_kill(unsigned int cpu);
+extern void platform_cpu_enable(unsigned int cpu);
+
 #endif /* ifndef __ASM_ARM_SMP_H */
+3-3
include/asm-arm/spinlock.h
@@ -80,7 +80,7 @@
  */
 #define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
 
-static inline void __raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -97,7 +97,7 @@
 	smp_mb();
 }
 
-static inline int __raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	unsigned long tmp;
 
@@ -157,7 +157,7 @@
 	smp_mb();
 }
 
-static inline void __raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 