--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -706,6 +706,13 @@
 	  This is purely to save memory - each supported CPU adds
 	  approximately eight kilobytes to the kernel image.
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source "kernel/Kconfig.preempt"
 
 config GUSA
--- /dev/null
+++ b/arch/sh/include/asm/smp-ops.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_SMP_OPS_H
+#define __ASM_SH_SMP_OPS_H
+
+/* Per-platform SMP operations; registered once at boot via register_smp_ops(). */
+struct plat_smp_ops {
+	void (*smp_setup)(void);
+	unsigned int (*smp_processor_id)(void);
+	void (*prepare_cpus)(unsigned int max_cpus);
+	void (*start_cpu)(unsigned int cpu, unsigned long entry_point);
+	void (*send_ipi)(unsigned int cpu, unsigned int message);
+	int (*cpu_disable)(unsigned int cpu);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
+};
+
+extern struct plat_smp_ops *mp_ops;
+extern struct plat_smp_ops shx3_smp_ops;
+
+#ifdef CONFIG_SMP
+
+static inline void plat_smp_setup(void)
+{
+	BUG_ON(!mp_ops);
+	mp_ops->smp_setup();
+}
+
+static inline void play_dead(void)
+{
+	mp_ops->play_dead();
+}
+
+extern void register_smp_ops(struct plat_smp_ops *ops);
+
+#else
+
+static inline void plat_smp_setup(void)
+{
+	/* UP, nothing to do ... */
+}
+
+static inline void register_smp_ops(struct plat_smp_ops *ops)
+{
+}
+
+static inline void play_dead(void)
+{
+	BUG();
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* __ASM_SH_SMP_OPS_H */
+32-8
arch/sh/include/asm/smp.h
···3344#include <linux/bitops.h>55#include <linux/cpumask.h>66+#include <asm/smp-ops.h>6778#ifdef CONFIG_SMP89910#include <linux/spinlock.h>1011#include <asm/atomic.h>1112#include <asm/current.h>1313+#include <asm/percpu.h>12141315#define raw_smp_processor_id() (current_thread_info()->cpu)1414-#define hard_smp_processor_id() plat_smp_processor_id()15161617/* Map from cpu id to sequential logical cpu number. */1718extern int __cpu_number_map[NR_CPUS];···3130 SMP_MSG_NR, /* must be last */3231};33323333+DECLARE_PER_CPU(int, cpu_state);3434+3435void smp_message_recv(unsigned int msg);3536void smp_timer_broadcast(const struct cpumask *mask);36373738void local_timer_interrupt(void);3839void local_timer_setup(unsigned int cpu);3939-4040-void plat_smp_setup(void);4141-void plat_prepare_cpus(unsigned int max_cpus);4242-int plat_smp_processor_id(void);4343-void plat_start_cpu(unsigned int cpu, unsigned long entry_point);4444-void plat_send_ipi(unsigned int cpu, unsigned int message);4040+void local_timer_stop(unsigned int cpu);45414642void arch_send_call_function_single_ipi(int cpu);4747-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);4343+void arch_send_call_function_ipi_mask(const struct cpumask *mask);4444+4545+void native_play_dead(void);4646+void native_cpu_die(unsigned int cpu);4747+int native_cpu_disable(unsigned int cpu);4848+4949+#ifdef CONFIG_HOTPLUG_CPU5050+void play_dead_common(void);5151+extern int __cpu_disable(void);5252+5353+static inline void __cpu_die(unsigned int cpu)5454+{5555+ extern struct plat_smp_ops *mp_ops; /* private */5656+5757+ mp_ops->cpu_die(cpu);5858+}5959+#endif6060+6161+static inline int hard_smp_processor_id(void)6262+{6363+ extern struct plat_smp_ops *mp_ops; /* private */6464+6565+ if (!mp_ops)6666+ return 0; /* boot CPU */6767+6868+ return mp_ops->smp_processor_id();6969+}48704971#else5072
+64-6
arch/sh/kernel/cpu/sh4a/smp-shx3.c
···11/*22 * SH-X3 SMP33 *44- * Copyright (C) 2007 - 2008 Paul Mundt44+ * Copyright (C) 2007 - 2010 Paul Mundt55 * Copyright (C) 2007 Magnus Damm66 *77 * This file is subject to the terms and conditions of the GNU General Public···99 * for more details.1010 */1111#include <linux/init.h>1212+#include <linux/kernel.h>1213#include <linux/cpumask.h>1314#include <linux/smp.h>1415#include <linux/interrupt.h>1516#include <linux/io.h>1717+#include <linux/sched.h>1818+#include <linux/delay.h>1919+#include <linux/cpu.h>2020+#include <asm/sections.h>16211722#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))1823#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))19242025#define STBCR_MSTP 0x000000012126#define STBCR_RESET 0x000000022727+#define STBCR_SLEEP 0x000000042228#define STBCR_LTSLP 0x8000000023292430static irqreturn_t ipi_interrupt_handler(int irq, void *arg)···4337 return IRQ_HANDLED;4438}45394646-void __init plat_smp_setup(void)4040+static void shx3_smp_setup(void)4741{4842 unsigned int cpu = 0;4943 int i, num;···6963 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);7064}71657272-void __init plat_prepare_cpus(unsigned int max_cpus)6666+static void shx3_prepare_cpus(unsigned int max_cpus)7367{7468 int i;7569···8074 for (i = 0; i < SMP_MSG_NR; i++)8175 request_irq(104 + i, ipi_interrupt_handler,8276 IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i);7777+7878+ for (i = 0; i < max_cpus; i++)7979+ set_cpu_present(i, true);8380}84818585-void plat_start_cpu(unsigned int cpu, unsigned long entry_point)8282+static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)8683{8784 if (__in_29bit_mode())8885 __raw_writel(entry_point, RESET_REG(cpu));···10293 __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));10394}10495105105-int plat_smp_processor_id(void)9696+static unsigned int shx3_smp_processor_id(void)10697{10798 return __raw_readl(0xff000048); /* CPIDR */10899}109100110110-void plat_send_ipi(unsigned int cpu, unsigned int 
message)101101+static void shx3_send_ipi(unsigned int cpu, unsigned int message)111102{112103 unsigned long addr = 0xfe410070 + (cpu * 4);113104···115106116107 __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */117108}109109+110110+static void shx3_update_boot_vector(unsigned int cpu)111111+{112112+ __raw_writel(STBCR_MSTP, STBCR_REG(cpu));113113+ while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))114114+ cpu_relax();115115+ __raw_writel(STBCR_RESET, STBCR_REG(cpu));116116+}117117+118118+static int __cpuinit119119+shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)120120+{121121+ unsigned int cpu = (unsigned int)hcpu;122122+123123+ switch (action) {124124+ case CPU_UP_PREPARE:125125+ shx3_update_boot_vector(cpu);126126+ break;127127+ case CPU_ONLINE:128128+ pr_info("CPU %u is now online\n", cpu);129129+ break;130130+ case CPU_DEAD:131131+ break;132132+ }133133+134134+ return NOTIFY_OK;135135+}136136+137137+static struct notifier_block __cpuinitdata shx3_cpu_notifier = {138138+ .notifier_call = shx3_cpu_callback,139139+};140140+141141+static int __cpuinit register_shx3_cpu_notifier(void)142142+{143143+ register_hotcpu_notifier(&shx3_cpu_notifier);144144+ return 0;145145+}146146+late_initcall(register_shx3_cpu_notifier);147147+148148+struct plat_smp_ops shx3_smp_ops = {149149+ .smp_setup = shx3_smp_setup,150150+ .prepare_cpus = shx3_prepare_cpus,151151+ .start_cpu = shx3_start_cpu,152152+ .smp_processor_id = shx3_smp_processor_id,153153+ .send_ipi = shx3_send_ipi,154154+ .cpu_die = native_cpu_die,155155+ .cpu_disable = native_cpu_disable,156156+ .play_dead = native_play_dead,157157+};
+6-2
arch/sh/kernel/idle.c
···1919#include <asm/pgalloc.h>2020#include <asm/system.h>2121#include <asm/atomic.h>2222+#include <asm/smp.h>22232324void (*pm_idle)(void) = NULL;2425···9089 while (1) {9190 tick_nohz_stop_sched_tick(1);92919393- while (!need_resched() && cpu_online(cpu)) {9292+ while (!need_resched()) {9493 check_pgt_cache();9594 rmb();9595+9696+ if (cpu_is_offline(cpu))9797+ play_dead();96989799 local_irq_disable();98100 /* Don't trace irqs off for idle */···137133void stop_this_cpu(void *unused)138134{139135 local_irq_disable();140140- cpu_clear(smp_processor_id(), cpu_online_map);136136+ set_cpu_online(smp_processor_id(), false);141137142138 for (;;)143139 cpu_sleep();
+42
arch/sh/kernel/irq.c
···1212#include <linux/kernel_stat.h>1313#include <linux/seq_file.h>1414#include <linux/ftrace.h>1515+#include <linux/delay.h>1516#include <asm/processor.h>1617#include <asm/machvec.h>1718#include <asm/uaccess.h>···291290{292291 nr_irqs = sh_mv.mv_nr_irqs;293292 return 0;293293+}294294+#endif295295+296296+#ifdef CONFIG_HOTPLUG_CPU297297+static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)298298+{299299+ printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",300300+ irq, desc->node, cpu);301301+302302+ raw_spin_lock_irq(&desc->lock);303303+ desc->chip->set_affinity(irq, cpumask_of(cpu));304304+ raw_spin_unlock_irq(&desc->lock);305305+}306306+307307+/*308308+ * The CPU has been marked offline. Migrate IRQs off this CPU. If309309+ * the affinity settings do not allow other CPUs, force them onto any310310+ * available CPU.311311+ */312312+void migrate_irqs(void)313313+{314314+ struct irq_desc *desc;315315+ unsigned int irq, cpu = smp_processor_id();316316+317317+ for_each_irq_desc(irq, desc) {318318+ if (desc->node == cpu) {319319+ unsigned int newcpu = cpumask_any_and(desc->affinity,320320+ cpu_online_mask);321321+ if (newcpu >= nr_cpu_ids) {322322+ if (printk_ratelimit())323323+ printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",324324+ irq, cpu);325325+326326+ cpumask_setall(desc->affinity);327327+ newcpu = cpumask_any_and(desc->affinity,328328+ cpu_online_mask);329329+ }330330+331331+ route_irq(desc, irq, newcpu);332332+ }333333+ }294334}295335#endif
+5-1
arch/sh/kernel/localtimer.c
···4444{4545}46464747-void __cpuinit local_timer_setup(unsigned int cpu)4747+void local_timer_setup(unsigned int cpu)4848{4949 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);5050···5959 clk->cpumask = cpumask_of(cpu);60606161 clockevents_register_device(clk);6262+}6363+6464+void local_timer_stop(unsigned int cpu)6565+{6266}
···33 *44 * SMP support for the SuperH processors.55 *66- * Copyright (C) 2002 - 2008 Paul Mundt66+ * Copyright (C) 2002 - 2010 Paul Mundt77 * Copyright (C) 2006 - 2007 Akio Idehara88 *99 * This file is subject to the terms and conditions of the GNU General Public···3131int __cpu_number_map[NR_CPUS]; /* Map physical to logical */3232int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */33333434-static inline void __init smp_store_cpu_info(unsigned int cpu)3434+struct plat_smp_ops *mp_ops = NULL;3535+3636+/* State of each CPU */3737+DEFINE_PER_CPU(int, cpu_state) = { 0 };3838+3939+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)4040+{4141+ if (mp_ops)4242+ printk(KERN_WARNING "Overriding previously set SMP ops\n");4343+4444+ mp_ops = ops;4545+}4646+4747+static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)3548{3649 struct sh_cpuinfo *c = cpu_data + cpu;3750···59466047 init_new_context(current, &init_mm);6148 current_thread_info()->cpu = cpu;6262- plat_prepare_cpus(max_cpus);4949+ mp_ops->prepare_cpus(max_cpus);63506451#ifndef CONFIG_HOTPLUG_CPU6552 init_cpu_present(&cpu_possible_map);6653#endif6754}68556969-void __devinit smp_prepare_boot_cpu(void)5656+void __init smp_prepare_boot_cpu(void)7057{7158 unsigned int cpu = smp_processor_id();7259···75627663 set_cpu_online(cpu, true);7764 set_cpu_possible(cpu, true);6565+6666+ per_cpu(cpu_state, cpu) = CPU_ONLINE;7867}6868+6969+#ifdef CONFIG_HOTPLUG_CPU7070+void native_cpu_die(unsigned int cpu)7171+{7272+ unsigned int i;7373+7474+ for (i = 0; i < 10; i++) {7575+ smp_rmb();7676+ if (per_cpu(cpu_state, cpu) == CPU_DEAD) {7777+ if (system_state == SYSTEM_RUNNING)7878+ pr_info("CPU %u is now offline\n", cpu);7979+8080+ return;8181+ }8282+8383+ msleep(100);8484+ }8585+8686+ pr_err("CPU %u didn't die...\n", cpu);8787+}8888+8989+int native_cpu_disable(unsigned int cpu)9090+{9191+ return cpu == 0 ? 
-EPERM : 0;9292+}9393+9494+void play_dead_common(void)9595+{9696+ idle_task_exit();9797+ irq_ctx_exit(raw_smp_processor_id());9898+ mb();9999+100100+ __get_cpu_var(cpu_state) = CPU_DEAD;101101+ local_irq_disable();102102+}103103+104104+void native_play_dead(void)105105+{106106+ play_dead_common();107107+}108108+109109+int __cpu_disable(void)110110+{111111+ unsigned int cpu = smp_processor_id();112112+ struct task_struct *p;113113+ int ret;114114+115115+ ret = mp_ops->cpu_disable(cpu);116116+ if (ret)117117+ return ret;118118+119119+ /*120120+ * Take this CPU offline. Once we clear this, we can't return,121121+ * and we must not schedule until we're ready to give up the cpu.122122+ */123123+ set_cpu_online(cpu, false);124124+125125+ /*126126+ * OK - migrate IRQs away from this CPU127127+ */128128+ migrate_irqs();129129+130130+ /*131131+ * Stop the local timer for this CPU.132132+ */133133+ local_timer_stop(cpu);134134+135135+ /*136136+ * Flush user cache and TLB mappings, and then remove this CPU137137+ * from the vm mask set of all processes.138138+ */139139+ flush_cache_all();140140+ local_flush_tlb_all();141141+142142+ read_lock(&tasklist_lock);143143+ for_each_process(p)144144+ if (p->mm)145145+ cpumask_clear_cpu(cpu, mm_cpumask(p->mm));146146+ read_unlock(&tasklist_lock);147147+148148+ return 0;149149+}150150+#else /* ... 
!CONFIG_HOTPLUG_CPU */151151+int native_cpu_disable(void)152152+{153153+ return -ENOSYS;154154+}155155+156156+void native_cpu_die(unsigned int cpu)157157+{158158+ /* We said "no" in __cpu_disable */159159+ BUG();160160+}161161+162162+void native_play_dead(void)163163+{164164+ BUG();165165+}166166+#endif7916780168asmlinkage void __cpuinit start_secondary(void)81169{8282- unsigned int cpu;170170+ unsigned int cpu = smp_processor_id();83171 struct mm_struct *mm = &init_mm;8417285173 enable_mmu();86174 atomic_inc(&mm->mm_count);87175 atomic_inc(&mm->mm_users);88176 current->active_mm = mm;8989- BUG_ON(current->mm);90177 enter_lazy_tlb(mm, current);178178+ local_flush_tlb_all();9117992180 per_cpu_trap_init();9318194182 preempt_disable();951839696- notify_cpu_starting(smp_processor_id());184184+ notify_cpu_starting(cpu);9718598186 local_irq_enable();9999-100100- cpu = smp_processor_id();101187102188 /* Enable local timers */103189 local_timer_setup(cpu);···2049220593 smp_store_cpu_info(cpu);20694207207- cpu_set(cpu, cpu_online_map);9595+ set_cpu_online(cpu, true);9696+ per_cpu(cpu_state, cpu) = CPU_ONLINE;2089720998 cpu_idle();21099}···224111 struct task_struct *tsk;225112 unsigned long timeout;226113227227- tsk = fork_idle(cpu);228228- if (IS_ERR(tsk)) {229229- printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);230230- return PTR_ERR(tsk);114114+ tsk = cpu_data[cpu].idle;115115+ if (!tsk) {116116+ tsk = fork_idle(cpu);117117+ if (IS_ERR(tsk)) {118118+ pr_err("Failed forking idle task for cpu %d\n", cpu);119119+ return PTR_ERR(tsk);120120+ }121121+122122+ cpu_data[cpu].idle = tsk;231123 }124124+125125+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;232126233127 /* Fill in data in head.S for secondary cpus */234128 stack_start.sp = tsk->thread.sp;···247127 (unsigned long)&stack_start + sizeof(stack_start));248128 wmb();249129250250- plat_start_cpu(cpu, (unsigned long)_stext);130130+ mp_ops->start_cpu(cpu, (unsigned long)_stext);251131252132 timeout = jiffies + 
HZ;253133 while (time_before(jiffies, timeout)) {···255135 break;256136257137 udelay(10);138138+ barrier();258139 }259140260141 if (cpu_online(cpu))···280159281160void smp_send_reschedule(int cpu)282161{283283- plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);162162+ mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);284163}285164286165void smp_send_stop(void)···293172 int cpu;294173295174 for_each_cpu(cpu, mask)296296- plat_send_ipi(cpu, SMP_MSG_FUNCTION);175175+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);297176}298177299178void arch_send_call_function_single_ipi(int cpu)300179{301301- plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);180180+ mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);302181}303182304183void smp_timer_broadcast(const struct cpumask *mask)···306185 int cpu;307186308187 for_each_cpu(cpu, mask)309309- plat_send_ipi(cpu, SMP_MSG_TIMER);188188+ mp_ops->send_ipi(cpu, SMP_MSG_TIMER);310189}311190312191static void ipi_timer(void)···370249 * behalf of debugees, kswapd stealing pages from another process etc).371250 * Kanoj 07/00.372251 */373373-374252void flush_tlb_mm(struct mm_struct *mm)375253{376254 preempt_disable();
+5-1
arch/sh/kernel/topology.c
···5252#endif53535454 for_each_present_cpu(i) {5555- ret = register_cpu(&per_cpu(cpu_devices, i), i);5555+ struct cpu *c = &per_cpu(cpu_devices, i);5656+5757+ c->hotpluggable = 1;5858+5959+ ret = register_cpu(c, i);5660 if (unlikely(ret))5761 printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",5862 __func__, i, ret);