Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sh/smp'

+373 -40
+7
arch/sh/Kconfig
··· 706 706 This is purely to save memory - each supported CPU adds 707 707 approximately eight kilobytes to the kernel image. 708 708 709 + config HOTPLUG_CPU 710 + bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" 711 + depends on SMP && HOTPLUG && EXPERIMENTAL 712 + help 713 + Say Y here to experiment with turning CPUs off and on. CPUs 714 + can be controlled through /sys/devices/system/cpu. 715 + 709 716 source "kernel/Kconfig.preempt" 710 717 711 718 config GUSA
+3
arch/sh/boards/board-urquell.c
··· 24 24 #include <cpu/sh7786.h> 25 25 #include <asm/heartbeat.h> 26 26 #include <asm/sizes.h> 27 + #include <asm/smp-ops.h> 27 28 28 29 /* 29 30 * bit 1234 5678 ··· 204 203 printk(KERN_INFO "Renesas Technology Corp. Urquell support.\n"); 205 204 206 205 pm_power_off = urquell_power_off; 206 + 207 + register_smp_ops(&shx3_smp_ops); 207 208 } 208 209 209 210 /*
+3
arch/sh/boards/mach-sdk7786/setup.c
··· 21 21 #include <asm/heartbeat.h> 22 22 #include <asm/sizes.h> 23 23 #include <asm/reboot.h> 24 + #include <asm/smp-ops.h> 24 25 25 26 static struct resource heartbeat_resource = { 26 27 .start = 0x07fff8b0, ··· 190 189 191 190 machine_ops.restart = sdk7786_restart; 192 191 pm_power_off = sdk7786_power_off; 192 + 193 + register_smp_ops(&shx3_smp_ops); 193 194 } 194 195 195 196 /*
+7
arch/sh/boards/mach-x3proto/setup.c
··· 19 19 #include <linux/usb/r8a66597.h> 20 20 #include <linux/usb/m66592.h> 21 21 #include <asm/ilsel.h> 22 + #include <asm/smp-ops.h> 22 23 23 24 static struct resource heartbeat_resources[] = { 24 25 [0] = { ··· 153 152 __raw_writel(__raw_readl(0xfe410000) | (1 << 21), 0xfe410000); 154 153 } 155 154 155 + static void __init x3proto_setup(char **cmdline_p) 156 + { 157 + register_smp_ops(&shx3_smp_ops); 158 + } 159 + 156 160 static struct sh_machine_vector mv_x3proto __initmv = { 157 161 .mv_name = "x3proto", 162 + .mv_setup = x3proto_setup, 158 163 .mv_init_irq = x3proto_init_irq, 159 164 };
+3
arch/sh/include/asm/irq.h
··· 1 1 #ifndef __ASM_SH_IRQ_H 2 2 #define __ASM_SH_IRQ_H 3 3 4 + #include <linux/cpumask.h> 4 5 #include <asm/machvec.h> 5 6 6 7 /* ··· 51 50 #define irq_demux(irq) sh_mv.mv_irq_demux(irq) 52 51 53 52 void init_IRQ(void); 53 + void migrate_irqs(void); 54 + 54 55 asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs); 55 56 56 57 #ifdef CONFIG_IRQSTACKS
+4
arch/sh/include/asm/processor.h
··· 85 85 struct tlb_info itlb; 86 86 struct tlb_info dtlb; 87 87 88 + #ifdef CONFIG_SMP 89 + struct task_struct *idle; 90 + #endif 91 + 88 92 unsigned long flags; 89 93 } __attribute__ ((aligned(L1_CACHE_BYTES))); 90 94
+51
arch/sh/include/asm/smp-ops.h
··· 1 + #ifndef __ASM_SH_SMP_OPS_H 2 + #define __ASM_SH_SMP_OPS_H 3 + 4 + struct plat_smp_ops { 5 + void (*smp_setup)(void); 6 + unsigned int (*smp_processor_id)(void); 7 + void (*prepare_cpus)(unsigned int max_cpus); 8 + void (*start_cpu)(unsigned int cpu, unsigned long entry_point); 9 + void (*send_ipi)(unsigned int cpu, unsigned int message); 10 + int (*cpu_disable)(unsigned int cpu); 11 + void (*cpu_die)(unsigned int cpu); 12 + void (*play_dead)(void); 13 + }; 14 + 15 + extern struct plat_smp_ops *mp_ops; 16 + extern struct plat_smp_ops shx3_smp_ops; 17 + 18 + #ifdef CONFIG_SMP 19 + 20 + static inline void plat_smp_setup(void) 21 + { 22 + BUG_ON(!mp_ops); 23 + mp_ops->smp_setup(); 24 + } 25 + 26 + static inline void play_dead(void) 27 + { 28 + mp_ops->play_dead(); 29 + } 30 + 31 + extern void register_smp_ops(struct plat_smp_ops *ops); 32 + 33 + #else 34 + 35 + static inline void plat_smp_setup(void) 36 + { 37 + /* UP, nothing to do ... */ 38 + } 39 + 40 + static inline void register_smp_ops(struct plat_smp_ops *ops) 41 + { 42 + } 43 + 44 + static inline void play_dead(void) 45 + { 46 + BUG(); 47 + } 48 + 49 + #endif /* CONFIG_SMP */ 50 + 51 + #endif /* __ASM_SH_SMP_OPS_H */
+32 -8
arch/sh/include/asm/smp.h
··· 3 3 4 4 #include <linux/bitops.h> 5 5 #include <linux/cpumask.h> 6 + #include <asm/smp-ops.h> 6 7 7 8 #ifdef CONFIG_SMP 8 9 9 10 #include <linux/spinlock.h> 10 11 #include <asm/atomic.h> 11 12 #include <asm/current.h> 13 + #include <asm/percpu.h> 12 14 13 15 #define raw_smp_processor_id() (current_thread_info()->cpu) 14 - #define hard_smp_processor_id() plat_smp_processor_id() 15 16 16 17 /* Map from cpu id to sequential logical cpu number. */ 17 18 extern int __cpu_number_map[NR_CPUS]; ··· 31 30 SMP_MSG_NR, /* must be last */ 32 31 }; 33 32 33 + DECLARE_PER_CPU(int, cpu_state); 34 + 34 35 void smp_message_recv(unsigned int msg); 35 36 void smp_timer_broadcast(const struct cpumask *mask); 36 37 37 38 void local_timer_interrupt(void); 38 39 void local_timer_setup(unsigned int cpu); 39 - 40 - void plat_smp_setup(void); 41 - void plat_prepare_cpus(unsigned int max_cpus); 42 - int plat_smp_processor_id(void); 43 - void plat_start_cpu(unsigned int cpu, unsigned long entry_point); 44 - void plat_send_ipi(unsigned int cpu, unsigned int message); 40 + void local_timer_stop(unsigned int cpu); 45 41 46 42 void arch_send_call_function_single_ipi(int cpu); 47 - extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 43 + void arch_send_call_function_ipi_mask(const struct cpumask *mask); 44 + 45 + void native_play_dead(void); 46 + void native_cpu_die(unsigned int cpu); 47 + int native_cpu_disable(unsigned int cpu); 48 + 49 + #ifdef CONFIG_HOTPLUG_CPU 50 + void play_dead_common(void); 51 + extern int __cpu_disable(void); 52 + 53 + static inline void __cpu_die(unsigned int cpu) 54 + { 55 + extern struct plat_smp_ops *mp_ops; /* private */ 56 + 57 + mp_ops->cpu_die(cpu); 58 + } 59 + #endif 60 + 61 + static inline int hard_smp_processor_id(void) 62 + { 63 + extern struct plat_smp_ops *mp_ops; /* private */ 64 + 65 + if (!mp_ops) 66 + return 0; /* boot CPU */ 67 + 68 + return mp_ops->smp_processor_id(); 69 + } 48 70 49 71 #else 50 72
+64 -6
arch/sh/kernel/cpu/sh4a/smp-shx3.c
··· 1 1 /* 2 2 * SH-X3 SMP 3 3 * 4 - * Copyright (C) 2007 - 2008 Paul Mundt 4 + * Copyright (C) 2007 - 2010 Paul Mundt 5 5 * Copyright (C) 2007 Magnus Damm 6 6 * 7 7 * This file is subject to the terms and conditions of the GNU General Public ··· 9 9 * for more details. 10 10 */ 11 11 #include <linux/init.h> 12 + #include <linux/kernel.h> 12 13 #include <linux/cpumask.h> 13 14 #include <linux/smp.h> 14 15 #include <linux/interrupt.h> 15 16 #include <linux/io.h> 17 + #include <linux/sched.h> 18 + #include <linux/delay.h> 19 + #include <linux/cpu.h> 20 + #include <asm/sections.h> 16 21 17 22 #define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) 18 23 #define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12)) 19 24 20 25 #define STBCR_MSTP 0x00000001 21 26 #define STBCR_RESET 0x00000002 27 + #define STBCR_SLEEP 0x00000004 22 28 #define STBCR_LTSLP 0x80000000 23 29 24 30 static irqreturn_t ipi_interrupt_handler(int irq, void *arg) ··· 43 37 return IRQ_HANDLED; 44 38 } 45 39 46 - void __init plat_smp_setup(void) 40 + static void shx3_smp_setup(void) 47 41 { 48 42 unsigned int cpu = 0; 49 43 int i, num; ··· 69 63 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 70 64 } 71 65 72 - void __init plat_prepare_cpus(unsigned int max_cpus) 66 + static void shx3_prepare_cpus(unsigned int max_cpus) 73 67 { 74 68 int i; 75 69 ··· 80 74 for (i = 0; i < SMP_MSG_NR; i++) 81 75 request_irq(104 + i, ipi_interrupt_handler, 82 76 IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i); 77 + 78 + for (i = 0; i < max_cpus; i++) 79 + set_cpu_present(i, true); 83 80 } 84 81 85 - void plat_start_cpu(unsigned int cpu, unsigned long entry_point) 82 + static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point) 86 83 { 87 84 if (__in_29bit_mode()) 88 85 __raw_writel(entry_point, RESET_REG(cpu)); ··· 102 93 __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu)); 103 94 } 104 95 105 - int plat_smp_processor_id(void) 96 + static unsigned int shx3_smp_processor_id(void)
106 97 { 107 98 return __raw_readl(0xff000048); /* CPIDR */ 108 99 } 109 100 110 - void plat_send_ipi(unsigned int cpu, unsigned int message) 101 + static void shx3_send_ipi(unsigned int cpu, unsigned int message) 111 102 { 112 103 unsigned long addr = 0xfe410070 + (cpu * 4); 113 104 ··· 115 106 116 107 __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ 117 108 } 109 + 110 + static void shx3_update_boot_vector(unsigned int cpu) 111 + { 112 + __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); 113 + while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) 114 + cpu_relax(); 115 + __raw_writel(STBCR_RESET, STBCR_REG(cpu)); 116 + } 117 + 118 + static int __cpuinit 119 + shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 120 + { 121 + unsigned int cpu = (unsigned int)hcpu; 122 + 123 + switch (action) { 124 + case CPU_UP_PREPARE: 125 + shx3_update_boot_vector(cpu); 126 + break; 127 + case CPU_ONLINE: 128 + pr_info("CPU %u is now online\n", cpu); 129 + break; 130 + case CPU_DEAD: 131 + break; 132 + } 133 + 134 + return NOTIFY_OK; 135 + } 136 + 137 + static struct notifier_block __cpuinitdata shx3_cpu_notifier = { 138 + .notifier_call = shx3_cpu_callback, 139 + }; 140 + 141 + static int __cpuinit register_shx3_cpu_notifier(void) 142 + { 143 + register_hotcpu_notifier(&shx3_cpu_notifier); 144 + return 0; 145 + } 146 + late_initcall(register_shx3_cpu_notifier); 147 + 148 + struct plat_smp_ops shx3_smp_ops = { 149 + .smp_setup = shx3_smp_setup, 150 + .prepare_cpus = shx3_prepare_cpus, 151 + .start_cpu = shx3_start_cpu, 152 + .smp_processor_id = shx3_smp_processor_id, 153 + .send_ipi = shx3_send_ipi, 154 + .cpu_die = native_cpu_die, 155 + .cpu_disable = native_cpu_disable, 156 + .play_dead = native_play_dead, 157 + };
+6 -2
arch/sh/kernel/idle.c
··· 19 19 #include <asm/pgalloc.h> 20 20 #include <asm/system.h> 21 21 #include <asm/atomic.h> 22 + #include <asm/smp.h> 22 23 23 24 void (*pm_idle)(void) = NULL; 24 25 ··· 90 89 while (1) { 91 90 tick_nohz_stop_sched_tick(1); 92 91 93 - while (!need_resched() && cpu_online(cpu)) { 92 + while (!need_resched()) { 94 93 check_pgt_cache(); 95 94 rmb(); 95 + 96 + if (cpu_is_offline(cpu)) 97 + play_dead(); 96 98 97 99 local_irq_disable(); 98 100 /* Don't trace irqs off for idle */ ··· 137 133 void stop_this_cpu(void *unused) 138 134 { 139 135 local_irq_disable(); 140 - cpu_clear(smp_processor_id(), cpu_online_map); 136 + set_cpu_online(smp_processor_id(), false); 141 137 142 138 for (;;) 143 139 cpu_sleep();
+42
arch/sh/kernel/irq.c
··· 12 12 #include <linux/kernel_stat.h> 13 13 #include <linux/seq_file.h> 14 14 #include <linux/ftrace.h> 15 + #include <linux/delay.h> 15 16 #include <asm/processor.h> 16 17 #include <asm/machvec.h> 17 18 #include <asm/uaccess.h> ··· 291 290 { 292 291 nr_irqs = sh_mv.mv_nr_irqs; 293 292 return 0; 293 + } 294 + #endif 295 + 296 + #ifdef CONFIG_HOTPLUG_CPU 297 + static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) 298 + { 299 + printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n", 300 + irq, desc->node, cpu); 301 + 302 + raw_spin_lock_irq(&desc->lock); 303 + desc->chip->set_affinity(irq, cpumask_of(cpu)); 304 + raw_spin_unlock_irq(&desc->lock); 305 + } 306 + 307 + /* 308 + * The CPU has been marked offline. Migrate IRQs off this CPU. If 309 + * the affinity settings do not allow other CPUs, force them onto any 310 + * available CPU. 311 + */ 312 + void migrate_irqs(void) 313 + { 314 + struct irq_desc *desc; 315 + unsigned int irq, cpu = smp_processor_id(); 316 + 317 + for_each_irq_desc(irq, desc) { 318 + if (desc->node == cpu) { 319 + unsigned int newcpu = cpumask_any_and(desc->affinity, 320 + cpu_online_mask); 321 + if (newcpu >= nr_cpu_ids) { 322 + if (printk_ratelimit()) 323 + printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", 324 + irq, cpu); 325 + 326 + cpumask_setall(desc->affinity); 327 + newcpu = cpumask_any_and(desc->affinity, 328 + cpu_online_mask); 329 + } 330 + 331 + route_irq(desc, irq, newcpu); 332 + } 333 + } 294 334 } 295 335 #endif
+5 -1
arch/sh/kernel/localtimer.c
··· 44 44 { 45 45 } 46 46 47 - void __cpuinit local_timer_setup(unsigned int cpu) 47 + void local_timer_setup(unsigned int cpu) 48 48 { 49 49 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); 50 50 ··· 59 59 clk->cpumask = cpumask_of(cpu); 60 60 61 61 clockevents_register_device(clk); 62 + } 63 + 64 + void local_timer_stop(unsigned int cpu) 65 + { 62 66 }
+1 -2
arch/sh/kernel/setup.c
··· 39 39 #include <asm/irq.h> 40 40 #include <asm/setup.h> 41 41 #include <asm/clock.h> 42 + #include <asm/smp.h> 42 43 #include <asm/mmu_context.h> 43 44 44 45 /* ··· 460 459 if (likely(sh_mv.mv_setup)) 461 460 sh_mv.mv_setup(cmdline_p); 462 461 463 - #ifdef CONFIG_SMP 464 462 plat_smp_setup(); 465 - #endif 466 463 } 467 464 468 465 /* processor boot mode configuration */
+140 -20
arch/sh/kernel/smp.c
··· 3 3 * 4 4 * SMP support for the SuperH processors. 5 5 * 6 - * Copyright (C) 2002 - 2008 Paul Mundt 6 + * Copyright (C) 2002 - 2010 Paul Mundt 7 7 * Copyright (C) 2006 - 2007 Akio Idehara 8 8 * 9 9 * This file is subject to the terms and conditions of the GNU General Public ··· 31 31 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ 32 32 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ 33 33 34 - static inline void __init smp_store_cpu_info(unsigned int cpu) 34 + struct plat_smp_ops *mp_ops = NULL; 35 + 36 + /* State of each CPU */ 37 + DEFINE_PER_CPU(int, cpu_state) = { 0 }; 38 + 39 + void __cpuinit register_smp_ops(struct plat_smp_ops *ops) 40 + { 41 + if (mp_ops) 42 + printk(KERN_WARNING "Overriding previously set SMP ops\n"); 43 + 44 + mp_ops = ops; 45 + } 46 + 47 + static inline void __cpuinit smp_store_cpu_info(unsigned int cpu) 35 48 { 36 49 struct sh_cpuinfo *c = cpu_data + cpu; 37 50 ··· 59 46 60 47 init_new_context(current, &init_mm); 61 48 current_thread_info()->cpu = cpu; 62 - plat_prepare_cpus(max_cpus); 49 + mp_ops->prepare_cpus(max_cpus); 63 50 64 51 #ifndef CONFIG_HOTPLUG_CPU 65 52 init_cpu_present(&cpu_possible_map); 66 53 #endif 67 54 } 68 55 69 - void __devinit smp_prepare_boot_cpu(void) 56 + void __init smp_prepare_boot_cpu(void) 70 57 { 71 58 unsigned int cpu = smp_processor_id(); 72 59 ··· 75 62 76 63 set_cpu_online(cpu, true); 77 64 set_cpu_possible(cpu, true); 65 + 66 + per_cpu(cpu_state, cpu) = CPU_ONLINE; 78 67 } 68 + 69 + #ifdef CONFIG_HOTPLUG_CPU 70 + void native_cpu_die(unsigned int cpu) 71 + { 72 + unsigned int i; 73 + 74 + for (i = 0; i < 10; i++) { 75 + smp_rmb(); 76 + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 77 + if (system_state == SYSTEM_RUNNING) 78 + pr_info("CPU %u is now offline\n", cpu); 79 + 80 + return; 81 + } 82 + 83 + msleep(100); 84 + } 85 + 86 + pr_err("CPU %u didn't die...\n", cpu); 87 + } 88 + 89 + int native_cpu_disable(unsigned int cpu) 90 + { 91 + return cpu == 0 ? 
-EPERM : 0; 92 + } 93 + 94 + void play_dead_common(void) 95 + { 96 + idle_task_exit(); 97 + irq_ctx_exit(raw_smp_processor_id()); 98 + mb(); 99 + 100 + __get_cpu_var(cpu_state) = CPU_DEAD; 101 + local_irq_disable(); 102 + } 103 + 104 + void native_play_dead(void) 105 + { 106 + play_dead_common(); 107 + } 108 + 109 + int __cpu_disable(void) 110 + { 111 + unsigned int cpu = smp_processor_id(); 112 + struct task_struct *p; 113 + int ret; 114 + 115 + ret = mp_ops->cpu_disable(cpu); 116 + if (ret) 117 + return ret; 118 + 119 + /* 120 + * Take this CPU offline. Once we clear this, we can't return, 121 + * and we must not schedule until we're ready to give up the cpu. 122 + */ 123 + set_cpu_online(cpu, false); 124 + 125 + /* 126 + * OK - migrate IRQs away from this CPU 127 + */ 128 + migrate_irqs(); 129 + 130 + /* 131 + * Stop the local timer for this CPU. 132 + */ 133 + local_timer_stop(cpu); 134 + 135 + /* 136 + * Flush user cache and TLB mappings, and then remove this CPU 137 + * from the vm mask set of all processes. 138 + */ 139 + flush_cache_all(); 140 + local_flush_tlb_all(); 141 + 142 + read_lock(&tasklist_lock); 143 + for_each_process(p) 144 + if (p->mm) 145 + cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); 146 + read_unlock(&tasklist_lock); 147 + 148 + return 0; 149 + } 150 + #else /* ... 
!CONFIG_HOTPLUG_CPU */ 151 + int native_cpu_disable(void) 152 + { 153 + return -ENOSYS; 154 + } 155 + 156 + void native_cpu_die(unsigned int cpu) 157 + { 158 + /* We said "no" in __cpu_disable */ 159 + BUG(); 160 + } 161 + 162 + void native_play_dead(void) 163 + { 164 + BUG(); 165 + } 166 + #endif 79 167 80 168 asmlinkage void __cpuinit start_secondary(void) 81 169 { 82 - unsigned int cpu; 170 + unsigned int cpu = smp_processor_id(); 83 171 struct mm_struct *mm = &init_mm; 84 172 85 173 enable_mmu(); 86 174 atomic_inc(&mm->mm_count); 87 175 atomic_inc(&mm->mm_users); 88 176 current->active_mm = mm; 89 - BUG_ON(current->mm); 90 177 enter_lazy_tlb(mm, current); 178 + local_flush_tlb_all(); 91 179 92 180 per_cpu_trap_init(); 93 181 94 182 preempt_disable(); 95 183 96 - notify_cpu_starting(smp_processor_id()); 184 + notify_cpu_starting(cpu); 97 185 98 186 local_irq_enable(); 99 - 100 - cpu = smp_processor_id(); 101 187 102 188 /* Enable local timers */ 103 189 local_timer_setup(cpu); ··· 204 92 205 93 smp_store_cpu_info(cpu); 206 94 207 - cpu_set(cpu, cpu_online_map); 95 + set_cpu_online(cpu, true); 96 + per_cpu(cpu_state, cpu) = CPU_ONLINE; 208 97 209 98 cpu_idle(); 210 99 } ··· 224 111 struct task_struct *tsk; 225 112 unsigned long timeout; 226 113 227 - tsk = fork_idle(cpu); 228 - if (IS_ERR(tsk)) { 229 - printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu); 230 - return PTR_ERR(tsk); 114 + tsk = cpu_data[cpu].idle; 115 + if (!tsk) { 116 + tsk = fork_idle(cpu); 117 + if (IS_ERR(tsk)) { 118 + pr_err("Failed forking idle task for cpu %d\n", cpu); 119 + return PTR_ERR(tsk); 120 + } 121 + 122 + cpu_data[cpu].idle = tsk; 231 123 } 124 + 125 + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 232 126 233 127 /* Fill in data in head.S for secondary cpus */ 234 128 stack_start.sp = tsk->thread.sp; ··· 247 127 (unsigned long)&stack_start + sizeof(stack_start)); 248 128 wmb(); 249 129 250 - plat_start_cpu(cpu, (unsigned long)_stext); 130 + mp_ops->start_cpu(cpu, (unsigned 
long)_stext); 251 131 252 132 timeout = jiffies + HZ; 253 133 while (time_before(jiffies, timeout)) { ··· 255 135 break; 256 136 257 137 udelay(10); 138 + barrier(); 258 139 } 259 140 260 141 if (cpu_online(cpu)) ··· 280 159 281 160 void smp_send_reschedule(int cpu) 282 161 { 283 - plat_send_ipi(cpu, SMP_MSG_RESCHEDULE); 162 + mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE); 284 163 } 285 164 286 165 void smp_send_stop(void) ··· 293 172 int cpu; 294 173 295 174 for_each_cpu(cpu, mask) 296 - plat_send_ipi(cpu, SMP_MSG_FUNCTION); 175 + mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION); 297 176 } 298 177 299 178 void arch_send_call_function_single_ipi(int cpu) 300 179 { 301 - plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 180 + mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); 302 181 } 303 182 304 183 void smp_timer_broadcast(const struct cpumask *mask) ··· 306 185 int cpu; 307 186 308 187 for_each_cpu(cpu, mask) 309 - plat_send_ipi(cpu, SMP_MSG_TIMER); 188 + mp_ops->send_ipi(cpu, SMP_MSG_TIMER); 310 189 } 311 190 312 191 static void ipi_timer(void) ··· 370 249 * behalf of debugees, kswapd stealing pages from another process etc). 371 250 * Kanoj 07/00. 372 251 */ 373 - 374 252 void flush_tlb_mm(struct mm_struct *mm) 375 253 { 376 254 preempt_disable();
+5 -1
arch/sh/kernel/topology.c
··· 52 52 #endif 53 53 54 54 for_each_present_cpu(i) { 55 - ret = register_cpu(&per_cpu(cpu_devices, i), i); 55 + struct cpu *c = &per_cpu(cpu_devices, i); 56 + 57 + c->hotpluggable = 1; 58 + 59 + ret = register_cpu(c, i); 56 60 if (unlikely(ret)) 57 61 printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n", 58 62 __func__, i, ret);