Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: SMP: add PM/CPU hotplug support

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>

Authored by Graf Yang and committed by Mike Frysinger.
0b39db28 0d152c27

+138 -13
+5 -1
arch/blackfin/Kconfig
··· 250 250 depends on SMP 251 251 default 2 if BF561 252 252 253 + config HOTPLUG_CPU 254 + bool "Support for hot-pluggable CPUs" 255 + depends on SMP && HOTPLUG 256 + default y 257 + 253 258 config IRQ_PER_CPU 254 259 bool 255 260 depends on SMP ··· 1135 1130 endmenu 1136 1131 1137 1132 menu "Power management options" 1138 - depends on !SMP 1139 1133 1140 1134 source "kernel/power/Kconfig" 1141 1135
+7
arch/blackfin/include/asm/smp.h
··· 25 25 26 26 void smp_icache_flush_range_others(unsigned long start, 27 27 unsigned long end); 28 + #ifdef CONFIG_HOTPLUG_CPU 29 + void coreb_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); 30 + void cpu_die(void); 31 + void platform_cpu_die(void); 32 + int __cpu_disable(void); 33 + int __cpu_die(unsigned int cpu); 34 + #endif 28 35 29 36 #endif /* !__ASM_BLACKFIN_SMP_H */
+1
arch/blackfin/mach-bf561/Makefile
··· 6 6 7 7 obj-$(CONFIG_BF561_COREB) += coreb.o 8 8 obj-$(CONFIG_SMP) += smp.o secondary.o atomic.o 9 + obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+32
arch/blackfin/mach-bf561/hotplug.c
··· 1 + /* 2 + * Copyright 2007-2009 Analog Devices Inc. 3 + * Graff Yang <graf.yang@analog.com> 4 + * 5 + * Licensed under the GPL-2 or later. 6 + */ 7 + 8 + #include <asm/blackfin.h> 9 + #include <asm/smp.h> 10 + #define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) 11 + 12 + int hotplug_coreb; 13 + 14 + void platform_cpu_die(void) 15 + { 16 + unsigned long iwr[2] = {0, 0}; 17 + unsigned long bank = SIC_SYSIRQ(IRQ_SUPPLE_0) / 32; 18 + unsigned long bit = 1 << (SIC_SYSIRQ(IRQ_SUPPLE_0) % 32); 19 + 20 + hotplug_coreb = 1; 21 + 22 + iwr[bank] = bit; 23 + 24 + /* disable core timer */ 25 + bfin_write_TCNTL(0); 26 + 27 + /* clear ipi interrupt IRQ_SUPPLE_0 */ 28 + bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + 1))); 29 + SSYNC(); 30 + 31 + coreb_sleep(iwr[0], iwr[1], 0); 32 + }
+48 -2
arch/blackfin/mach-bf561/secondary.S
··· 11 11 #include <linux/init.h> 12 12 #include <asm/blackfin.h> 13 13 #include <asm/asm-offsets.h> 14 + #include <asm/trace.h> 14 15 15 16 __INIT 16 17 ··· 62 61 M1 = r0; 63 62 M2 = r0; 64 63 M3 = r0; 64 + 65 + trace_buffer_init(p0,r0); 65 66 66 67 /* Turn off the icache */ 67 68 p0.l = LO(IMEM_CONTROL); ··· 162 159 ENDPROC(_coreb_trampoline_start) 163 160 ENTRY(_coreb_trampoline_end) 164 161 162 + .section ".text" 163 + ENTRY(_set_sicb_iwr) 164 + P0.H = hi(SICB_IWR0); 165 + P0.L = lo(SICB_IWR0); 166 + P1.H = hi(SICB_IWR1); 167 + P1.L = lo(SICB_IWR1); 168 + [P0] = R0; 169 + [P1] = R1; 170 + SSYNC; 171 + RTS; 172 + ENDPROC(_set_sicb_iwr) 173 + 174 + ENTRY(_coreb_sleep) 175 + sp.l = lo(INITIAL_STACK); 176 + sp.h = hi(INITIAL_STACK); 177 + fp = sp; 178 + usp = sp; 179 + 180 + call _set_sicb_iwr; 181 + 182 + CLI R2; 183 + SSYNC; 184 + IDLE; 185 + STI R2; 186 + 187 + R0 = IWR_DISABLE_ALL; 188 + R1 = IWR_DISABLE_ALL; 189 + call _set_sicb_iwr; 190 + 191 + p0.h = hi(COREB_L1_CODE_START); 192 + p0.l = lo(COREB_L1_CODE_START); 193 + jump (p0); 194 + ENDPROC(_coreb_sleep) 195 + 196 + __CPUINIT 165 197 ENTRY(_coreb_start) 166 198 [--sp] = reti; 167 199 ··· 214 176 sp = [p0]; 215 177 usp = sp; 216 178 fp = sp; 179 + #ifdef CONFIG_HOTPLUG_CPU 180 + p0.l = _hotplug_coreb; 181 + p0.h = _hotplug_coreb; 182 + r0 = [p0]; 183 + cc = BITTST(r0, 0); 184 + if cc jump 3f; 185 + #endif 217 186 sp += -12; 218 187 call _init_pda 219 188 sp += 12; 189 + #ifdef CONFIG_HOTPLUG_CPU 190 + 3: 191 + #endif 220 192 call _secondary_start_kernel; 221 193 .L_exit: 222 194 jump.s .L_exit; 223 195 ENDPROC(_coreb_start) 224 - 225 - __FINIT
+10 -7
arch/blackfin/mach-bf561/smp.c
··· 65 65 bfin_write_SICB_IAR5(bfin_read_SICA_IAR5()); 66 66 bfin_write_SICB_IAR6(bfin_read_SICA_IAR6()); 67 67 bfin_write_SICB_IAR7(bfin_read_SICA_IAR7()); 68 + bfin_write_SICB_IWR0(IWR_DISABLE_ALL); 69 + bfin_write_SICB_IWR1(IWR_DISABLE_ALL); 68 70 SSYNC(); 69 71 70 72 /* Store CPU-private information to the cpu_data array. */ ··· 82 80 { 83 81 unsigned long timeout; 84 82 85 - /* CoreB already running?! */ 86 - BUG_ON((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0); 87 - 88 83 printk(KERN_INFO "Booting Core B.\n"); 89 84 90 85 spin_lock(&boot_lock); 91 86 92 - /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */ 93 - SSYNC(); 94 - bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT); 95 - SSYNC(); 87 + if ((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0) { 88 + /* CoreB already running, sending ipi to wakeup it */ 89 + platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); 90 + } else { 91 + /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */ 92 + bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT); 93 + SSYNC(); 94 + } 96 95 97 96 timeout = jiffies + 1 * HZ; 98 97 while (time_before(jiffies, timeout)) {
+35 -3
arch/blackfin/mach-common/smp.c
··· 344 344 345 345 int __cpuinit __cpu_up(unsigned int cpu) 346 346 { 347 - struct task_struct *idle; 348 347 int ret; 348 + static struct task_struct *idle; 349 + 350 + if (idle) 351 + free_task(idle); 349 352 350 353 idle = fork_idle(cpu); 351 354 if (IS_ERR(idle)) { ··· 357 354 } 358 355 359 356 secondary_stack = task_stack_page(idle) + THREAD_SIZE; 360 - smp_wmb(); 361 357 362 358 ret = platform_boot_secondary(cpu, idle); 363 359 ··· 415 413 atomic_inc(&mm->mm_users); 416 414 atomic_inc(&mm->mm_count); 417 415 current->active_mm = mm; 418 - BUG_ON(current->mm); /* Can't be, but better be safe than sorry. */ 419 416 420 417 preempt_disable(); 421 418 ··· 495 494 put_cpu(); 496 495 } 497 496 EXPORT_SYMBOL(resync_core_dcache); 497 + #endif 498 + 499 + #ifdef CONFIG_HOTPLUG_CPU 500 + int __cpuexit __cpu_disable(void) 501 + { 502 + unsigned int cpu = smp_processor_id(); 503 + 504 + if (cpu == 0) 505 + return -EPERM; 506 + 507 + set_cpu_online(cpu, false); 508 + return 0; 509 + } 510 + 511 + static DECLARE_COMPLETION(cpu_killed); 512 + 513 + int __cpuexit __cpu_die(unsigned int cpu) 514 + { 515 + return wait_for_completion_timeout(&cpu_killed, 5000); 516 + } 517 + 518 + void cpu_die(void) 519 + { 520 + complete(&cpu_killed); 521 + 522 + atomic_dec(&init_mm.mm_users); 523 + atomic_dec(&init_mm.mm_count); 524 + 525 + local_irq_disable(); 526 + platform_cpu_die(); 527 + } 498 528 #endif