Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] ppc32: support hotplug cpu on powermacs

This allows cpus to be off-lined on 32-bit SMP powermacs. When a cpu
is off-lined, it is put into sleep mode with interrupts disabled. It
can be on-lined again by asserting its soft-reset pin, which is
connected to a GPIO pin.

With this I can off-line the second cpu in my dual G4 powermac, which
means that I can then suspend the machine (the suspend/resume code
refuses to suspend if more than one cpu is online, and making it cope
with multiple cpus is surprisingly messy).

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Paul Mackerras and committed by Linus Torvalds
(commit 31139971, parent bb0bb3b6).

+119 -61
+9
arch/ppc/Kconfig
··· 265 265 266 266 If in doubt, say Y here. 267 267 268 + config HOTPLUG_CPU 269 + bool "Support for enabling/disabling CPUs" 270 + depends on SMP && HOTPLUG && EXPERIMENTAL && PPC_PMAC 271 + ---help--- 272 + Say Y here to be able to disable and re-enable individual 273 + CPUs at runtime on SMP machines. 274 + 275 + Say N if you are unsure. 276 + 268 277 source arch/ppc/platforms/4xx/Kconfig 269 278 source arch/ppc/platforms/85xx/Kconfig 270 279
+13 -15
arch/ppc/kernel/head.S
··· 1023 1023 andc r4,r4,r3 1024 1024 mtspr SPRN_HID0,r4 1025 1025 sync 1026 - bl gemini_prom_init 1027 1026 b __secondary_start 1028 1027 #endif /* CONFIG_GEMINI */ 1029 - .globl __secondary_start_psurge 1030 - __secondary_start_psurge: 1031 - li r24,1 /* cpu # */ 1032 - b __secondary_start_psurge99 1033 - .globl __secondary_start_psurge2 1034 - __secondary_start_psurge2: 1035 - li r24,2 /* cpu # */ 1036 - b __secondary_start_psurge99 1037 - .globl __secondary_start_psurge3 1038 - __secondary_start_psurge3: 1039 - li r24,3 /* cpu # */ 1040 - b __secondary_start_psurge99 1041 - __secondary_start_psurge99: 1042 - /* we come in here with IR=0 and DR=1, and DBAT 0 1028 + 1029 + .globl __secondary_start_pmac_0 1030 + __secondary_start_pmac_0: 1031 + /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ 1032 + li r24,0 1033 + b 1f 1034 + li r24,1 1035 + b 1f 1036 + li r24,2 1037 + b 1f 1038 + li r24,3 1039 + 1: 1040 + /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0 1043 1041 set to map the 0xf0000000 - 0xffffffff region */ 1044 1042 mfmsr r0 1045 1043 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
+5 -1
arch/ppc/kernel/idle.c
··· 22 22 #include <linux/ptrace.h> 23 23 #include <linux/slab.h> 24 24 #include <linux/sysctl.h> 25 + #include <linux/cpu.h> 25 26 26 27 #include <asm/pgtable.h> 27 28 #include <asm/uaccess.h> ··· 36 35 void default_idle(void) 37 36 { 38 37 void (*powersave)(void); 38 + int cpu = smp_processor_id(); 39 39 40 40 powersave = ppc_md.power_save; 41 41 ··· 46 44 #ifdef CONFIG_SMP 47 45 else { 48 46 set_thread_flag(TIF_POLLING_NRFLAG); 49 - while (!need_resched()) 47 + while (!need_resched() && !cpu_is_offline(cpu)) 50 48 barrier(); 51 49 clear_thread_flag(TIF_POLLING_NRFLAG); 52 50 } ··· 54 52 } 55 53 if (need_resched()) 56 54 schedule(); 55 + if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 56 + cpu_die(); 57 57 } 58 58 59 59 /*
+30 -14
arch/ppc/kernel/smp.c
··· 45 45 cpumask_t cpu_possible_map; 46 46 int smp_hw_index[NR_CPUS]; 47 47 struct thread_info *secondary_ti; 48 + static struct task_struct *idle_tasks[NR_CPUS]; 48 49 49 50 EXPORT_SYMBOL(cpu_online_map); 50 51 EXPORT_SYMBOL(cpu_possible_map); ··· 287 286 288 287 void __init smp_prepare_cpus(unsigned int max_cpus) 289 288 { 290 - int num_cpus, i; 289 + int num_cpus, i, cpu; 290 + struct task_struct *p; 291 291 292 292 /* Fixup boot cpu */ 293 293 smp_store_cpu_info(smp_processor_id()); ··· 310 308 311 309 if (smp_ops->space_timers) 312 310 smp_ops->space_timers(num_cpus); 311 + 312 + for_each_cpu(cpu) { 313 + if (cpu == smp_processor_id()) 314 + continue; 315 + /* create a process for the processor */ 316 + p = fork_idle(cpu); 317 + if (IS_ERR(p)) 318 + panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 319 + p->thread_info->cpu = cpu; 320 + idle_tasks[cpu] = p; 321 + } 313 322 } 314 323 315 324 void __devinit smp_prepare_boot_cpu(void) ··· 347 334 set_dec(tb_ticks_per_jiffy); 348 335 cpu_callin_map[cpu] = 1; 349 336 350 - printk("CPU %i done callin...\n", cpu); 337 + printk("CPU %d done callin...\n", cpu); 351 338 smp_ops->setup_cpu(cpu); 352 - printk("CPU %i done setup...\n", cpu); 339 + printk("CPU %d done setup...\n", cpu); 353 - local_irq_enable(); 354 340 smp_ops->take_timebase(); 355 - printk("CPU %i done timebase take...\n", cpu); 341 + printk("CPU %d done timebase take...\n", cpu); 342 + 343 + spin_lock(&call_lock); 344 + cpu_set(cpu, cpu_online_map); 345 + spin_unlock(&call_lock); 346 + 347 + local_irq_enable(); 356 348 357 349 cpu_idle(); 358 350 return 0; ··· 365 347 366 348 int __cpu_up(unsigned int cpu) 367 349 { 368 - struct task_struct *p; 369 350 char buf[32]; 370 351 int c; 371 352 372 - /* create a process for the processor */ 373 - /* only regs.msr is actually used, and 0 is OK for it */ 374 - p = fork_idle(cpu); 375 - if (IS_ERR(p)) 376 - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 377 - secondary_ti = p->thread_info; 378 - p->thread_info->cpu = cpu; 353 + secondary_ti = idle_tasks[cpu]->thread_info; 354 + mb(); 379 355 380 356 /* 381 357 * There was a cache flush loop here to flush the cache ··· 401 389 printk("Processor %d found.\n", cpu); 402 390 403 391 smp_ops->give_timebase(); 404 - cpu_set(cpu, cpu_online_map); 392 + 393 + /* Wait until cpu puts itself in the online map */ 394 + while (!cpu_online(cpu)) 395 + cpu_relax(); 396 + 405 397 return 0; 406 398 }
+2
arch/ppc/platforms/pmac_sleep.S
··· 161 161 addi r3,r3,sleep_storage@l 162 162 stw r5,0(r3) 163 163 164 + .globl low_cpu_die 165 + low_cpu_die: 164 166 /* Flush & disable all caches */ 165 167 bl flush_disable_caches 166 168
+54 -31
arch/ppc/platforms/pmac_smp.c
··· 33 33 #include <linux/spinlock.h> 34 34 #include <linux/errno.h> 35 35 #include <linux/hardirq.h> 36 + #include <linux/cpu.h> 36 37 37 38 #include <asm/ptrace.h> 38 39 #include <asm/atomic.h> ··· 56 55 * Powersurge (old powermac SMP) support. 57 56 */ 58 57 59 - extern void __secondary_start_psurge(void); 60 - extern void __secondary_start_psurge2(void); /* Temporary horrible hack */ 61 - extern void __secondary_start_psurge3(void); /* Temporary horrible hack */ 58 + extern void __secondary_start_pmac_0(void); 62 59 63 60 /* Addresses for powersurge registers */ 64 61 #define HAMMERHEAD_BASE 0xf8000000 ··· 118 119 static unsigned int pri_tb_hi, pri_tb_lo; 119 120 static unsigned int pri_tb_stamp; 120 121 121 - static void __init core99_init_caches(int cpu) 122 + static void __devinit core99_init_caches(int cpu) 122 123 { 123 124 if (!cpu_has_feature(CPU_FTR_L2CR)) 124 125 return; ··· 345 346 346 347 static void __init smp_psurge_kick_cpu(int nr) 347 348 { 348 - void (*start)(void) = __secondary_start_psurge; 349 + unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; 349 350 unsigned long a; 350 351 351 352 /* may need to flush here if secondary bats aren't setup */ ··· 355 356 356 357 if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); 357 358 358 - /* setup entry point of secondary processor */ 359 - switch (nr) { 360 - case 2: 361 - start = __secondary_start_psurge2; 362 - break; 363 - case 3: 364 - start = __secondary_start_psurge3; 365 - break; 366 - } 367 - 368 - out_be32(psurge_start, __pa(start)); 359 + out_be32(psurge_start, start); 369 360 mb(); 370 361 371 362 psurge_set_ipi(nr); ··· 489 500 return ncpus; 490 501 } 491 502 492 - static void __init smp_core99_kick_cpu(int nr) 503 + static void __devinit smp_core99_kick_cpu(int nr) 493 504 { 494 505 unsigned long save_vector, new_vector; 495 506 unsigned long flags; 496 507 497 508 volatile unsigned long *vector 498 509 = ((volatile unsigned long *)(KERNELBASE+0x100)); 499 - if (nr < 1 || nr > 3) 510 + if (nr < 0 || nr > 3) 500 511 return; 501 512 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); 502 513 ··· 507 518 save_vector = *vector; 508 519 509 520 /* Setup fake reset vector that does 510 - * b __secondary_start_psurge - KERNELBASE 521 + * b __secondary_start_pmac_0 + nr*8 - KERNELBASE 511 522 */ 512 - switch(nr) { 513 - case 1: 514 - new_vector = (unsigned long)__secondary_start_psurge; 515 - break; 516 - case 2: 517 - new_vector = (unsigned long)__secondary_start_psurge2; 518 - break; 519 - case 3: 520 - new_vector = (unsigned long)__secondary_start_psurge3; 521 - break; 522 - } 523 + new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8; 523 524 *vector = 0x48000002 + new_vector - KERNELBASE; 524 525 525 526 /* flush data cache and inval instruction cache */ ··· 533 554 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); 534 555 } 535 556 536 - static void __init smp_core99_setup_cpu(int cpu_nr) 557 + static void __devinit smp_core99_setup_cpu(int cpu_nr) 537 558 { 538 559 /* Setup L2/L3 */ 539 560 if (cpu_nr != 0) ··· 647 668 .give_timebase = smp_core99_give_timebase, 648 669 .take_timebase = smp_core99_take_timebase, 649 670 }; 671 + 672 + #ifdef CONFIG_HOTPLUG_CPU 673 + 674 + int __cpu_disable(void) 675 + { 676 + cpu_clear(smp_processor_id(), cpu_online_map); 677 + 678 + /* XXX reset cpu affinity here */ 679 + openpic_set_priority(0xf); 680 + asm volatile("mtdec %0" : : "r" (0x7fffffff)); 681 + mb(); 682 + udelay(20); 683 + asm volatile("mtdec %0" : : "r" (0x7fffffff)); 684 + return 0; 685 + } 686 + 687 + extern void low_cpu_die(void) __attribute__((noreturn)); /* in pmac_sleep.S */ 688 + static int cpu_dead[NR_CPUS]; 689 + 690 + void cpu_die(void) 691 + { 692 + local_irq_disable(); 693 + cpu_dead[smp_processor_id()] = 1; 694 + mb(); 695 + low_cpu_die(); 696 + } 697 + 698 + void __cpu_die(unsigned int cpu) 699 + { 700 + int timeout; 701 + 702 + timeout = 1000; 703 + while (!cpu_dead[cpu]) { 704 + if (--timeout == 0) { 705 + printk("CPU %u refused to die!\n", cpu); 706 + break; 707 + } 708 + msleep(1); 709 + } 710 + cpu_callin_map[cpu] = 0; 711 + cpu_dead[cpu] = 0; 712 + } 713 + 714 + #endif
+6
include/asm-ppc/smp.h
··· 41 41 struct pt_regs; 42 42 extern void smp_message_recv(int, struct pt_regs *); 43 43 44 + extern int __cpu_disable(void); 45 + extern void __cpu_die(unsigned int cpu); 46 + extern void cpu_die(void) __attribute__((noreturn)); 47 + 44 48 #define NO_PROC_ID 0xFF /* No processor magic marker */ 45 49 #define PROC_CHANGE_PENALTY 20 46 50 ··· 67 63 #endif /* __ASSEMBLY__ */ 68 64 69 65 #else /* !(CONFIG_SMP) */ 66 + 67 + static inline void cpu_die(void) { } 70 68 71 69 #endif /* !(CONFIG_SMP) */ 72 70