Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
kdump: Allow shrinking of kdump region to be overridden
powerpc/pmac/smp: Remove no-longer needed preempt workaround
powerpc/smp: Increase vdso_data->processorCount, not just decrease it
powerpc/smp: Create idle threads on demand and properly reset them
powerpc/smp: Don't expose per-cpu "cpu_state" array
powerpc/pmac/smp: Fix CPU hotplug crashes on some machines
powerpc/smp: Add a smp_ops->bringup_done() done callback
powerpc/pmac: Rename cpu_state in therm_pm72 to avoid collision
powerpc/pmac/smp: Properly NAP offlined CPU on G5
powerpc/pmac/smp: Remove HMT changes for PowerMac offline code
powerpc/pmac/smp: Consolidate 32-bit and 64-bit PowerMac cpu_die in one file
powerpc/pmac/smp: Fixup smp_core99_cpu_disable() and use it on 64-bit
powerpc/pmac/smp: Rename fixup_irqs() to migrate_irqs() and use it on ppc32
powerpc/pmac/smp: Fix 32-bit PowerMac cpu_die
powerpc/smp: Remove unused smp_ops->cpu_enable()
powerpc/smp: Remove unused generic_cpu_enable()
powerpc/smp: Fix generic_mach_cpu_die()
powerpc/smp: soft-replugged CPUs must go back to start_secondary
powerpc: Make decrementer interrupt robust against offlined CPUs

+282 -233
+1 -2
arch/powerpc/include/asm/machdep.h
··· 35 35 int (*probe)(void); 36 36 void (*kick_cpu)(int nr); 37 37 void (*setup_cpu)(int nr); 38 + void (*bringup_done)(void); 38 39 void (*take_timebase)(void); 39 40 void (*give_timebase)(void); 40 - int (*cpu_enable)(unsigned int nr); 41 41 int (*cpu_disable)(void); 42 42 void (*cpu_die)(unsigned int nr); 43 43 int (*cpu_bootable)(unsigned int nr); ··· 267 267 268 268 extern void e500_idle(void); 269 269 extern void power4_idle(void); 270 - extern void power4_cpu_offline_powersave(void); 271 270 extern void ppc6xx_idle(void); 272 271 extern void book3e_idle(void); 273 272
+3 -2
arch/powerpc/include/asm/smp.h
··· 36 36 37 37 extern void smp_send_debugger_break(int cpu); 38 38 extern void smp_message_recv(int); 39 + extern void start_secondary_resume(void); 39 40 40 41 DECLARE_PER_CPU(unsigned int, cpu_pvr); 41 42 42 43 #ifdef CONFIG_HOTPLUG_CPU 43 - extern void fixup_irqs(const struct cpumask *map); 44 + extern void migrate_irqs(void); 44 45 int generic_cpu_disable(void); 45 - int generic_cpu_enable(unsigned int cpu); 46 46 void generic_cpu_die(unsigned int cpu); 47 47 void generic_mach_cpu_die(void); 48 + void generic_set_cpu_dead(unsigned int cpu); 48 49 #endif 49 50 50 51 #ifdef CONFIG_PPC64
+9
arch/powerpc/kernel/head_32.S
··· 890 890 mtspr SPRN_SRR1,r4 891 891 SYNC 892 892 RFI 893 + 894 + _GLOBAL(start_secondary_resume) 895 + /* Reset stack */ 896 + rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ 897 + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD 898 + li r3,0 899 + std r3,0(r1) /* Zero the stack frame pointer */ 900 + bl start_secondary 901 + b . 893 902 #endif /* CONFIG_SMP */ 894 903 895 904 #ifdef CONFIG_KVM_BOOK3S_HANDLER
+7
arch/powerpc/kernel/head_64.S
··· 536 536 add r13,r13,r4 /* for this processor. */ 537 537 mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ 538 538 539 + /* Mark interrupts soft and hard disabled (they might be enabled 540 + * in the PACA when doing hotplug) 541 + */ 542 + li r0,0 543 + stb r0,PACASOFTIRQEN(r13) 544 + stb r0,PACAHARDIRQEN(r13) 545 + 539 546 /* Create a temp kernel stack for use before relocation is on. */ 540 547 ld r1,PACAEMERGSP(r13) 541 548 subi r1,r1,STACK_FRAME_OVERHEAD
-21
arch/powerpc/kernel/idle_power4.S
··· 53 53 isync 54 54 b 1b 55 55 56 - _GLOBAL(power4_cpu_offline_powersave) 57 - /* Go to NAP now */ 58 - mfmsr r7 59 - rldicl r0,r7,48,1 60 - rotldi r0,r0,16 61 - mtmsrd r0,1 /* hard-disable interrupts */ 62 - li r0,1 63 - li r6,0 64 - stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */ 65 - stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */ 66 - BEGIN_FTR_SECTION 67 - DSSALL 68 - sync 69 - END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 70 - ori r7,r7,MSR_EE 71 - oris r7,r7,MSR_POW@h 72 - sync 73 - isync 74 - mtmsrd r7 75 - isync 76 - blr
+2 -1
arch/powerpc/kernel/irq.c
··· 246 246 } 247 247 248 248 #ifdef CONFIG_HOTPLUG_CPU 249 - void fixup_irqs(const struct cpumask *map) 249 + void migrate_irqs(void) 250 250 { 251 251 struct irq_desc *desc; 252 252 unsigned int irq; 253 253 static int warned; 254 254 cpumask_var_t mask; 255 + const struct cpumask *map = cpu_online_mask; 255 256 256 257 alloc_cpumask_var(&mask, GFP_KERNEL); 257 258
+98 -57
arch/powerpc/kernel/smp.c
··· 57 57 #define DBG(fmt...) 58 58 #endif 59 59 60 + 61 + /* Store all idle threads, this can be reused instead of creating 62 + * a new thread. Also avoids complicated thread destroy functionality 63 + * for idle threads. 64 + */ 65 + #ifdef CONFIG_HOTPLUG_CPU 66 + /* 67 + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is 68 + * removed after init for !CONFIG_HOTPLUG_CPU. 69 + */ 70 + static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); 71 + #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) 72 + #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) 73 + #else 74 + static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; 75 + #define get_idle_for_cpu(x) (idle_thread_array[(x)]) 76 + #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) 77 + #endif 78 + 60 79 struct thread_info *secondary_ti; 61 80 62 81 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); ··· 257 238 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); 258 239 } 259 240 260 - static void __init smp_create_idle(unsigned int cpu) 261 - { 262 - struct task_struct *p; 263 - 264 - /* create a process for the processor */ 265 - p = fork_idle(cpu); 266 - if (IS_ERR(p)) 267 - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); 268 - #ifdef CONFIG_PPC64 269 - paca[cpu].__current = p; 270 - paca[cpu].kstack = (unsigned long) task_thread_info(p) 271 - + THREAD_SIZE - STACK_FRAME_OVERHEAD; 272 - #endif 273 - current_set[cpu] = task_thread_info(p); 274 - task_thread_info(p)->cpu = cpu; 275 - } 276 - 277 241 void __init smp_prepare_cpus(unsigned int max_cpus) 278 242 { 279 243 unsigned int cpu; ··· 290 288 max_cpus = NR_CPUS; 291 289 else 292 290 max_cpus = 1; 293 - 294 - for_each_possible_cpu(cpu) 295 - if (cpu != boot_cpuid) 296 - smp_create_idle(cpu); 297 291 } 298 292 299 293 void __devinit smp_prepare_boot_cpu(void) ··· 303 305 304 306 #ifdef CONFIG_HOTPLUG_CPU 305 307 /* State of each CPU during hotplug phases */ 306 - DEFINE_PER_CPU(int, cpu_state) = 
{ 0 }; 308 + static DEFINE_PER_CPU(int, cpu_state) = { 0 }; 307 309 308 310 int generic_cpu_disable(void) 309 311 { ··· 315 317 set_cpu_online(cpu, false); 316 318 #ifdef CONFIG_PPC64 317 319 vdso_data->processorCount--; 318 - fixup_irqs(cpu_online_mask); 319 320 #endif 320 - return 0; 321 - } 322 - 323 - int generic_cpu_enable(unsigned int cpu) 324 - { 325 - /* Do the normal bootup if we haven't 326 - * already bootstrapped. */ 327 - if (system_state != SYSTEM_RUNNING) 328 - return -ENOSYS; 329 - 330 - /* get the target out of it's holding state */ 331 - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 332 - smp_wmb(); 333 - 334 - while (!cpu_online(cpu)) 335 - cpu_relax(); 336 - 337 - #ifdef CONFIG_PPC64 338 - fixup_irqs(cpu_online_mask); 339 - /* counter the irq disable in fixup_irqs */ 340 - local_irq_enable(); 341 - #endif 321 + migrate_irqs(); 342 322 return 0; 343 323 } 344 324 ··· 338 362 unsigned int cpu; 339 363 340 364 local_irq_disable(); 365 + idle_task_exit(); 341 366 cpu = smp_processor_id(); 342 367 printk(KERN_DEBUG "CPU%d offline\n", cpu); 343 368 __get_cpu_var(cpu_state) = CPU_DEAD; 344 369 smp_wmb(); 345 370 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 346 371 cpu_relax(); 347 - set_cpu_online(cpu, true); 348 - local_irq_enable(); 372 + } 373 + 374 + void generic_set_cpu_dead(unsigned int cpu) 375 + { 376 + per_cpu(cpu_state, cpu) = CPU_DEAD; 349 377 } 350 378 #endif 351 379 352 - static int __devinit cpu_enable(unsigned int cpu) 353 - { 354 - if (smp_ops && smp_ops->cpu_enable) 355 - return smp_ops->cpu_enable(cpu); 380 + struct create_idle { 381 + struct work_struct work; 382 + struct task_struct *idle; 383 + struct completion done; 384 + int cpu; 385 + }; 356 386 357 - return -ENOSYS; 387 + static void __cpuinit do_fork_idle(struct work_struct *work) 388 + { 389 + struct create_idle *c_idle = 390 + container_of(work, struct create_idle, work); 391 + 392 + c_idle->idle = fork_idle(c_idle->cpu); 393 + complete(&c_idle->done); 394 + } 395 + 396 + 
static int __cpuinit create_idle(unsigned int cpu) 397 + { 398 + struct thread_info *ti; 399 + struct create_idle c_idle = { 400 + .cpu = cpu, 401 + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), 402 + }; 403 + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); 404 + 405 + c_idle.idle = get_idle_for_cpu(cpu); 406 + 407 + /* We can't use kernel_thread since we must avoid to 408 + * reschedule the child. We use a workqueue because 409 + * we want to fork from a kernel thread, not whatever 410 + * userspace process happens to be trying to online us. 411 + */ 412 + if (!c_idle.idle) { 413 + schedule_work(&c_idle.work); 414 + wait_for_completion(&c_idle.done); 415 + } else 416 + init_idle(c_idle.idle, cpu); 417 + if (IS_ERR(c_idle.idle)) { 418 + pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); 419 + return PTR_ERR(c_idle.idle); 420 + } 421 + ti = task_thread_info(c_idle.idle); 422 + 423 + #ifdef CONFIG_PPC64 424 + paca[cpu].__current = c_idle.idle; 425 + paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; 426 + #endif 427 + ti->cpu = cpu; 428 + current_set[cpu] = ti; 429 + 430 + return 0; 358 431 } 359 432 360 433 int __cpuinit __cpu_up(unsigned int cpu) 361 434 { 362 - int c; 435 + int rc, c; 363 436 364 437 secondary_ti = current_set[cpu]; 365 - if (!cpu_enable(cpu)) 366 - return 0; 367 438 368 439 if (smp_ops == NULL || 369 440 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) 370 441 return -EINVAL; 442 + 443 + /* Make sure we have an idle thread */ 444 + rc = create_idle(cpu); 445 + if (rc) 446 + return rc; 371 447 372 448 /* Make sure callin-map entry is 0 (can be leftover a CPU 373 449 * hotplug ··· 530 502 } 531 503 532 504 /* Activate a secondary processor. 
*/ 533 - int __devinit start_secondary(void *unused) 505 + void __devinit start_secondary(void *unused) 534 506 { 535 507 unsigned int cpu = smp_processor_id(); 536 508 struct device_node *l2_cache; ··· 551 523 552 524 secondary_cpu_time_init(); 553 525 526 + #ifdef CONFIG_PPC64 527 + if (system_state == SYSTEM_RUNNING) 528 + vdso_data->processorCount++; 529 + #endif 554 530 ipi_call_lock(); 555 531 notify_cpu_starting(cpu); 556 532 set_cpu_online(cpu, true); ··· 590 558 local_irq_enable(); 591 559 592 560 cpu_idle(); 593 - return 0; 561 + 562 + BUG(); 594 563 } 595 564 596 565 int setup_profiling_timer(unsigned int multiplier) ··· 618 585 619 586 free_cpumask_var(old_mask); 620 587 588 + if (smp_ops && smp_ops->bringup_done) 589 + smp_ops->bringup_done(); 590 + 621 591 dump_numa_cpu_topology(); 592 + 622 593 } 623 594 624 595 int arch_sd_sibling_asym_packing(void) ··· 697 660 { 698 661 if (ppc_md.cpu_die) 699 662 ppc_md.cpu_die(); 663 + 664 + /* If we return, we re-enter start_secondary */ 665 + start_secondary_resume(); 700 666 } 667 + 701 668 #endif
+11 -4
arch/powerpc/kernel/time.c
··· 577 577 struct clock_event_device *evt = &decrementer->event; 578 578 u64 now; 579 579 580 + /* Ensure a positive value is written to the decrementer, or else 581 + * some CPUs will continue to take decrementer exceptions. 582 + */ 583 + set_dec(DECREMENTER_MAX); 584 + 585 + /* Some implementations of hotplug will get timer interrupts while 586 + * offline, just ignore these 587 + */ 588 + if (!cpu_online(smp_processor_id())) 589 + return; 590 + 580 591 trace_timer_interrupt_entry(regs); 581 592 582 593 __get_cpu_var(irq_stat).timer_irqs++; 583 - 584 - /* Ensure a positive value is written to the decrementer, or else 585 - * some CPUs will continuue to take decrementer exceptions */ 586 - set_dec(DECREMENTER_MAX); 587 594 588 595 #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 589 596 if (atomic_read(&ppc_n_lost_interrupts) != 0)
-1
arch/powerpc/platforms/powermac/pmac.h
··· 33 33 extern void pmac_check_ht_link(void); 34 34 35 35 extern void pmac_setup_smp(void); 36 - extern void pmac32_cpu_die(void); 37 36 extern void low_cpu_die(void) __attribute__((noreturn)); 38 37 39 38 extern int pmac_nvram_init(void);
-56
arch/powerpc/platforms/powermac/setup.c
··· 650 650 return PCI_PROBE_NORMAL; 651 651 return PCI_PROBE_DEVTREE; 652 652 } 653 - 654 - #ifdef CONFIG_HOTPLUG_CPU 655 - /* access per cpu vars from generic smp.c */ 656 - DECLARE_PER_CPU(int, cpu_state); 657 - 658 - static void pmac64_cpu_die(void) 659 - { 660 - /* 661 - * turn off as much as possible, we'll be 662 - * kicked out as this will only be invoked 663 - * on core99 platforms for now ... 664 - */ 665 - 666 - printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); 667 - __get_cpu_var(cpu_state) = CPU_DEAD; 668 - smp_wmb(); 669 - 670 - /* 671 - * during the path that leads here preemption is disabled, 672 - * reenable it now so that when coming up preempt count is 673 - * zero correctly 674 - */ 675 - preempt_enable(); 676 - 677 - /* 678 - * hard-disable interrupts for the non-NAP case, the NAP code 679 - * needs to re-enable interrupts (but soft-disables them) 680 - */ 681 - hard_irq_disable(); 682 - 683 - while (1) { 684 - /* let's not take timer interrupts too often ... */ 685 - set_dec(0x7fffffff); 686 - 687 - /* should always be true at this point */ 688 - if (cpu_has_feature(CPU_FTR_CAN_NAP)) 689 - power4_cpu_offline_powersave(); 690 - else { 691 - HMT_low(); 692 - HMT_very_low(); 693 - } 694 - } 695 - } 696 - #endif /* CONFIG_HOTPLUG_CPU */ 697 - 698 653 #endif /* CONFIG_PPC64 */ 699 654 700 655 define_machine(powermac) { ··· 680 725 .pcibios_enable_device_hook = pmac_pci_enable_device_hook, 681 726 .pcibios_after_init = pmac_pcibios_after_init, 682 727 .phys_mem_access_prot = pci_phys_mem_access_prot, 683 - #endif 684 - #ifdef CONFIG_HOTPLUG_CPU 685 - #ifdef CONFIG_PPC64 686 - .cpu_die = pmac64_cpu_die, 687 - #endif 688 - #ifdef CONFIG_PPC32 689 - .cpu_die = pmac32_cpu_die, 690 - #endif 691 - #endif 692 - #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) 693 - .cpu_die = generic_mach_cpu_die, 694 728 #endif 695 729 };
+117 -55
arch/powerpc/platforms/powermac/smp.c
··· 840 840 841 841 /* Setup openpic */ 842 842 mpic_setup_this_cpu(); 843 - 844 - if (cpu_nr == 0) { 845 - #ifdef CONFIG_PPC64 846 - extern void g5_phy_disable_cpu1(void); 847 - 848 - /* Close i2c bus if it was used for tb sync */ 849 - if (pmac_tb_clock_chip_host) { 850 - pmac_i2c_close(pmac_tb_clock_chip_host); 851 - pmac_tb_clock_chip_host = NULL; 852 - } 853 - 854 - /* If we didn't start the second CPU, we must take 855 - * it off the bus 856 - */ 857 - if (of_machine_is_compatible("MacRISC4") && 858 - num_online_cpus() < 2) 859 - g5_phy_disable_cpu1(); 860 - #endif /* CONFIG_PPC64 */ 861 - 862 - if (ppc_md.progress) 863 - ppc_md.progress("core99_setup_cpu 0 done", 0x349); 864 - } 865 843 } 866 844 867 - 868 - #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) 869 - 870 - int smp_core99_cpu_disable(void) 845 + #ifdef CONFIG_HOTPLUG_CPU 846 + static int smp_core99_cpu_notify(struct notifier_block *self, 847 + unsigned long action, void *hcpu) 871 848 { 872 - set_cpu_online(smp_processor_id(), false); 849 + int rc; 873 850 874 - /* XXX reset cpu affinity here */ 851 + switch(action) { 852 + case CPU_UP_PREPARE: 853 + case CPU_UP_PREPARE_FROZEN: 854 + /* Open i2c bus if it was used for tb sync */ 855 + if (pmac_tb_clock_chip_host) { 856 + rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); 857 + if (rc) { 858 + pr_err("Failed to open i2c bus for time sync\n"); 859 + return notifier_from_errno(rc); 860 + } 861 + } 862 + break; 863 + case CPU_ONLINE: 864 + case CPU_UP_CANCELED: 865 + /* Close i2c bus if it was used for tb sync */ 866 + if (pmac_tb_clock_chip_host) 867 + pmac_i2c_close(pmac_tb_clock_chip_host); 868 + break; 869 + default: 870 + break; 871 + } 872 + return NOTIFY_OK; 873 + } 874 + 875 + static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { 876 + .notifier_call = smp_core99_cpu_notify, 877 + }; 878 + #endif /* CONFIG_HOTPLUG_CPU */ 879 + 880 + static void __init smp_core99_bringup_done(void) 881 + { 882 + #ifdef CONFIG_PPC64 883 + extern 
void g5_phy_disable_cpu1(void); 884 + 885 + /* Close i2c bus if it was used for tb sync */ 886 + if (pmac_tb_clock_chip_host) 887 + pmac_i2c_close(pmac_tb_clock_chip_host); 888 + 889 + /* If we didn't start the second CPU, we must take 890 + * it off the bus. 891 + */ 892 + if (of_machine_is_compatible("MacRISC4") && 893 + num_online_cpus() < 2) { 894 + set_cpu_present(1, false); 895 + g5_phy_disable_cpu1(); 896 + } 897 + #endif /* CONFIG_PPC64 */ 898 + 899 + #ifdef CONFIG_HOTPLUG_CPU 900 + register_cpu_notifier(&smp_core99_cpu_nb); 901 + #endif 902 + if (ppc_md.progress) 903 + ppc_md.progress("smp_core99_bringup_done", 0x349); 904 + } 905 + 906 + #ifdef CONFIG_HOTPLUG_CPU 907 + 908 + static int smp_core99_cpu_disable(void) 909 + { 910 + int rc = generic_cpu_disable(); 911 + if (rc) 912 + return rc; 913 + 875 914 mpic_cpu_set_priority(0xf); 876 - asm volatile("mtdec %0" : : "r" (0x7fffffff)); 877 - mb(); 878 - udelay(20); 879 - asm volatile("mtdec %0" : : "r" (0x7fffffff)); 915 + 880 916 return 0; 881 917 } 882 918 883 - static int cpu_dead[NR_CPUS]; 919 + #ifdef CONFIG_PPC32 884 920 885 - void pmac32_cpu_die(void) 921 + static void pmac_cpu_die(void) 886 922 { 923 + int cpu = smp_processor_id(); 924 + 887 925 local_irq_disable(); 888 - cpu_dead[smp_processor_id()] = 1; 926 + idle_task_exit(); 927 + pr_debug("CPU%d offline\n", cpu); 928 + generic_set_cpu_dead(cpu); 929 + smp_wmb(); 889 930 mb(); 890 931 low_cpu_die(); 891 932 } 892 933 893 - void smp_core99_cpu_die(unsigned int cpu) 894 - { 895 - int timeout; 934 + #else /* CONFIG_PPC32 */ 896 935 897 - timeout = 1000; 898 - while (!cpu_dead[cpu]) { 899 - if (--timeout == 0) { 900 - printk("CPU %u refused to die!\n", cpu); 901 - break; 902 - } 903 - msleep(1); 936 + static void pmac_cpu_die(void) 937 + { 938 + int cpu = smp_processor_id(); 939 + 940 + local_irq_disable(); 941 + idle_task_exit(); 942 + 943 + /* 944 + * turn off as much as possible, we'll be 945 + * kicked out as this will only be invoked 946 + * on 
core99 platforms for now ... 947 + */ 948 + 949 + printk(KERN_INFO "CPU#%d offline\n", cpu); 950 + generic_set_cpu_dead(cpu); 951 + smp_wmb(); 952 + 953 + /* 954 + * Re-enable interrupts. The NAP code needs to enable them 955 + * anyways, do it now so we deal with the case where one already 956 + * happened while soft-disabled. 957 + * We shouldn't get any external interrupts, only decrementer, and the 958 + * decrementer handler is safe for use on offline CPUs 959 + */ 960 + local_irq_enable(); 961 + 962 + while (1) { 963 + /* let's not take timer interrupts too often ... */ 964 + set_dec(0x7fffffff); 965 + 966 + /* Enter NAP mode */ 967 + power4_idle(); 904 968 } 905 - cpu_dead[cpu] = 0; 906 969 } 907 970 908 - #endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ 971 + #endif /* else CONFIG_PPC32 */ 972 + #endif /* CONFIG_HOTPLUG_CPU */ 909 973 910 974 /* Core99 Macs (dual G4s and G5s) */ 911 975 struct smp_ops_t core99_smp_ops = { 912 976 .message_pass = smp_mpic_message_pass, 913 977 .probe = smp_core99_probe, 978 + .bringup_done = smp_core99_bringup_done, 914 979 .kick_cpu = smp_core99_kick_cpu, 915 980 .setup_cpu = smp_core99_setup_cpu, 916 981 .give_timebase = smp_core99_give_timebase, 917 982 .take_timebase = smp_core99_take_timebase, 918 983 #if defined(CONFIG_HOTPLUG_CPU) 919 - # if defined(CONFIG_PPC32) 920 984 .cpu_disable = smp_core99_cpu_disable, 921 - .cpu_die = smp_core99_cpu_die, 922 - # endif 923 - # if defined(CONFIG_PPC64) 924 - .cpu_disable = generic_cpu_disable, 925 985 .cpu_die = generic_cpu_die, 926 - /* intentionally do *NOT* assign cpu_enable, 927 - * the generic code will use kick_cpu then! */ 928 - # endif 929 986 #endif 930 987 }; 931 988 ··· 1014 957 smp_ops = &psurge_smp_ops; 1015 958 } 1016 959 #endif /* CONFIG_PPC32 */ 960 + 961 + #ifdef CONFIG_HOTPLUG_CPU 962 + ppc_md.cpu_die = pmac_cpu_die; 963 + #endif 1017 964 } 965 + 1018 966
-2
arch/powerpc/platforms/pseries/offline_states.h
··· 34 34 #endif 35 35 36 36 extern enum cpu_state_vals get_preferred_offline_state(int cpu); 37 - extern int start_secondary(void); 38 - extern void start_secondary_resume(void); 39 37 #endif
+30 -30
drivers/macintosh/therm_pm72.c
··· 153 153 static struct i2c_adapter * u3_1; 154 154 static struct i2c_adapter * k2; 155 155 static struct i2c_client * fcu; 156 - static struct cpu_pid_state cpu_state[2]; 156 + static struct cpu_pid_state processor_state[2]; 157 157 static struct basckside_pid_params backside_params; 158 158 static struct backside_pid_state backside_state; 159 159 static struct drives_pid_state drives_state; ··· 664 664 665 665 static void fetch_cpu_pumps_minmax(void) 666 666 { 667 - struct cpu_pid_state *state0 = &cpu_state[0]; 668 - struct cpu_pid_state *state1 = &cpu_state[1]; 667 + struct cpu_pid_state *state0 = &processor_state[0]; 668 + struct cpu_pid_state *state1 = &processor_state[1]; 669 669 u16 pump_min = 0, pump_max = 0xffff; 670 670 u16 tmp[4]; 671 671 ··· 717 717 return sprintf(buf, "%d", data); \ 718 718 } 719 719 720 - BUILD_SHOW_FUNC_FIX(cpu0_temperature, cpu_state[0].last_temp) 721 - BUILD_SHOW_FUNC_FIX(cpu0_voltage, cpu_state[0].voltage) 722 - BUILD_SHOW_FUNC_FIX(cpu0_current, cpu_state[0].current_a) 723 - BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, cpu_state[0].rpm) 724 - BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, cpu_state[0].intake_rpm) 720 + BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp) 721 + BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage) 722 + BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a) 723 + BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm) 724 + BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm) 725 725 726 - BUILD_SHOW_FUNC_FIX(cpu1_temperature, cpu_state[1].last_temp) 727 - BUILD_SHOW_FUNC_FIX(cpu1_voltage, cpu_state[1].voltage) 728 - BUILD_SHOW_FUNC_FIX(cpu1_current, cpu_state[1].current_a) 729 - BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, cpu_state[1].rpm) 730 - BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, cpu_state[1].intake_rpm) 726 + BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp) 727 + BUILD_SHOW_FUNC_FIX(cpu1_voltage, 
processor_state[1].voltage) 728 + BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a) 729 + BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm) 730 + BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm) 731 731 732 732 BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp) 733 733 BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm) ··· 919 919 920 920 static void do_monitor_cpu_combined(void) 921 921 { 922 - struct cpu_pid_state *state0 = &cpu_state[0]; 923 - struct cpu_pid_state *state1 = &cpu_state[1]; 922 + struct cpu_pid_state *state0 = &processor_state[0]; 923 + struct cpu_pid_state *state1 = &processor_state[1]; 924 924 s32 temp0, power0, temp1, power1; 925 925 s32 temp_combi, power_combi; 926 926 int rc, intake, pump; ··· 1150 1150 /* 1151 1151 * Initialize the state structure for one CPU control loop 1152 1152 */ 1153 - static int init_cpu_state(struct cpu_pid_state *state, int index) 1153 + static int init_processor_state(struct cpu_pid_state *state, int index) 1154 1154 { 1155 1155 int err; 1156 1156 ··· 1205 1205 /* 1206 1206 * Dispose of the state data for one CPU control loop 1207 1207 */ 1208 - static void dispose_cpu_state(struct cpu_pid_state *state) 1208 + static void dispose_processor_state(struct cpu_pid_state *state) 1209 1209 { 1210 1210 if (state->monitor == NULL) 1211 1211 return; ··· 1804 1804 set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM); 1805 1805 1806 1806 /* Initialize ADCs */ 1807 - initialize_adc(&cpu_state[0]); 1808 - if (cpu_state[1].monitor != NULL) 1809 - initialize_adc(&cpu_state[1]); 1807 + initialize_adc(&processor_state[0]); 1808 + if (processor_state[1].monitor != NULL) 1809 + initialize_adc(&processor_state[1]); 1810 1810 1811 1811 fcu_tickle_ticks = FCU_TICKLE_TICKS; 1812 1812 ··· 1833 1833 if (cpu_pid_type == CPU_PID_TYPE_COMBINED) 1834 1834 do_monitor_cpu_combined(); 1835 1835 else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) { 1836 - 
do_monitor_cpu_rack(&cpu_state[0]); 1837 - if (cpu_state[1].monitor != NULL) 1838 - do_monitor_cpu_rack(&cpu_state[1]); 1836 + do_monitor_cpu_rack(&processor_state[0]); 1837 + if (processor_state[1].monitor != NULL) 1838 + do_monitor_cpu_rack(&processor_state[1]); 1839 1839 // better deal with UP 1840 1840 } else { 1841 - do_monitor_cpu_split(&cpu_state[0]); 1842 - if (cpu_state[1].monitor != NULL) 1843 - do_monitor_cpu_split(&cpu_state[1]); 1841 + do_monitor_cpu_split(&processor_state[0]); 1842 + if (processor_state[1].monitor != NULL) 1843 + do_monitor_cpu_split(&processor_state[1]); 1844 1844 // better deal with UP 1845 1845 } 1846 1846 /* Then, the rest */ ··· 1885 1885 */ 1886 1886 static void dispose_control_loops(void) 1887 1887 { 1888 - dispose_cpu_state(&cpu_state[0]); 1889 - dispose_cpu_state(&cpu_state[1]); 1888 + dispose_processor_state(&processor_state[0]); 1889 + dispose_processor_state(&processor_state[1]); 1890 1890 dispose_backside_state(&backside_state); 1891 1891 dispose_drives_state(&drives_state); 1892 1892 dispose_slots_state(&slots_state); ··· 1928 1928 /* Create control loops for everything. If any fail, everything 1929 1929 * fails 1930 1930 */ 1931 - if (init_cpu_state(&cpu_state[0], 0)) 1931 + if (init_processor_state(&processor_state[0], 0)) 1932 1932 goto fail; 1933 1933 if (cpu_pid_type == CPU_PID_TYPE_COMBINED) 1934 1934 fetch_cpu_pumps_minmax(); 1935 1935 1936 - if (cpu_count > 1 && init_cpu_state(&cpu_state[1], 1)) 1936 + if (cpu_count > 1 && init_processor_state(&processor_state[1], 1)) 1937 1937 goto fail; 1938 1938 if (init_backside_state(&backside_state)) 1939 1939 goto fail;
+1
include/linux/kexec.h
··· 208 208 unsigned long long *crash_size, unsigned long long *crash_base); 209 209 int crash_shrink_memory(unsigned long new_size); 210 210 size_t crash_get_memory_size(void); 211 + void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); 211 212 212 213 #else /* !CONFIG_KEXEC */ 213 214 struct pt_regs;
+3 -2
kernel/kexec.c
··· 1099 1099 return size; 1100 1100 } 1101 1101 1102 - static void free_reserved_phys_range(unsigned long begin, unsigned long end) 1102 + void __weak crash_free_reserved_phys_range(unsigned long begin, 1103 + unsigned long end) 1103 1104 { 1104 1105 unsigned long addr; 1105 1106 ··· 1136 1135 start = roundup(start, PAGE_SIZE); 1137 1136 end = roundup(start + new_size, PAGE_SIZE); 1138 1137 1139 - free_reserved_phys_range(end, crashk_res.end); 1138 + crash_free_reserved_phys_range(end, crashk_res.end); 1140 1139 1141 1140 if ((start == end) && (crashk_res.parent != NULL)) 1142 1141 release_resource(&crashk_res);