Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] x86_64: Change init sections for CPU hotplug support

This patch adds __cpuinit and __cpuinitdata sections that need to exist past
boot to support CPU hotplug.

Caveat: This is done *only* for EM64T CPU Hotplug support, on request from
Andi Kleen. Neither the generic hotplug code in the kernel nor any of the
other archs that support CPU hotplug today (i386, ia64, ppc64, s390 and
parisc) mark sections with __cpuinit; they only mark them as __devinit and
__devinitdata.

If someone is motivated to change the generic code, we need to make sure all
existing hotplug code does not break on other archs that don't use __cpuinit
and __cpuinitdata.

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Andi Kleen <ak@muc.de>
Acked-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Ashok Raj and committed by
Linus Torvalds
e6982c67 52a119fe

+44 -37
+4 -4
arch/x86_64/kernel/apic.c
··· 285 285 apic_write_around(APIC_LVT1, value); 286 286 } 287 287 288 - void __init setup_local_APIC (void) 288 + void __cpuinit setup_local_APIC (void) 289 289 { 290 290 unsigned int value, ver, maxlvt; 291 291 ··· 534 534 .cls = &lapic_sysclass, 535 535 }; 536 536 537 - static void __init apic_pm_activate(void) 537 + static void __cpuinit apic_pm_activate(void) 538 538 { 539 539 apic_pm_state.active = 1; 540 540 } ··· 774 774 local_irq_enable(); 775 775 } 776 776 777 - void __init setup_secondary_APIC_clock(void) 777 + void __cpuinit setup_secondary_APIC_clock(void) 778 778 { 779 779 local_irq_disable(); /* FIXME: Do we need this? --RR */ 780 780 setup_APIC_timer(calibration_result); 781 781 local_irq_enable(); 782 782 } 783 783 784 - void __init disable_APIC_timer(void) 784 + void __cpuinit disable_APIC_timer(void) 785 785 { 786 786 if (using_apic_timer) { 787 787 unsigned long v;
+1 -1
arch/x86_64/kernel/i387.c
··· 42 42 * Called at bootup to set up the initial FPU state that is later cloned 43 43 * into all processes. 44 44 */ 45 - void __init fpu_init(void) 45 + void __cpuinit fpu_init(void) 46 46 { 47 47 unsigned long oldcr0 = read_cr0(); 48 48 extern void __bad_fxsave_alignment(void);
+4 -4
arch/x86_64/kernel/mce.c
··· 327 327 } 328 328 329 329 /* Add per CPU specific workarounds here */ 330 - static void __init mce_cpu_quirks(struct cpuinfo_x86 *c) 330 + static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) 331 331 { 332 332 /* This should be disabled by the BIOS, but isn't always */ 333 333 if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { ··· 337 337 } 338 338 } 339 339 340 - static void __init mce_cpu_features(struct cpuinfo_x86 *c) 340 + static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) 341 341 { 342 342 switch (c->x86_vendor) { 343 343 case X86_VENDOR_INTEL: ··· 352 352 * Called for each booted CPU to set up machine checks. 353 353 * Must be called with preempt off. 354 354 */ 355 - void __init mcheck_init(struct cpuinfo_x86 *c) 355 + void __cpuinit mcheck_init(struct cpuinfo_x86 *c) 356 356 { 357 357 static cpumask_t mce_cpus __initdata = CPU_MASK_NONE; 358 358 ··· 542 542 ACCESSOR(tolerant,tolerant,) 543 543 ACCESSOR(check_interval,check_interval,mce_restart()) 544 544 545 - static __init int mce_init_device(void) 545 + static __cpuinit int mce_init_device(void) 546 546 { 547 547 int err; 548 548 if (!mce_available(&boot_cpu_data))
+2 -2
arch/x86_64/kernel/mce_intel.c
··· 42 42 irq_exit(); 43 43 } 44 44 45 - static void __init intel_init_thermal(struct cpuinfo_x86 *c) 45 + static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) 46 46 { 47 47 u32 l, h; 48 48 int tm2 = 0; ··· 93 93 return; 94 94 } 95 95 96 - void __init mce_intel_feature_init(struct cpuinfo_x86 *c) 96 + void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c) 97 97 { 98 98 intel_init_thermal(c); 99 99 }
+2 -2
arch/x86_64/kernel/nmi.c
··· 98 98 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 99 99 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 100 100 101 - static __init inline int nmi_known_cpu(void) 101 + static __cpuinit inline int nmi_known_cpu(void) 102 102 { 103 103 switch (boot_cpu_data.x86_vendor) { 104 104 case X86_VENDOR_AMD: ··· 110 110 } 111 111 112 112 /* Run after command line and cpu_init init, but before all other checks */ 113 - void __init nmi_watchdog_default(void) 113 + void __cpuinit nmi_watchdog_default(void) 114 114 { 115 115 if (nmi_watchdog != NMI_DEFAULT) 116 116 return;
+1 -1
arch/x86_64/kernel/process.c
··· 204 204 } 205 205 } 206 206 207 - void __init select_idle_routine(const struct cpuinfo_x86 *c) 207 + void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 208 208 { 209 209 static int printed; 210 210 if (cpu_has(c, X86_FEATURE_MWAIT)) {
+9 -9
arch/x86_64/kernel/setup.c
··· 676 676 #endif 677 677 } 678 678 679 - static int __init get_model_name(struct cpuinfo_x86 *c) 679 + static int __cpuinit get_model_name(struct cpuinfo_x86 *c) 680 680 { 681 681 unsigned int *v; 682 682 ··· 692 692 } 693 693 694 694 695 - static void __init display_cacheinfo(struct cpuinfo_x86 *c) 695 + static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c) 696 696 { 697 697 unsigned int n, dummy, eax, ebx, ecx, edx; 698 698 ··· 803 803 return r; 804 804 } 805 805 806 - static void __init detect_ht(struct cpuinfo_x86 *c) 806 + static void __cpuinit detect_ht(struct cpuinfo_x86 *c) 807 807 { 808 808 #ifdef CONFIG_SMP 809 809 u32 eax, ebx, ecx, edx; ··· 864 864 /* 865 865 * find out the number of processor cores on the die 866 866 */ 867 - static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c) 867 + static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) 868 868 { 869 869 unsigned int eax; 870 870 ··· 882 882 return 1; 883 883 } 884 884 885 - static void __init init_intel(struct cpuinfo_x86 *c) 885 + static void __cpuinit init_intel(struct cpuinfo_x86 *c) 886 886 { 887 887 /* Cache sizes */ 888 888 unsigned n; ··· 902 902 c->x86_num_cores = intel_num_cpu_cores(c); 903 903 } 904 904 905 - void __init get_cpu_vendor(struct cpuinfo_x86 *c) 905 + void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) 906 906 { 907 907 char *v = c->x86_vendor_id; 908 908 ··· 923 923 /* Do some early cpuid on the boot CPU to get some parameter that are 924 924 needed before check_bugs. Everything advanced is in identify_cpu 925 925 below. */ 926 - void __init early_identify_cpu(struct cpuinfo_x86 *c) 926 + void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c) 927 927 { 928 928 u32 tfms; 929 929 ··· 977 977 /* 978 978 * This does the hard work of actually picking apart the CPU stuff... 
979 979 */ 980 - void __init identify_cpu(struct cpuinfo_x86 *c) 980 + void __cpuinit identify_cpu(struct cpuinfo_x86 *c) 981 981 { 982 982 int i; 983 983 u32 xlvl; ··· 1054 1054 } 1055 1055 1056 1056 1057 - void __init print_cpu_info(struct cpuinfo_x86 *c) 1057 + void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) 1058 1058 { 1059 1059 if (c->x86_model_id[0]) 1060 1060 printk("%s", c->x86_model_id);
+3 -3
arch/x86_64/kernel/setup64.c
··· 29 29 30 30 char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,}; 31 31 32 - cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; 32 + cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; 33 33 34 34 struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned; 35 35 ··· 171 171 wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000); 172 172 } 173 173 174 - void __init check_efer(void) 174 + void __cpuinit check_efer(void) 175 175 { 176 176 unsigned long efer; 177 177 ··· 188 188 * 'CPU state barrier', nothing should get across. 189 189 * A lot of state is already set up in PDA init. 190 190 */ 191 - void __init cpu_init (void) 191 + void __cpuinit cpu_init (void) 192 192 { 193 193 #ifdef CONFIG_SMP 194 194 int cpu = stack_smp_processor_id();
+5 -10
arch/x86_64/kernel/smpboot.c
··· 58 58 #include <asm/proto.h> 59 59 #include <asm/nmi.h> 60 60 61 - /* Change for real CPU hotplug. Note other files need to be fixed 62 - first too. */ 63 - #define __cpuinit __init 64 - #define __cpuinitdata __initdata 65 - 66 61 /* Number of siblings per CPU package */ 67 62 int smp_num_siblings = 1; 68 63 /* Package ID of each logical CPU */ ··· 818 823 * 819 824 * RED-PEN audit/test this more. I bet there is more state messed up here. 820 825 */ 821 - static __cpuinit void disable_smp(void) 826 + static __init void disable_smp(void) 822 827 { 823 828 cpu_present_map = cpumask_of_cpu(0); 824 829 cpu_possible_map = cpumask_of_cpu(0); ··· 833 838 /* 834 839 * Handle user cpus=... parameter. 835 840 */ 836 - static __cpuinit void enforce_max_cpus(unsigned max_cpus) 841 + static __init void enforce_max_cpus(unsigned max_cpus) 837 842 { 838 843 int i, k; 839 844 k = 0; ··· 850 855 /* 851 856 * Various sanity checks. 852 857 */ 853 - static int __cpuinit smp_sanity_check(unsigned max_cpus) 858 + static int __init smp_sanity_check(unsigned max_cpus) 854 859 { 855 860 if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) { 856 861 printk("weird, boot CPU (#%d) not listed by the BIOS.\n", ··· 908 913 * Prepare for SMP bootup. The MP table or ACPI has been read 909 914 * earlier. Just do some sanity checking here and enable APIC mode. 910 915 */ 911 - void __cpuinit smp_prepare_cpus(unsigned int max_cpus) 916 + void __init smp_prepare_cpus(unsigned int max_cpus) 912 917 { 913 918 int i; 914 919 ··· 1014 1019 /* 1015 1020 * Finish the SMP boot. 1016 1021 */ 1017 - void __cpuinit smp_cpus_done(unsigned int max_cpus) 1022 + void __init smp_cpus_done(unsigned int max_cpus) 1018 1023 { 1019 1024 zap_low_mappings(); 1020 1025 smp_cleanup_boot();
+1 -1
arch/x86_64/mm/numa.c
··· 251 251 setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT); 252 252 } 253 253 254 - __init void numa_add_cpu(int cpu) 254 + __cpuinit void numa_add_cpu(int cpu) 255 255 { 256 256 /* BP is initialized elsewhere */ 257 257 if (cpu)
+12
include/linux/init.h
··· 229 229 #define __devexitdata __exitdata 230 230 #endif 231 231 232 + #ifdef CONFIG_HOTPLUG_CPU 233 + #define __cpuinit 234 + #define __cpuinitdata 235 + #define __cpuexit 236 + #define __cpuexitdata 237 + #else 238 + #define __cpuinit __init 239 + #define __cpuinitdata __initdata 240 + #define __cpuexit __exit 241 + #define __cpuexitdata __exitdata 242 + #endif 243 + 232 244 /* Functions marked as __devexit may be discarded at kernel link time, depending 233 245 on config options. Newer versions of binutils detect references from 234 246 retained sections to discarded sections and flag an error. Pointers to