Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] init call cleanup

Trivial patch for CPU hotplug. In the CPU identify part, cleanup was only done for Intel
CPUs. This still needs to be done for other CPUs if they support S3 SMP.

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by

Li Shaohua and committed by
Linus Torvalds
0bb3184d d720803a

+44 -44
+7 -7
arch/i386/kernel/apic.c
··· 364 364 apic_write_around(APIC_LVT1, value); 365 365 } 366 366 367 - void __init setup_local_APIC (void) 367 + void __devinit setup_local_APIC(void) 368 368 { 369 369 unsigned long oldvalue, value, ver, maxlvt; 370 370 ··· 635 635 .cls = &lapic_sysclass, 636 636 }; 637 637 638 - static void __init apic_pm_activate(void) 638 + static void __devinit apic_pm_activate(void) 639 639 { 640 640 apic_pm_state.active = 1; 641 641 } ··· 856 856 * but we do not accept timer interrupts yet. We only allow the BP 857 857 * to calibrate. 858 858 */ 859 - static unsigned int __init get_8254_timer_count(void) 859 + static unsigned int __devinit get_8254_timer_count(void) 860 860 { 861 861 extern spinlock_t i8253_lock; 862 862 unsigned long flags; ··· 875 875 } 876 876 877 877 /* next tick in 8254 can be caught by catching timer wraparound */ 878 - static void __init wait_8254_wraparound(void) 878 + static void __devinit wait_8254_wraparound(void) 879 879 { 880 880 unsigned int curr_count, prev_count; 881 881 ··· 895 895 * Default initialization for 8254 timers. If we use other timers like HPET, 896 896 * we override this later 897 897 */ 898 - void (*wait_timer_tick)(void) __initdata = wait_8254_wraparound; 898 + void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound; 899 899 900 900 /* 901 901 * This function sets up the local APIC timer, with a timeout of ··· 931 931 apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR); 932 932 } 933 933 934 - static void __init setup_APIC_timer(unsigned int clocks) 934 + static void __devinit setup_APIC_timer(unsigned int clocks) 935 935 { 936 936 unsigned long flags; 937 937 ··· 1044 1044 local_irq_enable(); 1045 1045 } 1046 1046 1047 - void __init setup_secondary_APIC_clock(void) 1047 + void __devinit setup_secondary_APIC_clock(void) 1048 1048 { 1049 1049 setup_APIC_timer(calibration_result); 1050 1050 }
+15 -15
arch/i386/kernel/cpu/common.c
··· 24 24 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); 25 25 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); 26 26 27 - static int cachesize_override __initdata = -1; 28 - static int disable_x86_fxsr __initdata = 0; 29 - static int disable_x86_serial_nr __initdata = 1; 27 + static int cachesize_override __devinitdata = -1; 28 + static int disable_x86_fxsr __devinitdata = 0; 29 + static int disable_x86_serial_nr __devinitdata = 1; 30 30 31 31 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; 32 32 ··· 59 59 } 60 60 __setup("cachesize=", cachesize_setup); 61 61 62 - int __init get_model_name(struct cpuinfo_x86 *c) 62 + int __devinit get_model_name(struct cpuinfo_x86 *c) 63 63 { 64 64 unsigned int *v; 65 65 char *p, *q; ··· 89 89 } 90 90 91 91 92 - void __init display_cacheinfo(struct cpuinfo_x86 *c) 92 + void __devinit display_cacheinfo(struct cpuinfo_x86 *c) 93 93 { 94 94 unsigned int n, dummy, ecx, edx, l2size; 95 95 ··· 130 130 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */ 131 131 132 132 /* Look up CPU names by table lookup. 
*/ 133 - static char __init *table_lookup_model(struct cpuinfo_x86 *c) 133 + static char __devinit *table_lookup_model(struct cpuinfo_x86 *c) 134 134 { 135 135 struct cpu_model_info *info; 136 136 ··· 151 151 } 152 152 153 153 154 - void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early) 154 + void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early) 155 155 { 156 156 char *v = c->x86_vendor_id; 157 157 int i; ··· 202 202 203 203 204 204 /* Probe for the CPUID instruction */ 205 - static int __init have_cpuid_p(void) 205 + static int __devinit have_cpuid_p(void) 206 206 { 207 207 return flag_is_changeable_p(X86_EFLAGS_ID); 208 208 } ··· 249 249 #endif 250 250 } 251 251 252 - void __init generic_identify(struct cpuinfo_x86 * c) 252 + void __devinit generic_identify(struct cpuinfo_x86 * c) 253 253 { 254 254 u32 tfms, xlvl; 255 255 int junk; ··· 296 296 } 297 297 } 298 298 299 - static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 299 + static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 300 300 { 301 301 if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) { 302 302 /* Disable processor serial number */ ··· 324 324 /* 325 325 * This does the hard work of actually picking apart the CPU stuff... 
326 326 */ 327 - void __init identify_cpu(struct cpuinfo_x86 *c) 327 + void __devinit identify_cpu(struct cpuinfo_x86 *c) 328 328 { 329 329 int i; 330 330 ··· 438 438 } 439 439 440 440 #ifdef CONFIG_X86_HT 441 - void __init detect_ht(struct cpuinfo_x86 *c) 441 + void __devinit detect_ht(struct cpuinfo_x86 *c) 442 442 { 443 443 u32 eax, ebx, ecx, edx; 444 444 int index_msb, tmp; ··· 493 493 } 494 494 #endif 495 495 496 - void __init print_cpu_info(struct cpuinfo_x86 *c) 496 + void __devinit print_cpu_info(struct cpuinfo_x86 *c) 497 497 { 498 498 char *vendor = NULL; 499 499 ··· 516 516 printk("\n"); 517 517 } 518 518 519 - cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; 519 + cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE; 520 520 521 521 /* This is hacky. :) 522 522 * We're emulating future behavior. ··· 563 563 * and IDT. We reload them nevertheless, this function acts as a 564 564 * 'CPU state barrier', nothing should get across. 565 565 */ 566 - void __init cpu_init (void) 566 + void __devinit cpu_init(void) 567 567 { 568 568 int cpu = smp_processor_id(); 569 569 struct tss_struct * t = &per_cpu(init_tss, cpu);
+6 -6
arch/i386/kernel/cpu/intel.c
··· 28 28 struct movsl_mask movsl_mask; 29 29 #endif 30 30 31 - void __init early_intel_workaround(struct cpuinfo_x86 *c) 31 + void __devinit early_intel_workaround(struct cpuinfo_x86 *c) 32 32 { 33 33 if (c->x86_vendor != X86_VENDOR_INTEL) 34 34 return; ··· 43 43 * This is called before we do cpu ident work 44 44 */ 45 45 46 - int __init ppro_with_ram_bug(void) 46 + int __devinit ppro_with_ram_bug(void) 47 47 { 48 48 /* Uses data from early_cpu_detect now */ 49 49 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && ··· 61 61 * P4 Xeon errata 037 workaround. 62 62 * Hardware prefetcher may cause stale data to be loaded into the cache. 63 63 */ 64 - static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c) 64 + static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c) 65 65 { 66 66 unsigned long lo, hi; 67 67 ··· 80 80 /* 81 81 * find out the number of processor cores on the die 82 82 */ 83 - static int __init num_cpu_cores(struct cpuinfo_x86 *c) 83 + static int __devinit num_cpu_cores(struct cpuinfo_x86 *c) 84 84 { 85 85 unsigned int eax; 86 86 ··· 98 98 return 1; 99 99 } 100 100 101 - static void __init init_intel(struct cpuinfo_x86 *c) 101 + static void __devinit init_intel(struct cpuinfo_x86 *c) 102 102 { 103 103 unsigned int l2 = 0; 104 104 char *p = NULL; ··· 204 204 return size; 205 205 } 206 206 207 - static struct cpu_dev intel_cpu_dev __initdata = { 207 + static struct cpu_dev intel_cpu_dev __devinitdata = { 208 208 .c_vendor = "Intel", 209 209 .c_ident = { "GenuineIntel" }, 210 210 .c_models = {
+2 -2
arch/i386/kernel/cpu/intel_cacheinfo.c
··· 28 28 }; 29 29 30 30 /* all the cache descriptor types we care about (no TLB or trace cache entries) */ 31 - static struct _cache_table cache_table[] __initdata = 31 + static struct _cache_table cache_table[] __devinitdata = 32 32 { 33 33 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 34 34 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ ··· 160 160 return retval; 161 161 } 162 162 163 - unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c) 163 + unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 164 164 { 165 165 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ 166 166 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
+1 -1
arch/i386/kernel/cpu/mcheck/mce.c
··· 31 31 void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check; 32 32 33 33 /* This has to be run for each processor */ 34 - void __init mcheck_init(struct cpuinfo_x86 *c) 34 + void __devinit mcheck_init(struct cpuinfo_x86 *c) 35 35 { 36 36 if (mce_disabled==1) 37 37 return;
+1 -1
arch/i386/kernel/cpu/mcheck/p5.c
··· 29 29 } 30 30 31 31 /* Set up machine check reporting for processors with Intel style MCE */ 32 - void __init intel_p5_mcheck_init(struct cpuinfo_x86 *c) 32 + void __devinit intel_p5_mcheck_init(struct cpuinfo_x86 *c) 33 33 { 34 34 u32 l, h; 35 35
+1 -1
arch/i386/kernel/process.c
··· 260 260 } 261 261 } 262 262 263 - void __init select_idle_routine(const struct cpuinfo_x86 *c) 263 + void __devinit select_idle_routine(const struct cpuinfo_x86 *c) 264 264 { 265 265 if (cpu_has(c, X86_FEATURE_MWAIT)) { 266 266 printk("monitor/mwait feature present.\n");
+1 -1
arch/i386/kernel/setup.c
··· 60 60 address, and must not be in the .bss segment! */ 61 61 unsigned long init_pg_tables_end __initdata = ~0UL; 62 62 63 - int disable_pse __initdata = 0; 63 + int disable_pse __devinitdata = 0; 64 64 65 65 /* 66 66 * Machine setup..
+9 -9
arch/i386/kernel/smpboot.c
··· 59 59 #include <smpboot_hooks.h> 60 60 61 61 /* Set if we find a B stepping CPU */ 62 - static int __initdata smp_b_stepping; 62 + static int __devinitdata smp_b_stepping; 63 63 64 64 /* Number of siblings per CPU package */ 65 65 int smp_num_siblings = 1; ··· 118 118 * has made sure it's suitably aligned. 119 119 */ 120 120 121 - static unsigned long __init setup_trampoline(void) 121 + static unsigned long __devinit setup_trampoline(void) 122 122 { 123 123 memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); 124 124 return virt_to_phys(trampoline_base); ··· 148 148 * a given CPU 149 149 */ 150 150 151 - static void __init smp_store_cpu_info(int id) 151 + static void __devinit smp_store_cpu_info(int id) 152 152 { 153 153 struct cpuinfo_x86 *c = cpu_data + id; 154 154 ··· 342 342 343 343 static atomic_t init_deasserted; 344 344 345 - static void __init smp_callin(void) 345 + static void __devinit smp_callin(void) 346 346 { 347 347 int cpuid, phys_id; 348 348 unsigned long timeout; ··· 468 468 /* 469 469 * Activate a secondary processor. 470 470 */ 471 - static void __init start_secondary(void *unused) 471 + static void __devinit start_secondary(void *unused) 472 472 { 473 473 /* 474 474 * Dont put anything before smp_callin(), SMP ··· 521 521 * from the task structure 522 522 * This function must not return. 523 523 */ 524 - void __init initialize_secondary(void) 524 + void __devinit initialize_secondary(void) 525 525 { 526 526 /* 527 527 * We don't actually need to load the full TSS, ··· 635 635 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this 636 636 * won't ... remember to clear down the APIC, etc later. 
637 637 */ 638 - static int __init 638 + static int __devinit 639 639 wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip) 640 640 { 641 641 unsigned long send_status = 0, accept_status = 0; ··· 681 681 #endif /* WAKE_SECONDARY_VIA_NMI */ 682 682 683 683 #ifdef WAKE_SECONDARY_VIA_INIT 684 - static int __init 684 + static int __devinit 685 685 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) 686 686 { 687 687 unsigned long send_status = 0, accept_status = 0; ··· 817 817 818 818 extern cpumask_t cpu_initialized; 819 819 820 - static int __init do_boot_cpu(int apicid) 820 + static int __devinit do_boot_cpu(int apicid) 821 821 /* 822 822 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad 823 823 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
+1 -1
arch/i386/kernel/timers/timer_tsc.c
··· 33 33 34 34 static inline void cpufreq_delayed_get(void); 35 35 36 - int tsc_disable __initdata = 0; 36 + int tsc_disable __devinitdata = 0; 37 37 38 38 extern spinlock_t i8253_lock; 39 39