
x86: delete __cpuinit usage from all x86 files

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
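
As a reference point, once all users are converted, the cpuinit
family in include/linux/init.h is expected to reduce to no-ops
roughly like the sketch below (the exact final definitions in the
tree may differ slightly):

    #define __cpuinit
    #define __cpuinitdata
    #define __cpuinitconst
    #define __CPUINIT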

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
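
As a rough illustration of how such a mismatch arises, here is a
minimal userspace sketch (the *_stub names are hypothetical; in the
real tree the cross-section reference is flagged by modpost at build
time, not by the compiler):

    #include <stdio.h>

    /* Stand-in for the kernel's __cpuinit: place the function in a
     * dedicated throwaway section, analogous to .cpuinit.text. */
    #define __cpuinit __attribute__((__section__(".cpuinit.text")))

    /* Generic code still annotated, like notify_cpu_starting() in
     * kernel/cpu.c: */
    static void __cpuinit notify_cpu_starting_stub(unsigned int cpu)
    {
            printf("CPU%u starting\n", cpu);
    }

    /* Arch code with the annotation removed; the reference from
     * regular .text into .cpuinit.text is what gets flagged: */
    static void start_secondary_stub(void)
    {
            notify_cpu_starting_stub(1);
    }

    int main(void)
    {
            start_secondary_stub();
            return 0;
    }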

This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly without any corresponding additional change there.
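
For reference, the assembly-side annotations in the init.h of this
era are approximately:

    #define __CPUINIT        .section ".cpuinit.text", "ax"
    #define __FINIT          .previous

so the unpaired __CPUINIT in head_32.S was just a stray section
directive, and dropping it simply leaves startup_32_smp in the
enclosing section.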

[1] https://lkml.org/lkml/2013/5/20/589

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

+345 -356
+1 -1
arch/x86/include/asm/cpu.h
···
 #ifdef CONFIG_HOTPLUG_CPU
 extern int arch_register_cpu(int num);
 extern void arch_unregister_cpu(int);
-extern void __cpuinit start_cpu0(void);
+extern void start_cpu0(void);
 #ifdef CONFIG_DEBUG_HOTPLUG_CPU0
 extern int _debug_hotplug_cpu(int cpu, int action);
 #endif
+2 -2
arch/x86/include/asm/microcode.h
···
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern void __cpuinit load_ucode_ap(void);
+extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline void __cpuinit load_ucode_ap(void) {}
+static inline void load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
+2 -2
arch/x86/include/asm/microcode_amd.h
···
 extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
 #endif
 extern void __init load_ucode_amd_bsp(void);
-extern void __cpuinit load_ucode_amd_ap(void);
+extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
 #else
 static inline void __init load_ucode_amd_bsp(void) {}
-static inline void __cpuinit load_ucode_amd_ap(void) {}
+static inline void load_ucode_amd_ap(void) {}
 static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
 #endif
 
+2 -2
arch/x86/include/asm/microcode_intel.h
···
 
 #ifdef CONFIG_MICROCODE_INTEL_EARLY
 extern void __init load_ucode_intel_bsp(void);
-extern void __cpuinit load_ucode_intel_ap(void);
+extern void load_ucode_intel_ap(void);
 extern void show_ucode_info_early(void);
 extern int __init save_microcode_in_initrd_intel(void);
 #else
 static inline __init void load_ucode_intel_bsp(void) {}
-static inline __cpuinit void load_ucode_intel_ap(void) {}
+static inline void load_ucode_intel_ap(void) {}
 static inline void show_ucode_info_early(void) {}
 static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
 #endif
+2 -2
arch/x86/include/asm/mmconfig.h
···
 #define _ASM_X86_MMCONFIG_H
 
 #ifdef CONFIG_PCI_MMCONFIG
-extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __cpuinit check_enable_amd_mmconf_dmi(void);
+extern void fam10h_check_enable_mmcfg(void);
+extern void check_enable_amd_mmconf_dmi(void);
 #else
 static inline void fam10h_check_enable_mmcfg(void) { }
 static inline void check_enable_amd_mmconf_dmi(void) { }
+1 -1
arch/x86/include/asm/mpspec.h
···
 #define default_get_smp_config x86_init_uint_noop
 #endif
 
-void __cpuinit generic_processor_info(int apicid, int version);
+void generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
+3 -3
arch/x86/include/asm/numa.h
···
 	__apicid_to_node[apicid] = node;
 }
 
-extern int __cpuinit numa_cpu_node(int cpu);
+extern int numa_cpu_node(int cpu);
 
 #else	/* CONFIG_NUMA */
 static inline void set_apicid_to_node(int apicid, s16 node)
···
 extern void numa_set_node(int cpu, int node);
 extern void numa_clear_node(int cpu);
 extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern void numa_add_cpu(int cpu);
+extern void numa_remove_cpu(int cpu);
 #else	/* CONFIG_NUMA */
 static inline void numa_set_node(int cpu, int node)	{ }
 static inline void numa_clear_node(int cpu)		{ }
+1 -1
arch/x86/include/asm/processor.h
···
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
-extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c);
+extern void fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
+1 -1
arch/x86/include/asm/prom.h
···
 extern u64 initial_dtb;
 extern void add_dtb(u64 data);
 extern void x86_add_irq_domains(void);
-void __cpuinit x86_of_pci_init(void);
+void x86_of_pci_init(void);
 void x86_dtb_init(void);
 #else
 static inline void add_dtb(u64 data) { }
+1 -1
arch/x86/include/asm/smp.h
···
 }
 #endif /* CONFIG_SMP */
 
-extern unsigned disabled_cpus __cpuinitdata;
+extern unsigned disabled_cpus;
 
 #ifdef CONFIG_X86_32_SMP
 /*
+3 -3
arch/x86/kernel/acpi/boot.c
···
 	return 0;
 }
 
-static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+static void acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
···
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
···
 #endif
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
+15 -15
arch/x86/kernel/apic/apic.c
···
 
 unsigned int num_processors;
 
-unsigned disabled_cpus __cpuinitdata;
+unsigned disabled_cpus;
 
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_physical_apicid = -1U;
···
  * Setup the local APIC timer for this CPU. Copy the initialized values
  * of the boot CPU and register the clock event in the framework.
  */
-static void __cpuinit setup_APIC_timer(void)
+static void setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
···
 	setup_APIC_timer();
 }
 
-void __cpuinit setup_secondary_APIC_clock(void)
+void setup_secondary_APIC_clock(void)
 {
 	setup_APIC_timer();
 }
···
 	apic_write(APIC_LVT1, value);
 }
 
-static void __cpuinit lapic_setup_esr(void)
+static void lapic_setup_esr(void)
 {
 	unsigned int oldvalue, value, maxlvt;
 
···
  * Used to setup local APIC while initializing BSP or bringin up APs.
  * Always called with preemption disabled.
  */
-void __cpuinit setup_local_APIC(void)
+void setup_local_APIC(void)
 {
 	int cpu = smp_processor_id();
 	unsigned int value, queued;
···
 #endif
 }
 
-void __cpuinit end_local_APIC_setup(void)
+void end_local_APIC_setup(void)
 {
 	lapic_setup_esr();
 
···
 	apic_write(APIC_LVT1, value);
 }
 
-void __cpuinit generic_processor_info(int apicid, int version)
+void generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
···
 	.suspend = lapic_suspend,
 };
 
-static void __cpuinit apic_pm_activate(void)
+static void apic_pm_activate(void)
 {
 	apic_pm_state.active = 1;
 }
···
 
 #ifdef CONFIG_X86_64
 
-static int __cpuinit apic_cluster_num(void)
+static int apic_cluster_num(void)
 {
 	int i, clusters, zeros;
 	unsigned id;
···
 	return clusters;
 }
 
-static int __cpuinitdata multi_checked;
-static int __cpuinitdata multi;
+static int multi_checked;
+static int multi;
 
-static int __cpuinit set_multi(const struct dmi_system_id *d)
+static int set_multi(const struct dmi_system_id *d)
 {
 	if (multi)
 		return 0;
···
 	return 0;
 }
 
-static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+static const struct dmi_system_id multi_dmi_table[] = {
 	{
 		.callback = set_multi,
 		.ident = "IBM System Summit2",
···
 	{}
 };
 
-static void __cpuinit dmi_check_multi(void)
+static void dmi_check_multi(void)
 {
 	if (multi_checked)
 		return;
···
  * multi-chassis.
  * Use DMI to check them
  */
-__cpuinit int apic_is_clustered_box(void)
+int apic_is_clustered_box(void)
 {
 	dmi_check_multi();
 	if (multi)
+1 -1
arch/x86/kernel/apic/apic_numachip.c
···
 	return initial_apic_id >> index_msb;
 }
 
-static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	union numachip_csr_g3_ext_irq_gen int_gen;
 
+1 -1
arch/x86/kernel/apic/es7000_32.c
···
  */
 
 
-static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 {
 	unsigned long vect = 0, psaival = 0;
 
+1 -1
arch/x86/kernel/apic/numaq_32.c
···
 	}
 }
 
-void __cpuinit numaq_tsc_disable(void)
+void numaq_tsc_disable(void)
 {
 	if (!found_numaq)
 		return;
+1 -1
arch/x86/kernel/apic/x2apic_cluster.c
···
 /*
  * At CPU state changes, update the x2apic cluster sibling info.
  */
-static int __cpuinit
+static int
 update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int this_cpu = (unsigned long)hcpu;
+7 -7
arch/x86/kernel/apic/x2apic_uv_x.c
···
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
 	unsigned long val;
···
 	.safe_wait_icr_idle	= native_safe_x2apic_wait_icr_idle,
 };
 
-static __cpuinit void set_x2apic_extra_bits(int pnode)
+static void set_x2apic_extra_bits(int pnode)
 {
 	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
···
 	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
-static void __cpuinit uv_heartbeat_enable(int cpu)
+static void uv_heartbeat_enable(int cpu)
 {
 	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
 		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
···
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit uv_heartbeat_disable(int cpu)
+static void uv_heartbeat_disable(int cpu)
 {
 	if (uv_cpu_hub_info(cpu)->scir.enabled) {
 		uv_cpu_hub_info(cpu)->scir.enabled = 0;
···
 /*
  * cpu hotplug notifier
  */
-static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	long cpu = (long)hcpu;
 
···
  * Called on each cpu to initialize the per_cpu UV data area.
  * FIXME: hotplug not supported yet
  */
-void __cpuinit uv_cpu_init(void)
+void uv_cpu_init(void)
 {
 	/* CPU 0 initilization will be done via uv_system_init. */
 	if (!uv_blade_info)
+16 -17
arch/x86/kernel/cpu/amd.c
···
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
+static void init_amd_k5(struct cpuinfo_x86 *c)
 {
 /*
  * General Systems BIOSen alias the cpu frequency registers
···
 }
 
 
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+static void init_amd_k6(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
···
 	}
 }
 
-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
···
 	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
 }
 
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 
···
  * To workaround broken NUMA config.  Read the comment in
  * srat_detect_node().
  */
-static int __cpuinit nearby_node(int apicid)
+static int nearby_node(int apicid)
 {
 	int i, node;
 
···
  * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
+static void amd_get_topology(struct cpuinfo_x86 *c)
 {
 	u32 nodes, cores_per_cu = 1;
 	u8 node_id;
···
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
  * Assumes number of cores is a power of two.
  */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits;
···
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
···
 #endif
 }
 
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+static void early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits, ecx;
···
 #endif
 }
 
-static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
+static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 
···
 	}
 }
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
 
···
 static const int amd_erratum_400[];
 static bool cpu_has_amd_erratum(const int *erratum);
 
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+static void init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
 	unsigned long long value;
···
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
-					     unsigned int size)
+static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
···
 }
 #endif
 
-static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
+static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
 	tlb_flushall_shift = 5;
 
···
 		tlb_flushall_shift = 4;
 }
 
-static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 {
 	u32 ebx, eax, ecx, edx;
 	u16 mask = 0xfff;
···
 	cpu_set_tlb_flushall_shift(c);
 }
 
-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
+static const struct cpu_dev amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
+13 -13
arch/x86/kernel/cpu/centaur.c
···
 
 #ifdef CONFIG_X86_OOSTORE
 
-static u32 __cpuinit power2(u32 x)
+static u32 power2(u32 x)
 {
 	u32 s = 1;
 
···
 /*
  * Set up an actual MCR
  */
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
 	u32 lo, hi;
 
···
  *
  * Shortcut: We know you can't put 4Gig of RAM on a winchip
  */
-static u32 __cpuinit ramtop(void)
+static u32 ramtop(void)
 {
 	u32 clip = 0xFFFFFFFFUL;
 	u32 top = 0;
···
 /*
  * Compute a set of MCR's to give maximum coverage
  */
-static int __cpuinit centaur_mcr_compute(int nr, int key)
+static int centaur_mcr_compute(int nr, int key)
 {
 	u32 mem = ramtop();
 	u32 root = power2(mem);
···
 	return ct;
 }
 
-static void __cpuinit centaur_create_optimal_mcr(void)
+static void centaur_create_optimal_mcr(void)
 {
 	int used;
 	int i;
···
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
-static void __cpuinit winchip2_create_optimal_mcr(void)
+static void winchip2_create_optimal_mcr(void)
 {
 	u32 lo, hi;
 	int used;
···
 /*
  * Handle the MCR key on the Winchip 2.
  */
-static void __cpuinit winchip2_unprotect_mcr(void)
+static void winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
 	u32 key;
···
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }
 
-static void __cpuinit winchip2_protect_mcr(void)
+static void winchip2_protect_mcr(void)
 {
 	u32 lo, hi;
 
···
 #define RNG_ENABLED	(1 << 3)
 #define RNG_ENABLE	(1 << 6)	/* MSR_VIA_RNG */
 
-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
+static void init_c3(struct cpuinfo_x86 *c)
 {
 	u32 lo, hi;
 
···
 	EAMD3D		= 1<<20,
 };
 
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+static void early_init_centaur(struct cpuinfo_x86 *c)
 {
 	switch (c->x86) {
 #ifdef CONFIG_X86_32
···
 #endif
 }
 
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void init_centaur(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	char *name;
···
 #endif
 }
 
-static unsigned int __cpuinit
+static unsigned int
 centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 #ifdef CONFIG_X86_32
···
 	return size;
 }
 
-static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
+static const struct cpu_dev centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
+32 -32
arch/x86/kernel/cpu/common.c
···
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	cpu_detect_cache_sizes(c);
···
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 };
 
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
···
 __setup("noxsaveopt", x86_xsaveopt_setup);
 
 #ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;
 
 static int __init cachesize_setup(char *str)
 {
···
 }
 
 /* Probe for the CPUID instruction */
-int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
···
 	u32 level;
 };
 
-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
 	{ X86_FEATURE_DCA,		0x00000009 },
···
 	{ 0, 0 }
 };
 
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
 
···
  */
 
 /* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	const struct cpu_model_info *info;
 
···
 	return NULL;		/* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];
 
 void load_percpu_segment(int cpu)
 {
···
 	load_percpu_segment(cpu);
 }
 
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
···
 	}
 }
 
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
···
  */
 s8  __read_mostly tlb_flushall_shift = -1;
 
-void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	if (this_cpu->c_detect_tlb)
 		this_cpu->c_detect_tlb(c);
···
 		tlb_flushall_shift);
 }
 
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
···
 #endif
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
···
 	this_cpu = &default_cpu;
 }
 
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
···
 	}
 }
 
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	u32 ebx;
···
 	init_scattered_cpuid_features(c);
 }
 
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	int i;
···
  * unless we can find a reliable way to detect all the broken cases.
  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
  */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+static void detect_nopl(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
···
 #endif
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+static void generic_identify(struct cpuinfo_x86 *c)
 {
 	c->extended_cpuid_level = 0;
 
···
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
···
 	cpu_detect_tlb(&boot_cpu_data);
 }
 
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(struct cpuinfo_x86 *c)
 {
 	BUG_ON(c == &boot_cpu_data);
 	identify_cpu(c);
···
 	unsigned	max;
 };
 
-static const struct msr_range msr_range_array[] __cpuinitconst = {
+static const struct msr_range msr_range_array[] = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
 	{ 0xc0011000, 0xc001103b},
 };
 
-static void __cpuinit __print_cpu_msr(void)
+static void __print_cpu_msr(void)
 {
 	unsigned index_min, index_max;
 	unsigned index;
···
 	}
 }
 
-static int show_msr __cpuinitdata;
+static int show_msr;
 
 static __init int setup_show_msr(char *arg)
 {
···
 }
 __setup("noclflush", setup_noclflush);
 
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void print_cpu_info(struct cpuinfo_x86 *c)
 {
 	const char *vendor = NULL;
 
···
 	print_cpu_msr(c);
 }
 
-void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
+void print_cpu_msr(struct cpuinfo_x86 *c)
 {
 	if (c->cpu_index < show_msr)
 		__print_cpu_msr();
···
  */
 #ifdef CONFIG_X86_64
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	struct orig_ist *oist;
 	struct task_struct *me;
···
 
 #else
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
+20 -20
arch/x86/kernel/cpu/cyrix.c
···
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
 
···
 	}
 }
 
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned long flags;
 
···
  * Actually since bugs.h doesn't even reference this perhaps someone should
  * fix the documentation ???
  */
-static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
+static unsigned char Cx86_dir0_msb = 0;
 
-static const char __cpuinitconst Cx86_model[][9] = {
+static const char Cx86_model[][9] = {
 	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
 	"M II ", "Unknown"
 };
-static const char __cpuinitconst Cx486_name[][5] = {
+static const char Cx486_name[][5] = {
 	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
 	"SRx2", "DRx2"
 };
-static const char __cpuinitconst Cx486S_name[][4] = {
+static const char Cx486S_name[][4] = {
 	"S", "S2", "Se", "S2e"
 };
-static const char __cpuinitconst Cx486D_name[][4] = {
+static const char Cx486D_name[][4] = {
 	"DX", "DX2", "?", "?", "?", "DX4"
 };
-static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
-static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
+static char Cx86_cb[] = "?.5x Core/Bus Clock";
+static const char cyrix_model_mult1[] = "12??43";
+static const char cyrix_model_mult2[] = "12233445";
 
 /*
  * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
···
  * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
  */
 
-static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
+static void check_cx686_slop(struct cpuinfo_x86 *c)
 {
 	unsigned long flags;
 
···
 }
 
 
-static void __cpuinit set_cx86_reorder(void)
+static void set_cx86_reorder(void)
 {
 	u8 ccr3;
 
···
 	setCx86(CX86_CCR3, ccr3);
 }
 
-static void __cpuinit set_cx86_memwb(void)
+static void set_cx86_memwb(void)
 {
 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
···
  * Configure later MediaGX and/or Geode processor.
  */
 
-static void __cpuinit geode_configure(void)
+static void geode_configure(void)
 {
 	unsigned long flags;
 	u8 ccr3;
···
 	local_irq_restore(flags);
 }
 
-static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+static void early_init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir1 = 0;
 
···
 	}
 }
 
-static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
+static void init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
 	char *buf = c->x86_model_id;
···
 /*
  * Handle National Semiconductor branded processors
  */
-static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
+static void init_nsc(struct cpuinfo_x86 *c)
 {
 	/*
 	 * There may be GX1 processors in the wild that are branded
···
 	return (unsigned char) (test >> 8) == 0x02;
 }
 
-static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
+static void cyrix_identify(struct cpuinfo_x86 *c)
 {
 	/* Detect Cyrix with disabled CPUID */
 	if (c->x86 == 4 && test_cyrix_52div()) {
···
 	}
 }
 
-static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
+static const struct cpu_dev cyrix_cpu_dev = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
 	.c_early_init	= early_init_cyrix,
···
 
 cpu_dev_register(cyrix_cpu_dev);
 
-static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
+static const struct cpu_dev nsc_cpu_dev = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
+1 -1
arch/x86/kernel/cpu/hypervisor.c
···
 	}
 }
 
-void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
+void init_hypervisor(struct cpuinfo_x86 *c)
 {
 	if (x86_hyper && x86_hyper->set_cpu_features)
 		x86_hyper->set_cpu_features(c);
+15 -15
arch/x86/kernel/cpu/intel.c
···
 #include <asm/apic.h>
 #endif
 
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void early_init_intel(struct cpuinfo_x86 *c)
 {
 	u64 misc_enable;
 
···
  * This is called before we do cpu ident work
  */
 
-int __cpuinit ppro_with_ram_bug(void)
+int ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
···
 	return 0;
 }
 
-static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+static void intel_smp_check(struct cpuinfo_x86 *c)
 {
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
···
 	}
 }
 
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
···
 	intel_smp_check(c);
 }
 #else
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 }
 #endif
 
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
 	unsigned node;
···
 /*
  * find out the number of processor cores on the die
  */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
+static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
···
 	return 1;
 }
 
-static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
+static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 {
 	/* Intel VMX MSR indicated features */
 #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
···
 	}
 }
 
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+static void init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
 
···
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/*
 	 * Intel PIII Tualatin. This comes in two flavours.
···
 
 #define STLB_4K		0x41
 
-static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
+static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x01, TLB_INST_4K,	32,	" TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0x02, TLB_INST_4M,	2,	" TLB_INST 4 MByte pages, full associative" },
 	{ 0x03, TLB_DATA_4K,	64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
···
 	{ 0x00, 0, 0 }
 };
 
-static void __cpuinit intel_tlb_lookup(const unsigned char desc)
+static void intel_tlb_lookup(const unsigned char desc)
 {
 	unsigned char k;
 	if (desc == 0)
···
 	}
 }
 
-static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 {
 	switch ((c->x86 << 8) + c->x86_model) {
 	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
···
 	}
 }
 
-static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
+static void intel_detect_tlb(struct cpuinfo_x86 *c)
 {
 	int i, j, n;
 	unsigned int regs[4];
···
 	intel_tlb_flushall_shift_set(c);
 }
 
-static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
+static const struct cpu_dev intel_cpu_dev = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
 #ifdef CONFIG_X86_32
+27 -28
arch/x86/kernel/cpu/intel_cacheinfo.c
···
 /* All the cache descriptor types we care about (no TLB or
    trace cache entries) */
 
-static const struct _cache_table __cpuinitconst cache_table[] =
+static const struct _cache_table cache_table[] =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
···
 	unsigned val;
 };
 
-static const unsigned short __cpuinitconst assocs[] = {
+static const unsigned short assocs[] = {
 	[1] = 1,
 	[2] = 2,
 	[4] = 4,
···
 	[0xf] = 0xffff /* fully associative - no way to show this currently */
 };
 
-static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
-static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
+static const unsigned char levels[] = { 1, 1, 2, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };
 
-static void __cpuinit
+static void
 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		     union _cpuid4_leaf_ebx *ebx,
 		     union _cpuid4_leaf_ecx *ecx)
···
 /*
  * L3 cache descriptors
  */
-static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+static void amd_calc_l3_indices(struct amd_northbridge *nb)
 {
 	struct amd_l3_cache *l3 = &nb->l3_cache;
 	unsigned int sc0, sc1, sc2, sc3;
···
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
 	int node;
 
···
 #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
 
 static int
-__cpuinit cpuid4_cache_lookup_regs(int index,
-				   struct _cpuid4_info_regs *this_leaf)
+cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax	eax;
 	union _cpuid4_leaf_ebx	ebx;
···
 	return 0;
 }
 
-static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
+static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 {
 	unsigned int		eax, ebx, ecx, edx, op;
 	union _cpuid4_leaf_eax	cache_eax;
···
 	return i;
 }
 
-void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
+void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
 
 	if (cpu_has_topoext) {
···
 	}
 }
 
-unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
···
 
 #ifdef CONFIG_SMP
 
-static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf;
 	int i, sibling;
···
 	return 1;
 }
 
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
···
 		}
 	}
 }
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 	struct _cpuid4_info	*this_leaf, *sibling_leaf;
 	int sibling;
···
 	}
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 }
 
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 }
 #endif
 
-static void __cpuinit free_cache_attributes(unsigned int cpu)
+static void free_cache_attributes(unsigned int cpu)
 {
 	int i;
 
···
 	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
-static void __cpuinit get_cpu_leaves(void *_retval)
+static void get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();
 
···
 	}
 }
 
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static int detect_cache_attributes(unsigned int cpu)
 {
 	int retval;
 
···
 };
 
 #ifdef CONFIG_AMD_NB
-static struct attribute ** __cpuinit amd_l3_attrs(void)
+static struct attribute **amd_l3_attrs(void)
 {
 	static struct attribute **attrs;
 	int n;
···
 	.sysfs_ops	= &sysfs_ops,
 };
 
-static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(per_cpu(ici_cache_kobject, cpu));
 	kfree(per_cpu(ici_index_kobject, cpu));
···
 	free_cache_attributes(cpu);
 }
 
-static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
+static int cpuid4_cache_sysfs_init(unsigned int cpu)
 {
 	int err;
 
···
 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device *dev)
+static int cache_add_dev(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	unsigned long i, j;
···
 	return 0;
 }
 
-static void __cpuinit cache_remove_dev(struct device *dev)
+static void cache_remove_dev(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	unsigned long i;
···
 	cpuid4_cache_sysfs_exit(cpu);
 }
 
-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
···
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
+static struct notifier_block cacheinfo_cpu_notifier = {
 	.notifier_call = cacheinfo_cpu_callback,
 };
+11 -12
arch/x86/kernel/cpu/mcheck/mce.c
···
 }
 EXPORT_SYMBOL_GPL(mce_notify_irq);
 
-static int __cpuinit __mcheck_cpu_mce_banks_init(void)
+static int __mcheck_cpu_mce_banks_init(void)
 {
 	int i;
 	u8 num_banks = mca_cfg.banks;
···
 /*
  * Initialize Machine Checks for a CPU.
  */
-static int __cpuinit __mcheck_cpu_cap_init(void)
+static int __mcheck_cpu_cap_init(void)
 {
 	unsigned b;
 	u64 cap;
···
 }
 
 /* Add per CPU specific workarounds here */
-static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 {
 	struct mca_config *cfg = &mca_cfg;
 
···
 	return 0;
 }
 
-static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
 {
 	if (c->x86 != 5)
 		return 0;
···
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
  */
-void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+void mcheck_cpu_init(struct cpuinfo_x86 *c)
 {
 	if (mca_cfg.disabled)
 		return;
···
 
 DEFINE_PER_CPU(struct device *, mce_device);
 
-__cpuinitdata
 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
···
 }
 
 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_device_create(unsigned int cpu)
+static int mce_device_create(unsigned int cpu)
 {
 	struct device *dev;
 	int err;
···
 	return err;
 }
 
-static __cpuinit void mce_device_remove(unsigned int cpu)
+static void mce_device_remove(unsigned int cpu)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	int i;
···
 }
 
 /* Make sure there are no machine checks on offlined CPUs. */
-static void __cpuinit mce_disable_cpu(void *h)
+static void mce_disable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
 	int i;
···
 	}
 }
 
-static void __cpuinit mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
 	int i;
···
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int __cpuinit
+static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
···
 	return NOTIFY_OK;
 }
 
-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
+static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };
+6 -8
arch/x86/kernel/cpu/mcheck/mce_amd.c
···
 	.default_attrs		= default_attrs,
 };
 
-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
-					       unsigned int bank,
-					       unsigned int block,
-					       u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+				     unsigned int block, u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
···
 	return err;
 }
 
-static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
+static int __threshold_add_blocks(struct threshold_bank *b)
 {
 	struct list_head *head = &b->blocks->miscj;
 	struct threshold_block *pos = NULL;
···
 	return err;
 }
 
-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	struct amd_northbridge *nb = NULL;
···
 }
 
 /* create dir/files for all valid threshold banks */
-static __cpuinit int threshold_create_device(unsigned int cpu)
+static int threshold_create_device(unsigned int cpu)
 {
 	unsigned int bank;
 	struct threshold_bank **bp;
···
 }
 
 /* get notified when a cpu comes on/off */
-static void __cpuinit
+static void
 amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
 {
 	switch (action) {
+4 -5
arch/x86/kernel/cpu/mcheck/therm_throt.c
···
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct device *dev,
-					      unsigned int cpu)
+static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
 {
 	int err;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
···
 	return err;
 }
 
-static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
+static void thermal_throttle_remove_dev(struct device *dev)
 {
 	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
 }
···
 static DEFINE_MUTEX(therm_cpu_lock);
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
+static int
 thermal_throttle_cpu_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
···
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
+static struct notifier_block thermal_throttle_cpu_notifier =
 {
 	.notifier_call = thermal_throttle_cpu_callback,
 };
+1 -1
arch/x86/kernel/cpu/perf_event.c
···
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int __cpuinit
+static int
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+1 -1
arch/x86/kernel/cpu/perf_event_amd_ibs.c
···
 	setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
 }
 
-static int __cpuinit
+static int
 perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
+15 -16
arch/x86/kernel/cpu/perf_event_amd_uncore.c
···
 	.read		= amd_uncore_read,
 };
 
-static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
+static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
 {
 	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
 			cpu_to_node(cpu));
 }
 
-static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
+static void amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
 	struct amd_uncore *uncore;
 
···
 }
 
 static struct amd_uncore *
-__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
-					 struct amd_uncore * __percpu *uncores)
+amd_uncore_find_online_sibling(struct amd_uncore *this,
+			       struct amd_uncore * __percpu *uncores)
 {
 	unsigned int cpu;
 	struct amd_uncore *that;
···
 	return this;
 }
 
-static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
+static void amd_uncore_cpu_starting(unsigned int cpu)
 {
 	unsigned int eax, ebx, ecx, edx;
 	struct amd_uncore *uncore;
···
 	}
 }
 
-static void __cpuinit uncore_online(unsigned int cpu,
-				    struct amd_uncore * __percpu *uncores)
+static void uncore_online(unsigned int cpu,
+			  struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
···
 		cpumask_set_cpu(cpu, uncore->active_mask);
 }
 
-static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
+static void amd_uncore_cpu_online(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_online(cpu, amd_uncore_nb);
···
 		uncore_online(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_down_prepare(unsigned int cpu,
-					  struct amd_uncore * __percpu *uncores)
+static void uncore_down_prepare(unsigned int cpu,
+				struct amd_uncore * __percpu *uncores)
 {
 	unsigned int i;
 	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
···
 	}
 }
 
-static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
+static void amd_uncore_cpu_down_prepare(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_down_prepare(cpu, amd_uncore_nb);
···
 		uncore_down_prepare(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_dead(unsigned int cpu,
-				  struct amd_uncore * __percpu *uncores)
+static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
···
 		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
 }
 
-static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
+static void amd_uncore_cpu_dead(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_dead(cpu, amd_uncore_nb);
···
 		uncore_dead(cpu, amd_uncore_l2);
 }
 
-static int __cpuinit
+static int
 amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 			void *hcpu)
 {
···
 	return NOTIFY_OK;
 }
 
-static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
+static struct notifier_block amd_uncore_cpu_notifier_block = {
 	.notifier_call	= amd_uncore_cpu_notifier,
 	.priority	= CPU_PRI_PERF + 1,
 };
+10 -10
arch/x86/kernel/cpu/perf_event_intel_uncore.c
···
 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
 static LIST_HEAD(boxes_to_free);
 
-static void __cpuinit uncore_kfree_boxes(void)
+static void uncore_kfree_boxes(void)
 {
 	struct intel_uncore_box *box;
 
···
 	}
 }
 
-static void __cpuinit uncore_cpu_dying(int cpu)
+static void uncore_cpu_dying(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
···
 	}
 }
 
-static int __cpuinit uncore_cpu_starting(int cpu)
+static int uncore_cpu_starting(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
···
 	return 0;
 }
 
-static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+static int uncore_cpu_prepare(int cpu, int phys_id)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
···
 	return 0;
 }
 
-static void __cpuinit
+static void
 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
 {
 	struct intel_uncore_type *type;
···
 	}
 }
 
-static void __cpuinit uncore_event_exit_cpu(int cpu)
+static void uncore_event_exit_cpu(int cpu)
 {
 	int i, phys_id, target;
 
···
 	uncore_change_context(pci_uncores, cpu, target);
 }
 
-static void __cpuinit uncore_event_init_cpu(int cpu)
+static void uncore_event_init_cpu(int cpu)
 {
 	int i, phys_id;
 
···
 	uncore_change_context(pci_uncores, -1, cpu);
 }
 
-static int
-__cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int uncore_cpu_notifier(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
···
 	return NOTIFY_OK;
 }
 
-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+static struct notifier_block uncore_cpu_nb = {
 	.notifier_call	= uncore_cpu_notifier,
 	/*
 	 * to migrate uncore events, our notifier should be executed
+1 -1
arch/x86/kernel/cpu/rdrand.c
···
  */
 #define RESEED_LOOP ((512*128)/sizeof(unsigned long))
 
-void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
+void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_ARCH_RANDOM
 	unsigned long tmp;
+2 -2
arch/x86/kernel/cpu/scattered.c
··· 24 24 CR_EBX 25 25 }; 26 26 27 - void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) 27 + void init_scattered_cpuid_features(struct cpuinfo_x86 *c) 28 28 { 29 29 u32 max_level; 30 30 u32 regs[4]; 31 31 const struct cpuid_bit *cb; 32 32 33 - static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { 33 + static const struct cpuid_bit cpuid_bits[] = { 34 34 { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, 35 35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, 36 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
+1 -1
arch/x86/kernel/cpu/topology.c
··· 26 26 * exists, use it for populating initial_apicid and cpu topology 27 27 * detection. 28 28 */ 29 - void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) 29 + void detect_extended_topology(struct cpuinfo_x86 *c) 30 30 { 31 31 #ifdef CONFIG_SMP 32 32 unsigned int eax, ebx, ecx, edx, sub_index;
+3 -3
arch/x86/kernel/cpu/transmeta.c
··· 5 5 #include <asm/msr.h> 6 6 #include "cpu.h" 7 7 8 - static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) 8 + static void early_init_transmeta(struct cpuinfo_x86 *c) 9 9 { 10 10 u32 xlvl; 11 11 ··· 17 17 } 18 18 } 19 19 20 - static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) 20 + static void init_transmeta(struct cpuinfo_x86 *c) 21 21 { 22 22 unsigned int cap_mask, uk, max, dummy; 23 23 unsigned int cms_rev1, cms_rev2; ··· 98 98 #endif 99 99 } 100 100 101 - static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = { 101 + static const struct cpu_dev transmeta_cpu_dev = { 102 102 .c_vendor = "Transmeta", 103 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 104 104 .c_early_init = early_init_transmeta,
+1 -1
arch/x86/kernel/cpu/umc.c
··· 8 8 * so no special init takes place. 9 9 */ 10 10 11 - static const struct cpu_dev __cpuinitconst umc_cpu_dev = { 11 + static const struct cpu_dev umc_cpu_dev = { 12 12 .c_vendor = "UMC", 13 13 .c_ident = { "UMC UMC UMC" }, 14 14 .c_models = {
+1 -1
arch/x86/kernel/cpu/vmware.c
··· 122 122 * so that the kernel could just trust the hypervisor with providing a 123 123 * reliable virtual TSC that is suitable for timekeeping. 124 124 */ 125 - static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) 125 + static void vmware_set_cpu_features(struct cpuinfo_x86 *c) 126 126 { 127 127 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 128 128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+3 -4
arch/x86/kernel/cpuid.c
··· 137 137 .open = cpuid_open, 138 138 }; 139 139 140 - static __cpuinit int cpuid_device_create(int cpu) 140 + static int cpuid_device_create(int cpu) 141 141 { 142 142 struct device *dev; 143 143 ··· 151 151 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); 152 152 } 153 153 154 - static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, 155 - unsigned long action, 156 - void *hcpu) 154 + static int cpuid_class_cpu_callback(struct notifier_block *nfb, 155 + unsigned long action, void *hcpu) 157 156 { 158 157 unsigned int cpu = (unsigned long)hcpu; 159 158 int err = 0;
+1 -1
arch/x86/kernel/devicetree.c
··· 133 133 { 134 134 } 135 135 136 - void __cpuinit x86_of_pci_init(void) 136 + void x86_of_pci_init(void) 137 137 { 138 138 pcibios_enable_irq = x86_of_pci_irq_enable; 139 139 pcibios_disable_irq = x86_of_pci_irq_disable;
-1
arch/x86/kernel/head_32.S
··· 292 292 * If cpu hotplug is not supported then this code can go in init section 293 293 * which will be freed later 294 294 */ 295 - __CPUINIT 296 295 ENTRY(startup_32_smp) 297 296 cld 298 297 movl $(__BOOT_DS),%eax
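The head_32.S hunk above is a pure deletion because __CPUINIT was nothing more than a section switch, with no state of its own to restore. The relevant linux/init.h definitions of this era were approximately the following (paraphrased from memory; treat this as a reference sketch, not a quotation):

/* C-side markers: place code/data into the .cpuinit sections. */
#define __cpuinit	__section(.cpuinit.text) __cold notrace
#define __cpuinitdata	__section(.cpuinit.data)
#define __cpuinitconst	__constsection(.cpuinit.rodata)

/* Assembly-side: __CPUINIT opens the section; a paired __FINIT
 * (i.e. .previous) would normally close it again. */
#define __CPUINIT	.section ".cpuinit.text", "ax"
#define __FINIT		.previous

Whether .cpuinit.text was kept resident or freed after boot was a linker-script decision driven by CONFIG_HOTPLUG_CPU, which is what made a stale reference into freed memory so easy to create.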
+5 -5
arch/x86/kernel/i387.c
··· 108 108 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 109 109 unsigned int xstate_size; 110 110 EXPORT_SYMBOL_GPL(xstate_size); 111 - static struct i387_fxsave_struct fx_scratch __cpuinitdata; 111 + static struct i387_fxsave_struct fx_scratch; 112 112 113 - static void __cpuinit mxcsr_feature_mask_init(void) 113 + static void mxcsr_feature_mask_init(void) 114 114 { 115 115 unsigned long mask = 0; 116 116 ··· 124 124 mxcsr_feature_mask &= mask; 125 125 } 126 126 127 - static void __cpuinit init_thread_xstate(void) 127 + static void init_thread_xstate(void) 128 128 { 129 129 /* 130 130 * Note that xstate_size might be overwriten later during ··· 153 153 * into all processes. 154 154 */ 155 155 156 - void __cpuinit fpu_init(void) 156 + void fpu_init(void) 157 157 { 158 158 unsigned long cr0; 159 159 unsigned long cr4_mask = 0; ··· 608 608 609 609 __setup("no387", no_387); 610 610 611 - void __cpuinit fpu_detect(struct cpuinfo_x86 *c) 611 + void fpu_detect(struct cpuinfo_x86 *c) 612 612 { 613 613 unsigned long cr0; 614 614 u16 fsw, fcw;
+1 -1
arch/x86/kernel/irq_32.c
··· 119 119 /* 120 120 * allocate per-cpu stacks for hardirq and for softirq processing 121 121 */ 122 - void __cpuinit irq_ctx_init(int cpu) 122 + void irq_ctx_init(int cpu) 123 123 { 124 124 union irq_ctx *irqctx; 125 125
+5 -5
arch/x86/kernel/kvm.c
··· 320 320 apic_write(APIC_EOI, APIC_EOI_ACK); 321 321 } 322 322 323 - void __cpuinit kvm_guest_cpu_init(void) 323 + void kvm_guest_cpu_init(void) 324 324 { 325 325 if (!kvm_para_available()) 326 326 return; ··· 421 421 native_smp_prepare_boot_cpu(); 422 422 } 423 423 424 - static void __cpuinit kvm_guest_cpu_online(void *dummy) 424 + static void kvm_guest_cpu_online(void *dummy) 425 425 { 426 426 kvm_guest_cpu_init(); 427 427 } ··· 435 435 apf_task_wake_all(); 436 436 } 437 437 438 - static int __cpuinit kvm_cpu_notify(struct notifier_block *self, 439 - unsigned long action, void *hcpu) 438 + static int kvm_cpu_notify(struct notifier_block *self, unsigned long action, 439 + void *hcpu) 440 440 { 441 441 int cpu = (unsigned long)hcpu; 442 442 switch (action) { ··· 455 455 return NOTIFY_OK; 456 456 } 457 457 458 - static struct notifier_block __cpuinitdata kvm_cpu_notifier = { 458 + static struct notifier_block kvm_cpu_notifier = { 459 459 .notifier_call = kvm_cpu_notify, 460 460 }; 461 461 #endif
+1 -1
arch/x86/kernel/kvmclock.c
··· 182 182 } 183 183 184 184 #ifdef CONFIG_X86_LOCAL_APIC 185 - static void __cpuinit kvm_setup_secondary_clock(void) 185 + static void kvm_setup_secondary_clock(void) 186 186 { 187 187 /* 188 188 * Now that the first cpu already had this clocksource initialized,
+4 -4
arch/x86/kernel/microcode_amd_early.c
··· 82 82 * load_microcode_amd() to save equivalent cpu table and microcode patches in 83 83 * kernel heap memory. 84 84 */ 85 - static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size) 85 + static void apply_ucode_in_initrd(void *ucode, size_t size) 86 86 { 87 87 struct equiv_cpu_entry *eq; 88 88 u32 *header; ··· 206 206 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which 207 207 * is used upon resume from suspend. 208 208 */ 209 - void __cpuinit load_ucode_amd_ap(void) 209 + void load_ucode_amd_ap(void) 210 210 { 211 211 struct microcode_amd *mc; 212 212 unsigned long *initrd; ··· 238 238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 239 239 } 240 240 #else 241 - static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 241 + static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 242 242 struct ucode_cpu_info *uci) 243 243 { 244 244 u32 rev, eax; ··· 252 252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 253 253 } 254 254 255 - void __cpuinit load_ucode_amd_ap(void) 255 + void load_ucode_amd_ap(void) 256 256 { 257 257 unsigned int cpu = smp_processor_id(); 258 258
+1 -1
arch/x86/kernel/microcode_core.c
··· 468 468 .resume = mc_bp_resume, 469 469 }; 470 470 471 - static __cpuinit int 471 + static int 472 472 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 473 473 { 474 474 unsigned int cpu = (unsigned long)hcpu;
+3 -3
arch/x86/kernel/microcode_core_early.c
··· 41 41 * 42 42 * x86_vendor() gets vendor information directly through cpuid. 43 43 */ 44 - static int __cpuinit x86_vendor(void) 44 + static int x86_vendor(void) 45 45 { 46 46 u32 eax = 0x00000000; 47 47 u32 ebx, ecx = 0, edx; ··· 57 57 return X86_VENDOR_UNKNOWN; 58 58 } 59 59 60 - static int __cpuinit x86_family(void) 60 + static int x86_family(void) 61 61 { 62 62 u32 eax = 0x00000001; 63 63 u32 ebx, ecx = 0, edx; ··· 96 96 } 97 97 } 98 98 99 - void __cpuinit load_ucode_ap(void) 99 + void load_ucode_ap(void) 100 100 { 101 101 int vendor, x86; 102 102
+13 -13
arch/x86/kernel/microcode_intel_early.c
··· 34 34 struct microcode_intel **mc_saved; 35 35 } mc_saved_data; 36 36 37 - static enum ucode_state __cpuinit 37 + static enum ucode_state 38 38 generic_load_microcode_early(struct microcode_intel **mc_saved_p, 39 39 unsigned int mc_saved_count, 40 40 struct ucode_cpu_info *uci) ··· 69 69 return state; 70 70 } 71 71 72 - static void __cpuinit 72 + static void 73 73 microcode_pointer(struct microcode_intel **mc_saved, 74 74 unsigned long *mc_saved_in_initrd, 75 75 unsigned long initrd_start, int mc_saved_count) ··· 82 82 } 83 83 84 84 #ifdef CONFIG_X86_32 85 - static void __cpuinit 85 + static void 86 86 microcode_phys(struct microcode_intel **mc_saved_tmp, 87 87 struct mc_saved_data *mc_saved_data) 88 88 { ··· 101 101 } 102 102 #endif 103 103 104 - static enum ucode_state __cpuinit 104 + static enum ucode_state 105 105 load_microcode(struct mc_saved_data *mc_saved_data, 106 106 unsigned long *mc_saved_in_initrd, 107 107 unsigned long initrd_start, ··· 375 375 #define native_wrmsr(msr, low, high) \ 376 376 native_write_msr(msr, low, high); 377 377 378 - static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci) 378 + static int collect_cpu_info_early(struct ucode_cpu_info *uci) 379 379 { 380 380 unsigned int val[2]; 381 381 u8 x86, x86_model; ··· 584 584 /* 585 585 * Print ucode update info. 586 586 */ 587 - static void __cpuinit 587 + static void 588 588 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) 589 589 { 590 590 int cpu = smp_processor_id(); ··· 605 605 /* 606 606 * Print early updated ucode info after printk works. This is delayed info dump. 607 607 */ 608 - void __cpuinit show_ucode_info_early(void) 608 + void show_ucode_info_early(void) 609 609 { 610 610 struct ucode_cpu_info uci; 611 611 ··· 621 621 * mc_saved_data.mc_saved and delay printing microcode info in 622 622 * show_ucode_info_early() until printk() works. 623 623 */ 624 - static void __cpuinit print_ucode(struct ucode_cpu_info *uci) 624 + static void print_ucode(struct ucode_cpu_info *uci) 625 625 { 626 626 struct microcode_intel *mc_intel; 627 627 int *delay_ucode_info_p; ··· 643 643 * Flush global tlb. We only do this in x86_64 where paging has been enabled 644 644 * already and PGE should be enabled as well. 645 645 */ 646 - static inline void __cpuinit flush_tlb_early(void) 646 + static inline void flush_tlb_early(void) 647 647 { 648 648 __native_flush_tlb_global_irq_disabled(); 649 649 } 650 650 651 - static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) 651 + static inline void print_ucode(struct ucode_cpu_info *uci) 652 652 { 653 653 struct microcode_intel *mc_intel; 654 654 ··· 660 660 } 661 661 #endif 662 662 663 - static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, 664 - struct ucode_cpu_info *uci) 663 + static int apply_microcode_early(struct mc_saved_data *mc_saved_data, 664 + struct ucode_cpu_info *uci) 665 665 { 666 666 struct microcode_intel *mc_intel; 667 667 unsigned int val[2]; ··· 763 763 #endif 764 764 } 765 765 766 - void __cpuinit load_ucode_intel_ap(void) 766 + void load_ucode_intel_ap(void) 767 767 { 768 768 struct mc_saved_data *mc_saved_data_p; 769 769 struct ucode_cpu_info uci;
+6 -6
arch/x86/kernel/mmconf-fam10h_64.c
··· 24 24 u32 device; 25 25 }; 26 26 27 - static u64 __cpuinitdata fam10h_pci_mmconf_base; 27 + static u64 fam10h_pci_mmconf_base; 28 28 29 - static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { 29 + static struct pci_hostbridge_probe pci_probes[] = { 30 30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, 31 31 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, 32 32 }; 33 33 34 - static int __cpuinit cmp_range(const void *x1, const void *x2) 34 + static int cmp_range(const void *x1, const void *x2) 35 35 { 36 36 const struct range *r1 = x1; 37 37 const struct range *r2 = x2; ··· 49 49 /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ 50 50 #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) 51 51 #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) 52 - static void __cpuinit get_fam10h_pci_mmconf_base(void) 52 + static void get_fam10h_pci_mmconf_base(void) 53 53 { 54 54 int i; 55 55 unsigned bus; ··· 166 166 fam10h_pci_mmconf_base = base; 167 167 } 168 168 169 - void __cpuinit fam10h_check_enable_mmcfg(void) 169 + void fam10h_check_enable_mmcfg(void) 170 170 { 171 171 u64 val; 172 172 u32 address; ··· 230 230 {} 231 231 }; 232 232 233 - /* Called from a __cpuinit function, but only on the BSP. */ 233 + /* Called from a non __init function, but only on the BSP. */ 234 234 void __ref check_enable_amd_mmconf_dmi(void) 235 235 { 236 236 dmi_check_system(mmconf_dmi_table);
+3 -3
arch/x86/kernel/msr.c
··· 200 200 .compat_ioctl = msr_ioctl, 201 201 }; 202 202 203 - static int __cpuinit msr_device_create(int cpu) 203 + static int msr_device_create(int cpu) 204 204 { 205 205 struct device *dev; 206 206 ··· 214 214 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); 215 215 } 216 216 217 - static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, 218 - unsigned long action, void *hcpu) 217 + static int msr_class_cpu_callback(struct notifier_block *nfb, 218 + unsigned long action, void *hcpu) 219 219 { 220 220 unsigned int cpu = (unsigned long)hcpu; 221 221 int err = 0;
+1 -1
arch/x86/kernel/process.c
··· 398 398 default_idle(); 399 399 } 400 400 401 - void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 401 + void select_idle_routine(const struct cpuinfo_x86 *c) 402 402 { 403 403 #ifdef CONFIG_SMP 404 404 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
+1 -1
arch/x86/kernel/setup.c
··· 170 170 171 171 #ifdef CONFIG_X86_32 172 172 /* cpu data as detected by the assembly code in head.S */ 173 - struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 173 + struct cpuinfo_x86 new_cpu_data = { 174 174 .wp_works_ok = -1, 175 175 }; 176 176 /* common cpu data for all cpus */
+14 -14
arch/x86/kernel/smpboot.c
··· 130 130 * Report back to the Boot Processor during boot time or to the caller processor 131 131 * during CPU online. 132 132 */ 133 - static void __cpuinit smp_callin(void) 133 + static void smp_callin(void) 134 134 { 135 135 int cpuid, phys_id; 136 136 unsigned long timeout; ··· 237 237 /* 238 238 * Activate a secondary processor. 239 239 */ 240 - notrace static void __cpuinit start_secondary(void *unused) 240 + static void notrace start_secondary(void *unused) 241 241 { 242 242 /* 243 243 * Don't put *anything* before cpu_init(), SMP booting is too ··· 300 300 * The bootstrap kernel entry code has set these up. Save them for 301 301 * a given CPU 302 302 */ 303 - void __cpuinit smp_store_cpu_info(int id) 303 + void smp_store_cpu_info(int id) 304 304 { 305 305 struct cpuinfo_x86 *c = &cpu_data(id); 306 306 ··· 313 313 identify_secondary_cpu(c); 314 314 } 315 315 316 - static bool __cpuinit 316 + static bool 317 317 topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) 318 318 { 319 319 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; ··· 330 330 cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ 331 331 } while (0) 332 332 333 - static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 333 + static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 334 334 { 335 335 if (cpu_has_topoext) { 336 336 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; ··· 348 348 return false; 349 349 } 350 350 351 - static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 351 + static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 352 352 { 353 353 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 354 354 ··· 359 359 return false; 360 360 } 361 361 362 - static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 362 + static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 363 363 { 364 364 if (c->phys_proc_id == o->phys_proc_id) { 365 365 if (cpu_has(c, X86_FEATURE_AMD_DCM)) ··· 370 370 return false; 371 371 } 372 372 373 - void __cpuinit set_cpu_sibling_map(int cpu) 373 + void set_cpu_sibling_map(int cpu) 374 374 { 375 375 bool has_smt = smp_num_siblings > 1; 376 376 bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; ··· 499 499 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this 500 500 * won't ... remember to clear down the APIC, etc later. 501 501 */ 502 - int __cpuinit 502 + int 503 503 wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) 504 504 { 505 505 unsigned long send_status, accept_status = 0; ··· 533 533 return (send_status | accept_status); 534 534 } 535 535 536 - static int __cpuinit 536 + static int 537 537 wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) 538 538 { 539 539 unsigned long send_status, accept_status = 0; ··· 649 649 } 650 650 651 651 /* reduce the number of lines printed when booting a large cpu count system */ 652 - static void __cpuinit announce_cpu(int cpu, int apicid) 652 + static void announce_cpu(int cpu, int apicid) 653 653 { 654 654 static int current_node = -1; 655 655 int node = early_cpu_to_node(cpu); ··· 691 691 * We'll change this code in the future to wake up hard offlined CPU0 if 692 692 * real platform and request are available. 
693 693 */ 694 - static int __cpuinit 694 + static int 695 695 wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, 696 696 int *cpu0_nmi_registered) 697 697 { ··· 731 731 * Returns zero if CPU booted OK, else error code from 732 732 * ->wakeup_secondary_cpu. 733 733 */ 734 - static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) 734 + static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) 735 735 { 736 736 volatile u32 *trampoline_status = 737 737 (volatile u32 *) __va(real_mode_header->trampoline_status); ··· 872 872 return boot_error; 873 873 } 874 874 875 - int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) 875 + int native_cpu_up(unsigned int cpu, struct task_struct *tidle) 876 876 { 877 877 int apicid = apic->cpu_present_to_apicid(cpu); 878 878 unsigned long flags;
+3 -3
arch/x86/kernel/tboot.c
··· 320 320 return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); 321 321 } 322 322 323 - static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, 324 - unsigned long action, void *hcpu) 323 + static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action, 324 + void *hcpu) 325 325 { 326 326 switch (action) { 327 327 case CPU_DYING: ··· 334 334 return NOTIFY_OK; 335 335 } 336 336 337 - static struct notifier_block tboot_cpu_notifier __cpuinitdata = 337 + static struct notifier_block tboot_cpu_notifier = 338 338 { 339 339 .notifier_call = tboot_cpu_callback, 340 340 };
+2 -2
arch/x86/kernel/tsc.c
··· 824 824 * Make an educated guess if the TSC is trustworthy and synchronized 825 825 * over all CPUs. 826 826 */ 827 - __cpuinit int unsynchronized_tsc(void) 827 + int unsynchronized_tsc(void) 828 828 { 829 829 if (!cpu_has_tsc || tsc_unstable) 830 830 return 1; ··· 1020 1020 * been calibrated. This assumes that CONSTANT_TSC applies to all 1021 1021 * cpus in the socket - this should be a safe assumption. 1022 1022 */ 1023 - unsigned long __cpuinit calibrate_delay_is_known(void) 1023 + unsigned long calibrate_delay_is_known(void) 1024 1024 { 1025 1025 int i, cpu = smp_processor_id(); 1026 1026
+9 -9
arch/x86/kernel/tsc_sync.c
··· 25 25 * Entry/exit counters that make sure that both CPUs 26 26 * run the measurement code at once: 27 27 */ 28 - static __cpuinitdata atomic_t start_count; 29 - static __cpuinitdata atomic_t stop_count; 28 + static atomic_t start_count; 29 + static atomic_t stop_count; 30 30 31 31 /* 32 32 * We use a raw spinlock in this exceptional case, because 33 33 * we want to have the fastest, inlined, non-debug version 34 34 * of a critical section, to be able to prove TSC time-warps: 35 35 */ 36 - static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; 36 + static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; 37 37 38 - static __cpuinitdata cycles_t last_tsc; 39 - static __cpuinitdata cycles_t max_warp; 40 - static __cpuinitdata int nr_warps; 38 + static cycles_t last_tsc; 39 + static cycles_t max_warp; 40 + static int nr_warps; 41 41 42 42 /* 43 43 * TSC-warp measurement loop running on both CPUs: 44 44 */ 45 - static __cpuinit void check_tsc_warp(unsigned int timeout) 45 + static void check_tsc_warp(unsigned int timeout) 46 46 { 47 47 cycles_t start, now, prev, end; 48 48 int i; ··· 121 121 * Source CPU calls into this - it waits for the freshly booted 122 122 * target CPU to arrive and then starts the measurement: 123 123 */ 124 - void __cpuinit check_tsc_sync_source(int cpu) 124 + void check_tsc_sync_source(int cpu) 125 125 { 126 126 int cpus = 2; 127 127 ··· 187 187 /* 188 188 * Freshly booted CPUs call into this: 189 189 */ 190 - void __cpuinit check_tsc_sync_target(void) 190 + void check_tsc_sync_target(void) 191 191 { 192 192 int cpus = 2; 193 193
+3 -3
arch/x86/kernel/vsyscall_64.c
··· 331 331 * Assume __initcall executes before all user space. Hopefully kmod 332 332 * doesn't violate that. We'll find out if it does. 333 333 */ 334 - static void __cpuinit vsyscall_set_cpu(int cpu) 334 + static void vsyscall_set_cpu(int cpu) 335 335 { 336 336 unsigned long d; 337 337 unsigned long node = 0; ··· 353 353 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); 354 354 } 355 355 356 - static void __cpuinit cpu_vsyscall_init(void *arg) 356 + static void cpu_vsyscall_init(void *arg) 357 357 { 358 358 /* preemption should be already off */ 359 359 vsyscall_set_cpu(raw_smp_processor_id()); 360 360 } 361 361 362 - static int __cpuinit 362 + static int 363 363 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 364 364 { 365 365 long cpu = (long)arg;
+2 -2
arch/x86/kernel/x86_init.c
··· 25 25 #include <asm/iommu.h> 26 26 #include <asm/mach_traps.h> 27 27 28 - void __cpuinit x86_init_noop(void) { } 28 + void x86_init_noop(void) { } 29 29 void __init x86_init_uint_noop(unsigned int unused) { } 30 30 int __init iommu_init_noop(void) { return 0; } 31 31 void iommu_shutdown_noop(void) { } ··· 85 85 }, 86 86 }; 87 87 88 - struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 88 + struct x86_cpuinit_ops x86_cpuinit = { 89 89 .early_percpu_clock_init = x86_init_noop, 90 90 .setup_percpu_clockev = setup_secondary_APIC_clock, 91 91 };
+2 -2
arch/x86/kernel/xsave.c
··· 573 573 * This is somewhat obfuscated due to the lack of powerful enough 574 574 * overrides for the section checks. 575 575 */ 576 - void __cpuinit xsave_init(void) 576 + void xsave_init(void) 577 577 { 578 578 static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; 579 579 void (*this_func)(void); ··· 594 594 setup_init_fpu_buf(); 595 595 } 596 596 597 - void __cpuinit eager_fpu_init(void) 597 + void eager_fpu_init(void) 598 598 { 599 599 static __refdata void (*boot_func)(void) = eager_fpu_init_bp; 600 600
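Note that the xsave.c hunk drops __cpuinit but keeps __refdata: the file dispatches through a function pointer whose initial target is __init text. A reduced sketch of that one-shot dispatch, with illustrative names rather than the file's own:

/* Sketch of the boot-once dispatch pattern retained in xsave_init(). */
static void __init enable_on_boot_cpu(void);
static void enable_on_secondary(void);

/* __refdata: this pointer intentionally references __init text, but
 * only until the boot CPU has taken that path exactly once. */
static __refdata void (*next_func)(void) = enable_on_boot_cpu;

static void enable_on_secondary(void)
{
	/* per-CPU enable work for every later CPU */
}

static void __init enable_on_boot_cpu(void)
{
	/* one-time boot-CPU setup ... */
	next_func = enable_on_secondary;  /* retarget before init memory is freed */
}

void demo_xstate_init(void)
{
	void (*this_func)(void) = next_func;

	this_func();
}

The __refdata marker is what keeps modpost from flagging the pointer's initial value as a section mismatch, which is why it survives this sweep.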
+1 -3
arch/x86/mm/mmio-mod.c
··· 410 410 pr_warning("multiple CPUs still online, may miss events.\n"); 411 411 } 412 412 413 - /* __ref because leave_uniprocessor calls cpu_up which is __cpuinit, 414 - but this whole function is ifdefed CONFIG_HOTPLUG_CPU */ 415 - static void __ref leave_uniprocessor(void) 413 + static void leave_uniprocessor(void) 416 414 { 417 415 int cpu; 418 416 int err;
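The comment deleted from mmio-mod.c spells out exactly the dependency this series removes: leave_uniprocessor() needed the __ref escape hatch only because cpu_up() used to live in .cpuinit.text. For reference, a hypothetical pre-cleanup shape of that situation (names invented for illustration):

/* Before the sweep: a regular-text function calling into .cpuinit.text
 * trips modpost's section-mismatch check unless whitelisted via __ref. */
extern int __cpuinit bring_cpu_up(unsigned int cpu);	/* hypothetical callee */

static void __ref leave_up_demo(void)
{
	bring_cpu_up(0);	/* tolerated only because of __ref */
}

With cpu_up() back in ordinary text, both the annotation and the explanatory comment become dead weight, so they are removed together.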
+6 -6
arch/x86/mm/numa.c
··· 60 60 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE 61 61 }; 62 62 63 - int __cpuinit numa_cpu_node(int cpu) 63 + int numa_cpu_node(int cpu) 64 64 { 65 65 int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 66 66 ··· 691 691 #ifndef CONFIG_DEBUG_PER_CPU_MAPS 692 692 693 693 # ifndef CONFIG_NUMA_EMU 694 - void __cpuinit numa_add_cpu(int cpu) 694 + void numa_add_cpu(int cpu) 695 695 { 696 696 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 697 697 } 698 698 699 - void __cpuinit numa_remove_cpu(int cpu) 699 + void numa_remove_cpu(int cpu) 700 700 { 701 701 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 702 702 } ··· 763 763 } 764 764 765 765 # ifndef CONFIG_NUMA_EMU 766 - static void __cpuinit numa_set_cpumask(int cpu, bool enable) 766 + static void numa_set_cpumask(int cpu, bool enable) 767 767 { 768 768 debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); 769 769 } 770 770 771 - void __cpuinit numa_add_cpu(int cpu) 771 + void numa_add_cpu(int cpu) 772 772 { 773 773 numa_set_cpumask(cpu, true); 774 774 } 775 775 776 - void __cpuinit numa_remove_cpu(int cpu) 776 + void numa_remove_cpu(int cpu) 777 777 { 778 778 numa_set_cpumask(cpu, false); 779 779 }
+6 -6
arch/x86/mm/numa_emulation.c
··· 10 10 11 11 #include "numa_internal.h" 12 12 13 - static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; 13 + static int emu_nid_to_phys[MAX_NUMNODES]; 14 14 static char *emu_cmdline __initdata; 15 15 16 16 void __init numa_emu_cmdline(char *str) ··· 444 444 } 445 445 446 446 #ifndef CONFIG_DEBUG_PER_CPU_MAPS 447 - void __cpuinit numa_add_cpu(int cpu) 447 + void numa_add_cpu(int cpu) 448 448 { 449 449 int physnid, nid; 450 450 ··· 462 462 cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); 463 463 } 464 464 465 - void __cpuinit numa_remove_cpu(int cpu) 465 + void numa_remove_cpu(int cpu) 466 466 { 467 467 int i; 468 468 ··· 470 470 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); 471 471 } 472 472 #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 473 - static void __cpuinit numa_set_cpumask(int cpu, bool enable) 473 + static void numa_set_cpumask(int cpu, bool enable) 474 474 { 475 475 int nid, physnid; 476 476 ··· 490 490 } 491 491 } 492 492 493 - void __cpuinit numa_add_cpu(int cpu) 493 + void numa_add_cpu(int cpu) 494 494 { 495 495 numa_set_cpumask(cpu, true); 496 496 } 497 497 498 - void __cpuinit numa_remove_cpu(int cpu) 498 + void numa_remove_cpu(int cpu) 499 499 { 500 500 numa_set_cpumask(cpu, false); 501 501 }
+2 -2
arch/x86/mm/setup_nx.c
··· 5 5 #include <asm/pgtable.h> 6 6 #include <asm/proto.h> 7 7 8 - static int disable_nx __cpuinitdata; 8 + static int disable_nx; 9 9 10 10 /* 11 11 * noexec = on|off ··· 29 29 } 30 30 early_param("noexec", noexec_setup); 31 31 32 - void __cpuinit x86_configure_nx(void) 32 + void x86_configure_nx(void) 33 33 { 34 34 if (cpu_has_nx && !disable_nx) 35 35 __supported_pte_mask |= _PAGE_NX;
+4 -4
arch/x86/pci/amd_bus.c
··· 312 312 313 313 #define ENABLE_CF8_EXT_CFG (1ULL << 46) 314 314 315 - static void __cpuinit enable_pci_io_ecs(void *unused) 315 + static void enable_pci_io_ecs(void *unused) 316 316 { 317 317 u64 reg; 318 318 rdmsrl(MSR_AMD64_NB_CFG, reg); ··· 322 322 } 323 323 } 324 324 325 - static int __cpuinit amd_cpu_notify(struct notifier_block *self, 326 - unsigned long action, void *hcpu) 325 + static int amd_cpu_notify(struct notifier_block *self, unsigned long action, 326 + void *hcpu) 327 327 { 328 328 int cpu = (long)hcpu; 329 329 switch (action) { ··· 337 337 return NOTIFY_OK; 338 338 } 339 339 340 - static struct notifier_block __cpuinitdata amd_cpu_notifier = { 340 + static struct notifier_block amd_cpu_notifier = { 341 341 .notifier_call = amd_cpu_notify, 342 342 }; 343 343
+1 -1
arch/x86/platform/ce4100/ce4100.c
··· 134 134 } 135 135 136 136 #ifdef CONFIG_X86_IO_APIC 137 - static void __cpuinit sdv_pci_init(void) 137 + static void sdv_pci_init(void) 138 138 { 139 139 x86_of_pci_init(); 140 140 /* We can't set this earlier, because we need to calibrate the timer */
+2 -2
arch/x86/platform/mrst/mrst.c
··· 65 65 * lapic (always-on,ARAT) ------ 150 66 66 */ 67 67 68 - __cpuinitdata enum mrst_timer_options mrst_timer_options; 68 + enum mrst_timer_options mrst_timer_options; 69 69 70 70 static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; 71 71 static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; ··· 248 248 apbt_time_init(); 249 249 } 250 250 251 - static void __cpuinit mrst_arch_setup(void) 251 + static void mrst_arch_setup(void) 252 252 { 253 253 if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) 254 254 __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
+3 -3
arch/x86/xen/enlighten.c
··· 1681 1681 xen_domain_type = XEN_HVM_DOMAIN; 1682 1682 } 1683 1683 1684 - static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, 1685 - unsigned long action, void *hcpu) 1684 + static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action, 1685 + void *hcpu) 1686 1686 { 1687 1687 int cpu = (long)hcpu; 1688 1688 switch (action) { ··· 1700 1700 return NOTIFY_OK; 1701 1701 } 1702 1702 1703 - static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { 1703 + static struct notifier_block xen_hvm_cpu_notifier = { 1704 1704 .notifier_call = xen_hvm_cpu_notify, 1705 1705 }; 1706 1706
+3 -3
arch/x86/xen/setup.c
··· 475 475 #endif 476 476 } 477 477 478 - static int __cpuinit register_callback(unsigned type, const void *func) 478 + static int register_callback(unsigned type, const void *func) 479 479 { 480 480 struct callback_register callback = { 481 481 .type = type, ··· 486 486 return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); 487 487 } 488 488 489 - void __cpuinit xen_enable_sysenter(void) 489 + void xen_enable_sysenter(void) 490 490 { 491 491 int ret; 492 492 unsigned sysenter_feature; ··· 505 505 setup_clear_cpu_cap(sysenter_feature); 506 506 } 507 507 508 - void __cpuinit xen_enable_syscall(void) 508 + void xen_enable_syscall(void) 509 509 { 510 510 #ifdef CONFIG_X86_64 511 511 int ret;
+6 -6
arch/x86/xen/smp.c
··· 65 65 return IRQ_HANDLED; 66 66 } 67 67 68 - static void __cpuinit cpu_bringup(void) 68 + static void cpu_bringup(void) 69 69 { 70 70 int cpu; 71 71 ··· 97 97 wmb(); /* make sure everything is out */ 98 98 } 99 99 100 - static void __cpuinit cpu_bringup_and_idle(void) 100 + static void cpu_bringup_and_idle(void) 101 101 { 102 102 cpu_bringup(); 103 103 cpu_startup_entry(CPUHP_ONLINE); ··· 326 326 set_cpu_present(cpu, true); 327 327 } 328 328 329 - static int __cpuinit 329 + static int 330 330 cpu_initialize_context(unsigned int cpu, struct task_struct *idle) 331 331 { 332 332 struct vcpu_guest_context *ctxt; ··· 397 397 return 0; 398 398 } 399 399 400 - static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle) 400 + static int xen_cpu_up(unsigned int cpu, struct task_struct *idle) 401 401 { 402 402 int rc; 403 403 ··· 470 470 xen_teardown_timer(cpu); 471 471 } 472 472 473 - static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ 473 + static void xen_play_dead(void) /* used only with HOTPLUG_CPU */ 474 474 { 475 475 play_dead_common(); 476 476 HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); ··· 691 691 xen_init_lock_cpu(0); 692 692 } 693 693 694 - static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 694 + static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 695 695 { 696 696 int rc; 697 697 rc = native_cpu_up(cpu, tidle);
+1 -1
arch/x86/xen/spinlock.c
··· 361 361 return IRQ_HANDLED; 362 362 } 363 363 364 - void __cpuinit xen_init_lock_cpu(int cpu) 364 + void xen_init_lock_cpu(int cpu) 365 365 { 366 366 int irq; 367 367 char *name;
+1 -1
arch/x86/xen/xen-ops.h
··· 73 73 74 74 #ifdef CONFIG_PARAVIRT_SPINLOCKS 75 75 void __init xen_init_spinlocks(void); 76 - void __cpuinit xen_init_lock_cpu(int cpu); 76 + void xen_init_lock_cpu(int cpu); 77 77 void xen_uninit_lock_cpu(int cpu); 78 78 #else 79 79 static inline void xen_init_spinlocks(void)