Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: delete __cpuinit usage from all sh files

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.

This removes all the arch/sh uses of the __cpuinit macros from
all C files. Currently sh does not have any __CPUINIT used in
assembly files.

[1] https://lkml.org/lkml/2013/5/20/589

Cc: Paul Mundt <lethal@linux-sh.org>
Cc: linux-sh@vger.kernel.org
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

+28 -28
+9 -9
arch/sh/kernel/cpu/init.c
··· 43 43 * peripherals (nofpu, nodsp, and so forth). 44 44 */ 45 45 #define onchip_setup(x) \ 46 - static int x##_disabled __cpuinitdata = !cpu_has_##x; \ 46 + static int x##_disabled = !cpu_has_##x; \ 47 47 \ 48 - static int __cpuinit x##_setup(char *opts) \ 48 + static int x##_setup(char *opts) \ 49 49 { \ 50 50 x##_disabled = 1; \ 51 51 return 1; \ ··· 59 59 #define CPUOPM 0xff2f0000 60 60 #define CPUOPM_RABD (1 << 5) 61 61 62 - static void __cpuinit speculative_execution_init(void) 62 + static void speculative_execution_init(void) 63 63 { 64 64 /* Clear RABD */ 65 65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); ··· 78 78 #define EXPMASK_BRDSSLP (1 << 1) 79 79 #define EXPMASK_MMCAW (1 << 4) 80 80 81 - static void __cpuinit expmask_init(void) 81 + static void expmask_init(void) 82 82 { 83 83 unsigned long expmask = __raw_readl(EXPMASK); 84 84 ··· 217 217 l2_cache_shape = -1; /* No S-cache */ 218 218 } 219 219 220 - static void __cpuinit fpu_init(void) 220 + static void fpu_init(void) 221 221 { 222 222 /* Disable the FPU */ 223 223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) { ··· 230 230 } 231 231 232 232 #ifdef CONFIG_SH_DSP 233 - static void __cpuinit release_dsp(void) 233 + static void release_dsp(void) 234 234 { 235 235 unsigned long sr; 236 236 ··· 244 244 ); 245 245 } 246 246 247 - static void __cpuinit dsp_init(void) 247 + static void dsp_init(void) 248 248 { 249 249 unsigned long sr; 250 250 ··· 276 276 release_dsp(); 277 277 } 278 278 #else 279 - static inline void __cpuinit dsp_init(void) { } 279 + static inline void dsp_init(void) { } 280 280 #endif /* CONFIG_SH_DSP */ 281 281 282 282 /** ··· 295 295 * Each processor family is still responsible for doing its own probing 296 296 * and cache configuration in cpu_probe(). 297 297 */ 298 - asmlinkage void __cpuinit cpu_init(void) 298 + asmlinkage void cpu_init(void) 299 299 { 300 300 current_thread_info()->cpu = hard_smp_processor_id(); 301 301
+1 -1
arch/sh/kernel/cpu/sh2/probe.c
··· 13 13 #include <asm/processor.h> 14 14 #include <asm/cache.h> 15 15 16 - void __cpuinit cpu_probe(void) 16 + void cpu_probe(void) 17 17 { 18 18 #if defined(CONFIG_CPU_SUBTYPE_SH7619) 19 19 boot_cpu_data.type = CPU_SH7619;
+1 -1
arch/sh/kernel/cpu/sh2a/probe.c
··· 13 13 #include <asm/processor.h> 14 14 #include <asm/cache.h> 15 15 16 - void __cpuinit cpu_probe(void) 16 + void cpu_probe(void) 17 17 { 18 18 boot_cpu_data.family = CPU_FAMILY_SH2A; 19 19
+1 -1
arch/sh/kernel/cpu/sh3/probe.c
··· 16 16 #include <asm/cache.h> 17 17 #include <asm/io.h> 18 18 19 - void __cpuinit cpu_probe(void) 19 + void cpu_probe(void) 20 20 { 21 21 unsigned long addr0, addr1, data0, data1, data2, data3; 22 22
+1 -1
arch/sh/kernel/cpu/sh4/probe.c
··· 15 15 #include <asm/processor.h> 16 16 #include <asm/cache.h> 17 17 18 - void __cpuinit cpu_probe(void) 18 + void cpu_probe(void) 19 19 { 20 20 unsigned long pvr, prr, cvr; 21 21 unsigned long size;
+3 -3
arch/sh/kernel/cpu/sh4a/smp-shx3.c
··· 124 124 __raw_writel(STBCR_RESET, STBCR_REG(cpu)); 125 125 } 126 126 127 - static int __cpuinit 127 + static int 128 128 shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 129 129 { 130 130 unsigned int cpu = (unsigned int)hcpu; ··· 143 143 return NOTIFY_OK; 144 144 } 145 145 146 - static struct notifier_block __cpuinitdata shx3_cpu_notifier = { 146 + static struct notifier_block shx3_cpu_notifier = { 147 147 .notifier_call = shx3_cpu_callback, 148 148 }; 149 149 150 - static int __cpuinit register_shx3_cpu_notifier(void) 150 + static int register_shx3_cpu_notifier(void) 151 151 { 152 152 register_hotcpu_notifier(&shx3_cpu_notifier); 153 153 return 0;
+1 -1
arch/sh/kernel/cpu/sh5/probe.c
··· 17 17 #include <asm/cache.h> 18 18 #include <asm/tlb.h> 19 19 20 - void __cpuinit cpu_probe(void) 20 + void cpu_probe(void) 21 21 { 22 22 unsigned long long cir; 23 23
+2 -2
arch/sh/kernel/perf_event.c
··· 367 367 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); 368 368 } 369 369 370 - static int __cpuinit 370 + static int 371 371 sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 372 372 { 373 373 unsigned int cpu = (long)hcpu; ··· 384 384 return NOTIFY_OK; 385 385 } 386 386 387 - int __cpuinit register_sh_pmu(struct sh_pmu *_pmu) 387 + int register_sh_pmu(struct sh_pmu *_pmu) 388 388 { 389 389 if (sh_pmu) 390 390 return -EBUSY;
+1 -1
arch/sh/kernel/process.c
··· 65 65 # define HAVE_SOFTFP 0 66 66 #endif 67 67 68 - void __cpuinit init_thread_xstate(void) 68 + void init_thread_xstate(void) 69 69 { 70 70 if (boot_cpu_data.flags & CPU_HAS_FPU) 71 71 xstate_size = sizeof(struct sh_fpu_hard_struct);
+1 -1
arch/sh/kernel/setup.c
··· 172 172 #endif 173 173 } 174 174 175 - void __cpuinit calibrate_delay(void) 175 + void calibrate_delay(void) 176 176 { 177 177 struct clk *clk = clk_get(NULL, "cpu_clk"); 178 178
+4 -4
arch/sh/kernel/smp.c
··· 37 37 /* State of each CPU */ 38 38 DEFINE_PER_CPU(int, cpu_state) = { 0 }; 39 39 40 - void __cpuinit register_smp_ops(struct plat_smp_ops *ops) 40 + void register_smp_ops(struct plat_smp_ops *ops) 41 41 { 42 42 if (mp_ops) 43 43 printk(KERN_WARNING "Overriding previously set SMP ops\n"); ··· 45 45 mp_ops = ops; 46 46 } 47 47 48 - static inline void __cpuinit smp_store_cpu_info(unsigned int cpu) 48 + static inline void smp_store_cpu_info(unsigned int cpu) 49 49 { 50 50 struct sh_cpuinfo *c = cpu_data + cpu; 51 51 ··· 174 174 } 175 175 #endif 176 176 177 - asmlinkage void __cpuinit start_secondary(void) 177 + asmlinkage void start_secondary(void) 178 178 { 179 179 unsigned int cpu = smp_processor_id(); 180 180 struct mm_struct *mm = &init_mm; ··· 215 215 void *thread_info; 216 216 } stack_start; 217 217 218 - int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk) 218 + int __cpu_up(unsigned int cpu, struct task_struct *tsk) 219 219 { 220 220 unsigned long timeout; 221 221
+1 -1
arch/sh/kernel/traps_32.c
··· 741 741 die_if_kernel("exception", regs, ex); 742 742 } 743 743 744 - void __cpuinit per_cpu_trap_init(void) 744 + void per_cpu_trap_init(void) 745 745 { 746 746 extern void *vbr_base; 747 747
+1 -1
arch/sh/kernel/traps_64.c
··· 810 810 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); 811 811 } 812 812 813 - void __cpuinit per_cpu_trap_init(void) 813 + void per_cpu_trap_init(void) 814 814 { 815 815 /* Nothing to do for now, VBR initialization later. */ 816 816 }
+1 -1
arch/sh/mm/tlb-sh5.c
··· 17 17 /** 18 18 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB. 19 19 */ 20 - int __cpuinit sh64_tlb_init(void) 20 + int sh64_tlb_init(void) 21 21 { 22 22 /* Assign some sane DTLB defaults */ 23 23 cpu_data->dtlb.entries = 64;