Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Delete __cpuinit usage from all users

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

This removes all the powerpc uses of the __cpuinit macros. There
are no __CPUINIT users in assembly files in powerpc.

[1] https://lkml.org/lkml/2013/5/20/589

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Josh Boyer <jwboyer@gmail.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Authored by Paul Gortmaker and committed by Benjamin Herrenschmidt.
Commit IDs: 061d19f2, 5eb969d0.

Total diffstat: +54 -50
+2 -2
arch/powerpc/include/asm/rtas.h
··· 350 350 (devfn << 8) | (reg & 0xff); 351 351 } 352 352 353 - extern void __cpuinit rtas_give_timebase(void); 354 - extern void __cpuinit rtas_take_timebase(void); 353 + extern void rtas_give_timebase(void); 354 + extern void rtas_take_timebase(void); 355 355 356 356 #ifdef CONFIG_PPC_RTAS 357 357 static inline int page_is_rtas_user_buf(unsigned long pfn)
+1 -1
arch/powerpc/include/asm/vdso.h
··· 22 22 extern unsigned long vdso32_sigtramp; 23 23 extern unsigned long vdso32_rt_sigtramp; 24 24 25 - int __cpuinit vdso_getcpu_init(void); 25 + int vdso_getcpu_init(void); 26 26 27 27 #else /* __ASSEMBLY__ */ 28 28
+21 -15
arch/powerpc/kernel/cacheinfo.c
··· 131 131 return cache_type_info[cache->type].name; 132 132 } 133 133 134 - static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode) 134 + static void cache_init(struct cache *cache, int type, int level, 135 + struct device_node *ofnode) 135 136 { 136 137 cache->type = type; 137 138 cache->level = level; ··· 141 140 list_add(&cache->list, &cache_list); 142 141 } 143 142 144 - static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode) 143 + static struct cache *new_cache(int type, int level, struct device_node *ofnode) 145 144 { 146 145 struct cache *cache; 147 146 ··· 325 324 return of_get_property(np, "cache-unified", NULL); 326 325 } 327 326 328 - static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level) 327 + static struct cache *cache_do_one_devnode_unified(struct device_node *node, 328 + int level) 329 329 { 330 330 struct cache *cache; 331 331 ··· 337 335 return cache; 338 336 } 339 337 340 - static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level) 338 + static struct cache *cache_do_one_devnode_split(struct device_node *node, 339 + int level) 341 340 { 342 341 struct cache *dcache, *icache; 343 342 ··· 360 357 return NULL; 361 358 } 362 359 363 - static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level) 360 + static struct cache *cache_do_one_devnode(struct device_node *node, int level) 364 361 { 365 362 struct cache *cache; 366 363 ··· 372 369 return cache; 373 370 } 374 371 375 - static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level) 372 + static struct cache *cache_lookup_or_instantiate(struct device_node *node, 373 + int level) 376 374 { 377 375 struct cache *cache; 378 376 ··· 389 385 return cache; 390 386 } 391 387 392 - static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger) 388 + static void 
link_cache_lists(struct cache *smaller, struct cache *bigger) 393 389 { 394 390 while (smaller->next_local) { 395 391 if (smaller->next_local == bigger) ··· 400 396 smaller->next_local = bigger; 401 397 } 402 398 403 - static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache) 399 + static void do_subsidiary_caches_debugcheck(struct cache *cache) 404 400 { 405 401 WARN_ON_ONCE(cache->level != 1); 406 402 WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu")); 407 403 } 408 404 409 - static void __cpuinit do_subsidiary_caches(struct cache *cache) 405 + static void do_subsidiary_caches(struct cache *cache) 410 406 { 411 407 struct device_node *subcache_node; 412 408 int level = cache->level; ··· 427 423 } 428 424 } 429 425 430 - static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id) 426 + static struct cache *cache_chain_instantiate(unsigned int cpu_id) 431 427 { 432 428 struct device_node *cpu_node; 433 429 struct cache *cpu_cache = NULL; ··· 452 448 return cpu_cache; 453 449 } 454 450 455 - static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id) 451 + static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id) 456 452 { 457 453 struct cache_dir *cache_dir; 458 454 struct device *dev; ··· 657 653 .default_attrs = cache_index_default_attrs, 658 654 }; 659 655 660 - static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) 656 + static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir) 661 657 { 662 658 const char *cache_name; 663 659 const char *cache_type; ··· 700 696 kfree(buf); 701 697 } 702 698 703 - static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir) 699 + static void cacheinfo_create_index_dir(struct cache *cache, int index, 700 + struct cache_dir *cache_dir) 704 701 { 705 702 struct cache_index_dir *index_dir; 706 703 int rc; ··· 727 722 kfree(index_dir); 728 723 } 729 724 730 - static void 
__cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list) 725 + static void cacheinfo_sysfs_populate(unsigned int cpu_id, 726 + struct cache *cache_list) 731 727 { 732 728 struct cache_dir *cache_dir; 733 729 struct cache *cache; ··· 746 740 } 747 741 } 748 742 749 - void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id) 743 + void cacheinfo_cpu_online(unsigned int cpu_id) 750 744 { 751 745 struct cache *cache; 752 746
+2 -2
arch/powerpc/kernel/rtas.c
··· 1172 1172 static arch_spinlock_t timebase_lock; 1173 1173 static u64 timebase = 0; 1174 1174 1175 - void __cpuinit rtas_give_timebase(void) 1175 + void rtas_give_timebase(void) 1176 1176 { 1177 1177 unsigned long flags; 1178 1178 ··· 1189 1189 local_irq_restore(flags); 1190 1190 } 1191 1191 1192 - void __cpuinit rtas_take_timebase(void) 1192 + void rtas_take_timebase(void) 1193 1193 { 1194 1194 while (!timebase) 1195 1195 barrier();
+2 -2
arch/powerpc/kernel/smp.c
··· 480 480 secondary_ti = current_set[cpu] = ti; 481 481 } 482 482 483 - int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 483 + int __cpu_up(unsigned int cpu, struct task_struct *tidle) 484 484 { 485 485 int rc, c; 486 486 ··· 610 610 } 611 611 612 612 /* Activate a secondary processor. */ 613 - __cpuinit void start_secondary(void *unused) 613 + void start_secondary(void *unused) 614 614 { 615 615 unsigned int cpu = smp_processor_id(); 616 616 struct device_node *l2_cache;
+3 -3
arch/powerpc/kernel/sysfs.c
··· 341 341 #endif /* HAS_PPC_PMC_PA6T */ 342 342 #endif /* HAS_PPC_PMC_CLASSIC */ 343 343 344 - static void __cpuinit register_cpu_online(unsigned int cpu) 344 + static void register_cpu_online(unsigned int cpu) 345 345 { 346 346 struct cpu *c = &per_cpu(cpu_devices, cpu); 347 347 struct device *s = &c->dev; ··· 502 502 503 503 #endif /* CONFIG_HOTPLUG_CPU */ 504 504 505 - static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, 505 + static int sysfs_cpu_notify(struct notifier_block *self, 506 506 unsigned long action, void *hcpu) 507 507 { 508 508 unsigned int cpu = (unsigned int)(long)hcpu; ··· 522 522 return NOTIFY_OK; 523 523 } 524 524 525 - static struct notifier_block __cpuinitdata sysfs_cpu_nb = { 525 + static struct notifier_block sysfs_cpu_nb = { 526 526 .notifier_call = sysfs_cpu_notify, 527 527 }; 528 528
-1
arch/powerpc/kernel/time.c
··· 631 631 return found; 632 632 } 633 633 634 - /* should become __cpuinit when secondary_cpu_time_init also is */ 635 634 void start_cpu_decrementer(void) 636 635 { 637 636 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+1 -1
arch/powerpc/kernel/vdso.c
··· 711 711 } 712 712 713 713 #ifdef CONFIG_PPC64 714 - int __cpuinit vdso_getcpu_init(void) 714 + int vdso_getcpu_init(void) 715 715 { 716 716 unsigned long cpu, node, val; 717 717
+3 -3
arch/powerpc/mm/44x_mmu.c
··· 41 41 42 42 unsigned long tlb_47x_boltmap[1024/8]; 43 43 44 - static void __cpuinit ppc44x_update_tlb_hwater(void) 44 + static void ppc44x_update_tlb_hwater(void) 45 45 { 46 46 extern unsigned int tlb_44x_patch_hwater_D[]; 47 47 extern unsigned int tlb_44x_patch_hwater_I[]; ··· 134 134 /* 135 135 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU 136 136 */ 137 - static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys) 137 + static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys) 138 138 { 139 139 unsigned int rA; 140 140 int bolted; ··· 229 229 } 230 230 231 231 #ifdef CONFIG_SMP 232 - void __cpuinit mmu_init_secondary(int cpu) 232 + void mmu_init_secondary(int cpu) 233 233 { 234 234 unsigned long addr; 235 235 unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
+1 -1
arch/powerpc/mm/hash_utils_64.c
··· 807 807 } 808 808 809 809 #ifdef CONFIG_SMP 810 - void __cpuinit early_init_mmu_secondary(void) 810 + void early_init_mmu_secondary(void) 811 811 { 812 812 /* Initialize hash table for that CPU */ 813 813 if (!firmware_has_feature(FW_FEATURE_LPAR))
+3 -3
arch/powerpc/mm/mmu_context_nohash.c
··· 332 332 333 333 #ifdef CONFIG_SMP 334 334 335 - static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, 336 - unsigned long action, void *hcpu) 335 + static int mmu_context_cpu_notify(struct notifier_block *self, 336 + unsigned long action, void *hcpu) 337 337 { 338 338 unsigned int cpu = (unsigned int)(long)hcpu; 339 339 ··· 366 366 return NOTIFY_OK; 367 367 } 368 368 369 - static struct notifier_block __cpuinitdata mmu_context_cpu_nb = { 369 + static struct notifier_block mmu_context_cpu_nb = { 370 370 .notifier_call = mmu_context_cpu_notify, 371 371 }; 372 372
+3 -4
arch/powerpc/mm/numa.c
··· 516 516 * Figure out to which domain a cpu belongs and stick it there. 517 517 * Return the id of the domain used. 518 518 */ 519 - static int __cpuinit numa_setup_cpu(unsigned long lcpu) 519 + static int numa_setup_cpu(unsigned long lcpu) 520 520 { 521 521 int nid = 0; 522 522 struct device_node *cpu = of_get_cpu_node(lcpu, NULL); ··· 538 538 return nid; 539 539 } 540 540 541 - static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, 542 - unsigned long action, 541 + static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, 543 542 void *hcpu) 544 543 { 545 544 unsigned long lcpu = (unsigned long)hcpu; ··· 918 919 return ret; 919 920 } 920 921 921 - static struct notifier_block __cpuinitdata ppc64_numa_nb = { 922 + static struct notifier_block ppc64_numa_nb = { 922 923 .notifier_call = cpu_numa_callback, 923 924 .priority = 1 /* Must run before sched domains notifier. */ 924 925 };
+1 -1
arch/powerpc/mm/tlb_nohash.c
··· 648 648 __early_init_mmu(1); 649 649 } 650 650 651 - void __cpuinit early_init_mmu_secondary(void) 651 + void early_init_mmu_secondary(void) 652 652 { 653 653 __early_init_mmu(0); 654 654 }
+2 -2
arch/powerpc/perf/core-book3s.c
··· 1786 1786 cpuhw->mmcr[0] = MMCR0_FC; 1787 1787 } 1788 1788 1789 - static int __cpuinit 1789 + static int 1790 1790 power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 1791 1791 { 1792 1792 unsigned int cpu = (long)hcpu; ··· 1803 1803 return NOTIFY_OK; 1804 1804 } 1805 1805 1806 - int __cpuinit register_power_pmu(struct power_pmu *pmu) 1806 + int register_power_pmu(struct power_pmu *pmu) 1807 1807 { 1808 1808 if (ppmu) 1809 1809 return -EBUSY; /* something's already registered */
+2 -2
arch/powerpc/platforms/44x/currituck.c
··· 91 91 } 92 92 93 93 #ifdef CONFIG_SMP 94 - static void __cpuinit smp_ppc47x_setup_cpu(int cpu) 94 + static void smp_ppc47x_setup_cpu(int cpu) 95 95 { 96 96 mpic_setup_this_cpu(); 97 97 } 98 98 99 - static int __cpuinit smp_ppc47x_kick_cpu(int cpu) 99 + static int smp_ppc47x_kick_cpu(int cpu) 100 100 { 101 101 struct device_node *cpunode = of_get_cpu_node(cpu, NULL); 102 102 const u64 *spin_table_addr_prop;
+2 -2
arch/powerpc/platforms/44x/iss4xx.c
··· 81 81 } 82 82 83 83 #ifdef CONFIG_SMP 84 - static void __cpuinit smp_iss4xx_setup_cpu(int cpu) 84 + static void smp_iss4xx_setup_cpu(int cpu) 85 85 { 86 86 mpic_setup_this_cpu(); 87 87 } 88 88 89 - static int __cpuinit smp_iss4xx_kick_cpu(int cpu) 89 + static int smp_iss4xx_kick_cpu(int cpu) 90 90 { 91 91 struct device_node *cpunode = of_get_cpu_node(cpu, NULL); 92 92 const u64 *spin_table_addr_prop;
+3 -3
arch/powerpc/platforms/85xx/smp.c
··· 99 99 } 100 100 101 101 #ifdef CONFIG_HOTPLUG_CPU 102 - static void __cpuinit smp_85xx_mach_cpu_die(void) 102 + static void smp_85xx_mach_cpu_die(void) 103 103 { 104 104 unsigned int cpu = smp_processor_id(); 105 105 u32 tmp; ··· 141 141 return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l); 142 142 } 143 143 144 - static int __cpuinit smp_85xx_kick_cpu(int nr) 144 + static int smp_85xx_kick_cpu(int nr) 145 145 { 146 146 unsigned long flags; 147 147 const u64 *cpu_rel_addr; ··· 362 362 } 363 363 #endif /* CONFIG_KEXEC */ 364 364 365 - static void __cpuinit smp_85xx_setup_cpu(int cpu_nr) 365 + static void smp_85xx_setup_cpu(int cpu_nr) 366 366 { 367 367 if (smp_85xx_ops.probe == smp_mpic_probe) 368 368 mpic_setup_this_cpu();
+1 -1
arch/powerpc/platforms/powermac/smp.c
··· 885 885 return NOTIFY_OK; 886 886 } 887 887 888 - static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { 888 + static struct notifier_block smp_core99_cpu_nb = { 889 889 .notifier_call = smp_core99_cpu_notify, 890 890 }; 891 891 #endif /* CONFIG_HOTPLUG_CPU */
+1 -1
arch/powerpc/platforms/powernv/smp.c
··· 40 40 #define DBG(fmt...) 41 41 #endif 42 42 43 - static void __cpuinit pnv_smp_setup_cpu(int cpu) 43 + static void pnv_smp_setup_cpu(int cpu) 44 44 { 45 45 if (cpu != boot_cpuid) 46 46 xics_setup_cpu();