Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kernel: delete __cpuinit usage from all core kernel files

The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
illustrates the nasty type of bugs that can be created
with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.

This removes all the uses of the __cpuinit macros from C files in
the core kernel directories (kernel, init, lib, mm, and include)
that don't really have a specific maintainer.

[1] https://lkml.org/lkml/2013/5/20/589

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>

+62 -59
+3 -3
Documentation/cpu-hotplug.txt
··· 267 267 A: This is what you would need in your kernel code to receive notifications. 268 268 269 269 #include <linux/cpu.h> 270 - static int __cpuinit foobar_cpu_callback(struct notifier_block *nfb, 271 - unsigned long action, void *hcpu) 270 + static int foobar_cpu_callback(struct notifier_block *nfb, 271 + unsigned long action, void *hcpu) 272 272 { 273 273 unsigned int cpu = (unsigned long)hcpu; 274 274 ··· 285 285 return NOTIFY_OK; 286 286 } 287 287 288 - static struct notifier_block __cpuinitdata foobar_cpu_notifer = 288 + static struct notifier_block foobar_cpu_notifer = 289 289 { 290 290 .notifier_call = foobar_cpu_callback, 291 291 };
+1 -1
include/linux/cpu.h
··· 114 114 /* Need to know about CPUs going up/down? */ 115 115 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) 116 116 #define cpu_notifier(fn, pri) { \ 117 - static struct notifier_block fn##_nb __cpuinitdata = \ 117 + static struct notifier_block fn##_nb = \ 118 118 { .notifier_call = fn, .priority = pri }; \ 119 119 register_cpu_notifier(&fn##_nb); \ 120 120 }
+1 -1
include/linux/perf_event.h
··· 826 826 */ 827 827 #define perf_cpu_notifier(fn) \ 828 828 do { \ 829 - static struct notifier_block fn##_nb __cpuinitdata = \ 829 + static struct notifier_block fn##_nb = \ 830 830 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ 831 831 unsigned long cpu = smp_processor_id(); \ 832 832 unsigned long flags; \
+8 -5
init/calibrate.c
··· 31 31 #define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100)) 32 32 #define MAX_DIRECT_CALIBRATION_RETRIES 5 33 33 34 - static unsigned long __cpuinit calibrate_delay_direct(void) 34 + static unsigned long calibrate_delay_direct(void) 35 35 { 36 36 unsigned long pre_start, start, post_start; 37 37 unsigned long pre_end, end, post_end; ··· 166 166 return 0; 167 167 } 168 168 #else 169 - static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;} 169 + static unsigned long calibrate_delay_direct(void) 170 + { 171 + return 0; 172 + } 170 173 #endif 171 174 172 175 /* ··· 183 180 */ 184 181 #define LPS_PREC 8 185 182 186 - static unsigned long __cpuinit calibrate_delay_converge(void) 183 + static unsigned long calibrate_delay_converge(void) 187 184 { 188 185 /* First stage - slowly accelerate to find initial bounds */ 189 186 unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit; ··· 257 254 * Architectures should override this function if a faster calibration 258 255 * method is available. 259 256 */ 260 - unsigned long __attribute__((weak)) __cpuinit calibrate_delay_is_known(void) 257 + unsigned long __attribute__((weak)) calibrate_delay_is_known(void) 261 258 { 262 259 return 0; 263 260 } 264 261 265 - void __cpuinit calibrate_delay(void) 262 + void calibrate_delay(void) 266 263 { 267 264 unsigned long lpj; 268 265 static bool printed;
+3 -3
kernel/cpu.c
··· 366 366 #endif /*CONFIG_HOTPLUG_CPU*/ 367 367 368 368 /* Requires cpu_add_remove_lock to be held */ 369 - static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) 369 + static int _cpu_up(unsigned int cpu, int tasks_frozen) 370 370 { 371 371 int ret, nr_calls = 0; 372 372 void *hcpu = (void *)(long)cpu; ··· 419 419 return ret; 420 420 } 421 421 422 - int __cpuinit cpu_up(unsigned int cpu) 422 + int cpu_up(unsigned int cpu) 423 423 { 424 424 int err = 0; 425 425 ··· 618 618 * It must be called by the arch code on the new cpu, before the new cpu 619 619 * enables interrupts and before the "boot" cpu returns from __cpu_up(). 620 620 */ 621 - void __cpuinit notify_cpu_starting(unsigned int cpu) 621 + void notify_cpu_starting(unsigned int cpu) 622 622 { 623 623 unsigned long val = CPU_STARTING; 624 624
+2 -2
kernel/events/core.c
··· 7630 7630 } 7631 7631 } 7632 7632 7633 - static void __cpuinit perf_event_init_cpu(int cpu) 7633 + static void perf_event_init_cpu(int cpu) 7634 7634 { 7635 7635 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 7636 7636 ··· 7719 7719 .priority = INT_MIN, 7720 7720 }; 7721 7721 7722 - static int __cpuinit 7722 + static int 7723 7723 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) 7724 7724 { 7725 7725 unsigned int cpu = (long)hcpu;
+1 -1
kernel/fork.c
··· 1546 1546 } 1547 1547 } 1548 1548 1549 - struct task_struct * __cpuinit fork_idle(int cpu) 1549 + struct task_struct *fork_idle(int cpu) 1550 1550 { 1551 1551 struct task_struct *task; 1552 1552 task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
+3 -3
kernel/hrtimer.c
··· 1659 1659 /* 1660 1660 * Functions related to boot-time initialization: 1661 1661 */ 1662 - static void __cpuinit init_hrtimers_cpu(int cpu) 1662 + static void init_hrtimers_cpu(int cpu) 1663 1663 { 1664 1664 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); 1665 1665 int i; ··· 1740 1740 1741 1741 #endif /* CONFIG_HOTPLUG_CPU */ 1742 1742 1743 - static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, 1743 + static int hrtimer_cpu_notify(struct notifier_block *self, 1744 1744 unsigned long action, void *hcpu) 1745 1745 { 1746 1746 int scpu = (long)hcpu; ··· 1773 1773 return NOTIFY_OK; 1774 1774 } 1775 1775 1776 - static struct notifier_block __cpuinitdata hrtimers_nb = { 1776 + static struct notifier_block hrtimers_nb = { 1777 1777 .notifier_call = hrtimer_cpu_notify, 1778 1778 }; 1779 1779
+1 -1
kernel/printk.c
··· 1921 1921 * called when a new CPU comes online (or fails to come up), and ensures 1922 1922 * that any such output gets printed. 1923 1923 */ 1924 - static int __cpuinit console_cpu_notify(struct notifier_block *self, 1924 + static int console_cpu_notify(struct notifier_block *self, 1925 1925 unsigned long action, void *hcpu) 1926 1926 { 1927 1927 switch (action) {
+1 -1
kernel/profile.c
··· 331 331 put_cpu(); 332 332 } 333 333 334 - static int __cpuinit profile_cpu_callback(struct notifier_block *info, 334 + static int profile_cpu_callback(struct notifier_block *info, 335 335 unsigned long action, void *__cpu) 336 336 { 337 337 int node, cpu = (unsigned long)__cpu;
+1 -1
kernel/relay.c
··· 516 516 * 517 517 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD) 518 518 */ 519 - static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, 519 + static int relay_hotcpu_callback(struct notifier_block *nb, 520 520 unsigned long action, 521 521 void *hcpu) 522 522 {
+6 -6
kernel/sched/core.c
··· 4133 4133 debug_show_all_locks(); 4134 4134 } 4135 4135 4136 - void __cpuinit init_idle_bootup_task(struct task_struct *idle) 4136 + void init_idle_bootup_task(struct task_struct *idle) 4137 4137 { 4138 4138 idle->sched_class = &idle_sched_class; 4139 4139 } ··· 4146 4146 * NOTE: this function does not set the idle thread's NEED_RESCHED 4147 4147 * flag, to make booting more robust. 4148 4148 */ 4149 - void __cpuinit init_idle(struct task_struct *idle, int cpu) 4149 + void init_idle(struct task_struct *idle, int cpu) 4150 4150 { 4151 4151 struct rq *rq = cpu_rq(cpu); 4152 4152 unsigned long flags; ··· 4630 4630 * migration_call - callback that gets triggered when a CPU is added. 4631 4631 * Here we can start up the necessary migration thread for the new CPU. 4632 4632 */ 4633 - static int __cpuinit 4633 + static int 4634 4634 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 4635 4635 { 4636 4636 int cpu = (long)hcpu; ··· 4684 4684 * happens before everything else. This has to be lower priority than 4685 4685 * the notifier in the perf_event subsystem, though. 4686 4686 */ 4687 - static struct notifier_block __cpuinitdata migration_notifier = { 4687 + static struct notifier_block migration_notifier = { 4688 4688 .notifier_call = migration_call, 4689 4689 .priority = CPU_PRI_MIGRATION, 4690 4690 }; 4691 4691 4692 - static int __cpuinit sched_cpu_active(struct notifier_block *nfb, 4692 + static int sched_cpu_active(struct notifier_block *nfb, 4693 4693 unsigned long action, void *hcpu) 4694 4694 { 4695 4695 switch (action & ~CPU_TASKS_FROZEN) { ··· 4702 4702 } 4703 4703 } 4704 4704 4705 - static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, 4705 + static int sched_cpu_inactive(struct notifier_block *nfb, 4706 4706 unsigned long action, void *hcpu) 4707 4707 { 4708 4708 switch (action & ~CPU_TASKS_FROZEN) {
+1 -1
kernel/sched/fair.c
··· 5506 5506 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 5507 5507 } 5508 5508 5509 - static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, 5509 + static int sched_ilb_notifier(struct notifier_block *nfb, 5510 5510 unsigned long action, void *hcpu) 5511 5511 { 5512 5512 switch (action & ~CPU_TASKS_FROZEN) {
+1 -1
kernel/smp.c
··· 73 73 return NOTIFY_OK; 74 74 } 75 75 76 - static struct notifier_block __cpuinitdata hotplug_cfd_notifier = { 76 + static struct notifier_block hotplug_cfd_notifier = { 77 77 .notifier_call = hotplug_cfd, 78 78 }; 79 79
+1 -1
kernel/smpboot.c
··· 24 24 */ 25 25 static DEFINE_PER_CPU(struct task_struct *, idle_threads); 26 26 27 - struct task_struct * __cpuinit idle_thread_get(unsigned int cpu) 27 + struct task_struct *idle_thread_get(unsigned int cpu) 28 28 { 29 29 struct task_struct *tsk = per_cpu(idle_threads, cpu); 30 30
+4 -4
kernel/softirq.c
··· 699 699 } 700 700 EXPORT_SYMBOL(send_remote_softirq); 701 701 702 - static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, 702 + static int remote_softirq_cpu_notify(struct notifier_block *self, 703 703 unsigned long action, void *hcpu) 704 704 { 705 705 /* ··· 728 728 return NOTIFY_OK; 729 729 } 730 730 731 - static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = { 731 + static struct notifier_block remote_softirq_cpu_notifier = { 732 732 .notifier_call = remote_softirq_cpu_notify, 733 733 }; 734 734 ··· 830 830 } 831 831 #endif /* CONFIG_HOTPLUG_CPU */ 832 832 833 - static int __cpuinit cpu_callback(struct notifier_block *nfb, 833 + static int cpu_callback(struct notifier_block *nfb, 834 834 unsigned long action, 835 835 void *hcpu) 836 836 { ··· 845 845 return NOTIFY_OK; 846 846 } 847 847 848 - static struct notifier_block __cpuinitdata cpu_nfb = { 848 + static struct notifier_block cpu_nfb = { 849 849 .notifier_call = cpu_callback 850 850 }; 851 851
+1 -1
kernel/time/tick-sched.c
··· 298 298 } 299 299 __setup("nohz_full=", tick_nohz_full_setup); 300 300 301 - static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb, 301 + static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, 302 302 unsigned long action, 303 303 void *hcpu) 304 304 {
+5 -5
kernel/timer.c
··· 1505 1505 } 1506 1506 EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1507 1507 1508 - static int __cpuinit init_timers_cpu(int cpu) 1508 + static int init_timers_cpu(int cpu) 1509 1509 { 1510 1510 int j; 1511 1511 struct tvec_base *base; 1512 - static char __cpuinitdata tvec_base_done[NR_CPUS]; 1512 + static char tvec_base_done[NR_CPUS]; 1513 1513 1514 1514 if (!tvec_base_done[cpu]) { 1515 1515 static char boot_done; ··· 1577 1577 } 1578 1578 } 1579 1579 1580 - static void __cpuinit migrate_timers(int cpu) 1580 + static void migrate_timers(int cpu) 1581 1581 { 1582 1582 struct tvec_base *old_base; 1583 1583 struct tvec_base *new_base; ··· 1610 1610 } 1611 1611 #endif /* CONFIG_HOTPLUG_CPU */ 1612 1612 1613 - static int __cpuinit timer_cpu_notify(struct notifier_block *self, 1613 + static int timer_cpu_notify(struct notifier_block *self, 1614 1614 unsigned long action, void *hcpu) 1615 1615 { 1616 1616 long cpu = (long)hcpu; ··· 1635 1635 return NOTIFY_OK; 1636 1636 } 1637 1637 1638 - static struct notifier_block __cpuinitdata timers_nb = { 1638 + static struct notifier_block timers_nb = { 1639 1639 .notifier_call = timer_cpu_notify, 1640 1640 }; 1641 1641
+2 -2
kernel/workqueue.c
··· 4644 4644 * Workqueues should be brought up before normal priority CPU notifiers. 4645 4645 * This will be registered high priority CPU notifier. 4646 4646 */ 4647 - static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, 4647 + static int workqueue_cpu_up_callback(struct notifier_block *nfb, 4648 4648 unsigned long action, 4649 4649 void *hcpu) 4650 4650 { ··· 4697 4697 * Workqueues should be brought down after normal priority CPU notifiers. 4698 4698 * This will be registered as low priority CPU notifier. 4699 4699 */ 4700 - static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, 4700 + static int workqueue_cpu_down_callback(struct notifier_block *nfb, 4701 4701 unsigned long action, 4702 4702 void *hcpu) 4703 4703 {
+1 -1
lib/Kconfig.debug
··· 238 238 any use of code/data previously in these sections would 239 239 most likely result in an oops. 240 240 In the code, functions and variables are annotated with 241 - __init, __cpuinit, etc. (see the full list in include/linux/init.h), 241 + __init, etc. (see the full list in include/linux/init.h), 242 242 which results in the code/data being placed in specific sections. 243 243 The section mismatch analysis is always performed after a full 244 244 kernel build, and enabling this option causes the following
+1 -1
lib/earlycpio.c
··· 63 63 * the match returned an empty filename string. 64 64 */ 65 65 66 - struct cpio_data __cpuinit find_cpio_data(const char *path, void *data, 66 + struct cpio_data find_cpio_data(const char *path, void *data, 67 67 size_t len, long *offset) 68 68 { 69 69 const size_t cpio_header_len = 8*C_NFIELDS - 2;
+1 -1
lib/percpu_counter.c
··· 158 158 percpu_counter_batch = max(32, nr*2); 159 159 } 160 160 161 - static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, 161 + static int percpu_counter_hotcpu_callback(struct notifier_block *nb, 162 162 unsigned long action, void *hcpu) 163 163 { 164 164 #ifdef CONFIG_HOTPLUG_CPU
+1 -1
mm/memcontrol.c
··· 2522 2522 spin_unlock(&memcg->pcp_counter_lock); 2523 2523 } 2524 2524 2525 - static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, 2525 + static int memcg_cpu_hotplug_callback(struct notifier_block *nb, 2526 2526 unsigned long action, 2527 2527 void *hcpu) 2528 2528 {
+2 -2
mm/page-writeback.c
··· 1619 1619 ratelimit_pages = 16; 1620 1620 } 1621 1621 1622 - static int __cpuinit 1622 + static int 1623 1623 ratelimit_handler(struct notifier_block *self, unsigned long action, 1624 1624 void *hcpu) 1625 1625 { ··· 1634 1634 } 1635 1635 } 1636 1636 1637 - static struct notifier_block __cpuinitdata ratelimit_nb = { 1637 + static struct notifier_block ratelimit_nb = { 1638 1638 .notifier_call = ratelimit_handler, 1639 1639 .next = NULL, 1640 1640 };
+5 -5
mm/slab.c
··· 787 787 * the CPUs getting into lockstep and contending for the global cache chain 788 788 * lock. 789 789 */ 790 - static void __cpuinit start_cpu_timer(int cpu) 790 + static void start_cpu_timer(int cpu) 791 791 { 792 792 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); 793 793 ··· 1186 1186 return (n->free_objects + cachep->num - 1) / cachep->num; 1187 1187 } 1188 1188 1189 - static void __cpuinit cpuup_canceled(long cpu) 1189 + static void cpuup_canceled(long cpu) 1190 1190 { 1191 1191 struct kmem_cache *cachep; 1192 1192 struct kmem_cache_node *n = NULL; ··· 1251 1251 } 1252 1252 } 1253 1253 1254 - static int __cpuinit cpuup_prepare(long cpu) 1254 + static int cpuup_prepare(long cpu) 1255 1255 { 1256 1256 struct kmem_cache *cachep; 1257 1257 struct kmem_cache_node *n = NULL; ··· 1334 1334 return -ENOMEM; 1335 1335 } 1336 1336 1337 - static int __cpuinit cpuup_callback(struct notifier_block *nfb, 1337 + static int cpuup_callback(struct notifier_block *nfb, 1338 1338 unsigned long action, void *hcpu) 1339 1339 { 1340 1340 long cpu = (long)hcpu; ··· 1390 1390 return notifier_from_errno(err); 1391 1391 } 1392 1392 1393 - static struct notifier_block __cpuinitdata cpucache_notifier = { 1393 + static struct notifier_block cpucache_notifier = { 1394 1394 &cpuup_callback, NULL, 0 1395 1395 }; 1396 1396
+2 -2
mm/slub.c
··· 3773 3773 * Use the cpu notifier to insure that the cpu slabs are flushed when 3774 3774 * necessary. 3775 3775 */ 3776 - static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb, 3776 + static int slab_cpuup_callback(struct notifier_block *nfb, 3777 3777 unsigned long action, void *hcpu) 3778 3778 { 3779 3779 long cpu = (long)hcpu; ··· 3799 3799 return NOTIFY_OK; 3800 3800 } 3801 3801 3802 - static struct notifier_block __cpuinitdata slab_notifier = { 3802 + static struct notifier_block slab_notifier = { 3803 3803 .notifier_call = slab_cpuup_callback 3804 3804 }; 3805 3805
+3 -3
mm/vmstat.c
··· 1182 1182 round_jiffies_relative(sysctl_stat_interval)); 1183 1183 } 1184 1184 1185 - static void __cpuinit start_cpu_timer(int cpu) 1185 + static void start_cpu_timer(int cpu) 1186 1186 { 1187 1187 struct delayed_work *work = &per_cpu(vmstat_work, cpu); 1188 1188 ··· 1194 1194 * Use the cpu notifier to insure that the thresholds are recalculated 1195 1195 * when necessary. 1196 1196 */ 1197 - static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, 1197 + static int vmstat_cpuup_callback(struct notifier_block *nfb, 1198 1198 unsigned long action, 1199 1199 void *hcpu) 1200 1200 { ··· 1226 1226 return NOTIFY_OK; 1227 1227 } 1228 1228 1229 - static struct notifier_block __cpuinitdata vmstat_notifier = 1229 + static struct notifier_block vmstat_notifier = 1230 1230 { &vmstat_cpuup_callback, NULL, 0 }; 1231 1231 #endif 1232 1232