Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Merge the x86_32 and x86_64 cpu_idle() functions

Both functions are mostly identical.
The differences are:

- x86_32's cpu_idle() makes use of check_pgt_cache(), which is a
nop on both x86_32 and x86_64.

- x86_64's cpu_idle() uses enter_idle()/__exit_idle(); on x86_32 these
functions are no-ops.

- In contrast to x86_32, x86_64 calls rcu_idle_enter()/rcu_idle_exit() in
the innermost loop because the idle notifiers need RCU.
Calling these functions in the innermost loop on x86_32 as well
does not hurt.

So we can merge both functions.

Signed-off-by: Richard Weinberger <richard@nod.at>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: paulmck@linux.vnet.ibm.com
Cc: josh@joshtriplett.org
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1332709204-22496-1-git-send-email-richard@nod.at
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Richard Weinberger and committed by
Ingo Molnar
90e24014 f5243d6d

+115 -165
+1
arch/x86/include/asm/idle.h
··· 14 14 #else /* !CONFIG_X86_64 */ 15 15 static inline void enter_idle(void) { } 16 16 static inline void exit_idle(void) { } 17 + static inline void __exit_idle(void) { } 17 18 #endif /* CONFIG_X86_64 */ 18 19 19 20 void amd_e400_remove_cpu(int cpu);
+114
arch/x86/kernel/process.c
··· 12 12 #include <linux/user-return-notifier.h> 13 13 #include <linux/dmi.h> 14 14 #include <linux/utsname.h> 15 + #include <linux/stackprotector.h> 16 + #include <linux/tick.h> 17 + #include <linux/cpuidle.h> 15 18 #include <trace/events/power.h> 16 19 #include <linux/hw_breakpoint.h> 17 20 #include <asm/cpu.h> ··· 26 23 #include <asm/i387.h> 27 24 #include <asm/fpu-internal.h> 28 25 #include <asm/debugreg.h> 26 + #include <asm/nmi.h> 27 + 28 + #ifdef CONFIG_X86_64 29 + static DEFINE_PER_CPU(unsigned char, is_idle); 30 + static ATOMIC_NOTIFIER_HEAD(idle_notifier); 31 + 32 + void idle_notifier_register(struct notifier_block *n) 33 + { 34 + atomic_notifier_chain_register(&idle_notifier, n); 35 + } 36 + EXPORT_SYMBOL_GPL(idle_notifier_register); 37 + 38 + void idle_notifier_unregister(struct notifier_block *n) 39 + { 40 + atomic_notifier_chain_unregister(&idle_notifier, n); 41 + } 42 + EXPORT_SYMBOL_GPL(idle_notifier_unregister); 43 + #endif 29 44 30 45 struct kmem_cache *task_xstate_cachep; 31 46 EXPORT_SYMBOL_GPL(task_xstate_cachep); ··· 391 370 return 1; 392 371 } 393 372 #endif 373 + 374 + #ifndef CONFIG_SMP 375 + static inline void play_dead(void) 376 + { 377 + BUG(); 378 + } 379 + #endif 380 + 381 + #ifdef CONFIG_X86_64 382 + void enter_idle(void) 383 + { 384 + percpu_write(is_idle, 1); 385 + atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); 386 + } 387 + 388 + static void __exit_idle(void) 389 + { 390 + if (x86_test_and_clear_bit_percpu(0, is_idle) == 0) 391 + return; 392 + atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 393 + } 394 + 395 + /* Called from interrupts to signify idle end */ 396 + void exit_idle(void) 397 + { 398 + /* idle loop has pid 0 */ 399 + if (current->pid) 400 + return; 401 + __exit_idle(); 402 + } 403 + #endif 404 + 405 + /* 406 + * The idle thread. 
There's no useful work to be 407 + * done, so just try to conserve power and have a 408 + * low exit latency (ie sit in a loop waiting for 409 + * somebody to say that they'd like to reschedule) 410 + */ 411 + void cpu_idle(void) 412 + { 413 + /* 414 + * If we're the non-boot CPU, nothing set the stack canary up 415 + * for us. CPU0 already has it initialized but no harm in 416 + * doing it again. This is a good place for updating it, as 417 + * we wont ever return from this function (so the invalid 418 + * canaries already on the stack wont ever trigger). 419 + */ 420 + boot_init_stack_canary(); 421 + current_thread_info()->status |= TS_POLLING; 422 + 423 + while (1) { 424 + tick_nohz_idle_enter(); 425 + 426 + while (!need_resched()) { 427 + rmb(); 428 + 429 + if (cpu_is_offline(smp_processor_id())) 430 + play_dead(); 431 + 432 + /* 433 + * Idle routines should keep interrupts disabled 434 + * from here on, until they go to idle. 435 + * Otherwise, idle callbacks can misfire. 436 + */ 437 + local_touch_nmi(); 438 + local_irq_disable(); 439 + 440 + enter_idle(); 441 + 442 + /* Don't trace irqs off for idle */ 443 + stop_critical_timings(); 444 + 445 + /* enter_idle() needs rcu for notifiers */ 446 + rcu_idle_enter(); 447 + 448 + if (cpuidle_idle_call()) 449 + pm_idle(); 450 + 451 + rcu_idle_exit(); 452 + start_critical_timings(); 453 + 454 + /* In many cases the interrupt that ended idle 455 + has already called exit_idle. But some idle 456 + loops can be woken up without interrupt. */ 457 + __exit_idle(); 458 + } 459 + 460 + tick_nohz_idle_exit(); 461 + preempt_enable_no_resched(); 462 + schedule(); 463 + preempt_disable(); 464 + } 465 + } 394 466 395 467 /* 396 468 * We use this if we don't have any better
-58
arch/x86/kernel/process_32.c
··· 9 9 * This file handles the architecture-dependent parts of process handling.. 10 10 */ 11 11 12 - #include <linux/stackprotector.h> 13 12 #include <linux/cpu.h> 14 13 #include <linux/errno.h> 15 14 #include <linux/sched.h> ··· 30 31 #include <linux/kallsyms.h> 31 32 #include <linux/ptrace.h> 32 33 #include <linux/personality.h> 33 - #include <linux/tick.h> 34 34 #include <linux/percpu.h> 35 35 #include <linux/prctl.h> 36 36 #include <linux/ftrace.h> 37 37 #include <linux/uaccess.h> 38 38 #include <linux/io.h> 39 39 #include <linux/kdebug.h> 40 - #include <linux/cpuidle.h> 41 40 42 41 #include <asm/pgtable.h> 43 42 #include <asm/system.h> ··· 55 58 #include <asm/idle.h> 56 59 #include <asm/syscalls.h> 57 60 #include <asm/debugreg.h> 58 - #include <asm/nmi.h> 59 61 60 62 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 61 63 ··· 64 68 unsigned long thread_saved_pc(struct task_struct *tsk) 65 69 { 66 70 return ((unsigned long *)tsk->thread.sp)[3]; 67 - } 68 - 69 - #ifndef CONFIG_SMP 70 - static inline void play_dead(void) 71 - { 72 - BUG(); 73 - } 74 - #endif 75 - 76 - /* 77 - * The idle thread. There's no useful work to be 78 - * done, so just try to conserve power and have a 79 - * low exit latency (ie sit in a loop waiting for 80 - * somebody to say that they'd like to reschedule) 81 - */ 82 - void cpu_idle(void) 83 - { 84 - int cpu = smp_processor_id(); 85 - 86 - /* 87 - * If we're the non-boot CPU, nothing set the stack canary up 88 - * for us. CPU0 already has it initialized but no harm in 89 - * doing it again. This is a good place for updating it, as 90 - * we wont ever return from this function (so the invalid 91 - * canaries already on the stack wont ever trigger). 
92 - */ 93 - boot_init_stack_canary(); 94 - 95 - current_thread_info()->status |= TS_POLLING; 96 - 97 - /* endless idle loop with no priority at all */ 98 - while (1) { 99 - tick_nohz_idle_enter(); 100 - rcu_idle_enter(); 101 - while (!need_resched()) { 102 - 103 - check_pgt_cache(); 104 - rmb(); 105 - 106 - if (cpu_is_offline(cpu)) 107 - play_dead(); 108 - 109 - local_touch_nmi(); 110 - local_irq_disable(); 111 - /* Don't trace irqs off for idle */ 112 - stop_critical_timings(); 113 - if (cpuidle_idle_call()) 114 - pm_idle(); 115 - start_critical_timings(); 116 - } 117 - rcu_idle_exit(); 118 - tick_nohz_idle_exit(); 119 - schedule_preempt_disabled(); 120 - } 121 71 } 122 72 123 73 void __show_regs(struct pt_regs *regs, int all)
-107
arch/x86/kernel/process_64.c
··· 14 14 * This file handles the architecture-dependent parts of process handling.. 15 15 */ 16 16 17 - #include <linux/stackprotector.h> 18 17 #include <linux/cpu.h> 19 18 #include <linux/errno.h> 20 19 #include <linux/sched.h> ··· 31 32 #include <linux/notifier.h> 32 33 #include <linux/kprobes.h> 33 34 #include <linux/kdebug.h> 34 - #include <linux/tick.h> 35 35 #include <linux/prctl.h> 36 36 #include <linux/uaccess.h> 37 37 #include <linux/io.h> 38 38 #include <linux/ftrace.h> 39 - #include <linux/cpuidle.h> 40 39 41 40 #include <asm/pgtable.h> 42 41 #include <asm/system.h> ··· 49 52 #include <asm/idle.h> 50 53 #include <asm/syscalls.h> 51 54 #include <asm/debugreg.h> 52 - #include <asm/nmi.h> 53 55 54 56 asmlinkage extern void ret_from_fork(void); 55 57 56 58 DEFINE_PER_CPU(unsigned long, old_rsp); 57 - static DEFINE_PER_CPU(unsigned char, is_idle); 58 - 59 - static ATOMIC_NOTIFIER_HEAD(idle_notifier); 60 - 61 - void idle_notifier_register(struct notifier_block *n) 62 - { 63 - atomic_notifier_chain_register(&idle_notifier, n); 64 - } 65 - EXPORT_SYMBOL_GPL(idle_notifier_register); 66 - 67 - void idle_notifier_unregister(struct notifier_block *n) 68 - { 69 - atomic_notifier_chain_unregister(&idle_notifier, n); 70 - } 71 - EXPORT_SYMBOL_GPL(idle_notifier_unregister); 72 - 73 - void enter_idle(void) 74 - { 75 - percpu_write(is_idle, 1); 76 - atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL); 77 - } 78 - 79 - static void __exit_idle(void) 80 - { 81 - if (x86_test_and_clear_bit_percpu(0, is_idle) == 0) 82 - return; 83 - atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL); 84 - } 85 - 86 - /* Called from interrupts to signify idle end */ 87 - void exit_idle(void) 88 - { 89 - /* idle loop has pid 0 */ 90 - if (current->pid) 91 - return; 92 - __exit_idle(); 93 - } 94 - 95 - #ifndef CONFIG_SMP 96 - static inline void play_dead(void) 97 - { 98 - BUG(); 99 - } 100 - #endif 101 - 102 - /* 103 - * The idle thread. 
There's no useful work to be 104 - * done, so just try to conserve power and have a 105 - * low exit latency (ie sit in a loop waiting for 106 - * somebody to say that they'd like to reschedule) 107 - */ 108 - void cpu_idle(void) 109 - { 110 - current_thread_info()->status |= TS_POLLING; 111 - 112 - /* 113 - * If we're the non-boot CPU, nothing set the stack canary up 114 - * for us. CPU0 already has it initialized but no harm in 115 - * doing it again. This is a good place for updating it, as 116 - * we wont ever return from this function (so the invalid 117 - * canaries already on the stack wont ever trigger). 118 - */ 119 - boot_init_stack_canary(); 120 - 121 - /* endless idle loop with no priority at all */ 122 - while (1) { 123 - tick_nohz_idle_enter(); 124 - while (!need_resched()) { 125 - 126 - rmb(); 127 - 128 - if (cpu_is_offline(smp_processor_id())) 129 - play_dead(); 130 - /* 131 - * Idle routines should keep interrupts disabled 132 - * from here on, until they go to idle. 133 - * Otherwise, idle callbacks can misfire. 134 - */ 135 - local_touch_nmi(); 136 - local_irq_disable(); 137 - enter_idle(); 138 - /* Don't trace irqs off for idle */ 139 - stop_critical_timings(); 140 - 141 - /* enter_idle() needs rcu for notifiers */ 142 - rcu_idle_enter(); 143 - 144 - if (cpuidle_idle_call()) 145 - pm_idle(); 146 - 147 - rcu_idle_exit(); 148 - start_critical_timings(); 149 - 150 - /* In many cases the interrupt that ended idle 151 - has already called exit_idle. But some idle 152 - loops can be woken up without interrupt. */ 153 - __exit_idle(); 154 - } 155 - 156 - tick_nohz_idle_exit(); 157 - schedule_preempt_disabled(); 158 - } 159 - } 160 59 161 60 /* Prints also some state that isn't saved in the pt_regs */ 162 61 void __show_regs(struct pt_regs *regs, int all)