Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP/hotplug changes from Ingo Molnar:
"This is a pretty large, multi-arch series unifying and generalizing
the various disjunct pieces of idle routines that architectures have
historically copied from each other and have grown in random, wildly
inconsistent and sometimes buggy directions:

101 files changed, 455 insertions(+), 1328 deletions(-)

this went through a number of review and test iterations before it was
committed, it was tested on various architectures, was exposed to
linux-next for quite some time - nevertheless it might cause problems
on architectures that don't read the mailing lists and don't regularly
test linux-next.

This cat herding exercise was motivated by the -rt kernel, and was
brought to you by Thomas "the Whip" Gleixner."

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
idle: Remove GENERIC_IDLE_LOOP config switch
um: Use generic idle loop
ia64: Make sure interrupts enabled when we "safe_halt()"
sparc: Use generic idle loop
idle: Remove unused ARCH_HAS_DEFAULT_IDLE
bfin: Fix typo in arch_cpu_idle()
xtensa: Use generic idle loop
x86: Use generic idle loop
unicore: Use generic idle loop
tile: Use generic idle loop
tile: Enter idle with preemption disabled
sh: Use generic idle loop
score: Use generic idle loop
s390: Use generic idle loop
powerpc: Use generic idle loop
parisc: Use generic idle loop
openrisc: Use generic idle loop
mn10300: Use generic idle loop
mips: Use generic idle loop
microblaze: Use generic idle loop
...

+461 -1334
-2
arch/alpha/include/asm/thread_info.h
··· 95 95 #define TS_POLLING 0x0010 /* idle task polling need_resched, 96 96 skip sending interrupt */ 97 97 98 - #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) 99 - 100 98 #ifndef __ASSEMBLY__ 101 99 #define HAVE_SET_RESTORE_SIGMASK 1 102 100 static inline void set_restore_sigmask(void)
-19
arch/alpha/kernel/process.c
··· 46 46 void (*pm_power_off)(void) = machine_power_off; 47 47 EXPORT_SYMBOL(pm_power_off); 48 48 49 - void 50 - cpu_idle(void) 51 - { 52 - current_thread_info()->status |= TS_POLLING; 53 - 54 - while (1) { 55 - /* FIXME -- EV6 and LCA45 know how to power down 56 - the CPU. */ 57 - 58 - rcu_idle_enter(); 59 - while (!need_resched()) 60 - cpu_relax(); 61 - 62 - rcu_idle_exit(); 63 - schedule_preempt_disabled(); 64 - } 65 - } 66 - 67 - 68 49 struct halt_info { 69 50 int mode; 70 51 char *restart_cmd;
+1 -2
arch/alpha/kernel/smp.c
··· 167 167 cpuid, current, current->active_mm)); 168 168 169 169 preempt_disable(); 170 - /* Do nothing. */ 171 - cpu_idle(); 170 + cpu_startup_entry(CPUHP_ONLINE); 172 171 } 173 172 174 173 /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
+1 -26
arch/arc/kernel/process.c
··· 41 41 return task_thread_info(current)->thr_ptr; 42 42 } 43 43 44 - static inline void arch_idle(void) 44 + void arch_cpu_idle(void) 45 45 { 46 46 /* sleep, but enable all interrupts before committing */ 47 47 __asm__("sleep 0x3"); 48 - } 49 - 50 - void cpu_idle(void) 51 - { 52 - /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */ 53 - 54 - /* endless idle loop with no priority at all */ 55 - while (1) { 56 - tick_nohz_idle_enter(); 57 - rcu_idle_enter(); 58 - 59 - doze: 60 - local_irq_disable(); 61 - if (!need_resched()) { 62 - arch_idle(); 63 - goto doze; 64 - } else { 65 - local_irq_enable(); 66 - } 67 - 68 - rcu_idle_exit(); 69 - tick_nohz_idle_exit(); 70 - 71 - schedule_preempt_disabled(); 72 - } 73 48 } 74 49 75 50 asmlinkage void ret_from_fork(void);
+1 -1
arch/arc/kernel/smp.c
··· 141 141 142 142 local_irq_enable(); 143 143 preempt_disable(); 144 - cpu_idle(); 144 + cpu_startup_entry(CPUHP_ONLINE); 145 145 } 146 146 147 147 /*
+1
arch/arm/Kconfig
··· 15 15 select GENERIC_IRQ_SHOW 16 16 select GENERIC_PCI_IOMAP 17 17 select GENERIC_SMP_IDLE_THREAD 18 + select GENERIC_IDLE_POLL_SETUP 18 19 select GENERIC_STRNCPY_FROM_USER 19 20 select GENERIC_STRNLEN_USER 20 21 select HARDIRQS_SW_RESEND
-3
arch/arm/include/asm/system_misc.h
··· 21 21 22 22 extern unsigned int user_debug; 23 23 24 - extern void disable_hlt(void); 25 - extern void enable_hlt(void); 26 - 27 24 #endif /* !__ASSEMBLY__ */ 28 25 29 26 #endif /* __ASM_ARM_SYSTEM_MISC_H */
+27 -75
arch/arm/kernel/process.c
··· 57 57 "ARM" , "Thumb" , "Jazelle", "ThumbEE" 58 58 }; 59 59 60 - static volatile int hlt_counter; 61 - 62 - void disable_hlt(void) 63 - { 64 - hlt_counter++; 65 - } 66 - 67 - EXPORT_SYMBOL(disable_hlt); 68 - 69 - void enable_hlt(void) 70 - { 71 - hlt_counter--; 72 - BUG_ON(hlt_counter < 0); 73 - } 74 - 75 - EXPORT_SYMBOL(enable_hlt); 76 - 77 - static int __init nohlt_setup(char *__unused) 78 - { 79 - hlt_counter = 1; 80 - return 1; 81 - } 82 - 83 - static int __init hlt_setup(char *__unused) 84 - { 85 - hlt_counter = 0; 86 - return 1; 87 - } 88 - 89 - __setup("nohlt", nohlt_setup); 90 - __setup("hlt", hlt_setup); 91 - 92 60 extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); 93 61 typedef void (*phys_reset_t)(unsigned long); 94 62 ··· 140 172 local_irq_enable(); 141 173 } 142 174 143 - /* 144 - * The idle thread. 145 - * We always respect 'hlt_counter' to prevent low power idle. 146 - */ 147 - void cpu_idle(void) 175 + void arch_cpu_idle_prepare(void) 148 176 { 149 177 local_fiq_enable(); 178 + } 150 179 151 - /* endless idle loop with no priority at all */ 152 - while (1) { 153 - tick_nohz_idle_enter(); 154 - rcu_idle_enter(); 155 - ledtrig_cpu(CPU_LED_IDLE_START); 156 - while (!need_resched()) { 157 - #ifdef CONFIG_HOTPLUG_CPU 158 - if (cpu_is_offline(smp_processor_id())) 159 - cpu_die(); 160 - #endif 161 - 162 - /* 163 - * We need to disable interrupts here 164 - * to ensure we don't miss a wakeup call. 165 - */ 166 - local_irq_disable(); 180 + void arch_cpu_idle_enter(void) 181 + { 182 + ledtrig_cpu(CPU_LED_IDLE_START); 167 183 #ifdef CONFIG_PL310_ERRATA_769419 168 - wmb(); 184 + wmb(); 169 185 #endif 170 - if (hlt_counter) { 171 - local_irq_enable(); 172 - cpu_relax(); 173 - } else if (!need_resched()) { 174 - stop_critical_timings(); 175 - if (cpuidle_idle_call()) 176 - default_idle(); 177 - start_critical_timings(); 178 - /* 179 - * default_idle functions must always 180 - * return with IRQs enabled. 
181 - */ 182 - WARN_ON(irqs_disabled()); 183 - } else 184 - local_irq_enable(); 185 - } 186 - ledtrig_cpu(CPU_LED_IDLE_END); 187 - rcu_idle_exit(); 188 - tick_nohz_idle_exit(); 189 - schedule_preempt_disabled(); 190 - } 186 + } 187 + 188 + void arch_cpu_idle_exit(void) 189 + { 190 + ledtrig_cpu(CPU_LED_IDLE_END); 191 + } 192 + 193 + #ifdef CONFIG_HOTPLUG_CPU 194 + void arch_cpu_idle_dead(void) 195 + { 196 + cpu_die(); 197 + } 198 + #endif 199 + 200 + /* 201 + * Called from the core idle loop. 202 + */ 203 + void arch_cpu_idle(void) 204 + { 205 + if (cpuidle_idle_call()) 206 + default_idle(); 191 207 } 192 208 193 209 static char reboot_mode = 'h';
+1 -1
arch/arm/kernel/smp.c
··· 336 336 /* 337 337 * OK, it's off to the idle thread for us 338 338 */ 339 - cpu_idle(); 339 + cpu_startup_entry(CPUHP_ONLINE); 340 340 } 341 341 342 342 void __init smp_cpus_done(unsigned int max_cpus)
+3 -1
arch/arm/mach-gemini/idle.c
··· 13 13 * will never wakeup... Acctualy it is not very good to enable 14 14 * interrupts first since scheduler can miss a tick, but there is 15 15 * no other way around this. Platforms that needs it for power saving 16 - * should call enable_hlt() in init code, since by default it is 16 + * should enable it in init code, since by default it is 17 17 * disabled. 18 18 */ 19 + 20 + /* FIXME: Enabling interrupts here is racy! */ 19 21 local_irq_enable(); 20 22 cpu_do_idle(); 21 23 }
+3 -1
arch/arm/mach-gemini/irq.c
··· 15 15 #include <linux/stddef.h> 16 16 #include <linux/list.h> 17 17 #include <linux/sched.h> 18 + #include <linux/cpu.h> 19 + 18 20 #include <asm/irq.h> 19 21 #include <asm/mach/irq.h> 20 22 #include <asm/system_misc.h> ··· 79 77 * Disable the idle handler by default since it is buggy 80 78 * For more info see arch/arm/mach-gemini/idle.c 81 79 */ 82 - disable_hlt(); 80 + cpu_idle_poll_ctrl(true); 83 81 84 82 request_resource(&iomem_resource, &irq_resource); 85 83
+2 -1
arch/arm/mach-ixp4xx/common.c
··· 29 29 #include <linux/io.h> 30 30 #include <linux/export.h> 31 31 #include <linux/gpio.h> 32 + #include <linux/cpu.h> 32 33 33 34 #include <mach/udc.h> 34 35 #include <mach/hardware.h> ··· 240 239 * ixp4xx does not implement the XScale PWRMODE register 241 240 * so it must not call cpu_do_idle(). 242 241 */ 243 - disable_hlt(); 242 + cpu_idle_poll_ctrl(true); 244 243 245 244 /* Route all sources to IRQ instead of FIQ */ 246 245 *IXP4XX_ICLR = 0x0;
+3 -3
arch/arm/mach-omap1/pm.c
··· 43 43 #include <linux/module.h> 44 44 #include <linux/io.h> 45 45 #include <linux/atomic.h> 46 + #include <linux/cpu.h> 46 47 47 48 #include <asm/fncpy.h> 48 49 #include <asm/system_misc.h> ··· 585 584 static int omap_pm_prepare(void) 586 585 { 587 586 /* We cannot sleep in idle until we have resumed */ 588 - disable_hlt(); 589 - 587 + cpu_idle_poll_ctrl(true); 590 588 return 0; 591 589 } 592 590 ··· 621 621 622 622 static void omap_pm_finish(void) 623 623 { 624 - enable_hlt(); 624 + cpu_idle_poll_ctrl(false); 625 625 } 626 626 627 627
+4 -3
arch/arm/mach-omap2/omap_hwmod.c
··· 138 138 #include <linux/spinlock.h> 139 139 #include <linux/slab.h> 140 140 #include <linux/bootmem.h> 141 + #include <linux/cpu.h> 141 142 142 143 #include <asm/system_misc.h> 143 144 ··· 2158 2157 if (soc_ops.enable_module) 2159 2158 soc_ops.enable_module(oh); 2160 2159 if (oh->flags & HWMOD_BLOCK_WFI) 2161 - disable_hlt(); 2160 + cpu_idle_poll_ctrl(true); 2162 2161 2163 2162 if (soc_ops.update_context_lost) 2164 2163 soc_ops.update_context_lost(oh); ··· 2222 2221 _del_initiator_dep(oh, mpu_oh); 2223 2222 2224 2223 if (oh->flags & HWMOD_BLOCK_WFI) 2225 - enable_hlt(); 2224 + cpu_idle_poll_ctrl(false); 2226 2225 if (soc_ops.disable_module) 2227 2226 soc_ops.disable_module(oh); 2228 2227 ··· 2332 2331 _del_initiator_dep(oh, mpu_oh); 2333 2332 /* XXX what about the other system initiators here? dma, dsp */ 2334 2333 if (oh->flags & HWMOD_BLOCK_WFI) 2335 - enable_hlt(); 2334 + cpu_idle_poll_ctrl(false); 2336 2335 if (soc_ops.disable_module) 2337 2336 soc_ops.disable_module(oh); 2338 2337 _disable_clocks(oh);
+2 -3
arch/arm/mach-omap2/pm.c
··· 218 218 219 219 static int omap_pm_begin(suspend_state_t state) 220 220 { 221 - disable_hlt(); 221 + cpu_idle_poll_ctrl(true); 222 222 if (cpu_is_omap34xx()) 223 223 omap_prcm_irq_prepare(); 224 224 return 0; ··· 226 226 227 227 static void omap_pm_end(void) 228 228 { 229 - enable_hlt(); 230 - return; 229 + cpu_idle_poll_ctrl(false); 231 230 } 232 231 233 232 static void omap_pm_finish(void)
+2 -1
arch/arm/mach-orion5x/board-dt.c
··· 14 14 #include <linux/init.h> 15 15 #include <linux/of.h> 16 16 #include <linux/of_platform.h> 17 + #include <linux/cpu.h> 17 18 #include <asm/system_misc.h> 18 19 #include <asm/mach/arch.h> 19 20 #include <mach/orion5x.h> ··· 53 52 */ 54 53 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { 55 54 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); 56 - disable_hlt(); 55 + cpu_idle_poll_ctrl(true); 57 56 } 58 57 59 58 if (of_machine_is_compatible("lacie,ethernet-disk-mini-v2"))
+1 -1
arch/arm/mach-orion5x/common.c
··· 293 293 */ 294 294 if (dev == MV88F5281_DEV_ID && rev == MV88F5281_REV_D0) { 295 295 printk(KERN_INFO "Orion: Applying 5281 D0 WFI workaround.\n"); 296 - disable_hlt(); 296 + cpu_idle_poll_ctrl(true); 297 297 } 298 298 299 299 /*
+2 -1
arch/arm/mach-shark/core.c
··· 10 10 #include <linux/sched.h> 11 11 #include <linux/serial_8250.h> 12 12 #include <linux/io.h> 13 + #include <linux/cpu.h> 13 14 14 15 #include <asm/setup.h> 15 16 #include <asm/mach-types.h> ··· 131 130 132 131 static void shark_init_early(void) 133 132 { 134 - disable_hlt(); 133 + cpu_idle_poll_ctrl(true); 135 134 } 136 135 137 136 MACHINE_START(SHARK, "Shark")
+4 -2
arch/arm/mach-shmobile/suspend.c
··· 12 12 #include <linux/suspend.h> 13 13 #include <linux/module.h> 14 14 #include <linux/err.h> 15 + #include <linux/cpu.h> 16 + 15 17 #include <asm/io.h> 16 18 #include <asm/system_misc.h> 17 19 ··· 25 23 26 24 static int shmobile_suspend_begin(suspend_state_t state) 27 25 { 28 - disable_hlt(); 26 + cpu_idle_poll_ctrl(true); 29 27 return 0; 30 28 } 31 29 32 30 static void shmobile_suspend_end(void) 33 31 { 34 - enable_hlt(); 32 + cpu_idle_poll_ctrl(false); 35 33 } 36 34 37 35 struct platform_suspend_ops shmobile_suspend_ops = {
+2 -1
arch/arm/mach-w90x900/dev.c
··· 19 19 #include <linux/init.h> 20 20 #include <linux/platform_device.h> 21 21 #include <linux/slab.h> 22 + #include <linux/cpu.h> 22 23 23 24 #include <linux/mtd/physmap.h> 24 25 #include <linux/mtd/mtd.h> ··· 532 531 533 532 void __init nuc900_board_init(struct platform_device **device, int size) 534 533 { 535 - disable_hlt(); 534 + cpu_idle_poll_ctrl(true); 536 535 platform_add_devices(device, size); 537 536 platform_add_devices(nuc900_public_dev, ARRAY_SIZE(nuc900_public_dev)); 538 537 spi_register_board_info(nuc900_spi_board_info,
+5 -38
arch/arm64/kernel/process.c
··· 84 84 void (*pm_restart)(const char *cmd); 85 85 EXPORT_SYMBOL_GPL(pm_restart); 86 86 87 + void arch_cpu_idle_prepare(void) 88 + { 89 + local_fiq_enable(); 90 + } 87 91 88 92 /* 89 93 * This is our default idle handler. 90 94 */ 91 - static void default_idle(void) 95 + void arch_cpu_idle(void) 92 96 { 93 97 /* 94 98 * This should do all the clock switching and wait for interrupt ··· 100 96 */ 101 97 cpu_do_idle(); 102 98 local_irq_enable(); 103 - } 104 - 105 - /* 106 - * The idle thread. 107 - * We always respect 'hlt_counter' to prevent low power idle. 108 - */ 109 - void cpu_idle(void) 110 - { 111 - local_fiq_enable(); 112 - 113 - /* endless idle loop with no priority at all */ 114 - while (1) { 115 - tick_nohz_idle_enter(); 116 - rcu_idle_enter(); 117 - while (!need_resched()) { 118 - /* 119 - * We need to disable interrupts here to ensure 120 - * we don't miss a wakeup call. 121 - */ 122 - local_irq_disable(); 123 - if (!need_resched()) { 124 - stop_critical_timings(); 125 - default_idle(); 126 - start_critical_timings(); 127 - /* 128 - * default_idle functions should always return 129 - * with IRQs enabled. 130 - */ 131 - WARN_ON(irqs_disabled()); 132 - } else { 133 - local_irq_enable(); 134 - } 135 - } 136 - rcu_idle_exit(); 137 - tick_nohz_idle_exit(); 138 - schedule_preempt_disabled(); 139 - } 140 99 } 141 100 142 101 void machine_shutdown(void)
+1 -1
arch/arm64/kernel/smp.c
··· 216 216 /* 217 217 * OK, it's off to the idle thread for us 218 218 */ 219 - cpu_idle(); 219 + cpu_startup_entry(CPUHP_ONLINE); 220 220 } 221 221 222 222 void __init smp_cpus_done(unsigned int max_cpus)
+2 -11
arch/avr32/kernel/process.c
··· 30 30 * This file handles the architecture-dependent parts of process handling.. 31 31 */ 32 32 33 - void cpu_idle(void) 33 + void arch_cpu_idle(void) 34 34 { 35 - /* endless idle loop with no priority at all */ 36 - while (1) { 37 - tick_nohz_idle_enter(); 38 - rcu_idle_enter(); 39 - while (!need_resched()) 40 - cpu_idle_sleep(); 41 - rcu_idle_exit(); 42 - tick_nohz_idle_exit(); 43 - schedule_preempt_disabled(); 44 - } 35 + cpu_enter_idle(); 45 36 } 46 37 47 38 void machine_halt(void)
+7 -2
arch/avr32/kernel/time.c
··· 12 12 #include <linux/irq.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/time.h> 15 + #include <linux/cpu.h> 15 16 16 17 #include <asm/sysreg.h> 17 18 ··· 88 87 pr_debug("%s: start\n", evdev->name); 89 88 /* FALLTHROUGH */ 90 89 case CLOCK_EVT_MODE_RESUME: 91 - cpu_disable_idle_sleep(); 90 + /* 91 + * If we're using the COUNT and COMPARE registers we 92 + * need to force idle poll. 93 + */ 94 + cpu_idle_poll_ctrl(true); 92 95 break; 93 96 case CLOCK_EVT_MODE_UNUSED: 94 97 case CLOCK_EVT_MODE_SHUTDOWN: 95 98 sysreg_write(COMPARE, 0); 96 99 pr_debug("%s: stop\n", evdev->name); 97 - cpu_enable_idle_sleep(); 100 + cpu_idle_poll_ctrl(false); 98 101 break; 99 102 default: 100 103 BUG();
-24
arch/avr32/mach-at32ap/include/mach/pm.h
··· 21 21 extern void cpu_enter_idle(void); 22 22 extern void cpu_enter_standby(unsigned long sdramc_base); 23 23 24 - extern bool disable_idle_sleep; 25 - 26 - static inline void cpu_disable_idle_sleep(void) 27 - { 28 - disable_idle_sleep = true; 29 - } 30 - 31 - static inline void cpu_enable_idle_sleep(void) 32 - { 33 - disable_idle_sleep = false; 34 - } 35 - 36 - static inline void cpu_idle_sleep(void) 37 - { 38 - /* 39 - * If we're using the COUNT and COMPARE registers for 40 - * timekeeping, we can't use the IDLE state. 41 - */ 42 - if (disable_idle_sleep) 43 - cpu_relax(); 44 - else 45 - cpu_enter_idle(); 46 - } 47 - 48 24 void intc_set_suspend_handler(unsigned long offset); 49 25 #endif 50 26
-7
arch/avr32/mach-at32ap/pm-at32ap700x.S
··· 18 18 /* Same as 0xfff00000 but fits in a 21 bit signed immediate */ 19 19 #define PM_BASE -0x100000 20 20 21 - .section .bss, "wa", @nobits 22 - .global disable_idle_sleep 23 - .type disable_idle_sleep, @object 24 - disable_idle_sleep: 25 - .int 4 26 - .size disable_idle_sleep, . - disable_idle_sleep 27 - 28 21 /* Keep this close to the irq handlers */ 29 22 .section .irq.text, "ax", @progbits 30 23
+6 -26
arch/blackfin/kernel/process.c
··· 46 46 * The idle loop on BFIN 47 47 */ 48 48 #ifdef CONFIG_IDLE_L1 49 - static void default_idle(void)__attribute__((l1_text)); 50 - void cpu_idle(void)__attribute__((l1_text)); 49 + void arch_cpu_idle(void)__attribute__((l1_text)); 51 50 #endif 52 51 53 52 /* 54 53 * This is our default idle handler. We need to disable 55 54 * interrupts here to ensure we don't miss a wakeup call. 56 55 */ 57 - static void default_idle(void) 56 + void arch_cpu_idle(void) 58 57 { 59 58 #ifdef CONFIG_IPIPE 60 59 ipipe_suspend_domain(); ··· 65 66 hard_local_irq_enable(); 66 67 } 67 68 68 - /* 69 - * The idle thread. We try to conserve power, while trying to keep 70 - * overall latency low. The architecture specific idle is passed 71 - * a value to indicate the level of "idleness" of the system. 72 - */ 73 - void cpu_idle(void) 74 - { 75 - /* endless idle loop with no priority at all */ 76 - while (1) { 77 - 78 69 #ifdef CONFIG_HOTPLUG_CPU 79 - if (cpu_is_offline(smp_processor_id())) 80 - cpu_die(); 81 - #endif 82 - tick_nohz_idle_enter(); 83 - rcu_idle_enter(); 84 - while (!need_resched()) 85 - default_idle(); 86 - rcu_idle_exit(); 87 - tick_nohz_idle_exit(); 88 - preempt_enable_no_resched(); 89 - schedule(); 90 - preempt_disable(); 91 - } 70 + void arch_cpu_idle_dead(void) 71 + { 72 + cpu_die(); 92 73 } 74 + #endif 93 75 94 76 /* 95 77 * Do necessary setup to start up a newly executed thread.
+1 -1
arch/blackfin/mach-common/smp.c
··· 335 335 */ 336 336 calibrate_delay(); 337 337 338 - cpu_idle(); 338 + cpu_startup_entry(CPUHP_ONLINE); 339 339 } 340 340 341 341 void __init smp_prepare_boot_cpu(void)
+1 -27
arch/c6x/kernel/process.c
··· 33 33 void (*pm_power_off)(void); 34 34 EXPORT_SYMBOL(pm_power_off); 35 35 36 - static void c6x_idle(void) 36 + void arch_cpu_idle(void) 37 37 { 38 38 unsigned long tmp; 39 39 ··· 47 47 " mvc .s2 %0,CSR\n" 48 48 "|| idle\n" 49 49 : "=b"(tmp)); 50 - } 51 - 52 - /* 53 - * The idle loop for C64x 54 - */ 55 - void cpu_idle(void) 56 - { 57 - /* endless idle loop with no priority at all */ 58 - while (1) { 59 - tick_nohz_idle_enter(); 60 - rcu_idle_enter(); 61 - while (1) { 62 - local_irq_disable(); 63 - if (need_resched()) { 64 - local_irq_enable(); 65 - break; 66 - } 67 - c6x_idle(); /* enables local irqs */ 68 - } 69 - rcu_idle_exit(); 70 - tick_nohz_idle_exit(); 71 - 72 - preempt_enable_no_resched(); 73 - schedule(); 74 - preempt_disable(); 75 - } 76 50 } 77 51 78 52 static void halt_loop(void)
+2 -1
arch/cris/arch-v10/kernel/process.c
··· 30 30 void default_idle(void) 31 31 { 32 32 #ifdef CONFIG_ETRAX_GPIO 33 - etrax_gpio_wake_up_check(); 33 + etrax_gpio_wake_up_check(); 34 34 #endif 35 + local_irq_enable(); 35 36 } 36 37 37 38 /*
+3 -9
arch/cris/arch-v32/kernel/process.c
··· 20 20 21 21 extern void stop_watchdog(void); 22 22 23 - extern int cris_hlt_counter; 24 - 25 23 /* We use this if we don't have any better idle routine. */ 26 24 void default_idle(void) 27 25 { 28 - local_irq_disable(); 29 - if (!need_resched() && !cris_hlt_counter) { 30 - /* Halt until exception. */ 31 - __asm__ volatile("ei \n\t" 32 - "halt "); 33 - } 34 - local_irq_enable(); 26 + /* Halt until exception. */ 27 + __asm__ volatile("ei \n\t" 28 + "halt "); 35 29 } 36 30 37 31 /*
+1 -3
arch/cris/arch-v32/kernel/smp.c
··· 145 145 * specific stuff such as the local timer and the MMU. */ 146 146 void __init smp_callin(void) 147 147 { 148 - extern void cpu_idle(void); 149 - 150 148 int cpu = cpu_now_booting; 151 149 reg_intr_vect_rw_mask vect_mask = {0}; 152 150 ··· 168 170 local_irq_enable(); 169 171 170 172 set_cpu_online(cpu, true); 171 - cpu_idle(); 173 + cpu_startup_entry(CPUHP_ONLINE); 172 174 } 173 175 174 176 /* Stop execution on this CPU.*/
-7
arch/cris/include/asm/processor.h
··· 65 65 66 66 #define cpu_relax() barrier() 67 67 68 - /* 69 - * disable hlt during certain critical i/o operations 70 - */ 71 - #define HAVE_DISABLE_HLT 72 - void disable_hlt(void); 73 - void enable_hlt(void); 74 - 75 68 void default_idle(void); 76 69 77 70 #endif /* __ASM_CRIS_PROCESSOR_H */
+2 -47
arch/cris/kernel/process.c
··· 29 29 30 30 //#define DEBUG 31 31 32 - /* 33 - * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if 34 - * there would ever be a halt sequence (for power save when idle) with 35 - * some largish delay when halting or resuming *and* a driver that can't 36 - * afford that delay. The hlt_counter would then be checked before 37 - * executing the halt sequence, and the driver marks the unhaltable 38 - * region by enable_hlt/disable_hlt. 39 - */ 40 - 41 - int cris_hlt_counter=0; 42 - 43 - void disable_hlt(void) 44 - { 45 - cris_hlt_counter++; 46 - } 47 - 48 - EXPORT_SYMBOL(disable_hlt); 49 - 50 - void enable_hlt(void) 51 - { 52 - cris_hlt_counter--; 53 - } 54 - 55 - EXPORT_SYMBOL(enable_hlt); 56 - 57 32 extern void default_idle(void); 58 33 59 34 void (*pm_power_off)(void); 60 35 EXPORT_SYMBOL(pm_power_off); 61 36 62 - /* 63 - * The idle thread. There's no useful work to be 64 - * done, so just try to conserve power and have a 65 - * low exit latency (ie sit in a loop waiting for 66 - * somebody to say that they'd like to reschedule) 67 - */ 68 - 69 - void cpu_idle (void) 37 + void arch_cpu_idle(void) 70 38 { 71 - /* endless idle loop with no priority at all */ 72 - while (1) { 73 - rcu_idle_enter(); 74 - while (!need_resched()) { 75 - /* 76 - * Mark this as an RCU critical section so that 77 - * synchronize_kernel() in the unload path waits 78 - * for our completion. 79 - */ 80 - default_idle(); 81 - } 82 - rcu_idle_exit(); 83 - schedule_preempt_disabled(); 84 - } 39 + default_idle(); 85 40 } 86 41 87 42 void hard_reset_now (void);
+5 -22
arch/frv/kernel/process.c
··· 59 59 mb(); 60 60 } 61 61 62 - void (*idle)(void) = core_sleep_idle; 63 - 64 - /* 65 - * The idle thread. There's no useful work to be 66 - * done, so just try to conserve power and have a 67 - * low exit latency (ie sit in a loop waiting for 68 - * somebody to say that they'd like to reschedule) 69 - */ 70 - void cpu_idle(void) 62 + void arch_cpu_idle(void) 71 63 { 72 - /* endless idle loop with no priority at all */ 73 - while (1) { 74 - rcu_idle_enter(); 75 - while (!need_resched()) { 76 - check_pgt_cache(); 77 - 78 - if (!frv_dma_inprogress && idle) 79 - idle(); 80 - } 81 - rcu_idle_exit(); 82 - 83 - schedule_preempt_disabled(); 84 - } 64 + if (!frv_dma_inprogress) 65 + core_sleep_idle(); 66 + else 67 + local_irq_enable(); 85 68 } 86 69 87 70 void machine_restart(char * __unused)
+4 -31
arch/h8300/kernel/process.c
··· 53 53 * The idle loop on an H8/300.. 54 54 */ 55 55 #if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) 56 - static void default_idle(void) 56 + void arch_cpu_idle(void) 57 57 { 58 - local_irq_disable(); 59 - if (!need_resched()) { 60 - local_irq_enable(); 61 - /* XXX: race here! What if need_resched() gets set now? */ 62 - __asm__("sleep"); 63 - } else 64 - local_irq_enable(); 65 - } 66 - #else 67 - static void default_idle(void) 68 - { 69 - cpu_relax(); 58 + local_irq_enable(); 59 + /* XXX: race here! What if need_resched() gets set now? */ 60 + __asm__("sleep"); 70 61 } 71 62 #endif 72 - void (*idle)(void) = default_idle; 73 - 74 - /* 75 - * The idle thread. There's no useful work to be 76 - * done, so just try to conserve power and have a 77 - * low exit latency (ie sit in a loop waiting for 78 - * somebody to say that they'd like to reschedule) 79 - */ 80 - void cpu_idle(void) 81 - { 82 - while (1) { 83 - rcu_idle_enter(); 84 - while (!need_resched()) 85 - idle(); 86 - rcu_idle_exit(); 87 - schedule_preempt_disabled(); 88 - } 89 - } 90 63 91 64 void machine_restart(char * __unused) 92 65 {
+3 -20
arch/hexagon/kernel/process.c
··· 51 51 * If hardware or VM offer wait termination even though interrupts 52 52 * are disabled. 53 53 */ 54 - static void default_idle(void) 54 + void arch_cpu_idle(void) 55 55 { 56 56 __vmwait(); 57 - } 58 - 59 - void (*idle_sleep)(void) = default_idle; 60 - 61 - void cpu_idle(void) 62 - { 63 - while (1) { 64 - tick_nohz_idle_enter(); 65 - local_irq_disable(); 66 - while (!need_resched()) { 67 - idle_sleep(); 68 - /* interrupts wake us up, but aren't serviced */ 69 - local_irq_enable(); /* service interrupt */ 70 - local_irq_disable(); 71 - } 72 - local_irq_enable(); 73 - tick_nohz_idle_exit(); 74 - schedule(); 75 - } 57 + /* interrupts wake us up, but irqs are still disabled */ 58 + local_irq_enable(); 76 59 } 77 60 78 61 /*
+1 -1
arch/hexagon/kernel/smp.c
··· 184 184 185 185 local_irq_enable(); 186 186 187 - cpu_idle(); 187 + cpu_startup_entry(CPUHP_ONLINE); 188 188 } 189 189 190 190
+1
arch/ia64/include/asm/irqflags.h
··· 89 89 90 90 static inline void arch_safe_halt(void) 91 91 { 92 + arch_local_irq_enable(); 92 93 ia64_pal_halt_light(); /* PAL_HALT_LIGHT */ 93 94 } 94 95
-2
arch/ia64/include/asm/thread_info.h
··· 131 131 #define TS_POLLING 1 /* true if in idle loop and not sleeping */ 132 132 #define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */ 133 133 134 - #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) 135 - 136 134 #ifndef __ASSEMBLY__ 137 135 #define HAVE_SET_RESTORE_SIGMASK 1 138 136 static inline void set_restore_sigmask(void)
+5 -9
arch/ia64/kernel/perfmon.c
··· 42 42 #include <linux/completion.h> 43 43 #include <linux/tracehook.h> 44 44 #include <linux/slab.h> 45 + #include <linux/cpu.h> 45 46 46 47 #include <asm/errno.h> 47 48 #include <asm/intrinsics.h> ··· 1323 1322 } 1324 1323 EXPORT_SYMBOL(pfm_unregister_buffer_fmt); 1325 1324 1326 - extern void update_pal_halt_status(int); 1327 - 1328 1325 static int 1329 1326 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) 1330 1327 { ··· 1370 1371 cpu)); 1371 1372 1372 1373 /* 1373 - * disable default_idle() to go to PAL_HALT 1374 + * Force idle() into poll mode 1374 1375 */ 1375 - update_pal_halt_status(0); 1376 + cpu_idle_poll_ctrl(true); 1376 1377 1377 1378 UNLOCK_PFS(flags); 1378 1379 ··· 1429 1430 is_syswide, 1430 1431 cpu)); 1431 1432 1432 - /* 1433 - * if possible, enable default_idle() to go into PAL_HALT 1434 - */ 1435 - if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) 1436 - update_pal_halt_status(1); 1433 + /* Undo forced polling. Last session reenables pal_halt */ 1434 + cpu_idle_poll_ctrl(false); 1437 1435 1438 1436 UNLOCK_PFS(flags); 1439 1437
+16 -62
arch/ia64/kernel/process.c
··· 209 209 local_irq_disable(); /* force interrupt disable */ 210 210 } 211 211 212 - static int pal_halt = 1; 213 - static int can_do_pal_halt = 1; 214 - 215 212 static int __init nohalt_setup(char * str) 216 213 { 217 - pal_halt = can_do_pal_halt = 0; 214 + cpu_idle_poll_ctrl(true); 218 215 return 1; 219 216 } 220 217 __setup("nohalt", nohalt_setup); 221 - 222 - void 223 - update_pal_halt_status(int status) 224 - { 225 - can_do_pal_halt = pal_halt && status; 226 - } 227 - 228 - /* 229 - * We use this if we don't have any better idle routine.. 230 - */ 231 - void 232 - default_idle (void) 233 - { 234 - local_irq_enable(); 235 - while (!need_resched()) { 236 - if (can_do_pal_halt) { 237 - local_irq_disable(); 238 - if (!need_resched()) { 239 - safe_halt(); 240 - } 241 - local_irq_enable(); 242 - } else 243 - cpu_relax(); 244 - } 245 - } 246 218 247 219 #ifdef CONFIG_HOTPLUG_CPU 248 220 /* We don't actually take CPU down, just spin without interrupts. */ ··· 242 270 } 243 271 #endif /* CONFIG_HOTPLUG_CPU */ 244 272 245 - void __attribute__((noreturn)) 246 - cpu_idle (void) 273 + void arch_cpu_idle_dead(void) 274 + { 275 + play_dead(); 276 + } 277 + 278 + void arch_cpu_idle(void) 247 279 { 248 280 void (*mark_idle)(int) = ia64_mark_idle; 249 - int cpu = smp_processor_id(); 250 281 251 - /* endless idle loop with no priority at all */ 252 - while (1) { 253 - rcu_idle_enter(); 254 - if (can_do_pal_halt) { 255 - current_thread_info()->status &= ~TS_POLLING; 256 - /* 257 - * TS_POLLING-cleared state must be visible before we 258 - * test NEED_RESCHED: 259 - */ 260 - smp_mb(); 261 - } else { 262 - current_thread_info()->status |= TS_POLLING; 263 - } 264 - 265 - if (!need_resched()) { 266 282 #ifdef CONFIG_SMP 267 - min_xtp(); 283 + min_xtp(); 268 284 #endif 269 - rmb(); 270 - if (mark_idle) 271 - (*mark_idle)(1); 285 + rmb(); 286 + if (mark_idle) 287 + (*mark_idle)(1); 272 288 273 - default_idle(); 274 - if (mark_idle) 275 - (*mark_idle)(0); 289 + safe_halt(); 290 + 291 
+ if (mark_idle) 292 + (*mark_idle)(0); 276 293 #ifdef CONFIG_SMP 277 - normal_xtp(); 294 + normal_xtp(); 278 295 #endif 279 - } 280 - rcu_idle_exit(); 281 - schedule_preempt_disabled(); 282 - check_pgt_cache(); 283 - if (cpu_is_offline(cpu)) 284 - play_dead(); 285 - } 286 296 } 287 297 288 298 void
+1 -1
arch/ia64/kernel/smpboot.c
··· 455 455 preempt_disable(); 456 456 smp_callin(); 457 457 458 - cpu_idle(); 458 + cpu_startup_entry(CPUHP_ONLINE); 459 459 return 0; 460 460 } 461 461
-18
arch/m32r/kernel/process.c
··· 47 47 void (*pm_power_off)(void) = NULL; 48 48 EXPORT_SYMBOL(pm_power_off); 49 49 50 - /* 51 - * The idle thread. There's no useful work to be 52 - * done, so just try to conserve power and have a 53 - * low exit latency (ie sit in a loop waiting for 54 - * somebody to say that they'd like to reschedule) 55 - */ 56 - void cpu_idle (void) 57 - { 58 - /* endless idle loop with no priority at all */ 59 - while (1) { 60 - rcu_idle_enter(); 61 - while (!need_resched()) 62 - cpu_relax(); 63 - rcu_idle_exit(); 64 - schedule_preempt_disabled(); 65 - } 66 - } 67 - 68 50 void machine_restart(char *__unused) 69 51 { 70 52 #if defined(CONFIG_PLAT_MAPPI3)
+1 -1
arch/m32r/kernel/smpboot.c
··· 432 432 */ 433 433 local_flush_tlb_all(); 434 434 435 - cpu_idle(); 435 + cpu_startup_entry(CPUHP_ONLINE); 436 436 return 0; 437 437 } 438 438
+4 -28
arch/m68k/kernel/process.c
··· 51 51 return sw->retpc; 52 52 } 53 53 54 - /* 55 - * The idle loop on an m68k.. 56 - */ 57 - static void default_idle(void) 54 + void arch_cpu_idle(void) 58 55 { 59 - if (!need_resched()) 60 56 #if defined(MACH_ATARI_ONLY) 61 - /* block out HSYNC on the atari (falcon) */ 62 - __asm__("stop #0x2200" : : : "cc"); 57 + /* block out HSYNC on the atari (falcon) */ 58 + __asm__("stop #0x2200" : : : "cc"); 63 59 #else 64 - __asm__("stop #0x2000" : : : "cc"); 60 + __asm__("stop #0x2000" : : : "cc"); 65 61 #endif 66 - } 67 - 68 - void (*idle)(void) = default_idle; 69 - 70 - /* 71 - * The idle thread. There's no useful work to be 72 - * done, so just try to conserve power and have a 73 - * low exit latency (ie sit in a loop waiting for 74 - * somebody to say that they'd like to reschedule) 75 - */ 76 - void cpu_idle(void) 77 - { 78 - /* endless idle loop with no priority at all */ 79 - while (1) { 80 - rcu_idle_enter(); 81 - while (!need_resched()) 82 - idle(); 83 - rcu_idle_exit(); 84 - schedule_preempt_disabled(); 85 - } 86 62 } 87 63 88 64 void machine_restart(char * __unused)
-2
arch/metag/include/asm/thread_info.h
··· 150 150 #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ 151 151 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP)) 152 152 153 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 154 - 155 153 #endif /* _ASM_THREAD_INFO_H */
+6 -29
arch/metag/kernel/process.c
··· 22 22 #include <linux/pm.h> 23 23 #include <linux/syscalls.h> 24 24 #include <linux/uaccess.h> 25 + #include <linux/smp.h> 25 26 #include <asm/core_reg.h> 26 27 #include <asm/user_gateway.h> 27 28 #include <asm/tcm.h> ··· 32 31 /* 33 32 * Wait for the next interrupt and enable local interrupts 34 33 */ 35 - static inline void arch_idle(void) 34 + void arch_cpu_idle(void) 36 35 { 37 36 int tmp; 38 37 ··· 60 59 : "r" (get_trigger_mask())); 61 60 } 62 61 63 - void cpu_idle(void) 64 - { 65 - set_thread_flag(TIF_POLLING_NRFLAG); 66 - 67 - while (1) { 68 - tick_nohz_idle_enter(); 69 - rcu_idle_enter(); 70 - 71 - while (!need_resched()) { 72 - /* 73 - * We need to disable interrupts here to ensure we don't 74 - * miss a wakeup call. 75 - */ 76 - local_irq_disable(); 77 - if (!need_resched()) { 78 62 #ifdef CONFIG_HOTPLUG_CPU 79 - if (cpu_is_offline(smp_processor_id())) 80 - cpu_die(); 81 - #endif 82 - arch_idle(); 83 - } else { 84 - local_irq_enable(); 85 - } 86 - } 87 - 88 - rcu_idle_exit(); 89 - tick_nohz_idle_exit(); 90 - schedule_preempt_disabled(); 91 - } 63 + void arch_cpu_idle_dead(void) 64 + { 65 + cpu_die(); 92 66 } 67 + #endif 93 68 94 69 void (*pm_power_off)(void); 95 70 EXPORT_SYMBOL(pm_power_off);
+1 -1
arch/metag/kernel/smp.c
··· 297 297 /* 298 298 * OK, it's off to the idle thread for us 299 299 */ 300 - cpu_idle(); 300 + cpu_startup_entry(CPUHP_ONLINE); 301 301 } 302 302 303 303 void __init smp_cpus_done(unsigned int max_cpus)
+1
arch/microblaze/Kconfig
··· 26 26 select GENERIC_CPU_DEVICES 27 27 select GENERIC_ATOMIC64 28 28 select GENERIC_CLOCKEVENTS 29 + select GENERIC_IDLE_POLL_SETUP 29 30 select MODULES_USE_ELF_RELA 30 31 select CLONE_BACKWARDS 31 32
-5
arch/microblaze/include/asm/processor.h
··· 22 22 extern const struct seq_operations cpuinfo_op; 23 23 24 24 # define cpu_relax() barrier() 25 - # define cpu_sleep() do {} while (0) 26 25 27 26 #define task_pt_regs(tsk) \ 28 27 (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) ··· 158 159 159 160 # define STACK_TOP TASK_SIZE 160 161 # define STACK_TOP_MAX STACK_TOP 161 - 162 - void disable_hlt(void); 163 - void enable_hlt(void); 164 - void default_idle(void); 165 162 166 163 #ifdef CONFIG_DEBUG_FS 167 164 extern struct dentry *of_debugfs_root;
-1
arch/microblaze/include/asm/thread_info.h
··· 182 182 ti->status &= ~TS_RESTORE_SIGMASK; 183 183 return true; 184 184 } 185 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 186 185 #endif 187 186 188 187 #endif /* __KERNEL__ */
-65
arch/microblaze/kernel/process.c
··· 44 44 void (*pm_power_off)(void) = NULL; 45 45 EXPORT_SYMBOL(pm_power_off); 46 46 47 - static int hlt_counter = 1; 48 - 49 - void disable_hlt(void) 50 - { 51 - hlt_counter++; 52 - } 53 - EXPORT_SYMBOL(disable_hlt); 54 - 55 - void enable_hlt(void) 56 - { 57 - hlt_counter--; 58 - } 59 - EXPORT_SYMBOL(enable_hlt); 60 - 61 - static int __init nohlt_setup(char *__unused) 62 - { 63 - hlt_counter = 1; 64 - return 1; 65 - } 66 - __setup("nohlt", nohlt_setup); 67 - 68 - static int __init hlt_setup(char *__unused) 69 - { 70 - hlt_counter = 0; 71 - return 1; 72 - } 73 - __setup("hlt", hlt_setup); 74 - 75 - void default_idle(void) 76 - { 77 - if (likely(hlt_counter)) { 78 - local_irq_disable(); 79 - stop_critical_timings(); 80 - cpu_relax(); 81 - start_critical_timings(); 82 - local_irq_enable(); 83 - } else { 84 - clear_thread_flag(TIF_POLLING_NRFLAG); 85 - smp_mb__after_clear_bit(); 86 - local_irq_disable(); 87 - while (!need_resched()) 88 - cpu_sleep(); 89 - local_irq_enable(); 90 - set_thread_flag(TIF_POLLING_NRFLAG); 91 - } 92 - } 93 - 94 - void cpu_idle(void) 95 - { 96 - set_thread_flag(TIF_POLLING_NRFLAG); 97 - 98 - /* endless idle loop with no priority at all */ 99 - while (1) { 100 - tick_nohz_idle_enter(); 101 - rcu_idle_enter(); 102 - while (!need_resched()) 103 - default_idle(); 104 - rcu_idle_exit(); 105 - tick_nohz_idle_exit(); 106 - 107 - schedule_preempt_disabled(); 108 - check_pgt_cache(); 109 - } 110 - } 111 - 112 47 void flush_thread(void) 113 48 { 114 49 }
+18 -36
arch/mips/kernel/process.c
··· 41 41 #include <asm/inst.h> 42 42 #include <asm/stacktrace.h> 43 43 44 - /* 45 - * The idle thread. There's no useful work to be done, so just try to conserve 46 - * power and have a low exit latency (ie sit in a loop waiting for somebody to 47 - * say that they'd like to reschedule) 48 - */ 49 - void __noreturn cpu_idle(void) 50 - { 51 - int cpu; 52 - 53 - /* CPU is going idle. */ 54 - cpu = smp_processor_id(); 55 - 56 - /* endless idle loop with no priority at all */ 57 - while (1) { 58 - tick_nohz_idle_enter(); 59 - rcu_idle_enter(); 60 - while (!need_resched() && cpu_online(cpu)) { 61 - #ifdef CONFIG_MIPS_MT_SMTC 62 - extern void smtc_idle_loop_hook(void); 63 - 64 - smtc_idle_loop_hook(); 65 - #endif 66 - 67 - if (cpu_wait) { 68 - /* Don't trace irqs off for idle */ 69 - stop_critical_timings(); 70 - (*cpu_wait)(); 71 - start_critical_timings(); 72 - } 73 - } 74 44 #ifdef CONFIG_HOTPLUG_CPU 75 - if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map)) 76 - play_dead(); 45 + void arch_cpu_idle_dead(void) 46 + { 47 + /* What the heck is this check doing ? */ 48 + if (!cpu_isset(smp_processor_id(), cpu_callin_map)) 49 + play_dead(); 50 + } 77 51 #endif 78 - rcu_idle_exit(); 79 - tick_nohz_idle_exit(); 80 - schedule_preempt_disabled(); 81 - } 52 + 53 + void arch_cpu_idle(void) 54 + { 55 + #ifdef CONFIG_MIPS_MT_SMTC 56 + extern void smtc_idle_loop_hook(void); 57 + 58 + smtc_idle_loop_hook(); 59 + #endif 60 + if (cpu_wait) 61 + (*cpu_wait)(); 62 + else 63 + local_irq_enable(); 82 64 } 83 65 84 66 asmlinkage void ret_from_fork(void);
+1 -1
arch/mips/kernel/smp.c
··· 139 139 WARN_ON_ONCE(!irqs_disabled()); 140 140 mp_ops->smp_finish(); 141 141 142 - cpu_idle(); 142 + cpu_startup_entry(CPUHP_ONLINE); 143 143 } 144 144 145 145 /*
-2
arch/mn10300/include/asm/thread_info.h
··· 165 165 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 166 166 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 167 167 168 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 169 - 170 168 #endif /* __KERNEL__ */ 171 169 172 170 #endif /* _ASM_THREAD_INFO_H */
+6 -64
arch/mn10300/kernel/process.c
··· 50 50 void (*pm_power_off)(void); 51 51 EXPORT_SYMBOL(pm_power_off); 52 52 53 - #if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) 54 - /* 55 - * we use this if we don't have any better idle routine 56 - */ 57 - static void default_idle(void) 58 - { 59 - local_irq_disable(); 60 - if (!need_resched()) 61 - safe_halt(); 62 - else 63 - local_irq_enable(); 64 - } 65 - 66 - #else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ 67 53 /* 68 54 * On SMP it's slightly faster (but much more power-consuming!) 69 55 * to poll the ->work.need_resched flag instead of waiting for the 70 56 * cross-CPU IPI to arrive. Use this option with caution. 57 + * 58 + * tglx: No idea why this depends on HOTPLUG_CPU !?! 71 59 */ 72 - static inline void poll_idle(void) 60 + #if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) 61 + void arch_cpu_idle(void) 73 62 { 74 - int oldval; 75 - 76 - local_irq_enable(); 77 - 78 - /* 79 - * Deal with another CPU just having chosen a thread to 80 - * run here: 81 - */ 82 - oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); 83 - 84 - if (!oldval) { 85 - set_thread_flag(TIF_POLLING_NRFLAG); 86 - while (!need_resched()) 87 - cpu_relax(); 88 - clear_thread_flag(TIF_POLLING_NRFLAG); 89 - } else { 90 - set_need_resched(); 91 - } 63 + safe_halt(); 92 64 } 93 - #endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ 94 - 95 - /* 96 - * the idle thread 97 - * - there's no useful work to be done, so just try to conserve power and have 98 - * a low exit latency (ie sit in a loop waiting for somebody to say that 99 - * they'd like to reschedule) 100 - */ 101 - void cpu_idle(void) 102 - { 103 - /* endless idle loop with no priority at all */ 104 - for (;;) { 105 - rcu_idle_enter(); 106 - while (!need_resched()) { 107 - void (*idle)(void); 108 - 109 - smp_rmb(); 110 - if (!idle) { 111 - #if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) 112 - idle = poll_idle; 113 - #else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ 114 - idle = default_idle; 115 - #endif /* CONFIG_SMP 
&& !CONFIG_HOTPLUG_CPU */ 116 - } 117 - idle(); 118 - } 119 - rcu_idle_exit(); 120 - 121 - schedule_preempt_disabled(); 122 - } 123 - } 65 + #endif 124 66 125 67 void release_segments(struct mm_struct *mm) 126 68 {
+1 -6
arch/mn10300/kernel/smp.c
··· 675 675 #ifdef CONFIG_GENERIC_CLOCKEVENTS 676 676 init_clockevents(); 677 677 #endif 678 - cpu_idle(); 678 + cpu_startup_entry(CPUHP_ONLINE); 679 679 return 0; 680 680 } 681 681 ··· 935 935 int timeout; 936 936 937 937 #ifdef CONFIG_HOTPLUG_CPU 938 - if (num_online_cpus() == 1) 939 - disable_hlt(); 940 938 if (sleep_mode[cpu]) 941 939 run_wakeup_cpu(cpu); 942 940 #endif /* CONFIG_HOTPLUG_CPU */ ··· 1001 1003 void __cpu_die(unsigned int cpu) 1002 1004 { 1003 1005 run_sleep_cpu(cpu); 1004 - 1005 - if (num_online_cpus() == 1) 1006 - enable_hlt(); 1007 1006 } 1008 1007 1009 1008 #ifdef CONFIG_MN10300_CACHE_ENABLED
-2
arch/openrisc/include/asm/thread_info.h
··· 128 128 /* For OpenRISC, this is anything in the LSW other than syscall trace */ 129 129 #define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP)) 130 130 131 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 132 - 133 131 #endif /* __KERNEL__ */ 134 132 135 133 #endif /* _ASM_THREAD_INFO_H */
+1 -1
arch/openrisc/kernel/Makefile
··· 4 4 5 5 extra-y := head.o vmlinux.lds 6 6 7 - obj-y := setup.o idle.o or32_ksyms.o process.o dma.o \ 7 + obj-y := setup.o or32_ksyms.o process.o dma.o \ 8 8 traps.o time.o irq.o entry.o ptrace.o signal.o \ 9 9 sys_call_table.o 10 10
-73
arch/openrisc/kernel/idle.c
··· 1 - /* 2 - * OpenRISC idle.c 3 - * 4 - * Linux architectural port borrowing liberally from similar works of 5 - * others. All original copyrights apply as per the original source 6 - * declaration. 7 - * 8 - * Modifications for the OpenRISC architecture: 9 - * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> 10 - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> 11 - * 12 - * This program is free software; you can redistribute it and/or 13 - * modify it under the terms of the GNU General Public License 14 - * as published by the Free Software Foundation; either version 15 - * 2 of the License, or (at your option) any later version. 16 - * 17 - * Idle daemon for or32. Idle daemon will handle any action 18 - * that needs to be taken when the system becomes idle. 19 - */ 20 - 21 - #include <linux/errno.h> 22 - #include <linux/sched.h> 23 - #include <linux/kernel.h> 24 - #include <linux/mm.h> 25 - #include <linux/smp.h> 26 - #include <linux/stddef.h> 27 - #include <linux/unistd.h> 28 - #include <linux/ptrace.h> 29 - #include <linux/slab.h> 30 - #include <linux/tick.h> 31 - 32 - #include <asm/pgtable.h> 33 - #include <asm/uaccess.h> 34 - #include <asm/io.h> 35 - #include <asm/processor.h> 36 - #include <asm/mmu.h> 37 - #include <asm/cache.h> 38 - #include <asm/pgalloc.h> 39 - 40 - void (*powersave) (void) = NULL; 41 - 42 - void cpu_idle(void) 43 - { 44 - set_thread_flag(TIF_POLLING_NRFLAG); 45 - 46 - /* endless idle loop with no priority at all */ 47 - while (1) { 48 - tick_nohz_idle_enter(); 49 - rcu_idle_enter(); 50 - 51 - while (!need_resched()) { 52 - check_pgt_cache(); 53 - rmb(); 54 - 55 - clear_thread_flag(TIF_POLLING_NRFLAG); 56 - 57 - local_irq_disable(); 58 - /* Don't trace irqs off for idle */ 59 - stop_critical_timings(); 60 - if (!need_resched() && powersave != NULL) 61 - powersave(); 62 - start_critical_timings(); 63 - local_irq_enable(); 64 - set_thread_flag(TIF_POLLING_NRFLAG); 65 - } 66 - 67 - rcu_idle_exit(); 68 - 
tick_nohz_idle_exit(); 69 - preempt_enable_no_resched(); 70 - schedule(); 71 - preempt_disable(); 72 - } 73 - }
-2
arch/parisc/include/asm/thread_info.h
··· 77 77 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ 78 78 _TIF_BLOCKSTEP) 79 79 80 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 81 - 82 80 #endif /* __KERNEL__ */ 83 81 84 82 #endif /* _ASM_PARISC_THREAD_INFO_H */
-22
arch/parisc/kernel/process.c
··· 59 59 #include <asm/unwind.h> 60 60 #include <asm/sections.h> 61 61 62 - /* 63 - * The idle thread. There's no useful work to be 64 - * done, so just try to conserve power and have a 65 - * low exit latency (ie sit in a loop waiting for 66 - * somebody to say that they'd like to reschedule) 67 - */ 68 - void cpu_idle(void) 69 - { 70 - set_thread_flag(TIF_POLLING_NRFLAG); 71 - 72 - /* endless idle loop with no priority at all */ 73 - while (1) { 74 - rcu_idle_enter(); 75 - while (!need_resched()) 76 - barrier(); 77 - rcu_idle_exit(); 78 - schedule_preempt_disabled(); 79 - check_pgt_cache(); 80 - } 81 - } 82 - 83 - 84 62 #define COMMAND_GLOBAL F_EXTEND(0xfffe0030) 85 63 #define CMD_RESET 5 /* reset any module */ 86 64
+1 -1
arch/parisc/kernel/smp.c
··· 329 329 330 330 local_irq_enable(); /* Interrupts have been off until now */ 331 331 332 - cpu_idle(); /* Wait for timer to schedule some work */ 332 + cpu_startup_entry(CPUHP_ONLINE); 333 333 334 334 /* NOTREACHED */ 335 335 panic("smp_callin() AAAAaaaaahhhh....\n");
-2
arch/powerpc/include/asm/thread_info.h
··· 182 182 #define is_32bit_task() (1) 183 183 #endif 184 184 185 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 186 - 187 185 #endif /* !__ASSEMBLY__ */ 188 186 189 187 #endif /* __KERNEL__ */
+28 -59
arch/powerpc/kernel/idle.c
··· 33 33 #include <asm/runlatch.h> 34 34 #include <asm/smp.h> 35 35 36 - #ifdef CONFIG_HOTPLUG_CPU 37 - #define cpu_should_die() cpu_is_offline(smp_processor_id()) 38 - #else 39 - #define cpu_should_die() 0 40 - #endif 41 36 42 37 unsigned long cpuidle_disable = IDLE_NO_OVERRIDE; 43 38 EXPORT_SYMBOL(cpuidle_disable); ··· 45 50 } 46 51 __setup("powersave=off", powersave_off); 47 52 48 - /* 49 - * The body of the idle task. 50 - */ 51 - void cpu_idle(void) 53 + #ifdef CONFIG_HOTPLUG_CPU 54 + void arch_cpu_idle_dead(void) 52 55 { 53 - set_thread_flag(TIF_POLLING_NRFLAG); 54 - while (1) { 55 - tick_nohz_idle_enter(); 56 - rcu_idle_enter(); 56 + sched_preempt_enable_no_resched(); 57 + cpu_die(); 58 + } 59 + #endif 57 60 58 - while (!need_resched() && !cpu_should_die()) { 59 - ppc64_runlatch_off(); 61 + void arch_cpu_idle(void) 62 + { 63 + ppc64_runlatch_off(); 60 64 61 - if (ppc_md.power_save) { 62 - clear_thread_flag(TIF_POLLING_NRFLAG); 63 - /* 64 - * smp_mb is so clearing of TIF_POLLING_NRFLAG 65 - * is ordered w.r.t. need_resched() test. 66 - */ 67 - smp_mb(); 68 - local_irq_disable(); 69 - 70 - /* Don't trace irqs off for idle */ 71 - stop_critical_timings(); 72 - 73 - /* check again after disabling irqs */ 74 - if (!need_resched() && !cpu_should_die()) 75 - ppc_md.power_save(); 76 - 77 - start_critical_timings(); 78 - 79 - /* Some power_save functions return with 80 - * interrupts enabled, some don't. 81 - */ 82 - if (irqs_disabled()) 83 - local_irq_enable(); 84 - set_thread_flag(TIF_POLLING_NRFLAG); 85 - 86 - } else { 87 - /* 88 - * Go into low thread priority and possibly 89 - * low power mode. 
90 - */ 91 - HMT_low(); 92 - HMT_very_low(); 93 - } 94 - } 95 - 96 - HMT_medium(); 97 - ppc64_runlatch_on(); 98 - rcu_idle_exit(); 99 - tick_nohz_idle_exit(); 100 - if (cpu_should_die()) { 101 - sched_preempt_enable_no_resched(); 102 - cpu_die(); 103 - } 104 - schedule_preempt_disabled(); 65 + if (ppc_md.power_save) { 66 + ppc_md.power_save(); 67 + /* 68 + * Some power_save functions return with 69 + * interrupts enabled, some don't. 70 + */ 71 + if (irqs_disabled()) 72 + local_irq_enable(); 73 + } else { 74 + local_irq_enable(); 75 + /* 76 + * Go into low thread priority and possibly 77 + * low power mode. 78 + */ 79 + HMT_low(); 80 + HMT_very_low(); 105 81 } 82 + 83 + HMT_medium(); 84 + ppc64_runlatch_on(); 106 85 } 107 86 108 87 int powersave_nap;
+1 -1
arch/powerpc/kernel/smp.c
··· 669 669 670 670 local_irq_enable(); 671 671 672 - cpu_idle(); 672 + cpu_startup_entry(CPUHP_ONLINE); 673 673 674 674 BUG(); 675 675 }
+9 -23
arch/s390/kernel/process.c
··· 61 61 return sf->gprs[8]; 62 62 } 63 63 64 - /* 65 - * The idle loop on a S390... 66 - */ 67 - static void default_idle(void) 64 + void arch_cpu_idle(void) 68 65 { 69 - if (cpu_is_offline(smp_processor_id())) 70 - cpu_die(); 71 - local_irq_disable(); 72 - if (need_resched()) { 73 - local_irq_enable(); 74 - return; 75 - } 76 66 local_mcck_disable(); 77 67 if (test_thread_flag(TIF_MCCK_PENDING)) { 78 68 local_mcck_enable(); ··· 73 83 vtime_stop_cpu(); 74 84 } 75 85 76 - void cpu_idle(void) 86 + void arch_cpu_idle_exit(void) 77 87 { 78 - for (;;) { 79 - tick_nohz_idle_enter(); 80 - rcu_idle_enter(); 81 - while (!need_resched() && !test_thread_flag(TIF_MCCK_PENDING)) 82 - default_idle(); 83 - rcu_idle_exit(); 84 - tick_nohz_idle_exit(); 85 - if (test_thread_flag(TIF_MCCK_PENDING)) 86 - s390_handle_mcck(); 87 - schedule_preempt_disabled(); 88 - } 88 + if (test_thread_flag(TIF_MCCK_PENDING)) 89 + s390_handle_mcck(); 90 + } 91 + 92 + void arch_cpu_idle_dead(void) 93 + { 94 + cpu_die(); 89 95 } 90 96 91 97 extern void __kprobes kernel_thread_starter(void);
+1 -2
arch/s390/kernel/smp.c
··· 714 714 set_cpu_online(smp_processor_id(), true); 715 715 inc_irq_stat(CPU_RST); 716 716 local_irq_enable(); 717 - /* cpu_idle will call schedule for us */ 718 - cpu_idle(); 717 + cpu_startup_entry(CPUHP_ONLINE); 719 718 } 720 719 721 720 /* Upping and downing of CPUs */
-5
arch/s390/kernel/vtime.c
··· 158 158 unsigned long psw_mask; 159 159 160 160 trace_hardirqs_on(); 161 - /* Don't trace preempt off for idle. */ 162 - stop_critical_timings(); 163 161 164 162 /* Wait for external, I/O or machine check interrupt. */ 165 163 psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | ··· 166 168 167 169 /* Call the assembler magic in entry.S */ 168 170 psw_idle(idle, psw_mask); 169 - 170 - /* Reenable preemption tracer. */ 171 - start_critical_timings(); 172 171 173 172 /* Account time spent with enabled wait psw loaded as idle time. */ 174 173 idle->sequence++;
-18
arch/score/kernel/process.c
··· 41 41 /* If or when software machine-power-off is implemented, add code here. */ 42 42 void machine_power_off(void) {} 43 43 44 - /* 45 - * The idle thread. There's no useful work to be 46 - * done, so just try to conserve power and have a 47 - * low exit latency (ie sit in a loop waiting for 48 - * somebody to say that they'd like to reschedule) 49 - */ 50 - void __noreturn cpu_idle(void) 51 - { 52 - /* endless idle loop with no priority at all */ 53 - while (1) { 54 - rcu_idle_enter(); 55 - while (!need_resched()) 56 - barrier(); 57 - rcu_idle_exit(); 58 - schedule_preempt_disabled(); 59 - } 60 - } 61 - 62 44 void ret_from_fork(void); 63 45 void ret_from_kernel_thread(void); 64 46
+1 -3
arch/sh/Kconfig
··· 33 33 select GENERIC_ATOMIC64 34 34 select GENERIC_IRQ_SHOW 35 35 select GENERIC_SMP_IDLE_THREAD 36 + select GENERIC_IDLE_POLL_SETUP 36 37 select GENERIC_CLOCKEVENTS 37 38 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST 38 39 select GENERIC_STRNCPY_FROM_USER ··· 148 147 149 148 config ARCH_HAS_ILOG2_U64 150 149 def_bool n 151 - 152 - config ARCH_HAS_DEFAULT_IDLE 153 - def_bool y 154 150 155 151 config NO_IOPORT 156 152 def_bool !PCI
-2
arch/sh/include/asm/thread_info.h
··· 207 207 return true; 208 208 } 209 209 210 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 211 - 212 210 #endif /* !__ASSEMBLY__ */ 213 211 214 212 #endif /* __KERNEL__ */
+13 -92
arch/sh/kernel/idle.c
··· 24 24 25 25 static void (*sh_idle)(void); 26 26 27 - static int hlt_counter; 28 - 29 - static int __init nohlt_setup(char *__unused) 30 - { 31 - hlt_counter = 1; 32 - return 1; 33 - } 34 - __setup("nohlt", nohlt_setup); 35 - 36 - static int __init hlt_setup(char *__unused) 37 - { 38 - hlt_counter = 0; 39 - return 1; 40 - } 41 - __setup("hlt", hlt_setup); 42 - 43 - static inline int hlt_works(void) 44 - { 45 - return !hlt_counter; 46 - } 47 - 48 - /* 49 - * On SMP it's slightly faster (but much more power-consuming!) 50 - * to poll the ->work.need_resched flag instead of waiting for the 51 - * cross-CPU IPI to arrive. Use this option with caution. 52 - */ 53 - static void poll_idle(void) 54 - { 55 - local_irq_enable(); 56 - while (!need_resched()) 57 - cpu_relax(); 58 - } 59 - 60 27 void default_idle(void) 61 28 { 62 - if (hlt_works()) { 63 - clear_thread_flag(TIF_POLLING_NRFLAG); 64 - smp_mb__after_clear_bit(); 65 - 66 - set_bl_bit(); 67 - if (!need_resched()) { 68 - local_irq_enable(); 69 - cpu_sleep(); 70 - } else 71 - local_irq_enable(); 72 - 73 - set_thread_flag(TIF_POLLING_NRFLAG); 74 - clear_bl_bit(); 75 - } else 76 - poll_idle(); 29 + set_bl_bit(); 30 + local_irq_enable(); 31 + /* Isn't this racy ? */ 32 + cpu_sleep(); 33 + clear_bl_bit(); 77 34 } 78 35 79 - /* 80 - * The idle thread. 
There's no useful work to be done, so just try to conserve 81 - * power and have a low exit latency (ie sit in a loop waiting for somebody to 82 - * say that they'd like to reschedule) 83 - */ 84 - void cpu_idle(void) 36 + void arch_cpu_idle_dead(void) 85 37 { 86 - unsigned int cpu = smp_processor_id(); 38 + play_dead(); 39 + } 87 40 88 - set_thread_flag(TIF_POLLING_NRFLAG); 89 - 90 - /* endless idle loop with no priority at all */ 91 - while (1) { 92 - tick_nohz_idle_enter(); 93 - rcu_idle_enter(); 94 - 95 - while (!need_resched()) { 96 - check_pgt_cache(); 97 - rmb(); 98 - 99 - if (cpu_is_offline(cpu)) 100 - play_dead(); 101 - 102 - local_irq_disable(); 103 - /* Don't trace irqs off for idle */ 104 - stop_critical_timings(); 105 - if (cpuidle_idle_call()) 106 - sh_idle(); 107 - /* 108 - * Sanity check to ensure that sh_idle() returns 109 - * with IRQs enabled 110 - */ 111 - WARN_ON(irqs_disabled()); 112 - start_critical_timings(); 113 - } 114 - 115 - rcu_idle_exit(); 116 - tick_nohz_idle_exit(); 117 - schedule_preempt_disabled(); 118 - } 41 + void arch_cpu_idle(void) 42 + { 43 + if (cpuidle_idle_call()) 44 + sh_idle(); 119 45 } 120 46 121 47 void __init select_idle_routine(void) ··· 49 123 /* 50 124 * If a platform has set its own idle routine, leave it alone. 51 125 */ 52 - if (sh_idle) 53 - return; 54 - 55 - if (hlt_works()) 126 + if (!sh_idle) 56 127 sh_idle = default_idle; 57 - else 58 - sh_idle = poll_idle; 59 128 } 60 129 61 130 void stop_this_cpu(void *unused)
+1 -1
arch/sh/kernel/smp.c
··· 203 203 set_cpu_online(cpu, true); 204 204 per_cpu(cpu_state, cpu) = CPU_ONLINE; 205 205 206 - cpu_idle(); 206 + cpu_startup_entry(CPUHP_ONLINE); 207 207 } 208 208 209 209 extern struct {
-2
arch/sparc/include/asm/thread_info_32.h
··· 132 132 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ 133 133 _TIF_SIGPENDING) 134 134 135 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 136 - 137 135 #endif /* __KERNEL__ */ 138 136 139 137 #endif /* _ASM_THREAD_INFO_H */
-2
arch/sparc/include/asm/thread_info_64.h
··· 256 256 return true; 257 257 } 258 258 259 - #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 260 - 261 259 #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) 262 260 #define test_thread_64bit_stack(__SP) \ 263 261 ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+1 -2
arch/sparc/kernel/hvtramp.S
··· 128 128 129 129 call smp_callin 130 130 nop 131 - call cpu_idle 132 - mov 0, %o0 131 + 133 132 call cpu_panic 134 133 nop 135 134
+5 -16
arch/sparc/kernel/process_32.c
··· 64 64 struct task_struct *last_task_used_math = NULL; 65 65 struct thread_info *current_set[NR_CPUS]; 66 66 67 - /* 68 - * the idle loop on a Sparc... ;) 69 - */ 70 - void cpu_idle(void) 67 + /* Idle loop support. */ 68 + void arch_cpu_idle(void) 71 69 { 72 - set_thread_flag(TIF_POLLING_NRFLAG); 73 - 74 - /* endless idle loop with no priority at all */ 75 - for (;;) { 76 - while (!need_resched()) { 77 - if (sparc_idle) 78 - (*sparc_idle)(); 79 - else 80 - cpu_relax(); 81 - } 82 - schedule_preempt_disabled(); 83 - } 70 + if (sparc_idle) 71 + (*sparc_idle)(); 72 + local_irq_enable(); 84 73 } 85 74 86 75 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
+13 -36
arch/sparc/kernel/process_64.c
··· 52 52 53 53 #include "kstack.h" 54 54 55 - static void sparc64_yield(int cpu) 55 + /* Idle loop support on sparc64. */ 56 + void arch_cpu_idle(void) 56 57 { 57 58 if (tlb_type != hypervisor) { 58 59 touch_nmi_watchdog(); 59 - return; 60 - } 61 - 62 - clear_thread_flag(TIF_POLLING_NRFLAG); 63 - smp_mb__after_clear_bit(); 64 - 65 - while (!need_resched() && !cpu_is_offline(cpu)) { 60 + } else { 66 61 unsigned long pstate; 67 62 68 - /* Disable interrupts. */ 63 + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over 64 + * the cpu sleep hypervisor call. 65 + */ 69 66 __asm__ __volatile__( 70 67 "rdpr %%pstate, %0\n\t" 71 68 "andn %0, %1, %0\n\t" ··· 70 73 : "=&r" (pstate) 71 74 : "i" (PSTATE_IE)); 72 75 73 - if (!need_resched() && !cpu_is_offline(cpu)) 76 + if (!need_resched() && !cpu_is_offline(smp_processor_id())) 74 77 sun4v_cpu_yield(); 75 78 76 79 /* Re-enable interrupts. */ ··· 81 84 : "=&r" (pstate) 82 85 : "i" (PSTATE_IE)); 83 86 } 84 - 85 - set_thread_flag(TIF_POLLING_NRFLAG); 87 + local_irq_enable(); 86 88 } 87 - 88 - /* The idle loop on sparc64. */ 89 - void cpu_idle(void) 90 - { 91 - int cpu = smp_processor_id(); 92 - 93 - set_thread_flag(TIF_POLLING_NRFLAG); 94 - 95 - while(1) { 96 - tick_nohz_idle_enter(); 97 - rcu_idle_enter(); 98 - 99 - while (!need_resched() && !cpu_is_offline(cpu)) 100 - sparc64_yield(cpu); 101 - 102 - rcu_idle_exit(); 103 - tick_nohz_idle_exit(); 104 89 105 90 #ifdef CONFIG_HOTPLUG_CPU 106 - if (cpu_is_offline(cpu)) { 107 - sched_preempt_enable_no_resched(); 108 - cpu_play_dead(); 109 - } 110 - #endif 111 - schedule_preempt_disabled(); 112 - } 91 + void arch_cpu_idle_dead() 92 + { 93 + sched_preempt_enable_no_resched(); 94 + cpu_play_dead(); 113 95 } 96 + #endif 114 97 115 98 #ifdef CONFIG_COMPAT 116 99 static void show_regwindow32(struct pt_regs *regs)
+1 -1
arch/sparc/kernel/smp_32.c
··· 369 369 local_irq_enable(); 370 370 371 371 wmb(); 372 - cpu_idle(); 372 + cpu_startup_entry(CPUHP_ONLINE); 373 373 374 374 /* We should never reach here! */ 375 375 BUG();
+2
arch/sparc/kernel/smp_64.c
··· 127 127 128 128 /* idle thread is expected to have preempt disabled */ 129 129 preempt_disable(); 130 + 131 + cpu_startup_entry(CPUHP_ONLINE); 130 132 } 131 133 132 134 void cpu_panic(void)
+1 -2
arch/sparc/kernel/trampoline_64.S
··· 407 407 408 408 call smp_callin 409 409 nop 410 - call cpu_idle 411 - mov 0, %o0 410 + 412 411 call cpu_panic 413 412 nop 414 413 1: b,a,pt %xcc, 1b
-2
arch/tile/include/asm/thread_info.h
··· 153 153 #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ 154 154 #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ 155 155 156 - #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) 157 - 158 156 #ifndef __ASSEMBLY__ 159 157 #define HAVE_SET_RESTORE_SIGMASK 1 160 158 static inline void set_restore_sigmask(void)
+9 -56
arch/tile/kernel/process.c
··· 40 40 #include <arch/abi.h> 41 41 #include <arch/sim_def.h> 42 42 43 - 44 43 /* 45 44 * Use the (x86) "idle=poll" option to prefer low latency when leaving the 46 45 * idle loop over low power while in the idle loop, e.g. if we have 47 46 * one thread per core and we want to get threads out of futex waits fast. 48 47 */ 49 - static int no_idle_nap; 50 48 static int __init idle_setup(char *str) 51 49 { 52 50 if (!str) ··· 52 54 53 55 if (!strcmp(str, "poll")) { 54 56 pr_info("using polling idle threads.\n"); 55 - no_idle_nap = 1; 56 - } else if (!strcmp(str, "halt")) 57 - no_idle_nap = 0; 58 - else 59 - return -1; 60 - 61 - return 0; 57 + cpu_idle_poll_ctrl(true); 58 + return 0; 59 + } else if (!strcmp(str, "halt")) { 60 + return 0; 61 + } 62 + return -1; 62 63 } 63 64 early_param("idle", idle_setup); 64 65 65 - /* 66 - * The idle thread. There's no useful work to be 67 - * done, so just try to conserve power and have a 68 - * low exit latency (ie sit in a loop waiting for 69 - * somebody to say that they'd like to reschedule) 70 - */ 71 - void cpu_idle(void) 66 + void arch_cpu_idle(void) 72 67 { 73 - int cpu = smp_processor_id(); 74 - 75 - 76 - current_thread_info()->status |= TS_POLLING; 77 - 78 - if (no_idle_nap) { 79 - while (1) { 80 - while (!need_resched()) 81 - cpu_relax(); 82 - schedule(); 83 - } 84 - } 85 - 86 - /* endless idle loop with no priority at all */ 87 - while (1) { 88 - tick_nohz_idle_enter(); 89 - rcu_idle_enter(); 90 - while (!need_resched()) { 91 - if (cpu_is_offline(cpu)) 92 - BUG(); /* no HOTPLUG_CPU */ 93 - 94 - local_irq_disable(); 95 - __get_cpu_var(irq_stat).idle_timestamp = jiffies; 96 - current_thread_info()->status &= ~TS_POLLING; 97 - /* 98 - * TS_POLLING-cleared state must be visible before we 99 - * test NEED_RESCHED: 100 - */ 101 - smp_mb(); 102 - 103 - if (!need_resched()) 104 - _cpu_idle(); 105 - else 106 - local_irq_enable(); 107 - current_thread_info()->status |= TS_POLLING; 108 - } 109 - rcu_idle_exit(); 110 - 
tick_nohz_idle_exit(); 111 - schedule_preempt_disabled(); 112 - } 68 + __get_cpu_var(irq_stat).idle_timestamp = jiffies; 69 + _cpu_idle(); 113 70 } 114 71 115 72 /*
+1 -3
arch/tile/kernel/smpboot.c
··· 207 207 /* Set up tile-timer clock-event device on this cpu */ 208 208 setup_tile_timer(); 209 209 210 - preempt_enable(); 211 - 212 - cpu_idle(); 210 + cpu_startup_entry(CPUHP_ONLINE); 213 211 } 214 212 215 213 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+4 -23
arch/um/kernel/process.c
··· 210 210 kmalloc_ok = save_kmalloc_ok; 211 211 } 212 212 213 - void default_idle(void) 213 + void arch_cpu_idle(void) 214 214 { 215 215 unsigned long long nsecs; 216 216 217 - while (1) { 218 - /* endless idle loop with no priority at all */ 219 - 220 - /* 221 - * although we are an idle CPU, we do not want to 222 - * get into the scheduler unnecessarily. 223 - */ 224 - if (need_resched()) 225 - schedule(); 226 - 227 - tick_nohz_idle_enter(); 228 - rcu_idle_enter(); 229 - nsecs = disable_timer(); 230 - idle_sleep(nsecs); 231 - rcu_idle_exit(); 232 - tick_nohz_idle_exit(); 233 - } 234 - } 235 - 236 - void cpu_idle(void) 237 - { 238 217 cpu_tasks[current_thread_info()->cpu].pid = os_getpid(); 239 - default_idle(); 218 + nsecs = disable_timer(); 219 + idle_sleep(nsecs); 220 + local_irq_enable(); 240 221 } 241 222 242 223 int __cant_sleep(void) {
+3 -18
arch/unicore32/kernel/process.c
··· 45 45 "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR" 46 46 }; 47 47 48 - void cpu_idle(void) 48 + void arch_cpu_idle(void) 49 49 { 50 - /* endless idle loop with no priority at all */ 51 - while (1) { 52 - tick_nohz_idle_enter(); 53 - rcu_idle_enter(); 54 - while (!need_resched()) { 55 - local_irq_disable(); 56 - stop_critical_timings(); 57 - cpu_do_idle(); 58 - local_irq_enable(); 59 - start_critical_timings(); 60 - } 61 - rcu_idle_exit(); 62 - tick_nohz_idle_exit(); 63 - preempt_enable_no_resched(); 64 - schedule(); 65 - preempt_disable(); 66 - } 50 + cpu_do_idle(); 51 + local_irq_enable(); 67 52 } 68 53 69 54 static char reboot_mode = 'h';
-3
arch/x86/Kconfig
··· 188 188 config ARCH_HAS_CPU_RELAX 189 189 def_bool y 190 190 191 - config ARCH_HAS_DEFAULT_IDLE 192 - def_bool y 193 - 194 191 config ARCH_HAS_CACHE_LINE_SIZE 195 192 def_bool y 196 193
-2
arch/x86/include/asm/thread_info.h
··· 241 241 skip sending interrupt */ 242 242 #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ 243 243 244 - #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) 245 - 246 244 #ifndef __ASSEMBLY__ 247 245 #define HAVE_SET_RESTORE_SIGMASK 1 248 246 static inline void set_restore_sigmask(void)
+28 -79
arch/x86/kernel/process.c
··· 301 301 } 302 302 #endif 303 303 304 - /* 305 - * The idle thread. There's no useful work to be 306 - * done, so just try to conserve power and have a 307 - * low exit latency (ie sit in a loop waiting for 308 - * somebody to say that they'd like to reschedule) 309 - */ 310 - void cpu_idle(void) 304 + void arch_cpu_idle_prepare(void) 311 305 { 312 306 /* 313 307 * If we're the non-boot CPU, nothing set the stack canary up ··· 311 317 * canaries already on the stack wont ever trigger). 312 318 */ 313 319 boot_init_stack_canary(); 314 - current_thread_info()->status |= TS_POLLING; 320 + } 315 321 316 - while (1) { 317 - tick_nohz_idle_enter(); 322 + void arch_cpu_idle_enter(void) 323 + { 324 + local_touch_nmi(); 325 + enter_idle(); 326 + } 318 327 319 - while (!need_resched()) { 320 - rmb(); 328 + void arch_cpu_idle_exit(void) 329 + { 330 + __exit_idle(); 331 + } 321 332 322 - if (cpu_is_offline(smp_processor_id())) 323 - play_dead(); 324 - 325 - /* 326 - * Idle routines should keep interrupts disabled 327 - * from here on, until they go to idle. 328 - * Otherwise, idle callbacks can misfire. 329 - */ 330 - local_touch_nmi(); 331 - local_irq_disable(); 332 - 333 - enter_idle(); 334 - 335 - /* Don't trace irqs off for idle */ 336 - stop_critical_timings(); 337 - 338 - /* enter_idle() needs rcu for notifiers */ 339 - rcu_idle_enter(); 340 - 341 - if (cpuidle_idle_call()) 342 - x86_idle(); 343 - 344 - rcu_idle_exit(); 345 - start_critical_timings(); 346 - 347 - /* In many cases the interrupt that ended idle 348 - has already called exit_idle. But some idle 349 - loops can be woken up without interrupt. */ 350 - __exit_idle(); 351 - } 352 - 353 - tick_nohz_idle_exit(); 354 - preempt_enable_no_resched(); 355 - schedule(); 356 - preempt_disable(); 357 - } 333 + void arch_cpu_idle_dead(void) 334 + { 335 + play_dead(); 358 336 } 359 337 360 338 /* 361 - * We use this if we don't have any better 362 - * idle routine.. 339 + * Called from the generic idle code. 
340 + */ 341 + void arch_cpu_idle(void) 342 + { 343 + if (cpuidle_idle_call()) 344 + x86_idle(); 345 + } 346 + 347 + /* 348 + * We use this if we don't have any better idle routine.. 363 349 */ 364 350 void default_idle(void) 365 351 { 366 352 trace_cpu_idle_rcuidle(1, smp_processor_id()); 367 - current_thread_info()->status &= ~TS_POLLING; 368 - /* 369 - * TS_POLLING-cleared state must be visible before we 370 - * test NEED_RESCHED: 371 - */ 372 - smp_mb(); 373 - 374 - if (!need_resched()) 375 - safe_halt(); /* enables interrupts racelessly */ 376 - else 377 - local_irq_enable(); 378 - current_thread_info()->status |= TS_POLLING; 353 + safe_halt(); 379 354 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 380 355 } 381 356 #ifdef CONFIG_APM_MODULE ··· 372 409 373 410 for (;;) 374 411 halt(); 375 - } 376 - 377 - /* 378 - * On SMP it's slightly faster (but much more power-consuming!) 379 - * to poll the ->work.need_resched flag instead of waiting for the 380 - * cross-CPU IPI to arrive. Use this option with caution. 
381 - */ 382 - static void poll_idle(void) 383 - { 384 - trace_cpu_idle_rcuidle(0, smp_processor_id()); 385 - local_irq_enable(); 386 - while (!need_resched()) 387 - cpu_relax(); 388 - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 389 412 } 390 413 391 414 bool amd_e400_c1e_detected; ··· 438 489 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 439 490 { 440 491 #ifdef CONFIG_SMP 441 - if (x86_idle == poll_idle && smp_num_siblings > 1) 492 + if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) 442 493 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); 443 494 #endif 444 - if (x86_idle) 495 + if (x86_idle || boot_option_idle_override == IDLE_POLL) 445 496 return; 446 497 447 498 if (cpu_has_amd_erratum(amd_erratum_400)) { ··· 466 517 467 518 if (!strcmp(str, "poll")) { 468 519 pr_info("using polling idle threads\n"); 469 - x86_idle = poll_idle; 470 520 boot_option_idle_override = IDLE_POLL; 521 + cpu_idle_poll_ctrl(true); 471 522 } else if (!strcmp(str, "halt")) { 472 523 /* 473 524 * When the boot option of idle=halt is added, halt is
+1 -1
arch/x86/kernel/smpboot.c
··· 284 284 x86_cpuinit.setup_percpu_clockev(); 285 285 286 286 wmb(); 287 - cpu_idle(); 287 + cpu_startup_entry(CPUHP_ONLINE); 288 288 } 289 289 290 290 void __init smp_store_boot_cpu_info(void)
+1 -1
arch/x86/xen/smp.c
··· 95 95 static void __cpuinit cpu_bringup_and_idle(void) 96 96 { 97 97 cpu_bringup(); 98 - cpu_idle(); 98 + cpu_startup_entry(CPUHP_ONLINE); 99 99 } 100 100 101 101 static int xen_smp_intr_init(unsigned int cpu)
+2 -12
arch/xtensa/kernel/process.c
··· 105 105 /* 106 106 * Powermanagement idle function, if any is provided by the platform. 107 107 */ 108 - 109 - void cpu_idle(void) 108 + void arch_cpu_idle(void) 110 109 { 111 - local_irq_enable(); 112 - 113 - /* endless idle loop with no priority at all */ 114 - while (1) { 115 - rcu_idle_enter(); 116 - while (!need_resched()) 117 - platform_idle(); 118 - rcu_idle_exit(); 119 - schedule_preempt_disabled(); 120 - } 110 + platform_idle(); 121 111 } 122 112 123 113 /*
+16
include/linux/cpu.h
··· 212 212 static inline void enable_nonboot_cpus(void) {} 213 213 #endif /* !CONFIG_PM_SLEEP_SMP */ 214 214 215 + enum cpuhp_state { 216 + CPUHP_OFFLINE, 217 + CPUHP_ONLINE, 218 + }; 219 + 220 + void cpu_startup_entry(enum cpuhp_state state); 221 + void cpu_idle(void); 222 + 223 + void cpu_idle_poll_ctrl(bool enable); 224 + 225 + void arch_cpu_idle(void); 226 + void arch_cpu_idle_prepare(void); 227 + void arch_cpu_idle_enter(void); 228 + void arch_cpu_idle_exit(void); 229 + void arch_cpu_idle_dead(void); 230 + 215 231 #endif /* _LINUX_CPU_H_ */
+41
include/linux/sched.h
··· 2457 2457 } 2458 2458 2459 2459 /* 2460 + * Idle thread specific functions to determine the need_resched 2461 + * polling state. We have two versions, one based on TS_POLLING in 2462 + * thread_info.status and one based on TIF_POLLING_NRFLAG in 2463 + * thread_info.flags 2464 + */ 2465 + #ifdef TS_POLLING 2466 + static inline int tsk_is_polling(struct task_struct *p) 2467 + { 2468 + return task_thread_info(p)->status & TS_POLLING; 2469 + } 2470 + static inline void current_set_polling(void) 2471 + { 2472 + current_thread_info()->status |= TS_POLLING; 2473 + } 2474 + 2475 + static inline void current_clr_polling(void) 2476 + { 2477 + current_thread_info()->status &= ~TS_POLLING; 2478 + smp_mb__after_clear_bit(); 2479 + } 2480 + #elif defined(TIF_POLLING_NRFLAG) 2481 + static inline int tsk_is_polling(struct task_struct *p) 2482 + { 2483 + return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG); 2484 + } 2485 + static inline void current_set_polling(void) 2486 + { 2487 + set_thread_flag(TIF_POLLING_NRFLAG); 2488 + } 2489 + 2490 + static inline void current_clr_polling(void) 2491 + { 2492 + clear_thread_flag(TIF_POLLING_NRFLAG); 2493 + } 2494 + #else 2495 + static inline int tsk_is_polling(struct task_struct *p) { return 0; } 2496 + static inline void current_set_polling(void) { } 2497 + static inline void current_clr_polling(void) { } 2498 + #endif 2499 + 2500 + /* 2460 2501 * Thread group CPU time accounting. 2461 2502 */ 2462 2503 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+1 -1
init/main.c
··· 386 386 init_idle_bootup_task(current); 387 387 schedule_preempt_disabled(); 388 388 /* Call into cpu_idle with preempt disabled */ 389 - cpu_idle(); 389 + cpu_startup_entry(CPUHP_ONLINE); 390 390 } 391 391 392 392 /* Check for early params. */
+1
kernel/Makefile
··· 24 24 25 25 obj-y += sched/ 26 26 obj-y += power/ 27 + obj-y += cpu/ 27 28 28 29 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o 29 30 obj-$(CONFIG_FREEZER) += freezer.o
+1
kernel/cpu/Makefile
··· 1 + obj-y = idle.o
+107
kernel/cpu/idle.c
··· 1 + /* 2 + * Generic entry point for the idle threads 3 + */ 4 + #include <linux/sched.h> 5 + #include <linux/cpu.h> 6 + #include <linux/tick.h> 7 + #include <linux/mm.h> 8 + 9 + #include <asm/tlb.h> 10 + 11 + #include <trace/events/power.h> 12 + 13 + static int __read_mostly cpu_idle_force_poll; 14 + 15 + void cpu_idle_poll_ctrl(bool enable) 16 + { 17 + if (enable) { 18 + cpu_idle_force_poll++; 19 + } else { 20 + cpu_idle_force_poll--; 21 + WARN_ON_ONCE(cpu_idle_force_poll < 0); 22 + } 23 + } 24 + 25 + #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP 26 + static int __init cpu_idle_poll_setup(char *__unused) 27 + { 28 + cpu_idle_force_poll = 1; 29 + return 1; 30 + } 31 + __setup("nohlt", cpu_idle_poll_setup); 32 + 33 + static int __init cpu_idle_nopoll_setup(char *__unused) 34 + { 35 + cpu_idle_force_poll = 0; 36 + return 1; 37 + } 38 + __setup("hlt", cpu_idle_nopoll_setup); 39 + #endif 40 + 41 + static inline int cpu_idle_poll(void) 42 + { 43 + trace_cpu_idle_rcuidle(0, smp_processor_id()); 44 + local_irq_enable(); 45 + while (!need_resched()) 46 + cpu_relax(); 47 + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 48 + return 1; 49 + } 50 + 51 + /* Weak implementations for optional arch specific functions */ 52 + void __weak arch_cpu_idle_prepare(void) { } 53 + void __weak arch_cpu_idle_enter(void) { } 54 + void __weak arch_cpu_idle_exit(void) { } 55 + void __weak arch_cpu_idle_dead(void) { } 56 + void __weak arch_cpu_idle(void) 57 + { 58 + cpu_idle_force_poll = 1; 59 + } 60 + 61 + /* 62 + * Generic idle loop implementation 63 + */ 64 + static void cpu_idle_loop(void) 65 + { 66 + while (1) { 67 + tick_nohz_idle_enter(); 68 + 69 + while (!need_resched()) { 70 + check_pgt_cache(); 71 + rmb(); 72 + 73 + if (cpu_is_offline(smp_processor_id())) 74 + arch_cpu_idle_dead(); 75 + 76 + local_irq_disable(); 77 + arch_cpu_idle_enter(); 78 + 79 + if (cpu_idle_force_poll) { 80 + cpu_idle_poll(); 81 + } else { 82 + current_clr_polling(); 83 + if (!need_resched()) { 84 + 
stop_critical_timings(); 85 + rcu_idle_enter(); 86 + arch_cpu_idle(); 87 + WARN_ON_ONCE(irqs_disabled()); 88 + rcu_idle_exit(); 89 + start_critical_timings(); 90 + } else { 91 + local_irq_enable(); 92 + } 93 + current_set_polling(); 94 + } 95 + arch_cpu_idle_exit(); 96 + } 97 + tick_nohz_idle_exit(); 98 + schedule_preempt_disabled(); 99 + } 100 + } 101 + 102 + void cpu_startup_entry(enum cpuhp_state state) 103 + { 104 + current_set_polling(); 105 + arch_cpu_idle_prepare(); 106 + cpu_idle_loop(); 107 + }
-5
kernel/sched/core.c
··· 512 512 * the target CPU. 513 513 */ 514 514 #ifdef CONFIG_SMP 515 - 516 - #ifndef tsk_is_polling 517 - #define tsk_is_polling(t) 0 518 - #endif 519 - 520 515 void resched_task(struct task_struct *p) 521 516 { 522 517 int cpu;