Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86/idle' into sched/core

Merge these x86-specific bits - we are going to add generic bits as well.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+48 -78
+43
arch/x86/include/asm/mwait.h
··· 1 1 #ifndef _ASM_X86_MWAIT_H 2 2 #define _ASM_X86_MWAIT_H 3 3 4 + #include <linux/sched.h> 5 + 4 6 #define MWAIT_SUBSTATE_MASK 0xf 5 7 #define MWAIT_CSTATE_MASK 0xf 6 8 #define MWAIT_SUBSTATE_SIZE 4 ··· 14 12 #define CPUID5_ECX_INTERRUPT_BREAK 0x2 15 13 16 14 #define MWAIT_ECX_INTERRUPT_BREAK 0x1 15 + 16 + static inline void __monitor(const void *eax, unsigned long ecx, 17 + unsigned long edx) 18 + { 19 + /* "monitor %eax, %ecx, %edx;" */ 20 + asm volatile(".byte 0x0f, 0x01, 0xc8;" 21 + :: "a" (eax), "c" (ecx), "d"(edx)); 22 + } 23 + 24 + static inline void __mwait(unsigned long eax, unsigned long ecx) 25 + { 26 + /* "mwait %eax, %ecx;" */ 27 + asm volatile(".byte 0x0f, 0x01, 0xc9;" 28 + :: "a" (eax), "c" (ecx)); 29 + } 30 + 31 + /* 32 + * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, 33 + * which can obviate IPI to trigger checking of need_resched. 34 + * We execute MONITOR against need_resched and enter optimized wait state 35 + * through MWAIT. Whenever someone changes need_resched, we would be woken 36 + * up from MWAIT (without an IPI). 37 + * 38 + * New with Core Duo processors, MWAIT can take some hints based on CPU 39 + * capability. 40 + */ 41 + static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) 42 + { 43 + if (!current_set_polling_and_test()) { 44 + if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { 45 + mb(); 46 + clflush((void *)&current_thread_info()->flags); 47 + mb(); 48 + } 49 + 50 + __monitor((void *)&current_thread_info()->flags, 0, 0); 51 + if (!need_resched()) 52 + __mwait(eax, ecx); 53 + } 54 + __current_clr_polling(); 55 + } 17 56 18 57 #endif /* _ASM_X86_MWAIT_H */
-23
arch/x86/include/asm/processor.h
··· 700 700 #endif 701 701 } 702 702 703 - static inline void __monitor(const void *eax, unsigned long ecx, 704 - unsigned long edx) 705 - { 706 - /* "monitor %eax, %ecx, %edx;" */ 707 - asm volatile(".byte 0x0f, 0x01, 0xc8;" 708 - :: "a" (eax), "c" (ecx), "d"(edx)); 709 - } 710 - 711 - static inline void __mwait(unsigned long eax, unsigned long ecx) 712 - { 713 - /* "mwait %eax, %ecx;" */ 714 - asm volatile(".byte 0x0f, 0x01, 0xc9;" 715 - :: "a" (eax), "c" (ecx)); 716 - } 717 - 718 - static inline void __sti_mwait(unsigned long eax, unsigned long ecx) 719 - { 720 - trace_hardirqs_on(); 721 - /* "mwait %eax, %ecx;" */ 722 - asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" 723 - :: "a" (eax), "c" (ecx)); 724 - } 725 - 726 703 extern void select_idle_routine(const struct cpuinfo_x86 *c); 727 704 extern void init_amd_e400_c1e_mask(void); 728 705
-23
arch/x86/kernel/acpi/cstate.c
··· 150 150 } 151 151 EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); 152 152 153 - /* 154 - * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, 155 - * which can obviate IPI to trigger checking of need_resched. 156 - * We execute MONITOR against need_resched and enter optimized wait state 157 - * through MWAIT. Whenever someone changes need_resched, we would be woken 158 - * up from MWAIT (without an IPI). 159 - * 160 - * New with Core Duo processors, MWAIT can take some hints based on CPU 161 - * capability. 162 - */ 163 - void mwait_idle_with_hints(unsigned long ax, unsigned long cx) 164 - { 165 - if (!need_resched()) { 166 - if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) 167 - clflush((void *)&current_thread_info()->flags); 168 - 169 - __monitor((void *)&current_thread_info()->flags, 0, 0); 170 - smp_mb(); 171 - if (!need_resched()) 172 - __mwait(ax, cx); 173 - } 174 - } 175 - 176 153 void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) 177 154 { 178 155 unsigned int cpu = smp_processor_id();
+2
arch/x86/kernel/smpboot.c
··· 1417 1417 * The WBINVD is insufficient due to the spurious-wakeup 1418 1418 * case where we return around the loop. 1419 1419 */ 1420 + mb(); 1420 1421 clflush(mwait_ptr); 1422 + mb(); 1421 1423 __monitor(mwait_ptr, 0, 0); 1422 1424 mb(); 1423 1425 __mwait(eax, 0);
+1 -4
drivers/acpi/acpi_pad.c
··· 193 193 CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 194 194 stop_critical_timings(); 195 195 196 - __monitor((void *)&current_thread_info()->flags, 0, 0); 197 - smp_mb(); 198 - if (!need_resched()) 199 - __mwait(power_saving_mwait_eax, 1); 196 + mwait_idle_with_hints(power_saving_mwait_eax, 1); 200 197 201 198 start_critical_timings(); 202 199 if (lapic_marked_unstable)
-15
drivers/acpi/processor_idle.c
··· 727 727 if (unlikely(!pr)) 728 728 return -EINVAL; 729 729 730 - if (cx->entry_method == ACPI_CSTATE_FFH) { 731 - if (current_set_polling_and_test()) 732 - return -EINVAL; 733 - } 734 - 735 730 lapic_timer_state_broadcast(pr, cx, 1); 736 731 acpi_idle_do_entry(cx); 737 732 ··· 780 785 if (unlikely(!pr)) 781 786 return -EINVAL; 782 787 783 - if (cx->entry_method == ACPI_CSTATE_FFH) { 784 - if (current_set_polling_and_test()) 785 - return -EINVAL; 786 - } 787 - 788 788 /* 789 789 * Must be done before busmaster disable as we might need to 790 790 * access HPET ! ··· 829 839 acpi_safe_halt(); 830 840 return -EBUSY; 831 841 } 832 - } 833 - 834 - if (cx->entry_method == ACPI_CSTATE_FFH) { 835 - if (current_set_polling_and_test()) 836 - return -EINVAL; 837 842 } 838 843 839 844 acpi_unlazy_tlb(smp_processor_id());
+1 -10
drivers/idle/intel_idle.c
··· 377 377 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 378 378 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 379 379 380 - if (!current_set_polling_and_test()) { 381 - 382 - if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) 383 - clflush((void *)&current_thread_info()->flags); 384 - 385 - __monitor((void *)&current_thread_info()->flags, 0, 0); 386 - smp_mb(); 387 - if (!need_resched()) 388 - __mwait(eax, ecx); 389 - } 380 + mwait_idle_with_hints(eax, ecx); 390 381 391 382 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 392 383 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+1 -3
drivers/thermal/intel_powerclamp.c
··· 438 438 */ 439 439 local_touch_nmi(); 440 440 stop_critical_timings(); 441 - __monitor((void *)&current_thread_info()->flags, 0, 0); 442 - cpu_relax(); /* allow HT sibling to run */ 443 - __mwait(eax, ecx); 441 + mwait_idle_with_hints(eax, ecx); 444 442 start_critical_timings(); 445 443 atomic_inc(&idle_wakeup_counter); 446 444 }