···11#ifndef _ASM_X86_MWAIT_H22#define _ASM_X86_MWAIT_H3344+#include <linux/sched.h>55+46#define MWAIT_SUBSTATE_MASK 0xf57#define MWAIT_CSTATE_MASK 0xf68#define MWAIT_SUBSTATE_SIZE 4···1412#define CPUID5_ECX_INTERRUPT_BREAK 0x215131614#define MWAIT_ECX_INTERRUPT_BREAK 0x11515+1616+static inline void __monitor(const void *eax, unsigned long ecx,1717+ unsigned long edx)1818+{1919+ /* "monitor %eax, %ecx, %edx;" */2020+ asm volatile(".byte 0x0f, 0x01, 0xc8;"2121+ :: "a" (eax), "c" (ecx), "d"(edx));2222+}2323+2424+static inline void __mwait(unsigned long eax, unsigned long ecx)2525+{2626+ /* "mwait %eax, %ecx;" */2727+ asm volatile(".byte 0x0f, 0x01, 0xc9;"2828+ :: "a" (eax), "c" (ecx));2929+}3030+3131+/*3232+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,3333+ * which can obviate IPI to trigger checking of need_resched.3434+ * We execute MONITOR against need_resched and enter optimized wait state3535+ * through MWAIT. Whenever someone changes need_resched, we would be woken3636+ * up from MWAIT (without an IPI).3737+ *3838+ * New with Core Duo processors, MWAIT can take some hints based on CPU3939+ * capability.4040+ */4141+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)4242+{4343+ if (!current_set_polling_and_test()) {4444+ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {4545+ mb();4646+ clflush((void *)&current_thread_info()->flags);4747+ mb();4848+ }4949+5050+ __monitor((void *)&current_thread_info()->flags, 0, 0);5151+ if (!need_resched())5252+ __mwait(eax, ecx);5353+ }5454+ __current_clr_polling();5555+}17561857#endif /* _ASM_X86_MWAIT_H */
-23
arch/x86/include/asm/processor.h
···700700#endif701701}702702703703-static inline void __monitor(const void *eax, unsigned long ecx,704704- unsigned long edx)705705-{706706- /* "monitor %eax, %ecx, %edx;" */707707- asm volatile(".byte 0x0f, 0x01, 0xc8;"708708- :: "a" (eax), "c" (ecx), "d"(edx));709709-}710710-711711-static inline void __mwait(unsigned long eax, unsigned long ecx)712712-{713713- /* "mwait %eax, %ecx;" */714714- asm volatile(".byte 0x0f, 0x01, 0xc9;"715715- :: "a" (eax), "c" (ecx));716716-}717717-718718-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)719719-{720720- trace_hardirqs_on();721721- /* "mwait %eax, %ecx;" */722722- asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"723723- :: "a" (eax), "c" (ecx));724724-}725725-726703extern void select_idle_routine(const struct cpuinfo_x86 *c);727704extern void init_amd_e400_c1e_mask(void);728705
-23
arch/x86/kernel/acpi/cstate.c
···150150}151151EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);152152153153-/*154154- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,155155- * which can obviate IPI to trigger checking of need_resched.156156- * We execute MONITOR against need_resched and enter optimized wait state157157- * through MWAIT. Whenever someone changes need_resched, we would be woken158158- * up from MWAIT (without an IPI).159159- *160160- * New with Core Duo processors, MWAIT can take some hints based on CPU161161- * capability.162162- */163163-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)164164-{165165- if (!need_resched()) {166166- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))167167- clflush((void *)&current_thread_info()->flags);168168-169169- __monitor((void *)&current_thread_info()->flags, 0, 0);170170- smp_mb();171171- if (!need_resched())172172- __mwait(ax, cx);173173- }174174-}175175-176153void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)177154{178155 unsigned int cpu = smp_processor_id();
+2
arch/x86/kernel/smpboot.c
···14171417 * The WBINVD is insufficient due to the spurious-wakeup14181418 * case where we return around the loop.14191419 */14201420+ mb();14201421 clflush(mwait_ptr);14221422+ mb();14211423 __monitor(mwait_ptr, 0, 0);14221424 mb();14231425 __mwait(eax, 0);
+1-4
drivers/acpi/acpi_pad.c
···193193 CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);194194 stop_critical_timings();195195196196- __monitor((void *)&current_thread_info()->flags, 0, 0);197197- smp_mb();198198- if (!need_resched())199199- __mwait(power_saving_mwait_eax, 1);196196+ mwait_idle_with_hints(power_saving_mwait_eax, 1);200197201198 start_critical_timings();202199 if (lapic_marked_unstable)
-15
drivers/acpi/processor_idle.c
···727727 if (unlikely(!pr))728728 return -EINVAL;729729730730- if (cx->entry_method == ACPI_CSTATE_FFH) {731731- if (current_set_polling_and_test())732732- return -EINVAL;733733- }734734-735730 lapic_timer_state_broadcast(pr, cx, 1);736731 acpi_idle_do_entry(cx);737732···780785 if (unlikely(!pr))781786 return -EINVAL;782787783783- if (cx->entry_method == ACPI_CSTATE_FFH) {784784- if (current_set_polling_and_test())785785- return -EINVAL;786786- }787787-788788 /*789789 * Must be done before busmaster disable as we might need to790790 * access HPET !···829839 acpi_safe_halt();830840 return -EBUSY;831841 }832832- }833833-834834- if (cx->entry_method == ACPI_CSTATE_FFH) {835835- if (current_set_polling_and_test())836836- return -EINVAL;837842 }838843839844 acpi_unlazy_tlb(smp_processor_id());
+1-10
drivers/idle/intel_idle.c
···377377 if (!(lapic_timer_reliable_states & (1 << (cstate))))378378 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);379379380380- if (!current_set_polling_and_test()) {381381-382382- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))383383- clflush((void *)&current_thread_info()->flags);384384-385385- __monitor((void *)&current_thread_info()->flags, 0, 0);386386- smp_mb();387387- if (!need_resched())388388- __mwait(eax, ecx);389389- }380380+ mwait_idle_with_hints(eax, ecx);390381391382 if (!(lapic_timer_reliable_states & (1 << (cstate))))392383 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);