Merge branch 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6

* 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6:
cpuidle/x86/perf: fix power:cpu_idle double end events and throw cpu_idle events from the cpuidle layer
intel_idle: open broadcast clock event
cpuidle: CPUIDLE_FLAG_CHECK_BM is omap3_idle specific
cpuidle: CPUIDLE_FLAG_TLB_FLUSHED is specific to intel_idle
cpuidle: delete unused CPUIDLE_FLAG_SHALLOW, BALANCED, DEEP definitions
SH, cpuidle: delete use of NOP CPUIDLE_FLAGS_SHALLOW
cpuidle: delete NOP CPUIDLE_FLAG_POLL
ACPI: processor_idle: delete use of NOP CPUIDLE_FLAGs
cpuidle: Rename X86 specific idle poll state[0] from C0 to POLL
ACPI, intel_idle: Cleanup idle= internal variables
cpuidle: Make cpuidle_enable_device() call poll_idle_init()
intel_idle: update Sandy Bridge core C-state residency targets

13 files changed, 145 insertions(+), 113 deletions(-)
+2
arch/arm/mach-omap2/cpuidle34xx.c
···
 
 #define OMAP3_STATE_MAX OMAP3_STATE_C7
 
+#define CPUIDLE_FLAG_CHECK_BM	0x10000	/* use omap3_enter_idle_bm() */
+
 struct omap3_processor_cx {
 	u8 valid;
 	u8 type;
+3 -2
arch/ia64/include/asm/processor.h
···
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
+			 IDLE_NOMWAIT, IDLE_POLL};
 
 #endif /* !__ASSEMBLY__ */
 
+1 -5
arch/ia64/kernel/process.c
···
 
 void (*ia64_mark_idle)(int);
 
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
 void (*pm_idle) (void);
 EXPORT_SYMBOL(pm_idle);
 void (*pm_power_off) (void);
-1
arch/sh/kernel/cpu/shmobile/cpuidle.c
···
 	state->target_residency = 1 * 2;
 	state->power_usage = 3;
 	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_SHALLOW;
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;
 
+3 -2
arch/x86/include/asm/processor.h
···
 extern void init_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
 extern bool c1e_detected;
+
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+			 IDLE_POLL, IDLE_FORCE_MWAIT};
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
+12 -18
arch/x86/kernel/process.c
···
 #include <asm/i387.h>
 #include <asm/debugreg.h>
 
-unsigned long idle_halt;
-EXPORT_SYMBOL(idle_halt);
-unsigned long idle_nomwait;
-EXPORT_SYMBOL(idle_nomwait);
-
 struct kmem_cache *task_xstate_cachep;
 EXPORT_SYMBOL_GPL(task_xstate_cachep);
 
···
 /*
  * Idle related variables and functions
  */
-unsigned long boot_option_idle_override = 0;
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
 EXPORT_SYMBOL(boot_option_idle_override);
 
 /*
···
 		else
 			local_irq_enable();
 		current_thread_info()->status |= TS_POLLING;
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
···
  */
 void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
 {
-	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
-	trace_cpu_idle((ax>>4)+1, smp_processor_id());
 	if (!need_resched()) {
 		if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
 			clflush((void *)&current_thread_info()->flags);
···
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_power_end(smp_processor_id());
+		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else
 		local_irq_enable();
 }
···
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
-static int __cpuinitdata force_mwait;
 
 #define MWAIT_INFO			0x05
 #define MWAIT_ECX_EXTENDED_INFO		0x01
···
 {
 	u32 eax, ebx, ecx, edx;
 
-	if (force_mwait)
+	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
 		return 1;
 
 	if (c->cpuid_level < MWAIT_INFO)
···
 	if (!strcmp(str, "poll")) {
 		printk("using polling idle threads.\n");
 		pm_idle = poll_idle;
-	} else if (!strcmp(str, "mwait"))
-		force_mwait = 1;
-	else if (!strcmp(str, "halt")) {
+		boot_option_idle_override = IDLE_POLL;
+	} else if (!strcmp(str, "mwait")) {
+		boot_option_idle_override = IDLE_FORCE_MWAIT;
+	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
 		 * forced to be used for CPU idle. In such case CPU C2/C3
···
 		 * the boot_option_idle_override.
 		 */
 		pm_idle = default_idle;
-		idle_halt = 1;
-		return 0;
+		boot_option_idle_override = IDLE_HALT;
 	} else if (!strcmp(str, "nomwait")) {
 		/*
 		 * If the boot option of "idle=nomwait" is added,
···
 		 * states. In such case it won't touch the variable
 		 * of boot_option_idle_override.
 		 */
-		idle_nomwait = 1;
-		return 0;
+		boot_option_idle_override = IDLE_NOMWAIT;
 	} else
 		return -1;
 
-	boot_option_idle_override = 1;
 	return 0;
 }
 early_param("idle", idle_setup);
-4
arch/x86/kernel/process_32.c
···
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 /*
···
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
-6
arch/x86/kernel/process_64.c
···
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 
-#include <trace/events/power.h>
-
 asmlinkage extern void ret_from_fork(void);
 
 DEFINE_PER_CPU(unsigned long, old_rsp);
···
 			stop_critical_timings();
 			pm_idle();
 			start_critical_timings();
-
-			trace_power_end(smp_processor_id());
-			trace_cpu_idle(PWR_EVENT_EXIT,
-				       smp_processor_id());
 
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
+2 -2
drivers/acpi/processor_core.c
···
 {
 	printk(KERN_NOTICE PREFIX "%s detected - "
 		"disabling mwait for CPU C-states\n", id->ident);
-	idle_nomwait = 1;
+	boot_option_idle_override = IDLE_NOMWAIT;
 	return 0;
 }
 
···
 {
 	acpi_status status = AE_OK;
 
-	if (idle_nomwait) {
+	if (boot_option_idle_override == IDLE_NOMWAIT) {
 		/*
 		 * If mwait is disabled for CPU C-states, the C2C3_FFH access
 		 * mode will be disabled in the parameter of _PDC object.
+11 -17
drivers/acpi/processor_idle.c
···
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
+static int disabled_by_idle_boot_param(void)
+{
+	return boot_option_idle_override == IDLE_POLL ||
+		boot_option_idle_override == IDLE_FORCE_MWAIT ||
+		boot_option_idle_override == IDLE_HALT;
+}
+
 /*
  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
  * For now disable this. Probably a bug somewhere else.
···
 			continue;
 		}
 		if (cx.type == ACPI_STATE_C1 &&
-		    (idle_halt || idle_nomwait)) {
+		    (boot_option_idle_override == IDLE_NOMWAIT)) {
 			/*
 			 * In most cases the C1 space_id obtained from
 			 * _CST object is FIXED_HARDWARE access mode.
···
 	state->flags = 0;
 	switch (cx->type) {
 	case ACPI_STATE_C1:
-		state->flags |= CPUIDLE_FLAG_SHALLOW;
 		if (cx->entry_method == ACPI_CSTATE_FFH)
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
···
 		break;
 
 	case ACPI_STATE_C2:
-		state->flags |= CPUIDLE_FLAG_BALANCED;
 		state->flags |= CPUIDLE_FLAG_TIME_VALID;
 		state->enter = acpi_idle_enter_simple;
 		dev->safe_state = state;
 		break;
 
 	case ACPI_STATE_C3:
-		state->flags |= CPUIDLE_FLAG_DEEP;
 		state->flags |= CPUIDLE_FLAG_TIME_VALID;
-		state->flags |= CPUIDLE_FLAG_CHECK_BM;
 		state->enter = pr->flags.bm_check ?
 			acpi_idle_enter_bm :
 			acpi_idle_enter_simple;
···
 {
 	int ret = 0;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!pr)
···
 	acpi_status status = 0;
 	static int first_run;
 
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	if (!first_run) {
-		if (idle_halt) {
-			/*
-			 * When the boot option of "idle=halt" is added, halt
-			 * is used for CPU IDLE.
-			 * In such case C2/C3 is meaningless. So the max_cstate
-			 * is set to one.
-			 */
-			max_cstate = 1;
-		}
 		dmi_check_system(processor_power_dmi_table);
 		max_cstate = acpi_processor_cstate_check(max_cstate);
 		if (max_cstate < ACPI_C_STATES_MAX)
···
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
-	if (boot_option_idle_override)
+	if (disabled_by_idle_boot_param())
 		return 0;
 
 	cpuidle_unregister_device(&pr->power.dev);
+49 -43
drivers/cpuidle/cpuidle.c
···
 
 	/* enter the state and update stats */
 	dev->last_state = target_state;
+
+	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
+	trace_cpu_idle(next_state, dev->cpu);
+
 	dev->last_residency = target_state->enter(dev, target_state);
+
+	trace_power_end(dev->cpu);
+	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+
 	if (dev->last_state)
 		target_state = dev->last_state;
···
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
-	trace_power_end(smp_processor_id());
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /**
···
 
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+	ktime_t	t1, t2;
+	s64 diff;
+	int ret;
+
+	t1 = ktime_get();
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+
+	t2 = ktime_get();
+	diff = ktime_to_us(ktime_sub(t2, t1));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	ret = (int) diff;
+	return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+	struct cpuidle_state *state = &dev->states[0];
+
+	cpuidle_set_statedata(state, NULL);
+
+	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+	state->exit_latency = 0;
+	state->target_residency = 0;
+	state->power_usage = -1;
+	state->flags = 0;
+	state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
 /**
  * cpuidle_enable_device - enables idle PM for a CPU
  * @dev: the CPU
···
 		if (ret)
 			return ret;
 	}
+
+	poll_idle_init(dev);
 
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
···
 
 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
 
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
-{
-	ktime_t	t1, t2;
-	s64 diff;
-	int ret;
-
-	t1 = ktime_get();
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-
-	t2 = ktime_get();
-	diff = ktime_to_us(ktime_sub(t2, t1));
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	ret = (int) diff;
-	return ret;
-}
-
-static void poll_idle_init(struct cpuidle_device *dev)
-{
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
-
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
-	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
-	state->exit_latency = 0;
-	state->target_residency = 0;
-	state->power_usage = -1;
-	state->flags = CPUIDLE_FLAG_POLL;
-	state->enter = poll_idle;
-}
-#else
-static void poll_idle_init(struct cpuidle_device *dev) {}
-#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
-
 /**
  * __cpuidle_register_device - internal register function called before register
  * and enable routines
···
 		return -EINVAL;
 
 	init_completion(&dev->kobj_unregister);
-
-	poll_idle_init(dev);
 
 	/*
 	 * cpuidle driver should set the dev->power_specified bit
+62 -7
drivers/idle/intel_idle.c
···
 #include <linux/hrtimer.h>	/* ktime_get_real() */
 #include <trace/events/power.h>
 #include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <asm/mwait.h>
 
 #define INTEL_IDLE_VERSION "0.4"
···
 
 static unsigned int mwait_substates;
 
+#define LAPIC_TIMER_ALWAYS_RELIABLE	0xFFFFFFFF
 /* Reliable LAPIC Timer States, bit 1 for C1 etc. */
 static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
 
···
 static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 
 static struct cpuidle_state *cpuidle_state_table;
+
+/*
+ * Set this flag for states where the HW flushes the TLB for us
+ * and so we don't need cross-calls to keep it consistent.
+ * If this flag is set, SW flushes the TLB, so even if the
+ * HW doesn't do the flushing, this flag is safe to use.
+ */
+#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
 
 /*
  * States are indexed by the cstate number,
···
 		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
-		.target_residency = 4,
+		.target_residency = 1,
 		.enter = &intel_idle },
 	{ /* MWAIT C2 */
 		.name = "SNB-C3",
···
 		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
-		.target_residency = 160,
+		.target_residency = 211,
 		.enter = &intel_idle },
 	{ /* MWAIT C3 */
 		.name = "SNB-C6",
···
 		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
-		.target_residency = 208,
+		.target_residency = 345,
 		.enter = &intel_idle },
 	{ /* MWAIT C4 */
 		.name = "SNB-C7",
···
 		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
-		.target_residency = 300,
+		.target_residency = 345,
 		.enter = &intel_idle },
 };
 
···
 	kt_before = ktime_get_real();
 
 	stop_critical_timings();
-	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
-	trace_cpu_idle((eax >> 4) + 1, cpu);
 	if (!need_resched()) {
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
···
 
 	return usec_delta;
 }
+
+static void __setup_broadcast_timer(void *arg)
+{
+	unsigned long reason = (unsigned long)arg;
+	int cpu = smp_processor_id();
+
+	reason = reason ?
+		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+	clockevents_notify(reason, &cpu);
+}
+
+static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
+		unsigned long action, void *hcpu)
+{
+	int hotcpu = (unsigned long)hcpu;
+
+	switch (action & 0xf) {
+	case CPU_ONLINE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+				(void *)true, 1);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(hotcpu, __setup_broadcast_timer,
+				(void *)false, 1);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata setup_broadcast_notifier = {
+	.notifier_call = setup_broadcast_cpuhp_notify,
+};
 
 /*
  * intel_idle_probe()
···
 	}
 
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
-		lapic_timer_reliable_states = 0xFFFFFFFF;
+		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+	else {
+		smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+		register_cpu_notifier(&setup_broadcast_notifier);
+	}
 
 	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
 		" model 0x%X\n", boot_cpu_data.x86_model);
···
 {
 	int retval;
 
+	/* Do not load intel_idle at all for now if idle= is passed */
+	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+		return -ENODEV;
+
 	retval = intel_idle_probe();
 	if (retval)
 		return retval;
···
 {
 	intel_idle_cpuidle_devices_uninit();
 	cpuidle_unregister_driver(&intel_idle_driver);
+
+	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+		smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+		unregister_cpu_notifier(&setup_broadcast_notifier);
+	}
 
 	return;
 }
-6
include/linux/cpuidle.h
···
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
-#define CPUIDLE_FLAG_CHECK_BM	(0x02) /* BM activity will exit state */
-#define CPUIDLE_FLAG_POLL	(0x10) /* no latency, no savings */
-#define CPUIDLE_FLAG_SHALLOW	(0x20) /* low latency, minimal savings */
-#define CPUIDLE_FLAG_BALANCED	(0x40) /* medium latency, moderate savings */
-#define CPUIDLE_FLAG_DEEP	(0x80) /* high latency, large savings */
 #define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
-#define CPUIDLE_FLAG_TLB_FLUSHED	(0x200) /* tlb will be flushed */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
 