/*
 * cpuidle.h - a generic framework for CPU idle power management
 *
 * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Shaohua Li <shaohua.li@intel.com>
 *          Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#ifndef _LINUX_CPUIDLE_H
#define _LINUX_CPUIDLE_H

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>

#define CPUIDLE_STATE_MAX	10
#define CPUIDLE_NAME_LEN	16
#define CPUIDLE_DESC_LEN	32

struct module;

struct cpuidle_device;
struct cpuidle_driver;


/****************************
 * CPUIDLE DEVICE INTERFACE *
 ****************************/

#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)

struct cpuidle_state_usage {
	unsigned long long	disable;
	unsigned long long	usage;
	u64			time_ns;
	unsigned long long	above; /* Number of times it's been too deep */
	unsigned long long	below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
	unsigned long long	s2idle_usage;
	unsigned long long	s2idle_time; /* in US */
#endif
};

struct cpuidle_state {
	char		name[CPUIDLE_NAME_LEN];
	char		desc[CPUIDLE_DESC_LEN];

	u64		exit_latency_ns;
	u64		target_residency_ns;
	unsigned int	flags;
	unsigned int	exit_latency; /* in US */
	int		power_usage; /* in mW */
	unsigned int	target_residency; /* in US */

	int (*enter)	(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv,
			 int index);

	int (*enter_dead) (struct cpuidle_device *dev, int index);

	/*
	 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
	 * suspended, so it must not re-enable interrupts at any point (even
	 * temporarily) or attempt to change states of clock event devices.
	 */
	void (*enter_s2idle) (struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index);
};

/* Idle State Flags */
#define CPUIDLE_FLAG_NONE	(0x00)
#define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP	BIT(2) /* timer is stopped on this state */
#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF	BIT(4) /* disable this state by default */
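
/*
 * Illustrative sketch only, not part of the original header: the shape of a
 * driver's ->enter callback.  It is expected to put the CPU into the state
 * selected by @index and to return the index of the state actually entered,
 * or a negative error code.  cpu_do_idle() is assumed here (an ARM-style
 * "wait for interrupt" helper, also used by the CPU_PM helpers at the end of
 * this file); a real driver would substitute its own low-level entry method.
 */
static int my_platform_enter(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* architecture specific low-power entry */

	return index;		/* report the state actually entered */
}
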
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
struct cpuidle_driver_kobj;

struct cpuidle_device {
	unsigned int		registered:1;
	unsigned int		enabled:1;
	unsigned int		poll_time_limit:1;
	unsigned int		cpu;
	ktime_t			next_hrtimer;

	int			last_state_idx;
	u64			last_residency_ns;
	u64			poll_limit_ns;
	u64			forced_idle_latency_limit_ns;
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
	struct cpuidle_driver_kobj *kobj_driver;
	struct cpuidle_device_kobj *kobj_dev;
	struct list_head	device_list;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	cpumask_t		coupled_cpus;
	struct cpuidle_coupled	*coupled;
#endif
};

DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);

/****************************
 * CPUIDLE DRIVER INTERFACE *
 ****************************/

struct cpuidle_driver {
	const char		*name;
	struct module		*owner;

	/* used by the cpuidle framework to setup the broadcast timer */
	unsigned int		bctimer:1;
	/* states array must be ordered in decreasing power consumption */
	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
	int			state_count;
	int			safe_state_index;

	/* the driver handles the cpus in cpumask */
	struct cpumask		*cpumask;

	/* preferred governor to switch at register time */
	const char		*governor;
};
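
/*
 * Illustrative sketch only, not part of the original header: a minimal
 * driver table a platform might provide, using the hypothetical
 * my_platform_enter() callback sketched above.  As noted in the comment in
 * struct cpuidle_driver, states[] is ordered by decreasing power consumption,
 * i.e. index 0 is the shallowest (cheapest to exit) state.
 */
static struct cpuidle_driver my_platform_idle_driver = {
	.name	= "my_platform_idle",
	.states	= {
		{
			.name			= "WFI",
			.desc			= "clock gated idle",
			.exit_latency		= 1,	/* us */
			.target_residency	= 1,	/* us */
			.enter			= my_platform_enter,
		},
		{
			.name			= "PWRDOWN",
			.desc			= "power gated idle",
			.exit_latency		= 100,	/* us */
			.target_residency	= 500,	/* us */
			.flags			= CPUIDLE_FLAG_TIMER_STOP,
			.enter			= my_platform_enter,
		},
	},
	.state_count	= 2,
};
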
#ifdef CONFIG_CPU_IDLE
extern void disable_cpuidle(void);
extern bool cpuidle_not_available(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev);

extern int cpuidle_select(struct cpuidle_driver *drv,
			  struct cpuidle_device *dev,
			  bool *stop_tick);
extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
			     struct cpuidle_device *dev);

extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
					  bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void);
extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);

extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
static inline struct cpuidle_device *cpuidle_get_device(void)
{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
					 struct cpuidle_device *dev)
{return true; }
static inline int cpuidle_select(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev, bool *stop_tick)
{return -ENODEV; }
static inline int cpuidle_enter(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{return -ENODEV; }
static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
				    struct cpuidle_device *dev)
{return 0; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
						 int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline int cpuidle_register(struct cpuidle_driver *drv,
				   const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { }
static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
	struct cpuidle_device *dev) {return NULL; }
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev,
				      u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev,
					     u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif

/* kernel/sched/idle.c */
extern void sched_idle_set_state(struct cpuidle_state *idle_state);
extern void default_idle_call(void);

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
#else
static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
{
}
#endif

#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_ARCH_HAS_CPU_RELAX)
void cpuidle_poll_state_init(struct cpuidle_driver *drv);
#else
static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
#endif
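
/*
 * Illustrative sketch only, not part of the original header: registering the
 * hypothetical driver sketched above.  cpuidle_register() registers the
 * driver and then a cpuidle device for every CPU it covers; passing NULL for
 * coupled_cpus means no coupled states are used.  Drivers needing finer
 * control can instead call cpuidle_register_driver() and then
 * cpuidle_register_device() per CPU.  Assumes <linux/init.h> for __init.
 */
static int __init my_platform_idle_init(void)
{
	return cpuidle_register(&my_platform_idle_driver, NULL);
}
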
/******************************
 * CPUIDLE GOVERNOR INTERFACE *
 ******************************/

struct cpuidle_governor {
	char			name[CPUIDLE_NAME_LEN];
	struct list_head	governor_list;
	unsigned int		rating;

	int  (*enable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);
	void (*disable)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev);

	int  (*select)		(struct cpuidle_driver *drv,
					struct cpuidle_device *dev,
					bool *stop_tick);
	void (*reflect)		(struct cpuidle_device *dev, int index);
};

#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif

#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,			\
				idx,					\
				state,					\
				is_retention)				\
({									\
	int __ret = 0;							\
									\
	if (!idx) {							\
		cpu_do_idle();						\
		return idx;						\
	}								\
									\
	if (!is_retention)						\
		__ret = cpu_pm_enter();					\
	if (!__ret) {							\
		__ret = low_level_idle_enter(state);			\
		if (!is_retention)					\
			cpu_pm_exit();					\
	}								\
									\
	__ret ? -1 : idx;						\
})

#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)

#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)

#endif /* _LINUX_CPUIDLE_H */
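
/*
 * Illustrative sketch only, not part of the original header: an ->enter
 * callback for a power-gating state (such as the PWRDOWN state sketched
 * earlier) can wrap its low-level call with CPU_PM_CPU_IDLE_ENTER().  For
 * index 0 the macro simply executes cpu_do_idle() and returns 0 from the
 * enclosing function; for deeper states it brackets the call with
 * cpu_pm_enter()/cpu_pm_exit() and evaluates to the entered index on success
 * or -1 on failure.  my_firmware_suspend() is a hypothetical helper that
 * takes the state index and returns 0 on success.
 */
static int my_platform_pwrdown_enter(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	return CPU_PM_CPU_IDLE_ENTER(my_firmware_suspend, index);
}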