Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: idle: Respect the CPU system wakeup QoS limit for s2idle

A CPU system wakeup QoS limit may have been requested by user space. To
avoid breaking this constraint when entering a low power state during
s2idle, let's start to take into account the QoS limit.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dhruva Gole <d-gole@ti.com>
Reviewed-by: Kevin Hilman (TI) <khilman@baylibre.com>
Tested-by: Kevin Hilman (TI) <khilman@baylibre.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://patch.msgid.link/20251125112650.329269-5-ulf.hansson@linaro.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Authored by Ulf Hansson; committed by Rafael J. Wysocki.
99b42445 e2e4695f

+18 -12
+7 -5
drivers/cpuidle/cpuidle.c
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
+* @latency_limit_ns: Idle state exit latency limit
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with frozen tick.
 */
-int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+			 u64 latency_limit_ns)
 {
 	int index;
 
 	/*
-	 * Find the deepest state with ->enter_s2idle present, which guarantees
-	 * that interrupts won't be enabled when it exits and allows the tick to
-	 * be frozen safely.
+	 * Find the deepest state with ->enter_s2idle present that meets the
+	 * specified latency limit, which guarantees that interrupts won't be
+	 * enabled when it exits and allows the tick to be frozen safely.
 	 */
-	index = find_deepest_state(drv, dev, U64_MAX, 0, true);
+	index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
 	if (index > 0) {
 		enter_s2idle_proper(drv, dev, index);
 		local_irq_enable();
+4 -2
include/linux/cpuidle.h
 				      struct cpuidle_device *dev,
 				      u64 latency_limit_ns);
 extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
-				struct cpuidle_device *dev);
+				struct cpuidle_device *dev,
+				u64 latency_limit_ns);
 extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
···
 					     u64 latency_limit_ns)
 {return -ENODEV; }
 static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
-				       struct cpuidle_device *dev)
+				       struct cpuidle_device *dev,
+				       u64 latency_limit_ns)
 {return -ENODEV; }
 static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
+7 -5
kernel/sched/idle.c
 }
 
 static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
-			       struct cpuidle_device *dev)
+			       struct cpuidle_device *dev,
+			       u64 max_latency_ns)
 {
 	if (current_clr_polling_and_test())
 		return -EBUSY;
 
-	return cpuidle_enter_s2idle(drv, dev);
+	return cpuidle_enter_s2idle(drv, dev, max_latency_ns);
 }
 
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
···
 	u64 max_latency_ns;
 
 	if (idle_should_enter_s2idle()) {
+		max_latency_ns = cpu_wakeup_latency_qos_limit() *
+				 NSEC_PER_USEC;
 
-		entered_state = call_cpuidle_s2idle(drv, dev);
+		entered_state = call_cpuidle_s2idle(drv, dev,
+						    max_latency_ns);
 		if (entered_state > 0)
 			goto exit_idle;
-
-		max_latency_ns = U64_MAX;
 	} else {
 		max_latency_ns = dev->forced_idle_latency_limit_ns;
 	}