Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'acpi-dev-pm' into acpi-enumeration

Subsequent commits in this branch will depend on 'acpi-dev-pm'
material.

+1337 -330
+31
Documentation/ABI/testing/sysfs-devices-power
··· 204 204 205 205 This attribute has no effect on system-wide suspend/resume and 206 206 hibernation. 207 + 208 + What: /sys/devices/.../power/pm_qos_no_power_off 209 + Date: September 2012 210 + Contact: Rafael J. Wysocki <rjw@sisk.pl> 211 + Description: 212 + The /sys/devices/.../power/pm_qos_no_power_off attribute 213 + is used for manipulating the PM QoS "no power off" flag. If 214 + set, this flag indicates to the kernel that power should not 215 + be removed entirely from the device. 216 + 217 + Not all drivers support this attribute. If it isn't supported, 218 + it is not present. 219 + 220 + This attribute has no effect on system-wide suspend/resume and 221 + hibernation. 222 + 223 + What: /sys/devices/.../power/pm_qos_remote_wakeup 224 + Date: September 2012 225 + Contact: Rafael J. Wysocki <rjw@sisk.pl> 226 + Description: 227 + The /sys/devices/.../power/pm_qos_remote_wakeup attribute 228 + is used for manipulating the PM QoS "remote wakeup required" 229 + flag. If set, this flag indicates to the kernel that the 230 + device is a source of user events that have to be signaled from 231 + its low-power states. 232 + 233 + Not all drivers support this attribute. If it isn't supported, 234 + it is not present. 235 + 236 + This attribute has no effect on system-wide suspend/resume and 237 + hibernation.
+1 -1
Documentation/power/pm_qos_interface.txt
··· 99 99 100 100 From kernel mode the use of this interface is the following: 101 101 102 - int dev_pm_qos_add_request(device, handle, value): 102 + int dev_pm_qos_add_request(device, handle, type, value): 103 103 Will insert an element into the list for that identified device with the 104 104 target value. Upon change to this list the new target is recomputed and any 105 105 registered notifiers are called only if the target value is now different.
+2 -1
drivers/acpi/Makefile
··· 21 21 acpi-y += osl.o utils.o reboot.o 22 22 acpi-y += nvs.o 23 23 24 - # sleep related files 24 + # Power management related files 25 25 acpi-y += wakeup.o 26 26 acpi-y += sleep.o 27 + acpi-$(CONFIG_PM) += device_pm.o 27 28 acpi-$(CONFIG_ACPI_SLEEP) += proc.o 28 29 29 30
+18 -3
drivers/acpi/bus.c
··· 257 257 } 258 258 259 259 260 - static int __acpi_bus_set_power(struct acpi_device *device, int state) 260 + /** 261 + * acpi_device_set_power - Set power state of an ACPI device. 262 + * @device: Device to set the power state of. 263 + * @state: New power state to set. 264 + * 265 + * Callers must ensure that the device is power manageable before using this 266 + * function. 267 + */ 268 + int acpi_device_set_power(struct acpi_device *device, int state) 261 269 { 262 270 int result = 0; 263 271 acpi_status status = AE_OK; ··· 306 298 * a lower-powered state. 307 299 */ 308 300 if (state < device->power.state) { 301 + if (device->power.state >= ACPI_STATE_D3_HOT && 302 + state != ACPI_STATE_D0) { 303 + printk(KERN_WARNING PREFIX 304 + "Cannot transition to non-D0 state from D3\n"); 305 + return -ENODEV; 306 + } 309 307 if (device->power.flags.power_resources) { 310 308 result = acpi_power_transition(device, state); 311 309 if (result) ··· 355 341 356 342 return result; 357 343 } 344 + EXPORT_SYMBOL(acpi_device_set_power); 358 345 359 346 360 347 int acpi_bus_set_power(acpi_handle handle, int state) ··· 374 359 return -ENODEV; 375 360 } 376 361 377 - return __acpi_bus_set_power(device, state); 362 + return acpi_device_set_power(device, state); 378 363 } 379 364 EXPORT_SYMBOL(acpi_bus_set_power); 380 365 ··· 417 402 if (result) 418 403 return result; 419 404 420 - result = __acpi_bus_set_power(device, state); 405 + result = acpi_device_set_power(device, state); 421 406 if (!result && state_p) 422 407 *state_p = state; 423 408
+668
drivers/acpi/device_pm.c
··· 1 + /* 2 + * drivers/acpi/device_pm.c - ACPI device power management routines. 3 + * 4 + * Copyright (C) 2012, Intel Corp. 5 + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 6 + * 7 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as published 11 + * by the Free Software Foundation. 12 + * 13 + * This program is distributed in the hope that it will be useful, but 14 + * WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 + * General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License along 19 + * with this program; if not, write to the Free Software Foundation, Inc., 20 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 21 + * 22 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 + */ 24 + 25 + #include <linux/device.h> 26 + #include <linux/export.h> 27 + #include <linux/mutex.h> 28 + #include <linux/pm_qos.h> 29 + #include <linux/pm_runtime.h> 30 + 31 + #include <acpi/acpi.h> 32 + #include <acpi/acpi_bus.h> 33 + 34 + static DEFINE_MUTEX(acpi_pm_notifier_lock); 35 + 36 + /** 37 + * acpi_add_pm_notifier - Register PM notifier for given ACPI device. 38 + * @adev: ACPI device to add the notifier for. 39 + * @context: Context information to pass to the notifier routine. 40 + * 41 + * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of 42 + * PM wakeup events. For example, wakeup events may be generated for bridges 43 + * if one of the devices below the bridge is signaling wakeup, even if the 44 + * bridge itself doesn't have a wakeup GPE associated with it. 
45 + */ 46 + acpi_status acpi_add_pm_notifier(struct acpi_device *adev, 47 + acpi_notify_handler handler, void *context) 48 + { 49 + acpi_status status = AE_ALREADY_EXISTS; 50 + 51 + mutex_lock(&acpi_pm_notifier_lock); 52 + 53 + if (adev->wakeup.flags.notifier_present) 54 + goto out; 55 + 56 + status = acpi_install_notify_handler(adev->handle, 57 + ACPI_SYSTEM_NOTIFY, 58 + handler, context); 59 + if (ACPI_FAILURE(status)) 60 + goto out; 61 + 62 + adev->wakeup.flags.notifier_present = true; 63 + 64 + out: 65 + mutex_unlock(&acpi_pm_notifier_lock); 66 + return status; 67 + } 68 + 69 + /** 70 + * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device. 71 + * @adev: ACPI device to remove the notifier from. 72 + */ 73 + acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, 74 + acpi_notify_handler handler) 75 + { 76 + acpi_status status = AE_BAD_PARAMETER; 77 + 78 + mutex_lock(&acpi_pm_notifier_lock); 79 + 80 + if (!adev->wakeup.flags.notifier_present) 81 + goto out; 82 + 83 + status = acpi_remove_notify_handler(adev->handle, 84 + ACPI_SYSTEM_NOTIFY, 85 + handler); 86 + if (ACPI_FAILURE(status)) 87 + goto out; 88 + 89 + adev->wakeup.flags.notifier_present = false; 90 + 91 + out: 92 + mutex_unlock(&acpi_pm_notifier_lock); 93 + return status; 94 + } 95 + 96 + /** 97 + * acpi_device_power_state - Get preferred power state of ACPI device. 98 + * @dev: Device whose preferred target power state to return. 99 + * @adev: ACPI device node corresponding to @dev. 100 + * @target_state: System state to match the resultant device state. 101 + * @d_max_in: Deepest low-power state to take into consideration. 102 + * @d_min_p: Location to store the upper limit of the allowed states range. 
103 + * Return value: Preferred power state of the device on success, -ENODEV 104 + * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure 105 + * 106 + * Find the lowest power (highest number) ACPI device power state that the 107 + * device can be in while the system is in the state represented by 108 + * @target_state. If @d_min_p is set, the highest power (lowest number) device 109 + * power state that @dev can be in for the given system sleep state is stored 110 + * at the location pointed to by it. 111 + * 112 + * Callers must ensure that @dev and @adev are valid pointers and that @adev 113 + * actually corresponds to @dev before using this function. 114 + */ 115 + int acpi_device_power_state(struct device *dev, struct acpi_device *adev, 116 + u32 target_state, int d_max_in, int *d_min_p) 117 + { 118 + char acpi_method[] = "_SxD"; 119 + unsigned long long d_min, d_max; 120 + bool wakeup = false; 121 + 122 + if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3) 123 + return -EINVAL; 124 + 125 + if (d_max_in > ACPI_STATE_D3_HOT) { 126 + enum pm_qos_flags_status stat; 127 + 128 + stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF); 129 + if (stat == PM_QOS_FLAGS_ALL) 130 + d_max_in = ACPI_STATE_D3_HOT; 131 + } 132 + 133 + acpi_method[2] = '0' + target_state; 134 + /* 135 + * If the sleep state is S0, the lowest limit from ACPI is D3, 136 + * but if the device has _S0W, we will use the value from _S0W 137 + * as the lowest limit from ACPI. Finally, we will constrain 138 + * the lowest limit with the specified one. 139 + */ 140 + d_min = ACPI_STATE_D0; 141 + d_max = ACPI_STATE_D3; 142 + 143 + /* 144 + * If present, _SxD methods return the minimum D-state (highest power 145 + * state) we can use for the corresponding S-states. Otherwise, the 146 + * minimum D-state is D0 (ACPI 3.x). 147 + * 148 + * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer 149 + * provided -- that's our fault recovery, we ignore retval. 
150 + */ 151 + if (target_state > ACPI_STATE_S0) { 152 + acpi_evaluate_integer(adev->handle, acpi_method, NULL, &d_min); 153 + wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid 154 + && adev->wakeup.sleep_state >= target_state; 155 + } else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) != 156 + PM_QOS_FLAGS_NONE) { 157 + wakeup = adev->wakeup.flags.valid; 158 + } 159 + 160 + /* 161 + * If _PRW says we can wake up the system from the target sleep state, 162 + * the D-state returned by _SxD is sufficient for that (we assume a 163 + * wakeup-aware driver if wake is set). Still, if _SxW exists 164 + * (ACPI 3.x), it should return the maximum (lowest power) D-state that 165 + * can wake the system. _S0W may be valid, too. 166 + */ 167 + if (wakeup) { 168 + acpi_status status; 169 + 170 + acpi_method[3] = 'W'; 171 + status = acpi_evaluate_integer(adev->handle, acpi_method, NULL, 172 + &d_max); 173 + if (ACPI_FAILURE(status)) { 174 + if (target_state != ACPI_STATE_S0 || 175 + status != AE_NOT_FOUND) 176 + d_max = d_min; 177 + } else if (d_max < d_min) { 178 + /* Warn the user of the broken DSDT */ 179 + printk(KERN_WARNING "ACPI: Wrong value from %s\n", 180 + acpi_method); 181 + /* Sanitize it */ 182 + d_min = d_max; 183 + } 184 + } 185 + 186 + if (d_max_in < d_min) 187 + return -EINVAL; 188 + if (d_min_p) 189 + *d_min_p = d_min; 190 + /* constrain d_max with specified lowest limit (max number) */ 191 + if (d_max > d_max_in) { 192 + for (d_max = d_max_in; d_max > d_min; d_max--) { 193 + if (adev->power.states[d_max].flags.valid) 194 + break; 195 + } 196 + } 197 + return d_max; 198 + } 199 + EXPORT_SYMBOL_GPL(acpi_device_power_state); 200 + 201 + /** 202 + * acpi_pm_device_sleep_state - Get preferred power state of ACPI device. 203 + * @dev: Device whose preferred target power state to return. 204 + * @d_min_p: Location to store the upper limit of the allowed states range. 205 + * @d_max_in: Deepest low-power state to take into consideration. 
206 + * Return value: Preferred power state of the device on success, -ENODEV 207 + * (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure 208 + * 209 + * The caller must ensure that @dev is valid before using this function. 210 + */ 211 + int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) 212 + { 213 + acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 214 + struct acpi_device *adev; 215 + 216 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 217 + dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); 218 + return -ENODEV; 219 + } 220 + 221 + return acpi_device_power_state(dev, adev, acpi_target_system_state(), 222 + d_max_in, d_min_p); 223 + } 224 + EXPORT_SYMBOL(acpi_pm_device_sleep_state); 225 + 226 + #ifdef CONFIG_PM_RUNTIME 227 + /** 228 + * acpi_wakeup_device - Wakeup notification handler for ACPI devices. 229 + * @handle: ACPI handle of the device the notification is for. 230 + * @event: Type of the signaled event. 231 + * @context: Device corresponding to @handle. 232 + */ 233 + static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context) 234 + { 235 + struct device *dev = context; 236 + 237 + if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) { 238 + pm_wakeup_event(dev, 0); 239 + pm_runtime_resume(dev); 240 + } 241 + } 242 + 243 + /** 244 + * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device. 245 + * @adev: ACPI device to enable/disable the remote wakeup for. 246 + * @enable: Whether to enable or disable the wakeup functionality. 247 + * 248 + * Enable/disable the GPE associated with @adev so that it can generate 249 + * wakeup signals for the device in response to external (remote) events and 250 + * enable/disable device wakeup power. 251 + * 252 + * Callers must ensure that @adev is a valid ACPI device node before executing 253 + * this function. 
254 + */ 255 + int __acpi_device_run_wake(struct acpi_device *adev, bool enable) 256 + { 257 + struct acpi_device_wakeup *wakeup = &adev->wakeup; 258 + 259 + if (enable) { 260 + acpi_status res; 261 + int error; 262 + 263 + error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0); 264 + if (error) 265 + return error; 266 + 267 + res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number); 268 + if (ACPI_FAILURE(res)) { 269 + acpi_disable_wakeup_device_power(adev); 270 + return -EIO; 271 + } 272 + } else { 273 + acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number); 274 + acpi_disable_wakeup_device_power(adev); 275 + } 276 + return 0; 277 + } 278 + 279 + /** 280 + * acpi_pm_device_run_wake - Enable/disable remote wakeup for given device. 281 + * @dev: Device to enable/disable the platform to wake up. 282 + * @enable: Whether to enable or disable the wakeup functionality. 283 + */ 284 + int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) 285 + { 286 + struct acpi_device *adev; 287 + acpi_handle handle; 288 + 289 + if (!device_run_wake(phys_dev)) 290 + return -EINVAL; 291 + 292 + handle = DEVICE_ACPI_HANDLE(phys_dev); 293 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 294 + dev_dbg(phys_dev, "ACPI handle without context in %s!\n", 295 + __func__); 296 + return -ENODEV; 297 + } 298 + 299 + return __acpi_device_run_wake(adev, enable); 300 + } 301 + EXPORT_SYMBOL(acpi_pm_device_run_wake); 302 + #else 303 + static inline void acpi_wakeup_device(acpi_handle handle, u32 event, 304 + void *context) {} 305 + #endif /* CONFIG_PM_RUNTIME */ 306 + 307 + #ifdef CONFIG_PM_SLEEP 308 + /** 309 + * __acpi_device_sleep_wake - Enable or disable device to wake up the system. 310 + * @dev: Device to enable/disable to wake up the system. 311 + * @target_state: System state the device is supposed to wake up from. 312 + * @enable: Whether to enable or disable @dev to wake up the system. 
313 + */ 314 + int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state, 315 + bool enable) 316 + { 317 + return enable ? 318 + acpi_enable_wakeup_device_power(adev, target_state) : 319 + acpi_disable_wakeup_device_power(adev); 320 + } 321 + 322 + /** 323 + * acpi_pm_device_sleep_wake - Enable or disable device to wake up the system. 324 + * @dev: Device to enable/disable to wake up the system from sleep states. 325 + * @enable: Whether to enable or disable @dev to wake up the system. 326 + */ 327 + int acpi_pm_device_sleep_wake(struct device *dev, bool enable) 328 + { 329 + acpi_handle handle; 330 + struct acpi_device *adev; 331 + int error; 332 + 333 + if (!device_can_wakeup(dev)) 334 + return -EINVAL; 335 + 336 + handle = DEVICE_ACPI_HANDLE(dev); 337 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 338 + dev_dbg(dev, "ACPI handle without context in %s!\n", __func__); 339 + return -ENODEV; 340 + } 341 + 342 + error = __acpi_device_sleep_wake(adev, acpi_target_system_state(), 343 + enable); 344 + if (!error) 345 + dev_info(dev, "System wakeup %s by ACPI\n", 346 + enable ? "enabled" : "disabled"); 347 + 348 + return error; 349 + } 350 + #endif /* CONFIG_PM_SLEEP */ 351 + 352 + /** 353 + * acpi_dev_pm_get_node - Get ACPI device node for the given physical device. 354 + * @dev: Device to get the ACPI node for. 355 + */ 356 + static struct acpi_device *acpi_dev_pm_get_node(struct device *dev) 357 + { 358 + acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 359 + struct acpi_device *adev; 360 + 361 + return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ? 362 + adev : NULL; 363 + } 364 + 365 + /** 366 + * acpi_dev_pm_low_power - Put ACPI device into a low-power state. 367 + * @dev: Device to put into a low-power state. 368 + * @adev: ACPI device node corresponding to @dev. 369 + * @system_state: System state to choose the device state for. 
370 + */ 371 + static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev, 372 + u32 system_state) 373 + { 374 + int power_state; 375 + 376 + if (!acpi_device_power_manageable(adev)) 377 + return 0; 378 + 379 + power_state = acpi_device_power_state(dev, adev, system_state, 380 + ACPI_STATE_D3, NULL); 381 + if (power_state < ACPI_STATE_D0 || power_state > ACPI_STATE_D3) 382 + return -EIO; 383 + 384 + return acpi_device_set_power(adev, power_state); 385 + } 386 + 387 + /** 388 + * acpi_dev_pm_full_power - Put ACPI device into the full-power state. 389 + * @adev: ACPI device node to put into the full-power state. 390 + */ 391 + static int acpi_dev_pm_full_power(struct acpi_device *adev) 392 + { 393 + return acpi_device_power_manageable(adev) ? 394 + acpi_device_set_power(adev, ACPI_STATE_D0) : 0; 395 + } 396 + 397 + #ifdef CONFIG_PM_RUNTIME 398 + /** 399 + * acpi_dev_runtime_suspend - Put device into a low-power state using ACPI. 400 + * @dev: Device to put into a low-power state. 401 + * 402 + * Put the given device into a runtime low-power state using the standard ACPI 403 + * mechanism. Set up remote wakeup if desired, choose the state to put the 404 + * device into (this checks if remote wakeup is expected to work too), and set 405 + * the power state of the device. 
406 + */ 407 + int acpi_dev_runtime_suspend(struct device *dev) 408 + { 409 + struct acpi_device *adev = acpi_dev_pm_get_node(dev); 410 + bool remote_wakeup; 411 + int error; 412 + 413 + if (!adev) 414 + return 0; 415 + 416 + remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) > 417 + PM_QOS_FLAGS_NONE; 418 + error = __acpi_device_run_wake(adev, remote_wakeup); 419 + if (remote_wakeup && error) 420 + return -EAGAIN; 421 + 422 + error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); 423 + if (error) 424 + __acpi_device_run_wake(adev, false); 425 + 426 + return error; 427 + } 428 + EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend); 429 + 430 + /** 431 + * acpi_dev_runtime_resume - Put device into the full-power state using ACPI. 432 + * @dev: Device to put into the full-power state. 433 + * 434 + * Put the given device into the full-power state using the standard ACPI 435 + * mechanism at run time. Set the power state of the device to ACPI D0 and 436 + * disable remote wakeup. 437 + */ 438 + int acpi_dev_runtime_resume(struct device *dev) 439 + { 440 + struct acpi_device *adev = acpi_dev_pm_get_node(dev); 441 + int error; 442 + 443 + if (!adev) 444 + return 0; 445 + 446 + error = acpi_dev_pm_full_power(adev); 447 + __acpi_device_run_wake(adev, false); 448 + return error; 449 + } 450 + EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume); 451 + 452 + /** 453 + * acpi_subsys_runtime_suspend - Suspend device using ACPI. 454 + * @dev: Device to suspend. 455 + * 456 + * Carry out the generic runtime suspend procedure for @dev and use ACPI to put 457 + * it into a runtime low-power state. 458 + */ 459 + int acpi_subsys_runtime_suspend(struct device *dev) 460 + { 461 + int ret = pm_generic_runtime_suspend(dev); 462 + return ret ? ret : acpi_dev_runtime_suspend(dev); 463 + } 464 + EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend); 465 + 466 + /** 467 + * acpi_subsys_runtime_resume - Resume device using ACPI. 468 + * @dev: Device to Resume. 
469 + * 470 + * Use ACPI to put the given device into the full-power state and carry out the 471 + * generic runtime resume procedure for it. 472 + */ 473 + int acpi_subsys_runtime_resume(struct device *dev) 474 + { 475 + int ret = acpi_dev_runtime_resume(dev); 476 + return ret ? ret : pm_generic_runtime_resume(dev); 477 + } 478 + EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume); 479 + #endif /* CONFIG_PM_RUNTIME */ 480 + 481 + #ifdef CONFIG_PM_SLEEP 482 + /** 483 + * acpi_dev_suspend_late - Put device into a low-power state using ACPI. 484 + * @dev: Device to put into a low-power state. 485 + * 486 + * Put the given device into a low-power state during system transition to a 487 + * sleep state using the standard ACPI mechanism. Set up system wakeup if 488 + * desired, choose the state to put the device into (this checks if system 489 + * wakeup is expected to work too), and set the power state of the device. 490 + */ 491 + int acpi_dev_suspend_late(struct device *dev) 492 + { 493 + struct acpi_device *adev = acpi_dev_pm_get_node(dev); 494 + u32 target_state; 495 + bool wakeup; 496 + int error; 497 + 498 + if (!adev) 499 + return 0; 500 + 501 + target_state = acpi_target_system_state(); 502 + wakeup = device_may_wakeup(dev); 503 + error = __acpi_device_sleep_wake(adev, target_state, wakeup); 504 + if (wakeup && error) 505 + return error; 506 + 507 + error = acpi_dev_pm_low_power(dev, adev, target_state); 508 + if (error) 509 + __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); 510 + 511 + return error; 512 + } 513 + EXPORT_SYMBOL_GPL(acpi_dev_suspend_late); 514 + 515 + /** 516 + * acpi_dev_resume_early - Put device into the full-power state using ACPI. 517 + * @dev: Device to put into the full-power state. 518 + * 519 + * Put the given device into the full-power state using the standard ACPI 520 + * mechanism during system transition to the working state. Set the power 521 + * state of the device to ACPI D0 and disable remote wakeup. 
522 + */ 523 + int acpi_dev_resume_early(struct device *dev) 524 + { 525 + struct acpi_device *adev = acpi_dev_pm_get_node(dev); 526 + int error; 527 + 528 + if (!adev) 529 + return 0; 530 + 531 + error = acpi_dev_pm_full_power(adev); 532 + __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false); 533 + return error; 534 + } 535 + EXPORT_SYMBOL_GPL(acpi_dev_resume_early); 536 + 537 + /** 538 + * acpi_subsys_prepare - Prepare device for system transition to a sleep state. 539 + * @dev: Device to prepare. 540 + */ 541 + int acpi_subsys_prepare(struct device *dev) 542 + { 543 + /* 544 + * Follow PCI and resume devices suspended at run time before running 545 + * their system suspend callbacks. 546 + */ 547 + pm_runtime_resume(dev); 548 + return pm_generic_prepare(dev); 549 + } 550 + EXPORT_SYMBOL_GPL(acpi_subsys_prepare); 551 + 552 + /** 553 + * acpi_subsys_suspend_late - Suspend device using ACPI. 554 + * @dev: Device to suspend. 555 + * 556 + * Carry out the generic late suspend procedure for @dev and use ACPI to put 557 + * it into a low-power state during system transition into a sleep state. 558 + */ 559 + int acpi_subsys_suspend_late(struct device *dev) 560 + { 561 + int ret = pm_generic_suspend_late(dev); 562 + return ret ? ret : acpi_dev_suspend_late(dev); 563 + } 564 + EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); 565 + 566 + /** 567 + * acpi_subsys_resume_early - Resume device using ACPI. 568 + * @dev: Device to Resume. 569 + * 570 + * Use ACPI to put the given device into the full-power state and carry out the 571 + * generic early resume procedure for it during system transition into the 572 + * working state. 573 + */ 574 + int acpi_subsys_resume_early(struct device *dev) 575 + { 576 + int ret = acpi_dev_resume_early(dev); 577 + return ret ? 
ret : pm_generic_resume_early(dev); 578 + } 579 + EXPORT_SYMBOL_GPL(acpi_subsys_resume_early); 580 + #endif /* CONFIG_PM_SLEEP */ 581 + 582 + static struct dev_pm_domain acpi_general_pm_domain = { 583 + .ops = { 584 + #ifdef CONFIG_PM_RUNTIME 585 + .runtime_suspend = acpi_subsys_runtime_suspend, 586 + .runtime_resume = acpi_subsys_runtime_resume, 587 + .runtime_idle = pm_generic_runtime_idle, 588 + #endif 589 + #ifdef CONFIG_PM_SLEEP 590 + .prepare = acpi_subsys_prepare, 591 + .suspend_late = acpi_subsys_suspend_late, 592 + .resume_early = acpi_subsys_resume_early, 593 + .poweroff_late = acpi_subsys_suspend_late, 594 + .restore_early = acpi_subsys_resume_early, 595 + #endif 596 + }, 597 + }; 598 + 599 + /** 600 + * acpi_dev_pm_attach - Prepare device for ACPI power management. 601 + * @dev: Device to prepare. 602 + * @power_on: Whether or not to power on the device. 603 + * 604 + * If @dev has a valid ACPI handle that has a valid struct acpi_device object 605 + * attached to it, install a wakeup notification handler for the device and 606 + * add it to the general ACPI PM domain. If @power_on is set, the device will 607 + * be put into the ACPI D0 state before the function returns. 608 + * 609 + * This assumes that the @dev's bus type uses generic power management callbacks 610 + * (or doesn't use any power management callbacks at all). 611 + * 612 + * Callers must ensure proper synchronization of this function with power 613 + * management callbacks. 
614 + */ 615 + int acpi_dev_pm_attach(struct device *dev, bool power_on) 616 + { 617 + struct acpi_device *adev = acpi_dev_pm_get_node(dev); 618 + 619 + if (!adev) 620 + return -ENODEV; 621 + 622 + if (dev->pm_domain) 623 + return -EEXIST; 624 + 625 + acpi_add_pm_notifier(adev, acpi_wakeup_device, dev); 626 + dev->pm_domain = &acpi_general_pm_domain; 627 + if (power_on) { 628 + acpi_dev_pm_full_power(adev); 629 + __acpi_device_run_wake(adev, false); 630 + } 631 + return 0; 632 + } 633 + EXPORT_SYMBOL_GPL(acpi_dev_pm_attach); 634 + 635 + /** 636 + * acpi_dev_pm_detach - Remove ACPI power management from the device. 637 + * @dev: Device to take care of. 638 + * @power_off: Whether or not to try to remove power from the device. 639 + * 640 + * Remove the device from the general ACPI PM domain and remove its wakeup 641 + * notifier. If @power_off is set, additionally remove power from the device if 642 + * possible. 643 + * 644 + * Callers must ensure proper synchronization of this function with power 645 + * management callbacks. 646 + */ 647 + void acpi_dev_pm_detach(struct device *dev, bool power_off) 648 + { 649 + struct acpi_device *adev = acpi_dev_pm_get_node(dev); 650 + 651 + if (adev && dev->pm_domain == &acpi_general_pm_domain) { 652 + dev->pm_domain = NULL; 653 + acpi_remove_pm_notifier(adev, acpi_wakeup_device); 654 + if (power_off) { 655 + /* 656 + * If the device's PM QoS resume latency limit or flags 657 + * have been exposed to user space, they have to be 658 + * hidden at this point, so that they don't affect the 659 + * choice of the low-power state to put the device into. 660 + */ 661 + dev_pm_qos_hide_latency_limit(dev); 662 + dev_pm_qos_hide_flags(dev); 663 + __acpi_device_run_wake(adev, false); 664 + acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0); 665 + } 666 + } 667 + } 668 + EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
+8 -1
drivers/acpi/scan.c
··· 1006 1006 * D3hot is only valid if _PR3 present. 1007 1007 */ 1008 1008 if (ps->resources.count || 1009 - (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) 1009 + (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) { 1010 1010 ps->flags.valid = 1; 1011 + ps->flags.os_accessible = 1; 1012 + } 1011 1013 1012 1014 ps->power = -1; /* Unknown - driver assigned */ 1013 1015 ps->latency = -1; /* Unknown - driver assigned */ ··· 1024 1022 /* Set D3cold's explicit_set flag if _PS3 exists. */ 1025 1023 if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set) 1026 1024 device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1; 1025 + 1026 + /* Presence of _PS3 or _PRx means we can put the device into D3 cold */ 1027 + if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set || 1028 + device->power.flags.power_resources) 1029 + device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; 1027 1030 1028 1031 acpi_bus_init_power(device); 1029 1032
+6 -172
drivers/acpi/sleep.c
··· 18 18 #include <linux/reboot.h> 19 19 #include <linux/acpi.h> 20 20 #include <linux/module.h> 21 - #include <linux/pm_runtime.h> 22 21 23 22 #include <asm/io.h> 24 23 ··· 80 81 81 82 #ifdef CONFIG_ACPI_SLEEP 82 83 static u32 acpi_target_sleep_state = ACPI_STATE_S0; 84 + 85 + u32 acpi_target_system_state(void) 86 + { 87 + return acpi_target_sleep_state; 88 + } 89 + 83 90 static bool pwr_btn_event_pending; 84 91 85 92 /* ··· 685 680 return hibernate(); 686 681 return -EINVAL; 687 682 } 688 - 689 - #ifdef CONFIG_PM 690 - /** 691 - * acpi_pm_device_sleep_state - return preferred power state of ACPI device 692 - * in the system sleep state given by %acpi_target_sleep_state 693 - * @dev: device to examine; its driver model wakeup flags control 694 - * whether it should be able to wake up the system 695 - * @d_min_p: used to store the upper limit of allowed states range 696 - * @d_max_in: specify the lowest allowed states 697 - * Return value: preferred power state of the device on success, -ENODEV 698 - * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure 699 - * 700 - * Find the lowest power (highest number) ACPI device power state that 701 - * device @dev can be in while the system is in the sleep state represented 702 - * by %acpi_target_sleep_state. If @wake is nonzero, the device should be 703 - * able to wake up the system from this sleep state. If @d_min_p is set, 704 - * the highest power (lowest number) device power state of @dev allowed 705 - * in this system sleep state is stored at the location pointed to by it. 706 - * 707 - * The caller must ensure that @dev is valid before using this function. 708 - * The caller is also responsible for figuring out if the device is 709 - * supposed to be able to wake up the system and passing this information 710 - * via @wake. 
711 - */ 712 - 713 - int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in) 714 - { 715 - acpi_handle handle = DEVICE_ACPI_HANDLE(dev); 716 - struct acpi_device *adev; 717 - char acpi_method[] = "_SxD"; 718 - unsigned long long d_min, d_max; 719 - 720 - if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3) 721 - return -EINVAL; 722 - if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 723 - printk(KERN_DEBUG "ACPI handle has no context!\n"); 724 - return -ENODEV; 725 - } 726 - 727 - acpi_method[2] = '0' + acpi_target_sleep_state; 728 - /* 729 - * If the sleep state is S0, the lowest limit from ACPI is D3, 730 - * but if the device has _S0W, we will use the value from _S0W 731 - * as the lowest limit from ACPI. Finally, we will constrain 732 - * the lowest limit with the specified one. 733 - */ 734 - d_min = ACPI_STATE_D0; 735 - d_max = ACPI_STATE_D3; 736 - 737 - /* 738 - * If present, _SxD methods return the minimum D-state (highest power 739 - * state) we can use for the corresponding S-states. Otherwise, the 740 - * minimum D-state is D0 (ACPI 3.x). 741 - * 742 - * NOTE: We rely on acpi_evaluate_integer() not clobbering the integer 743 - * provided -- that's our fault recovery, we ignore retval. 744 - */ 745 - if (acpi_target_sleep_state > ACPI_STATE_S0) 746 - acpi_evaluate_integer(handle, acpi_method, NULL, &d_min); 747 - 748 - /* 749 - * If _PRW says we can wake up the system from the target sleep state, 750 - * the D-state returned by _SxD is sufficient for that (we assume a 751 - * wakeup-aware driver if wake is set). Still, if _SxW exists 752 - * (ACPI 3.x), it should return the maximum (lowest power) D-state that 753 - * can wake the system. _S0W may be valid, too. 
754 - */ 755 - if (acpi_target_sleep_state == ACPI_STATE_S0 || 756 - (device_may_wakeup(dev) && adev->wakeup.flags.valid && 757 - adev->wakeup.sleep_state >= acpi_target_sleep_state)) { 758 - acpi_status status; 759 - 760 - acpi_method[3] = 'W'; 761 - status = acpi_evaluate_integer(handle, acpi_method, NULL, 762 - &d_max); 763 - if (ACPI_FAILURE(status)) { 764 - if (acpi_target_sleep_state != ACPI_STATE_S0 || 765 - status != AE_NOT_FOUND) 766 - d_max = d_min; 767 - } else if (d_max < d_min) { 768 - /* Warn the user of the broken DSDT */ 769 - printk(KERN_WARNING "ACPI: Wrong value from %s\n", 770 - acpi_method); 771 - /* Sanitize it */ 772 - d_min = d_max; 773 - } 774 - } 775 - 776 - if (d_max_in < d_min) 777 - return -EINVAL; 778 - if (d_min_p) 779 - *d_min_p = d_min; 780 - /* constrain d_max with specified lowest limit (max number) */ 781 - if (d_max > d_max_in) { 782 - for (d_max = d_max_in; d_max > d_min; d_max--) { 783 - if (adev->power.states[d_max].flags.valid) 784 - break; 785 - } 786 - } 787 - return d_max; 788 - } 789 - EXPORT_SYMBOL(acpi_pm_device_sleep_state); 790 - #endif /* CONFIG_PM */ 791 - 792 - #ifdef CONFIG_PM_SLEEP 793 - /** 794 - * acpi_pm_device_run_wake - Enable/disable wake-up for given device. 795 - * @phys_dev: Device to enable/disable the platform to wake-up the system for. 796 - * @enable: Whether enable or disable the wake-up functionality. 797 - * 798 - * Find the ACPI device object corresponding to @pci_dev and try to 799 - * enable/disable the GPE associated with it. 
800 - */ 801 - int acpi_pm_device_run_wake(struct device *phys_dev, bool enable) 802 - { 803 - struct acpi_device *dev; 804 - acpi_handle handle; 805 - 806 - if (!device_run_wake(phys_dev)) 807 - return -EINVAL; 808 - 809 - handle = DEVICE_ACPI_HANDLE(phys_dev); 810 - if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) { 811 - dev_dbg(phys_dev, "ACPI handle has no context in %s!\n", 812 - __func__); 813 - return -ENODEV; 814 - } 815 - 816 - if (enable) { 817 - acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0); 818 - acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); 819 - } else { 820 - acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); 821 - acpi_disable_wakeup_device_power(dev); 822 - } 823 - 824 - return 0; 825 - } 826 - EXPORT_SYMBOL(acpi_pm_device_run_wake); 827 - 828 - /** 829 - * acpi_pm_device_sleep_wake - enable or disable the system wake-up 830 - * capability of given device 831 - * @dev: device to handle 832 - * @enable: 'true' - enable, 'false' - disable the wake-up capability 833 - */ 834 - int acpi_pm_device_sleep_wake(struct device *dev, bool enable) 835 - { 836 - acpi_handle handle; 837 - struct acpi_device *adev; 838 - int error; 839 - 840 - if (!device_can_wakeup(dev)) 841 - return -EINVAL; 842 - 843 - handle = DEVICE_ACPI_HANDLE(dev); 844 - if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) { 845 - dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__); 846 - return -ENODEV; 847 - } 848 - 849 - error = enable ? 850 - acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) : 851 - acpi_disable_wakeup_device_power(adev); 852 - if (!error) 853 - dev_info(dev, "wake-up capability %s by ACPI\n", 854 - enable ? "enabled" : "disabled"); 855 - 856 - return error; 857 - } 858 - #endif /* CONFIG_PM_SLEEP */ 859 683 860 684 static void acpi_power_off_prepare(void) 861 685 {
+10 -1
drivers/base/power/domain.c
··· 470 470 return -EBUSY; 471 471 472 472 not_suspended = 0; 473 - list_for_each_entry(pdd, &genpd->dev_list, list_node) 473 + list_for_each_entry(pdd, &genpd->dev_list, list_node) { 474 + enum pm_qos_flags_status stat; 475 + 476 + stat = dev_pm_qos_flags(pdd->dev, 477 + PM_QOS_FLAG_NO_POWER_OFF 478 + | PM_QOS_FLAG_REMOTE_WAKEUP); 479 + if (stat > PM_QOS_FLAGS_NONE) 480 + return -EBUSY; 481 + 474 482 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev) 475 483 || pdd->dev->power.irq_safe)) 476 484 not_suspended++; 485 + } 477 486 478 487 if (not_suspended > genpd->in_progress) 479 488 return -EBUSY;
+4 -2
drivers/base/power/power.h
··· 93 93 extern void rpm_sysfs_remove(struct device *dev); 94 94 extern int wakeup_sysfs_add(struct device *dev); 95 95 extern void wakeup_sysfs_remove(struct device *dev); 96 - extern int pm_qos_sysfs_add(struct device *dev); 97 - extern void pm_qos_sysfs_remove(struct device *dev); 96 + extern int pm_qos_sysfs_add_latency(struct device *dev); 97 + extern void pm_qos_sysfs_remove_latency(struct device *dev); 98 + extern int pm_qos_sysfs_add_flags(struct device *dev); 99 + extern void pm_qos_sysfs_remove_flags(struct device *dev); 98 100 99 101 #else /* CONFIG_PM */ 100 102
+246 -62
drivers/base/power/qos.c
··· 40 40 #include <linux/device.h> 41 41 #include <linux/mutex.h> 42 42 #include <linux/export.h> 43 + #include <linux/pm_runtime.h> 43 44 44 45 #include "power.h" 45 46 46 47 static DEFINE_MUTEX(dev_pm_qos_mtx); 47 48 48 49 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 50 + 51 + /** 52 + * __dev_pm_qos_flags - Check PM QoS flags for a given device. 53 + * @dev: Device to check the PM QoS flags for. 54 + * @mask: Flags to check against. 55 + * 56 + * This routine must be called with dev->power.lock held. 57 + */ 58 + enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) 59 + { 60 + struct dev_pm_qos *qos = dev->power.qos; 61 + struct pm_qos_flags *pqf; 62 + s32 val; 63 + 64 + if (!qos) 65 + return PM_QOS_FLAGS_UNDEFINED; 66 + 67 + pqf = &qos->flags; 68 + if (list_empty(&pqf->list)) 69 + return PM_QOS_FLAGS_UNDEFINED; 70 + 71 + val = pqf->effective_flags & mask; 72 + if (val) 73 + return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME; 74 + 75 + return PM_QOS_FLAGS_NONE; 76 + } 77 + 78 + /** 79 + * dev_pm_qos_flags - Check PM QoS flags for a given device (locked). 80 + * @dev: Device to check the PM QoS flags for. 81 + * @mask: Flags to check against. 82 + */ 83 + enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) 84 + { 85 + unsigned long irqflags; 86 + enum pm_qos_flags_status ret; 87 + 88 + spin_lock_irqsave(&dev->power.lock, irqflags); 89 + ret = __dev_pm_qos_flags(dev, mask); 90 + spin_unlock_irqrestore(&dev->power.lock, irqflags); 91 + 92 + return ret; 93 + } 49 94 50 95 /** 51 96 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. ··· 100 55 */ 101 56 s32 __dev_pm_qos_read_value(struct device *dev) 102 57 { 103 - struct pm_qos_constraints *c = dev->power.constraints; 104 - 105 - return c ? pm_qos_read_value(c) : 0; 58 + return dev->power.qos ? 
pm_qos_read_value(&dev->power.qos->latency) : 0; 106 59 } 107 60 108 61 /** ··· 119 76 return ret; 120 77 } 121 78 122 - /* 123 - * apply_constraint 124 - * @req: constraint request to apply 125 - * @action: action to perform add/update/remove, of type enum pm_qos_req_action 126 - * @value: defines the qos request 79 + /** 80 + * apply_constraint - Add/modify/remove device PM QoS request. 81 + * @req: Constraint request to apply 82 + * @action: Action to perform (add/update/remove). 83 + * @value: Value to assign to the QoS request. 127 84 * 128 85 * Internal function to update the constraints list using the PM QoS core 129 86 * code and if needed call the per-device and the global notification 130 87 * callbacks 131 88 */ 132 89 static int apply_constraint(struct dev_pm_qos_request *req, 133 - enum pm_qos_req_action action, int value) 90 + enum pm_qos_req_action action, s32 value) 134 91 { 135 - int ret, curr_value; 92 + struct dev_pm_qos *qos = req->dev->power.qos; 93 + int ret; 136 94 137 - ret = pm_qos_update_target(req->dev->power.constraints, 138 - &req->node, action, value); 139 - 140 - if (ret) { 141 - /* Call the global callbacks if needed */ 142 - curr_value = pm_qos_read_value(req->dev->power.constraints); 143 - blocking_notifier_call_chain(&dev_pm_notifiers, 144 - (unsigned long)curr_value, 145 - req); 95 + switch(req->type) { 96 + case DEV_PM_QOS_LATENCY: 97 + ret = pm_qos_update_target(&qos->latency, &req->data.pnode, 98 + action, value); 99 + if (ret) { 100 + value = pm_qos_read_value(&qos->latency); 101 + blocking_notifier_call_chain(&dev_pm_notifiers, 102 + (unsigned long)value, 103 + req); 104 + } 105 + break; 106 + case DEV_PM_QOS_FLAGS: 107 + ret = pm_qos_update_flags(&qos->flags, &req->data.flr, 108 + action, value); 109 + break; 110 + default: 111 + ret = -EINVAL; 146 112 } 147 113 148 114 return ret; ··· 166 114 */ 167 115 static int dev_pm_qos_constraints_allocate(struct device *dev) 168 116 { 117 + struct dev_pm_qos *qos; 169 118 struct 
pm_qos_constraints *c; 170 119 struct blocking_notifier_head *n; 171 120 172 - c = kzalloc(sizeof(*c), GFP_KERNEL); 173 - if (!c) 121 + qos = kzalloc(sizeof(*qos), GFP_KERNEL); 122 + if (!qos) 174 123 return -ENOMEM; 175 124 176 125 n = kzalloc(sizeof(*n), GFP_KERNEL); 177 126 if (!n) { 178 - kfree(c); 127 + kfree(qos); 179 128 return -ENOMEM; 180 129 } 181 130 BLOCKING_INIT_NOTIFIER_HEAD(n); 182 131 132 + c = &qos->latency; 183 133 plist_head_init(&c->list); 184 134 c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; 185 135 c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; 186 136 c->type = PM_QOS_MIN; 187 137 c->notifiers = n; 188 138 139 + INIT_LIST_HEAD(&qos->flags.list); 140 + 189 141 spin_lock_irq(&dev->power.lock); 190 - dev->power.constraints = c; 142 + dev->power.qos = qos; 191 143 spin_unlock_irq(&dev->power.lock); 192 144 193 145 return 0; ··· 207 151 void dev_pm_qos_constraints_init(struct device *dev) 208 152 { 209 153 mutex_lock(&dev_pm_qos_mtx); 210 - dev->power.constraints = NULL; 154 + dev->power.qos = NULL; 211 155 dev->power.power_state = PMSG_ON; 212 156 mutex_unlock(&dev_pm_qos_mtx); 213 157 } ··· 220 164 */ 221 165 void dev_pm_qos_constraints_destroy(struct device *dev) 222 166 { 167 + struct dev_pm_qos *qos; 223 168 struct dev_pm_qos_request *req, *tmp; 224 169 struct pm_qos_constraints *c; 225 170 ··· 233 176 mutex_lock(&dev_pm_qos_mtx); 234 177 235 178 dev->power.power_state = PMSG_INVALID; 236 - c = dev->power.constraints; 237 - if (!c) 179 + qos = dev->power.qos; 180 + if (!qos) 238 181 goto out; 239 182 183 + c = &qos->latency; 240 184 /* Flush the constraints list for the device */ 241 - plist_for_each_entry_safe(req, tmp, &c->list, node) { 185 + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { 242 186 /* 243 187 * Update constraints list and call the notification 244 188 * callbacks if needed ··· 249 191 } 250 192 251 193 spin_lock_irq(&dev->power.lock); 252 - dev->power.constraints = NULL; 194 + dev->power.qos = NULL; 253 195 
spin_unlock_irq(&dev->power.lock); 254 196 255 197 kfree(c->notifiers); 256 - kfree(c); 198 + kfree(qos); 257 199 258 200 out: 259 201 mutex_unlock(&dev_pm_qos_mtx); ··· 263 205 * dev_pm_qos_add_request - inserts new qos request into the list 264 206 * @dev: target device for the constraint 265 207 * @req: pointer to a preallocated handle 208 + * @type: type of the request 266 209 * @value: defines the qos request 267 210 * 268 211 * This function inserts a new entry in the device constraints list of ··· 277 218 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory 278 219 * to allocate for data structures, -ENODEV if the device has just been removed 279 220 * from the system. 221 + * 222 + * Callers should ensure that the target device is not RPM_SUSPENDED before 223 + * using this function for requests of type DEV_PM_QOS_FLAGS. 280 224 */ 281 225 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, 282 - s32 value) 226 + enum dev_pm_qos_req_type type, s32 value) 283 227 { 284 228 int ret = 0; 285 229 ··· 297 235 298 236 mutex_lock(&dev_pm_qos_mtx); 299 237 300 - if (!dev->power.constraints) { 238 + if (!dev->power.qos) { 301 239 if (dev->power.power_state.event == PM_EVENT_INVALID) { 302 240 /* The device has been removed from the system. */ 303 241 req->dev = NULL; ··· 313 251 } 314 252 } 315 253 316 - if (!ret) 254 + if (!ret) { 255 + req->type = type; 317 256 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); 257 + } 318 258 319 259 out: 320 260 mutex_unlock(&dev_pm_qos_mtx); ··· 324 260 return ret; 325 261 } 326 262 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); 263 + 264 + /** 265 + * __dev_pm_qos_update_request - Modify an existing device PM QoS request. 266 + * @req : PM QoS request to modify. 267 + * @new_value: New value to request. 
268 + */ 269 + static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, 270 + s32 new_value) 271 + { 272 + s32 curr_value; 273 + int ret = 0; 274 + 275 + if (!req->dev->power.qos) 276 + return -ENODEV; 277 + 278 + switch(req->type) { 279 + case DEV_PM_QOS_LATENCY: 280 + curr_value = req->data.pnode.prio; 281 + break; 282 + case DEV_PM_QOS_FLAGS: 283 + curr_value = req->data.flr.flags; 284 + break; 285 + default: 286 + return -EINVAL; 287 + } 288 + 289 + if (curr_value != new_value) 290 + ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); 291 + 292 + return ret; 293 + } 327 294 328 295 /** 329 296 * dev_pm_qos_update_request - modifies an existing qos request ··· 370 275 * 0 if the aggregated constraint value has not changed, 371 276 * -EINVAL in case of wrong parameters, -ENODEV if the device has been 372 277 * removed from the system 278 + * 279 + * Callers should ensure that the target device is not RPM_SUSPENDED before 280 + * using this function for requests of type DEV_PM_QOS_FLAGS. 
373 281 */ 374 - int dev_pm_qos_update_request(struct dev_pm_qos_request *req, 375 - s32 new_value) 282 + int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) 376 283 { 377 - int ret = 0; 284 + int ret; 378 285 379 286 if (!req) /*guard against callers passing in null */ 380 287 return -EINVAL; ··· 386 289 return -EINVAL; 387 290 388 291 mutex_lock(&dev_pm_qos_mtx); 389 - 390 - if (req->dev->power.constraints) { 391 - if (new_value != req->node.prio) 392 - ret = apply_constraint(req, PM_QOS_UPDATE_REQ, 393 - new_value); 394 - } else { 395 - /* Return if the device has been removed */ 396 - ret = -ENODEV; 397 - } 398 - 292 + ret = __dev_pm_qos_update_request(req, new_value); 399 293 mutex_unlock(&dev_pm_qos_mtx); 294 + 400 295 return ret; 401 296 } 402 297 EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); ··· 404 315 * 0 if the aggregated constraint value has not changed, 405 316 * -EINVAL in case of wrong parameters, -ENODEV if the device has been 406 317 * removed from the system 318 + * 319 + * Callers should ensure that the target device is not RPM_SUSPENDED before 320 + * using this function for requests of type DEV_PM_QOS_FLAGS. 407 321 */ 408 322 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) 409 323 { ··· 421 329 422 330 mutex_lock(&dev_pm_qos_mtx); 423 331 424 - if (req->dev->power.constraints) { 332 + if (req->dev->power.qos) { 425 333 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, 426 334 PM_QOS_DEFAULT_VALUE); 427 335 memset(req, 0, sizeof(*req)); ··· 454 362 455 363 mutex_lock(&dev_pm_qos_mtx); 456 364 457 - if (!dev->power.constraints) 365 + if (!dev->power.qos) 458 366 ret = dev->power.power_state.event != PM_EVENT_INVALID ? 
459 367 dev_pm_qos_constraints_allocate(dev) : -ENODEV; 460 368 461 369 if (!ret) 462 370 ret = blocking_notifier_chain_register( 463 - dev->power.constraints->notifiers, notifier); 371 + dev->power.qos->latency.notifiers, notifier); 464 372 465 373 mutex_unlock(&dev_pm_qos_mtx); 466 374 return ret; ··· 485 393 mutex_lock(&dev_pm_qos_mtx); 486 394 487 395 /* Silently return if the constraints object is not present. */ 488 - if (dev->power.constraints) 396 + if (dev->power.qos) 489 397 retval = blocking_notifier_chain_unregister( 490 - dev->power.constraints->notifiers, 398 + dev->power.qos->latency.notifiers, 491 399 notifier); 492 400 493 401 mutex_unlock(&dev_pm_qos_mtx); ··· 541 449 ancestor = ancestor->parent; 542 450 543 451 if (ancestor) 544 - error = dev_pm_qos_add_request(ancestor, req, value); 452 + error = dev_pm_qos_add_request(ancestor, req, 453 + DEV_PM_QOS_LATENCY, value); 545 454 546 455 if (error) 547 456 req->dev = NULL; ··· 552 459 EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); 553 460 554 461 #ifdef CONFIG_PM_RUNTIME 555 - static void __dev_pm_qos_drop_user_request(struct device *dev) 462 + static void __dev_pm_qos_drop_user_request(struct device *dev, 463 + enum dev_pm_qos_req_type type) 556 464 { 557 - dev_pm_qos_remove_request(dev->power.pq_req); 558 - dev->power.pq_req = NULL; 465 + switch(type) { 466 + case DEV_PM_QOS_LATENCY: 467 + dev_pm_qos_remove_request(dev->power.qos->latency_req); 468 + dev->power.qos->latency_req = NULL; 469 + break; 470 + case DEV_PM_QOS_FLAGS: 471 + dev_pm_qos_remove_request(dev->power.qos->flags_req); 472 + dev->power.qos->flags_req = NULL; 473 + break; 474 + } 559 475 } 560 476 561 477 /** ··· 580 478 if (!device_is_registered(dev) || value < 0) 581 479 return -EINVAL; 582 480 583 - if (dev->power.pq_req) 481 + if (dev->power.qos && dev->power.qos->latency_req) 584 482 return -EEXIST; 585 483 586 484 req = kzalloc(sizeof(*req), GFP_KERNEL); 587 485 if (!req) 588 486 return -ENOMEM; 589 487 590 - ret = 
dev_pm_qos_add_request(dev, req, value); 488 + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); 591 489 if (ret < 0) 592 490 return ret; 593 491 594 - dev->power.pq_req = req; 595 - ret = pm_qos_sysfs_add(dev); 492 + dev->power.qos->latency_req = req; 493 + ret = pm_qos_sysfs_add_latency(dev); 596 494 if (ret) 597 - __dev_pm_qos_drop_user_request(dev); 495 + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 598 496 599 497 return ret; 600 498 } ··· 606 504 */ 607 505 void dev_pm_qos_hide_latency_limit(struct device *dev) 608 506 { 609 - if (dev->power.pq_req) { 610 - pm_qos_sysfs_remove(dev); 611 - __dev_pm_qos_drop_user_request(dev); 507 + if (dev->power.qos && dev->power.qos->latency_req) { 508 + pm_qos_sysfs_remove_latency(dev); 509 + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 612 510 } 613 511 } 614 512 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 513 + 514 + /** 515 + * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space. 516 + * @dev: Device whose PM QoS flags are to be exposed to user space. 517 + * @val: Initial values of the flags. 
518 + */ 519 + int dev_pm_qos_expose_flags(struct device *dev, s32 val) 520 + { 521 + struct dev_pm_qos_request *req; 522 + int ret; 523 + 524 + if (!device_is_registered(dev)) 525 + return -EINVAL; 526 + 527 + if (dev->power.qos && dev->power.qos->flags_req) 528 + return -EEXIST; 529 + 530 + req = kzalloc(sizeof(*req), GFP_KERNEL); 531 + if (!req) 532 + return -ENOMEM; 533 + 534 + pm_runtime_get_sync(dev); 535 + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); 536 + if (ret < 0) 537 + goto fail; 538 + 539 + dev->power.qos->flags_req = req; 540 + ret = pm_qos_sysfs_add_flags(dev); 541 + if (ret) 542 + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 543 + 544 + fail: 545 + pm_runtime_put(dev); 546 + return ret; 547 + } 548 + EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); 549 + 550 + /** 551 + * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. 552 + * @dev: Device whose PM QoS flags are to be hidden from user space. 553 + */ 554 + void dev_pm_qos_hide_flags(struct device *dev) 555 + { 556 + if (dev->power.qos && dev->power.qos->flags_req) { 557 + pm_qos_sysfs_remove_flags(dev); 558 + pm_runtime_get_sync(dev); 559 + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 560 + pm_runtime_put(dev); 561 + } 562 + } 563 + EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); 564 + 565 + /** 566 + * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space. 567 + * @dev: Device to update the PM QoS flags request for. 568 + * @mask: Flags to set/clear. 569 + * @set: Whether to set or clear the flags (true means set). 
570 + */ 571 + int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) 572 + { 573 + s32 value; 574 + int ret; 575 + 576 + if (!dev->power.qos || !dev->power.qos->flags_req) 577 + return -EINVAL; 578 + 579 + pm_runtime_get_sync(dev); 580 + mutex_lock(&dev_pm_qos_mtx); 581 + 582 + value = dev_pm_qos_requested_flags(dev); 583 + if (set) 584 + value |= mask; 585 + else 586 + value &= ~mask; 587 + 588 + ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); 589 + 590 + mutex_unlock(&dev_pm_qos_mtx); 591 + pm_runtime_put(dev); 592 + 593 + return ret; 594 + } 615 595 #endif /* CONFIG_PM_RUNTIME */
+85 -9
drivers/base/power/sysfs.c
··· 221 221 static ssize_t pm_qos_latency_show(struct device *dev, 222 222 struct device_attribute *attr, char *buf) 223 223 { 224 - return sprintf(buf, "%d\n", dev->power.pq_req->node.prio); 224 + return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev)); 225 225 } 226 226 227 227 static ssize_t pm_qos_latency_store(struct device *dev, ··· 237 237 if (value < 0) 238 238 return -EINVAL; 239 239 240 - ret = dev_pm_qos_update_request(dev->power.pq_req, value); 240 + ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value); 241 241 return ret < 0 ? ret : n; 242 242 } 243 243 244 244 static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, 245 245 pm_qos_latency_show, pm_qos_latency_store); 246 + 247 + static ssize_t pm_qos_no_power_off_show(struct device *dev, 248 + struct device_attribute *attr, 249 + char *buf) 250 + { 251 + return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) 252 + & PM_QOS_FLAG_NO_POWER_OFF)); 253 + } 254 + 255 + static ssize_t pm_qos_no_power_off_store(struct device *dev, 256 + struct device_attribute *attr, 257 + const char *buf, size_t n) 258 + { 259 + int ret; 260 + 261 + if (kstrtoint(buf, 0, &ret)) 262 + return -EINVAL; 263 + 264 + if (ret != 0 && ret != 1) 265 + return -EINVAL; 266 + 267 + ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); 268 + return ret < 0 ? 
ret : n; 269 + } 270 + 271 + static DEVICE_ATTR(pm_qos_no_power_off, 0644, 272 + pm_qos_no_power_off_show, pm_qos_no_power_off_store); 273 + 274 + static ssize_t pm_qos_remote_wakeup_show(struct device *dev, 275 + struct device_attribute *attr, 276 + char *buf) 277 + { 278 + return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) 279 + & PM_QOS_FLAG_REMOTE_WAKEUP)); 280 + } 281 + 282 + static ssize_t pm_qos_remote_wakeup_store(struct device *dev, 283 + struct device_attribute *attr, 284 + const char *buf, size_t n) 285 + { 286 + int ret; 287 + 288 + if (kstrtoint(buf, 0, &ret)) 289 + return -EINVAL; 290 + 291 + if (ret != 0 && ret != 1) 292 + return -EINVAL; 293 + 294 + ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret); 295 + return ret < 0 ? ret : n; 296 + } 297 + 298 + static DEVICE_ATTR(pm_qos_remote_wakeup, 0644, 299 + pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store); 246 300 #endif /* CONFIG_PM_RUNTIME */ 247 301 248 302 #ifdef CONFIG_PM_SLEEP ··· 618 564 .attrs = runtime_attrs, 619 565 }; 620 566 621 - static struct attribute *pm_qos_attrs[] = { 567 + static struct attribute *pm_qos_latency_attrs[] = { 622 568 #ifdef CONFIG_PM_RUNTIME 623 569 &dev_attr_pm_qos_resume_latency_us.attr, 624 570 #endif /* CONFIG_PM_RUNTIME */ 625 571 NULL, 626 572 }; 627 - static struct attribute_group pm_qos_attr_group = { 573 + static struct attribute_group pm_qos_latency_attr_group = { 628 574 .name = power_group_name, 629 - .attrs = pm_qos_attrs, 575 + .attrs = pm_qos_latency_attrs, 576 + }; 577 + 578 + static struct attribute *pm_qos_flags_attrs[] = { 579 + #ifdef CONFIG_PM_RUNTIME 580 + &dev_attr_pm_qos_no_power_off.attr, 581 + &dev_attr_pm_qos_remote_wakeup.attr, 582 + #endif /* CONFIG_PM_RUNTIME */ 583 + NULL, 584 + }; 585 + static struct attribute_group pm_qos_flags_attr_group = { 586 + .name = power_group_name, 587 + .attrs = pm_qos_flags_attrs, 630 588 }; 631 589 632 590 int dpm_sysfs_add(struct device *dev) ··· 681 615 
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 682 616 } 683 617 684 - int pm_qos_sysfs_add(struct device *dev) 618 + int pm_qos_sysfs_add_latency(struct device *dev) 685 619 { 686 - return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group); 620 + return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group); 687 621 } 688 622 689 - void pm_qos_sysfs_remove(struct device *dev) 623 + void pm_qos_sysfs_remove_latency(struct device *dev) 690 624 { 691 - sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group); 625 + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group); 626 + } 627 + 628 + int pm_qos_sysfs_add_flags(struct device *dev) 629 + { 630 + return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); 631 + } 632 + 633 + void pm_qos_sysfs_remove_flags(struct device *dev) 634 + { 635 + sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); 692 636 } 693 637 694 638 void rpm_sysfs_remove(struct device *dev)
+3 -1
drivers/mtd/nand/sh_flctl.c
··· 727 727 728 728 if (!flctl->qos_request) { 729 729 ret = dev_pm_qos_add_request(&flctl->pdev->dev, 730 - &flctl->pm_qos, 100); 730 + &flctl->pm_qos, 731 + DEV_PM_QOS_LATENCY, 732 + 100); 731 733 if (ret < 0) 732 734 dev_err(&flctl->pdev->dev, 733 735 "PM QoS request failed: %d\n", ret);
+11 -68
drivers/pci/pci-acpi.c
··· 17 17 18 18 #include <linux/pci-acpi.h> 19 19 #include <linux/pm_runtime.h> 20 + #include <linux/pm_qos.h> 20 21 #include "pci.h" 21 - 22 - static DEFINE_MUTEX(pci_acpi_pm_notify_mtx); 23 22 24 23 /** 25 24 * pci_acpi_wake_bus - Wake-up notification handler for root buses. ··· 67 68 } 68 69 69 70 /** 70 - * add_pm_notifier - Register PM notifier for given ACPI device. 71 - * @dev: ACPI device to add the notifier for. 72 - * @context: PCI device or bus to check for PME status if an event is signaled. 73 - * 74 - * NOTE: @dev need not be a run-wake or wake-up device to be a valid source of 75 - * PM wake-up events. For example, wake-up events may be generated for bridges 76 - * if one of the devices below the bridge is signaling PME, even if the bridge 77 - * itself doesn't have a wake-up GPE associated with it. 78 - */ 79 - static acpi_status add_pm_notifier(struct acpi_device *dev, 80 - acpi_notify_handler handler, 81 - void *context) 82 - { 83 - acpi_status status = AE_ALREADY_EXISTS; 84 - 85 - mutex_lock(&pci_acpi_pm_notify_mtx); 86 - 87 - if (dev->wakeup.flags.notifier_present) 88 - goto out; 89 - 90 - status = acpi_install_notify_handler(dev->handle, 91 - ACPI_SYSTEM_NOTIFY, 92 - handler, context); 93 - if (ACPI_FAILURE(status)) 94 - goto out; 95 - 96 - dev->wakeup.flags.notifier_present = true; 97 - 98 - out: 99 - mutex_unlock(&pci_acpi_pm_notify_mtx); 100 - return status; 101 - } 102 - 103 - /** 104 - * remove_pm_notifier - Unregister PM notifier from given ACPI device. 105 - * @dev: ACPI device to remove the notifier from. 
106 - */ 107 - static acpi_status remove_pm_notifier(struct acpi_device *dev, 108 - acpi_notify_handler handler) 109 - { 110 - acpi_status status = AE_BAD_PARAMETER; 111 - 112 - mutex_lock(&pci_acpi_pm_notify_mtx); 113 - 114 - if (!dev->wakeup.flags.notifier_present) 115 - goto out; 116 - 117 - status = acpi_remove_notify_handler(dev->handle, 118 - ACPI_SYSTEM_NOTIFY, 119 - handler); 120 - if (ACPI_FAILURE(status)) 121 - goto out; 122 - 123 - dev->wakeup.flags.notifier_present = false; 124 - 125 - out: 126 - mutex_unlock(&pci_acpi_pm_notify_mtx); 127 - return status; 128 - } 129 - 130 - /** 131 71 * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus. 132 72 * @dev: ACPI device to add the notifier for. 133 73 * @pci_bus: PCI bus to walk checking for PME status if an event is signaled. ··· 74 136 acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, 75 137 struct pci_bus *pci_bus) 76 138 { 77 - return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); 139 + return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus); 78 140 } 79 141 80 142 /** ··· 83 145 */ 84 146 acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) 85 147 { 86 - return remove_pm_notifier(dev, pci_acpi_wake_bus); 148 + return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus); 87 149 } 88 150 89 151 /** ··· 94 156 acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, 95 157 struct pci_dev *pci_dev) 96 158 { 97 - return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); 159 + return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev); 98 160 } 99 161 100 162 /** ··· 103 165 */ 104 166 acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) 105 167 { 106 - return remove_pm_notifier(dev, pci_acpi_wake_dev); 168 + return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev); 107 169 } 108 170 109 171 phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) ··· 195 257 return -ENODEV; 196 258 197 259 switch (state) { 260 + case PCI_D3cold: 261 + 
if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == 262 + PM_QOS_FLAGS_ALL) { 263 + error = -EBUSY; 264 + break; 265 + } 198 266 case PCI_D0: 199 267 case PCI_D1: 200 268 case PCI_D2: 201 269 case PCI_D3hot: 202 - case PCI_D3cold: 203 270 error = acpi_bus_set_power(handle, state_conv[state]); 204 271 } 205 272
+69 -3
include/acpi/acpi_bus.h
··· 201 201 struct acpi_device_power_state { 202 202 struct { 203 203 u8 valid:1; 204 + u8 os_accessible:1; 204 205 u8 explicit_set:1; /* _PSx present? */ 205 206 u8 reserved:6; 206 207 } flags; ··· 340 339 unsigned long long *sta); 341 340 int acpi_bus_get_status(struct acpi_device *device); 342 341 int acpi_bus_set_power(acpi_handle handle, int state); 342 + int acpi_device_set_power(struct acpi_device *device, int state); 343 343 int acpi_bus_update_power(acpi_handle handle, int *state_p); 344 344 bool acpi_bus_power_manageable(acpi_handle handle); 345 345 bool acpi_bus_can_wakeup(acpi_handle handle); ··· 418 416 int acpi_disable_wakeup_device_power(struct acpi_device *dev); 419 417 420 418 #ifdef CONFIG_PM 419 + acpi_status acpi_add_pm_notifier(struct acpi_device *adev, 420 + acpi_notify_handler handler, void *context); 421 + acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, 422 + acpi_notify_handler handler); 423 + int acpi_device_power_state(struct device *dev, struct acpi_device *adev, 424 + u32 target_state, int d_max_in, int *d_min_p); 421 425 int acpi_pm_device_sleep_state(struct device *, int *, int); 422 426 #else 423 - static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) 427 + static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, 428 + acpi_notify_handler handler, 429 + void *context) 430 + { 431 + return AE_SUPPORT; 432 + } 433 + static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev, 434 + acpi_notify_handler handler) 435 + { 436 + return AE_SUPPORT; 437 + } 438 + static inline int __acpi_device_power_state(int m, int *p) 424 439 { 425 440 if (p) 426 441 *p = ACPI_STATE_D0; 427 442 return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? 
m : ACPI_STATE_D0; 428 443 } 444 + static inline int acpi_device_power_state(struct device *dev, 445 + struct acpi_device *adev, 446 + u32 target_state, int d_max_in, 447 + int *d_min_p) 448 + { 449 + return __acpi_device_power_state(d_max_in, d_min_p); 450 + } 451 + static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) 452 + { 453 + return __acpi_device_power_state(m, p); 454 + } 455 + #endif 456 + 457 + #ifdef CONFIG_PM_RUNTIME 458 + int __acpi_device_run_wake(struct acpi_device *, bool); 459 + int acpi_pm_device_run_wake(struct device *, bool); 460 + #else 461 + static inline int __acpi_device_run_wake(struct acpi_device *adev, bool en) 462 + { 463 + return -ENODEV; 464 + } 465 + static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) 466 + { 467 + return -ENODEV; 468 + } 429 469 #endif 430 470 431 471 #ifdef CONFIG_PM_SLEEP 432 - int acpi_pm_device_run_wake(struct device *, bool); 472 + int __acpi_device_sleep_wake(struct acpi_device *, u32, bool); 433 473 int acpi_pm_device_sleep_wake(struct device *, bool); 434 474 #else 435 - static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) 475 + static inline int __acpi_device_sleep_wake(struct acpi_device *adev, 476 + u32 target_state, bool enable) 436 477 { 437 478 return -ENODEV; 438 479 } ··· 484 439 return -ENODEV; 485 440 } 486 441 #endif 442 + 443 + #ifdef CONFIG_ACPI_SLEEP 444 + u32 acpi_target_system_state(void); 445 + #else 446 + static inline u32 acpi_target_system_state(void) { return ACPI_STATE_S0; } 447 + #endif 448 + 449 + static inline bool acpi_device_power_manageable(struct acpi_device *adev) 450 + { 451 + return adev->flags.power_manageable; 452 + } 453 + 454 + static inline bool acpi_device_can_wakeup(struct acpi_device *adev) 455 + { 456 + return adev->wakeup.flags.valid; 457 + } 458 + 459 + static inline bool acpi_device_can_poweroff(struct acpi_device *adev) 460 + { 461 + return 
adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible; 462 + } 487 463 488 464 #else /* CONFIG_ACPI */ 489 465
+38
include/linux/acpi.h
··· 25 25 #ifndef _LINUX_ACPI_H 26 26 #define _LINUX_ACPI_H 27 27 28 + #include <linux/errno.h> 28 29 #include <linux/ioport.h>	/* for struct resource */ 29 30 #include <linux/device.h> 30 31 ··· 477 476 u32 pm1a_control, u32 pm1b_control); 478 477 #else 479 478 #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) 479 + #endif 480 + 481 + #if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) 482 + int acpi_dev_runtime_suspend(struct device *dev); 483 + int acpi_dev_runtime_resume(struct device *dev); 484 + int acpi_subsys_runtime_suspend(struct device *dev); 485 + int acpi_subsys_runtime_resume(struct device *dev); 486 + #else 487 + static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } 488 + static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } 489 + static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } 490 + static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } 491 + #endif 492 + 493 + #ifdef CONFIG_ACPI_SLEEP 494 + int acpi_dev_suspend_late(struct device *dev); 495 + int acpi_dev_resume_early(struct device *dev); 496 + int acpi_subsys_prepare(struct device *dev); 497 + int acpi_subsys_suspend_late(struct device *dev); 498 + int acpi_subsys_resume_early(struct device *dev); 499 + #else 500 + static inline int acpi_dev_suspend_late(struct device *dev) { return 0; } 501 + static inline int acpi_dev_resume_early(struct device *dev) { return 0; } 502 + static inline int acpi_subsys_prepare(struct device *dev) { return 0; } 503 + static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } 504 + static inline int acpi_subsys_resume_early(struct device *dev) { return 0; } 505 + #endif 506 + 507 + #if defined(CONFIG_ACPI) && defined(CONFIG_PM) 508 + int acpi_dev_pm_attach(struct device *dev, bool power_on); 509 + void acpi_dev_pm_detach(struct device *dev, bool power_off); 510 + #else 511 + static inline int acpi_dev_pm_attach(struct
device *dev, bool power_on) 512 + { 513 + return -ENODEV; 514 + } 515 + static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {} 480 516 #endif 481 517 482 518 #endif /*_LINUX_ACPI_H*/
+1 -2
include/linux/pm.h
··· 546 546 unsigned long active_jiffies; 547 547 unsigned long suspended_jiffies; 548 548 unsigned long accounting_timestamp; 549 - struct dev_pm_qos_request *pq_req; 550 549 #endif 551 550 struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ 552 - struct pm_qos_constraints *constraints; 551 + struct dev_pm_qos *qos; 553 552 }; 554 553 555 554 extern void update_pm_runtime_accounting(struct device *dev);
+73 -4
include/linux/pm_qos.h
··· 20 20 PM_QOS_NUM_CLASSES, 21 21 }; 22 22 23 + enum pm_qos_flags_status { 24 + PM_QOS_FLAGS_UNDEFINED = -1, 25 + PM_QOS_FLAGS_NONE, 26 + PM_QOS_FLAGS_SOME, 27 + PM_QOS_FLAGS_ALL, 28 + }; 29 + 23 30 #define PM_QOS_DEFAULT_VALUE -1 24 31 25 32 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) ··· 34 27 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 35 28 #define PM_QOS_DEV_LAT_DEFAULT_VALUE 0 36 29 30 + #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) 31 + #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) 32 + 37 33 struct pm_qos_request { 38 34 struct plist_node node; 39 35 int pm_qos_class; 40 36 struct delayed_work work; /* for pm_qos_update_request_timeout */ 41 37 }; 42 38 39 + struct pm_qos_flags_request { 40 + struct list_head node; 41 + s32 flags; /* Do not change to 64 bit */ 42 + }; 43 + 44 + enum dev_pm_qos_req_type { 45 + DEV_PM_QOS_LATENCY = 1, 46 + DEV_PM_QOS_FLAGS, 47 + }; 48 + 43 49 struct dev_pm_qos_request { 44 - struct plist_node node; 50 + enum dev_pm_qos_req_type type; 51 + union { 52 + struct plist_node pnode; 53 + struct pm_qos_flags_request flr; 54 + } data; 45 55 struct device *dev; 46 56 }; 47 57 ··· 69 45 }; 70 46 71 47 /* 72 - * Note: The lockless read path depends on the CPU accessing 73 - * target_value atomically. Atomic access is only guaranteed on all CPU 48 + * Note: The lockless read path depends on the CPU accessing target_value 49 + * or effective_flags atomically. 
Atomic access is only guaranteed on all CPU 74 50 * types linux supports for 32 bit quantites 75 51 */ 76 52 struct pm_qos_constraints { ··· 79 55 s32 default_value; 80 56 enum pm_qos_type type; 81 57 struct blocking_notifier_head *notifiers; 58 + }; 59 + 60 + struct pm_qos_flags { 61 + struct list_head list; 62 + s32 effective_flags; /* Do not change to 64 bit */ 63 + }; 64 + 65 + struct dev_pm_qos { 66 + struct pm_qos_constraints latency; 67 + struct pm_qos_flags flags; 68 + struct dev_pm_qos_request *latency_req; 69 + struct dev_pm_qos_request *flags_req; 82 70 }; 83 71 84 72 /* Action requested to pm_qos_update_target */ ··· 107 71 108 72 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node, 109 73 enum pm_qos_req_action action, int value); 74 + bool pm_qos_update_flags(struct pm_qos_flags *pqf, 75 + struct pm_qos_flags_request *req, 76 + enum pm_qos_req_action action, s32 val); 110 77 void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class, 111 78 s32 value); 112 79 void pm_qos_update_request(struct pm_qos_request *req, ··· 125 86 s32 pm_qos_read_value(struct pm_qos_constraints *c); 126 87 127 88 #ifdef CONFIG_PM 89 + enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask); 90 + enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask); 128 91 s32 __dev_pm_qos_read_value(struct device *dev); 129 92 s32 dev_pm_qos_read_value(struct device *dev); 130 93 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, 131 - s32 value); 94 + enum dev_pm_qos_req_type type, s32 value); 132 95 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value); 133 96 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req); 134 97 int dev_pm_qos_add_notifier(struct device *dev, ··· 144 103 int dev_pm_qos_add_ancestor_request(struct device *dev, 145 104 struct dev_pm_qos_request *req, s32 value); 146 105 #else 106 + static inline enum pm_qos_flags_status 
__dev_pm_qos_flags(struct device *dev, 107 + s32 mask) 108 + { return PM_QOS_FLAGS_UNDEFINED; } 109 + static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, 110 + s32 mask) 111 + { return PM_QOS_FLAGS_UNDEFINED; } 147 112 static inline s32 __dev_pm_qos_read_value(struct device *dev) 148 113 { return 0; } 149 114 static inline s32 dev_pm_qos_read_value(struct device *dev) 150 115 { return 0; } 151 116 static inline int dev_pm_qos_add_request(struct device *dev, 152 117 struct dev_pm_qos_request *req, 118 + enum dev_pm_qos_req_type type, 153 119 s32 value) 154 120 { return 0; } 155 121 static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req, ··· 192 144 #ifdef CONFIG_PM_RUNTIME 193 145 int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); 194 146 void dev_pm_qos_hide_latency_limit(struct device *dev); 147 + int dev_pm_qos_expose_flags(struct device *dev, s32 value); 148 + void dev_pm_qos_hide_flags(struct device *dev); 149 + int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); 150 + 151 + static inline s32 dev_pm_qos_requested_latency(struct device *dev) 152 + { 153 + return dev->power.qos->latency_req->data.pnode.prio; 154 + } 155 + 156 + static inline s32 dev_pm_qos_requested_flags(struct device *dev) 157 + { 158 + return dev->power.qos->flags_req->data.flr.flags; 159 + } 195 160 #else 196 161 static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) 197 162 { return 0; } 198 163 static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} 164 + static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) 165 + { return 0; } 166 + static inline void dev_pm_qos_hide_flags(struct device *dev) {} 167 + static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) 168 + { return 0; } 169 + 170 + static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; } 171 + static inline s32 dev_pm_qos_requested_flags(struct device 
*dev) { return 0; } 199 172 #endif 200 173 201 174 #endif
+63
kernel/power/qos.c
··· 213 213 } 214 214 215 215 /** 216 + * pm_qos_flags_remove_req - Remove device PM QoS flags request. 217 + * @pqf: Device PM QoS flags set to remove the request from. 218 + * @req: Request to remove from the set. 219 + */ 220 + static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf, 221 + struct pm_qos_flags_request *req) 222 + { 223 + s32 val = 0; 224 + 225 + list_del(&req->node); 226 + list_for_each_entry(req, &pqf->list, node) 227 + val |= req->flags; 228 + 229 + pqf->effective_flags = val; 230 + } 231 + 232 + /** 233 + * pm_qos_update_flags - Update a set of PM QoS flags. 234 + * @pqf: Set of flags to update. 235 + * @req: Request to add to the set, to modify, or to remove from the set. 236 + * @action: Action to take on the set. 237 + * @val: Value of the request to add or modify. 238 + * 239 + * Update the given set of PM QoS flags and call notifiers if the aggregate 240 + * value has changed. Returns 1 if the aggregate constraint value has changed, 241 + * 0 otherwise. 242 + */ 243 + bool pm_qos_update_flags(struct pm_qos_flags *pqf, 244 + struct pm_qos_flags_request *req, 245 + enum pm_qos_req_action action, s32 val) 246 + { 247 + unsigned long irqflags; 248 + s32 prev_value, curr_value; 249 + 250 + spin_lock_irqsave(&pm_qos_lock, irqflags); 251 + 252 + prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags; 253 + 254 + switch (action) { 255 + case PM_QOS_REMOVE_REQ: 256 + pm_qos_flags_remove_req(pqf, req); 257 + break; 258 + case PM_QOS_UPDATE_REQ: 259 + pm_qos_flags_remove_req(pqf, req); 260 + case PM_QOS_ADD_REQ: 261 + req->flags = val; 262 + INIT_LIST_HEAD(&req->node); 263 + list_add_tail(&req->node, &pqf->list); 264 + pqf->effective_flags |= val; 265 + break; 266 + default: 267 + /* no action */ 268 + ; 269 + } 270 + 271 + curr_value = list_empty(&pqf->list) ? 
0 : pqf->effective_flags; 272 + 273 + spin_unlock_irqrestore(&pm_qos_lock, irqflags); 274 + 275 + return prev_value != curr_value; 276 + } 277 + 278 + /** 216 279 * pm_qos_request - returns current system wide qos expectation 217 280 * @pm_qos_class: identification of which qos value is requested 218 281 *