Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

Merge back PM core changes for v5.17.

+131 -75
+10 -4
Documentation/power/runtime_pm.rst
··· 265 265 RPM_SUSPENDED, which means that each device is initially regarded by the 266 266 PM core as 'suspended', regardless of its real hardware status 267 267 268 + `enum rpm_status last_status;` 269 + - the last runtime PM status of the device captured before disabling runtime 270 + PM for it (invalid initially and when disable_depth is 0) 271 + 268 272 `unsigned int runtime_auto;` 269 273 - if set, indicates that the user space has allowed the device driver to 270 274 power manage the device at run time via the /sys/devices/.../power/control ··· 337 333 338 334 `int pm_runtime_resume(struct device *dev);` 339 335 - execute the subsystem-level resume callback for the device; returns 0 on 340 - success, 1 if the device's runtime PM status was already 'active' or 341 - error code on failure, where -EAGAIN means it may be safe to attempt to 342 - resume the device again in future, but 'power.runtime_error' should be 343 - checked additionally, and -EACCES means that 'power.disable_depth' is 336 + success, 1 if the device's runtime PM status is already 'active' (also if 337 + 'power.disable_depth' is nonzero, but the status was 'active' when it was 338 + changing from 0 to 1) or error code on failure, where -EAGAIN means it may 339 + be safe to attempt to resume the device again in future, but 340 + 'power.runtime_error' should be checked additionally, and -EACCES means 341 + that the callback could not be run, because 'power.disable_depth' was 344 342 different from 0 345 343 346 344 `int pm_runtime_resume_and_get(struct device *dev);`
+1 -2
drivers/base/core.c
··· 485 485 /* Ensure that all references to the link object have been dropped. */ 486 486 device_link_synchronize_removal(); 487 487 488 - while (refcount_dec_not_one(&link->rpm_active)) 489 - pm_runtime_put(link->supplier); 488 + pm_runtime_release_supplier(link, true); 490 489 491 490 put_device(link->consumer); 492 491 put_device(link->supplier);
+55 -31
drivers/base/power/runtime.c
··· 305 305 return 0; 306 306 } 307 307 308 + /** 309 + * pm_runtime_release_supplier - Drop references to device link's supplier. 310 + * @link: Target device link. 311 + * @check_idle: Whether or not to check if the supplier device is idle. 312 + * 313 + * Drop all runtime PM references associated with @link to its supplier device 314 + * and if @check_idle is set, check if that device is idle (and so it can be 315 + * suspended). 316 + */ 317 + void pm_runtime_release_supplier(struct device_link *link, bool check_idle) 318 + { 319 + struct device *supplier = link->supplier; 320 + 321 + /* 322 + * The additional power.usage_count check is a safety net in case 323 + * the rpm_active refcount becomes saturated, in which case 324 + * refcount_dec_not_one() would return true forever, but it is not 325 + * strictly necessary. 326 + */ 327 + while (refcount_dec_not_one(&link->rpm_active) && 328 + atomic_read(&supplier->power.usage_count) > 0) 329 + pm_runtime_put_noidle(supplier); 330 + 331 + if (check_idle) 332 + pm_request_idle(supplier); 333 + } 334 + 308 335 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) 309 336 { 310 337 struct device_link *link; 311 338 312 339 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, 313 - device_links_read_lock_held()) { 314 - 315 - while (refcount_dec_not_one(&link->rpm_active)) 316 - pm_runtime_put_noidle(link->supplier); 317 - 318 - if (try_to_suspend) 319 - pm_request_idle(link->supplier); 320 - } 340 + device_links_read_lock_held()) 341 + pm_runtime_release_supplier(link, try_to_suspend); 321 342 } 322 343 323 344 static void rpm_put_suppliers(struct device *dev)
··· 763 742 trace_rpm_resume_rcuidle(dev, rpmflags); 764 743 765 744 repeat: 766 - if (dev->power.runtime_error) 745 + if (dev->power.runtime_error) { 767 746 retval = -EINVAL; 768 - else if (dev->power.disable_depth == 1 && dev->power.is_suspended 769 - && dev->power.runtime_status == RPM_ACTIVE) 770 - retval = 1; 771 - else if (dev->power.disable_depth > 0) 772 - retval = -EACCES; 747 + } else if (dev->power.disable_depth > 0) { 748 + if (dev->power.runtime_status == RPM_ACTIVE && 749 + dev->power.last_status == RPM_ACTIVE) 750 + retval = 1; 751 + else 752 + retval = -EACCES; 753 + } 773 754 if (retval) 774 755 goto out; 775 756
··· 1433 1410 /* Update time accounting before disabling PM-runtime. */ 1434 1411 update_pm_runtime_accounting(dev); 1435 1412 1436 - if (!dev->power.disable_depth++) 1413 + if (!dev->power.disable_depth++) { 1437 1414 __pm_runtime_barrier(dev); 1415 + dev->power.last_status = dev->power.runtime_status; 1416 + } 1438 1417 1439 1418 out: 1440 1419 spin_unlock_irq(&dev->power.lock);
··· 1453 1428 1454 1429 spin_lock_irqsave(&dev->power.lock, flags); 1455 1430 1456 - if (dev->power.disable_depth > 0) { 1457 - dev->power.disable_depth--; 1458 - 1459 - /* About to enable runtime pm, set accounting_timestamp to now */ 1460 - if (!dev->power.disable_depth) 1461 - dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); 1462 - } else { 1431 + if (!dev->power.disable_depth) { 1463 1432 dev_warn(dev, "Unbalanced %s!\n", __func__); 1433 + goto out; 1464 1434 } 1465 1435 1466 - WARN(!dev->power.disable_depth && 1467 - dev->power.runtime_status == RPM_SUSPENDED && 1468 - !dev->power.ignore_children && 1469 - atomic_read(&dev->power.child_count) > 0, 1470 - "Enabling runtime PM for inactive device (%s) with active children\n", 1471 - dev_name(dev)); 1436 + if (--dev->power.disable_depth > 0) 1437 + goto out; 1472 1438 1439 + dev->power.last_status = RPM_INVALID; 1440 + dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); 1441 + 1442 + if (dev->power.runtime_status == RPM_SUSPENDED && 1443 + !dev->power.ignore_children && 1444 + atomic_read(&dev->power.child_count) > 0) 1445 + dev_warn(dev, "Enabling runtime PM for inactive device with active children\n"); 1446 + 1447 + out: 1473 1448 spin_unlock_irqrestore(&dev->power.lock, flags); 1474 1449 } 1475 1450 EXPORT_SYMBOL_GPL(pm_runtime_enable);
··· 1665 1640 void pm_runtime_init(struct device *dev) 1666 1641 { 1667 1642 dev->power.runtime_status = RPM_SUSPENDED; 1643 + dev->power.last_status = RPM_INVALID; 1668 1644 dev->power.idle_notification = false; 1669 1645 1670 1646 dev->power.disable_depth = 1;
··· 1798 1772 return; 1799 1773 1800 1774 pm_runtime_drop_link_count(link->consumer); 1801 - 1802 - while (refcount_dec_not_one(&link->rpm_active)) 1803 - pm_runtime_put(link->supplier); 1775 + pm_runtime_release_supplier(link, true); 1804 1776 } 1805 1777 1806 1778 static bool pm_runtime_need_not_resume(struct device *dev)
+4 -4
drivers/mmc/host/jz4740_mmc.c
··· 1103 1103 return 0; 1104 1104 } 1105 1105 1106 - static int __maybe_unused jz4740_mmc_suspend(struct device *dev) 1106 + static int jz4740_mmc_suspend(struct device *dev) 1107 1107 { 1108 1108 return pinctrl_pm_select_sleep_state(dev); 1109 1109 } 1110 1110 1111 - static int __maybe_unused jz4740_mmc_resume(struct device *dev) 1111 + static int jz4740_mmc_resume(struct device *dev) 1112 1112 { 1113 1113 return pinctrl_select_default_state(dev); 1114 1114 } 1115 1115 1116 - static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, 1116 + DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, 1117 1117 jz4740_mmc_resume); 1118 1118 1119 1119 static struct platform_driver jz4740_mmc_driver = { ··· 1123 1123 .name = "jz4740-mmc", 1124 1124 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1125 1125 .of_match_table = of_match_ptr(jz4740_mmc_of_match), 1126 - .pm = pm_ptr(&jz4740_mmc_pm_ops), 1126 + .pm = pm_sleep_ptr(&jz4740_mmc_pm_ops), 1127 1127 }, 1128 1128 }; 1129 1129
+2 -4
drivers/mmc/host/mxcmmc.c
··· 1183 1183 return 0; 1184 1184 } 1185 1185 1186 - #ifdef CONFIG_PM_SLEEP 1187 1186 static int mxcmci_suspend(struct device *dev) 1188 1187 { 1189 1188 struct mmc_host *mmc = dev_get_drvdata(dev); ··· 1209 1210 1210 1211 return ret; 1211 1212 } 1212 - #endif 1213 1213 1214 - static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); 1214 + DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); 1215 1215 1216 1216 static struct platform_driver mxcmci_driver = { 1217 1217 .probe = mxcmci_probe, ··· 1218 1220 .driver = { 1219 1221 .name = DRIVER_NAME, 1220 1222 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1221 - .pm = &mxcmci_pm_ops, 1223 + .pm = pm_sleep_ptr(&mxcmci_pm_ops), 1222 1224 .of_match_table = mxcmci_of_match, 1223 1225 } 1224 1226 };
+3 -1
drivers/net/ethernet/realtek/r8169_main.c
··· 5441 5441 .probe = rtl_init_one, 5442 5442 .remove = rtl_remove_one, 5443 5443 .shutdown = rtl_shutdown, 5444 - .driver.pm = pm_ptr(&rtl8169_pm_ops), 5444 + #ifdef CONFIG_PM 5445 + .driver.pm = &rtl8169_pm_ops, 5446 + #endif 5445 5447 }; 5446 5448 5447 5449 module_pci_driver(rtl8169_pci_driver);
+53 -29
include/linux/pm.h
··· 300 300 int (*runtime_idle)(struct device *dev); 301 301 }; 302 302 303 + #define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 304 + .suspend = pm_sleep_ptr(suspend_fn), \ 305 + .resume = pm_sleep_ptr(resume_fn), \ 306 + .freeze = pm_sleep_ptr(suspend_fn), \ 307 + .thaw = pm_sleep_ptr(resume_fn), \ 308 + .poweroff = pm_sleep_ptr(suspend_fn), \ 309 + .restore = pm_sleep_ptr(resume_fn), 310 + 311 + #define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 312 + .suspend_late = pm_sleep_ptr(suspend_fn), \ 313 + .resume_early = pm_sleep_ptr(resume_fn), \ 314 + .freeze_late = pm_sleep_ptr(suspend_fn), \ 315 + .thaw_early = pm_sleep_ptr(resume_fn), \ 316 + .poweroff_late = pm_sleep_ptr(suspend_fn), \ 317 + .restore_early = pm_sleep_ptr(resume_fn), 318 + 319 + #define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 320 + .suspend_noirq = pm_sleep_ptr(suspend_fn), \ 321 + .resume_noirq = pm_sleep_ptr(resume_fn), \ 322 + .freeze_noirq = pm_sleep_ptr(suspend_fn), \ 323 + .thaw_noirq = pm_sleep_ptr(resume_fn), \ 324 + .poweroff_noirq = pm_sleep_ptr(suspend_fn), \ 325 + .restore_noirq = pm_sleep_ptr(resume_fn), 326 + 327 + #define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 328 + .runtime_suspend = suspend_fn, \ 329 + .runtime_resume = resume_fn, \ 330 + .runtime_idle = idle_fn, 331 + 303 332 #ifdef CONFIG_PM_SLEEP 304 333 #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 305 - .suspend = suspend_fn, \ 306 - .resume = resume_fn, \ 307 - .freeze = suspend_fn, \ 308 - .thaw = resume_fn, \ 309 - .poweroff = suspend_fn, \ 310 - .restore = resume_fn, 334 + SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 311 335 #else 312 336 #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 313 337 #endif 314 338 315 339 #ifdef CONFIG_PM_SLEEP 316 340 #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 317 - .suspend_late = suspend_fn, \ 318 - .resume_early = resume_fn, \ 319 - .freeze_late = suspend_fn, \ 320 - .thaw_early = resume_fn, \ 321 - .poweroff_late = suspend_fn, \ 322 - .restore_early = resume_fn, 341 + LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 323 342 #else 324 343 #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 325 344 #endif 326 345 327 346 #ifdef CONFIG_PM_SLEEP 328 347 #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 329 - .suspend_noirq = suspend_fn, \ 330 - .resume_noirq = resume_fn, \ 331 - .freeze_noirq = suspend_fn, \ 332 - .thaw_noirq = resume_fn, \ 333 - .poweroff_noirq = suspend_fn, \ 334 - .restore_noirq = resume_fn, 348 + NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 335 349 #else 336 350 #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 337 351 #endif 338 352 339 353 #ifdef CONFIG_PM 340 354 #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 341 - .runtime_suspend = suspend_fn, \ 342 - .runtime_resume = resume_fn, \ 343 - .runtime_idle = idle_fn, 355 + RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) 344 356 #else 345 357 #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) 346 358 #endif
··· 361 349 * Use this if you want to use the same suspend and resume callbacks for suspend 362 350 * to RAM and hibernation. 363 351 */ 364 - #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ 365 - const struct dev_pm_ops __maybe_unused name = { \ 366 - SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 352 + #define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ 353 + static const struct dev_pm_ops name = { \ 354 + SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 367 355 } 368 356 369 357 /*
··· 379 367 * .resume_early(), to the same routines as .runtime_suspend() and 380 368 * .runtime_resume(), respectively (and analogously for hibernation). 381 369 */ 370 + #define DEFINE_UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ 371 + static const struct dev_pm_ops name = { \ 372 + SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 373 + RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 374 + } 375 + 376 + /* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */ 377 + #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ 378 + const struct dev_pm_ops __maybe_unused name = { \ 379 + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 380 + } 381 + 382 + /* Deprecated. Use DEFINE_UNIVERSAL_DEV_PM_OPS() instead. */ 382 383 #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ 383 384 const struct dev_pm_ops __maybe_unused name = { \ 384 385 SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 385 386 SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 386 387 } 387 388 388 - #ifdef CONFIG_PM 389 - #define pm_ptr(_ptr) (_ptr) 390 - #else 391 - #define pm_ptr(_ptr) NULL 392 - #endif 389 + #define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr)) 390 + #define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr)) 393 391 394 392 /* 395 393 * PM_EVENT_ messages
··· 521 499 */ 522 500 523 501 enum rpm_status { 502 + RPM_INVALID = -1, 524 503 RPM_ACTIVE = 0, 525 504 RPM_RESUMING, 526 505 RPM_SUSPENDED,
··· 635 612 unsigned int links_count; 636 613 enum rpm_request request; 637 614 enum rpm_status runtime_status; 615 + enum rpm_status last_status; 638 616 int runtime_error; 639 617 int autosuspend_delay; 640 618 u64 last_busy;
+3
include/linux/pm_runtime.h
··· 58 58 extern void pm_runtime_put_suppliers(struct device *dev); 59 59 extern void pm_runtime_new_link(struct device *dev); 60 60 extern void pm_runtime_drop_link(struct device_link *link); 61 + extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle); 61 62 62 63 extern int devm_pm_runtime_enable(struct device *dev); 63 64 ··· 284 283 static inline void pm_runtime_put_suppliers(struct device *dev) {} 285 284 static inline void pm_runtime_new_link(struct device *dev) {} 286 285 static inline void pm_runtime_drop_link(struct device_link *link) {} 286 + static inline void pm_runtime_release_supplier(struct device_link *link, 287 + bool check_idle) {} 287 288 288 289 #endif /* !CONFIG_PM */ 289 290