Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

PM: sleep: Make suspend of devices more asynchronous

In analogy with previous changes, make device_suspend_late() and
device_suspend_noirq() start the async suspend of the device's parent
after the device itself has been processed and make dpm_suspend_late()
and dpm_noirq_suspend_devices() start processing "async" leaf devices
(that is, devices without children) upfront so they don't need to wait
for the other devices they don't depend on.

This change reduces the total duration of device suspend on some systems
measurably, but not significantly.

Suggested-by: Saravana Kannan <saravanak@google.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://patch.msgid.link/1924195.CQOukoFCf9@rjwysocki.net

+56 -8
drivers/base/power/main.c
··· 1308 1308 device_links_read_unlock(idx); 1309 1309 } 1310 1310 1311 + static void async_suspend_noirq(void *data, async_cookie_t cookie); 1312 + 1311 1313 /** 1312 1314 * device_suspend_noirq - Execute a "noirq suspend" callback for given device. 1313 1315 * @dev: Device to handle. ··· 1388 1386 Complete: 1389 1387 complete_all(&dev->power.completion); 1390 1388 TRACE_SUSPEND(error); 1391 - return error; 1389 + 1390 + if (error || async_error) 1391 + return error; 1392 + 1393 + dpm_async_suspend_parent(dev, async_suspend_noirq); 1394 + 1395 + return 0; 1392 1396 } 1393 1397 1394 1398 static void async_suspend_noirq(void *data, async_cookie_t cookie) ··· 1408 1400 static int dpm_noirq_suspend_devices(pm_message_t state) 1409 1401 { 1410 1402 ktime_t starttime = ktime_get(); 1403 + struct device *dev; 1411 1404 int error = 0; 1412 1405 1413 1406 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); ··· 1418 1409 1419 1410 mutex_lock(&dpm_list_mtx); 1420 1411 1412 + /* 1413 + * Start processing "async" leaf devices upfront so they don't need to 1414 + * wait for the "sync" devices they don't depend on. 1415 + */ 1416 + list_for_each_entry_reverse(dev, &dpm_late_early_list, power.entry) { 1417 + dpm_clear_async_state(dev); 1418 + if (dpm_leaf_device(dev)) 1419 + dpm_async_with_cleanup(dev, async_suspend_noirq); 1420 + } 1421 + 1421 1422 while (!list_empty(&dpm_late_early_list)) { 1422 - struct device *dev = to_device(dpm_late_early_list.prev); 1423 + dev = to_device(dpm_late_early_list.prev); 1423 1424 1424 1425 list_move(&dev->power.entry, &dpm_noirq_list); 1425 1426 1426 - dpm_clear_async_state(dev); 1427 1427 if (dpm_async_fn(dev, async_suspend_noirq)) 1428 1428 continue; 1429 1429 ··· 1446 1428 1447 1429 mutex_lock(&dpm_list_mtx); 1448 1430 1449 - if (error || async_error) 1431 + if (error || async_error) { 1432 + /* 1433 + * Move all devices to the target list to resume them 1434 + * properly. 
1435 + */ 1436 + list_splice(&dpm_late_early_list, &dpm_noirq_list); 1450 1437 break; 1438 + } 1451 1439 } 1452 1440 1453 1441 mutex_unlock(&dpm_list_mtx); ··· 1505 1481 1506 1482 spin_unlock_irq(&parent->power.lock); 1507 1483 } 1484 + 1485 + static void async_suspend_late(void *data, async_cookie_t cookie); 1508 1486 1509 1487 /** 1510 1488 * device_suspend_late - Execute a "late suspend" callback for given device. ··· 1584 1558 Complete: 1585 1559 TRACE_SUSPEND(error); 1586 1560 complete_all(&dev->power.completion); 1587 - return error; 1561 + 1562 + if (error || async_error) 1563 + return error; 1564 + 1565 + dpm_async_suspend_parent(dev, async_suspend_late); 1566 + 1567 + return 0; 1588 1568 } 1589 1569 1590 1570 static void async_suspend_late(void *data, async_cookie_t cookie) ··· 1608 1576 int dpm_suspend_late(pm_message_t state) 1609 1577 { 1610 1578 ktime_t starttime = ktime_get(); 1579 + struct device *dev; 1611 1580 int error = 0; 1612 1581 1613 1582 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); ··· 1620 1587 1621 1588 mutex_lock(&dpm_list_mtx); 1622 1589 1590 + /* 1591 + * Start processing "async" leaf devices upfront so they don't need to 1592 + * wait for the "sync" devices they don't depend on. 
1593 + */ 1594 + list_for_each_entry_reverse(dev, &dpm_suspended_list, power.entry) { 1595 + dpm_clear_async_state(dev); 1596 + if (dpm_leaf_device(dev)) 1597 + dpm_async_with_cleanup(dev, async_suspend_late); 1598 + } 1599 + 1623 1600 while (!list_empty(&dpm_suspended_list)) { 1624 - struct device *dev = to_device(dpm_suspended_list.prev); 1601 + dev = to_device(dpm_suspended_list.prev); 1625 1602 1626 1603 list_move(&dev->power.entry, &dpm_late_early_list); 1627 1604 1628 - dpm_clear_async_state(dev); 1629 1605 if (dpm_async_fn(dev, async_suspend_late)) 1630 1606 continue; 1631 1607 ··· 1648 1606 1649 1607 mutex_lock(&dpm_list_mtx); 1650 1608 1651 - if (error || async_error) 1609 + if (error || async_error) { 1610 + /* 1611 + * Move all devices to the target list to resume them 1612 + * properly. 1613 + */ 1614 + list_splice(&dpm_suspended_list, &dpm_late_early_list); 1652 1615 break; 1616 + } 1653 1617 } 1654 1618 1655 1619 mutex_unlock(&dpm_list_mtx);