Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'pm-sleep' and 'pm-runtime'

* pm-sleep:
PM / sleep: trace_device_pm_callback coverage in dpm_prepare/complete
PM / wakeup: add a dummy wakeup_source to record statistics
PM / sleep: Make suspend-to-idle-specific code depend on CONFIG_SUSPEND
PM / sleep: Return -EBUSY from suspend_enter() on wakeup detection
PM / tick: Add tracepoints for suspend-to-idle diagnostics
PM / sleep: Fix symbol name in a comment in kernel/power/main.c
leds / PM: fix hibernation on arm when gpio-led used with CPU led trigger
ARM: omap-device: use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS
bus: omap_l3_noc: add missed callbacks for suspend-to-disk
PM / sleep: Add macro to define common noirq system PM callbacks
PM / sleep: Refine diagnostic messages in enter_state()
PM / wakeup: validate wakeup source before activating it.

* pm-runtime:
PM / Runtime: Update last_busy in rpm_resume
PM / runtime: add note about re-calling in during device probe()

+124 -35
+6
Documentation/power/runtime_pm.txt
··· 556 556 should be used. Of course, for this purpose the device's runtime PM has to be 557 557 enabled earlier by calling pm_runtime_enable(). 558 558 559 + Note, if the device may execute pm_runtime calls during the probe (such as 560 + if it registers with a subsystem that may call back in) then the 561 + pm_runtime_get_sync() call paired with a pm_runtime_put() call will be 562 + appropriate to ensure that the device is not put back to sleep during the 563 + probe. This can happen with systems such as the network device layer. 564 + 559 565 It may be desirable to suspend the device once ->probe() has finished. 560 566 Therefore the driver core uses the asyncronous pm_request_idle() to submit a 561 567 request to execute the subsystem-level idle callback for the device at that
+2 -5
arch/arm/mach-omap2/omap_device.c
··· 688 688 SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, 689 689 NULL) 690 690 USE_PLATFORM_PM_SLEEP_OPS 691 - .suspend_noirq = _od_suspend_noirq, 692 - .resume_noirq = _od_resume_noirq, 693 - .freeze_noirq = _od_suspend_noirq, 694 - .thaw_noirq = _od_resume_noirq, 695 - .restore_noirq = _od_resume_noirq, 691 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, 692 + _od_resume_noirq) 696 693 } 697 694 }; 698 695
+5 -6
drivers/base/power/main.c
··· 920 920 921 921 if (callback) { 922 922 pm_dev_dbg(dev, state, info); 923 - trace_device_pm_callback_start(dev, info, state.event); 924 923 callback(dev); 925 - trace_device_pm_callback_end(dev, 0); 926 924 } 927 925 928 926 device_unlock(dev); ··· 952 954 list_move(&dev->power.entry, &list); 953 955 mutex_unlock(&dpm_list_mtx); 954 956 957 + trace_device_pm_callback_start(dev, "", state.event); 955 958 device_complete(dev, state); 959 + trace_device_pm_callback_end(dev, 0); 956 960 957 961 mutex_lock(&dpm_list_mtx); 958 962 put_device(dev); ··· 1585 1585 callback = dev->driver->pm->prepare; 1586 1586 } 1587 1587 1588 - if (callback) { 1589 - trace_device_pm_callback_start(dev, info, state.event); 1588 + if (callback) 1590 1589 ret = callback(dev); 1591 - trace_device_pm_callback_end(dev, ret); 1592 - } 1593 1590 1594 1591 device_unlock(dev); 1595 1592 ··· 1628 1631 get_device(dev); 1629 1632 mutex_unlock(&dpm_list_mtx); 1630 1633 1634 + trace_device_pm_callback_start(dev, "", state.event); 1631 1635 error = device_prepare(dev, state); 1636 + trace_device_pm_callback_end(dev, error); 1632 1637 1633 1638 mutex_lock(&dpm_list_mtx); 1634 1639 if (error) {
+1
drivers/base/power/runtime.c
··· 741 741 } else { 742 742 no_callback: 743 743 __update_runtime_status(dev, RPM_ACTIVE); 744 + pm_runtime_mark_last_busy(dev); 744 745 if (parent) 745 746 atomic_inc(&parent->power.child_count); 746 747 }
+54
drivers/base/power/wakeup.c
··· 56 56 57 57 static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); 58 58 59 + static struct wakeup_source deleted_ws = { 60 + .name = "deleted", 61 + .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), 62 + }; 63 + 59 64 /** 60 65 * wakeup_source_prepare - Prepare a new wakeup source for initialization. 61 66 * @ws: Wakeup source to prepare. ··· 112 107 } 113 108 EXPORT_SYMBOL_GPL(wakeup_source_drop); 114 109 110 + /* 111 + * Record wakeup_source statistics being deleted into a dummy wakeup_source. 112 + */ 113 + static void wakeup_source_record(struct wakeup_source *ws) 114 + { 115 + unsigned long flags; 116 + 117 + spin_lock_irqsave(&deleted_ws.lock, flags); 118 + 119 + if (ws->event_count) { 120 + deleted_ws.total_time = 121 + ktime_add(deleted_ws.total_time, ws->total_time); 122 + deleted_ws.prevent_sleep_time = 123 + ktime_add(deleted_ws.prevent_sleep_time, 124 + ws->prevent_sleep_time); 125 + deleted_ws.max_time = 126 + ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ? 127 + deleted_ws.max_time : ws->max_time; 128 + deleted_ws.event_count += ws->event_count; 129 + deleted_ws.active_count += ws->active_count; 130 + deleted_ws.relax_count += ws->relax_count; 131 + deleted_ws.expire_count += ws->expire_count; 132 + deleted_ws.wakeup_count += ws->wakeup_count; 133 + } 134 + 135 + spin_unlock_irqrestore(&deleted_ws.lock, flags); 136 + } 137 + 115 138 /** 116 139 * wakeup_source_destroy - Destroy a struct wakeup_source object. 117 140 * @ws: Wakeup source to destroy. ··· 152 119 return; 153 120 154 121 wakeup_source_drop(ws); 122 + wakeup_source_record(ws); 155 123 kfree(ws->name); 156 124 kfree(ws); 157 125 } ··· 385 351 } 386 352 EXPORT_SYMBOL_GPL(device_set_wakeup_enable); 387 353 354 + /** 355 + * wakeup_source_not_registered - validate the given wakeup source. 356 + * @ws: Wakeup source to be validated. 
357 + */ 358 + static bool wakeup_source_not_registered(struct wakeup_source *ws) 359 + { 360 + /* 361 + * Use timer struct to check if the given source is initialized 362 + * by wakeup_source_add. 363 + */ 364 + return ws->timer.function != pm_wakeup_timer_fn || 365 + ws->timer.data != (unsigned long)ws; 366 + } 367 + 388 368 /* 389 369 * The functions below use the observation that each wakeup event starts a 390 370 * period in which the system should not be suspended. The moment this period ··· 438 390 static void wakeup_source_activate(struct wakeup_source *ws) 439 391 { 440 392 unsigned int cec; 393 + 394 + if (WARN_ONCE(wakeup_source_not_registered(ws), 395 + "unregistered wakeup source\n")) 396 + return; 441 397 442 398 /* 443 399 * active wakeup source should bring the system ··· 945 893 list_for_each_entry_rcu(ws, &wakeup_sources, entry) 946 894 print_wakeup_source_stats(m, ws); 947 895 rcu_read_unlock(); 896 + 897 + print_wakeup_source_stats(m, &deleted_ws); 948 898 949 899 return 0; 950 900 }
+2 -2
drivers/bus/omap_l3_noc.c
··· 301 301 return ret; 302 302 } 303 303 304 - #ifdef CONFIG_PM 304 + #ifdef CONFIG_PM_SLEEP 305 305 306 306 /** 307 307 * l3_resume_noirq() - resume function for l3_noc ··· 347 347 } 348 348 349 349 static const struct dev_pm_ops l3_dev_pm_ops = { 350 - .resume_noirq = l3_resume_noirq, 350 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq) 351 351 }; 352 352 353 353 #define L3_DEV_PM_OPS (&l3_dev_pm_ops)
+2
drivers/cpuidle/cpuidle.c
··· 97 97 return ret; 98 98 } 99 99 100 + #ifdef CONFIG_SUSPEND 100 101 /** 101 102 * cpuidle_find_deepest_state - Find the deepest available idle state. 102 103 * @drv: cpuidle driver for the given CPU. ··· 151 150 152 151 return index; 153 152 } 153 + #endif /* CONFIG_SUSPEND */ 154 154 155 155 /** 156 156 * cpuidle_enter_state - enter the state and update stats
+3 -4
drivers/leds/led-class.c
··· 187 187 } 188 188 EXPORT_SYMBOL_GPL(led_classdev_resume); 189 189 190 + #ifdef CONFIG_PM_SLEEP 190 191 static int led_suspend(struct device *dev) 191 192 { 192 193 struct led_classdev *led_cdev = dev_get_drvdata(dev); ··· 207 206 208 207 return 0; 209 208 } 209 + #endif 210 210 211 - static const struct dev_pm_ops leds_class_dev_pm_ops = { 212 - .suspend = led_suspend, 213 - .resume = led_resume, 214 - }; 211 + static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume); 215 212 216 213 static int match_name(struct device *dev, const void *data) 217 214 {
+10 -6
include/linux/cpuidle.h
··· 151 151 extern int cpuidle_enable_device(struct cpuidle_device *dev); 152 152 extern void cpuidle_disable_device(struct cpuidle_device *dev); 153 153 extern int cpuidle_play_dead(void); 154 - extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 155 - struct cpuidle_device *dev); 156 - extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, 157 - struct cpuidle_device *dev); 158 154 159 155 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); 160 156 #else ··· 186 190 {return -ENODEV; } 187 191 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } 188 192 static inline int cpuidle_play_dead(void) {return -ENODEV; } 193 + static inline struct cpuidle_driver *cpuidle_get_cpu_driver( 194 + struct cpuidle_device *dev) {return NULL; } 195 + #endif 196 + 197 + #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) 198 + extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 199 + struct cpuidle_device *dev); 200 + extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, 201 + struct cpuidle_device *dev); 202 + #else 189 203 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 190 204 struct cpuidle_device *dev) 191 205 {return -ENODEV; } 192 206 static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, 193 207 struct cpuidle_device *dev) 194 208 {return -ENODEV; } 195 - static inline struct cpuidle_driver *cpuidle_get_cpu_driver( 196 - struct cpuidle_device *dev) {return NULL; } 197 209 #endif 198 210 199 211 /* kernel/sched/idle.c */
+12
include/linux/pm.h
··· 342 342 #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 343 343 #endif 344 344 345 + #ifdef CONFIG_PM_SLEEP 346 + #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ 347 + .suspend_noirq = suspend_fn, \ 348 + .resume_noirq = resume_fn, \ 349 + .freeze_noirq = suspend_fn, \ 350 + .thaw_noirq = resume_fn, \ 351 + .poweroff_noirq = suspend_fn, \ 352 + .restore_noirq = resume_fn, 353 + #else 354 + #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 355 + #endif 356 + 345 357 #ifdef CONFIG_PM 346 358 #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 347 359 .runtime_suspend = suspend_fn, \
+8 -4
include/linux/tick.h
··· 13 13 14 14 #ifdef CONFIG_GENERIC_CLOCKEVENTS 15 15 extern void __init tick_init(void); 16 - extern void tick_freeze(void); 17 - extern void tick_unfreeze(void); 18 16 /* Should be core only, but ARM BL switcher requires it */ 19 17 extern void tick_suspend_local(void); 20 18 /* Should be core only, but XEN resume magic and ARM BL switcher require it */ ··· 21 23 extern void tick_cleanup_dead_cpu(int cpu); 22 24 #else /* CONFIG_GENERIC_CLOCKEVENTS */ 23 25 static inline void tick_init(void) { } 24 - static inline void tick_freeze(void) { } 25 - static inline void tick_unfreeze(void) { } 26 26 static inline void tick_suspend_local(void) { } 27 27 static inline void tick_resume_local(void) { } 28 28 static inline void tick_handover_do_timer(void) { } 29 29 static inline void tick_cleanup_dead_cpu(int cpu) { } 30 30 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 31 + 32 + #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND) 33 + extern void tick_freeze(void); 34 + extern void tick_unfreeze(void); 35 + #else 36 + static inline void tick_freeze(void) { } 37 + static inline void tick_unfreeze(void) { } 38 + #endif 31 39 32 40 #ifdef CONFIG_TICK_ONESHOT 33 41 extern void tick_irq_enter(void);
+1 -1
kernel/power/main.c
··· 272 272 { 273 273 pm_print_times_enabled = !!initcall_debug; 274 274 } 275 - #else /* !CONFIG_PP_SLEEP_DEBUG */ 275 + #else /* !CONFIG_PM_SLEEP_DEBUG */ 276 276 static inline void pm_print_times_init(void) {} 277 277 #endif /* CONFIG_PM_SLEEP_DEBUG */ 278 278
+5 -3
kernel/power/suspend.c
··· 366 366 trace_suspend_resume(TPS("machine_suspend"), 367 367 state, false); 368 368 events_check_enabled = false; 369 + } else if (*wakeup) { 370 + error = -EBUSY; 369 371 } 370 372 syscore_resume(); 371 373 } ··· 470 468 if (state == PM_SUSPEND_FREEZE) { 471 469 #ifdef CONFIG_PM_DEBUG 472 470 if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) { 473 - pr_warning("PM: Unsupported test mode for freeze state," 471 + pr_warning("PM: Unsupported test mode for suspend to idle," 474 472 "please choose none/freezer/devices/platform.\n"); 475 473 return -EAGAIN; 476 474 } ··· 490 488 printk("done.\n"); 491 489 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 492 490 493 - pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); 491 + pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]); 494 492 error = suspend_prepare(state); 495 493 if (error) 496 494 goto Unlock; ··· 499 497 goto Finish; 500 498 501 499 trace_suspend_resume(TPS("suspend_enter"), state, false); 502 - pr_debug("PM: Entering %s sleep\n", pm_states[state]); 500 + pr_debug("PM: Suspending system (%s)\n", pm_states[state]); 503 501 pm_restrict_gfp_mask(); 504 502 error = suspend_devices_and_enter(state); 505 503 pm_restore_gfp_mask();
+13 -4
kernel/time/tick-common.c
··· 19 19 #include <linux/profile.h> 20 20 #include <linux/sched.h> 21 21 #include <linux/module.h> 22 + #include <trace/events/power.h> 22 23 23 24 #include <asm/irq_regs.h> 24 25 ··· 441 440 tick_resume_local(); 442 441 } 443 442 443 + #ifdef CONFIG_SUSPEND 444 444 static DEFINE_RAW_SPINLOCK(tick_freeze_lock); 445 445 static unsigned int tick_freeze_depth; 446 446 ··· 459 457 raw_spin_lock(&tick_freeze_lock); 460 458 461 459 tick_freeze_depth++; 462 - if (tick_freeze_depth == num_online_cpus()) 460 + if (tick_freeze_depth == num_online_cpus()) { 461 + trace_suspend_resume(TPS("timekeeping_freeze"), 462 + smp_processor_id(), true); 463 463 timekeeping_suspend(); 464 - else 464 + } else { 465 465 tick_suspend_local(); 466 + } 466 467 467 468 raw_spin_unlock(&tick_freeze_lock); 468 469 } ··· 483 478 { 484 479 raw_spin_lock(&tick_freeze_lock); 485 480 486 - if (tick_freeze_depth == num_online_cpus()) 481 + if (tick_freeze_depth == num_online_cpus()) { 487 482 timekeeping_resume(); 488 - else 483 + trace_suspend_resume(TPS("timekeeping_freeze"), 484 + smp_processor_id(), false); 485 + } else { 489 486 tick_resume_local(); 487 + } 490 488 491 489 tick_freeze_depth--; 492 490 493 491 raw_spin_unlock(&tick_freeze_lock); 494 492 } 493 + #endif /* CONFIG_SUSPEND */ 495 494 496 495 /** 497 496 * tick_init - initialize the tick control