Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM / Runtime: Add runtime PM statistics (v3)
PM / Runtime: Make runtime_status attribute not debug-only (v. 2)
PM: Do not use dynamically allocated objects in pm_wakeup_event()
PM / Suspend: Fix ordering of calls in suspend error paths
PM / Hibernate: Fix snapshot error code path
PM / Hibernate: Fix hibernation_platform_enter()
pm_qos: Get rid of the allocation in pm_qos_add_request()
pm_qos: Reimplement using plists
plist: Add plist_last
PM: Make it possible to avoid races between wakeup and system sleep
PNPACPI: Add support for remote wakeup
PM: describe kernel policy regarding wakeup defaults (v. 2)
PM / Hibernate: Fix typos in comments in kernel/power/swap.c

+734 -188
+15
Documentation/ABI/testing/sysfs-power
···
  		if this file contains "1", which is the default. It may be
  		disabled by writing "0" to this file, in which case all devices
  		will be suspended and resumed synchronously.
+
+ What:		/sys/power/wakeup_count
+ Date:		July 2010
+ Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+ Description:
+ 		The /sys/power/wakeup_count file allows user space to put the
+ 		system into a sleep state while taking into account the
+ 		concurrent arrival of wakeup events. Reading from it returns
+ 		the current number of registered wakeup events and it blocks if
+ 		some wakeup events are being processed at the time the file is
+ 		read from. Writing to it will only succeed if the current
+ 		number of wakeup events is equal to the written value and, if
+ 		successful, will make the kernel abort a subsequent transition
+ 		to a sleep state if any wakeup events are reported after the
+ 		write has returned.
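Note: the wakeup_count protocol described above is easiest to see in code. A minimal user-space sketch (not part of this merge; the helper name and error handling are illustrative only) reads wakeup_count, finishes its own preparations, writes the value back, and only writes to /sys/power/state if that write succeeded:

#include <stdio.h>

/* Returns 0 on suspend, -1 if the attempt failed or should be retried. */
static int try_suspend(void)
{
	unsigned long count;
	int ok;
	FILE *f;

	/* Read the current wakeup event count (this read may block). */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f)
		return -1;
	ok = (fscanf(f, "%lu", &count) == 1);
	fclose(f);
	if (!ok)
		return -1;

	/* ... user space finishes its own pre-suspend work here ... */

	/* Write the count back; failure means new wakeup events arrived. */
	f = fopen("/sys/power/wakeup_count", "w");
	if (!f)
		return -1;
	ok = (fprintf(f, "%lu", count) > 0);
	if (fclose(f) == EOF || !ok)
		return -1;

	/* Start the transition; the kernel aborts it on further wakeup events. */
	f = fopen("/sys/power/state", "w");
	if (!f)
		return -1;
	ok = (fprintf(f, "mem") > 0);
	return (fclose(f) == EOF || !ok) ? -1 : 0;
}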
+1 -1
drivers/base/power/Makefile
···
  obj-$(CONFIG_PM) += sysfs.o
- obj-$(CONFIG_PM_SLEEP) += main.o
+ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
  obj-$(CONFIG_PM_RUNTIME) += runtime.o
  obj-$(CONFIG_PM_OPS) += generic_ops.o
  obj-$(CONFIG_PM_TRACE_RTC) += trace.o
+1
drivers/base/power/main.c
···
  {
  	dev->power.status = DPM_ON;
  	init_completion(&dev->power.completion);
+ 	dev->power.wakeup_count = 0;
  	pm_runtime_init(dev);
  }

+47 -7
drivers/base/power/runtime.c
··· 123 123 } 124 124 EXPORT_SYMBOL_GPL(pm_runtime_idle); 125 125 126 + 127 + /** 128 + * update_pm_runtime_accounting - Update the time accounting of power states 129 + * @dev: Device to update the accounting for 130 + * 131 + * In order to be able to have time accounting of the various power states 132 + * (as used by programs such as PowerTOP to show the effectiveness of runtime 133 + * PM), we need to track the time spent in each state. 134 + * update_pm_runtime_accounting must be called each time before the 135 + * runtime_status field is updated, to account the time in the old state 136 + * correctly. 137 + */ 138 + void update_pm_runtime_accounting(struct device *dev) 139 + { 140 + unsigned long now = jiffies; 141 + int delta; 142 + 143 + delta = now - dev->power.accounting_timestamp; 144 + 145 + if (delta < 0) 146 + delta = 0; 147 + 148 + dev->power.accounting_timestamp = now; 149 + 150 + if (dev->power.disable_depth > 0) 151 + return; 152 + 153 + if (dev->power.runtime_status == RPM_SUSPENDED) 154 + dev->power.suspended_jiffies += delta; 155 + else 156 + dev->power.active_jiffies += delta; 157 + } 158 + 159 + static void __update_runtime_status(struct device *dev, enum rpm_status status) 160 + { 161 + update_pm_runtime_accounting(dev); 162 + dev->power.runtime_status = status; 163 + } 164 + 126 165 /** 127 166 * __pm_runtime_suspend - Carry out run-time suspend of given device. 128 167 * @dev: Device to suspend. ··· 236 197 goto repeat; 237 198 } 238 199 239 - dev->power.runtime_status = RPM_SUSPENDING; 200 + __update_runtime_status(dev, RPM_SUSPENDING); 240 201 dev->power.deferred_resume = false; 241 202 242 203 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { ··· 267 228 } 268 229 269 230 if (retval) { 270 - dev->power.runtime_status = RPM_ACTIVE; 231 + __update_runtime_status(dev, RPM_ACTIVE); 271 232 if (retval == -EAGAIN || retval == -EBUSY) { 272 233 if (dev->power.timer_expires == 0) 273 234 notify = true; ··· 276 237 pm_runtime_cancel_pending(dev); 277 238 } 278 239 } else { 279 - dev->power.runtime_status = RPM_SUSPENDED; 240 + __update_runtime_status(dev, RPM_SUSPENDED); 280 241 pm_runtime_deactivate_timer(dev); 281 242 282 243 if (dev->parent) { ··· 420 381 goto repeat; 421 382 } 422 383 423 - dev->power.runtime_status = RPM_RESUMING; 384 + __update_runtime_status(dev, RPM_RESUMING); 424 385 425 386 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { 426 387 spin_unlock_irq(&dev->power.lock); ··· 450 411 } 451 412 452 413 if (retval) { 453 - dev->power.runtime_status = RPM_SUSPENDED; 414 + __update_runtime_status(dev, RPM_SUSPENDED); 454 415 pm_runtime_cancel_pending(dev); 455 416 } else { 456 - dev->power.runtime_status = RPM_ACTIVE; 417 + __update_runtime_status(dev, RPM_ACTIVE); 457 418 if (parent) 458 419 atomic_inc(&parent->power.child_count); 459 420 } ··· 887 848 } 888 849 889 850 out_set: 890 - dev->power.runtime_status = status; 851 + __update_runtime_status(dev, status); 891 852 dev->power.runtime_error = 0; 892 853 out: 893 854 spin_unlock_irqrestore(&dev->power.lock, flags); ··· 1116 1077 dev->power.request_pending = false; 1117 1078 dev->power.request = RPM_REQ_NONE; 1118 1079 dev->power.deferred_resume = false; 1080 + dev->power.accounting_timestamp = jiffies; 1119 1081 INIT_WORK(&dev->power.work, pm_runtime_work); 1120 1082 1121 1083 dev->power.timer_expires = 0;
+78 -20
drivers/base/power/sysfs.c
··· 6 6 #include <linux/string.h> 7 7 #include <linux/pm_runtime.h> 8 8 #include <asm/atomic.h> 9 + #include <linux/jiffies.h> 9 10 #include "power.h" 10 11 11 12 /* ··· 74 73 * device are known to the PM core. However, for some devices this 75 74 * attribute is set to "enabled" by bus type code or device drivers and in 76 75 * that cases it should be safe to leave the default value. 76 + * 77 + * wakeup_count - Report the number of wakeup events related to the device 77 78 */ 78 79 79 80 static const char enabled[] = "enabled"; ··· 111 108 } 112 109 113 110 static DEVICE_ATTR(control, 0644, control_show, control_store); 111 + 112 + static ssize_t rtpm_active_time_show(struct device *dev, 113 + struct device_attribute *attr, char *buf) 114 + { 115 + int ret; 116 + spin_lock_irq(&dev->power.lock); 117 + update_pm_runtime_accounting(dev); 118 + ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); 119 + spin_unlock_irq(&dev->power.lock); 120 + return ret; 121 + } 122 + 123 + static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL); 124 + 125 + static ssize_t rtpm_suspended_time_show(struct device *dev, 126 + struct device_attribute *attr, char *buf) 127 + { 128 + int ret; 129 + spin_lock_irq(&dev->power.lock); 130 + update_pm_runtime_accounting(dev); 131 + ret = sprintf(buf, "%i\n", 132 + jiffies_to_msecs(dev->power.suspended_jiffies)); 133 + spin_unlock_irq(&dev->power.lock); 134 + return ret; 135 + } 136 + 137 + static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL); 138 + 139 + static ssize_t rtpm_status_show(struct device *dev, 140 + struct device_attribute *attr, char *buf) 141 + { 142 + const char *p; 143 + 144 + if (dev->power.runtime_error) { 145 + p = "error\n"; 146 + } else if (dev->power.disable_depth) { 147 + p = "unsupported\n"; 148 + } else { 149 + switch (dev->power.runtime_status) { 150 + case RPM_SUSPENDED: 151 + p = "suspended\n"; 152 + break; 153 + case RPM_SUSPENDING: 154 + p = "suspending\n"; 155 + break; 156 + case RPM_RESUMING: 157 + p = "resuming\n"; 158 + break; 159 + case RPM_ACTIVE: 160 + p = "active\n"; 161 + break; 162 + default: 163 + return -EIO; 164 + } 165 + } 166 + return sprintf(buf, p); 167 + } 168 + 169 + static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); 114 170 #endif 115 171 116 172 static ssize_t ··· 206 144 207 145 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); 208 146 147 + #ifdef CONFIG_PM_SLEEP 148 + static ssize_t wakeup_count_show(struct device *dev, 149 + struct device_attribute *attr, char *buf) 150 + { 151 + return sprintf(buf, "%lu\n", dev->power.wakeup_count); 152 + } 153 + 154 + static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); 155 + #endif 156 + 209 157 #ifdef CONFIG_PM_ADVANCED_DEBUG 210 158 #ifdef CONFIG_PM_RUNTIME 211 159 ··· 244 172 return sprintf(buf, "enabled\n"); 245 173 } 246 174 247 - static ssize_t rtpm_status_show(struct device *dev, 248 - struct device_attribute *attr, char *buf) 249 - { 250 - if (dev->power.runtime_error) 251 - return sprintf(buf, "error\n"); 252 - switch (dev->power.runtime_status) { 253 - case RPM_SUSPENDED: 254 - return sprintf(buf, "suspended\n"); 255 - case RPM_SUSPENDING: 256 - return sprintf(buf, "suspending\n"); 257 - case RPM_RESUMING: 258 - return sprintf(buf, "resuming\n"); 259 - case RPM_ACTIVE: 260 - return sprintf(buf, "active\n"); 261 - } 262 - return -EIO; 263 - } 264 - 265 175 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL); 266 176 static DEVICE_ATTR(runtime_active_kids, 
0444, rtpm_children_show, NULL); 267 - static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); 268 177 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); 269 178 270 179 #endif ··· 281 228 static struct attribute * power_attrs[] = { 282 229 #ifdef CONFIG_PM_RUNTIME 283 230 &dev_attr_control.attr, 231 + &dev_attr_runtime_status.attr, 232 + &dev_attr_runtime_suspended_time.attr, 233 + &dev_attr_runtime_active_time.attr, 284 234 #endif 285 235 &dev_attr_wakeup.attr, 236 + #ifdef CONFIG_PM_SLEEP 237 + &dev_attr_wakeup_count.attr, 238 + #endif 286 239 #ifdef CONFIG_PM_ADVANCED_DEBUG 287 240 &dev_attr_async.attr, 288 241 #ifdef CONFIG_PM_RUNTIME 289 242 &dev_attr_runtime_usage.attr, 290 243 &dev_attr_runtime_active_kids.attr, 291 - &dev_attr_runtime_status.attr, 292 244 &dev_attr_runtime_enabled.attr, 293 245 #endif 294 246 #endif
+247
drivers/base/power/wakeup.c
··· 1 + /* 2 + * drivers/base/power/wakeup.c - System wakeup events framework 3 + * 4 + * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 5 + * 6 + * This file is released under the GPLv2. 7 + */ 8 + 9 + #include <linux/device.h> 10 + #include <linux/slab.h> 11 + #include <linux/sched.h> 12 + #include <linux/capability.h> 13 + #include <linux/suspend.h> 14 + #include <linux/pm.h> 15 + 16 + /* 17 + * If set, the suspend/hibernate code will abort transitions to a sleep state 18 + * if wakeup events are registered during or immediately before the transition. 19 + */ 20 + bool events_check_enabled; 21 + 22 + /* The counter of registered wakeup events. */ 23 + static unsigned long event_count; 24 + /* A preserved old value of event_count. */ 25 + static unsigned long saved_event_count; 26 + /* The counter of wakeup events being processed. */ 27 + static unsigned long events_in_progress; 28 + 29 + static DEFINE_SPINLOCK(events_lock); 30 + 31 + static void pm_wakeup_timer_fn(unsigned long data); 32 + 33 + static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); 34 + static unsigned long events_timer_expires; 35 + 36 + /* 37 + * The functions below use the observation that each wakeup event starts a 38 + * period in which the system should not be suspended. The moment this period 39 + * will end depends on how the wakeup event is going to be processed after being 40 + * detected and all of the possible cases can be divided into two distinct 41 + * groups. 42 + * 43 + * First, a wakeup event may be detected by the same functional unit that will 44 + * carry out the entire processing of it and possibly will pass it to user space 45 + * for further processing. In that case the functional unit that has detected 46 + * the event may later "close" the "no suspend" period associated with it 47 + * directly as soon as it has been dealt with. The pair of pm_stay_awake() and 48 + * pm_relax(), balanced with each other, is supposed to be used in such 49 + * situations. 50 + * 51 + * Second, a wakeup event may be detected by one functional unit and processed 52 + * by another one. In that case the unit that has detected it cannot really 53 + * "close" the "no suspend" period associated with it, unless it knows in 54 + * advance what's going to happen to the event during processing. This 55 + * knowledge, however, may not be available to it, so it can simply specify time 56 + * to wait before the system can be suspended and pass it as the second 57 + * argument of pm_wakeup_event(). 58 + */ 59 + 60 + /** 61 + * pm_stay_awake - Notify the PM core that a wakeup event is being processed. 62 + * @dev: Device the wakeup event is related to. 63 + * 64 + * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the 65 + * counter of wakeup events being processed. If @dev is not NULL, the counter 66 + * of wakeup events related to @dev is incremented too. 67 + * 68 + * Call this function after detecting of a wakeup event if pm_relax() is going 69 + * to be called directly after processing the event (and possibly passing it to 70 + * user space for further processing). 71 + * 72 + * It is safe to call this function from interrupt context. 
73 + */ 74 + void pm_stay_awake(struct device *dev) 75 + { 76 + unsigned long flags; 77 + 78 + spin_lock_irqsave(&events_lock, flags); 79 + if (dev) 80 + dev->power.wakeup_count++; 81 + 82 + events_in_progress++; 83 + spin_unlock_irqrestore(&events_lock, flags); 84 + } 85 + 86 + /** 87 + * pm_relax - Notify the PM core that processing of a wakeup event has ended. 88 + * 89 + * Notify the PM core that a wakeup event has been processed by decrementing 90 + * the counter of wakeup events being processed and incrementing the counter 91 + * of registered wakeup events. 92 + * 93 + * Call this function for wakeup events whose processing started with calling 94 + * pm_stay_awake(). 95 + * 96 + * It is safe to call it from interrupt context. 97 + */ 98 + void pm_relax(void) 99 + { 100 + unsigned long flags; 101 + 102 + spin_lock_irqsave(&events_lock, flags); 103 + if (events_in_progress) { 104 + events_in_progress--; 105 + event_count++; 106 + } 107 + spin_unlock_irqrestore(&events_lock, flags); 108 + } 109 + 110 + /** 111 + * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. 112 + * 113 + * Decrease the counter of wakeup events being processed after it was increased 114 + * by pm_wakeup_event(). 115 + */ 116 + static void pm_wakeup_timer_fn(unsigned long data) 117 + { 118 + unsigned long flags; 119 + 120 + spin_lock_irqsave(&events_lock, flags); 121 + if (events_timer_expires 122 + && time_before_eq(events_timer_expires, jiffies)) { 123 + events_in_progress--; 124 + events_timer_expires = 0; 125 + } 126 + spin_unlock_irqrestore(&events_lock, flags); 127 + } 128 + 129 + /** 130 + * pm_wakeup_event - Notify the PM core of a wakeup event. 131 + * @dev: Device the wakeup event is related to. 132 + * @msec: Anticipated event processing time (in milliseconds). 133 + * 134 + * Notify the PM core of a wakeup event (signaled by @dev) that will take 135 + * approximately @msec milliseconds to be processed by the kernel. Increment 136 + * the counter of registered wakeup events and (if @msec is nonzero) set up 137 + * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the 138 + * timer has not been set up already, increment the counter of wakeup events 139 + * being processed). If @dev is not NULL, the counter of wakeup events related 140 + * to @dev is incremented too. 141 + * 142 + * It is safe to call this function from interrupt context. 143 + */ 144 + void pm_wakeup_event(struct device *dev, unsigned int msec) 145 + { 146 + unsigned long flags; 147 + 148 + spin_lock_irqsave(&events_lock, flags); 149 + event_count++; 150 + if (dev) 151 + dev->power.wakeup_count++; 152 + 153 + if (msec) { 154 + unsigned long expires; 155 + 156 + expires = jiffies + msecs_to_jiffies(msec); 157 + if (!expires) 158 + expires = 1; 159 + 160 + if (!events_timer_expires 161 + || time_after(expires, events_timer_expires)) { 162 + if (!events_timer_expires) 163 + events_in_progress++; 164 + 165 + mod_timer(&events_timer, expires); 166 + events_timer_expires = expires; 167 + } 168 + } 169 + spin_unlock_irqrestore(&events_lock, flags); 170 + } 171 + 172 + /** 173 + * pm_check_wakeup_events - Check for new wakeup events. 174 + * 175 + * Compare the current number of registered wakeup events with its preserved 176 + * value from the past to check if new wakeup events have been registered since 177 + * the old value was stored. Check if the current number of wakeup events being 178 + * processed is zero. 
179 + */ 180 + bool pm_check_wakeup_events(void) 181 + { 182 + unsigned long flags; 183 + bool ret = true; 184 + 185 + spin_lock_irqsave(&events_lock, flags); 186 + if (events_check_enabled) { 187 + ret = (event_count == saved_event_count) && !events_in_progress; 188 + events_check_enabled = ret; 189 + } 190 + spin_unlock_irqrestore(&events_lock, flags); 191 + return ret; 192 + } 193 + 194 + /** 195 + * pm_get_wakeup_count - Read the number of registered wakeup events. 196 + * @count: Address to store the value at. 197 + * 198 + * Store the number of registered wakeup events at the address in @count. Block 199 + * if the current number of wakeup events being processed is nonzero. 200 + * 201 + * Return false if the wait for the number of wakeup events being processed to 202 + * drop down to zero has been interrupted by a signal (and the current number 203 + * of wakeup events being processed is still nonzero). Otherwise return true. 204 + */ 205 + bool pm_get_wakeup_count(unsigned long *count) 206 + { 207 + bool ret; 208 + 209 + spin_lock_irq(&events_lock); 210 + if (capable(CAP_SYS_ADMIN)) 211 + events_check_enabled = false; 212 + 213 + while (events_in_progress && !signal_pending(current)) { 214 + spin_unlock_irq(&events_lock); 215 + 216 + schedule_timeout_interruptible(msecs_to_jiffies(100)); 217 + 218 + spin_lock_irq(&events_lock); 219 + } 220 + *count = event_count; 221 + ret = !events_in_progress; 222 + spin_unlock_irq(&events_lock); 223 + return ret; 224 + } 225 + 226 + /** 227 + * pm_save_wakeup_count - Save the current number of registered wakeup events. 228 + * @count: Value to compare with the current number of registered wakeup events. 229 + * 230 + * If @count is equal to the current number of registered wakeup events and the 231 + * current number of wakeup events being processed is zero, store @count as the 232 + * old number of registered wakeup events to be used by pm_check_wakeup_events() 233 + * and return true. Otherwise return false. 234 + */ 235 + bool pm_save_wakeup_count(unsigned long count) 236 + { 237 + bool ret = false; 238 + 239 + spin_lock_irq(&events_lock); 240 + if (count == event_count && !events_in_progress) { 241 + saved_event_count = count; 242 + events_check_enabled = true; 243 + ret = true; 244 + } 245 + spin_unlock_irq(&events_lock); 246 + return ret; 247 + }
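Note: the comment block at the top of wakeup.c distinguishes two usage patterns for wakeup events. A hypothetical driver fragment (not part of this merge; handler names invented for illustration) makes the distinction concrete:

#include <linux/interrupt.h>
#include <linux/pm.h>

/* Case 1: the same unit detects and fully processes the event. */
static irqreturn_t foo_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_stay_awake(dev);	/* open the "no suspend" period */
	/* ... handle the wakeup condition completely ... */
	pm_relax();		/* close it again */
	return IRQ_HANDLED;
}

/* Case 2: another unit will finish the processing later. */
static irqreturn_t bar_wake_irq(int irq, void *data)
{
	struct device *dev = data;

	/* Give the rest of the stack ~100 ms before suspend may proceed. */
	pm_wakeup_event(dev, 100);
	return IRQ_HANDLED;
}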
+7 -10
drivers/net/e1000e/netdev.c
···
  		 * dropped transactions.
  		 */
  		pm_qos_update_request(
- 			adapter->netdev->pm_qos_req, 55);
+ 			&adapter->netdev->pm_qos_req, 55);
  	} else {
  		pm_qos_update_request(
- 			adapter->netdev->pm_qos_req,
+ 			&adapter->netdev->pm_qos_req,
  			PM_QOS_DEFAULT_VALUE);
  	}
  }
···

  	/* DMA latency requirement to workaround early-receive/jumbo issue */
  	if (adapter->flags & FLAG_HAS_ERT)
- 		adapter->netdev->pm_qos_req =
- 			pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
- 					   PM_QOS_DEFAULT_VALUE);
+ 		pm_qos_add_request(&adapter->netdev->pm_qos_req,
+ 				   PM_QOS_CPU_DMA_LATENCY,
+ 				   PM_QOS_DEFAULT_VALUE);

  	/* hardware has been reset, we need to reload some things */
  	e1000_configure(adapter);
···
  	e1000_clean_tx_ring(adapter);
  	e1000_clean_rx_ring(adapter);

- 	if (adapter->flags & FLAG_HAS_ERT) {
- 		pm_qos_remove_request(
- 			adapter->netdev->pm_qos_req);
- 		adapter->netdev->pm_qos_req = NULL;
- 	}
+ 	if (adapter->flags & FLAG_HAS_ERT)
+ 		pm_qos_remove_request(&adapter->netdev->pm_qos_req);

  	/*
  	 * TODO: for power management, we could drop the link and
+4 -5
drivers/net/igbvf/netdev.c
···
  #define DRV_VERSION "1.0.0-k0"
  char igbvf_driver_name[] = "igbvf";
  const char igbvf_driver_version[] = DRV_VERSION;
- struct pm_qos_request_list *igbvf_driver_pm_qos_req;
+ static struct pm_qos_request_list igbvf_driver_pm_qos_req;
  static const char igbvf_driver_string[] =
  		"Intel(R) Virtual Function Network Driver";
  static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
···
  	printk(KERN_INFO "%s\n", igbvf_copyright);

  	ret = pci_register_driver(&igbvf_driver);
- 	igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
- 						     PM_QOS_DEFAULT_VALUE);
+ 	pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ 			   PM_QOS_DEFAULT_VALUE);

  	return ret;
  }
···
  static void __exit igbvf_exit_module(void)
  {
  	pci_unregister_driver(&igbvf_driver);
- 	pm_qos_remove_request(igbvf_driver_pm_qos_req);
- 	igbvf_driver_pm_qos_req = NULL;
+ 	pm_qos_remove_request(&igbvf_driver_pm_qos_req);
  }
  module_exit(igbvf_exit_module);

+6 -6
drivers/net/wireless/ipw2x00/ipw2100.c
···
  #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2100 Network Driver"
  #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"

- struct pm_qos_request_list *ipw2100_pm_qos_req;
+ struct pm_qos_request_list ipw2100_pm_qos_req;

  /* Debugging stuff */
  #ifdef CONFIG_IPW2100_DEBUG
···
  	/* the ipw2100 hardware really doesn't want power management delays
  	 * longer than 175usec
  	 */
- 	pm_qos_update_request(ipw2100_pm_qos_req, 175);
+ 	pm_qos_update_request(&ipw2100_pm_qos_req, 175);

  	/* If the interrupt is enabled, turn it off... */
  	spin_lock_irqsave(&priv->low_lock, flags);
···
  	ipw2100_disable_interrupts(priv);
  	spin_unlock_irqrestore(&priv->low_lock, flags);

- 	pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+ 	pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);

  	/* We have to signal any supplicant if we are disassociating */
  	if (associated)
···
  	if (ret)
  		goto out;

- 	ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
- 			PM_QOS_DEFAULT_VALUE);
+ 	pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ 			   PM_QOS_DEFAULT_VALUE);
  #ifdef CONFIG_IPW2100_DEBUG
  	ipw2100_debug_level = debug;
  	ret = driver_create_file(&ipw2100_pci_driver.driver,
···
  			   &driver_attr_debug_level);
  #endif
  	pci_unregister_driver(&ipw2100_pci_driver);
- 	pm_qos_remove_request(ipw2100_pm_qos_req);
+ 	pm_qos_remove_request(&ipw2100_pm_qos_req);
  }

  module_init(ipw2100_init);
+1
drivers/pci/pci-acpi.c
···
  	if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
  		pci_check_pme_status(pci_dev);
  		pm_runtime_resume(&pci_dev->dev);
+ 		pci_wakeup_event(pci_dev);
  		if (pci_dev->subordinate)
  			pci_pme_wakeup_bus(pci_dev->subordinate);
  	}
+19 -1
drivers/pci/pci.c
···
  	return ret;
  }

+ /*
+  * Time to wait before the system can be put into a sleep state after reporting
+  * a wakeup event signaled by a PCI device.
+  */
+ #define PCI_WAKEUP_COOLDOWN	100
+
+ /**
+  * pci_wakeup_event - Report a wakeup event related to a given PCI device.
+  * @dev: Device to report the wakeup event for.
+  */
+ void pci_wakeup_event(struct pci_dev *dev)
+ {
+ 	if (device_may_wakeup(&dev->dev))
+ 		pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN);
+ }
+
  /**
   * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
   * @dev: Device to handle.
···
   */
  static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
  {
- 	if (pci_check_pme_status(dev))
+ 	if (pci_check_pme_status(dev)) {
  		pm_request_resume(&dev->dev);
+ 		pci_wakeup_event(dev);
+ 	}
  	return 0;
  }
+1
drivers/pci/pci.h
···
  extern void pci_disable_enabled_device(struct pci_dev *dev);
  extern bool pci_check_pme_status(struct pci_dev *dev);
  extern int pci_finish_runtime_suspend(struct pci_dev *dev);
+ extern void pci_wakeup_event(struct pci_dev *dev);
  extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
  extern void pci_pme_wakeup_bus(struct pci_bus *bus);
  extern void pci_pm_init(struct pci_dev *dev);
+4 -1
drivers/pci/pcie/pme/pcie_pme.c
···
  	/* Skip PCIe devices in case we started from a root port. */
  	if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
  		pm_request_resume(&dev->dev);
+ 		pci_wakeup_event(dev);
  		ret = true;
  	}

···
  	if (found) {
  		/* The device is there, but we have to check its PME status. */
  		found = pci_check_pme_status(dev);
- 		if (found)
+ 		if (found) {
  			pm_request_resume(&dev->dev);
+ 			pci_wakeup_event(dev);
+ 		}
  		pci_dev_put(dev);
  	} else if (devfn) {
  		/*
+3
drivers/pnp/core.c
···
  	list_add_tail(&dev->global_list, &pnp_global);
  	list_add_tail(&dev->protocol_list, &dev->protocol->devices);
  	spin_unlock(&pnp_lock);
+ 	if (dev->protocol->can_wakeup)
+ 		device_set_wakeup_capable(&dev->dev,
+ 					  dev->protocol->can_wakeup(dev));
  	return device_register(&dev->dev);
  }

+23
drivers/pnp/pnpacpi/core.c
··· 122 122 } 123 123 124 124 #ifdef CONFIG_ACPI_SLEEP 125 + static bool pnpacpi_can_wakeup(struct pnp_dev *dev) 126 + { 127 + struct acpi_device *acpi_dev = dev->data; 128 + acpi_handle handle = acpi_dev->handle; 129 + 130 + return acpi_bus_can_wakeup(handle); 131 + } 132 + 125 133 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) 126 134 { 127 135 struct acpi_device *acpi_dev = dev->data; 128 136 acpi_handle handle = acpi_dev->handle; 129 137 int power_state; 130 138 139 + if (device_can_wakeup(&dev->dev)) { 140 + int rc = acpi_pm_device_sleep_wake(&dev->dev, 141 + device_may_wakeup(&dev->dev)); 142 + 143 + if (rc) 144 + return rc; 145 + } 131 146 power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); 132 147 if (power_state < 0) 133 148 power_state = (state.event == PM_EVENT_ON) ? 134 149 ACPI_STATE_D0 : ACPI_STATE_D3; 135 150 151 + /* acpi_bus_set_power() often fails (keyboard port can't be 152 + * powered-down?), and in any case, our return value is ignored 153 + * by pnp_bus_suspend(). Hence we don't revert the wakeup 154 + * setting if the set_power fails. 155 + */ 136 156 return acpi_bus_set_power(handle, power_state); 137 157 } 138 158 ··· 161 141 struct acpi_device *acpi_dev = dev->data; 162 142 acpi_handle handle = acpi_dev->handle; 163 143 144 + if (device_may_wakeup(&dev->dev)) 145 + acpi_pm_device_sleep_wake(&dev->dev, false); 164 146 return acpi_bus_set_power(handle, ACPI_STATE_D0); 165 147 } 166 148 #endif ··· 173 151 .set = pnpacpi_set_resources, 174 152 .disable = pnpacpi_disable_resources, 175 153 #ifdef CONFIG_ACPI_SLEEP 154 + .can_wakeup = pnpacpi_can_wakeup, 176 155 .suspend = pnpacpi_suspend, 177 156 .resume = pnpacpi_resume, 178 157 #endif
+1 -1
include/linux/netdevice.h
···
  	 */
  	char name[IFNAMSIZ];

- 	struct pm_qos_request_list *pm_qos_req;
+ 	struct pm_qos_request_list pm_qos_req;

  	/* device name hash chain */
  	struct hlist_node name_hlist;
+29
include/linux/plist.h
··· 260 260 #endif 261 261 262 262 /** 263 + * plist_last_entry - get the struct for the last entry 264 + * @head: the &struct plist_head pointer 265 + * @type: the type of the struct this is embedded in 266 + * @member: the name of the list_struct within the struct 267 + */ 268 + #ifdef CONFIG_DEBUG_PI_LIST 269 + # define plist_last_entry(head, type, member) \ 270 + ({ \ 271 + WARN_ON(plist_head_empty(head)); \ 272 + container_of(plist_last(head), type, member); \ 273 + }) 274 + #else 275 + # define plist_last_entry(head, type, member) \ 276 + container_of(plist_last(head), type, member) 277 + #endif 278 + 279 + /** 263 280 * plist_first - return the first node (and thus, highest priority) 264 281 * @head: the &struct plist_head pointer 265 282 * ··· 285 268 static inline struct plist_node *plist_first(const struct plist_head *head) 286 269 { 287 270 return list_entry(head->node_list.next, 271 + struct plist_node, plist.node_list); 272 + } 273 + 274 + /** 275 + * plist_last - return the last node (and thus, lowest priority) 276 + * @head: the &struct plist_head pointer 277 + * 278 + * Assumes the plist is _not_ empty. 279 + */ 280 + static inline struct plist_node *plist_last(const struct plist_head *head) 281 + { 282 + return list_entry(head->node_list.prev, 288 283 struct plist_node, plist.node_list); 289 284 } 290 285
+16
include/linux/pm.h
···
  #ifdef CONFIG_PM_SLEEP
  	struct list_head entry;
  	struct completion completion;
+ 	unsigned long wakeup_count;
  #endif
  #ifdef CONFIG_PM_RUNTIME
  	struct timer_list suspend_timer;
···
  	enum rpm_request request;
  	enum rpm_status runtime_status;
  	int runtime_error;
+ 	unsigned long active_jiffies;
+ 	unsigned long suspended_jiffies;
+ 	unsigned long accounting_timestamp;
  #endif
  };
+
+ extern void update_pm_runtime_accounting(struct device *dev);
+

  /*
···
  	} while (0)

  extern void device_pm_wait_for_dev(struct device *sub, struct device *dev);
+
+ /* drivers/base/power/wakeup.c */
+ extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+ extern void pm_stay_awake(struct device *dev);
+ extern void pm_relax(void);
  #else /* !CONFIG_PM_SLEEP */

  #define device_pm_lock() do {} while (0)
···
  #define suspend_report_result(fn, ret) do {} while (0)

  static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {}
+
+ static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+ static inline void pm_stay_awake(struct device *dev) {}
+ static inline void pm_relax(void) {}
  #endif /* !CONFIG_PM_SLEEP */

  /* How to reorder dpm_list after device_move() */
+10 -3
include/linux/pm_qos_params.h
···
+ #ifndef _LINUX_PM_QOS_PARAMS_H
+ #define _LINUX_PM_QOS_PARAMS_H 1
  /* interface for the pm_qos_power infrastructure of the linux kernel.
   *
   * Mark Gross <mgross@linux.intel.com>
   */
- #include <linux/list.h>
+ #include <linux/plist.h>
  #include <linux/notifier.h>
  #include <linux/miscdevice.h>

···
  #define PM_QOS_NUM_CLASSES 4
  #define PM_QOS_DEFAULT_VALUE -1

- struct pm_qos_request_list;
+ struct pm_qos_request_list {
+ 	struct plist_node list;
+ 	int pm_qos_class;
+ };

- struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value);
+ void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
  void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
  			   s32 new_value);
  void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
···
  int pm_qos_request(int pm_qos_class);
  int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
  int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+ int pm_qos_request_active(struct pm_qos_request_list *req);

+ #endif
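Note: with this header change the request object is embedded by the caller instead of being allocated inside pm_qos_add_request(), which is what the driver updates elsewhere in this merge implement. A minimal, hypothetical usage sketch (struct and function names invented for illustration):

#include <linux/pm_qos_params.h>

struct foo_device {
	struct pm_qos_request_list qos_req;	/* embedded, no kzalloc() */
	/* ... */
};

static void foo_start(struct foo_device *foo)
{
	/* No allocation here any more, so this is safe in more contexts. */
	pm_qos_add_request(&foo->qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void foo_set_latency(struct foo_device *foo, s32 usecs)
{
	pm_qos_update_request(&foo->qos_req, usecs);
}

static void foo_stop(struct foo_device *foo)
{
	if (pm_qos_request_active(&foo->qos_req))
		pm_qos_remove_request(&foo->qos_req);
}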
+7 -3
include/linux/pm_wakeup.h
···

  #ifdef CONFIG_PM

- /* changes to device_may_wakeup take effect on the next pm state change.
-  * by default, devices should wakeup if they can.
+ /* Changes to device_may_wakeup take effect on the next pm state change.
+  *
+  * By default, most devices should leave wakeup disabled. The exceptions
+  * are devices that everyone expects to be wakeup sources: keyboards,
+  * power buttons, possibly network interfaces, etc.
   */
  static inline void device_init_wakeup(struct device *dev, bool val)
  {
···

  #else /* !CONFIG_PM */

- /* For some reason the next two routines work even without CONFIG_PM */
+ /* For some reason the following routines work even without CONFIG_PM */
  static inline void device_init_wakeup(struct device *dev, bool val)
  {
···

  static inline void device_set_wakeup_capable(struct device *dev, bool capable)
  {
+ 	dev->power.can_wakeup = capable;
  }

  static inline bool device_can_wakeup(struct device *dev)
+1
include/linux/pnp.h
···
  	int (*disable) (struct pnp_dev *dev);

  	/* protocol specific suspend/resume */
+ 	bool (*can_wakeup) (struct pnp_dev *dev);
  	int (*suspend) (struct pnp_dev * dev, pm_message_t state);
  	int (*resume) (struct pnp_dev * dev);

+13 -4
include/linux/suspend.h
··· 61 61 * before device drivers' late suspend callbacks are executed. It returns 62 62 * 0 on success or a negative error code otherwise, in which case the 63 63 * system cannot enter the desired sleep state (@prepare_late(), @enter(), 64 - * @wake(), and @finish() will not be called in that case). 64 + * and @wake() will not be called in that case). 65 65 * 66 66 * @prepare_late: Finish preparing the platform for entering the system sleep 67 67 * state indicated by @begin(). 68 68 * @prepare_late is called before disabling nonboot CPUs and after 69 69 * device drivers' late suspend callbacks have been executed. It returns 70 70 * 0 on success or a negative error code otherwise, in which case the 71 - * system cannot enter the desired sleep state (@enter() and @wake()). 71 + * system cannot enter the desired sleep state (@enter() will not be 72 + * executed). 72 73 * 73 74 * @enter: Enter the system sleep state indicated by @begin() or represented by 74 75 * the argument if @begin() is not implemented. ··· 82 81 * resume callbacks are executed. 83 82 * This callback is optional, but should be implemented by the platforms 84 83 * that implement @prepare_late(). If implemented, it is always called 85 - * after @enter(), even if @enter() fails. 84 + * after @prepare_late and @enter(), even if one of them fails. 86 85 * 87 86 * @finish: Finish wake-up of the platform. 88 87 * @finish is called right prior to calling device drivers' regular suspend 89 88 * callbacks. 90 89 * This callback is optional, but should be implemented by the platforms 91 90 * that implement @prepare(). If implemented, it is always called after 92 - * @enter() and @wake(), if implemented, even if any of them fails. 91 + * @enter() and @wake(), even if any of them fails. It is executed after 92 + * a failing @prepare. 93 93 * 94 94 * @end: Called by the PM core right after resuming devices, to indicate to 95 95 * the platform that the system has returned to the working state or ··· 288 286 { .notifier_call = fn, .priority = pri }; \ 289 287 register_pm_notifier(&fn##_nb); \ 290 288 } 289 + 290 + /* drivers/base/power/wakeup.c */ 291 + extern bool events_check_enabled; 292 + 293 + extern bool pm_check_wakeup_events(void); 294 + extern bool pm_get_wakeup_count(unsigned long *count); 295 + extern bool pm_save_wakeup_count(unsigned long count); 291 296 #else /* !CONFIG_PM_SLEEP */ 292 297 293 298 static inline int register_pm_notifier(struct notifier_block *nb)
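Note: the corrected kernel-doc above pins down the platform callback ordering. A minimal, hypothetical platform_suspend_ops sketch (names invented; registration via suspend_set_ops() assumed) summarizing that order:

#include <linux/suspend.h>

/*
 * Call order per the documentation above:
 *   begin -> prepare -> [late suspend of devices] -> prepare_late ->
 *   enter -> wake -> [early resume of devices] -> finish -> end
 * A failing prepare() skips prepare_late(), enter() and wake() but not
 * finish(); a failing prepare_late() or enter() still gets wake() and
 * finish().
 */
static int foo_pm_enter(suspend_state_t state)
{
	/* put the platform into the target sleep state here */
	return 0;
}

static struct platform_suspend_ops foo_pm_ops = {
	.valid = suspend_valid_only_mem,
	.enter = foo_pm_enter,
	/* begin/prepare/prepare_late/wake/finish/end are all optional */
};

/* registered from platform init code: suspend_set_ops(&foo_pm_ops); */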
+1 -1
include/sound/pcm.h
···
  	int number;
  	char name[32];			/* substream name */
  	int stream;			/* stream (direction) */
- 	struct pm_qos_request_list *latency_pm_qos_req; /* pm_qos request */
+ 	struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */
  	size_t buffer_bytes_max;	/* limit ring buffer size */
  	struct snd_dma_buffer dma_buffer;
  	unsigned int dma_buf_id;
+114 -101
kernel/pm_qos_params.c
··· 48 48 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock 49 49 * held, taken with _irqsave. One lock to rule them all 50 50 */ 51 - struct pm_qos_request_list { 52 - struct list_head list; 53 - union { 54 - s32 value; 55 - s32 usec; 56 - s32 kbps; 57 - }; 58 - int pm_qos_class; 51 + enum pm_qos_type { 52 + PM_QOS_MAX, /* return the largest value */ 53 + PM_QOS_MIN /* return the smallest value */ 59 54 }; 60 55 61 - static s32 max_compare(s32 v1, s32 v2); 62 - static s32 min_compare(s32 v1, s32 v2); 63 - 64 56 struct pm_qos_object { 65 - struct pm_qos_request_list requests; 57 + struct plist_head requests; 66 58 struct blocking_notifier_head *notifiers; 67 59 struct miscdevice pm_qos_power_miscdev; 68 60 char *name; 69 61 s32 default_value; 70 - atomic_t target_value; 71 - s32 (*comparitor)(s32, s32); 62 + enum pm_qos_type type; 72 63 }; 64 + 65 + static DEFINE_SPINLOCK(pm_qos_lock); 73 66 74 67 static struct pm_qos_object null_pm_qos; 75 68 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); 76 69 static struct pm_qos_object cpu_dma_pm_qos = { 77 - .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)}, 70 + .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), 78 71 .notifiers = &cpu_dma_lat_notifier, 79 72 .name = "cpu_dma_latency", 80 73 .default_value = 2000 * USEC_PER_SEC, 81 - .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), 82 - .comparitor = min_compare 74 + .type = PM_QOS_MIN, 83 75 }; 84 76 85 77 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 86 78 static struct pm_qos_object network_lat_pm_qos = { 87 - .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)}, 79 + .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), 88 80 .notifiers = &network_lat_notifier, 89 81 .name = "network_latency", 90 82 .default_value = 2000 * USEC_PER_SEC, 91 - .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), 92 - .comparitor = min_compare 83 + .type = PM_QOS_MIN 93 84 }; 94 85 95 86 96 87 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); 97 88 static struct pm_qos_object network_throughput_pm_qos = { 98 - .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)}, 89 + .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), 99 90 .notifiers = &network_throughput_notifier, 100 91 .name = "network_throughput", 101 92 .default_value = 0, 102 - .target_value = ATOMIC_INIT(0), 103 - .comparitor = max_compare 93 + .type = PM_QOS_MAX, 104 94 }; 105 95 106 96 ··· 100 110 &network_lat_pm_qos, 101 111 &network_throughput_pm_qos 102 112 }; 103 - 104 - static DEFINE_SPINLOCK(pm_qos_lock); 105 113 106 114 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, 107 115 size_t count, loff_t *f_pos); ··· 112 124 .release = pm_qos_power_release, 113 125 }; 114 126 115 - /* static helper functions */ 116 - static s32 max_compare(s32 v1, s32 v2) 127 + /* unlocked internal variant */ 128 + static inline int pm_qos_get_value(struct pm_qos_object *o) 117 129 { 118 - return max(v1, v2); 130 + if (plist_head_empty(&o->requests)) 131 + return o->default_value; 132 + 133 + switch (o->type) { 134 + case PM_QOS_MIN: 135 + return plist_last(&o->requests)->prio; 136 + 137 + case PM_QOS_MAX: 138 + return plist_first(&o->requests)->prio; 139 + 140 + default: 141 + /* runtime check for not using enum */ 142 + BUG(); 143 + } 119 144 } 120 145 121 - static s32 min_compare(s32 v1, s32 v2) 146 + static void update_target(struct pm_qos_object *o, struct plist_node *node, 147 + int del, int value) 
122 148 { 123 - return min(v1, v2); 124 - } 125 - 126 - 127 - static void update_target(int pm_qos_class) 128 - { 129 - s32 extreme_value; 130 - struct pm_qos_request_list *node; 131 149 unsigned long flags; 132 - int call_notifier = 0; 150 + int prev_value, curr_value; 133 151 134 152 spin_lock_irqsave(&pm_qos_lock, flags); 135 - extreme_value = pm_qos_array[pm_qos_class]->default_value; 136 - list_for_each_entry(node, 137 - &pm_qos_array[pm_qos_class]->requests.list, list) { 138 - extreme_value = pm_qos_array[pm_qos_class]->comparitor( 139 - extreme_value, node->value); 153 + prev_value = pm_qos_get_value(o); 154 + /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */ 155 + if (value != PM_QOS_DEFAULT_VALUE) { 156 + /* 157 + * to change the list, we atomically remove, reinit 158 + * with new value and add, then see if the extremal 159 + * changed 160 + */ 161 + plist_del(node, &o->requests); 162 + plist_node_init(node, value); 163 + plist_add(node, &o->requests); 164 + } else if (del) { 165 + plist_del(node, &o->requests); 166 + } else { 167 + plist_add(node, &o->requests); 140 168 } 141 - if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) != 142 - extreme_value) { 143 - call_notifier = 1; 144 - atomic_set(&pm_qos_array[pm_qos_class]->target_value, 145 - extreme_value); 146 - pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class, 147 - atomic_read(&pm_qos_array[pm_qos_class]->target_value)); 148 - } 169 + curr_value = pm_qos_get_value(o); 149 170 spin_unlock_irqrestore(&pm_qos_lock, flags); 150 171 151 - if (call_notifier) 152 - blocking_notifier_call_chain( 153 - pm_qos_array[pm_qos_class]->notifiers, 154 - (unsigned long) extreme_value, NULL); 172 + if (prev_value != curr_value) 173 + blocking_notifier_call_chain(o->notifiers, 174 + (unsigned long)curr_value, 175 + NULL); 155 176 } 156 177 157 178 static int register_pm_qos_misc(struct pm_qos_object *qos) ··· 193 196 */ 194 197 int pm_qos_request(int pm_qos_class) 195 198 { 196 - return atomic_read(&pm_qos_array[pm_qos_class]->target_value); 199 + unsigned long flags; 200 + int value; 201 + 202 + spin_lock_irqsave(&pm_qos_lock, flags); 203 + value = pm_qos_get_value(pm_qos_array[pm_qos_class]); 204 + spin_unlock_irqrestore(&pm_qos_lock, flags); 205 + 206 + return value; 197 207 } 198 208 EXPORT_SYMBOL_GPL(pm_qos_request); 209 + 210 + int pm_qos_request_active(struct pm_qos_request_list *req) 211 + { 212 + return req->pm_qos_class != 0; 213 + } 214 + EXPORT_SYMBOL_GPL(pm_qos_request_active); 199 215 200 216 /** 201 217 * pm_qos_add_request - inserts new qos request into the list ··· 221 211 * element as a handle for use in updating and removal. Call needs to save 222 212 * this handle for later use. 
223 213 */ 224 - struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) 214 + void pm_qos_add_request(struct pm_qos_request_list *dep, 215 + int pm_qos_class, s32 value) 225 216 { 226 - struct pm_qos_request_list *dep; 227 - unsigned long flags; 217 + struct pm_qos_object *o = pm_qos_array[pm_qos_class]; 218 + int new_value; 228 219 229 - dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); 230 - if (dep) { 231 - if (value == PM_QOS_DEFAULT_VALUE) 232 - dep->value = pm_qos_array[pm_qos_class]->default_value; 233 - else 234 - dep->value = value; 235 - dep->pm_qos_class = pm_qos_class; 236 - 237 - spin_lock_irqsave(&pm_qos_lock, flags); 238 - list_add(&dep->list, 239 - &pm_qos_array[pm_qos_class]->requests.list); 240 - spin_unlock_irqrestore(&pm_qos_lock, flags); 241 - update_target(pm_qos_class); 220 + if (pm_qos_request_active(dep)) { 221 + WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); 222 + return; 242 223 } 243 - 244 - return dep; 224 + if (value == PM_QOS_DEFAULT_VALUE) 225 + new_value = o->default_value; 226 + else 227 + new_value = value; 228 + plist_node_init(&dep->list, new_value); 229 + dep->pm_qos_class = pm_qos_class; 230 + update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); 245 231 } 246 232 EXPORT_SYMBOL_GPL(pm_qos_add_request); 247 233 ··· 252 246 * Attempts are made to make this code callable on hot code paths. 253 247 */ 254 248 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, 255 - s32 new_value) 249 + s32 new_value) 256 250 { 257 - unsigned long flags; 258 - int pending_update = 0; 259 251 s32 temp; 252 + struct pm_qos_object *o; 260 253 261 - if (pm_qos_req) { /*guard against callers passing in null */ 262 - spin_lock_irqsave(&pm_qos_lock, flags); 263 - if (new_value == PM_QOS_DEFAULT_VALUE) 264 - temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value; 265 - else 266 - temp = new_value; 254 + if (!pm_qos_req) /*guard against callers passing in null */ 255 + return; 267 256 268 - if (temp != pm_qos_req->value) { 269 - pending_update = 1; 270 - pm_qos_req->value = temp; 271 - } 272 - spin_unlock_irqrestore(&pm_qos_lock, flags); 273 - if (pending_update) 274 - update_target(pm_qos_req->pm_qos_class); 257 + if (!pm_qos_request_active(pm_qos_req)) { 258 + WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); 259 + return; 275 260 } 261 + 262 + o = pm_qos_array[pm_qos_req->pm_qos_class]; 263 + 264 + if (new_value == PM_QOS_DEFAULT_VALUE) 265 + temp = o->default_value; 266 + else 267 + temp = new_value; 268 + 269 + if (temp != pm_qos_req->list.prio) 270 + update_target(o, &pm_qos_req->list, 0, temp); 276 271 } 277 272 EXPORT_SYMBOL_GPL(pm_qos_update_request); 278 273 ··· 287 280 */ 288 281 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) 289 282 { 290 - unsigned long flags; 291 - int qos_class; 283 + struct pm_qos_object *o; 292 284 293 285 if (pm_qos_req == NULL) 294 286 return; 295 287 /* silent return to keep pcm code cleaner */ 296 288 297 - qos_class = pm_qos_req->pm_qos_class; 298 - spin_lock_irqsave(&pm_qos_lock, flags); 299 - list_del(&pm_qos_req->list); 300 - kfree(pm_qos_req); 301 - spin_unlock_irqrestore(&pm_qos_lock, flags); 302 - update_target(qos_class); 289 + if (!pm_qos_request_active(pm_qos_req)) { 290 + WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); 291 + return; 292 + } 293 + 294 + o = pm_qos_array[pm_qos_req->pm_qos_class]; 295 + update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); 296 + 
memset(pm_qos_req, 0, sizeof(*pm_qos_req)); 303 297 } 304 298 EXPORT_SYMBOL_GPL(pm_qos_remove_request); 305 299 ··· 348 340 349 341 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); 350 342 if (pm_qos_class >= 0) { 351 - filp->private_data = (void *) pm_qos_add_request(pm_qos_class, 352 - PM_QOS_DEFAULT_VALUE); 343 + struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); 344 + if (!req) 345 + return -ENOMEM; 346 + 347 + pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); 348 + filp->private_data = req; 353 349 354 350 if (filp->private_data) 355 351 return 0; ··· 365 353 { 366 354 struct pm_qos_request_list *req; 367 355 368 - req = (struct pm_qos_request_list *)filp->private_data; 356 + req = filp->private_data; 369 357 pm_qos_remove_request(req); 358 + kfree(req); 370 359 371 360 return 0; 372 361 }
+16 -8
kernel/power/hibernate.c
··· 277 277 goto Enable_irqs; 278 278 } 279 279 280 - if (hibernation_test(TEST_CORE)) 280 + if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) 281 281 goto Power_up; 282 282 283 283 in_suspend = 1; ··· 288 288 error); 289 289 /* Restore control flow magically appears here */ 290 290 restore_processor_state(); 291 - if (!in_suspend) 291 + if (!in_suspend) { 292 + events_check_enabled = false; 292 293 platform_leave(platform_mode); 294 + } 293 295 294 296 Power_up: 295 297 sysdev_resume(); ··· 330 328 331 329 error = platform_begin(platform_mode); 332 330 if (error) 333 - return error; 331 + goto Close; 334 332 335 333 /* Preallocate image memory before shutting down devices. */ 336 334 error = hibernate_preallocate_memory(); ··· 513 511 514 512 local_irq_disable(); 515 513 sysdev_suspend(PMSG_HIBERNATE); 514 + if (!pm_check_wakeup_events()) { 515 + error = -EAGAIN; 516 + goto Power_up; 517 + } 518 + 516 519 hibernation_ops->enter(); 517 520 /* We should never get here */ 518 521 while (1); 519 522 520 - /* 521 - * We don't need to reenable the nonboot CPUs or resume consoles, since 522 - * the system is going to be halted anyway. 523 - */ 523 + Power_up: 524 + sysdev_resume(); 525 + local_irq_enable(); 526 + enable_nonboot_cpus(); 527 + 524 528 Platform_finish: 525 529 hibernation_ops->finish(); 526 530 527 - dpm_suspend_noirq(PMSG_RESTORE); 531 + dpm_resume_noirq(PMSG_RESTORE); 528 532 529 533 Resume_devices: 530 534 entering_platform_hibernation = false;
+55
kernel/power/main.c
··· 204 204 205 205 power_attr(state); 206 206 207 + #ifdef CONFIG_PM_SLEEP 208 + /* 209 + * The 'wakeup_count' attribute, along with the functions defined in 210 + * drivers/base/power/wakeup.c, provides a means by which wakeup events can be 211 + * handled in a non-racy way. 212 + * 213 + * If a wakeup event occurs when the system is in a sleep state, it simply is 214 + * woken up. In turn, if an event that would wake the system up from a sleep 215 + * state occurs when it is undergoing a transition to that sleep state, the 216 + * transition should be aborted. Moreover, if such an event occurs when the 217 + * system is in the working state, an attempt to start a transition to the 218 + * given sleep state should fail during certain period after the detection of 219 + * the event. Using the 'state' attribute alone is not sufficient to satisfy 220 + * these requirements, because a wakeup event may occur exactly when 'state' 221 + * is being written to and may be delivered to user space right before it is 222 + * frozen, so the event will remain only partially processed until the system is 223 + * woken up by another event. In particular, it won't cause the transition to 224 + * a sleep state to be aborted. 225 + * 226 + * This difficulty may be overcome if user space uses 'wakeup_count' before 227 + * writing to 'state'. It first should read from 'wakeup_count' and store 228 + * the read value. Then, after carrying out its own preparations for the system 229 + * transition to a sleep state, it should write the stored value to 230 + * 'wakeup_count'. If that fails, at least one wakeup event has occured since 231 + * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it 232 + * is allowed to write to 'state', but the transition will be aborted if there 233 + * are any wakeup events detected after 'wakeup_count' was written to. 234 + */ 235 + 236 + static ssize_t wakeup_count_show(struct kobject *kobj, 237 + struct kobj_attribute *attr, 238 + char *buf) 239 + { 240 + unsigned long val; 241 + 242 + return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR; 243 + } 244 + 245 + static ssize_t wakeup_count_store(struct kobject *kobj, 246 + struct kobj_attribute *attr, 247 + const char *buf, size_t n) 248 + { 249 + unsigned long val; 250 + 251 + if (sscanf(buf, "%lu", &val) == 1) { 252 + if (pm_save_wakeup_count(val)) 253 + return n; 254 + } 255 + return -EINVAL; 256 + } 257 + 258 + power_attr(wakeup_count); 259 + #endif /* CONFIG_PM_SLEEP */ 260 + 207 261 #ifdef CONFIG_PM_TRACE 208 262 int pm_trace_enabled; 209 263 ··· 290 236 #endif 291 237 #ifdef CONFIG_PM_SLEEP 292 238 &pm_async_attr.attr, 239 + &wakeup_count_attr.attr, 293 240 #ifdef CONFIG_PM_DEBUG 294 241 &pm_test_attr.attr, 295 242 #endif
+7 -6
kernel/power/suspend.c
···
  	if (suspend_ops->prepare) {
  		error = suspend_ops->prepare();
  		if (error)
- 			return error;
+ 			goto Platform_finish;
  	}

  	error = dpm_suspend_noirq(PMSG_SUSPEND);
  	if (error) {
  		printk(KERN_ERR "PM: Some devices failed to power down\n");
- 		goto Platfrom_finish;
+ 		goto Platform_finish;
  	}

  	if (suspend_ops->prepare_late) {
  		error = suspend_ops->prepare_late();
  		if (error)
- 			goto Power_up_devices;
+ 			goto Platform_wake;
  	}

  	if (suspend_test(TEST_PLATFORM))
···

  	error = sysdev_suspend(PMSG_SUSPEND);
  	if (!error) {
- 		if (!suspend_test(TEST_CORE))
+ 		if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
  			error = suspend_ops->enter(state);
+ 			events_check_enabled = false;
+ 		}
  		sysdev_resume();
  	}

···
  	if (suspend_ops->wake)
  		suspend_ops->wake();

-  Power_up_devices:
  	dpm_resume_noirq(PMSG_RESUME);

-  Platfrom_finish:
+  Platform_finish:
  	if (suspend_ops->finish)
  		suspend_ops->finish();

+2 -2
kernel/power/swap.c
···
  /*
   *	The swap map is a data structure used for keeping track of each page
   *	written to a swap partition.  It consists of many swap_map_page
-  *	structures that contain each an array of MAP_PAGE_SIZE swap entries.
+  *	structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
   *	These structures are stored on the swap and linked together with the
   *	help of the .next_swap member.
   *
···

  /**
   *	free_all_swap_pages - free swap pages allocated for saving image data.
-  *	It also frees the extents used to register which swap entres had been
+  *	It also frees the extents used to register which swap entries had been
   *	allocated.
   */

+5 -8
sound/core/pcm_native.c
···
  	snd_pcm_timer_resolution_change(substream);
  	runtime->status->state = SNDRV_PCM_STATE_SETUP;

- 	if (substream->latency_pm_qos_req) {
- 		pm_qos_remove_request(substream->latency_pm_qos_req);
- 		substream->latency_pm_qos_req = NULL;
- 	}
+ 	if (pm_qos_request_active(&substream->latency_pm_qos_req))
+ 		pm_qos_remove_request(&substream->latency_pm_qos_req);
  	if ((usecs = period_to_usecs(runtime)) >= 0)
- 		substream->latency_pm_qos_req = pm_qos_add_request(
- 			PM_QOS_CPU_DMA_LATENCY, usecs);
+ 		pm_qos_add_request(&substream->latency_pm_qos_req,
+ 				   PM_QOS_CPU_DMA_LATENCY, usecs);
  	return 0;
   _error:
  	/* hardware might be unuseable from this time,
···
  	if (substream->ops->hw_free)
  		result = substream->ops->hw_free(substream);
  	runtime->status->state = SNDRV_PCM_STATE_OPEN;
- 	pm_qos_remove_request(substream->latency_pm_qos_req);
- 	substream->latency_pm_qos_req = NULL;
+ 	pm_qos_remove_request(&substream->latency_pm_qos_req);
  	return result;
  }
