Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
PM / Runtime: Add runtime PM statistics (v3)
PM / Runtime: Make runtime_status attribute not debug-only (v. 2)
PM: Do not use dynamically allocated objects in pm_wakeup_event()
PM / Suspend: Fix ordering of calls in suspend error paths
PM / Hibernate: Fix snapshot error code path
PM / Hibernate: Fix hibernation_platform_enter()
pm_qos: Get rid of the allocation in pm_qos_add_request()
pm_qos: Reimplement using plists
plist: Add plist_last
PM: Make it possible to avoid races between wakeup and system sleep
PNPACPI: Add support for remote wakeup
PM: describe kernel policy regarding wakeup defaults (v. 2)
PM / Hibernate: Fix typos in comments in kernel/power/swap.c

+734 -188
+15
Documentation/ABI/testing/sysfs-power
··· 114 if this file contains "1", which is the default. It may be 115 disabled by writing "0" to this file, in which case all devices 116 will be suspended and resumed synchronously.
··· 114 if this file contains "1", which is the default. It may be 115 disabled by writing "0" to this file, in which case all devices 116 will be suspended and resumed synchronously. 117 + 118 + What: /sys/power/wakeup_count 119 + Date: July 2010 120 + Contact: Rafael J. Wysocki <rjw@sisk.pl> 121 + Description: 122 + The /sys/power/wakeup_count file allows user space to put the 123 + system into a sleep state while taking into account the 124 + concurrent arrival of wakeup events. Reading from it returns 125 + the current number of registered wakeup events and it blocks if 126 + some wakeup events are being processed at the time the file is 127 + read from. Writing to it will only succeed if the current 128 + number of wakeup events is equal to the written value and, if 129 + successful, will make the kernel abort a subsequent transition 130 + to a sleep state if any wakeup events are reported after the 131 + write has returned.
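The read-then-write protocol described above can be exercised from user space roughly as follows; a minimal sketch, assuming a program that wants to suspend to RAM, with error handling pared down (only the sysfs paths come from this series):

#include <stdio.h>

int main(void)
{
	unsigned long count;
	FILE *f;

	/* Blocks while wakeup events are still being processed. */
	f = fopen("/sys/power/wakeup_count", "r");
	if (!f || fscanf(f, "%lu", &count) != 1)
		return 1;
	fclose(f);

	/* ... user space finishes its own suspend preparations here ... */

	/* Fails if new wakeup events were registered since the read. */
	f = fopen("/sys/power/wakeup_count", "w");
	if (!f || fprintf(f, "%lu", count) < 0 || fclose(f) != 0)
		return 1;	/* a wakeup event occurred, do not suspend */

	/* Safe to request the transition; it is still aborted if further
	 * wakeup events show up before the system goes to sleep. */
	f = fopen("/sys/power/state", "w");
	if (!f || fprintf(f, "mem") < 0 || fclose(f) != 0)
		return 1;
	return 0;
}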
+1 -1
drivers/base/power/Makefile
··· 1 obj-$(CONFIG_PM) += sysfs.o 2 - obj-$(CONFIG_PM_SLEEP) += main.o 3 obj-$(CONFIG_PM_RUNTIME) += runtime.o 4 obj-$(CONFIG_PM_OPS) += generic_ops.o 5 obj-$(CONFIG_PM_TRACE_RTC) += trace.o
··· 1 obj-$(CONFIG_PM) += sysfs.o 2 + obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o 3 obj-$(CONFIG_PM_RUNTIME) += runtime.o 4 obj-$(CONFIG_PM_OPS) += generic_ops.o 5 obj-$(CONFIG_PM_TRACE_RTC) += trace.o
+1
drivers/base/power/main.c
··· 59 { 60 dev->power.status = DPM_ON; 61 init_completion(&dev->power.completion); 62 pm_runtime_init(dev); 63 } 64
··· 59 { 60 dev->power.status = DPM_ON; 61 init_completion(&dev->power.completion); 62 + dev->power.wakeup_count = 0; 63 pm_runtime_init(dev); 64 } 65
+47 -7
drivers/base/power/runtime.c
··· 123 } 124 EXPORT_SYMBOL_GPL(pm_runtime_idle); 125 126 /** 127 * __pm_runtime_suspend - Carry out run-time suspend of given device. 128 * @dev: Device to suspend. ··· 236 goto repeat; 237 } 238 239 - dev->power.runtime_status = RPM_SUSPENDING; 240 dev->power.deferred_resume = false; 241 242 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { ··· 267 } 268 269 if (retval) { 270 - dev->power.runtime_status = RPM_ACTIVE; 271 if (retval == -EAGAIN || retval == -EBUSY) { 272 if (dev->power.timer_expires == 0) 273 notify = true; ··· 276 pm_runtime_cancel_pending(dev); 277 } 278 } else { 279 - dev->power.runtime_status = RPM_SUSPENDED; 280 pm_runtime_deactivate_timer(dev); 281 282 if (dev->parent) { ··· 420 goto repeat; 421 } 422 423 - dev->power.runtime_status = RPM_RESUMING; 424 425 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { 426 spin_unlock_irq(&dev->power.lock); ··· 450 } 451 452 if (retval) { 453 - dev->power.runtime_status = RPM_SUSPENDED; 454 pm_runtime_cancel_pending(dev); 455 } else { 456 - dev->power.runtime_status = RPM_ACTIVE; 457 if (parent) 458 atomic_inc(&parent->power.child_count); 459 } ··· 887 } 888 889 out_set: 890 - dev->power.runtime_status = status; 891 dev->power.runtime_error = 0; 892 out: 893 spin_unlock_irqrestore(&dev->power.lock, flags); ··· 1116 dev->power.request_pending = false; 1117 dev->power.request = RPM_REQ_NONE; 1118 dev->power.deferred_resume = false; 1119 INIT_WORK(&dev->power.work, pm_runtime_work); 1120 1121 dev->power.timer_expires = 0;
··· 123 } 124 EXPORT_SYMBOL_GPL(pm_runtime_idle); 125 126 + 127 + /** 128 + * update_pm_runtime_accounting - Update the time accounting of power states 129 + * @dev: Device to update the accounting for 130 + * 131 + * In order to be able to have time accounting of the various power states 132 + * (as used by programs such as PowerTOP to show the effectiveness of runtime 133 + * PM), we need to track the time spent in each state. 134 + * update_pm_runtime_accounting must be called each time before the 135 + * runtime_status field is updated, to account the time in the old state 136 + * correctly. 137 + */ 138 + void update_pm_runtime_accounting(struct device *dev) 139 + { 140 + unsigned long now = jiffies; 141 + int delta; 142 + 143 + delta = now - dev->power.accounting_timestamp; 144 + 145 + if (delta < 0) 146 + delta = 0; 147 + 148 + dev->power.accounting_timestamp = now; 149 + 150 + if (dev->power.disable_depth > 0) 151 + return; 152 + 153 + if (dev->power.runtime_status == RPM_SUSPENDED) 154 + dev->power.suspended_jiffies += delta; 155 + else 156 + dev->power.active_jiffies += delta; 157 + } 158 + 159 + static void __update_runtime_status(struct device *dev, enum rpm_status status) 160 + { 161 + update_pm_runtime_accounting(dev); 162 + dev->power.runtime_status = status; 163 + } 164 + 165 /** 166 * __pm_runtime_suspend - Carry out run-time suspend of given device. 167 * @dev: Device to suspend. ··· 197 goto repeat; 198 } 199 200 + __update_runtime_status(dev, RPM_SUSPENDING); 201 dev->power.deferred_resume = false; 202 203 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { ··· 228 } 229 230 if (retval) { 231 + __update_runtime_status(dev, RPM_ACTIVE); 232 if (retval == -EAGAIN || retval == -EBUSY) { 233 if (dev->power.timer_expires == 0) 234 notify = true; ··· 237 pm_runtime_cancel_pending(dev); 238 } 239 } else { 240 + __update_runtime_status(dev, RPM_SUSPENDED); 241 pm_runtime_deactivate_timer(dev); 242 243 if (dev->parent) { ··· 381 goto repeat; 382 } 383 384 + __update_runtime_status(dev, RPM_RESUMING); 385 386 if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { 387 spin_unlock_irq(&dev->power.lock); ··· 411 } 412 413 if (retval) { 414 + __update_runtime_status(dev, RPM_SUSPENDED); 415 pm_runtime_cancel_pending(dev); 416 } else { 417 + __update_runtime_status(dev, RPM_ACTIVE); 418 if (parent) 419 atomic_inc(&parent->power.child_count); 420 } ··· 848 } 849 850 out_set: 851 + __update_runtime_status(dev, status); 852 dev->power.runtime_error = 0; 853 out: 854 spin_unlock_irqrestore(&dev->power.lock, flags); ··· 1077 dev->power.request_pending = false; 1078 dev->power.request = RPM_REQ_NONE; 1079 dev->power.deferred_resume = false; 1080 + dev->power.accounting_timestamp = jiffies; 1081 INIT_WORK(&dev->power.work, pm_runtime_work); 1082 1083 dev->power.timer_expires = 0;
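The accounting added here is what makes the runtime_active_time and runtime_suspended_time attributes (added to drivers/base/power/sysfs.c below) meaningful to tools such as PowerTOP. A rough user-space sketch of reading them, with a hypothetical device path; values are reported in milliseconds:

#include <stdio.h>

static long read_ms(const char *dev, const char *attr)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/power/%s", dev, attr);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* Hypothetical example device. */
	const char *dev = "/sys/devices/pci0000:00/0000:00:19.0";

	printf("runtime_active_time:    %ld ms\n",
	       read_ms(dev, "runtime_active_time"));
	printf("runtime_suspended_time: %ld ms\n",
	       read_ms(dev, "runtime_suspended_time"));
	return 0;
}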
+78 -20
drivers/base/power/sysfs.c
··· 6 #include <linux/string.h> 7 #include <linux/pm_runtime.h> 8 #include <asm/atomic.h> 9 #include "power.h" 10 11 /* ··· 74 * device are known to the PM core. However, for some devices this 75 * attribute is set to "enabled" by bus type code or device drivers and in 76 * that cases it should be safe to leave the default value. 77 */ 78 79 static const char enabled[] = "enabled"; ··· 111 } 112 113 static DEVICE_ATTR(control, 0644, control_show, control_store); 114 #endif 115 116 static ssize_t ··· 206 207 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); 208 209 #ifdef CONFIG_PM_ADVANCED_DEBUG 210 #ifdef CONFIG_PM_RUNTIME 211 ··· 244 return sprintf(buf, "enabled\n"); 245 } 246 247 - static ssize_t rtpm_status_show(struct device *dev, 248 - struct device_attribute *attr, char *buf) 249 - { 250 - if (dev->power.runtime_error) 251 - return sprintf(buf, "error\n"); 252 - switch (dev->power.runtime_status) { 253 - case RPM_SUSPENDED: 254 - return sprintf(buf, "suspended\n"); 255 - case RPM_SUSPENDING: 256 - return sprintf(buf, "suspending\n"); 257 - case RPM_RESUMING: 258 - return sprintf(buf, "resuming\n"); 259 - case RPM_ACTIVE: 260 - return sprintf(buf, "active\n"); 261 - } 262 - return -EIO; 263 - } 264 - 265 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL); 266 static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL); 267 - static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); 268 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); 269 270 #endif ··· 281 static struct attribute * power_attrs[] = { 282 #ifdef CONFIG_PM_RUNTIME 283 &dev_attr_control.attr, 284 #endif 285 &dev_attr_wakeup.attr, 286 #ifdef CONFIG_PM_ADVANCED_DEBUG 287 &dev_attr_async.attr, 288 #ifdef CONFIG_PM_RUNTIME 289 &dev_attr_runtime_usage.attr, 290 &dev_attr_runtime_active_kids.attr, 291 - &dev_attr_runtime_status.attr, 292 &dev_attr_runtime_enabled.attr, 293 #endif 294 #endif
··· 6 #include <linux/string.h> 7 #include <linux/pm_runtime.h> 8 #include <asm/atomic.h> 9 + #include <linux/jiffies.h> 10 #include "power.h" 11 12 /* ··· 73 * device are known to the PM core. However, for some devices this 74 * attribute is set to "enabled" by bus type code or device drivers and in 75 * that cases it should be safe to leave the default value. 76 + * 77 + * wakeup_count - Report the number of wakeup events related to the device 78 */ 79 80 static const char enabled[] = "enabled"; ··· 108 } 109 110 static DEVICE_ATTR(control, 0644, control_show, control_store); 111 + 112 + static ssize_t rtpm_active_time_show(struct device *dev, 113 + struct device_attribute *attr, char *buf) 114 + { 115 + int ret; 116 + spin_lock_irq(&dev->power.lock); 117 + update_pm_runtime_accounting(dev); 118 + ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); 119 + spin_unlock_irq(&dev->power.lock); 120 + return ret; 121 + } 122 + 123 + static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL); 124 + 125 + static ssize_t rtpm_suspended_time_show(struct device *dev, 126 + struct device_attribute *attr, char *buf) 127 + { 128 + int ret; 129 + spin_lock_irq(&dev->power.lock); 130 + update_pm_runtime_accounting(dev); 131 + ret = sprintf(buf, "%i\n", 132 + jiffies_to_msecs(dev->power.suspended_jiffies)); 133 + spin_unlock_irq(&dev->power.lock); 134 + return ret; 135 + } 136 + 137 + static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL); 138 + 139 + static ssize_t rtpm_status_show(struct device *dev, 140 + struct device_attribute *attr, char *buf) 141 + { 142 + const char *p; 143 + 144 + if (dev->power.runtime_error) { 145 + p = "error\n"; 146 + } else if (dev->power.disable_depth) { 147 + p = "unsupported\n"; 148 + } else { 149 + switch (dev->power.runtime_status) { 150 + case RPM_SUSPENDED: 151 + p = "suspended\n"; 152 + break; 153 + case RPM_SUSPENDING: 154 + p = "suspending\n"; 155 + break; 156 + case RPM_RESUMING: 157 + p = "resuming\n"; 158 + break; 159 + case RPM_ACTIVE: 160 + p = "active\n"; 161 + break; 162 + default: 163 + return -EIO; 164 + } 165 + } 166 + return sprintf(buf, p); 167 + } 168 + 169 + static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); 170 #endif 171 172 static ssize_t ··· 144 145 static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); 146 147 + #ifdef CONFIG_PM_SLEEP 148 + static ssize_t wakeup_count_show(struct device *dev, 149 + struct device_attribute *attr, char *buf) 150 + { 151 + return sprintf(buf, "%lu\n", dev->power.wakeup_count); 152 + } 153 + 154 + static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); 155 + #endif 156 + 157 #ifdef CONFIG_PM_ADVANCED_DEBUG 158 #ifdef CONFIG_PM_RUNTIME 159 ··· 172 return sprintf(buf, "enabled\n"); 173 } 174 175 static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL); 176 static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL); 177 static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); 178 179 #endif ··· 228 static struct attribute * power_attrs[] = { 229 #ifdef CONFIG_PM_RUNTIME 230 &dev_attr_control.attr, 231 + &dev_attr_runtime_status.attr, 232 + &dev_attr_runtime_suspended_time.attr, 233 + &dev_attr_runtime_active_time.attr, 234 #endif 235 &dev_attr_wakeup.attr, 236 + #ifdef CONFIG_PM_SLEEP 237 + &dev_attr_wakeup_count.attr, 238 + #endif 239 #ifdef CONFIG_PM_ADVANCED_DEBUG 240 &dev_attr_async.attr, 241 #ifdef CONFIG_PM_RUNTIME 242 &dev_attr_runtime_usage.attr, 243 &dev_attr_runtime_active_kids.attr, 244 
&dev_attr_runtime_enabled.attr, 245 #endif 246 #endif
+247
drivers/base/power/wakeup.c
···
··· 1 + /* 2 + * drivers/base/power/wakeup.c - System wakeup events framework 3 + * 4 + * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 5 + * 6 + * This file is released under the GPLv2. 7 + */ 8 + 9 + #include <linux/device.h> 10 + #include <linux/slab.h> 11 + #include <linux/sched.h> 12 + #include <linux/capability.h> 13 + #include <linux/suspend.h> 14 + #include <linux/pm.h> 15 + 16 + /* 17 + * If set, the suspend/hibernate code will abort transitions to a sleep state 18 + * if wakeup events are registered during or immediately before the transition. 19 + */ 20 + bool events_check_enabled; 21 + 22 + /* The counter of registered wakeup events. */ 23 + static unsigned long event_count; 24 + /* A preserved old value of event_count. */ 25 + static unsigned long saved_event_count; 26 + /* The counter of wakeup events being processed. */ 27 + static unsigned long events_in_progress; 28 + 29 + static DEFINE_SPINLOCK(events_lock); 30 + 31 + static void pm_wakeup_timer_fn(unsigned long data); 32 + 33 + static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); 34 + static unsigned long events_timer_expires; 35 + 36 + /* 37 + * The functions below use the observation that each wakeup event starts a 38 + * period in which the system should not be suspended. The moment this period 39 + * will end depends on how the wakeup event is going to be processed after being 40 + * detected and all of the possible cases can be divided into two distinct 41 + * groups. 42 + * 43 + * First, a wakeup event may be detected by the same functional unit that will 44 + * carry out the entire processing of it and possibly will pass it to user space 45 + * for further processing. In that case the functional unit that has detected 46 + * the event may later "close" the "no suspend" period associated with it 47 + * directly as soon as it has been dealt with. The pair of pm_stay_awake() and 48 + * pm_relax(), balanced with each other, is supposed to be used in such 49 + * situations. 50 + * 51 + * Second, a wakeup event may be detected by one functional unit and processed 52 + * by another one. In that case the unit that has detected it cannot really 53 + * "close" the "no suspend" period associated with it, unless it knows in 54 + * advance what's going to happen to the event during processing. This 55 + * knowledge, however, may not be available to it, so it can simply specify time 56 + * to wait before the system can be suspended and pass it as the second 57 + * argument of pm_wakeup_event(). 58 + */ 59 + 60 + /** 61 + * pm_stay_awake - Notify the PM core that a wakeup event is being processed. 62 + * @dev: Device the wakeup event is related to. 63 + * 64 + * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the 65 + * counter of wakeup events being processed. If @dev is not NULL, the counter 66 + * of wakeup events related to @dev is incremented too. 67 + * 68 + * Call this function after detecting of a wakeup event if pm_relax() is going 69 + * to be called directly after processing the event (and possibly passing it to 70 + * user space for further processing). 71 + * 72 + * It is safe to call this function from interrupt context. 
73 + */ 74 + void pm_stay_awake(struct device *dev) 75 + { 76 + unsigned long flags; 77 + 78 + spin_lock_irqsave(&events_lock, flags); 79 + if (dev) 80 + dev->power.wakeup_count++; 81 + 82 + events_in_progress++; 83 + spin_unlock_irqrestore(&events_lock, flags); 84 + } 85 + 86 + /** 87 + * pm_relax - Notify the PM core that processing of a wakeup event has ended. 88 + * 89 + * Notify the PM core that a wakeup event has been processed by decrementing 90 + * the counter of wakeup events being processed and incrementing the counter 91 + * of registered wakeup events. 92 + * 93 + * Call this function for wakeup events whose processing started with calling 94 + * pm_stay_awake(). 95 + * 96 + * It is safe to call it from interrupt context. 97 + */ 98 + void pm_relax(void) 99 + { 100 + unsigned long flags; 101 + 102 + spin_lock_irqsave(&events_lock, flags); 103 + if (events_in_progress) { 104 + events_in_progress--; 105 + event_count++; 106 + } 107 + spin_unlock_irqrestore(&events_lock, flags); 108 + } 109 + 110 + /** 111 + * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. 112 + * 113 + * Decrease the counter of wakeup events being processed after it was increased 114 + * by pm_wakeup_event(). 115 + */ 116 + static void pm_wakeup_timer_fn(unsigned long data) 117 + { 118 + unsigned long flags; 119 + 120 + spin_lock_irqsave(&events_lock, flags); 121 + if (events_timer_expires 122 + && time_before_eq(events_timer_expires, jiffies)) { 123 + events_in_progress--; 124 + events_timer_expires = 0; 125 + } 126 + spin_unlock_irqrestore(&events_lock, flags); 127 + } 128 + 129 + /** 130 + * pm_wakeup_event - Notify the PM core of a wakeup event. 131 + * @dev: Device the wakeup event is related to. 132 + * @msec: Anticipated event processing time (in milliseconds). 133 + * 134 + * Notify the PM core of a wakeup event (signaled by @dev) that will take 135 + * approximately @msec milliseconds to be processed by the kernel. Increment 136 + * the counter of registered wakeup events and (if @msec is nonzero) set up 137 + * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the 138 + * timer has not been set up already, increment the counter of wakeup events 139 + * being processed). If @dev is not NULL, the counter of wakeup events related 140 + * to @dev is incremented too. 141 + * 142 + * It is safe to call this function from interrupt context. 143 + */ 144 + void pm_wakeup_event(struct device *dev, unsigned int msec) 145 + { 146 + unsigned long flags; 147 + 148 + spin_lock_irqsave(&events_lock, flags); 149 + event_count++; 150 + if (dev) 151 + dev->power.wakeup_count++; 152 + 153 + if (msec) { 154 + unsigned long expires; 155 + 156 + expires = jiffies + msecs_to_jiffies(msec); 157 + if (!expires) 158 + expires = 1; 159 + 160 + if (!events_timer_expires 161 + || time_after(expires, events_timer_expires)) { 162 + if (!events_timer_expires) 163 + events_in_progress++; 164 + 165 + mod_timer(&events_timer, expires); 166 + events_timer_expires = expires; 167 + } 168 + } 169 + spin_unlock_irqrestore(&events_lock, flags); 170 + } 171 + 172 + /** 173 + * pm_check_wakeup_events - Check for new wakeup events. 174 + * 175 + * Compare the current number of registered wakeup events with its preserved 176 + * value from the past to check if new wakeup events have been registered since 177 + * the old value was stored. Check if the current number of wakeup events being 178 + * processed is zero. 
179 + */ 180 + bool pm_check_wakeup_events(void) 181 + { 182 + unsigned long flags; 183 + bool ret = true; 184 + 185 + spin_lock_irqsave(&events_lock, flags); 186 + if (events_check_enabled) { 187 + ret = (event_count == saved_event_count) && !events_in_progress; 188 + events_check_enabled = ret; 189 + } 190 + spin_unlock_irqrestore(&events_lock, flags); 191 + return ret; 192 + } 193 + 194 + /** 195 + * pm_get_wakeup_count - Read the number of registered wakeup events. 196 + * @count: Address to store the value at. 197 + * 198 + * Store the number of registered wakeup events at the address in @count. Block 199 + * if the current number of wakeup events being processed is nonzero. 200 + * 201 + * Return false if the wait for the number of wakeup events being processed to 202 + * drop down to zero has been interrupted by a signal (and the current number 203 + * of wakeup events being processed is still nonzero). Otherwise return true. 204 + */ 205 + bool pm_get_wakeup_count(unsigned long *count) 206 + { 207 + bool ret; 208 + 209 + spin_lock_irq(&events_lock); 210 + if (capable(CAP_SYS_ADMIN)) 211 + events_check_enabled = false; 212 + 213 + while (events_in_progress && !signal_pending(current)) { 214 + spin_unlock_irq(&events_lock); 215 + 216 + schedule_timeout_interruptible(msecs_to_jiffies(100)); 217 + 218 + spin_lock_irq(&events_lock); 219 + } 220 + *count = event_count; 221 + ret = !events_in_progress; 222 + spin_unlock_irq(&events_lock); 223 + return ret; 224 + } 225 + 226 + /** 227 + * pm_save_wakeup_count - Save the current number of registered wakeup events. 228 + * @count: Value to compare with the current number of registered wakeup events. 229 + * 230 + * If @count is equal to the current number of registered wakeup events and the 231 + * current number of wakeup events being processed is zero, store @count as the 232 + * old number of registered wakeup events to be used by pm_check_wakeup_events() 233 + * and return true. Otherwise return false. 234 + */ 235 + bool pm_save_wakeup_count(unsigned long count) 236 + { 237 + bool ret = false; 238 + 239 + spin_lock_irq(&events_lock); 240 + if (count == event_count && !events_in_progress) { 241 + saved_event_count = count; 242 + events_check_enabled = true; 243 + ret = true; 244 + } 245 + spin_unlock_irq(&events_lock); 246 + return ret; 247 + }
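The two patterns described in the comment block at the top of this file map onto drivers roughly as below; a hedged sketch in which foo_chip, both IRQ handlers and the 100 ms estimate are made up, and only pm_stay_awake(), pm_relax() and pm_wakeup_event() come from this series:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

struct foo_chip {
	struct device *dev;
};

/* Pattern 1: the same code detects and fully processes the event. */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_chip *chip = data;

	pm_stay_awake(chip->dev);	/* open the "no suspend" period */
	/* ... deal with the event completely ... */
	pm_relax();			/* and close it again */
	return IRQ_HANDLED;
}

/* Pattern 2: the event is handed off (e.g. to user space), so only an
 * anticipated processing time can be given to the PM core. */
static irqreturn_t foo_wake_irq_handler(int irq, void *data)
{
	struct foo_chip *chip = data;

	pm_wakeup_event(chip->dev, 100);	/* ~100 ms "no suspend" window */
	/* ... queue the event for whoever processes it later ... */
	return IRQ_HANDLED;
}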
+7 -10
drivers/net/e1000e/netdev.c
··· 2901 * dropped transactions. 2902 */ 2903 pm_qos_update_request( 2904 - adapter->netdev->pm_qos_req, 55); 2905 } else { 2906 pm_qos_update_request( 2907 - adapter->netdev->pm_qos_req, 2908 PM_QOS_DEFAULT_VALUE); 2909 } 2910 } ··· 3196 3197 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3198 if (adapter->flags & FLAG_HAS_ERT) 3199 - adapter->netdev->pm_qos_req = 3200 - pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 3201 - PM_QOS_DEFAULT_VALUE); 3202 3203 /* hardware has been reset, we need to reload some things */ 3204 e1000_configure(adapter); ··· 3263 e1000_clean_tx_ring(adapter); 3264 e1000_clean_rx_ring(adapter); 3265 3266 - if (adapter->flags & FLAG_HAS_ERT) { 3267 - pm_qos_remove_request( 3268 - adapter->netdev->pm_qos_req); 3269 - adapter->netdev->pm_qos_req = NULL; 3270 - } 3271 3272 /* 3273 * TODO: for power management, we could drop the link and
··· 2901 * dropped transactions. 2902 */ 2903 pm_qos_update_request( 2904 + &adapter->netdev->pm_qos_req, 55); 2905 } else { 2906 pm_qos_update_request( 2907 + &adapter->netdev->pm_qos_req, 2908 PM_QOS_DEFAULT_VALUE); 2909 } 2910 } ··· 3196 3197 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3198 if (adapter->flags & FLAG_HAS_ERT) 3199 + pm_qos_add_request(&adapter->netdev->pm_qos_req, 3200 + PM_QOS_CPU_DMA_LATENCY, 3201 + PM_QOS_DEFAULT_VALUE); 3202 3203 /* hardware has been reset, we need to reload some things */ 3204 e1000_configure(adapter); ··· 3263 e1000_clean_tx_ring(adapter); 3264 e1000_clean_rx_ring(adapter); 3265 3266 + if (adapter->flags & FLAG_HAS_ERT) 3267 + pm_qos_remove_request(&adapter->netdev->pm_qos_req); 3268 3269 /* 3270 * TODO: for power management, we could drop the link and
+4 -5
drivers/net/igbvf/netdev.c
··· 48 #define DRV_VERSION "1.0.0-k0" 49 char igbvf_driver_name[] = "igbvf"; 50 const char igbvf_driver_version[] = DRV_VERSION; 51 - struct pm_qos_request_list *igbvf_driver_pm_qos_req; 52 static const char igbvf_driver_string[] = 53 "Intel(R) Virtual Function Network Driver"; 54 static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; ··· 2902 printk(KERN_INFO "%s\n", igbvf_copyright); 2903 2904 ret = pci_register_driver(&igbvf_driver); 2905 - igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 2906 - PM_QOS_DEFAULT_VALUE); 2907 2908 return ret; 2909 } ··· 2918 static void __exit igbvf_exit_module(void) 2919 { 2920 pci_unregister_driver(&igbvf_driver); 2921 - pm_qos_remove_request(igbvf_driver_pm_qos_req); 2922 - igbvf_driver_pm_qos_req = NULL; 2923 } 2924 module_exit(igbvf_exit_module); 2925
··· 48 #define DRV_VERSION "1.0.0-k0" 49 char igbvf_driver_name[] = "igbvf"; 50 const char igbvf_driver_version[] = DRV_VERSION; 51 + static struct pm_qos_request_list igbvf_driver_pm_qos_req; 52 static const char igbvf_driver_string[] = 53 "Intel(R) Virtual Function Network Driver"; 54 static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; ··· 2902 printk(KERN_INFO "%s\n", igbvf_copyright); 2903 2904 ret = pci_register_driver(&igbvf_driver); 2905 + pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 2906 + PM_QOS_DEFAULT_VALUE); 2907 2908 return ret; 2909 } ··· 2918 static void __exit igbvf_exit_module(void) 2919 { 2920 pci_unregister_driver(&igbvf_driver); 2921 + pm_qos_remove_request(&igbvf_driver_pm_qos_req); 2922 } 2923 module_exit(igbvf_exit_module); 2924
+6 -6
drivers/net/wireless/ipw2x00/ipw2100.c
··· 174 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 175 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 176 177 - struct pm_qos_request_list *ipw2100_pm_qos_req; 178 179 /* Debugging stuff */ 180 #ifdef CONFIG_IPW2100_DEBUG ··· 1741 /* the ipw2100 hardware really doesn't want power management delays 1742 * longer than 175usec 1743 */ 1744 - pm_qos_update_request(ipw2100_pm_qos_req, 175); 1745 1746 /* If the interrupt is enabled, turn it off... */ 1747 spin_lock_irqsave(&priv->low_lock, flags); ··· 1889 ipw2100_disable_interrupts(priv); 1890 spin_unlock_irqrestore(&priv->low_lock, flags); 1891 1892 - pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1893 1894 /* We have to signal any supplicant if we are disassociating */ 1895 if (associated) ··· 6669 if (ret) 6670 goto out; 6671 6672 - ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 6673 - PM_QOS_DEFAULT_VALUE); 6674 #ifdef CONFIG_IPW2100_DEBUG 6675 ipw2100_debug_level = debug; 6676 ret = driver_create_file(&ipw2100_pci_driver.driver, ··· 6692 &driver_attr_debug_level); 6693 #endif 6694 pci_unregister_driver(&ipw2100_pci_driver); 6695 - pm_qos_remove_request(ipw2100_pm_qos_req); 6696 } 6697 6698 module_init(ipw2100_init);
··· 174 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 175 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 176 177 + struct pm_qos_request_list ipw2100_pm_qos_req; 178 179 /* Debugging stuff */ 180 #ifdef CONFIG_IPW2100_DEBUG ··· 1741 /* the ipw2100 hardware really doesn't want power management delays 1742 * longer than 175usec 1743 */ 1744 + pm_qos_update_request(&ipw2100_pm_qos_req, 175); 1745 1746 /* If the interrupt is enabled, turn it off... */ 1747 spin_lock_irqsave(&priv->low_lock, flags); ··· 1889 ipw2100_disable_interrupts(priv); 1890 spin_unlock_irqrestore(&priv->low_lock, flags); 1891 1892 + pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1893 1894 /* We have to signal any supplicant if we are disassociating */ 1895 if (associated) ··· 6669 if (ret) 6670 goto out; 6671 6672 + pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 6673 + PM_QOS_DEFAULT_VALUE); 6674 #ifdef CONFIG_IPW2100_DEBUG 6675 ipw2100_debug_level = debug; 6676 ret = driver_create_file(&ipw2100_pci_driver.driver, ··· 6692 &driver_attr_debug_level); 6693 #endif 6694 pci_unregister_driver(&ipw2100_pci_driver); 6695 + pm_qos_remove_request(&ipw2100_pm_qos_req); 6696 } 6697 6698 module_init(ipw2100_init);
+1
drivers/pci/pci-acpi.c
··· 48 if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { 49 pci_check_pme_status(pci_dev); 50 pm_runtime_resume(&pci_dev->dev); 51 if (pci_dev->subordinate) 52 pci_pme_wakeup_bus(pci_dev->subordinate); 53 }
··· 48 if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) { 49 pci_check_pme_status(pci_dev); 50 pm_runtime_resume(&pci_dev->dev); 51 + pci_wakeup_event(pci_dev); 52 if (pci_dev->subordinate) 53 pci_pme_wakeup_bus(pci_dev->subordinate); 54 }
+19 -1
drivers/pci/pci.c
··· 1275 return ret; 1276 } 1277 1278 /** 1279 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 1280 * @dev: Device to handle. ··· 1301 */ 1302 static int pci_pme_wakeup(struct pci_dev *dev, void *ign) 1303 { 1304 - if (pci_check_pme_status(dev)) 1305 pm_request_resume(&dev->dev); 1306 return 0; 1307 } 1308
··· 1275 return ret; 1276 } 1277 1278 + /* 1279 + * Time to wait before the system can be put into a sleep state after reporting 1280 + * a wakeup event signaled by a PCI device. 1281 + */ 1282 + #define PCI_WAKEUP_COOLDOWN 100 1283 + 1284 + /** 1285 + * pci_wakeup_event - Report a wakeup event related to a given PCI device. 1286 + * @dev: Device to report the wakeup event for. 1287 + */ 1288 + void pci_wakeup_event(struct pci_dev *dev) 1289 + { 1290 + if (device_may_wakeup(&dev->dev)) 1291 + pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN); 1292 + } 1293 + 1294 /** 1295 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 1296 * @dev: Device to handle. ··· 1285 */ 1286 static int pci_pme_wakeup(struct pci_dev *dev, void *ign) 1287 { 1288 + if (pci_check_pme_status(dev)) { 1289 pm_request_resume(&dev->dev); 1290 + pci_wakeup_event(dev); 1291 + } 1292 return 0; 1293 } 1294
+1
drivers/pci/pci.h
··· 56 extern void pci_disable_enabled_device(struct pci_dev *dev); 57 extern bool pci_check_pme_status(struct pci_dev *dev); 58 extern int pci_finish_runtime_suspend(struct pci_dev *dev); 59 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 60 extern void pci_pme_wakeup_bus(struct pci_bus *bus); 61 extern void pci_pm_init(struct pci_dev *dev);
··· 56 extern void pci_disable_enabled_device(struct pci_dev *dev); 57 extern bool pci_check_pme_status(struct pci_dev *dev); 58 extern int pci_finish_runtime_suspend(struct pci_dev *dev); 59 + extern void pci_wakeup_event(struct pci_dev *dev); 60 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign); 61 extern void pci_pme_wakeup_bus(struct pci_bus *bus); 62 extern void pci_pm_init(struct pci_dev *dev);
+4 -1
drivers/pci/pcie/pme/pcie_pme.c
··· 154 /* Skip PCIe devices in case we started from a root port. */ 155 if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { 156 pm_request_resume(&dev->dev); 157 ret = true; 158 } 159 ··· 255 if (found) { 256 /* The device is there, but we have to check its PME status. */ 257 found = pci_check_pme_status(dev); 258 - if (found) 259 pm_request_resume(&dev->dev); 260 pci_dev_put(dev); 261 } else if (devfn) { 262 /*
··· 154 /* Skip PCIe devices in case we started from a root port. */ 155 if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) { 156 pm_request_resume(&dev->dev); 157 + pci_wakeup_event(dev); 158 ret = true; 159 } 160 ··· 254 if (found) { 255 /* The device is there, but we have to check its PME status. */ 256 found = pci_check_pme_status(dev); 257 + if (found) { 258 pm_request_resume(&dev->dev); 259 + pci_wakeup_event(dev); 260 + } 261 pci_dev_put(dev); 262 } else if (devfn) { 263 /*
+3
drivers/pnp/core.c
··· 164 list_add_tail(&dev->global_list, &pnp_global); 165 list_add_tail(&dev->protocol_list, &dev->protocol->devices); 166 spin_unlock(&pnp_lock); 167 return device_register(&dev->dev); 168 } 169
··· 164 list_add_tail(&dev->global_list, &pnp_global); 165 list_add_tail(&dev->protocol_list, &dev->protocol->devices); 166 spin_unlock(&pnp_lock); 167 + if (dev->protocol->can_wakeup) 168 + device_set_wakeup_capable(&dev->dev, 169 + dev->protocol->can_wakeup(dev)); 170 return device_register(&dev->dev); 171 } 172
+23
drivers/pnp/pnpacpi/core.c
··· 122 } 123 124 #ifdef CONFIG_ACPI_SLEEP 125 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) 126 { 127 struct acpi_device *acpi_dev = dev->data; 128 acpi_handle handle = acpi_dev->handle; 129 int power_state; 130 131 power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); 132 if (power_state < 0) 133 power_state = (state.event == PM_EVENT_ON) ? 134 ACPI_STATE_D0 : ACPI_STATE_D3; 135 136 return acpi_bus_set_power(handle, power_state); 137 } 138 ··· 161 struct acpi_device *acpi_dev = dev->data; 162 acpi_handle handle = acpi_dev->handle; 163 164 return acpi_bus_set_power(handle, ACPI_STATE_D0); 165 } 166 #endif ··· 173 .set = pnpacpi_set_resources, 174 .disable = pnpacpi_disable_resources, 175 #ifdef CONFIG_ACPI_SLEEP 176 .suspend = pnpacpi_suspend, 177 .resume = pnpacpi_resume, 178 #endif
··· 122 } 123 124 #ifdef CONFIG_ACPI_SLEEP 125 + static bool pnpacpi_can_wakeup(struct pnp_dev *dev) 126 + { 127 + struct acpi_device *acpi_dev = dev->data; 128 + acpi_handle handle = acpi_dev->handle; 129 + 130 + return acpi_bus_can_wakeup(handle); 131 + } 132 + 133 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) 134 { 135 struct acpi_device *acpi_dev = dev->data; 136 acpi_handle handle = acpi_dev->handle; 137 int power_state; 138 139 + if (device_can_wakeup(&dev->dev)) { 140 + int rc = acpi_pm_device_sleep_wake(&dev->dev, 141 + device_may_wakeup(&dev->dev)); 142 + 143 + if (rc) 144 + return rc; 145 + } 146 power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); 147 if (power_state < 0) 148 power_state = (state.event == PM_EVENT_ON) ? 149 ACPI_STATE_D0 : ACPI_STATE_D3; 150 151 + /* acpi_bus_set_power() often fails (keyboard port can't be 152 + * powered-down?), and in any case, our return value is ignored 153 + * by pnp_bus_suspend(). Hence we don't revert the wakeup 154 + * setting if the set_power fails. 155 + */ 156 return acpi_bus_set_power(handle, power_state); 157 } 158 ··· 141 struct acpi_device *acpi_dev = dev->data; 142 acpi_handle handle = acpi_dev->handle; 143 144 + if (device_may_wakeup(&dev->dev)) 145 + acpi_pm_device_sleep_wake(&dev->dev, false); 146 return acpi_bus_set_power(handle, ACPI_STATE_D0); 147 } 148 #endif ··· 151 .set = pnpacpi_set_resources, 152 .disable = pnpacpi_disable_resources, 153 #ifdef CONFIG_ACPI_SLEEP 154 + .can_wakeup = pnpacpi_can_wakeup, 155 .suspend = pnpacpi_suspend, 156 .resume = pnpacpi_resume, 157 #endif
+1 -1
include/linux/netdevice.h
··· 779 */ 780 char name[IFNAMSIZ]; 781 782 - struct pm_qos_request_list *pm_qos_req; 783 784 /* device name hash chain */ 785 struct hlist_node name_hlist;
··· 779 */ 780 char name[IFNAMSIZ]; 781 782 + struct pm_qos_request_list pm_qos_req; 783 784 /* device name hash chain */ 785 struct hlist_node name_hlist;
+29
include/linux/plist.h
··· 260 #endif 261 262 /** 263 * plist_first - return the first node (and thus, highest priority) 264 * @head: the &struct plist_head pointer 265 * ··· 285 static inline struct plist_node *plist_first(const struct plist_head *head) 286 { 287 return list_entry(head->node_list.next, 288 struct plist_node, plist.node_list); 289 } 290
··· 260 #endif 261 262 /** 263 + * plist_last_entry - get the struct for the last entry 264 + * @head: the &struct plist_head pointer 265 + * @type: the type of the struct this is embedded in 266 + * @member: the name of the list_struct within the struct 267 + */ 268 + #ifdef CONFIG_DEBUG_PI_LIST 269 + # define plist_last_entry(head, type, member) \ 270 + ({ \ 271 + WARN_ON(plist_head_empty(head)); \ 272 + container_of(plist_last(head), type, member); \ 273 + }) 274 + #else 275 + # define plist_last_entry(head, type, member) \ 276 + container_of(plist_last(head), type, member) 277 + #endif 278 + 279 + /** 280 * plist_first - return the first node (and thus, highest priority) 281 * @head: the &struct plist_head pointer 282 * ··· 268 static inline struct plist_node *plist_first(const struct plist_head *head) 269 { 270 return list_entry(head->node_list.next, 271 + struct plist_node, plist.node_list); 272 + } 273 + 274 + /** 275 + * plist_last - return the last node (and thus, lowest priority) 276 + * @head: the &struct plist_head pointer 277 + * 278 + * Assumes the plist is _not_ empty. 279 + */ 280 + static inline struct plist_node *plist_last(const struct plist_head *head) 281 + { 282 + return list_entry(head->node_list.prev, 283 struct plist_node, plist.node_list); 284 } 285
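A small in-kernel sketch of how plist_first() and the new plist_last() relate to node priorities (the list is kept sorted by ascending ->prio); the head, lock and nodes are illustrative, only the plist calls themselves are real:

#include <linux/kernel.h>
#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static struct plist_head example_head =
	PLIST_HEAD_INIT(example_head, example_lock);
static struct plist_node a, b;

static void plist_example(void)
{
	plist_node_init(&a, 10);
	plist_node_init(&b, 20);
	plist_add(&a, &example_head);
	plist_add(&b, &example_head);

	/* Smallest prio value sorts first ("highest priority"). */
	WARN_ON(plist_first(&example_head) != &a);
	/* Largest prio value sorts last ("lowest priority"). */
	WARN_ON(plist_last(&example_head) != &b);

	plist_del(&a, &example_head);
	plist_del(&b, &example_head);
}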
+16
include/linux/pm.h
··· 457 #ifdef CONFIG_PM_SLEEP 458 struct list_head entry; 459 struct completion completion; 460 #endif 461 #ifdef CONFIG_PM_RUNTIME 462 struct timer_list suspend_timer; ··· 477 enum rpm_request request; 478 enum rpm_status runtime_status; 479 int runtime_error; 480 #endif 481 }; 482 483 /* 484 * The PM_EVENT_ messages are also used by drivers implementing the legacy ··· 559 } while (0) 560 561 extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); 562 #else /* !CONFIG_PM_SLEEP */ 563 564 #define device_pm_lock() do {} while (0) ··· 577 #define suspend_report_result(fn, ret) do {} while (0) 578 579 static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} 580 #endif /* !CONFIG_PM_SLEEP */ 581 582 /* How to reorder dpm_list after device_move() */
··· 457 #ifdef CONFIG_PM_SLEEP 458 struct list_head entry; 459 struct completion completion; 460 + unsigned long wakeup_count; 461 #endif 462 #ifdef CONFIG_PM_RUNTIME 463 struct timer_list suspend_timer; ··· 476 enum rpm_request request; 477 enum rpm_status runtime_status; 478 int runtime_error; 479 + unsigned long active_jiffies; 480 + unsigned long suspended_jiffies; 481 + unsigned long accounting_timestamp; 482 #endif 483 }; 484 + 485 + extern void update_pm_runtime_accounting(struct device *dev); 486 + 487 488 /* 489 * The PM_EVENT_ messages are also used by drivers implementing the legacy ··· 552 } while (0) 553 554 extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); 555 + 556 + /* drivers/base/power/wakeup.c */ 557 + extern void pm_wakeup_event(struct device *dev, unsigned int msec); 558 + extern void pm_stay_awake(struct device *dev); 559 + extern void pm_relax(void); 560 #else /* !CONFIG_PM_SLEEP */ 561 562 #define device_pm_lock() do {} while (0) ··· 565 #define suspend_report_result(fn, ret) do {} while (0) 566 567 static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} 568 + 569 + static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} 570 + static inline void pm_stay_awake(struct device *dev) {} 571 + static inline void pm_relax(void) {} 572 #endif /* !CONFIG_PM_SLEEP */ 573 574 /* How to reorder dpm_list after device_move() */
+10 -3
include/linux/pm_qos_params.h
··· 1 /* interface for the pm_qos_power infrastructure of the linux kernel. 2 * 3 * Mark Gross <mgross@linux.intel.com> 4 */ 5 - #include <linux/list.h> 6 #include <linux/notifier.h> 7 #include <linux/miscdevice.h> 8 ··· 16 #define PM_QOS_NUM_CLASSES 4 17 #define PM_QOS_DEFAULT_VALUE -1 18 19 - struct pm_qos_request_list; 20 21 - struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value); 22 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, 23 s32 new_value); 24 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); ··· 29 int pm_qos_request(int pm_qos_class); 30 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); 31 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); 32
··· 1 + #ifndef _LINUX_PM_QOS_PARAMS_H 2 + #define _LINUX_PM_QOS_PARAMS_H 3 /* interface for the pm_qos_power infrastructure of the linux kernel. 4 * 5 * Mark Gross <mgross@linux.intel.com> 6 */ 7 + #include <linux/plist.h> 8 #include <linux/notifier.h> 9 #include <linux/miscdevice.h> 10 ··· 14 #define PM_QOS_NUM_CLASSES 4 15 #define PM_QOS_DEFAULT_VALUE -1 16 17 + struct pm_qos_request_list { 18 + struct plist_node list; 19 + int pm_qos_class; 20 + }; 21 22 + void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value); 23 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, 24 s32 new_value); 25 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); ··· 24 int pm_qos_request(int pm_qos_class); 25 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); 26 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); 27 + int pm_qos_request_active(struct pm_qos_request_list *req); 28 29 + #endif
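With the request structure now embedded in the caller instead of allocated by the core (see the e1000e, igbvf and ipw2100 conversions above), typical driver usage looks roughly like this; foo_qos_req, foo_init/foo_exit and the 50 usec bound are illustrative only:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pm_qos_params.h>

static struct pm_qos_request_list foo_qos_req;

static int __init foo_init(void)
{
	/* No allocation any more: the caller owns the request object. */
	pm_qos_add_request(&foo_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
	return 0;
}
module_init(foo_init);

static void foo_busy(void)
{
	/* Tighten the CPU DMA latency bound while the hardware needs it. */
	pm_qos_update_request(&foo_qos_req, 50);
}

static void foo_idle(void)
{
	pm_qos_update_request(&foo_qos_req, PM_QOS_DEFAULT_VALUE);
}

static void __exit foo_exit(void)
{
	pm_qos_remove_request(&foo_qos_req);
}
module_exit(foo_exit);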
+7 -3
include/linux/pm_wakeup.h
··· 29 30 #ifdef CONFIG_PM 31 32 - /* changes to device_may_wakeup take effect on the next pm state change. 33 - * by default, devices should wakeup if they can. 34 */ 35 static inline void device_init_wakeup(struct device *dev, bool val) 36 { ··· 62 63 #else /* !CONFIG_PM */ 64 65 - /* For some reason the next two routines work even without CONFIG_PM */ 66 static inline void device_init_wakeup(struct device *dev, bool val) 67 { 68 dev->power.can_wakeup = val; ··· 70 71 static inline void device_set_wakeup_capable(struct device *dev, bool capable) 72 { 73 } 74 75 static inline bool device_can_wakeup(struct device *dev)
··· 29 30 #ifdef CONFIG_PM 31 32 + /* Changes to device_may_wakeup take effect on the next pm state change. 33 + * 34 + * By default, most devices should leave wakeup disabled. The exceptions 35 + * are devices that everyone expects to be wakeup sources: keyboards, 36 + * power buttons, possibly network interfaces, etc. 37 */ 38 static inline void device_init_wakeup(struct device *dev, bool val) 39 { ··· 59 60 #else /* !CONFIG_PM */ 61 62 + /* For some reason the following routines work even without CONFIG_PM */ 63 static inline void device_init_wakeup(struct device *dev, bool val) 64 { 65 dev->power.can_wakeup = val; ··· 67 68 static inline void device_set_wakeup_capable(struct device *dev, bool capable) 69 { 70 + dev->power.can_wakeup = capable; 71 } 72 73 static inline bool device_can_wakeup(struct device *dev)
+1
include/linux/pnp.h
··· 414 int (*disable) (struct pnp_dev *dev); 415 416 /* protocol specific suspend/resume */ 417 int (*suspend) (struct pnp_dev * dev, pm_message_t state); 418 int (*resume) (struct pnp_dev * dev); 419
··· 414 int (*disable) (struct pnp_dev *dev); 415 416 /* protocol specific suspend/resume */ 417 + bool (*can_wakeup) (struct pnp_dev *dev); 418 int (*suspend) (struct pnp_dev * dev, pm_message_t state); 419 int (*resume) (struct pnp_dev * dev); 420
+13 -4
include/linux/suspend.h
··· 61 * before device drivers' late suspend callbacks are executed. It returns 62 * 0 on success or a negative error code otherwise, in which case the 63 * system cannot enter the desired sleep state (@prepare_late(), @enter(), 64 - * @wake(), and @finish() will not be called in that case). 65 * 66 * @prepare_late: Finish preparing the platform for entering the system sleep 67 * state indicated by @begin(). 68 * @prepare_late is called before disabling nonboot CPUs and after 69 * device drivers' late suspend callbacks have been executed. It returns 70 * 0 on success or a negative error code otherwise, in which case the 71 - * system cannot enter the desired sleep state (@enter() and @wake()). 72 * 73 * @enter: Enter the system sleep state indicated by @begin() or represented by 74 * the argument if @begin() is not implemented. ··· 82 * resume callbacks are executed. 83 * This callback is optional, but should be implemented by the platforms 84 * that implement @prepare_late(). If implemented, it is always called 85 - * after @enter(), even if @enter() fails. 86 * 87 * @finish: Finish wake-up of the platform. 88 * @finish is called right prior to calling device drivers' regular suspend 89 * callbacks. 90 * This callback is optional, but should be implemented by the platforms 91 * that implement @prepare(). If implemented, it is always called after 92 - * @enter() and @wake(), if implemented, even if any of them fails. 93 * 94 * @end: Called by the PM core right after resuming devices, to indicate to 95 * the platform that the system has returned to the working state or ··· 288 { .notifier_call = fn, .priority = pri }; \ 289 register_pm_notifier(&fn##_nb); \ 290 } 291 #else /* !CONFIG_PM_SLEEP */ 292 293 static inline int register_pm_notifier(struct notifier_block *nb)
··· 61 * before device drivers' late suspend callbacks are executed. It returns 62 * 0 on success or a negative error code otherwise, in which case the 63 * system cannot enter the desired sleep state (@prepare_late(), @enter(), 64 + * and @wake() will not be called in that case). 65 * 66 * @prepare_late: Finish preparing the platform for entering the system sleep 67 * state indicated by @begin(). 68 * @prepare_late is called before disabling nonboot CPUs and after 69 * device drivers' late suspend callbacks have been executed. It returns 70 * 0 on success or a negative error code otherwise, in which case the 71 + * system cannot enter the desired sleep state (@enter() will not be 72 + * executed). 73 * 74 * @enter: Enter the system sleep state indicated by @begin() or represented by 75 * the argument if @begin() is not implemented. ··· 81 * resume callbacks are executed. 82 * This callback is optional, but should be implemented by the platforms 83 * that implement @prepare_late(). If implemented, it is always called 84 + * after @prepare_late and @enter(), even if one of them fails. 85 * 86 * @finish: Finish wake-up of the platform. 87 * @finish is called right prior to calling device drivers' regular suspend 88 * callbacks. 89 * This callback is optional, but should be implemented by the platforms 90 * that implement @prepare(). If implemented, it is always called after 91 + * @enter() and @wake(), even if any of them fails. It is executed after 92 + * a failing @prepare. 93 * 94 * @end: Called by the PM core right after resuming devices, to indicate to 95 * the platform that the system has returned to the working state or ··· 286 { .notifier_call = fn, .priority = pri }; \ 287 register_pm_notifier(&fn##_nb); \ 288 } 289 + 290 + /* drivers/base/power/wakeup.c */ 291 + extern bool events_check_enabled; 292 + 293 + extern bool pm_check_wakeup_events(void); 294 + extern bool pm_get_wakeup_count(unsigned long *count); 295 + extern bool pm_save_wakeup_count(unsigned long count); 296 #else /* !CONFIG_PM_SLEEP */ 297 298 static inline int register_pm_notifier(struct notifier_block *nb)
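Read together, the corrected kerneldoc above implies the following shape for a platform implementation; a sketch only, with all foo_* callbacks hypothetical and the ordering notes taken from the documentation as amended here:

#include <linux/init.h>
#include <linux/suspend.h>

static int foo_begin(suspend_state_t state) { return 0; }
static int foo_prepare(void) { return 0; }	/* before late suspend callbacks */
static int foo_prepare_late(void) { return 0; }	/* before nonboot CPUs go down */
static int foo_enter(suspend_state_t state) { return 0; }
/* Called even if @prepare_late or @enter fails. */
static void foo_wake(void) { }
/* Called even if @prepare or @enter fails. */
static void foo_finish(void) { }
static void foo_end(void) { }

static struct platform_suspend_ops foo_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.begin		= foo_begin,
	.prepare	= foo_prepare,
	.prepare_late	= foo_prepare_late,
	.enter		= foo_enter,
	.wake		= foo_wake,
	.finish		= foo_finish,
	.end		= foo_end,
};

static int __init foo_pm_init(void)
{
	suspend_set_ops(&foo_suspend_ops);
	return 0;
}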
+1 -1
include/sound/pcm.h
··· 366 int number; 367 char name[32]; /* substream name */ 368 int stream; /* stream (direction) */ 369 - struct pm_qos_request_list *latency_pm_qos_req; /* pm_qos request */ 370 size_t buffer_bytes_max; /* limit ring buffer size */ 371 struct snd_dma_buffer dma_buffer; 372 unsigned int dma_buf_id;
··· 366 int number; 367 char name[32]; /* substream name */ 368 int stream; /* stream (direction) */ 369 + struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */ 370 size_t buffer_bytes_max; /* limit ring buffer size */ 371 struct snd_dma_buffer dma_buffer; 372 unsigned int dma_buf_id;
+114 -101
kernel/pm_qos_params.c
··· 48 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock 49 * held, taken with _irqsave. One lock to rule them all 50 */ 51 - struct pm_qos_request_list { 52 - struct list_head list; 53 - union { 54 - s32 value; 55 - s32 usec; 56 - s32 kbps; 57 - }; 58 - int pm_qos_class; 59 }; 60 61 - static s32 max_compare(s32 v1, s32 v2); 62 - static s32 min_compare(s32 v1, s32 v2); 63 - 64 struct pm_qos_object { 65 - struct pm_qos_request_list requests; 66 struct blocking_notifier_head *notifiers; 67 struct miscdevice pm_qos_power_miscdev; 68 char *name; 69 s32 default_value; 70 - atomic_t target_value; 71 - s32 (*comparitor)(s32, s32); 72 }; 73 74 static struct pm_qos_object null_pm_qos; 75 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); 76 static struct pm_qos_object cpu_dma_pm_qos = { 77 - .requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)}, 78 .notifiers = &cpu_dma_lat_notifier, 79 .name = "cpu_dma_latency", 80 .default_value = 2000 * USEC_PER_SEC, 81 - .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), 82 - .comparitor = min_compare 83 }; 84 85 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 86 static struct pm_qos_object network_lat_pm_qos = { 87 - .requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)}, 88 .notifiers = &network_lat_notifier, 89 .name = "network_latency", 90 .default_value = 2000 * USEC_PER_SEC, 91 - .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), 92 - .comparitor = min_compare 93 }; 94 95 96 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); 97 static struct pm_qos_object network_throughput_pm_qos = { 98 - .requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)}, 99 .notifiers = &network_throughput_notifier, 100 .name = "network_throughput", 101 .default_value = 0, 102 - .target_value = ATOMIC_INIT(0), 103 - .comparitor = max_compare 104 }; 105 106 ··· 100 &network_lat_pm_qos, 101 &network_throughput_pm_qos 102 }; 103 - 104 - static DEFINE_SPINLOCK(pm_qos_lock); 105 106 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, 107 size_t count, loff_t *f_pos); ··· 112 .release = pm_qos_power_release, 113 }; 114 115 - /* static helper functions */ 116 - static s32 max_compare(s32 v1, s32 v2) 117 { 118 - return max(v1, v2); 119 } 120 121 - static s32 min_compare(s32 v1, s32 v2) 122 { 123 - return min(v1, v2); 124 - } 125 - 126 - 127 - static void update_target(int pm_qos_class) 128 - { 129 - s32 extreme_value; 130 - struct pm_qos_request_list *node; 131 unsigned long flags; 132 - int call_notifier = 0; 133 134 spin_lock_irqsave(&pm_qos_lock, flags); 135 - extreme_value = pm_qos_array[pm_qos_class]->default_value; 136 - list_for_each_entry(node, 137 - &pm_qos_array[pm_qos_class]->requests.list, list) { 138 - extreme_value = pm_qos_array[pm_qos_class]->comparitor( 139 - extreme_value, node->value); 140 } 141 - if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) != 142 - extreme_value) { 143 - call_notifier = 1; 144 - atomic_set(&pm_qos_array[pm_qos_class]->target_value, 145 - extreme_value); 146 - pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class, 147 - atomic_read(&pm_qos_array[pm_qos_class]->target_value)); 148 - } 149 spin_unlock_irqrestore(&pm_qos_lock, flags); 150 151 - if (call_notifier) 152 - blocking_notifier_call_chain( 153 - pm_qos_array[pm_qos_class]->notifiers, 154 - (unsigned long) extreme_value, NULL); 155 } 156 157 static int register_pm_qos_misc(struct pm_qos_object *qos) ··· 193 */ 194 int pm_qos_request(int pm_qos_class) 195 { 196 - return 
atomic_read(&pm_qos_array[pm_qos_class]->target_value); 197 } 198 EXPORT_SYMBOL_GPL(pm_qos_request); 199 200 /** 201 * pm_qos_add_request - inserts new qos request into the list ··· 221 * element as a handle for use in updating and removal. Call needs to save 222 * this handle for later use. 223 */ 224 - struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) 225 { 226 - struct pm_qos_request_list *dep; 227 - unsigned long flags; 228 229 - dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); 230 - if (dep) { 231 - if (value == PM_QOS_DEFAULT_VALUE) 232 - dep->value = pm_qos_array[pm_qos_class]->default_value; 233 - else 234 - dep->value = value; 235 - dep->pm_qos_class = pm_qos_class; 236 - 237 - spin_lock_irqsave(&pm_qos_lock, flags); 238 - list_add(&dep->list, 239 - &pm_qos_array[pm_qos_class]->requests.list); 240 - spin_unlock_irqrestore(&pm_qos_lock, flags); 241 - update_target(pm_qos_class); 242 } 243 - 244 - return dep; 245 } 246 EXPORT_SYMBOL_GPL(pm_qos_add_request); 247 ··· 252 * Attempts are made to make this code callable on hot code paths. 253 */ 254 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, 255 - s32 new_value) 256 { 257 - unsigned long flags; 258 - int pending_update = 0; 259 s32 temp; 260 261 - if (pm_qos_req) { /*guard against callers passing in null */ 262 - spin_lock_irqsave(&pm_qos_lock, flags); 263 - if (new_value == PM_QOS_DEFAULT_VALUE) 264 - temp = pm_qos_array[pm_qos_req->pm_qos_class]->default_value; 265 - else 266 - temp = new_value; 267 268 - if (temp != pm_qos_req->value) { 269 - pending_update = 1; 270 - pm_qos_req->value = temp; 271 - } 272 - spin_unlock_irqrestore(&pm_qos_lock, flags); 273 - if (pending_update) 274 - update_target(pm_qos_req->pm_qos_class); 275 } 276 } 277 EXPORT_SYMBOL_GPL(pm_qos_update_request); 278 ··· 287 */ 288 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) 289 { 290 - unsigned long flags; 291 - int qos_class; 292 293 if (pm_qos_req == NULL) 294 return; 295 /* silent return to keep pcm code cleaner */ 296 297 - qos_class = pm_qos_req->pm_qos_class; 298 - spin_lock_irqsave(&pm_qos_lock, flags); 299 - list_del(&pm_qos_req->list); 300 - kfree(pm_qos_req); 301 - spin_unlock_irqrestore(&pm_qos_lock, flags); 302 - update_target(qos_class); 303 } 304 EXPORT_SYMBOL_GPL(pm_qos_remove_request); 305 ··· 348 349 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); 350 if (pm_qos_class >= 0) { 351 - filp->private_data = (void *) pm_qos_add_request(pm_qos_class, 352 - PM_QOS_DEFAULT_VALUE); 353 354 if (filp->private_data) 355 return 0; ··· 365 { 366 struct pm_qos_request_list *req; 367 368 - req = (struct pm_qos_request_list *)filp->private_data; 369 pm_qos_remove_request(req); 370 371 return 0; 372 }
··· 48 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock 49 * held, taken with _irqsave. One lock to rule them all 50 */ 51 + enum pm_qos_type { 52 + PM_QOS_MAX, /* return the largest value */ 53 + PM_QOS_MIN /* return the smallest value */ 54 }; 55 56 struct pm_qos_object { 57 + struct plist_head requests; 58 struct blocking_notifier_head *notifiers; 59 struct miscdevice pm_qos_power_miscdev; 60 char *name; 61 s32 default_value; 62 + enum pm_qos_type type; 63 }; 64 + 65 + static DEFINE_SPINLOCK(pm_qos_lock); 66 67 static struct pm_qos_object null_pm_qos; 68 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier); 69 static struct pm_qos_object cpu_dma_pm_qos = { 70 + .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), 71 .notifiers = &cpu_dma_lat_notifier, 72 .name = "cpu_dma_latency", 73 .default_value = 2000 * USEC_PER_SEC, 74 + .type = PM_QOS_MIN, 75 }; 76 77 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier); 78 static struct pm_qos_object network_lat_pm_qos = { 79 + .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), 80 .notifiers = &network_lat_notifier, 81 .name = "network_latency", 82 .default_value = 2000 * USEC_PER_SEC, 83 + .type = PM_QOS_MIN 84 }; 85 86 87 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier); 88 static struct pm_qos_object network_throughput_pm_qos = { 89 + .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), 90 .notifiers = &network_throughput_notifier, 91 .name = "network_throughput", 92 .default_value = 0, 93 + .type = PM_QOS_MAX, 94 }; 95 96 ··· 110 &network_lat_pm_qos, 111 &network_throughput_pm_qos 112 }; 113 114 static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, 115 size_t count, loff_t *f_pos); ··· 124 .release = pm_qos_power_release, 125 }; 126 127 + /* unlocked internal variant */ 128 + static inline int pm_qos_get_value(struct pm_qos_object *o) 129 { 130 + if (plist_head_empty(&o->requests)) 131 + return o->default_value; 132 + 133 + switch (o->type) { 134 + case PM_QOS_MIN: 135 + return plist_last(&o->requests)->prio; 136 + 137 + case PM_QOS_MAX: 138 + return plist_first(&o->requests)->prio; 139 + 140 + default: 141 + /* runtime check for not using enum */ 142 + BUG(); 143 + } 144 } 145 146 + static void update_target(struct pm_qos_object *o, struct plist_node *node, 147 + int del, int value) 148 { 149 unsigned long flags; 150 + int prev_value, curr_value; 151 152 spin_lock_irqsave(&pm_qos_lock, flags); 153 + prev_value = pm_qos_get_value(o); 154 + /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */ 155 + if (value != PM_QOS_DEFAULT_VALUE) { 156 + /* 157 + * to change the list, we atomically remove, reinit 158 + * with new value and add, then see if the extremal 159 + * changed 160 + */ 161 + plist_del(node, &o->requests); 162 + plist_node_init(node, value); 163 + plist_add(node, &o->requests); 164 + } else if (del) { 165 + plist_del(node, &o->requests); 166 + } else { 167 + plist_add(node, &o->requests); 168 } 169 + curr_value = pm_qos_get_value(o); 170 spin_unlock_irqrestore(&pm_qos_lock, flags); 171 172 + if (prev_value != curr_value) 173 + blocking_notifier_call_chain(o->notifiers, 174 + (unsigned long)curr_value, 175 + NULL); 176 } 177 178 static int register_pm_qos_misc(struct pm_qos_object *qos) ··· 196 */ 197 int pm_qos_request(int pm_qos_class) 198 { 199 + unsigned long flags; 200 + int value; 201 + 202 + spin_lock_irqsave(&pm_qos_lock, flags); 203 + value = pm_qos_get_value(pm_qos_array[pm_qos_class]); 204 + 
spin_unlock_irqrestore(&pm_qos_lock, flags); 205 + 206 + return value; 207 } 208 EXPORT_SYMBOL_GPL(pm_qos_request); 209 + 210 + int pm_qos_request_active(struct pm_qos_request_list *req) 211 + { 212 + return req->pm_qos_class != 0; 213 + } 214 + EXPORT_SYMBOL_GPL(pm_qos_request_active); 215 216 /** 217 * pm_qos_add_request - inserts new qos request into the list ··· 211 * element as a handle for use in updating and removal. Call needs to save 212 * this handle for later use. 213 */ 214 + void pm_qos_add_request(struct pm_qos_request_list *dep, 215 + int pm_qos_class, s32 value) 216 { 217 + struct pm_qos_object *o = pm_qos_array[pm_qos_class]; 218 + int new_value; 219 220 + if (pm_qos_request_active(dep)) { 221 + WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); 222 + return; 223 } 224 + if (value == PM_QOS_DEFAULT_VALUE) 225 + new_value = o->default_value; 226 + else 227 + new_value = value; 228 + plist_node_init(&dep->list, new_value); 229 + dep->pm_qos_class = pm_qos_class; 230 + update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); 231 } 232 EXPORT_SYMBOL_GPL(pm_qos_add_request); 233 ··· 246 * Attempts are made to make this code callable on hot code paths. 247 */ 248 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, 249 + s32 new_value) 250 { 251 s32 temp; 252 + struct pm_qos_object *o; 253 254 + if (!pm_qos_req) /*guard against callers passing in null */ 255 + return; 256 257 + if (!pm_qos_request_active(pm_qos_req)) { 258 + WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); 259 + return; 260 } 261 + 262 + o = pm_qos_array[pm_qos_req->pm_qos_class]; 263 + 264 + if (new_value == PM_QOS_DEFAULT_VALUE) 265 + temp = o->default_value; 266 + else 267 + temp = new_value; 268 + 269 + if (temp != pm_qos_req->list.prio) 270 + update_target(o, &pm_qos_req->list, 0, temp); 271 } 272 EXPORT_SYMBOL_GPL(pm_qos_update_request); 273 ··· 280 */ 281 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req) 282 { 283 + struct pm_qos_object *o; 284 285 if (pm_qos_req == NULL) 286 return; 287 /* silent return to keep pcm code cleaner */ 288 289 + if (!pm_qos_request_active(pm_qos_req)) { 290 + WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); 291 + return; 292 + } 293 + 294 + o = pm_qos_array[pm_qos_req->pm_qos_class]; 295 + update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); 296 + memset(pm_qos_req, 0, sizeof(*pm_qos_req)); 297 } 298 EXPORT_SYMBOL_GPL(pm_qos_remove_request); 299 ··· 340 341 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); 342 if (pm_qos_class >= 0) { 343 + struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); 344 + if (!req) 345 + return -ENOMEM; 346 + 347 + pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); 348 + filp->private_data = req; 349 350 if (filp->private_data) 351 return 0; ··· 353 { 354 struct pm_qos_request_list *req; 355 356 + req = filp->private_data; 357 pm_qos_remove_request(req); 358 + kfree(req); 359 360 return 0; 361 }
+16 -8
kernel/power/hibernate.c
··· 277 goto Enable_irqs; 278 } 279 280 - if (hibernation_test(TEST_CORE)) 281 goto Power_up; 282 283 in_suspend = 1; ··· 288 error); 289 /* Restore control flow magically appears here */ 290 restore_processor_state(); 291 - if (!in_suspend) 292 platform_leave(platform_mode); 293 294 Power_up: 295 sysdev_resume(); ··· 330 331 error = platform_begin(platform_mode); 332 if (error) 333 - return error; 334 335 /* Preallocate image memory before shutting down devices. */ 336 error = hibernate_preallocate_memory(); ··· 513 514 local_irq_disable(); 515 sysdev_suspend(PMSG_HIBERNATE); 516 hibernation_ops->enter(); 517 /* We should never get here */ 518 while (1); 519 520 - /* 521 - * We don't need to reenable the nonboot CPUs or resume consoles, since 522 - * the system is going to be halted anyway. 523 - */ 524 Platform_finish: 525 hibernation_ops->finish(); 526 527 - dpm_suspend_noirq(PMSG_RESTORE); 528 529 Resume_devices: 530 entering_platform_hibernation = false;
··· 277 goto Enable_irqs; 278 } 279 280 + if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events()) 281 goto Power_up; 282 283 in_suspend = 1; ··· 288 error); 289 /* Restore control flow magically appears here */ 290 restore_processor_state(); 291 + if (!in_suspend) { 292 + events_check_enabled = false; 293 platform_leave(platform_mode); 294 + } 295 296 Power_up: 297 sysdev_resume(); ··· 328 329 error = platform_begin(platform_mode); 330 if (error) 331 + goto Close; 332 333 /* Preallocate image memory before shutting down devices. */ 334 error = hibernate_preallocate_memory(); ··· 511 512 local_irq_disable(); 513 sysdev_suspend(PMSG_HIBERNATE); 514 + if (!pm_check_wakeup_events()) { 515 + error = -EAGAIN; 516 + goto Power_up; 517 + } 518 + 519 hibernation_ops->enter(); 520 /* We should never get here */ 521 while (1); 522 523 + Power_up: 524 + sysdev_resume(); 525 + local_irq_enable(); 526 + enable_nonboot_cpus(); 527 + 528 Platform_finish: 529 hibernation_ops->finish(); 530 531 + dpm_resume_noirq(PMSG_RESTORE); 532 533 Resume_devices: 534 entering_platform_hibernation = false;
+55
kernel/power/main.c
··· 204 205 power_attr(state); 206 207 #ifdef CONFIG_PM_TRACE 208 int pm_trace_enabled; 209 ··· 290 #endif 291 #ifdef CONFIG_PM_SLEEP 292 &pm_async_attr.attr, 293 #ifdef CONFIG_PM_DEBUG 294 &pm_test_attr.attr, 295 #endif
··· 204 205 power_attr(state); 206 207 + #ifdef CONFIG_PM_SLEEP 208 + /* 209 + * The 'wakeup_count' attribute, along with the functions defined in 210 + * drivers/base/power/wakeup.c, provides a means by which wakeup events can be 211 + * handled in a non-racy way. 212 + * 213 + * If a wakeup event occurs when the system is in a sleep state, it simply is 214 + * woken up. In turn, if an event that would wake the system up from a sleep 215 + * state occurs when it is undergoing a transition to that sleep state, the 216 + * transition should be aborted. Moreover, if such an event occurs when the 217 + * system is in the working state, an attempt to start a transition to the 218 + * given sleep state should fail during a certain period after the detection of 219 + * the event. Using the 'state' attribute alone is not sufficient to satisfy 220 + * these requirements, because a wakeup event may occur exactly when 'state' 221 + * is being written to and may be delivered to user space right before it is 222 + * frozen, so the event will remain only partially processed until the system is 223 + * woken up by another event. In particular, it won't cause the transition to 224 + * a sleep state to be aborted. 225 + * 226 + * This difficulty may be overcome if user space uses 'wakeup_count' before 227 + * writing to 'state'. It should first read from 'wakeup_count' and store 228 + * the read value. Then, after carrying out its own preparations for the system 229 + * transition to a sleep state, it should write the stored value to 230 + * 'wakeup_count'. If that fails, at least one wakeup event has occurred since 231 + * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it 232 + * is allowed to write to 'state', but the transition will be aborted if there 233 + * are any wakeup events detected after 'wakeup_count' was written to. 234 + */ 235 + 236 + static ssize_t wakeup_count_show(struct kobject *kobj, 237 + struct kobj_attribute *attr, 238 + char *buf) 239 + { 240 + unsigned long val; 241 + 242 + return pm_get_wakeup_count(&val) ? sprintf(buf, "%lu\n", val) : -EINTR; 243 + } 244 + 245 + static ssize_t wakeup_count_store(struct kobject *kobj, 246 + struct kobj_attribute *attr, 247 + const char *buf, size_t n) 248 + { 249 + unsigned long val; 250 + 251 + if (sscanf(buf, "%lu", &val) == 1) { 252 + if (pm_save_wakeup_count(val)) 253 + return n; 254 + } 255 + return -EINVAL; 256 + } 257 + 258 + power_attr(wakeup_count); 259 + #endif /* CONFIG_PM_SLEEP */ 260 + 261 #ifdef CONFIG_PM_TRACE 262 int pm_trace_enabled; 263 ··· 236 #endif 237 #ifdef CONFIG_PM_SLEEP 238 &pm_async_attr.attr, 239 + &wakeup_count_attr.attr, 240 #ifdef CONFIG_PM_DEBUG 241 &pm_test_attr.attr, 242 #endif
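
Seen from user space, the procedure the comment above describes amounts to: read 'wakeup_count', do the suspend preparations, write the value back, and only then write to 'state'. A rough, illustrative sketch in C (the /sys/power paths and the "mem" state string are assumptions for the example; error handling is kept minimal):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char count[32];
	ssize_t len;
	int fd;

	/* 1. read and remember the current number of registered wakeup events */
	fd = open("/sys/power/wakeup_count", O_RDONLY);
	if (fd < 0)
		return 1;
	len = read(fd, count, sizeof(count) - 1);	/* may block, see above */
	close(fd);
	if (len <= 0)
		return 1;
	count[len] = '\0';

	/* ... user space carries out its own suspend preparations here ... */

	/* 2. write the stored value back; failure means wakeup events arrived */
	fd = open("/sys/power/wakeup_count", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, count, strlen(count)) < 0) {
		close(fd);
		return 1;	/* abort: do not write to 'state' */
	}
	close(fd);

	/* 3. only now request the transition to a sleep state */
	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "mem", 3) < 0) {
		close(fd);
		return 1;	/* transition failed or was aborted by a late wakeup event */
	}
	close(fd);
	return 0;
}
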
+7 -6
kernel/power/suspend.c
··· 136 if (suspend_ops->prepare) { 137 error = suspend_ops->prepare(); 138 if (error) 139 - return error; 140 } 141 142 error = dpm_suspend_noirq(PMSG_SUSPEND); 143 if (error) { 144 printk(KERN_ERR "PM: Some devices failed to power down\n"); 145 - goto Platfrom_finish; 146 } 147 148 if (suspend_ops->prepare_late) { 149 error = suspend_ops->prepare_late(); 150 if (error) 151 - goto Power_up_devices; 152 } 153 154 if (suspend_test(TEST_PLATFORM)) ··· 163 164 error = sysdev_suspend(PMSG_SUSPEND); 165 if (!error) { 166 - if (!suspend_test(TEST_CORE)) 167 error = suspend_ops->enter(state); 168 sysdev_resume(); 169 } 170 ··· 180 if (suspend_ops->wake) 181 suspend_ops->wake(); 182 183 - Power_up_devices: 184 dpm_resume_noirq(PMSG_RESUME); 185 186 - Platfrom_finish: 187 if (suspend_ops->finish) 188 suspend_ops->finish(); 189
··· 136 if (suspend_ops->prepare) { 137 error = suspend_ops->prepare(); 138 if (error) 139 + goto Platform_finish; 140 } 141 142 error = dpm_suspend_noirq(PMSG_SUSPEND); 143 if (error) { 144 printk(KERN_ERR "PM: Some devices failed to power down\n"); 145 + goto Platform_finish; 146 } 147 148 if (suspend_ops->prepare_late) { 149 error = suspend_ops->prepare_late(); 150 if (error) 151 + goto Platform_wake; 152 } 153 154 if (suspend_test(TEST_PLATFORM)) ··· 163 164 error = sysdev_suspend(PMSG_SUSPEND); 165 if (!error) { 166 + if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) { 167 error = suspend_ops->enter(state); 168 + events_check_enabled = false; 169 + } 170 sysdev_resume(); 171 } 172 ··· 178 if (suspend_ops->wake) 179 suspend_ops->wake(); 180 181 dpm_resume_noirq(PMSG_RESUME); 182 183 + Platform_finish: 184 if (suspend_ops->finish) 185 suspend_ops->finish(); 186
+2 -2
kernel/power/swap.c
··· 32 /* 33 * The swap map is a data structure used for keeping track of each page 34 * written to a swap partition. It consists of many swap_map_page 35 - * structures that contain each an array of MAP_PAGE_SIZE swap entries. 36 * These structures are stored on the swap and linked together with the 37 * help of the .next_swap member. 38 * ··· 148 149 /** 150 * free_all_swap_pages - free swap pages allocated for saving image data. 151 - * It also frees the extents used to register which swap entres had been 152 * allocated. 153 */ 154
··· 32 /* 33 * The swap map is a data structure used for keeping track of each page 34 * written to a swap partition. It consists of many swap_map_page 35 + * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. 36 * These structures are stored on the swap and linked together with the 37 * help of the .next_swap member. 38 * ··· 148 149 /** 150 * free_all_swap_pages - free swap pages allocated for saving image data. 151 + * It also frees the extents used to register which swap entries had been 152 * allocated. 153 */ 154
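
For reference, the structure the corrected comment describes is defined near the top of the same file, approximately as below; it is not part of this diff and is shown only to make MAP_PAGE_ENTRIES concrete (the exact definition lives in kernel/power/swap.c):

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];	/* sectors holding the saved image pages */
	sector_t next_swap;			/* sector of the next swap_map_page */
};
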
+5 -8
sound/core/pcm_native.c
··· 451 snd_pcm_timer_resolution_change(substream); 452 runtime->status->state = SNDRV_PCM_STATE_SETUP; 453 454 - if (substream->latency_pm_qos_req) { 455 - pm_qos_remove_request(substream->latency_pm_qos_req); 456 - substream->latency_pm_qos_req = NULL; 457 - } 458 if ((usecs = period_to_usecs(runtime)) >= 0) 459 - substream->latency_pm_qos_req = pm_qos_add_request( 460 - PM_QOS_CPU_DMA_LATENCY, usecs); 461 return 0; 462 _error: 463 /* hardware might be unuseable from this time, ··· 510 if (substream->ops->hw_free) 511 result = substream->ops->hw_free(substream); 512 runtime->status->state = SNDRV_PCM_STATE_OPEN; 513 - pm_qos_remove_request(substream->latency_pm_qos_req); 514 - substream->latency_pm_qos_req = NULL; 515 return result; 516 } 517
··· 451 snd_pcm_timer_resolution_change(substream); 452 runtime->status->state = SNDRV_PCM_STATE_SETUP; 453 454 + if (pm_qos_request_active(&substream->latency_pm_qos_req)) 455 + pm_qos_remove_request(&substream->latency_pm_qos_req); 456 if ((usecs = period_to_usecs(runtime)) >= 0) 457 + pm_qos_add_request(&substream->latency_pm_qos_req, 458 + PM_QOS_CPU_DMA_LATENCY, usecs); 459 return 0; 460 _error: 461 /* hardware might be unuseable from this time, ··· 512 if (substream->ops->hw_free) 513 result = substream->ops->hw_free(substream); 514 runtime->status->state = SNDRV_PCM_STATE_OPEN; 515 + pm_qos_remove_request(&substream->latency_pm_qos_req); 516 return result; 517 } 518
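
The converted calls above take the address of substream->latency_pm_qos_req, which only works if that field is now an embedded structure rather than a pointer; the matching include/sound/pcm.h hunk is not shown here, so the member is assumed to end up roughly as in the sketch below. Embedding the request is what lets the open-coded NULL checks in the removed lines go away.

#include <linux/pm_qos_params.h>

/* hypothetical fragment standing in for struct snd_pcm_substream */
struct example_substream {
	/* before the rework: struct pm_qos_request_list *latency_pm_qos_req; */
	struct pm_qos_request_list latency_pm_qos_req;	/* embedded request handle */
};
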