Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

pm_qos: Get rid of the allocation in pm_qos_add_request()

All current users of pm_qos_add_request() have the ability to supply
the memory required by the pm_qos routines, so make them do this and
eliminate the kmalloc() within pm_qos_add_request(). This has the
double benefit of making the call never fail and allowing it to be
called from atomic context.

Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Signed-off-by: mark gross <markgross@thegnar.org>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>

Authored by James Bottomley, committed by Rafael J. Wysocki
82f68251 5f279845

+73 -60
+7 -10
drivers/net/e1000e/netdev.c
··· 2901 2901 * dropped transactions. 2902 2902 */ 2903 2903 pm_qos_update_request( 2904 - adapter->netdev->pm_qos_req, 55); 2904 + &adapter->netdev->pm_qos_req, 55); 2905 2905 } else { 2906 2906 pm_qos_update_request( 2907 - adapter->netdev->pm_qos_req, 2907 + &adapter->netdev->pm_qos_req, 2908 2908 PM_QOS_DEFAULT_VALUE); 2909 2909 } 2910 2910 } ··· 3196 3196 3197 3197 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3198 3198 if (adapter->flags & FLAG_HAS_ERT) 3199 - adapter->netdev->pm_qos_req = 3200 - pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 3201 - PM_QOS_DEFAULT_VALUE); 3199 + pm_qos_add_request(&adapter->netdev->pm_qos_req, 3200 + PM_QOS_CPU_DMA_LATENCY, 3201 + PM_QOS_DEFAULT_VALUE); 3202 3202 3203 3203 /* hardware has been reset, we need to reload some things */ 3204 3204 e1000_configure(adapter); ··· 3263 3263 e1000_clean_tx_ring(adapter); 3264 3264 e1000_clean_rx_ring(adapter); 3265 3265 3266 - if (adapter->flags & FLAG_HAS_ERT) { 3267 - pm_qos_remove_request( 3268 - adapter->netdev->pm_qos_req); 3269 - adapter->netdev->pm_qos_req = NULL; 3270 - } 3266 + if (adapter->flags & FLAG_HAS_ERT) 3267 + pm_qos_remove_request(&adapter->netdev->pm_qos_req); 3271 3268 3272 3269 /* 3273 3270 * TODO: for power management, we could drop the link and
+4 -5
drivers/net/igbvf/netdev.c
··· 48 48 #define DRV_VERSION "1.0.0-k0" 49 49 char igbvf_driver_name[] = "igbvf"; 50 50 const char igbvf_driver_version[] = DRV_VERSION; 51 - struct pm_qos_request_list *igbvf_driver_pm_qos_req; 51 + static struct pm_qos_request_list igbvf_driver_pm_qos_req; 52 52 static const char igbvf_driver_string[] = 53 53 "Intel(R) Virtual Function Network Driver"; 54 54 static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; ··· 2902 2902 printk(KERN_INFO "%s\n", igbvf_copyright); 2903 2903 2904 2904 ret = pci_register_driver(&igbvf_driver); 2905 - igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 2906 - PM_QOS_DEFAULT_VALUE); 2905 + pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 2906 + PM_QOS_DEFAULT_VALUE); 2907 2907 2908 2908 return ret; 2909 2909 } ··· 2918 2918 static void __exit igbvf_exit_module(void) 2919 2919 { 2920 2920 pci_unregister_driver(&igbvf_driver); 2921 - pm_qos_remove_request(igbvf_driver_pm_qos_req); 2922 - igbvf_driver_pm_qos_req = NULL; 2921 + pm_qos_remove_request(&igbvf_driver_pm_qos_req); 2923 2922 } 2924 2923 module_exit(igbvf_exit_module); 2925 2924
+6 -6
drivers/net/wireless/ipw2x00/ipw2100.c
··· 174 174 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 175 175 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 176 176 177 - struct pm_qos_request_list *ipw2100_pm_qos_req; 177 + struct pm_qos_request_list ipw2100_pm_qos_req; 178 178 179 179 /* Debugging stuff */ 180 180 #ifdef CONFIG_IPW2100_DEBUG ··· 1741 1741 /* the ipw2100 hardware really doesn't want power management delays 1742 1742 * longer than 175usec 1743 1743 */ 1744 - pm_qos_update_request(ipw2100_pm_qos_req, 175); 1744 + pm_qos_update_request(&ipw2100_pm_qos_req, 175); 1745 1745 1746 1746 /* If the interrupt is enabled, turn it off... */ 1747 1747 spin_lock_irqsave(&priv->low_lock, flags); ··· 1889 1889 ipw2100_disable_interrupts(priv); 1890 1890 spin_unlock_irqrestore(&priv->low_lock, flags); 1891 1891 1892 - pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1892 + pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1893 1893 1894 1894 /* We have to signal any supplicant if we are disassociating */ 1895 1895 if (associated) ··· 6669 6669 if (ret) 6670 6670 goto out; 6671 6671 6672 - ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 6673 - PM_QOS_DEFAULT_VALUE); 6672 + pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 6673 + PM_QOS_DEFAULT_VALUE); 6674 6674 #ifdef CONFIG_IPW2100_DEBUG 6675 6675 ipw2100_debug_level = debug; 6676 6676 ret = driver_create_file(&ipw2100_pci_driver.driver, ··· 6692 6692 &driver_attr_debug_level); 6693 6693 #endif 6694 6694 pci_unregister_driver(&ipw2100_pci_driver); 6695 - pm_qos_remove_request(ipw2100_pm_qos_req); 6695 + pm_qos_remove_request(&ipw2100_pm_qos_req); 6696 6696 } 6697 6697 6698 6698 module_init(ipw2100_init);
+1 -1
include/linux/netdevice.h
··· 779 779 */ 780 780 char name[IFNAMSIZ]; 781 781 782 - struct pm_qos_request_list *pm_qos_req; 782 + struct pm_qos_request_list pm_qos_req; 783 783 784 784 /* device name hash chain */ 785 785 struct hlist_node name_hlist;
+10 -3
include/linux/pm_qos_params.h
··· 1 + #ifndef _LINUX_PM_QOS_PARAMS_H 2 + #define _LINUX_PM_QOS_PARAMS_H 1 3 /* interface for the pm_qos_power infrastructure of the linux kernel. 2 4 * 3 5 * Mark Gross <mgross@linux.intel.com> 4 6 */ 5 - #include <linux/list.h> 7 + #include <linux/plist.h> 6 8 #include <linux/notifier.h> 7 9 #include <linux/miscdevice.h> 8 10 ··· 16 14 #define PM_QOS_NUM_CLASSES 4 17 15 #define PM_QOS_DEFAULT_VALUE -1 18 16 19 - struct pm_qos_request_list; 17 + struct pm_qos_request_list { 18 + struct plist_node list; 19 + int pm_qos_class; 20 + }; 20 21 21 - struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value); 22 + void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value); 22 23 void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req, 23 24 s32 new_value); 24 25 void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req); ··· 29 24 int pm_qos_request(int pm_qos_class); 30 25 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier); 31 26 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier); 27 + int pm_qos_request_active(struct pm_qos_request_list *req); 32 28 29 + #endif
+1 -1
include/sound/pcm.h
··· 366 366 int number; 367 367 char name[32]; /* substream name */ 368 368 int stream; /* stream (direction) */ 369 - struct pm_qos_request_list *latency_pm_qos_req; /* pm_qos request */ 369 + struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */ 370 370 size_t buffer_bytes_max; /* limit ring buffer size */ 371 371 struct snd_dma_buffer dma_buffer; 372 372 unsigned int dma_buf_id;
+39 -26
kernel/pm_qos_params.c
··· 30 30 /*#define DEBUG*/ 31 31 32 32 #include <linux/pm_qos_params.h> 33 - #include <linux/plist.h> 34 33 #include <linux/sched.h> 35 34 #include <linux/spinlock.h> 36 35 #include <linux/slab.h> ··· 48 49 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock 49 50 * held, taken with _irqsave. One lock to rule them all 50 51 */ 51 - struct pm_qos_request_list { 52 - struct plist_node list; 53 - int pm_qos_class; 54 - }; 55 - 56 52 enum pm_qos_type { 57 53 PM_QOS_MAX, /* return the largest value */ 58 54 PM_QOS_MIN /* return the smallest value */ ··· 204 210 } 205 211 EXPORT_SYMBOL_GPL(pm_qos_request); 206 212 213 + int pm_qos_request_active(struct pm_qos_request_list *req) 214 + { 215 + return req->pm_qos_class != 0; 216 + } 217 + EXPORT_SYMBOL_GPL(pm_qos_request_active); 218 + 207 219 /** 208 220 * pm_qos_add_request - inserts new qos request into the list 209 221 * @pm_qos_class: identifies which list of qos request to us ··· 221 221 * element as a handle for use in updating and removal. Call needs to save 222 222 * this handle for later use. 
223 223 */ 224 - struct pm_qos_request_list *pm_qos_add_request(int pm_qos_class, s32 value) 224 + void pm_qos_add_request(struct pm_qos_request_list *dep, 225 + int pm_qos_class, s32 value) 225 226 { 226 - struct pm_qos_request_list *dep; 227 + struct pm_qos_object *o = pm_qos_array[pm_qos_class]; 228 + int new_value; 227 229 228 - dep = kzalloc(sizeof(struct pm_qos_request_list), GFP_KERNEL); 229 - if (dep) { 230 - struct pm_qos_object *o = pm_qos_array[pm_qos_class]; 231 - int new_value; 232 - 233 - if (value == PM_QOS_DEFAULT_VALUE) 234 - new_value = o->default_value; 235 - else 236 - new_value = value; 237 - plist_node_init(&dep->list, new_value); 238 - dep->pm_qos_class = pm_qos_class; 239 - update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); 230 + if (pm_qos_request_active(dep)) { 231 + WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n"); 232 + return; 240 233 } 241 - 242 - return dep; 234 + if (value == PM_QOS_DEFAULT_VALUE) 235 + new_value = o->default_value; 236 + else 237 + new_value = value; 238 + plist_node_init(&dep->list, new_value); 239 + dep->pm_qos_class = pm_qos_class; 240 + update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE); 243 241 } 244 242 EXPORT_SYMBOL_GPL(pm_qos_add_request); 245 243 ··· 259 261 260 262 if (!pm_qos_req) /*guard against callers passing in null */ 261 263 return; 264 + 265 + if (!pm_qos_request_active(pm_qos_req)) { 266 + WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n"); 267 + return; 268 + } 262 269 263 270 o = pm_qos_array[pm_qos_req->pm_qos_class]; 264 271 ··· 293 290 return; 294 291 /* silent return to keep pcm code cleaner */ 295 292 293 + if (!pm_qos_request_active(pm_qos_req)) { 294 + WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n"); 295 + return; 296 + } 297 + 296 298 o = pm_qos_array[pm_qos_req->pm_qos_class]; 297 299 update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE); 298 300 memset(pm_qos_req, 0, sizeof(*pm_qos_req)); 299 301 } 300 302 EXPORT_SYMBOL_GPL(pm_qos_remove_request); 301 303 ··· 348 340 349 341 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode)); 350 342 if (pm_qos_class >= 0) { 351 - filp->private_data = (void *) pm_qos_add_request(pm_qos_class, 352 - PM_QOS_DEFAULT_VALUE); 343 + struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req)); 344 + if (!req) 345 + return -ENOMEM; 346 + 347 + pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); 348 + filp->private_data = req; 353 349 354 350 if (filp->private_data) 355 351 return 0; ··· 365 353 { 366 354 struct pm_qos_request_list *req; 367 355 368 - req = (struct pm_qos_request_list *)filp->private_data; 356 + req = filp->private_data; 369 357 pm_qos_remove_request(req); 358 + kfree(req); 370 359 371 360 return 0; 372 361 }
+5 -8
sound/core/pcm_native.c
··· 451 451 snd_pcm_timer_resolution_change(substream); 452 452 runtime->status->state = SNDRV_PCM_STATE_SETUP; 453 453 454 - if (substream->latency_pm_qos_req) { 455 - pm_qos_remove_request(substream->latency_pm_qos_req); 456 - substream->latency_pm_qos_req = NULL; 457 - } 454 + if (pm_qos_request_active(&substream->latency_pm_qos_req)) 455 + pm_qos_remove_request(&substream->latency_pm_qos_req); 458 456 if ((usecs = period_to_usecs(runtime)) >= 0) 459 - substream->latency_pm_qos_req = pm_qos_add_request( 460 - PM_QOS_CPU_DMA_LATENCY, usecs); 457 + pm_qos_add_request(&substream->latency_pm_qos_req, 458 + PM_QOS_CPU_DMA_LATENCY, usecs); 461 459 return 0; 462 460 _error: 463 461 /* hardware might be unuseable from this time, ··· 510 512 if (substream->ops->hw_free) 511 513 result = substream->ops->hw_free(substream); 512 514 runtime->status->state = SNDRV_PCM_STATE_OPEN; 513 - pm_qos_remove_request(substream->latency_pm_qos_req); 514 - substream->latency_pm_qos_req = NULL; 515 + pm_qos_remove_request(&substream->latency_pm_qos_req); 515 516 return result; 516 517 } 517 518