Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'power-domains' into for-linus

* power-domains:
PM: Fix build issue in clock_ops.c for CONFIG_PM_RUNTIME unset
PM: Revert "driver core: platform_bus: allow runtime override of dev_pm_ops"
OMAP1 / PM: Use generic clock manipulation routines for runtime PM
PM / Runtime: Generic clock manipulation rountines for runtime PM (v6)
PM / Runtime: Add subsystem data field to struct dev_pm_info
OMAP2+ / PM: move runtime PM implementation to use device power domains
PM / Platform: Use generic runtime PM callbacks directly
shmobile: Use power domains for platform runtime PM
PM: Export platform bus type's default PM callbacks
PM: Make power domain callbacks take precedence over subsystem ones

+689 -447
+23 -48
arch/arm/mach-omap1/pm_bus.c
··· 24 24 #ifdef CONFIG_PM_RUNTIME 25 25 static int omap1_pm_runtime_suspend(struct device *dev) 26 26 { 27 - struct clk *iclk, *fclk; 28 - int ret = 0; 27 + int ret; 29 28 30 29 dev_dbg(dev, "%s\n", __func__); 31 30 32 31 ret = pm_generic_runtime_suspend(dev); 32 + if (ret) 33 + return ret; 33 34 34 - fclk = clk_get(dev, "fck"); 35 - if (!IS_ERR(fclk)) { 36 - clk_disable(fclk); 37 - clk_put(fclk); 38 - } 39 - 40 - iclk = clk_get(dev, "ick"); 41 - if (!IS_ERR(iclk)) { 42 - clk_disable(iclk); 43 - clk_put(iclk); 35 + ret = pm_runtime_clk_suspend(dev); 36 + if (ret) { 37 + pm_generic_runtime_resume(dev); 38 + return ret; 44 39 } 45 40 46 41 return 0; 47 - }; 42 + } 48 43 49 44 static int omap1_pm_runtime_resume(struct device *dev) 50 45 { 51 - struct clk *iclk, *fclk; 52 - 53 46 dev_dbg(dev, "%s\n", __func__); 54 47 55 - iclk = clk_get(dev, "ick"); 56 - if (!IS_ERR(iclk)) { 57 - clk_enable(iclk); 58 - clk_put(iclk); 59 - } 60 - 61 - fclk = clk_get(dev, "fck"); 62 - if (!IS_ERR(fclk)) { 63 - clk_enable(fclk); 64 - clk_put(fclk); 65 - } 66 - 48 + pm_runtime_clk_resume(dev); 67 49 return pm_generic_runtime_resume(dev); 50 + } 51 + 52 + static struct dev_power_domain default_power_domain = { 53 + .ops = { 54 + .runtime_suspend = omap1_pm_runtime_suspend, 55 + .runtime_resume = omap1_pm_runtime_resume, 56 + USE_PLATFORM_PM_SLEEP_OPS 57 + }, 58 + }; 59 + 60 + static struct pm_clk_notifier_block platform_bus_notifier = { 61 + .pwr_domain = &default_power_domain, 62 + .con_ids = { "ick", "fck", NULL, }, 68 63 }; 69 64 70 65 static int __init omap1_pm_runtime_init(void) 71 66 { 72 - const struct dev_pm_ops *pm; 73 - struct dev_pm_ops *omap_pm; 74 - 75 67 if (!cpu_class_is_omap1()) 76 68 return -ENODEV; 77 69 78 - pm = platform_bus_get_pm_ops(); 79 - if (!pm) { 80 - pr_err("%s: unable to get dev_pm_ops from platform_bus\n", 81 - __func__); 82 - return -ENODEV; 83 - } 84 - 85 - omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); 86 - if (!omap_pm) { 87 - pr_err("%s: 
unable to alloc memory for new dev_pm_ops\n", 88 - __func__); 89 - return -ENOMEM; 90 - } 91 - 92 - omap_pm->runtime_suspend = omap1_pm_runtime_suspend; 93 - omap_pm->runtime_resume = omap1_pm_runtime_resume; 94 - 95 - platform_bus_set_pm_ops(omap_pm); 70 + pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 96 71 97 72 return 0; 98 73 }
+3 -3
arch/arm/mach-omap2/Makefile
··· 59 59 # Power Management 60 60 ifeq ($(CONFIG_PM),y) 61 61 obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o 62 - obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o pm_bus.o 62 + obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o 63 63 obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ 64 - cpuidle34xx.o pm_bus.o 65 - obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o pm_bus.o 64 + cpuidle34xx.o 65 + obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o 66 66 obj-$(CONFIG_PM_DEBUG) += pm-debug.o 67 67 obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o 68 68 obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
-85
arch/arm/mach-omap2/pm_bus.c
··· 1 - /* 2 - * Runtime PM support code for OMAP 3 - * 4 - * Author: Kevin Hilman, Deep Root Systems, LLC 5 - * 6 - * Copyright (C) 2010 Texas Instruments, Inc. 7 - * 8 - * This file is licensed under the terms of the GNU General Public 9 - * License version 2. This program is licensed "as is" without any 10 - * warranty of any kind, whether express or implied. 11 - */ 12 - #include <linux/init.h> 13 - #include <linux/kernel.h> 14 - #include <linux/io.h> 15 - #include <linux/pm_runtime.h> 16 - #include <linux/platform_device.h> 17 - #include <linux/mutex.h> 18 - 19 - #include <plat/omap_device.h> 20 - #include <plat/omap-pm.h> 21 - 22 - #ifdef CONFIG_PM_RUNTIME 23 - static int omap_pm_runtime_suspend(struct device *dev) 24 - { 25 - struct platform_device *pdev = to_platform_device(dev); 26 - int r, ret = 0; 27 - 28 - dev_dbg(dev, "%s\n", __func__); 29 - 30 - ret = pm_generic_runtime_suspend(dev); 31 - 32 - if (!ret && dev->parent == &omap_device_parent) { 33 - r = omap_device_idle(pdev); 34 - WARN_ON(r); 35 - } 36 - 37 - return ret; 38 - }; 39 - 40 - static int omap_pm_runtime_resume(struct device *dev) 41 - { 42 - struct platform_device *pdev = to_platform_device(dev); 43 - int r; 44 - 45 - dev_dbg(dev, "%s\n", __func__); 46 - 47 - if (dev->parent == &omap_device_parent) { 48 - r = omap_device_enable(pdev); 49 - WARN_ON(r); 50 - } 51 - 52 - return pm_generic_runtime_resume(dev); 53 - }; 54 - #else 55 - #define omap_pm_runtime_suspend NULL 56 - #define omap_pm_runtime_resume NULL 57 - #endif /* CONFIG_PM_RUNTIME */ 58 - 59 - static int __init omap_pm_runtime_init(void) 60 - { 61 - const struct dev_pm_ops *pm; 62 - struct dev_pm_ops *omap_pm; 63 - 64 - pm = platform_bus_get_pm_ops(); 65 - if (!pm) { 66 - pr_err("%s: unable to get dev_pm_ops from platform_bus\n", 67 - __func__); 68 - return -ENODEV; 69 - } 70 - 71 - omap_pm = kmemdup(pm, sizeof(struct dev_pm_ops), GFP_KERNEL); 72 - if (!omap_pm) { 73 - pr_err("%s: unable to alloc memory for new dev_pm_ops\n", 74 - 
__func__); 75 - return -ENOMEM; 76 - } 77 - 78 - omap_pm->runtime_suspend = omap_pm_runtime_suspend; 79 - omap_pm->runtime_resume = omap_pm_runtime_resume; 80 - 81 - platform_bus_set_pm_ops(omap_pm); 82 - 83 - return 0; 84 - } 85 - core_initcall(omap_pm_runtime_init);
+17 -128
arch/arm/mach-shmobile/pm_runtime.c
··· 18 18 #include <linux/clk.h> 19 19 #include <linux/sh_clk.h> 20 20 #include <linux/bitmap.h> 21 + #include <linux/slab.h> 21 22 22 23 #ifdef CONFIG_PM_RUNTIME 23 - #define BIT_ONCE 0 24 - #define BIT_ACTIVE 1 25 - #define BIT_CLK_ENABLED 2 26 24 27 - struct pm_runtime_data { 28 - unsigned long flags; 29 - struct clk *clk; 30 - }; 31 - 32 - static void __devres_release(struct device *dev, void *res) 33 - { 34 - struct pm_runtime_data *prd = res; 35 - 36 - dev_dbg(dev, "__devres_release()\n"); 37 - 38 - if (test_bit(BIT_CLK_ENABLED, &prd->flags)) 39 - clk_disable(prd->clk); 40 - 41 - if (test_bit(BIT_ACTIVE, &prd->flags)) 42 - clk_put(prd->clk); 43 - } 44 - 45 - static struct pm_runtime_data *__to_prd(struct device *dev) 46 - { 47 - return devres_find(dev, __devres_release, NULL, NULL); 48 - } 49 - 50 - static void platform_pm_runtime_init(struct device *dev, 51 - struct pm_runtime_data *prd) 52 - { 53 - if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) { 54 - prd->clk = clk_get(dev, NULL); 55 - if (!IS_ERR(prd->clk)) { 56 - set_bit(BIT_ACTIVE, &prd->flags); 57 - dev_info(dev, "clocks managed by runtime pm\n"); 58 - } 59 - } 60 - } 61 - 62 - static void platform_pm_runtime_bug(struct device *dev, 63 - struct pm_runtime_data *prd) 64 - { 65 - if (prd && !test_and_set_bit(BIT_ONCE, &prd->flags)) 66 - dev_err(dev, "runtime pm suspend before resume\n"); 67 - } 68 - 69 - int platform_pm_runtime_suspend(struct device *dev) 70 - { 71 - struct pm_runtime_data *prd = __to_prd(dev); 72 - 73 - dev_dbg(dev, "platform_pm_runtime_suspend()\n"); 74 - 75 - platform_pm_runtime_bug(dev, prd); 76 - 77 - if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { 78 - clk_disable(prd->clk); 79 - clear_bit(BIT_CLK_ENABLED, &prd->flags); 80 - } 81 - 82 - return 0; 83 - } 84 - 85 - int platform_pm_runtime_resume(struct device *dev) 86 - { 87 - struct pm_runtime_data *prd = __to_prd(dev); 88 - 89 - dev_dbg(dev, "platform_pm_runtime_resume()\n"); 90 - 91 - platform_pm_runtime_init(dev, prd); 92 - 
93 - if (prd && test_bit(BIT_ACTIVE, &prd->flags)) { 94 - clk_enable(prd->clk); 95 - set_bit(BIT_CLK_ENABLED, &prd->flags); 96 - } 97 - 98 - return 0; 99 - } 100 - 101 - int platform_pm_runtime_idle(struct device *dev) 25 + static int default_platform_runtime_idle(struct device *dev) 102 26 { 103 27 /* suspend synchronously to disable clocks immediately */ 104 28 return pm_runtime_suspend(dev); 105 29 } 106 30 107 - static int platform_bus_notify(struct notifier_block *nb, 108 - unsigned long action, void *data) 109 - { 110 - struct device *dev = data; 111 - struct pm_runtime_data *prd; 31 + static struct dev_power_domain default_power_domain = { 32 + .ops = { 33 + .runtime_suspend = pm_runtime_clk_suspend, 34 + .runtime_resume = pm_runtime_clk_resume, 35 + .runtime_idle = default_platform_runtime_idle, 36 + USE_PLATFORM_PM_SLEEP_OPS 37 + }, 38 + }; 112 39 113 - dev_dbg(dev, "platform_bus_notify() %ld !\n", action); 40 + #define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) 114 41 115 - if (action == BUS_NOTIFY_BIND_DRIVER) { 116 - prd = devres_alloc(__devres_release, sizeof(*prd), GFP_KERNEL); 117 - if (prd) 118 - devres_add(dev, prd); 119 - else 120 - dev_err(dev, "unable to alloc memory for runtime pm\n"); 121 - } 42 + #else 122 43 123 - return 0; 124 - } 125 - 126 - #else /* CONFIG_PM_RUNTIME */ 127 - 128 - static int platform_bus_notify(struct notifier_block *nb, 129 - unsigned long action, void *data) 130 - { 131 - struct device *dev = data; 132 - struct clk *clk; 133 - 134 - dev_dbg(dev, "platform_bus_notify() %ld !\n", action); 135 - 136 - switch (action) { 137 - case BUS_NOTIFY_BIND_DRIVER: 138 - clk = clk_get(dev, NULL); 139 - if (!IS_ERR(clk)) { 140 - clk_enable(clk); 141 - clk_put(clk); 142 - dev_info(dev, "runtime pm disabled, clock forced on\n"); 143 - } 144 - break; 145 - case BUS_NOTIFY_UNBOUND_DRIVER: 146 - clk = clk_get(dev, NULL); 147 - if (!IS_ERR(clk)) { 148 - clk_disable(clk); 149 - clk_put(clk); 150 - dev_info(dev, "runtime pm disabled, 
clock forced off\n"); 151 - } 152 - break; 153 - } 154 - 155 - return 0; 156 - } 44 + #define DEFAULT_PWR_DOMAIN_PTR NULL 157 45 158 46 #endif /* CONFIG_PM_RUNTIME */ 159 47 160 - static struct notifier_block platform_bus_notifier = { 161 - .notifier_call = platform_bus_notify 48 + static struct pm_clk_notifier_block platform_bus_notifier = { 49 + .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, 50 + .con_ids = { NULL, }, 162 51 }; 163 52 164 53 static int __init sh_pm_runtime_init(void) 165 54 { 166 - bus_register_notifier(&platform_bus_type, &platform_bus_notifier); 55 + pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 167 56 return 0; 168 57 } 169 58 core_initcall(sh_pm_runtime_init);
+23
arch/arm/plat-omap/omap_device.c
··· 536 536 return 0; 537 537 } 538 538 539 + static int _od_runtime_suspend(struct device *dev) 540 + { 541 + struct platform_device *pdev = to_platform_device(dev); 542 + 543 + return omap_device_idle(pdev); 544 + } 545 + 546 + static int _od_runtime_resume(struct device *dev) 547 + { 548 + struct platform_device *pdev = to_platform_device(dev); 549 + 550 + return omap_device_enable(pdev); 551 + } 552 + 553 + static struct dev_power_domain omap_device_power_domain = { 554 + .ops = { 555 + .runtime_suspend = _od_runtime_suspend, 556 + .runtime_resume = _od_runtime_resume, 557 + USE_PLATFORM_PM_SLEEP_OPS 558 + } 559 + }; 560 + 539 561 /** 540 562 * omap_device_register - register an omap_device with one omap_hwmod 541 563 * @od: struct omap_device * to register ··· 571 549 pr_debug("omap_device: %s: registering\n", od->pdev.name); 572 550 573 551 od->pdev.dev.parent = &omap_device_parent; 552 + od->pdev.dev.pwr_domain = &omap_device_power_domain; 574 553 return platform_device_register(&od->pdev); 575 554 } 576 555
+22 -11
arch/sh/kernel/cpu/shmobile/pm_runtime.c
··· 139 139 queue_work(pm_wq, &hwblk_work); 140 140 } 141 141 142 - int platform_pm_runtime_suspend(struct device *dev) 142 + static int default_platform_runtime_suspend(struct device *dev) 143 143 { 144 144 struct platform_device *pdev = to_platform_device(dev); 145 145 struct pdev_archdata *ad = &pdev->archdata; ··· 147 147 int hwblk = ad->hwblk_id; 148 148 int ret = 0; 149 149 150 - dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); 150 + dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); 151 151 152 152 /* ignore off-chip platform devices */ 153 153 if (!hwblk) ··· 183 183 mutex_unlock(&ad->mutex); 184 184 185 185 out: 186 - dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", 187 - hwblk, ret); 186 + dev_dbg(dev, "%s() [%d] returns %d\n", 187 + __func__, hwblk, ret); 188 188 189 189 return ret; 190 190 } 191 191 192 - int platform_pm_runtime_resume(struct device *dev) 192 + static int default_platform_runtime_resume(struct device *dev) 193 193 { 194 194 struct platform_device *pdev = to_platform_device(dev); 195 195 struct pdev_archdata *ad = &pdev->archdata; 196 196 int hwblk = ad->hwblk_id; 197 197 int ret = 0; 198 198 199 - dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); 199 + dev_dbg(dev, "%s() [%d]\n", __func__, hwblk); 200 200 201 201 /* ignore off-chip platform devices */ 202 202 if (!hwblk) ··· 228 228 */ 229 229 mutex_unlock(&ad->mutex); 230 230 out: 231 - dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n", 232 - hwblk, ret); 231 + dev_dbg(dev, "%s() [%d] returns %d\n", 232 + __func__, hwblk, ret); 233 233 234 234 return ret; 235 235 } 236 236 237 - int platform_pm_runtime_idle(struct device *dev) 237 + static int default_platform_runtime_idle(struct device *dev) 238 238 { 239 239 struct platform_device *pdev = to_platform_device(dev); 240 240 int hwblk = pdev->archdata.hwblk_id; 241 241 int ret = 0; 242 242 243 - dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); 243 + dev_dbg(dev, "%s() [%d]\n", 
__func__, hwblk); 244 244 245 245 /* ignore off-chip platform devices */ 246 246 if (!hwblk) ··· 252 252 /* suspend synchronously to disable clocks immediately */ 253 253 ret = pm_runtime_suspend(dev); 254 254 out: 255 - dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); 255 + dev_dbg(dev, "%s() [%d] done!\n", __func__, hwblk); 256 256 return ret; 257 257 } 258 + 259 + static struct dev_power_domain default_power_domain = { 260 + .ops = { 261 + .runtime_suspend = default_platform_runtime_suspend, 262 + .runtime_resume = default_platform_runtime_resume, 263 + .runtime_idle = default_platform_runtime_idle, 264 + USE_PLATFORM_PM_SLEEP_OPS 265 + }, 266 + }; 258 267 259 268 static int platform_bus_notify(struct notifier_block *nb, 260 269 unsigned long action, void *data) ··· 285 276 hwblk_disable(hwblk_info, hwblk); 286 277 /* make sure driver re-inits itself once */ 287 278 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 279 + dev->pwr_domain = &default_power_domain; 288 280 break; 289 281 /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ 290 282 case BUS_NOTIFY_BOUND_DRIVER: ··· 299 289 __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); 300 290 break; 301 291 case BUS_NOTIFY_DEL_DEVICE: 292 + dev->pwr_domain = NULL; 302 293 break; 303 294 } 304 295 return 0;
+21 -117
drivers/base/platform.c
··· 667 667 return ret; 668 668 } 669 669 670 - static int platform_pm_prepare(struct device *dev) 670 + int platform_pm_prepare(struct device *dev) 671 671 { 672 672 struct device_driver *drv = dev->driver; 673 673 int ret = 0; ··· 678 678 return ret; 679 679 } 680 680 681 - static void platform_pm_complete(struct device *dev) 681 + void platform_pm_complete(struct device *dev) 682 682 { 683 683 struct device_driver *drv = dev->driver; 684 684 ··· 686 686 drv->pm->complete(dev); 687 687 } 688 688 689 - #else /* !CONFIG_PM_SLEEP */ 690 - 691 - #define platform_pm_prepare NULL 692 - #define platform_pm_complete NULL 693 - 694 - #endif /* !CONFIG_PM_SLEEP */ 689 + #endif /* CONFIG_PM_SLEEP */ 695 690 696 691 #ifdef CONFIG_SUSPEND 697 692 698 - int __weak platform_pm_suspend(struct device *dev) 693 + int platform_pm_suspend(struct device *dev) 699 694 { 700 695 struct device_driver *drv = dev->driver; 701 696 int ret = 0; ··· 708 713 return ret; 709 714 } 710 715 711 - int __weak platform_pm_suspend_noirq(struct device *dev) 716 + int platform_pm_suspend_noirq(struct device *dev) 712 717 { 713 718 struct device_driver *drv = dev->driver; 714 719 int ret = 0; ··· 724 729 return ret; 725 730 } 726 731 727 - int __weak platform_pm_resume(struct device *dev) 732 + int platform_pm_resume(struct device *dev) 728 733 { 729 734 struct device_driver *drv = dev->driver; 730 735 int ret = 0; ··· 742 747 return ret; 743 748 } 744 749 745 - int __weak platform_pm_resume_noirq(struct device *dev) 750 + int platform_pm_resume_noirq(struct device *dev) 746 751 { 747 752 struct device_driver *drv = dev->driver; 748 753 int ret = 0; ··· 758 763 return ret; 759 764 } 760 765 761 - #else /* !CONFIG_SUSPEND */ 762 - 763 - #define platform_pm_suspend NULL 764 - #define platform_pm_resume NULL 765 - #define platform_pm_suspend_noirq NULL 766 - #define platform_pm_resume_noirq NULL 767 - 768 - #endif /* !CONFIG_SUSPEND */ 766 + #endif /* CONFIG_SUSPEND */ 769 767 770 768 #ifdef 
CONFIG_HIBERNATE_CALLBACKS 771 769 772 - static int platform_pm_freeze(struct device *dev) 770 + int platform_pm_freeze(struct device *dev) 773 771 { 774 772 struct device_driver *drv = dev->driver; 775 773 int ret = 0; ··· 780 792 return ret; 781 793 } 782 794 783 - static int platform_pm_freeze_noirq(struct device *dev) 795 + int platform_pm_freeze_noirq(struct device *dev) 784 796 { 785 797 struct device_driver *drv = dev->driver; 786 798 int ret = 0; ··· 796 808 return ret; 797 809 } 798 810 799 - static int platform_pm_thaw(struct device *dev) 811 + int platform_pm_thaw(struct device *dev) 800 812 { 801 813 struct device_driver *drv = dev->driver; 802 814 int ret = 0; ··· 814 826 return ret; 815 827 } 816 828 817 - static int platform_pm_thaw_noirq(struct device *dev) 829 + int platform_pm_thaw_noirq(struct device *dev) 818 830 { 819 831 struct device_driver *drv = dev->driver; 820 832 int ret = 0; ··· 830 842 return ret; 831 843 } 832 844 833 - static int platform_pm_poweroff(struct device *dev) 845 + int platform_pm_poweroff(struct device *dev) 834 846 { 835 847 struct device_driver *drv = dev->driver; 836 848 int ret = 0; ··· 848 860 return ret; 849 861 } 850 862 851 - static int platform_pm_poweroff_noirq(struct device *dev) 863 + int platform_pm_poweroff_noirq(struct device *dev) 852 864 { 853 865 struct device_driver *drv = dev->driver; 854 866 int ret = 0; ··· 864 876 return ret; 865 877 } 866 878 867 - static int platform_pm_restore(struct device *dev) 879 + int platform_pm_restore(struct device *dev) 868 880 { 869 881 struct device_driver *drv = dev->driver; 870 882 int ret = 0; ··· 882 894 return ret; 883 895 } 884 896 885 - static int platform_pm_restore_noirq(struct device *dev) 897 + int platform_pm_restore_noirq(struct device *dev) 886 898 { 887 899 struct device_driver *drv = dev->driver; 888 900 int ret = 0; ··· 898 910 return ret; 899 911 } 900 912 901 - #else /* !CONFIG_HIBERNATE_CALLBACKS */ 902 - 903 - #define platform_pm_freeze NULL 904 - 
#define platform_pm_thaw NULL 905 - #define platform_pm_poweroff NULL 906 - #define platform_pm_restore NULL 907 - #define platform_pm_freeze_noirq NULL 908 - #define platform_pm_thaw_noirq NULL 909 - #define platform_pm_poweroff_noirq NULL 910 - #define platform_pm_restore_noirq NULL 911 - 912 - #endif /* !CONFIG_HIBERNATE_CALLBACKS */ 913 - 914 - #ifdef CONFIG_PM_RUNTIME 915 - 916 - int __weak platform_pm_runtime_suspend(struct device *dev) 917 - { 918 - return pm_generic_runtime_suspend(dev); 919 - }; 920 - 921 - int __weak platform_pm_runtime_resume(struct device *dev) 922 - { 923 - return pm_generic_runtime_resume(dev); 924 - }; 925 - 926 - int __weak platform_pm_runtime_idle(struct device *dev) 927 - { 928 - return pm_generic_runtime_idle(dev); 929 - }; 930 - 931 - #else /* !CONFIG_PM_RUNTIME */ 932 - 933 - #define platform_pm_runtime_suspend NULL 934 - #define platform_pm_runtime_resume NULL 935 - #define platform_pm_runtime_idle NULL 936 - 937 - #endif /* !CONFIG_PM_RUNTIME */ 913 + #endif /* CONFIG_HIBERNATE_CALLBACKS */ 938 914 939 915 static const struct dev_pm_ops platform_dev_pm_ops = { 940 - .prepare = platform_pm_prepare, 941 - .complete = platform_pm_complete, 942 - .suspend = platform_pm_suspend, 943 - .resume = platform_pm_resume, 944 - .freeze = platform_pm_freeze, 945 - .thaw = platform_pm_thaw, 946 - .poweroff = platform_pm_poweroff, 947 - .restore = platform_pm_restore, 948 - .suspend_noirq = platform_pm_suspend_noirq, 949 - .resume_noirq = platform_pm_resume_noirq, 950 - .freeze_noirq = platform_pm_freeze_noirq, 951 - .thaw_noirq = platform_pm_thaw_noirq, 952 - .poweroff_noirq = platform_pm_poweroff_noirq, 953 - .restore_noirq = platform_pm_restore_noirq, 954 - .runtime_suspend = platform_pm_runtime_suspend, 955 - .runtime_resume = platform_pm_runtime_resume, 956 - .runtime_idle = platform_pm_runtime_idle, 916 + .runtime_suspend = pm_generic_runtime_suspend, 917 + .runtime_resume = pm_generic_runtime_resume, 918 + .runtime_idle = 
pm_generic_runtime_idle, 919 + USE_PLATFORM_PM_SLEEP_OPS 957 920 }; 958 921 959 922 struct bus_type platform_bus_type = { ··· 915 976 .pm = &platform_dev_pm_ops, 916 977 }; 917 978 EXPORT_SYMBOL_GPL(platform_bus_type); 918 - 919 - /** 920 - * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops 921 - * 922 - * This function can be used by platform code to get the current 923 - * set of dev_pm_ops functions used by the platform_bus_type. 924 - */ 925 - const struct dev_pm_ops * __init platform_bus_get_pm_ops(void) 926 - { 927 - return platform_bus_type.pm; 928 - } 929 - 930 - /** 931 - * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type 932 - * 933 - * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type 934 - * 935 - * Platform code can override the dev_pm_ops methods of 936 - * platform_bus_type by using this function. It is expected that 937 - * platform code will first do a platform_bus_get_pm_ops(), then 938 - * kmemdup it, then customize selected methods and pass a pointer to 939 - * the new struct dev_pm_ops to this function. 940 - * 941 - * Since platform-specific code is customizing methods for *all* 942 - * devices (not just platform-specific devices) it is expected that 943 - * any custom overrides of these functions will keep existing behavior 944 - * and simply extend it. For example, any customization of the 945 - * runtime PM methods should continue to call the pm_generic_* 946 - * functions as the default ones do in addition to the 947 - * platform-specific behavior. 948 - */ 949 - void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm) 950 - { 951 - platform_bus_type.pm = pm; 952 - } 953 979 954 980 int __init platform_bus_init(void) 955 981 {
+1
drivers/base/power/Makefile
··· 3 3 obj-$(CONFIG_PM_RUNTIME) += runtime.o 4 4 obj-$(CONFIG_PM_TRACE_RTC) += trace.o 5 5 obj-$(CONFIG_PM_OPP) += opp.o 6 + obj-$(CONFIG_HAVE_CLK) += clock_ops.o 6 7 7 8 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 8 9 ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG
+431
drivers/base/power/clock_ops.c
··· 1 + /* 2 + * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks 3 + * 4 + * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 5 + * 6 + * This file is released under the GPLv2. 7 + */ 8 + 9 + #include <linux/init.h> 10 + #include <linux/kernel.h> 11 + #include <linux/io.h> 12 + #include <linux/pm.h> 13 + #include <linux/pm_runtime.h> 14 + #include <linux/clk.h> 15 + #include <linux/slab.h> 16 + #include <linux/err.h> 17 + 18 + #ifdef CONFIG_PM_RUNTIME 19 + 20 + struct pm_runtime_clk_data { 21 + struct list_head clock_list; 22 + struct mutex lock; 23 + }; 24 + 25 + enum pce_status { 26 + PCE_STATUS_NONE = 0, 27 + PCE_STATUS_ACQUIRED, 28 + PCE_STATUS_ENABLED, 29 + PCE_STATUS_ERROR, 30 + }; 31 + 32 + struct pm_clock_entry { 33 + struct list_head node; 34 + char *con_id; 35 + struct clk *clk; 36 + enum pce_status status; 37 + }; 38 + 39 + static struct pm_runtime_clk_data *__to_prd(struct device *dev) 40 + { 41 + return dev ? dev->power.subsys_data : NULL; 42 + } 43 + 44 + /** 45 + * pm_runtime_clk_add - Start using a device clock for runtime PM. 46 + * @dev: Device whose clock is going to be used for runtime PM. 47 + * @con_id: Connection ID of the clock. 48 + * 49 + * Add the clock represented by @con_id to the list of clocks used for 50 + * the runtime PM of @dev. 
51 + */ 52 + int pm_runtime_clk_add(struct device *dev, const char *con_id) 53 + { 54 + struct pm_runtime_clk_data *prd = __to_prd(dev); 55 + struct pm_clock_entry *ce; 56 + 57 + if (!prd) 58 + return -EINVAL; 59 + 60 + ce = kzalloc(sizeof(*ce), GFP_KERNEL); 61 + if (!ce) { 62 + dev_err(dev, "Not enough memory for clock entry.\n"); 63 + return -ENOMEM; 64 + } 65 + 66 + if (con_id) { 67 + ce->con_id = kstrdup(con_id, GFP_KERNEL); 68 + if (!ce->con_id) { 69 + dev_err(dev, 70 + "Not enough memory for clock connection ID.\n"); 71 + kfree(ce); 72 + return -ENOMEM; 73 + } 74 + } 75 + 76 + mutex_lock(&prd->lock); 77 + list_add_tail(&ce->node, &prd->clock_list); 78 + mutex_unlock(&prd->lock); 79 + return 0; 80 + } 81 + 82 + /** 83 + * __pm_runtime_clk_remove - Destroy runtime PM clock entry. 84 + * @ce: Runtime PM clock entry to destroy. 85 + * 86 + * This routine must be called under the mutex protecting the runtime PM list 87 + * of clocks corresponding the the @ce's device. 88 + */ 89 + static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) 90 + { 91 + if (!ce) 92 + return; 93 + 94 + list_del(&ce->node); 95 + 96 + if (ce->status < PCE_STATUS_ERROR) { 97 + if (ce->status == PCE_STATUS_ENABLED) 98 + clk_disable(ce->clk); 99 + 100 + if (ce->status >= PCE_STATUS_ACQUIRED) 101 + clk_put(ce->clk); 102 + } 103 + 104 + if (ce->con_id) 105 + kfree(ce->con_id); 106 + 107 + kfree(ce); 108 + } 109 + 110 + /** 111 + * pm_runtime_clk_remove - Stop using a device clock for runtime PM. 112 + * @dev: Device whose clock should not be used for runtime PM any more. 113 + * @con_id: Connection ID of the clock. 114 + * 115 + * Remove the clock represented by @con_id from the list of clocks used for 116 + * the runtime PM of @dev. 
117 + */ 118 + void pm_runtime_clk_remove(struct device *dev, const char *con_id) 119 + { 120 + struct pm_runtime_clk_data *prd = __to_prd(dev); 121 + struct pm_clock_entry *ce; 122 + 123 + if (!prd) 124 + return; 125 + 126 + mutex_lock(&prd->lock); 127 + 128 + list_for_each_entry(ce, &prd->clock_list, node) { 129 + if (!con_id && !ce->con_id) { 130 + __pm_runtime_clk_remove(ce); 131 + break; 132 + } else if (!con_id || !ce->con_id) { 133 + continue; 134 + } else if (!strcmp(con_id, ce->con_id)) { 135 + __pm_runtime_clk_remove(ce); 136 + break; 137 + } 138 + } 139 + 140 + mutex_unlock(&prd->lock); 141 + } 142 + 143 + /** 144 + * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. 145 + * @dev: Device to initialize the list of runtime PM clocks for. 146 + * 147 + * Allocate a struct pm_runtime_clk_data object, initialize its lock member and 148 + * make the @dev's power.subsys_data field point to it. 149 + */ 150 + int pm_runtime_clk_init(struct device *dev) 151 + { 152 + struct pm_runtime_clk_data *prd; 153 + 154 + prd = kzalloc(sizeof(*prd), GFP_KERNEL); 155 + if (!prd) { 156 + dev_err(dev, "Not enough memory fo runtime PM data.\n"); 157 + return -ENOMEM; 158 + } 159 + 160 + INIT_LIST_HEAD(&prd->clock_list); 161 + mutex_init(&prd->lock); 162 + dev->power.subsys_data = prd; 163 + return 0; 164 + } 165 + 166 + /** 167 + * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. 168 + * @dev: Device to destroy the list of runtime PM clocks for. 169 + * 170 + * Clear the @dev's power.subsys_data field, remove the list of clock entries 171 + * from the struct pm_runtime_clk_data object pointed to by it before and free 172 + * that object. 
173 + */ 174 + void pm_runtime_clk_destroy(struct device *dev) 175 + { 176 + struct pm_runtime_clk_data *prd = __to_prd(dev); 177 + struct pm_clock_entry *ce, *c; 178 + 179 + if (!prd) 180 + return; 181 + 182 + dev->power.subsys_data = NULL; 183 + 184 + mutex_lock(&prd->lock); 185 + 186 + list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) 187 + __pm_runtime_clk_remove(ce); 188 + 189 + mutex_unlock(&prd->lock); 190 + 191 + kfree(prd); 192 + } 193 + 194 + /** 195 + * pm_runtime_clk_acquire - Acquire a device clock. 196 + * @dev: Device whose clock is to be acquired. 197 + * @con_id: Connection ID of the clock. 198 + */ 199 + static void pm_runtime_clk_acquire(struct device *dev, 200 + struct pm_clock_entry *ce) 201 + { 202 + ce->clk = clk_get(dev, ce->con_id); 203 + if (IS_ERR(ce->clk)) { 204 + ce->status = PCE_STATUS_ERROR; 205 + } else { 206 + ce->status = PCE_STATUS_ACQUIRED; 207 + dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); 208 + } 209 + } 210 + 211 + /** 212 + * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. 213 + * @dev: Device to disable the clocks for. 214 + */ 215 + int pm_runtime_clk_suspend(struct device *dev) 216 + { 217 + struct pm_runtime_clk_data *prd = __to_prd(dev); 218 + struct pm_clock_entry *ce; 219 + 220 + dev_dbg(dev, "%s()\n", __func__); 221 + 222 + if (!prd) 223 + return 0; 224 + 225 + mutex_lock(&prd->lock); 226 + 227 + list_for_each_entry_reverse(ce, &prd->clock_list, node) { 228 + if (ce->status == PCE_STATUS_NONE) 229 + pm_runtime_clk_acquire(dev, ce); 230 + 231 + if (ce->status < PCE_STATUS_ERROR) { 232 + clk_disable(ce->clk); 233 + ce->status = PCE_STATUS_ACQUIRED; 234 + } 235 + } 236 + 237 + mutex_unlock(&prd->lock); 238 + 239 + return 0; 240 + } 241 + 242 + /** 243 + * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. 244 + * @dev: Device to enable the clocks for. 
245 + */ 246 + int pm_runtime_clk_resume(struct device *dev) 247 + { 248 + struct pm_runtime_clk_data *prd = __to_prd(dev); 249 + struct pm_clock_entry *ce; 250 + 251 + dev_dbg(dev, "%s()\n", __func__); 252 + 253 + if (!prd) 254 + return 0; 255 + 256 + mutex_lock(&prd->lock); 257 + 258 + list_for_each_entry(ce, &prd->clock_list, node) { 259 + if (ce->status == PCE_STATUS_NONE) 260 + pm_runtime_clk_acquire(dev, ce); 261 + 262 + if (ce->status < PCE_STATUS_ERROR) { 263 + clk_enable(ce->clk); 264 + ce->status = PCE_STATUS_ENABLED; 265 + } 266 + } 267 + 268 + mutex_unlock(&prd->lock); 269 + 270 + return 0; 271 + } 272 + 273 + /** 274 + * pm_runtime_clk_notify - Notify routine for device addition and removal. 275 + * @nb: Notifier block object this function is a member of. 276 + * @action: Operation being carried out by the caller. 277 + * @data: Device the routine is being run for. 278 + * 279 + * For this function to work, @nb must be a member of an object of type 280 + * struct pm_clk_notifier_block containing all of the requisite data. 281 + * Specifically, the pwr_domain member of that object is copied to the device's 282 + * pwr_domain field and its con_ids member is used to populate the device's list 283 + * of runtime PM clocks, depending on @action. 284 + * 285 + * If the device's pwr_domain field is already populated with a value different 286 + * from the one stored in the struct pm_clk_notifier_block object, the function 287 + * does nothing. 
288 + */ 289 + static int pm_runtime_clk_notify(struct notifier_block *nb, 290 + unsigned long action, void *data) 291 + { 292 + struct pm_clk_notifier_block *clknb; 293 + struct device *dev = data; 294 + char *con_id; 295 + int error; 296 + 297 + dev_dbg(dev, "%s() %ld\n", __func__, action); 298 + 299 + clknb = container_of(nb, struct pm_clk_notifier_block, nb); 300 + 301 + switch (action) { 302 + case BUS_NOTIFY_ADD_DEVICE: 303 + if (dev->pwr_domain) 304 + break; 305 + 306 + error = pm_runtime_clk_init(dev); 307 + if (error) 308 + break; 309 + 310 + dev->pwr_domain = clknb->pwr_domain; 311 + if (clknb->con_ids[0]) { 312 + for (con_id = clknb->con_ids[0]; *con_id; con_id++) 313 + pm_runtime_clk_add(dev, con_id); 314 + } else { 315 + pm_runtime_clk_add(dev, NULL); 316 + } 317 + 318 + break; 319 + case BUS_NOTIFY_DEL_DEVICE: 320 + if (dev->pwr_domain != clknb->pwr_domain) 321 + break; 322 + 323 + dev->pwr_domain = NULL; 324 + pm_runtime_clk_destroy(dev); 325 + break; 326 + } 327 + 328 + return 0; 329 + } 330 + 331 + #else /* !CONFIG_PM_RUNTIME */ 332 + 333 + /** 334 + * enable_clock - Enable a device clock. 335 + * @dev: Device whose clock is to be enabled. 336 + * @con_id: Connection ID of the clock. 337 + */ 338 + static void enable_clock(struct device *dev, const char *con_id) 339 + { 340 + struct clk *clk; 341 + 342 + clk = clk_get(dev, con_id); 343 + if (!IS_ERR(clk)) { 344 + clk_enable(clk); 345 + clk_put(clk); 346 + dev_info(dev, "Runtime PM disabled, clock forced on.\n"); 347 + } 348 + } 349 + 350 + /** 351 + * disable_clock - Disable a device clock. 352 + * @dev: Device whose clock is to be disabled. 353 + * @con_id: Connection ID of the clock. 
354 + */ 355 + static void disable_clock(struct device *dev, const char *con_id) 356 + { 357 + struct clk *clk; 358 + 359 + clk = clk_get(dev, con_id); 360 + if (!IS_ERR(clk)) { 361 + clk_disable(clk); 362 + clk_put(clk); 363 + dev_info(dev, "Runtime PM disabled, clock forced off.\n"); 364 + } 365 + } 366 + 367 + /** 368 + * pm_runtime_clk_notify - Notify routine for device addition and removal. 369 + * @nb: Notifier block object this function is a member of. 370 + * @action: Operation being carried out by the caller. 371 + * @data: Device the routine is being run for. 372 + * 373 + * For this function to work, @nb must be a member of an object of type 374 + * struct pm_clk_notifier_block containing all of the requisite data. 375 + * Specifically, the con_ids member of that object is used to enable or disable 376 + * the device's clocks, depending on @action. 377 + */ 378 + static int pm_runtime_clk_notify(struct notifier_block *nb, 379 + unsigned long action, void *data) 380 + { 381 + struct pm_clk_notifier_block *clknb; 382 + struct device *dev = data; 383 + char *con_id; 384 + 385 + dev_dbg(dev, "%s() %ld\n", __func__, action); 386 + 387 + clknb = container_of(nb, struct pm_clk_notifier_block, nb); 388 + 389 + switch (action) { 390 + case BUS_NOTIFY_ADD_DEVICE: 391 + if (clknb->con_ids[0]) { 392 + for (con_id = clknb->con_ids[0]; *con_id; con_id++) 393 + enable_clock(dev, con_id); 394 + } else { 395 + enable_clock(dev, NULL); 396 + } 397 + break; 398 + case BUS_NOTIFY_DEL_DEVICE: 399 + if (clknb->con_ids[0]) { 400 + for (con_id = clknb->con_ids[0]; *con_id; con_id++) 401 + disable_clock(dev, con_id); 402 + } else { 403 + disable_clock(dev, NULL); 404 + } 405 + break; 406 + } 407 + 408 + return 0; 409 + } 410 + 411 + #endif /* !CONFIG_PM_RUNTIME */ 412 + 413 + /** 414 + * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. 415 + * @bus: Bus type to add the notifier to. 416 + * @clknb: Notifier to be added to the given bus type. 
417 + * 418 + * The nb member of @clknb is not expected to be initialized and its 419 + * notifier_call member will be replaced with pm_runtime_clk_notify(). However, 420 + * the remaining members of @clknb should be populated prior to calling this 421 + * routine. 422 + */ 423 + void pm_runtime_clk_add_notifier(struct bus_type *bus, 424 + struct pm_clk_notifier_block *clknb) 425 + { 426 + if (!bus || !clknb) 427 + return; 428 + 429 + clknb->nb.notifier_call = pm_runtime_clk_notify; 430 + bus_register_notifier(bus, &clknb->nb); 431 + }
+31 -33
drivers/base/power/main.c
··· 426 426 427 427 if (dev->pwr_domain) { 428 428 pm_dev_dbg(dev, state, "EARLY power domain "); 429 - pm_noirq_op(dev, &dev->pwr_domain->ops, state); 430 - } 431 - 432 - if (dev->type && dev->type->pm) { 429 + error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 430 + } else if (dev->type && dev->type->pm) { 433 431 pm_dev_dbg(dev, state, "EARLY type "); 434 432 error = pm_noirq_op(dev, dev->type->pm, state); 435 433 } else if (dev->class && dev->class->pm) { ··· 515 517 516 518 if (dev->pwr_domain) { 517 519 pm_dev_dbg(dev, state, "power domain "); 518 - pm_op(dev, &dev->pwr_domain->ops, state); 520 + error = pm_op(dev, &dev->pwr_domain->ops, state); 521 + goto End; 519 522 } 520 523 521 524 if (dev->type && dev->type->pm) { ··· 628 629 { 629 630 device_lock(dev); 630 631 631 - if (dev->pwr_domain && dev->pwr_domain->ops.complete) { 632 + if (dev->pwr_domain) { 632 633 pm_dev_dbg(dev, state, "completing power domain "); 633 - dev->pwr_domain->ops.complete(dev); 634 - } 635 - 636 - if (dev->type && dev->type->pm) { 634 + if (dev->pwr_domain->ops.complete) 635 + dev->pwr_domain->ops.complete(dev); 636 + } else if (dev->type && dev->type->pm) { 637 637 pm_dev_dbg(dev, state, "completing type "); 638 638 if (dev->type->pm->complete) 639 639 dev->type->pm->complete(dev); ··· 730 732 { 731 733 int error; 732 734 733 - if (dev->type && dev->type->pm) { 735 + if (dev->pwr_domain) { 736 + pm_dev_dbg(dev, state, "LATE power domain "); 737 + error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 738 + if (error) 739 + return error; 740 + } else if (dev->type && dev->type->pm) { 734 741 pm_dev_dbg(dev, state, "LATE type "); 735 742 error = pm_noirq_op(dev, dev->type->pm, state); 736 743 if (error) ··· 750 747 error = pm_noirq_op(dev, dev->bus->pm, state); 751 748 if (error) 752 749 return error; 753 - } 754 - 755 - if (dev->pwr_domain) { 756 - pm_dev_dbg(dev, state, "LATE power domain "); 757 - pm_noirq_op(dev, &dev->pwr_domain->ops, state); 758 750 } 759 751 760 752 
return 0; ··· 839 841 goto End; 840 842 } 841 843 844 + if (dev->pwr_domain) { 845 + pm_dev_dbg(dev, state, "power domain "); 846 + error = pm_op(dev, &dev->pwr_domain->ops, state); 847 + goto End; 848 + } 849 + 842 850 if (dev->type && dev->type->pm) { 843 851 pm_dev_dbg(dev, state, "type "); 844 852 error = pm_op(dev, dev->type->pm, state); 845 - goto Domain; 853 + goto End; 846 854 } 847 855 848 856 if (dev->class) { 849 857 if (dev->class->pm) { 850 858 pm_dev_dbg(dev, state, "class "); 851 859 error = pm_op(dev, dev->class->pm, state); 852 - goto Domain; 860 + goto End; 853 861 } else if (dev->class->suspend) { 854 862 pm_dev_dbg(dev, state, "legacy class "); 855 863 error = legacy_suspend(dev, state, dev->class->suspend); 856 - goto Domain; 864 + goto End; 857 865 } 858 866 } 859 867 ··· 871 867 pm_dev_dbg(dev, state, "legacy "); 872 868 error = legacy_suspend(dev, state, dev->bus->suspend); 873 869 } 874 - } 875 - 876 - Domain: 877 - if (!error && dev->pwr_domain) { 878 - pm_dev_dbg(dev, state, "power domain "); 879 - pm_op(dev, &dev->pwr_domain->ops, state); 880 870 } 881 871 882 872 End: ··· 963 965 964 966 device_lock(dev); 965 967 966 - if (dev->type && dev->type->pm) { 968 + if (dev->pwr_domain) { 969 + pm_dev_dbg(dev, state, "preparing power domain "); 970 + if (dev->pwr_domain->ops.prepare) 971 + error = dev->pwr_domain->ops.prepare(dev); 972 + suspend_report_result(dev->pwr_domain->ops.prepare, error); 973 + if (error) 974 + goto End; 975 + } else if (dev->type && dev->type->pm) { 967 976 pm_dev_dbg(dev, state, "preparing type "); 968 977 if (dev->type->pm->prepare) 969 978 error = dev->type->pm->prepare(dev); ··· 989 984 if (dev->bus->pm->prepare) 990 985 error = dev->bus->pm->prepare(dev); 991 986 suspend_report_result(dev->bus->pm->prepare, error); 992 - if (error) 993 - goto End; 994 - } 995 - 996 - if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { 997 - pm_dev_dbg(dev, state, "preparing power domain "); 998 - 
dev->pwr_domain->ops.prepare(dev); 999 987 } 1000 988 1001 989 End:
+10 -19
drivers/base/power/runtime.c
··· 168 168 static int rpm_idle(struct device *dev, int rpmflags) 169 169 { 170 170 int (*callback)(struct device *); 171 - int (*domain_callback)(struct device *); 172 171 int retval; 173 172 174 173 retval = rpm_check_suspend_allowed(dev); ··· 213 214 214 215 dev->power.idle_notification = true; 215 216 216 - if (dev->type && dev->type->pm) 217 + if (dev->pwr_domain) 218 + callback = dev->pwr_domain->ops.runtime_idle; 219 + else if (dev->type && dev->type->pm) 217 220 callback = dev->type->pm->runtime_idle; 218 221 else if (dev->class && dev->class->pm) 219 222 callback = dev->class->pm->runtime_idle; ··· 224 223 else 225 224 callback = NULL; 226 225 227 - if (dev->pwr_domain) 228 - domain_callback = dev->pwr_domain->ops.runtime_idle; 229 - else 230 - domain_callback = NULL; 231 - 232 - if (callback || domain_callback) { 226 + if (callback) { 233 227 spin_unlock_irq(&dev->power.lock); 234 228 235 - if (domain_callback) 236 - retval = domain_callback(dev); 237 - 238 - if (!retval && callback) 239 - callback(dev); 229 + callback(dev); 240 230 241 231 spin_lock_irq(&dev->power.lock); 242 232 } ··· 374 382 375 383 __update_runtime_status(dev, RPM_SUSPENDING); 376 384 377 - if (dev->type && dev->type->pm) 385 + if (dev->pwr_domain) 386 + callback = dev->pwr_domain->ops.runtime_suspend; 387 + else if (dev->type && dev->type->pm) 378 388 callback = dev->type->pm->runtime_suspend; 379 389 else if (dev->class && dev->class->pm) 380 390 callback = dev->class->pm->runtime_suspend; ··· 394 400 else 395 401 pm_runtime_cancel_pending(dev); 396 402 } else { 397 - if (dev->pwr_domain) 398 - rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev); 399 403 no_callback: 400 404 __update_runtime_status(dev, RPM_SUSPENDED); 401 405 pm_runtime_deactivate_timer(dev); ··· 574 582 __update_runtime_status(dev, RPM_RESUMING); 575 583 576 584 if (dev->pwr_domain) 577 - rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); 578 - 579 - if (dev->type && dev->type->pm) 585 + callback = 
dev->pwr_domain->ops.runtime_resume; 586 + else if (dev->type && dev->type->pm) 580 587 callback = dev->type->pm->runtime_resume; 581 588 else if (dev->class && dev->class->pm) 582 589 callback = dev->class->pm->runtime_resume;
+60 -3
include/linux/platform_device.h
··· 150 150 struct resource *res, unsigned int n_res, 151 151 const void *data, size_t size); 152 152 153 - extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); 154 - extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); 155 - 156 153 /* early platform driver interface */ 157 154 struct early_platform_driver { 158 155 const char *class_str; ··· 201 204 return bufsiz ? buf : NULL; \ 202 205 } 203 206 #endif /* MODULE */ 207 + 208 + #ifdef CONFIG_PM_SLEEP 209 + extern int platform_pm_prepare(struct device *dev); 210 + extern void platform_pm_complete(struct device *dev); 211 + #else 212 + #define platform_pm_prepare NULL 213 + #define platform_pm_complete NULL 214 + #endif 215 + 216 + #ifdef CONFIG_SUSPEND 217 + extern int platform_pm_suspend(struct device *dev); 218 + extern int platform_pm_suspend_noirq(struct device *dev); 219 + extern int platform_pm_resume(struct device *dev); 220 + extern int platform_pm_resume_noirq(struct device *dev); 221 + #else 222 + #define platform_pm_suspend NULL 223 + #define platform_pm_resume NULL 224 + #define platform_pm_suspend_noirq NULL 225 + #define platform_pm_resume_noirq NULL 226 + #endif 227 + 228 + #ifdef CONFIG_HIBERNATE_CALLBACKS 229 + extern int platform_pm_freeze(struct device *dev); 230 + extern int platform_pm_freeze_noirq(struct device *dev); 231 + extern int platform_pm_thaw(struct device *dev); 232 + extern int platform_pm_thaw_noirq(struct device *dev); 233 + extern int platform_pm_poweroff(struct device *dev); 234 + extern int platform_pm_poweroff_noirq(struct device *dev); 235 + extern int platform_pm_restore(struct device *dev); 236 + extern int platform_pm_restore_noirq(struct device *dev); 237 + #else 238 + #define platform_pm_freeze NULL 239 + #define platform_pm_thaw NULL 240 + #define platform_pm_poweroff NULL 241 + #define platform_pm_restore NULL 242 + #define platform_pm_freeze_noirq NULL 243 + #define platform_pm_thaw_noirq NULL 244 + #define platform_pm_poweroff_noirq NULL 245 
+ #define platform_pm_restore_noirq NULL 246 + #endif 247 + 248 + #ifdef CONFIG_PM_SLEEP 249 + #define USE_PLATFORM_PM_SLEEP_OPS \ 250 + .prepare = platform_pm_prepare, \ 251 + .complete = platform_pm_complete, \ 252 + .suspend = platform_pm_suspend, \ 253 + .resume = platform_pm_resume, \ 254 + .freeze = platform_pm_freeze, \ 255 + .thaw = platform_pm_thaw, \ 256 + .poweroff = platform_pm_poweroff, \ 257 + .restore = platform_pm_restore, \ 258 + .suspend_noirq = platform_pm_suspend_noirq, \ 259 + .resume_noirq = platform_pm_resume_noirq, \ 260 + .freeze_noirq = platform_pm_freeze_noirq, \ 261 + .thaw_noirq = platform_pm_thaw_noirq, \ 262 + .poweroff_noirq = platform_pm_poweroff_noirq, \ 263 + .restore_noirq = platform_pm_restore_noirq, 264 + #else 265 + #define USE_PLATFORM_PM_SLEEP_OPS 266 + #endif 204 267 205 268 #endif /* _PLATFORM_DEVICE_H_ */
+1
include/linux/pm.h
··· 460 460 unsigned long active_jiffies; 461 461 unsigned long suspended_jiffies; 462 462 unsigned long accounting_timestamp; 463 + void *subsys_data; /* Owned by the subsystem. */ 463 464 #endif 464 465 }; 465 466
+42
include/linux/pm_runtime.h
··· 245 245 __pm_runtime_use_autosuspend(dev, false); 246 246 } 247 247 248 + struct pm_clk_notifier_block { 249 + struct notifier_block nb; 250 + struct dev_power_domain *pwr_domain; 251 + char *con_ids[]; 252 + }; 253 + 254 + #ifdef CONFIG_PM_RUNTIME_CLK 255 + extern int pm_runtime_clk_init(struct device *dev); 256 + extern void pm_runtime_clk_destroy(struct device *dev); 257 + extern int pm_runtime_clk_add(struct device *dev, const char *con_id); 258 + extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); 259 + extern int pm_runtime_clk_suspend(struct device *dev); 260 + extern int pm_runtime_clk_resume(struct device *dev); 261 + #else 262 + static inline int pm_runtime_clk_init(struct device *dev) 263 + { 264 + return -EINVAL; 265 + } 266 + static inline void pm_runtime_clk_destroy(struct device *dev) 267 + { 268 + } 269 + static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) 270 + { 271 + return -EINVAL; 272 + } 273 + static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) 274 + { 275 + } 276 + #define pm_runtime_clk_suspend NULL 277 + #define pm_runtime_clk_resume NULL 278 + #endif 279 + 280 + #ifdef CONFIG_HAVE_CLK 281 + extern void pm_runtime_clk_add_notifier(struct bus_type *bus, 282 + struct pm_clk_notifier_block *clknb); 283 + #else 284 + static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, 285 + struct pm_clk_notifier_block *clknb) 286 + { 287 + } 288 + #endif 289 + 248 290 #endif
+4
kernel/power/Kconfig
··· 229 229 representing individual voltage domains and provides SOC 230 230 implementations a ready to use framework to manage OPPs. 231 231 For more information, read <file:Documentation/power/opp.txt> 232 + 233 + config PM_RUNTIME_CLK 234 + def_bool y 235 + depends on PM_RUNTIME && HAVE_CLK