Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

PM / Domains: Allow genpd to power on during system PM phases

If a PM domain is powered off when the first device starts its system PM
prepare phase, genpd prevents any further attempts to power on the PM
domain during the following system PM phases. Not until the system PM
complete phase has been finalized for all devices in the PM domain does
genpd again allow it to be powered on.

This behaviour needs to be changed, as a subsystem/driver for a device in
the same PM domain may still need to be able to serve requests in some of
the system PM phases. Accordingly, it may need to runtime resume its
device and thus also request the corresponding PM domain to be powered on.

To deal with these scenarios, let's make the device operational in the
system PM prepare phase by runtime resuming it, no matter if the PM domain
is powered on or off. Changing this also enables us to remove genpd's
suspend_power_off flag, as it's being used to track this condition.
Additionally, we must allow the PM domain to be powered on via runtime PM
during the system PM phases.

This change also requires a fix in the AMD ACP (Audio CoProcessor) drm
driver. It registers a genpd to model the ACP as a PM domain, but
unfortunately it also abuses genpd's "internal" suspend_power_off flag
to deal with a corner case at system PM resume.

More precisely, the so-called SMU block powers on the ACP at system PM
resume, unconditionally, whether it's being used or not. This may lead to
a situation where genpd's internal status of the power state does not
correctly reflect the power state of the HW after a system PM resume.

Because of changing the behaviour of genpd, by runtime resuming devices in
the prepare phase, the AMD ACP drm driver no longer has to deal with this
corner case. So let's just drop the related code in this driver.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Reviewed-by: Kevin Hilman <khilman@baylibre.com>
Acked-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

authored by

Ulf Hansson and committed by
Rafael J. Wysocki
39dd0f23 5edb5649

+31 -77
+31 -53
drivers/base/power/domain.c
··· 187 187 struct gpd_link *link; 188 188 int ret = 0; 189 189 190 - if (genpd->status == GPD_STATE_ACTIVE 191 - || (genpd->prepared_count > 0 && genpd->suspend_power_off)) 190 + if (genpd->status == GPD_STATE_ACTIVE) 192 191 return 0; 193 192 194 193 /* ··· 734 735 735 736 mutex_lock(&genpd->lock); 736 737 737 - if (genpd->prepared_count++ == 0) { 738 + if (genpd->prepared_count++ == 0) 738 739 genpd->suspended_count = 0; 739 - genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; 740 - } 741 740 742 741 mutex_unlock(&genpd->lock); 743 742 744 - if (genpd->suspend_power_off) 745 - return 0; 746 - 747 743 /* 748 - * The PM domain must be in the GPD_STATE_ACTIVE state at this point, 749 - * so genpd_poweron() will return immediately, but if the device 750 - * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need 751 - * to make it operational. 744 + * Even if the PM domain is powered off at this point, we can't expect 745 + * it to remain in that state during the entire system PM suspend 746 + * phase. Any subsystem/driver for a device in the PM domain, may still 747 + * need to serve a request which may require the device to be runtime 748 + * resumed and its PM domain to be powered. 749 + * 750 + * As we are disabling runtime PM at this point, we are preventing the 751 + * subsystem/driver to decide themselves. For that reason, we need to 752 + * make sure the device is operational as it may be required in some 753 + * cases. 752 754 */ 753 755 pm_runtime_resume(dev); 754 756 __pm_runtime_disable(dev, false); ··· 758 758 if (ret) { 759 759 mutex_lock(&genpd->lock); 760 760 761 - if (--genpd->prepared_count == 0) 762 - genpd->suspend_power_off = false; 761 + genpd->prepared_count--; 763 762 764 763 mutex_unlock(&genpd->lock); 765 764 pm_runtime_enable(dev); ··· 785 786 if (IS_ERR(genpd)) 786 787 return -EINVAL; 787 788 788 - return genpd->suspend_power_off ? 
0 : pm_generic_suspend(dev); 789 + return pm_generic_suspend(dev); 789 790 } 790 791 791 792 /** ··· 806 807 if (IS_ERR(genpd)) 807 808 return -EINVAL; 808 809 809 - return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev); 810 + return pm_generic_suspend_late(dev); 810 811 } 811 812 812 813 /** ··· 826 827 if (IS_ERR(genpd)) 827 828 return -EINVAL; 828 829 829 - if (genpd->suspend_power_off 830 - || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 830 + if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) 831 831 return 0; 832 832 833 833 genpd_stop_dev(genpd, dev); ··· 858 860 if (IS_ERR(genpd)) 859 861 return -EINVAL; 860 862 861 - if (genpd->suspend_power_off 862 - || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) 863 + if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) 863 864 return 0; 864 865 865 866 /* ··· 891 894 if (IS_ERR(genpd)) 892 895 return -EINVAL; 893 896 894 - return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev); 897 + return pm_generic_resume_early(dev); 895 898 } 896 899 897 900 /** ··· 912 915 if (IS_ERR(genpd)) 913 916 return -EINVAL; 914 917 915 - return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); 918 + return pm_generic_resume(dev); 916 919 } 917 920 918 921 /** ··· 933 936 if (IS_ERR(genpd)) 934 937 return -EINVAL; 935 938 936 - return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); 939 + return pm_generic_freeze(dev); 937 940 } 938 941 939 942 /** ··· 955 958 if (IS_ERR(genpd)) 956 959 return -EINVAL; 957 960 958 - return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev); 961 + return pm_generic_freeze_late(dev); 959 962 } 960 963 961 964 /** ··· 977 980 if (IS_ERR(genpd)) 978 981 return -EINVAL; 979 982 980 - return genpd->suspend_power_off ? 
0 : genpd_stop_dev(genpd, dev); 983 + return genpd_stop_dev(genpd, dev); 981 984 } 982 985 983 986 /** ··· 997 1000 if (IS_ERR(genpd)) 998 1001 return -EINVAL; 999 1002 1000 - return genpd->suspend_power_off ? 1001 - 0 : genpd_start_dev(genpd, dev); 1003 + return genpd_start_dev(genpd, dev); 1002 1004 } 1003 1005 1004 1006 /** ··· 1019 1023 if (IS_ERR(genpd)) 1020 1024 return -EINVAL; 1021 1025 1022 - return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev); 1026 + return pm_generic_thaw_early(dev); 1023 1027 } 1024 1028 1025 1029 /** ··· 1040 1044 if (IS_ERR(genpd)) 1041 1045 return -EINVAL; 1042 1046 1043 - return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); 1047 + return pm_generic_thaw(dev); 1044 1048 } 1045 1049 1046 1050 /** ··· 1068 1072 * At this point suspended_count == 0 means we are being run for the 1069 1073 * first time for the given domain in the present cycle. 1070 1074 */ 1071 - if (genpd->suspended_count++ == 0) { 1075 + if (genpd->suspended_count++ == 0) 1072 1076 /* 1073 1077 * The boot kernel might put the domain into arbitrary state, 1074 1078 * so make it appear as powered off to pm_genpd_sync_poweron(), 1075 1079 * so that it tries to power it on in case it was really off. 1076 1080 */ 1077 1081 genpd->status = GPD_STATE_POWER_OFF; 1078 - if (genpd->suspend_power_off) { 1079 - /* 1080 - * If the domain was off before the hibernation, make 1081 - * sure it will be off going forward. 
1082 - */ 1083 - genpd_power_off(genpd, true); 1084 - 1085 - return 0; 1086 - } 1087 - } 1088 - 1089 - if (genpd->suspend_power_off) 1090 - return 0; 1091 1082 1092 1083 pm_genpd_sync_poweron(genpd, true); 1093 1084 ··· 1093 1110 static void pm_genpd_complete(struct device *dev) 1094 1111 { 1095 1112 struct generic_pm_domain *genpd; 1096 - bool run_complete; 1097 1113 1098 1114 dev_dbg(dev, "%s()\n", __func__); 1099 1115 ··· 1102 1120 1103 1121 mutex_lock(&genpd->lock); 1104 1122 1105 - run_complete = !genpd->suspend_power_off; 1106 - if (--genpd->prepared_count == 0) 1107 - genpd->suspend_power_off = false; 1123 + genpd->prepared_count--; 1108 1124 1109 1125 mutex_unlock(&genpd->lock); 1110 1126 1111 - if (run_complete) { 1112 - pm_generic_complete(dev); 1113 - pm_runtime_set_active(dev); 1114 - pm_runtime_enable(dev); 1115 - pm_request_idle(dev); 1116 - } 1127 + pm_generic_complete(dev); 1128 + pm_runtime_set_active(dev); 1129 + pm_runtime_enable(dev); 1130 + pm_request_idle(dev); 1117 1131 } 1118 1132 1119 1133 /**
-23
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 421 421 422 422 static int acp_resume(void *handle) 423 423 { 424 - int i, ret; 425 - struct acp_pm_domain *apd; 426 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 427 - 428 - /* return early if no ACP */ 429 - if (!adev->acp.acp_genpd) 430 - return 0; 431 - 432 - /* SMU block will power on ACP irrespective of ACP runtime status. 433 - * Power off explicitly based on genpd ACP runtime status so that ACP 434 - * hw and ACP-genpd status are in sync. 435 - * 'suspend_power_off' represents "Power status before system suspend" 436 - */ 437 - if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { 438 - apd = container_of(&adev->acp.acp_genpd->gpd, 439 - struct acp_pm_domain, gpd); 440 - 441 - for (i = 4; i >= 0 ; i--) { 442 - ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); 443 - if (ret) 444 - pr_err("ACP tile %d tile suspend failed\n", i); 445 - } 446 - } 447 424 return 0; 448 425 } 449 426
-1
include/linux/pm_domain.h
··· 57 57 unsigned int device_count; /* Number of devices */ 58 58 unsigned int suspended_count; /* System suspend device counter */ 59 59 unsigned int prepared_count; /* Suspend counter of prepared devices */ 60 - bool suspend_power_off; /* Power status before system suspend */ 61 60 int (*power_off)(struct generic_pm_domain *domain); 62 61 int (*power_on)(struct generic_pm_domain *domain); 63 62 struct gpd_dev_ops dev_ops;