Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pmdomain-v6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm

Pull pmdomain updates from Ulf Hansson:
"Core:
- Log a message when unused PM domains get disabled
- Scale down parent/child performance states in the reverse order

Providers:
- qcom: rpmpd: Add power domains support for MSM8974, MSM8974PRO,
PMA8084 and PM8841
- renesas: rcar-gen4-sysc: Reduce atomic delays
- renesas: rcar-sysc: Adjust the waiting time to cover the worst case
- renesas: r8a779h0-sysc: Add support for the r8a779h0 PM domains
- imx: imx8mp-blk-ctrl: Add the fdcc clock to the hdmimix domains
- imx: imx8mp-blk-ctrl: Error out if domains are missing in DT

Improve support for multiple PM domains:
- Add two helper functions to attach/detach multiple PM domains
- Convert a couple of drivers to use the new helper functions"

* tag 'pmdomain-v6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm: (22 commits)
pmdomain: renesas: rcar-gen4-sysc: Reduce atomic delays
pmdomain: renesas: Adjust the waiting time to cover the worst case
pmdomain: qcom: rpmpd: Add MSM8974PRO+PMA8084 power domains
pmdomain: qcom: rpmpd: Add MSM8974+PM8841 power domains
pmdomain: core: constify of_phandle_args in add device and subdomain
pmdomain: core: constify of_phandle_args in xlate
media: venus: Convert to dev_pm_domain_attach|detach_list() for vcodec
remoteproc: qcom_q6v5_adsp: Convert to dev_pm_domain_attach|detach_list()
remoteproc: imx_rproc: Convert to dev_pm_domain_attach|detach_list()
remoteproc: imx_dsp_rproc: Convert to dev_pm_domain_attach|detach_list()
PM: domains: Add helper functions to attach/detach multiple PM domains
pmdomain: imx8mp-blk-ctrl: imx8mp_blk: Add fdcc clock to hdmimix domain
pmdomain: mediatek: Use devm_platform_ioremap_resource() in init_scp()
pmdomain: renesas: r8a779h0-sysc: Add r8a779h0 support
pmdomain: imx8mp-blk-ctrl: Error out if domains are missing in DT
pmdomain: ti: Add a null pointer check to the omap_prm_domain_init
pmdomain: renesas: rcar-gen4-sysc: Remove unneeded includes
pmdomain: core: Print a message when unused power domains are disabled
pmdomain: qcom: rpmpd: Keep one RPM handle for all RPMPDs
pmdomain: core: Scale down parent/child performance states in reverse order
...

+598 -413
+2
Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
··· 24 24 - qcom,msm8917-rpmpd 25 25 - qcom,msm8939-rpmpd 26 26 - qcom,msm8953-rpmpd 27 + - qcom,msm8974-rpmpd 28 + - qcom,msm8974pro-pma8084-rpmpd 27 29 - qcom,msm8976-rpmpd 28 30 - qcom,msm8994-rpmpd 29 31 - qcom,msm8996-rpmpd
+14 -8
Documentation/devicetree/bindings/soc/imx/fsl,imx8mp-hdmi-blk-ctrl.yaml
··· 27 27 const: 1 28 28 29 29 power-domains: 30 - minItems: 8 31 - maxItems: 8 30 + minItems: 10 31 + maxItems: 10 32 32 33 33 power-domain-names: 34 34 items: ··· 40 40 - const: trng 41 41 - const: hdmi-tx 42 42 - const: hdmi-tx-phy 43 + - const: hdcp 44 + - const: hrv 43 45 44 46 clocks: 45 - minItems: 4 46 - maxItems: 4 47 + minItems: 5 48 + maxItems: 5 47 49 48 50 clock-names: 49 51 items: ··· 53 51 - const: axi 54 52 - const: ref_266m 55 53 - const: ref_24m 54 + - const: fdcc 56 55 57 56 interconnects: 58 57 maxItems: 3 ··· 85 82 clocks = <&clk IMX8MP_CLK_HDMI_APB>, 86 83 <&clk IMX8MP_CLK_HDMI_ROOT>, 87 84 <&clk IMX8MP_CLK_HDMI_REF_266M>, 88 - <&clk IMX8MP_CLK_HDMI_24M>; 89 - clock-names = "apb", "axi", "ref_266m", "ref_24m"; 85 + <&clk IMX8MP_CLK_HDMI_24M>, 86 + <&clk IMX8MP_CLK_HDMI_FDCC_TST>; 87 + clock-names = "apb", "axi", "ref_266m", "ref_24m", "fdcc"; 90 88 power-domains = <&pgc_hdmimix>, <&pgc_hdmimix>, <&pgc_hdmimix>, 91 89 <&pgc_hdmimix>, <&pgc_hdmimix>, <&pgc_hdmimix>, 92 - <&pgc_hdmimix>, <&pgc_hdmi_phy>; 90 + <&pgc_hdmimix>, <&pgc_hdmi_phy>, 91 + <&pgc_hdmimix>, <&pgc_hdmimix>; 93 92 power-domain-names = "bus", "irqsteer", "lcdif", "pai", "pvi", "trng", 94 - "hdmi-tx", "hdmi-tx-phy"; 93 + "hdmi-tx", "hdmi-tx-phy", 94 + "hdcp", "hrv"; 95 95 #power-domain-cells = <1>; 96 96 };
+134
drivers/base/power/common.c
··· 168 168 EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name); 169 169 170 170 /** 171 + * dev_pm_domain_attach_list - Associate a device with its PM domains. 172 + * @dev: The device used to lookup the PM domains for. 173 + * @data: The data used for attaching to the PM domains. 174 + * @list: An out-parameter with an allocated list of attached PM domains. 175 + * 176 + * This function helps to attach a device to its multiple PM domains. The 177 + * caller, which is typically a driver's probe function, may provide a list of 178 + * names for the PM domains that we should try to attach the device to, but it 179 + * may also provide an empty list, in case the attach should be done for all of 180 + * the available PM domains. 181 + * 182 + * Callers must ensure proper synchronization of this function with power 183 + * management callbacks. 184 + * 185 + * Returns the number of attached PM domains or a negative error code in case of 186 + * a failure. Note that, to detach the list of PM domains, the driver shall call 187 + * dev_pm_domain_detach_list(), typically during the remove phase. 188 + */ 189 + int dev_pm_domain_attach_list(struct device *dev, 190 + const struct dev_pm_domain_attach_data *data, 191 + struct dev_pm_domain_list **list) 192 + { 193 + struct device_node *np = dev->of_node; 194 + struct dev_pm_domain_list *pds; 195 + struct device *pd_dev = NULL; 196 + int ret, i, num_pds = 0; 197 + bool by_id = true; 198 + u32 pd_flags = data ? data->pd_flags : 0; 199 + u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 : 200 + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; 201 + 202 + if (dev->pm_domain) 203 + return -EEXIST; 204 + 205 + /* For now this is limited to OF based platforms. 
*/ 206 + if (!np) 207 + return 0; 208 + 209 + if (data && data->pd_names) { 210 + num_pds = data->num_pd_names; 211 + by_id = false; 212 + } else { 213 + num_pds = of_count_phandle_with_args(np, "power-domains", 214 + "#power-domain-cells"); 215 + } 216 + 217 + if (num_pds <= 0) 218 + return 0; 219 + 220 + pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL); 221 + if (!pds) 222 + return -ENOMEM; 223 + 224 + pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs), 225 + GFP_KERNEL); 226 + if (!pds->pd_devs) 227 + return -ENOMEM; 228 + 229 + pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links), 230 + GFP_KERNEL); 231 + if (!pds->pd_links) 232 + return -ENOMEM; 233 + 234 + if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON) 235 + link_flags |= DL_FLAG_RPM_ACTIVE; 236 + 237 + for (i = 0; i < num_pds; i++) { 238 + if (by_id) 239 + pd_dev = dev_pm_domain_attach_by_id(dev, i); 240 + else 241 + pd_dev = dev_pm_domain_attach_by_name(dev, 242 + data->pd_names[i]); 243 + if (IS_ERR_OR_NULL(pd_dev)) { 244 + ret = pd_dev ? PTR_ERR(pd_dev) : -ENODEV; 245 + goto err_attach; 246 + } 247 + 248 + if (link_flags) { 249 + struct device_link *link; 250 + 251 + link = device_link_add(dev, pd_dev, link_flags); 252 + if (!link) { 253 + ret = -ENODEV; 254 + goto err_link; 255 + } 256 + 257 + pds->pd_links[i] = link; 258 + } 259 + 260 + pds->pd_devs[i] = pd_dev; 261 + } 262 + 263 + pds->num_pds = num_pds; 264 + *list = pds; 265 + return num_pds; 266 + 267 + err_link: 268 + dev_pm_domain_detach(pd_dev, true); 269 + err_attach: 270 + while (--i >= 0) { 271 + if (pds->pd_links[i]) 272 + device_link_del(pds->pd_links[i]); 273 + dev_pm_domain_detach(pds->pd_devs[i], true); 274 + } 275 + return ret; 276 + } 277 + EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list); 278 + 279 + /** 171 280 * dev_pm_domain_detach - Detach a device from its PM domain. 172 281 * @dev: Device to detach. 173 282 * @power_off: Used to indicate whether we should power off the device. 
··· 295 186 dev->pm_domain->detach(dev, power_off); 296 187 } 297 188 EXPORT_SYMBOL_GPL(dev_pm_domain_detach); 189 + 190 + /** 191 + * dev_pm_domain_detach_list - Detach a list of PM domains. 192 + * @list: The list of PM domains to detach. 193 + * 194 + * This function reverse the actions from dev_pm_domain_attach_list(). 195 + * Typically it should be invoked during the remove phase from drivers. 196 + * 197 + * Callers must ensure proper synchronization of this function with power 198 + * management callbacks. 199 + */ 200 + void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) 201 + { 202 + int i; 203 + 204 + if (!list) 205 + return; 206 + 207 + for (i = 0; i < list->num_pds; i++) { 208 + if (list->pd_links[i]) 209 + device_link_del(list->pd_links[i]); 210 + dev_pm_domain_detach(list->pd_devs[i], true); 211 + } 212 + } 213 + EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list); 298 214 299 215 /** 300 216 * dev_pm_domain_start - Start the device through its PM domain.
+7 -5
drivers/media/platform/qcom/venus/core.c
··· 16 16 #include <linux/platform_device.h> 17 17 #include <linux/slab.h> 18 18 #include <linux/types.h> 19 + #include <linux/pm_domain.h> 19 20 #include <linux/pm_runtime.h> 20 21 #include <media/videobuf2-v4l2.h> 21 22 #include <media/v4l2-mem2mem.h> ··· 115 114 pm_runtime_put_sync(core->dev); 116 115 117 116 for (i = 0; i < max_attempts; i++) { 118 - if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0])) 117 + if (!core->pmdomains || 118 + !pm_runtime_active(core->pmdomains->pd_devs[0])) 119 119 break; 120 120 usleep_range(1000, 1500); 121 121 } ··· 707 705 .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" }, 708 706 .vcodec1_clks = { "vcodec1_core", "vcodec1_bus" }, 709 707 .vcodec_clks_num = 2, 710 - .vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" }, 708 + .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0", "vcodec1" }, 711 709 .vcodec_pmdomains_num = 3, 712 710 .opp_pmdomain = (const char *[]) { "cx", NULL }, 713 711 .vcodec_num = 2, ··· 756 754 .clks_num = 3, 757 755 .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" }, 758 756 .vcodec_clks_num = 2, 759 - .vcodec_pmdomains = { "venus", "vcodec0" }, 757 + .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" }, 760 758 .vcodec_pmdomains_num = 2, 761 759 .opp_pmdomain = (const char *[]) { "cx", NULL }, 762 760 .vcodec_num = 1, ··· 813 811 .resets_num = 2, 814 812 .vcodec0_clks = { "vcodec0_core" }, 815 813 .vcodec_clks_num = 1, 816 - .vcodec_pmdomains = { "venus", "vcodec0" }, 814 + .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" }, 817 815 .vcodec_pmdomains_num = 2, 818 816 .opp_pmdomain = (const char *[]) { "mx", NULL }, 819 817 .vcodec_num = 1, ··· 872 870 .clks_num = 3, 873 871 .vcodec0_clks = {"vcodec_core", "vcodec_bus"}, 874 872 .vcodec_clks_num = 2, 875 - .vcodec_pmdomains = { "venus", "vcodec0" }, 873 + .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" }, 876 874 .vcodec_pmdomains_num = 2, 877 875 .opp_pmdomain = (const char *[]) { "cx", NULL }, 878 876 
.vcodec_num = 1,
+3 -4
drivers/media/platform/qcom/venus/core.h
··· 25 25 26 26 #define VIDC_CLKS_NUM_MAX 4 27 27 #define VIDC_VCODEC_CLKS_NUM_MAX 2 28 - #define VIDC_PMDOMAINS_NUM_MAX 3 29 28 #define VIDC_RESETS_NUM_MAX 2 30 29 31 30 extern int venus_fw_debug; ··· 71 72 const char * const vcodec0_clks[VIDC_VCODEC_CLKS_NUM_MAX]; 72 73 const char * const vcodec1_clks[VIDC_VCODEC_CLKS_NUM_MAX]; 73 74 unsigned int vcodec_clks_num; 74 - const char * const vcodec_pmdomains[VIDC_PMDOMAINS_NUM_MAX]; 75 + const char **vcodec_pmdomains; 75 76 unsigned int vcodec_pmdomains_num; 76 77 const char **opp_pmdomain; 77 78 unsigned int vcodec_num; ··· 133 134 * @video_path: an interconnect handle to video to/from memory path 134 135 * @cpucfg_path: an interconnect handle to cpu configuration path 135 136 * @has_opp_table: does OPP table exist 136 - * @pmdomains: an array of pmdomains struct device pointers 137 + * @pmdomains: a pointer to a list of pmdomains 137 138 * @opp_dl_venus: an device-link for device OPP 138 139 * @opp_pmdomain: an OPP power-domain 139 140 * @resets: an array of reset signals ··· 186 187 struct icc_path *video_path; 187 188 struct icc_path *cpucfg_path; 188 189 bool has_opp_table; 189 - struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX]; 190 + struct dev_pm_domain_list *pmdomains; 190 191 struct device_link *opp_dl_venus; 191 192 struct device *opp_pmdomain; 192 193 struct reset_control *resets[VIDC_RESETS_NUM_MAX];
+16 -32
drivers/media/platform/qcom/venus/pm_helpers.c
··· 455 455 if (ret) 456 456 return ret; 457 457 458 - ret = pm_runtime_put_sync(core->pmdomains[1]); 458 + ret = pm_runtime_put_sync(core->pmdomains->pd_devs[1]); 459 459 if (ret < 0) 460 460 return ret; 461 461 } ··· 471 471 if (ret) 472 472 return ret; 473 473 474 - ret = pm_runtime_put_sync(core->pmdomains[2]); 474 + ret = pm_runtime_put_sync(core->pmdomains->pd_devs[2]); 475 475 if (ret < 0) 476 476 return ret; 477 477 } ··· 484 484 int ret; 485 485 486 486 if (coreid_mask & VIDC_CORE_ID_1) { 487 - ret = pm_runtime_get_sync(core->pmdomains[1]); 487 + ret = pm_runtime_get_sync(core->pmdomains->pd_devs[1]); 488 488 if (ret < 0) 489 489 return ret; 490 490 ··· 502 502 } 503 503 504 504 if (coreid_mask & VIDC_CORE_ID_2) { 505 - ret = pm_runtime_get_sync(core->pmdomains[2]); 505 + ret = pm_runtime_get_sync(core->pmdomains->pd_devs[2]); 506 506 if (ret < 0) 507 507 return ret; 508 508 ··· 860 860 struct device **opp_virt_dev; 861 861 struct device *dev = core->dev; 862 862 const struct venus_resources *res = core->res; 863 - struct device *pd; 864 - unsigned int i; 863 + struct dev_pm_domain_attach_data vcodec_data = { 864 + .pd_names = res->vcodec_pmdomains, 865 + .num_pd_names = res->vcodec_pmdomains_num, 866 + .pd_flags = PD_FLAG_NO_DEV_LINK, 867 + }; 865 868 866 869 if (!res->vcodec_pmdomains_num) 867 870 goto skip_pmdomains; 868 871 869 - for (i = 0; i < res->vcodec_pmdomains_num; i++) { 870 - pd = dev_pm_domain_attach_by_name(dev, 871 - res->vcodec_pmdomains[i]); 872 - if (IS_ERR_OR_NULL(pd)) 873 - return pd ? 
PTR_ERR(pd) : -ENODATA; 874 - core->pmdomains[i] = pd; 875 - } 872 + ret = dev_pm_domain_attach_list(dev, &vcodec_data, &core->pmdomains); 873 + if (ret < 0) 874 + return ret; 876 875 877 876 skip_pmdomains: 878 877 if (!core->res->opp_pmdomain) ··· 895 896 return 0; 896 897 897 898 opp_attach_err: 898 - for (i = 0; i < res->vcodec_pmdomains_num; i++) { 899 - if (IS_ERR_OR_NULL(core->pmdomains[i])) 900 - continue; 901 - dev_pm_domain_detach(core->pmdomains[i], true); 902 - } 903 - 899 + dev_pm_domain_detach_list(core->pmdomains); 904 900 return ret; 905 901 } 906 902 907 903 static void vcodec_domains_put(struct venus_core *core) 908 904 { 909 - const struct venus_resources *res = core->res; 910 - unsigned int i; 905 + dev_pm_domain_detach_list(core->pmdomains); 911 906 912 - if (!res->vcodec_pmdomains_num) 913 - goto skip_pmdomains; 914 - 915 - for (i = 0; i < res->vcodec_pmdomains_num; i++) { 916 - if (IS_ERR_OR_NULL(core->pmdomains[i])) 917 - continue; 918 - dev_pm_domain_detach(core->pmdomains[i], true); 919 - } 920 - 921 - skip_pmdomains: 922 907 if (!core->has_opp_table) 923 908 return; 924 909 ··· 1018 1035 static int core_power_v4(struct venus_core *core, int on) 1019 1036 { 1020 1037 struct device *dev = core->dev; 1021 - struct device *pmctrl = core->pmdomains[0]; 1038 + struct device *pmctrl = core->pmdomains ? 1039 + core->pmdomains->pd_devs[0] : NULL; 1022 1040 int ret = 0; 1023 1041 1024 1042 if (on == POWER_ON) {
+90 -59
drivers/pmdomain/core.c
··· 311 311 } 312 312 313 313 static int _genpd_set_performance_state(struct generic_pm_domain *genpd, 314 + unsigned int state, int depth); 315 + 316 + static void _genpd_rollback_parent_state(struct gpd_link *link, int depth) 317 + { 318 + struct generic_pm_domain *parent = link->parent; 319 + int parent_state; 320 + 321 + genpd_lock_nested(parent, depth + 1); 322 + 323 + parent_state = link->prev_performance_state; 324 + link->performance_state = parent_state; 325 + 326 + parent_state = _genpd_reeval_performance_state(parent, parent_state); 327 + if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { 328 + pr_err("%s: Failed to roll back to %d performance state\n", 329 + parent->name, parent_state); 330 + } 331 + 332 + genpd_unlock(parent); 333 + } 334 + 335 + static int _genpd_set_parent_state(struct generic_pm_domain *genpd, 336 + struct gpd_link *link, 337 + unsigned int state, int depth) 338 + { 339 + struct generic_pm_domain *parent = link->parent; 340 + int parent_state, ret; 341 + 342 + /* Find parent's performance state */ 343 + ret = genpd_xlate_performance_state(genpd, parent, state); 344 + if (unlikely(ret < 0)) 345 + return ret; 346 + 347 + parent_state = ret; 348 + 349 + genpd_lock_nested(parent, depth + 1); 350 + 351 + link->prev_performance_state = link->performance_state; 352 + link->performance_state = parent_state; 353 + 354 + parent_state = _genpd_reeval_performance_state(parent, parent_state); 355 + ret = _genpd_set_performance_state(parent, parent_state, depth + 1); 356 + if (ret) 357 + link->performance_state = link->prev_performance_state; 358 + 359 + genpd_unlock(parent); 360 + 361 + return ret; 362 + } 363 + 364 + static int _genpd_set_performance_state(struct generic_pm_domain *genpd, 314 365 unsigned int state, int depth) 315 366 { 316 - struct generic_pm_domain *parent; 317 - struct gpd_link *link; 318 - int parent_state, ret; 367 + struct gpd_link *link = NULL; 368 + int ret; 319 369 320 370 if (state == 
genpd->performance_state) 321 371 return 0; 322 372 323 - /* Propagate to parents of genpd */ 324 - list_for_each_entry(link, &genpd->child_links, child_node) { 325 - parent = link->parent; 326 - 327 - /* Find parent's performance state */ 328 - ret = genpd_xlate_performance_state(genpd, parent, state); 329 - if (unlikely(ret < 0)) 330 - goto err; 331 - 332 - parent_state = ret; 333 - 334 - genpd_lock_nested(parent, depth + 1); 335 - 336 - link->prev_performance_state = link->performance_state; 337 - link->performance_state = parent_state; 338 - parent_state = _genpd_reeval_performance_state(parent, 339 - parent_state); 340 - ret = _genpd_set_performance_state(parent, parent_state, depth + 1); 341 - if (ret) 342 - link->performance_state = link->prev_performance_state; 343 - 344 - genpd_unlock(parent); 345 - 346 - if (ret) 347 - goto err; 373 + /* When scaling up, propagate to parents first in normal order */ 374 + if (state > genpd->performance_state) { 375 + list_for_each_entry(link, &genpd->child_links, child_node) { 376 + ret = _genpd_set_parent_state(genpd, link, state, depth); 377 + if (ret) 378 + goto rollback_parents_up; 379 + } 348 380 } 349 381 350 382 if (genpd->set_performance_state) { 351 383 ret = genpd->set_performance_state(genpd, state); 352 - if (ret) 353 - goto err; 384 + if (ret) { 385 + if (link) 386 + goto rollback_parents_up; 387 + return ret; 388 + } 389 + } 390 + 391 + /* When scaling down, propagate to parents last in reverse order */ 392 + if (state < genpd->performance_state) { 393 + list_for_each_entry_reverse(link, &genpd->child_links, child_node) { 394 + ret = _genpd_set_parent_state(genpd, link, state, depth); 395 + if (ret) 396 + goto rollback_parents_down; 397 + } 354 398 } 355 399 356 400 genpd->performance_state = state; 357 401 return 0; 358 402 359 - err: 360 - /* Encountered an error, lets rollback */ 361 - list_for_each_entry_continue_reverse(link, &genpd->child_links, 362 - child_node) { 363 - parent = link->parent; 364 - 
365 - genpd_lock_nested(parent, depth + 1); 366 - 367 - parent_state = link->prev_performance_state; 368 - link->performance_state = parent_state; 369 - 370 - parent_state = _genpd_reeval_performance_state(parent, 371 - parent_state); 372 - if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { 373 - pr_err("%s: Failed to roll back to %d performance state\n", 374 - parent->name, parent_state); 375 - } 376 - 377 - genpd_unlock(parent); 378 - } 379 - 403 + rollback_parents_up: 404 + list_for_each_entry_continue_reverse(link, &genpd->child_links, child_node) 405 + _genpd_rollback_parent_state(link, depth); 406 + return ret; 407 + rollback_parents_down: 408 + list_for_each_entry_continue(link, &genpd->child_links, child_node) 409 + _genpd_rollback_parent_state(link, depth); 380 410 return ret; 381 411 } 382 412 ··· 1130 1100 return 0; 1131 1101 } 1132 1102 1103 + pr_info("genpd: Disabling unused power domains\n"); 1133 1104 mutex_lock(&gpd_list_lock); 1134 1105 1135 1106 list_for_each_entry(genpd, &gpd_list, gpd_list_node) ··· 2266 2235 * to be a valid pointer to struct generic_pm_domain. 2267 2236 */ 2268 2237 static struct generic_pm_domain *genpd_xlate_simple( 2269 - struct of_phandle_args *genpdspec, 2238 + const struct of_phandle_args *genpdspec, 2270 2239 void *data) 2271 2240 { 2272 2241 return data; ··· 2283 2252 * the genpd_onecell_data struct when registering the provider. 2284 2253 */ 2285 2254 static struct generic_pm_domain *genpd_xlate_onecell( 2286 - struct of_phandle_args *genpdspec, 2255 + const struct of_phandle_args *genpdspec, 2287 2256 void *data) 2288 2257 { 2289 2258 struct genpd_onecell_data *genpd_data = data; ··· 2526 2495 * on failure. 
2527 2496 */ 2528 2497 static struct generic_pm_domain *genpd_get_from_provider( 2529 - struct of_phandle_args *genpdspec) 2498 + const struct of_phandle_args *genpdspec) 2530 2499 { 2531 2500 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); 2532 2501 struct of_genpd_provider *provider; ··· 2557 2526 * Looks-up an I/O PM domain based upon phandle args provided and adds 2558 2527 * the device to the PM domain. Returns a negative error code on failure. 2559 2528 */ 2560 - int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) 2529 + int of_genpd_add_device(const struct of_phandle_args *genpdspec, struct device *dev) 2561 2530 { 2562 2531 struct generic_pm_domain *genpd; 2563 2532 int ret; ··· 2591 2560 * provided and adds the subdomain to the parent PM domain. Returns a 2592 2561 * negative error code on failure. 2593 2562 */ 2594 - int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, 2595 - struct of_phandle_args *subdomain_spec) 2563 + int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec, 2564 + const struct of_phandle_args *subdomain_spec) 2596 2565 { 2597 2566 struct generic_pm_domain *parent, *subdomain; 2598 2567 int ret; ··· 2629 2598 * provided and removes the subdomain from the parent PM domain. Returns a 2630 2599 * negative error code on failure. 2631 2600 */ 2632 - int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, 2633 - struct of_phandle_args *subdomain_spec) 2601 + int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec, 2602 + const struct of_phandle_args *subdomain_spec) 2634 2603 { 2635 2604 struct generic_pm_domain *parent, *subdomain; 2636 2605 int ret;
+6 -3
drivers/pmdomain/imx/imx8m-blk-ctrl.c
··· 258 258 259 259 domain->power_dev = 260 260 dev_pm_domain_attach_by_name(dev, data->gpc_name); 261 - if (IS_ERR(domain->power_dev)) { 262 - dev_err_probe(dev, PTR_ERR(domain->power_dev), 261 + if (IS_ERR_OR_NULL(domain->power_dev)) { 262 + if (!domain->power_dev) 263 + ret = -ENODEV; 264 + else 265 + ret = PTR_ERR(domain->power_dev); 266 + dev_err_probe(dev, ret, 263 267 "failed to attach power domain \"%s\"\n", 264 268 data->gpc_name); 265 - ret = PTR_ERR(domain->power_dev); 266 269 goto cleanup_pds; 267 270 } 268 271
+11 -8
drivers/pmdomain/imx/imx8mp-blk-ctrl.c
··· 55 55 const char *gpc_name; 56 56 }; 57 57 58 - #define DOMAIN_MAX_CLKS 2 58 + #define DOMAIN_MAX_CLKS 3 59 59 #define DOMAIN_MAX_PATHS 3 60 60 61 61 struct imx8mp_blk_ctrl_domain { ··· 457 457 }, 458 458 [IMX8MP_HDMIBLK_PD_LCDIF] = { 459 459 .name = "hdmiblk-lcdif", 460 - .clk_names = (const char *[]){ "axi", "apb" }, 461 - .num_clks = 2, 460 + .clk_names = (const char *[]){ "axi", "apb", "fdcc" }, 461 + .num_clks = 3, 462 462 .gpc_name = "lcdif", 463 463 .path_names = (const char *[]){"lcdif-hdmi"}, 464 464 .num_paths = 1, ··· 483 483 }, 484 484 [IMX8MP_HDMIBLK_PD_HDMI_TX] = { 485 485 .name = "hdmiblk-hdmi-tx", 486 - .clk_names = (const char *[]){ "apb", "ref_266m" }, 487 - .num_clks = 2, 486 + .clk_names = (const char *[]){ "apb", "ref_266m", "fdcc" }, 487 + .num_clks = 3, 488 488 .gpc_name = "hdmi-tx", 489 489 }, 490 490 [IMX8MP_HDMIBLK_PD_HDMI_TX_PHY] = { ··· 687 687 688 688 domain->power_dev = 689 689 dev_pm_domain_attach_by_name(dev, data->gpc_name); 690 - if (IS_ERR(domain->power_dev)) { 691 - dev_err_probe(dev, PTR_ERR(domain->power_dev), 690 + if (IS_ERR_OR_NULL(domain->power_dev)) { 691 + if (!domain->power_dev) 692 + ret = -ENODEV; 693 + else 694 + ret = PTR_ERR(domain->power_dev); 695 + dev_err_probe(dev, ret, 692 696 "failed to attach power domain %s\n", 693 697 data->gpc_name); 694 - ret = PTR_ERR(domain->power_dev); 695 698 goto cleanup_pds; 696 699 } 697 700
+1 -1
drivers/pmdomain/imx/scu-pd.c
··· 393 393 return imx_sc_pd_power(domain, false); 394 394 } 395 395 396 - static struct generic_pm_domain *imx_scu_pd_xlate(struct of_phandle_args *spec, 396 + static struct generic_pm_domain *imx_scu_pd_xlate(const struct of_phandle_args *spec, 397 397 void *data) 398 398 { 399 399 struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+1 -3
drivers/pmdomain/mediatek/mtk-scpsys.c
··· 425 425 bool bus_prot_reg_update) 426 426 { 427 427 struct genpd_onecell_data *pd_data; 428 - struct resource *res; 429 428 int i, j; 430 429 struct scp *scp; 431 430 struct clk *clk[CLK_MAX]; ··· 440 441 441 442 scp->dev = &pdev->dev; 442 443 443 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 444 - scp->base = devm_ioremap_resource(&pdev->dev, res); 444 + scp->base = devm_platform_ioremap_resource(pdev, 0); 445 445 if (IS_ERR(scp->base)) 446 446 return ERR_CAST(scp->base); 447 447
+89 -7
drivers/pmdomain/qcom/rpmpd.c
··· 16 16 17 17 #define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd) 18 18 19 + static struct qcom_smd_rpm *rpmpd_smd_rpm; 20 + 19 21 /* Resource types: 20 22 * RPMPD_X is X encoded as a little-endian, lower-case, ASCII string */ 21 23 #define RPMPD_SMPA 0x61706d73 ··· 56 54 bool enabled; 57 55 const int res_type; 58 56 const int res_id; 59 - struct qcom_smd_rpm *rpm; 60 57 unsigned int max_state; 61 58 __le32 key; 62 59 bool state_synced; ··· 227 226 .key = KEY_FLOOR_LEVEL, 228 227 }; 229 228 229 + static struct rpmpd cx_s2b_corner_ao; 230 + static struct rpmpd cx_s2b_corner = { 231 + .pd = { .name = "cx", }, 232 + .peer = &cx_s2b_corner_ao, 233 + .res_type = RPMPD_SMPB, 234 + .res_id = 2, 235 + .key = KEY_CORNER, 236 + }; 237 + 238 + static struct rpmpd cx_s2b_corner_ao = { 239 + .pd = { .name = "cx_ao", }, 240 + .peer = &cx_s2b_corner, 241 + .active_only = true, 242 + .res_type = RPMPD_SMPB, 243 + .res_id = 2, 244 + .key = KEY_CORNER, 245 + }; 246 + 247 + static struct rpmpd cx_s2b_vfc = { 248 + .pd = { .name = "cx_vfc", }, 249 + .res_type = RPMPD_SMPB, 250 + .res_id = 2, 251 + .key = KEY_FLOOR_CORNER, 252 + }; 253 + 230 254 /* G(F)X */ 255 + static struct rpmpd gfx_s7a_corner = { 256 + .pd = { .name = "gfx", }, 257 + .res_type = RPMPD_SMPA, 258 + .res_id = 7, 259 + .key = KEY_CORNER, 260 + }; 261 + 262 + static struct rpmpd gfx_s7a_vfc = { 263 + .pd = { .name = "gfx_vfc", }, 264 + .res_type = RPMPD_SMPA, 265 + .res_id = 7, 266 + .key = KEY_FLOOR_CORNER, 267 + }; 268 + 231 269 static struct rpmpd gfx_s2b_corner = { 232 270 .pd = { .name = "gfx", }, 233 271 .res_type = RPMPD_SMPB, ··· 278 238 .pd = { .name = "gfx_vfc", }, 279 239 .res_type = RPMPD_SMPB, 280 240 .res_id = 2, 241 + .key = KEY_FLOOR_CORNER, 242 + }; 243 + 244 + static struct rpmpd gfx_s4b_corner = { 245 + .pd = { .name = "gfx", }, 246 + .res_type = RPMPD_SMPB, 247 + .res_id = 4, 248 + .key = KEY_CORNER, 249 + }; 250 + 251 + static struct rpmpd gfx_s4b_vfc = { 252 + .pd = { .name = 
"gfx_vfc", }, 253 + .res_type = RPMPD_SMPB, 254 + .res_id = 4, 281 255 .key = KEY_FLOOR_CORNER, 282 256 }; 283 257 ··· 717 663 .max_state = RPM_SMD_LEVEL_TURBO, 718 664 }; 719 665 666 + static struct rpmpd *msm8974_rpmpds[] = { 667 + [MSM8974_VDDCX] = &cx_s2b_corner, 668 + [MSM8974_VDDCX_AO] = &cx_s2b_corner_ao, 669 + [MSM8974_VDDCX_VFC] = &cx_s2b_vfc, 670 + [MSM8974_VDDGFX] = &gfx_s4b_corner, 671 + [MSM8974_VDDGFX_VFC] = &gfx_s4b_vfc, 672 + }; 673 + 674 + static const struct rpmpd_desc msm8974_desc = { 675 + .rpmpds = msm8974_rpmpds, 676 + .num_pds = ARRAY_SIZE(msm8974_rpmpds), 677 + .max_state = MAX_CORNER_RPMPD_STATE, 678 + }; 679 + 680 + static struct rpmpd *msm8974pro_pma8084_rpmpds[] = { 681 + [MSM8974_VDDCX] = &cx_s2a_corner, 682 + [MSM8974_VDDCX_AO] = &cx_s2a_corner_ao, 683 + [MSM8974_VDDCX_VFC] = &cx_s2a_vfc, 684 + [MSM8974_VDDGFX] = &gfx_s7a_corner, 685 + [MSM8974_VDDGFX_VFC] = &gfx_s7a_vfc, 686 + }; 687 + 688 + static const struct rpmpd_desc msm8974pro_pma8084_desc = { 689 + .rpmpds = msm8974pro_pma8084_rpmpds, 690 + .num_pds = ARRAY_SIZE(msm8974pro_pma8084_rpmpds), 691 + .max_state = MAX_CORNER_RPMPD_STATE, 692 + }; 693 + 720 694 static struct rpmpd *msm8976_rpmpds[] = { 721 695 [MSM8976_VDDCX] = &cx_s2a_lvl, 722 696 [MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao, ··· 938 856 { .compatible = "qcom,msm8917-rpmpd", .data = &msm8917_desc }, 939 857 { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc }, 940 858 { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc }, 859 + { .compatible = "qcom,msm8974-rpmpd", .data = &msm8974_desc }, 860 + { .compatible = "qcom,msm8974pro-pma8084-rpmpd", .data = &msm8974pro_pma8084_desc }, 941 861 { .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc }, 942 862 { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc }, 943 863 { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, ··· 963 879 .value = cpu_to_le32(enable), 964 880 }; 965 881 966 - return qcom_rpm_smd_write(pd->rpm, 
QCOM_SMD_RPM_ACTIVE_STATE, 882 + return qcom_rpm_smd_write(rpmpd_smd_rpm, QCOM_SMD_RPM_ACTIVE_STATE, 967 883 pd->res_type, pd->res_id, &req, sizeof(req)); 968 884 } 969 885 ··· 975 891 .value = cpu_to_le32(corner), 976 892 }; 977 893 978 - return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id, 894 + return qcom_rpm_smd_write(rpmpd_smd_rpm, state, pd->res_type, pd->res_id, 979 895 &req, sizeof(req)); 980 896 }; 981 897 ··· 1088 1004 int i; 1089 1005 size_t num; 1090 1006 struct genpd_onecell_data *data; 1091 - struct qcom_smd_rpm *rpm; 1092 1007 struct rpmpd **rpmpds; 1093 1008 const struct rpmpd_desc *desc; 1094 1009 1095 - rpm = dev_get_drvdata(pdev->dev.parent); 1096 - if (!rpm) { 1010 + rpmpd_smd_rpm = dev_get_drvdata(pdev->dev.parent); 1011 + if (!rpmpd_smd_rpm) { 1097 1012 dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); 1098 1013 return -ENODEV; 1099 1014 } ··· 1122 1039 continue; 1123 1040 } 1124 1041 1125 - rpmpds[i]->rpm = rpm; 1126 1042 rpmpds[i]->max_state = desc->max_state; 1127 1043 rpmpds[i]->pd.power_off = rpmpd_power_off; 1128 1044 rpmpds[i]->pd.power_on = rpmpd_power_on;
+4
drivers/pmdomain/renesas/Kconfig
··· 71 71 bool "System Controller support for R-Car V4H" if COMPILE_TEST 72 72 select SYSC_RCAR_GEN4 73 73 74 + config SYSC_R8A779H0 75 + bool "System Controller support for R-Car V4M" if COMPILE_TEST 76 + select SYSC_RCAR_GEN4 77 + 74 78 config SYSC_RMOBILE 75 79 bool "System Controller support for R-Mobile" if COMPILE_TEST 76 80
+1
drivers/pmdomain/renesas/Makefile
··· 24 24 obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o 25 25 obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o 26 26 obj-$(CONFIG_SYSC_R8A779G0) += r8a779g0-sysc.o 27 + obj-$(CONFIG_SYSC_R8A779H0) += r8a779h0-sysc.o 27 28 # Family 28 29 obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o 29 30 obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o
-12
drivers/pmdomain/renesas/r8a779a0-sysc.c
··· 5 5 * Copyright (C) 2020 Renesas Electronics Corp. 6 6 */ 7 7 8 - #include <linux/bits.h> 9 - #include <linux/clk/renesas.h> 10 - #include <linux/delay.h> 11 - #include <linux/err.h> 12 - #include <linux/io.h> 13 - #include <linux/iopoll.h> 14 8 #include <linux/kernel.h> 15 - #include <linux/mm.h> 16 - #include <linux/of_address.h> 17 - #include <linux/pm_domain.h> 18 - #include <linux/slab.h> 19 - #include <linux/spinlock.h> 20 - #include <linux/types.h> 21 9 22 10 #include <dt-bindings/power/r8a779a0-sysc.h> 23 11
-12
drivers/pmdomain/renesas/r8a779f0-sysc.c
··· 5 5 * Copyright (C) 2021 Renesas Electronics Corp. 6 6 */ 7 7 8 - #include <linux/bits.h> 9 - #include <linux/clk/renesas.h> 10 - #include <linux/delay.h> 11 - #include <linux/err.h> 12 - #include <linux/io.h> 13 - #include <linux/iopoll.h> 14 8 #include <linux/kernel.h> 15 - #include <linux/mm.h> 16 - #include <linux/of_address.h> 17 - #include <linux/pm_domain.h> 18 - #include <linux/slab.h> 19 - #include <linux/spinlock.h> 20 - #include <linux/types.h> 21 9 22 10 #include <dt-bindings/power/r8a779f0-sysc.h> 23 11
-12
drivers/pmdomain/renesas/r8a779g0-sysc.c
··· 5 5 * Copyright (C) 2022 Renesas Electronics Corp. 6 6 */ 7 7 8 - #include <linux/bits.h> 9 - #include <linux/clk/renesas.h> 10 - #include <linux/delay.h> 11 - #include <linux/err.h> 12 - #include <linux/io.h> 13 - #include <linux/iopoll.h> 14 8 #include <linux/kernel.h> 15 - #include <linux/mm.h> 16 - #include <linux/of_address.h> 17 - #include <linux/pm_domain.h> 18 - #include <linux/slab.h> 19 - #include <linux/spinlock.h> 20 - #include <linux/types.h> 21 9 22 10 #include <dt-bindings/power/r8a779g0-sysc.h> 23 11
+54
drivers/pmdomain/renesas/r8a779h0-sysc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Renesas R-Car V4M System Controller 4 + * 5 + * Copyright (C) 2023 Renesas Electronics Corp 6 + */ 7 + 8 + #include <linux/kernel.h> 9 + 10 + #include <dt-bindings/power/renesas,r8a779h0-sysc.h> 11 + 12 + #include "rcar-gen4-sysc.h" 13 + 14 + static struct rcar_gen4_sysc_area r8a779h0_areas[] __initdata = { 15 + { "always-on", R8A779H0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, 16 + { "c4", R8A779H0_PD_C4, R8A779H0_PD_ALWAYS_ON }, 17 + { "a2e0d0", R8A779H0_PD_A2E0D0, R8A779H0_PD_C4, PD_SCU }, 18 + { "a1e0d0c0", R8A779H0_PD_A1E0D0C0, R8A779H0_PD_A2E0D0, PD_CPU_NOCR }, 19 + { "a1e0d0c1", R8A779H0_PD_A1E0D0C1, R8A779H0_PD_A2E0D0, PD_CPU_NOCR }, 20 + { "a1e0d0c2", R8A779H0_PD_A1E0D0C2, R8A779H0_PD_A2E0D0, PD_CPU_NOCR }, 21 + { "a1e0d0c3", R8A779H0_PD_A1E0D0C3, R8A779H0_PD_A2E0D0, PD_CPU_NOCR }, 22 + { "a3cr0", R8A779H0_PD_A3CR0, R8A779H0_PD_ALWAYS_ON, PD_CPU_NOCR }, 23 + { "a3cr1", R8A779H0_PD_A3CR1, R8A779H0_PD_ALWAYS_ON, PD_CPU_NOCR }, 24 + { "a3cr2", R8A779H0_PD_A3CR2, R8A779H0_PD_ALWAYS_ON, PD_CPU_NOCR }, 25 + { "a33dga", R8A779H0_PD_A33DGA, R8A779H0_PD_C4 }, 26 + { "a23dgb", R8A779H0_PD_A23DGB, R8A779H0_PD_A33DGA }, 27 + { "a3vip0", R8A779H0_PD_A3VIP0, R8A779H0_PD_C4 }, 28 + { "a3vip2", R8A779H0_PD_A3VIP2, R8A779H0_PD_C4 }, 29 + { "a3dul", R8A779H0_PD_A3DUL, R8A779H0_PD_C4 }, 30 + { "a3isp0", R8A779H0_PD_A3ISP0, R8A779H0_PD_C4 }, 31 + { "a2cn0", R8A779H0_PD_A2CN0, R8A779H0_PD_C4 }, 32 + { "a1cn0", R8A779H0_PD_A1CN0, R8A779H0_PD_A2CN0 }, 33 + { "a1dsp0", R8A779H0_PD_A1DSP0, R8A779H0_PD_A2CN0 }, 34 + { "a1dsp1", R8A779H0_PD_A1DSP1, R8A779H0_PD_A2CN0 }, 35 + { "a2imp01", R8A779H0_PD_A2IMP01, R8A779H0_PD_C4 }, 36 + { "a2psc", R8A779H0_PD_A2PSC, R8A779H0_PD_C4 }, 37 + { "a2dma", R8A779H0_PD_A2DMA, R8A779H0_PD_C4 }, 38 + { "a2cv0", R8A779H0_PD_A2CV0, R8A779H0_PD_C4 }, 39 + { "a2cv1", R8A779H0_PD_A2CV1, R8A779H0_PD_C4 }, 40 + { "a2cv2", R8A779H0_PD_A2CV2, R8A779H0_PD_C4 }, 41 + { "a2cv3", R8A779H0_PD_A2CV3, R8A779H0_PD_C4 
}, 42 + { "a3imr0", R8A779H0_PD_A3IMR0, R8A779H0_PD_C4 }, 43 + { "a3imr1", R8A779H0_PD_A3IMR1, R8A779H0_PD_C4 }, 44 + { "a3imr2", R8A779H0_PD_A3IMR2, R8A779H0_PD_C4 }, 45 + { "a3imr3", R8A779H0_PD_A3IMR3, R8A779H0_PD_C4 }, 46 + { "a3vc", R8A779H0_PD_A3VC, R8A779H0_PD_C4 }, 47 + { "a3pci", R8A779H0_PD_A3PCI, R8A779H0_PD_C4 }, 48 + { "a2pciphy", R8A779H0_PD_A2PCIPHY, R8A779H0_PD_A3PCI }, 49 + }; 50 + 51 + const struct rcar_gen4_sysc_info r8a779h0_sysc_info __initconst = { 52 + .areas = r8a779h0_areas, 53 + .num_areas = ARRAY_SIZE(r8a779h0_areas), 54 + };
+10 -7
drivers/pmdomain/renesas/rcar-gen4-sysc.c
··· 50 50 #define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */ 51 51 52 52 #define SYSCSR_TIMEOUT 10000 53 - #define SYSCSR_DELAY_US 10 53 + #define SYSCSR_DELAY_US 1 54 54 55 - #define PDRESR_RETRIES 1000 56 - #define PDRESR_DELAY_US 10 55 + #define PDRESR_RETRIES 10000 56 + #define PDRESR_DELAY_US 1 57 57 58 - #define SYSCISR_TIMEOUT 10000 59 - #define SYSCISR_DELAY_US 10 58 + #define SYSCISCR_TIMEOUT 10000 59 + #define SYSCISCR_DELAY_US 1 60 60 61 61 #define RCAR_GEN4_PD_ALWAYS_ON 64 62 62 #define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32) ··· 97 97 98 98 ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx), 99 99 val, !(val & isr_mask), 100 - SYSCISR_DELAY_US, SYSCISR_TIMEOUT); 100 + SYSCISCR_DELAY_US, SYSCISCR_TIMEOUT); 101 101 if (ret < 0) { 102 102 pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__); 103 103 return -EIO; ··· 157 157 /* Wait until the power shutoff or resume request has completed * */ 158 158 ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx), 159 159 val, (val & isr_mask), 160 - SYSCISR_DELAY_US, SYSCISR_TIMEOUT); 160 + SYSCISCR_DELAY_US, SYSCISCR_TIMEOUT); 161 161 if (ret < 0) { 162 162 ret = -EIO; 163 163 goto out; ··· 284 284 #endif 285 285 #ifdef CONFIG_SYSC_R8A779G0 286 286 { .compatible = "renesas,r8a779g0-sysc", .data = &r8a779g0_sysc_info }, 287 + #endif 288 + #ifdef CONFIG_SYSC_R8A779H0 289 + { .compatible = "renesas,r8a779h0-sysc", .data = &r8a779h0_sysc_info }, 287 290 #endif 288 291 { /* sentinel */ } 289 292 };
+1
drivers/pmdomain/renesas/rcar-gen4-sysc.h
··· 40 40 extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info; 41 41 extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info; 42 42 extern const struct rcar_gen4_sysc_info r8a779g0_sysc_info; 43 + extern const struct rcar_gen4_sysc_info r8a779h0_sysc_info; 43 44 44 45 #endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
+2 -2
drivers/pmdomain/renesas/rcar-sysc.c
··· 45 45 #define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */ 46 46 47 47 48 - #define SYSCSR_TIMEOUT 100 48 + #define SYSCSR_TIMEOUT 1000 49 49 #define SYSCSR_DELAY_US 1 50 50 51 - #define PWRER_RETRIES 100 51 + #define PWRER_RETRIES 1000 52 52 #define PWRER_DELAY_US 1 53 53 54 54 #define SYSCISR_TIMEOUT 1000
+1 -1
drivers/pmdomain/tegra/powergate-bpmp.c
··· 305 305 } 306 306 307 307 static struct generic_pm_domain * 308 - tegra_powergate_xlate(struct of_phandle_args *spec, void *data) 308 + tegra_powergate_xlate(const struct of_phandle_args *spec, void *data) 309 309 { 310 310 struct generic_pm_domain *domain = ERR_PTR(-ENOENT); 311 311 struct genpd_onecell_data *genpd = data;
+2
drivers/pmdomain/ti/omap_prm.c
··· 695 695 data = prm->data; 696 696 name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s", 697 697 data->name); 698 + if (!name) 699 + return -ENOMEM; 698 700 699 701 prmd->dev = dev; 700 702 prmd->prm = prm;
+1 -1
drivers/pmdomain/ti/ti_sci_pm_domains.c
··· 85 85 * @data: genpd core data for all the powerdomains on the device 86 86 */ 87 87 static struct generic_pm_domain *ti_sci_pd_xlate( 88 - struct of_phandle_args *genpdspec, 88 + const struct of_phandle_args *genpdspec, 89 89 void *data) 90 90 { 91 91 struct genpd_onecell_data *genpd_data = data;
+1 -1
drivers/pmdomain/xilinx/zynqmp-pm-domains.c
··· 210 210 } 211 211 212 212 static struct generic_pm_domain *zynqmp_gpd_xlate 213 - (struct of_phandle_args *genpdspec, void *data) 213 + (const struct of_phandle_args *genpdspec, void *data) 214 214 { 215 215 struct genpd_onecell_data *genpd_data = data; 216 216 unsigned int i, idx = genpdspec->args[0];
+9 -73
drivers/remoteproc/imx_dsp_rproc.c
··· 103 103 * @tx_ch: mailbox tx channel handle 104 104 * @rx_ch: mailbox rx channel handle 105 105 * @rxdb_ch: mailbox rx doorbell channel handle 106 - * @pd_dev: power domain device 107 - * @pd_dev_link: power domain device link 106 + * @pd_list: power domain list 108 107 * @ipc_handle: System Control Unit ipc handle 109 108 * @rproc_work: work for processing virtio interrupts 110 109 * @pm_comp: completion primitive to sync for suspend response 111 - * @num_domains: power domain number 112 110 * @flags: control flags 113 111 */ 114 112 struct imx_dsp_rproc { ··· 119 121 struct mbox_chan *tx_ch; 120 122 struct mbox_chan *rx_ch; 121 123 struct mbox_chan *rxdb_ch; 122 - struct device **pd_dev; 123 - struct device_link **pd_dev_link; 124 + struct dev_pm_domain_list *pd_list; 124 125 struct imx_sc_ipc *ipc_handle; 125 126 struct work_struct rproc_work; 126 127 struct completion pm_comp; 127 - int num_domains; 128 128 u32 flags; 129 129 }; 130 130 ··· 951 955 static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv) 952 956 { 953 957 struct device *dev = priv->rproc->dev.parent; 954 - int ret, i; 958 + int ret; 955 959 956 - priv->num_domains = of_count_phandle_with_args(dev->of_node, 957 - "power-domains", 958 - "#power-domain-cells"); 959 - 960 - /* If only one domain, then no need to link the device */ 961 - if (priv->num_domains <= 1) 960 + /* A single PM domain is already attached. 
*/ 961 + if (dev->pm_domain) 962 962 return 0; 963 963 964 - priv->pd_dev = devm_kmalloc_array(dev, priv->num_domains, 965 - sizeof(*priv->pd_dev), 966 - GFP_KERNEL); 967 - if (!priv->pd_dev) 968 - return -ENOMEM; 969 - 970 - priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_domains, 971 - sizeof(*priv->pd_dev_link), 972 - GFP_KERNEL); 973 - if (!priv->pd_dev_link) 974 - return -ENOMEM; 975 - 976 - for (i = 0; i < priv->num_domains; i++) { 977 - priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); 978 - if (IS_ERR(priv->pd_dev[i])) { 979 - ret = PTR_ERR(priv->pd_dev[i]); 980 - goto detach_pm; 981 - } 982 - 983 - /* 984 - * device_link_add will check priv->pd_dev[i], if it is 985 - * NULL, then will break. 986 - */ 987 - priv->pd_dev_link[i] = device_link_add(dev, 988 - priv->pd_dev[i], 989 - DL_FLAG_STATELESS | 990 - DL_FLAG_PM_RUNTIME); 991 - if (!priv->pd_dev_link[i]) { 992 - dev_pm_domain_detach(priv->pd_dev[i], false); 993 - ret = -EINVAL; 994 - goto detach_pm; 995 - } 996 - } 997 - 998 - return 0; 999 - 1000 - detach_pm: 1001 - while (--i >= 0) { 1002 - device_link_del(priv->pd_dev_link[i]); 1003 - dev_pm_domain_detach(priv->pd_dev[i], false); 1004 - } 1005 - 1006 - return ret; 1007 - } 1008 - 1009 - static int imx_dsp_detach_pm_domains(struct imx_dsp_rproc *priv) 1010 - { 1011 - int i; 1012 - 1013 - if (priv->num_domains <= 1) 1014 - return 0; 1015 - 1016 - for (i = 0; i < priv->num_domains; i++) { 1017 - device_link_del(priv->pd_dev_link[i]); 1018 - dev_pm_domain_detach(priv->pd_dev[i], false); 1019 - } 1020 - 1021 - return 0; 964 + ret = dev_pm_domain_attach_list(dev, NULL, &priv->pd_list); 965 + return ret < 0 ? 
ret : 0; 1022 966 } 1023 967 1024 968 /** ··· 1090 1154 return 0; 1091 1155 1092 1156 err_detach_domains: 1093 - imx_dsp_detach_pm_domains(priv); 1157 + dev_pm_domain_detach_list(priv->pd_list); 1094 1158 err_put_rproc: 1095 1159 rproc_free(rproc); 1096 1160 ··· 1104 1168 1105 1169 pm_runtime_disable(&pdev->dev); 1106 1170 rproc_del(rproc); 1107 - imx_dsp_detach_pm_domains(priv); 1171 + dev_pm_domain_detach_list(priv->pd_list); 1108 1172 rproc_free(rproc); 1109 1173 } 1110 1174
+9 -64
drivers/remoteproc/imx_rproc.c
··· 92 92 93 93 static int imx_rproc_xtr_mbox_init(struct rproc *rproc); 94 94 static void imx_rproc_free_mbox(struct rproc *rproc); 95 - static int imx_rproc_detach_pd(struct rproc *rproc); 96 95 97 96 struct imx_rproc { 98 97 struct device *dev; ··· 112 113 u32 rproc_pt; /* partition id */ 113 114 u32 rsrc_id; /* resource id */ 114 115 u32 entry; /* cpu start address */ 115 - int num_pd; 116 116 u32 core_index; 117 - struct device **pd_dev; 118 - struct device_link **pd_dev_link; 117 + struct dev_pm_domain_list *pd_list; 119 118 }; 120 119 121 120 static const struct imx_rproc_att imx_rproc_att_imx93[] = { ··· 850 853 return; 851 854 852 855 if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) { 853 - imx_rproc_detach_pd(rproc); 856 + dev_pm_domain_detach_list(priv->pd_list); 854 857 return; 855 858 } 856 859 ··· 877 880 static int imx_rproc_attach_pd(struct imx_rproc *priv) 878 881 { 879 882 struct device *dev = priv->dev; 880 - int ret, i; 883 + int ret; 884 + struct dev_pm_domain_attach_data pd_data = { 885 + .pd_flags = PD_FLAG_DEV_LINK_ON, 886 + }; 881 887 882 888 /* 883 889 * If there is only one power-domain entry, the platform driver framework 884 890 * will handle it, no need handle it in this driver. 
885 891 */ 886 - priv->num_pd = of_count_phandle_with_args(dev->of_node, "power-domains", 887 - "#power-domain-cells"); 888 - if (priv->num_pd <= 1) 892 + if (dev->pm_domain) 889 893 return 0; 890 894 891 - priv->pd_dev = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev), GFP_KERNEL); 892 - if (!priv->pd_dev) 893 - return -ENOMEM; 894 - 895 - priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_pd, sizeof(*priv->pd_dev_link), 896 - GFP_KERNEL); 897 - 898 - if (!priv->pd_dev_link) 899 - return -ENOMEM; 900 - 901 - for (i = 0; i < priv->num_pd; i++) { 902 - priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i); 903 - if (IS_ERR(priv->pd_dev[i])) { 904 - ret = PTR_ERR(priv->pd_dev[i]); 905 - goto detach_pd; 906 - } 907 - 908 - priv->pd_dev_link[i] = device_link_add(dev, priv->pd_dev[i], DL_FLAG_STATELESS | 909 - DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); 910 - if (!priv->pd_dev_link[i]) { 911 - dev_pm_domain_detach(priv->pd_dev[i], false); 912 - ret = -EINVAL; 913 - goto detach_pd; 914 - } 915 - } 916 - 917 - return 0; 918 - 919 - detach_pd: 920 - while (--i >= 0) { 921 - device_link_del(priv->pd_dev_link[i]); 922 - dev_pm_domain_detach(priv->pd_dev[i], false); 923 - } 924 - 925 - return ret; 926 - } 927 - 928 - static int imx_rproc_detach_pd(struct rproc *rproc) 929 - { 930 - struct imx_rproc *priv = rproc->priv; 931 - int i; 932 - 933 - /* 934 - * If there is only one power-domain entry, the platform driver framework 935 - * will handle it, no need handle it in this driver. 936 - */ 937 - if (priv->num_pd <= 1) 938 - return 0; 939 - 940 - for (i = 0; i < priv->num_pd; i++) { 941 - device_link_del(priv->pd_dev_link[i]); 942 - dev_pm_domain_detach(priv->pd_dev[i], false); 943 - } 944 - 945 - return 0; 895 + ret = dev_pm_domain_attach_list(dev, &pd_data, &priv->pd_list); 896 + return ret < 0 ? ret : 0; 946 897 } 947 898 948 899 static int imx_rproc_detect_mode(struct imx_rproc *priv)
+73 -87
drivers/remoteproc/qcom_q6v5_adsp.c
··· 55 55 #define QDSP6SS_CORE_CBCR 0x20 56 56 #define QDSP6SS_SLEEP_CBCR 0x3c 57 57 58 - #define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3 59 - 60 58 #define LPASS_BOOT_CORE_START BIT(0) 61 59 #define LPASS_BOOT_CMD_START BIT(0) 62 60 #define LPASS_EFUSE_Q6SS_EVB_SEL 0x0 ··· 72 74 73 75 const char **clk_ids; 74 76 int num_clks; 75 - const char **proxy_pd_names; 77 + const char **pd_names; 78 + unsigned int num_pds; 76 79 const char *load_state; 77 80 }; 78 81 ··· 109 110 size_t mem_size; 110 111 bool has_iommu; 111 112 112 - struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX]; 113 - size_t proxy_pd_count; 113 + struct dev_pm_domain_list *pd_list; 114 114 115 115 struct qcom_rproc_glink glink_subdev; 116 116 struct qcom_rproc_ssr ssr_subdev; ··· 118 120 int (*shutdown)(struct qcom_adsp *adsp); 119 121 }; 120 122 121 - static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp, 122 - const char **pd_names) 123 + static int qcom_rproc_pds_attach(struct qcom_adsp *adsp, const char **pd_names, 124 + unsigned int num_pds) 123 125 { 124 - struct device **devs = adsp->proxy_pds; 125 - size_t num_pds = 0; 126 + struct device *dev = adsp->dev; 127 + struct dev_pm_domain_attach_data pd_data = { 128 + .pd_names = pd_names, 129 + .num_pd_names = num_pds, 130 + }; 126 131 int ret; 127 - int i; 132 + 133 + /* Handle single power domain */ 134 + if (dev->pm_domain) 135 + goto out; 128 136 129 137 if (!pd_names) 130 138 return 0; 131 139 132 - /* Handle single power domain */ 133 - if (dev->pm_domain) { 134 - devs[0] = dev; 135 - pm_runtime_enable(dev); 136 - return 1; 137 - } 140 + ret = dev_pm_domain_attach_list(dev, &pd_data, &adsp->pd_list); 141 + if (ret < 0) 142 + return ret; 138 143 139 - while (pd_names[num_pds]) 140 - num_pds++; 141 - 142 - if (num_pds > ARRAY_SIZE(adsp->proxy_pds)) 143 - return -E2BIG; 144 - 145 - for (i = 0; i < num_pds; i++) { 146 - devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); 147 - if (IS_ERR_OR_NULL(devs[i])) { 148 - ret = 
PTR_ERR(devs[i]) ? : -ENODATA; 149 - goto unroll_attach; 150 - } 151 - } 152 - 153 - return num_pds; 154 - 155 - unroll_attach: 156 - for (i--; i >= 0; i--) 157 - dev_pm_domain_detach(devs[i], false); 158 - 159 - return ret; 144 + out: 145 + pm_runtime_enable(dev); 146 + return 0; 160 147 } 161 148 162 - static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds, 163 - size_t pd_count) 149 + static void qcom_rproc_pds_detach(struct qcom_adsp *adsp) 164 150 { 165 151 struct device *dev = adsp->dev; 166 - int i; 152 + struct dev_pm_domain_list *pds = adsp->pd_list; 167 153 168 - /* Handle single power domain */ 169 - if (dev->pm_domain && pd_count) { 170 - pm_runtime_disable(dev); 171 - return; 172 - } 154 + dev_pm_domain_detach_list(pds); 173 155 174 - for (i = 0; i < pd_count; i++) 175 - dev_pm_domain_detach(pds[i], false); 156 + if (dev->pm_domain || pds) 157 + pm_runtime_disable(adsp->dev); 176 158 } 177 159 178 - static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds, 179 - size_t pd_count) 160 + static int qcom_rproc_pds_enable(struct qcom_adsp *adsp) 180 161 { 181 - int ret; 182 - int i; 162 + struct device *dev = adsp->dev; 163 + struct dev_pm_domain_list *pds = adsp->pd_list; 164 + int ret, i = 0; 183 165 184 - for (i = 0; i < pd_count; i++) { 185 - dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 186 - ret = pm_runtime_resume_and_get(pds[i]); 187 - if (ret < 0) { 188 - dev_pm_genpd_set_performance_state(pds[i], 0); 189 - goto unroll_pd_votes; 190 - } 166 + if (!dev->pm_domain && !pds) 167 + return 0; 168 + 169 + if (dev->pm_domain) 170 + dev_pm_genpd_set_performance_state(dev, INT_MAX); 171 + 172 + while (pds && i < pds->num_pds) { 173 + dev_pm_genpd_set_performance_state(pds->pd_devs[i], INT_MAX); 174 + i++; 191 175 } 192 176 193 - return 0; 177 + ret = pm_runtime_resume_and_get(dev); 178 + if (ret < 0) { 179 + while (pds && i > 0) { 180 + i--; 181 + dev_pm_genpd_set_performance_state(pds->pd_devs[i], 0); 182 + 
} 194 183 195 - unroll_pd_votes: 196 - for (i--; i >= 0; i--) { 197 - dev_pm_genpd_set_performance_state(pds[i], 0); 198 - pm_runtime_put(pds[i]); 184 + if (dev->pm_domain) 185 + dev_pm_genpd_set_performance_state(dev, 0); 199 186 } 200 187 201 188 return ret; 202 189 } 203 190 204 - static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds, 205 - size_t pd_count) 191 + static void qcom_rproc_pds_disable(struct qcom_adsp *adsp) 206 192 { 207 - int i; 193 + struct device *dev = adsp->dev; 194 + struct dev_pm_domain_list *pds = adsp->pd_list; 195 + int i = 0; 208 196 209 - for (i = 0; i < pd_count; i++) { 210 - dev_pm_genpd_set_performance_state(pds[i], 0); 211 - pm_runtime_put(pds[i]); 197 + if (!dev->pm_domain && !pds) 198 + return; 199 + 200 + if (dev->pm_domain) 201 + dev_pm_genpd_set_performance_state(dev, 0); 202 + 203 + while (pds && i < pds->num_pds) { 204 + dev_pm_genpd_set_performance_state(pds->pd_devs[i], 0); 205 + i++; 212 206 } 207 + 208 + pm_runtime_put(dev); 213 209 } 214 210 215 211 static int qcom_wpss_shutdown(struct qcom_adsp *adsp) ··· 389 397 if (ret) 390 398 goto adsp_smmu_unmap; 391 399 392 - ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds, 393 - adsp->proxy_pd_count); 400 + ret = qcom_rproc_pds_enable(adsp); 394 401 if (ret < 0) 395 402 goto disable_xo_clk; 396 403 ··· 439 448 disable_adsp_clks: 440 449 clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks); 441 450 disable_power_domain: 442 - qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); 451 + qcom_rproc_pds_disable(adsp); 443 452 disable_xo_clk: 444 453 clk_disable_unprepare(adsp->xo); 445 454 adsp_smmu_unmap: ··· 455 464 struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5); 456 465 457 466 clk_disable_unprepare(adsp->xo); 458 - qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); 467 + qcom_rproc_pds_disable(adsp); 459 468 } 460 469 461 470 static int adsp_stop(struct rproc *rproc) ··· 706 715 if (ret) 707 716 
goto free_rproc; 708 717 709 - ret = qcom_rproc_pds_attach(adsp->dev, adsp, 710 - desc->proxy_pd_names); 718 + ret = qcom_rproc_pds_attach(adsp, desc->pd_names, desc->num_pds); 711 719 if (ret < 0) { 712 720 dev_err(&pdev->dev, "Failed to attach proxy power domains\n"); 713 721 goto free_rproc; 714 722 } 715 - adsp->proxy_pd_count = ret; 716 723 717 724 ret = adsp_init_reset(adsp); 718 725 if (ret) ··· 742 753 return 0; 743 754 744 755 disable_pm: 745 - qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); 756 + qcom_rproc_pds_detach(adsp); 746 757 747 758 free_rproc: 748 759 rproc_free(rproc); ··· 760 771 qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev); 761 772 qcom_remove_sysmon_subdev(adsp->sysmon); 762 773 qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); 763 - qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); 774 + qcom_rproc_pds_detach(adsp); 764 775 rproc_free(adsp->rproc); 765 776 } 766 777 ··· 777 788 "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL 778 789 }, 779 790 .num_clks = 7, 780 - .proxy_pd_names = (const char*[]) { 781 - "cx", NULL 782 - }, 791 + .pd_names = (const char*[]) { "cx" }, 792 + .num_pds = 1, 783 793 }; 784 794 785 795 static const struct adsp_pil_data adsp_sc7280_resource_init = { ··· 809 821 "q6_axim", NULL 810 822 }, 811 823 .num_clks = 7, 812 - .proxy_pd_names = (const char*[]) { 813 - "cx", NULL 814 - }, 824 + .pd_names = (const char*[]) { "cx" }, 825 + .num_pds = 1, 815 826 }; 816 827 817 828 static const struct adsp_pil_data wpss_resource_init = { ··· 826 839 "ahb_bdg", "ahb", "rscp", NULL 827 840 }, 828 841 .num_clks = 3, 829 - .proxy_pd_names = (const char*[]) { 830 - "cx", "mx", NULL 831 - }, 842 + .pd_names = (const char*[]) { "cx", "mx" }, 843 + .num_pds = 2, 832 844 }; 833 845 834 846 static const struct of_device_id adsp_of_match[] = {
+7
include/dt-bindings/power/qcom-rpmpd.h
··· 308 308 #define MSM8953_VDDMX 5 309 309 #define MSM8953_VDDMX_AO 6 310 310 311 + /* MSM8974 Power Domain Indexes */ 312 + #define MSM8974_VDDCX 0 313 + #define MSM8974_VDDCX_AO 1 314 + #define MSM8974_VDDCX_VFC 2 315 + #define MSM8974_VDDGFX 3 316 + #define MSM8974_VDDGFX_VFC 4 317 + 311 318 /* MSM8976 Power Domain Indexes */ 312 319 #define MSM8976_VDDCX 0 313 320 #define MSM8976_VDDCX_AO 1
+49 -11
include/linux/pm_domain.h
··· 20 20 #include <linux/time64.h> 21 21 22 22 /* 23 + * Flags to control the behaviour when attaching a device to its PM domains. 24 + * 25 + * PD_FLAG_NO_DEV_LINK: As the default behaviour creates a device-link 26 + * for every PM domain that gets attached, this 27 + * flag can be used to skip that. 28 + * 29 + * PD_FLAG_DEV_LINK_ON: Add the DL_FLAG_RPM_ACTIVE to power-on the 30 + * supplier and its PM domain when creating the 31 + * device-links. 32 + * 33 + */ 34 + #define PD_FLAG_NO_DEV_LINK BIT(0) 35 + #define PD_FLAG_DEV_LINK_ON BIT(1) 36 + 37 + struct dev_pm_domain_attach_data { 38 + const char * const *pd_names; 39 + const u32 num_pd_names; 40 + const u32 pd_flags; 41 + }; 42 + 43 + struct dev_pm_domain_list { 44 + struct device **pd_devs; 45 + struct device_link **pd_links; 46 + u32 num_pds; 47 + }; 48 + 49 + /* 23 50 * Flags to control the behaviour of a genpd. 24 51 * 25 52 * These flags may be set in the struct generic_pm_domain's flags field by a ··· 349 322 /* OF PM domain providers */ 350 323 struct of_device_id; 351 324 352 - typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args, 325 + typedef struct generic_pm_domain *(*genpd_xlate_t)(const struct of_phandle_args *args, 353 326 void *data); 354 327 355 328 struct genpd_onecell_data { ··· 364 337 int of_genpd_add_provider_onecell(struct device_node *np, 365 338 struct genpd_onecell_data *data); 366 339 void of_genpd_del_provider(struct device_node *np); 367 - int of_genpd_add_device(struct of_phandle_args *args, struct device *dev); 368 - int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, 369 - struct of_phandle_args *subdomain_spec); 370 - int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, 371 - struct of_phandle_args *subdomain_spec); 340 + int of_genpd_add_device(const struct of_phandle_args *args, struct device *dev); 341 + int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec, 342 + const struct of_phandle_args 
*subdomain_spec); 343 + int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec, 344 + const struct of_phandle_args *subdomain_spec); 372 345 struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); 373 346 int of_genpd_parse_idle_states(struct device_node *dn, 374 347 struct genpd_power_state **states, int *n); ··· 393 366 394 367 static inline void of_genpd_del_provider(struct device_node *np) {} 395 368 396 - static inline int of_genpd_add_device(struct of_phandle_args *args, 369 + static inline int of_genpd_add_device(const struct of_phandle_args *args, 397 370 struct device *dev) 398 371 { 399 372 return -ENODEV; 400 373 } 401 374 402 - static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, 403 - struct of_phandle_args *subdomain_spec) 375 + static inline int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec, 376 + const struct of_phandle_args *subdomain_spec) 404 377 { 405 378 return -ENODEV; 406 379 } 407 380 408 - static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, 409 - struct of_phandle_args *subdomain_spec) 381 + static inline int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec, 382 + const struct of_phandle_args *subdomain_spec) 410 383 { 411 384 return -ENODEV; 412 385 } ··· 447 420 unsigned int index); 448 421 struct device *dev_pm_domain_attach_by_name(struct device *dev, 449 422 const char *name); 423 + int dev_pm_domain_attach_list(struct device *dev, 424 + const struct dev_pm_domain_attach_data *data, 425 + struct dev_pm_domain_list **list); 450 426 void dev_pm_domain_detach(struct device *dev, bool power_off); 427 + void dev_pm_domain_detach_list(struct dev_pm_domain_list *list); 451 428 int dev_pm_domain_start(struct device *dev); 452 429 void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); 453 430 int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state); ··· 470 439 { 471 440 return NULL; 
472 441 } 442 + static inline int dev_pm_domain_attach_list(struct device *dev, 443 + const struct dev_pm_domain_attach_data *data, 444 + struct dev_pm_domain_list **list) 445 + { 446 + return 0; 447 + } 473 448 static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} 449 + static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {} 474 450 static inline int dev_pm_domain_start(struct device *dev) 475 451 { 476 452 return 0;