Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

opp: Allow lazy-linking of required-opps

The OPP core currently requires the required opp tables to be available
before the dependent OPP table is added, as it needs to create links
from the dependent OPP table to the required ones. This may not be
convenient for all the platforms though, as this requires strict
ordering for probing the drivers.

This patch allows lazy-linking of the required-opps. The OPP tables for
which the required-opp-tables aren't available at the time of their
initialization, are added to a special list of OPP tables:
lazy_opp_tables. Later on, whenever a new OPP table is registered with
the OPP core, we check whether it is required by any OPP table on the
pending list; if so, the linking is completed at that point.

An OPP table is marked unusable until all of its required-opp tables
are available. If lazy-linking fails for an OPP table, the OPP core
disables all of its OPPs to make sure no one can use them.

Tested-by: Hsin-Yi Wang <hsinyi@chromium.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>

+161 -16
+36 -9
drivers/opp/core.c
··· 27 27 * various states of availability. 28 28 */ 29 29 LIST_HEAD(opp_tables); 30 + 31 + /* OPP tables with uninitialized required OPPs */ 32 + LIST_HEAD(lazy_opp_tables); 33 + 30 34 /* Lock to allow exclusive modification to the device and opp lists */ 31 35 DEFINE_MUTEX(opp_table_lock); 32 36 /* Flag indicating that opp_tables list is being updated at the moment */ ··· 166 162 pr_err("%s: Invalid parameters\n", __func__); 167 163 return 0; 168 164 } 165 + 166 + /* required-opps not fully initialized yet */ 167 + if (lazy_linking_pending(opp->opp_table)) 168 + return 0; 169 169 170 170 return opp->required_opps[index]->pstate; 171 171 } ··· 893 885 if (!required_opp_tables) 894 886 return 0; 895 887 888 + /* required-opps not fully initialized yet */ 889 + if (lazy_linking_pending(opp_table)) 890 + return -EBUSY; 891 + 896 892 /* Single genpd case */ 897 893 if (!genpd_virt_devs) 898 894 return _set_required_opp(dev, dev, opp, 0); ··· 1193 1181 mutex_init(&opp_table->lock); 1194 1182 mutex_init(&opp_table->genpd_virt_dev_lock); 1195 1183 INIT_LIST_HEAD(&opp_table->dev_list); 1184 + INIT_LIST_HEAD(&opp_table->lazy); 1196 1185 1197 1186 /* Mark regulator count uninitialized */ 1198 1187 opp_table->regulator_count = -1; ··· 1645 1632 return 0; 1646 1633 } 1647 1634 1635 + void _required_opps_available(struct dev_pm_opp *opp, int count) 1636 + { 1637 + int i; 1638 + 1639 + for (i = 0; i < count; i++) { 1640 + if (opp->required_opps[i]->available) 1641 + continue; 1642 + 1643 + opp->available = false; 1644 + pr_warn("%s: OPP not supported by required OPP %pOF (%lu)\n", 1645 + __func__, opp->required_opps[i]->np, opp->rate); 1646 + return; 1647 + } 1648 + } 1649 + 1648 1650 /* 1649 1651 * Returns: 1650 1652 * 0: On success. And appropriate error message for duplicate OPPs. 
··· 1674 1646 struct opp_table *opp_table, bool rate_not_available) 1675 1647 { 1676 1648 struct list_head *head; 1677 - unsigned int i; 1678 1649 int ret; 1679 1650 1680 1651 mutex_lock(&opp_table->lock); ··· 1699 1672 __func__, new_opp->rate); 1700 1673 } 1701 1674 1702 - for (i = 0; i < opp_table->required_opp_count; i++) { 1703 - if (new_opp->required_opps[i]->available) 1704 - continue; 1675 + /* required-opps not fully initialized yet */ 1676 + if (lazy_linking_pending(opp_table)) 1677 + return 0; 1705 1678 1706 - new_opp->available = false; 1707 - dev_warn(dev, "%s: OPP not supported by required OPP %pOF (%lu)\n", 1708 - __func__, new_opp->required_opps[i]->np, new_opp->rate); 1709 - break; 1710 - } 1679 + _required_opps_available(new_opp, opp_table->required_opp_count); 1711 1680 1712 1681 return 0; 1713 1682 } ··· 2410 2387 */ 2411 2388 if (!src_table || !src_table->required_opp_count) 2412 2389 return pstate; 2390 + 2391 + /* required-opps not fully initialized yet */ 2392 + if (lazy_linking_pending(src_table)) 2393 + return -EBUSY; 2413 2394 2414 2395 for (i = 0; i < src_table->required_opp_count; i++) { 2415 2396 if (src_table->required_opp_tables[i]->np == dst_table->np)
+117 -5
drivers/opp/of.c
··· 144 144 145 145 for (i = 0; i < opp_table->required_opp_count; i++) { 146 146 if (IS_ERR_OR_NULL(required_opp_tables[i])) 147 - break; 147 + continue; 148 148 149 149 dev_pm_opp_put_opp_table(required_opp_tables[i]); 150 150 } ··· 153 153 154 154 opp_table->required_opp_count = 0; 155 155 opp_table->required_opp_tables = NULL; 156 + list_del(&opp_table->lazy); 156 157 } 157 158 158 159 /* ··· 166 165 { 167 166 struct opp_table **required_opp_tables; 168 167 struct device_node *required_np, *np; 168 + bool lazy = false; 169 169 int count, i; 170 170 171 171 /* Traversing the first OPP node is all we need */ ··· 197 195 required_opp_tables[i] = _find_table_of_opp_np(required_np); 198 196 of_node_put(required_np); 199 197 200 - if (IS_ERR(required_opp_tables[i])) 201 - goto free_required_tables; 198 + if (IS_ERR(required_opp_tables[i])) { 199 + lazy = true; 200 + continue; 201 + } 202 202 203 203 /* 204 204 * We only support genpd's OPPs in the "required-opps" for now, ··· 213 209 goto free_required_tables; 214 210 } 215 211 } 212 + 213 + /* Let's do the linking later on */ 214 + if (lazy) 215 + list_add(&opp_table->lazy, &lazy_opp_tables); 216 216 217 217 goto put_np; 218 218 ··· 286 278 287 279 for (i = 0; i < opp_table->required_opp_count; i++) { 288 280 if (!required_opps[i]) 289 - break; 281 + continue; 290 282 291 283 /* Put the reference back */ 292 284 dev_pm_opp_put(required_opps[i]); 293 285 } 294 286 295 - kfree(required_opps); 296 287 opp->required_opps = NULL; 288 + kfree(required_opps); 297 289 } 298 290 299 291 /* Populate all required OPPs which are part of "required-opps" list */ ··· 316 308 317 309 for (i = 0; i < count; i++) { 318 310 required_table = opp_table->required_opp_tables[i]; 311 + 312 + /* Required table not added yet, we will link later */ 313 + if (IS_ERR_OR_NULL(required_table)) 314 + continue; 319 315 320 316 np = of_parse_required_opp(opp->np, i); 321 317 if (unlikely(!np)) { ··· 344 332 _of_opp_free_required_opps(opp_table, 
opp); 345 333 346 334 return ret; 335 + } 336 + 337 + /* Link required OPPs for an individual OPP */ 338 + static int lazy_link_required_opps(struct opp_table *opp_table, 339 + struct opp_table *new_table, int index) 340 + { 341 + struct device_node *required_np; 342 + struct dev_pm_opp *opp; 343 + 344 + list_for_each_entry(opp, &opp_table->opp_list, node) { 345 + required_np = of_parse_required_opp(opp->np, index); 346 + if (unlikely(!required_np)) 347 + return -ENODEV; 348 + 349 + opp->required_opps[index] = _find_opp_of_np(new_table, required_np); 350 + of_node_put(required_np); 351 + 352 + if (!opp->required_opps[index]) { 353 + pr_err("%s: Unable to find required OPP node: %pOF (%d)\n", 354 + __func__, opp->np, index); 355 + return -ENODEV; 356 + } 357 + } 358 + 359 + return 0; 360 + } 361 + 362 + /* Link required OPPs for all OPPs of the newly added OPP table */ 363 + static void lazy_link_required_opp_table(struct opp_table *new_table) 364 + { 365 + struct opp_table *opp_table, *temp, **required_opp_tables; 366 + struct device_node *required_np, *opp_np, *required_table_np; 367 + struct dev_pm_opp *opp; 368 + int i, ret; 369 + 370 + /* 371 + * We only support genpd's OPPs in the "required-opps" for now, 372 + * as we don't know much about other cases. 
373 + */ 374 + if (!new_table->is_genpd) 375 + return; 376 + 377 + mutex_lock(&opp_table_lock); 378 + 379 + list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) { 380 + bool lazy = false; 381 + 382 + /* opp_np can't be invalid here */ 383 + opp_np = of_get_next_available_child(opp_table->np, NULL); 384 + 385 + for (i = 0; i < opp_table->required_opp_count; i++) { 386 + required_opp_tables = opp_table->required_opp_tables; 387 + 388 + /* Required opp-table is already parsed */ 389 + if (!IS_ERR(required_opp_tables[i])) 390 + continue; 391 + 392 + /* required_np can't be invalid here */ 393 + required_np = of_parse_required_opp(opp_np, i); 394 + required_table_np = of_get_parent(required_np); 395 + 396 + of_node_put(required_table_np); 397 + of_node_put(required_np); 398 + 399 + /* 400 + * Newly added table isn't the required opp-table for 401 + * opp_table. 402 + */ 403 + if (required_table_np != new_table->np) { 404 + lazy = true; 405 + continue; 406 + } 407 + 408 + required_opp_tables[i] = new_table; 409 + _get_opp_table_kref(new_table); 410 + 411 + /* Link OPPs now */ 412 + ret = lazy_link_required_opps(opp_table, new_table, i); 413 + if (ret) { 414 + /* The OPPs will be marked unusable */ 415 + lazy = false; 416 + break; 417 + } 418 + } 419 + 420 + of_node_put(opp_np); 421 + 422 + /* All required opp-tables found, remove from lazy list */ 423 + if (!lazy) { 424 + list_del(&opp_table->lazy); 425 + INIT_LIST_HEAD(&opp_table->lazy); 426 + 427 + list_for_each_entry(opp, &opp_table->opp_list, node) 428 + _required_opps_available(opp, opp_table->required_opp_count); 429 + } 430 + } 431 + 432 + mutex_unlock(&opp_table_lock); 347 433 } 348 434 349 435 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) ··· 998 888 break; 999 889 } 1000 890 } 891 + 892 + lazy_link_required_opp_table(opp_table); 1001 893 1002 894 return 0; 1003 895
+8 -2
drivers/opp/opp.h
··· 26 26 /* Lock to allow exclusive modification to the device and opp lists */ 27 27 extern struct mutex opp_table_lock; 28 28 29 - extern struct list_head opp_tables; 29 + extern struct list_head opp_tables, lazy_opp_tables; 30 30 31 31 /* 32 32 * Internal data structure organization with the OPP layer library is as ··· 168 168 * meant for book keeping and private to OPP library. 169 169 */ 170 170 struct opp_table { 171 - struct list_head node; 171 + struct list_head node, lazy; 172 172 173 173 struct blocking_notifier_head head; 174 174 struct list_head dev_list; ··· 229 229 void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, int last_cpu); 230 230 struct opp_table *_add_opp_table_indexed(struct device *dev, int index, bool getclk); 231 231 void _put_opp_list_kref(struct opp_table *opp_table); 232 + void _required_opps_available(struct dev_pm_opp *opp, int count); 233 + 234 + static inline bool lazy_linking_pending(struct opp_table *opp_table) 235 + { 236 + return unlikely(!list_empty(&opp_table->lazy)); 237 + } 232 238 233 239 #ifdef CONFIG_OF 234 240 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);