Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: ufs: core: Add OPP support for scaling clocks and regulators

UFS core is only scaling the clocks during devfreq scaling and
initialization. But for optimum power saving, regulators should also be
scaled along with the clocks.

So let's use the OPP framework which supports scaling clocks, regulators,
and performance state using the OPP table defined in devicetree. For
accommodating the OPP support, the existing APIs (ufshcd_scale_clks,
ufshcd_is_devfreq_scaling_required and ufshcd_devfreq_scale) are modified
to accept "freq" as an argument, which is in turn used by the OPP helpers.

The OPP support is added along with the old freq-table based clock scaling
so that the existing platforms work as expected.

Co-developed-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20231012172129.65172-3-manivannan.sadhasivam@linaro.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Manivannan Sadhasivam and committed by
Martin K. Petersen
930bd77e e820de1d

+115 -33
+111 -33
drivers/ufs/core/ufshcd.c
··· 20 20 #include <linux/delay.h> 21 21 #include <linux/interrupt.h> 22 22 #include <linux/module.h> 23 + #include <linux/pm_opp.h> 23 24 #include <linux/regulator/consumer.h> 24 25 #include <linux/sched/clock.h> 25 26 #include <linux/iopoll.h> ··· 275 274 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 276 275 static void ufshcd_resume_clkscaling(struct ufs_hba *hba); 277 276 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); 278 - static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); 277 + static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, 278 + bool scale_up); 279 279 static irqreturn_t ufshcd_intr(int irq, void *__hba); 280 280 static int ufshcd_change_power_mode(struct ufs_hba *hba, 281 281 struct ufs_pa_layer_attr *pwr_mode); ··· 1063 1061 return ret; 1064 1062 } 1065 1063 1064 + static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq) 1065 + { 1066 + struct dev_pm_opp *opp; 1067 + int ret; 1068 + 1069 + opp = dev_pm_opp_find_freq_floor_indexed(hba->dev, 1070 + &freq, 0); 1071 + if (IS_ERR(opp)) 1072 + return PTR_ERR(opp); 1073 + 1074 + ret = dev_pm_opp_set_opp(hba->dev, opp); 1075 + dev_pm_opp_put(opp); 1076 + 1077 + return ret; 1078 + } 1079 + 1066 1080 /** 1067 1081 * ufshcd_scale_clks - scale up or scale down UFS controller clocks 1068 1082 * @hba: per adapter instance 1083 + * @freq: frequency to scale 1069 1084 * @scale_up: True if scaling up and false if scaling down 1070 1085 * 1071 1086 * Return: 0 if successful; < 0 upon failure. 
1072 1087 */ 1073 - static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) 1088 + static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, 1089 + bool scale_up) 1074 1090 { 1075 1091 int ret = 0; 1076 1092 ktime_t start = ktime_get(); ··· 1097 1077 if (ret) 1098 1078 goto out; 1099 1079 1100 - ret = ufshcd_set_clk_freq(hba, scale_up); 1080 + if (hba->use_pm_opp) 1081 + ret = ufshcd_opp_set_rate(hba, freq); 1082 + else 1083 + ret = ufshcd_set_clk_freq(hba, scale_up); 1101 1084 if (ret) 1102 1085 goto out; 1103 1086 1104 1087 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); 1105 - if (ret) 1106 - ufshcd_set_clk_freq(hba, !scale_up); 1088 + if (ret) { 1089 + if (hba->use_pm_opp) 1090 + ufshcd_opp_set_rate(hba, 1091 + hba->devfreq->previous_freq); 1092 + else 1093 + ufshcd_set_clk_freq(hba, !scale_up); 1094 + } 1107 1095 1108 1096 out: 1109 1097 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), ··· 1123 1095 /** 1124 1096 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not 1125 1097 * @hba: per adapter instance 1098 + * @freq: frequency to scale 1126 1099 * @scale_up: True if scaling up and false if scaling down 1127 1100 * 1128 1101 * Return: true if scaling is required, false otherwise. 
1129 1102 */ 1130 1103 static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, 1131 - bool scale_up) 1104 + unsigned long freq, bool scale_up) 1132 1105 { 1133 1106 struct ufs_clk_info *clki; 1134 1107 struct list_head *head = &hba->clk_list_head; 1135 1108 1136 1109 if (list_empty(head)) 1137 1110 return false; 1111 + 1112 + if (hba->use_pm_opp) 1113 + return freq != hba->clk_scaling.target_freq; 1138 1114 1139 1115 list_for_each_entry(clki, head, list) { 1140 1116 if (!IS_ERR_OR_NULL(clki->clk)) { ··· 1335 1303 /** 1336 1304 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear 1337 1305 * @hba: per adapter instance 1306 + * @freq: frequency to scale 1338 1307 * @scale_up: True for scaling up and false for scalin down 1339 1308 * 1340 1309 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero 1341 1310 * for any other errors. 1342 1311 */ 1343 - static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) 1312 + static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq, 1313 + bool scale_up) 1344 1314 { 1345 1315 int ret = 0; 1346 1316 ··· 1357 1323 goto out_unprepare; 1358 1324 } 1359 1325 1360 - ret = ufshcd_scale_clks(hba, scale_up); 1326 + ret = ufshcd_scale_clks(hba, freq, scale_up); 1361 1327 if (ret) { 1362 1328 if (!scale_up) 1363 1329 ufshcd_scale_gear(hba, true); ··· 1368 1334 if (scale_up) { 1369 1335 ret = ufshcd_scale_gear(hba, true); 1370 1336 if (ret) { 1371 - ufshcd_scale_clks(hba, false); 1337 + ufshcd_scale_clks(hba, hba->devfreq->previous_freq, 1338 + false); 1372 1339 goto out_unprepare; 1373 1340 } 1374 1341 } ··· 1428 1393 if (!ufshcd_is_clkscaling_supported(hba)) 1429 1394 return -EINVAL; 1430 1395 1431 - clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); 1432 - /* Override with the closest supported frequency */ 1433 - *freq = (unsigned long) clk_round_rate(clki->clk, *freq); 1396 + if (hba->use_pm_opp) { 1397 + struct dev_pm_opp *opp; 1398 + 1399 + /* Get 
the recommended frequency from OPP framework */ 1400 + opp = devfreq_recommended_opp(dev, freq, flags); 1401 + if (IS_ERR(opp)) 1402 + return PTR_ERR(opp); 1403 + 1404 + dev_pm_opp_put(opp); 1405 + } else { 1406 + /* Override with the closest supported frequency */ 1407 + clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, 1408 + list); 1409 + *freq = (unsigned long) clk_round_rate(clki->clk, *freq); 1410 + } 1411 + 1434 1412 spin_lock_irqsave(hba->host->host_lock, irq_flags); 1435 1413 if (ufshcd_eh_in_progress(hba)) { 1436 1414 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); ··· 1465 1417 goto out; 1466 1418 } 1467 1419 1468 - /* Decide based on the rounded-off frequency and update */ 1469 - scale_up = *freq == clki->max_freq; 1470 - if (!scale_up) 1420 + /* Decide based on the target or rounded-off frequency and update */ 1421 + if (hba->use_pm_opp) 1422 + scale_up = *freq > hba->clk_scaling.target_freq; 1423 + else 1424 + scale_up = *freq == clki->max_freq; 1425 + 1426 + if (!hba->use_pm_opp && !scale_up) 1471 1427 *freq = clki->min_freq; 1428 + 1472 1429 /* Update the frequency */ 1473 - if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { 1430 + if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) { 1474 1431 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1475 1432 ret = 0; 1476 1433 goto out; /* no state change required */ ··· 1483 1430 spin_unlock_irqrestore(hba->host->host_lock, irq_flags); 1484 1431 1485 1432 start = ktime_get(); 1486 - ret = ufshcd_devfreq_scale(hba, scale_up); 1433 + ret = ufshcd_devfreq_scale(hba, *freq, scale_up); 1434 + if (!ret) 1435 + hba->clk_scaling.target_freq = *freq; 1487 1436 1488 1437 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), 1489 1438 (scale_up ? 
"up" : "down"), ··· 1505 1450 struct ufs_hba *hba = dev_get_drvdata(dev); 1506 1451 struct ufs_clk_scaling *scaling = &hba->clk_scaling; 1507 1452 unsigned long flags; 1508 - struct list_head *clk_list = &hba->clk_list_head; 1509 - struct ufs_clk_info *clki; 1510 1453 ktime_t curr_t; 1511 1454 1512 1455 if (!ufshcd_is_clkscaling_supported(hba)) ··· 1517 1464 if (!scaling->window_start_t) 1518 1465 goto start_window; 1519 1466 1520 - clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1521 1467 /* 1522 1468 * If current frequency is 0, then the ondemand governor considers 1523 1469 * there's no initial frequency set. And it always requests to set 1524 1470 * to max. frequency. 1525 1471 */ 1526 - stat->current_frequency = clki->curr_freq; 1472 + if (hba->use_pm_opp) { 1473 + stat->current_frequency = hba->clk_scaling.target_freq; 1474 + } else { 1475 + struct list_head *clk_list = &hba->clk_list_head; 1476 + struct ufs_clk_info *clki; 1477 + 1478 + clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1479 + stat->current_frequency = clki->curr_freq; 1480 + } 1481 + 1527 1482 if (scaling->is_busy_started) 1528 1483 scaling->tot_busy_t += ktime_us_delta(curr_t, 1529 1484 scaling->busy_start_t); 1530 - 1531 1485 stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); 1532 1486 stat->busy_time = scaling->tot_busy_t; 1533 1487 start_window: ··· 1563 1503 if (list_empty(clk_list)) 1564 1504 return 0; 1565 1505 1566 - clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1567 - dev_pm_opp_add(hba->dev, clki->min_freq, 0); 1568 - dev_pm_opp_add(hba->dev, clki->max_freq, 0); 1506 + if (!hba->use_pm_opp) { 1507 + clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1508 + dev_pm_opp_add(hba->dev, clki->min_freq, 0); 1509 + dev_pm_opp_add(hba->dev, clki->max_freq, 0); 1510 + } 1569 1511 1570 1512 ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, 1571 1513 &hba->vps->ondemand_data); ··· 1579 1517 ret = 
PTR_ERR(devfreq); 1580 1518 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); 1581 1519 1582 - dev_pm_opp_remove(hba->dev, clki->min_freq); 1583 - dev_pm_opp_remove(hba->dev, clki->max_freq); 1520 + if (!hba->use_pm_opp) { 1521 + dev_pm_opp_remove(hba->dev, clki->min_freq); 1522 + dev_pm_opp_remove(hba->dev, clki->max_freq); 1523 + } 1584 1524 return ret; 1585 1525 } 1586 1526 ··· 1594 1530 static void ufshcd_devfreq_remove(struct ufs_hba *hba) 1595 1531 { 1596 1532 struct list_head *clk_list = &hba->clk_list_head; 1597 - struct ufs_clk_info *clki; 1598 1533 1599 1534 if (!hba->devfreq) 1600 1535 return; ··· 1601 1538 devfreq_remove_device(hba->devfreq); 1602 1539 hba->devfreq = NULL; 1603 1540 1604 - clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1605 - dev_pm_opp_remove(hba->dev, clki->min_freq); 1606 - dev_pm_opp_remove(hba->dev, clki->max_freq); 1541 + if (!hba->use_pm_opp) { 1542 + struct ufs_clk_info *clki; 1543 + 1544 + clki = list_first_entry(clk_list, struct ufs_clk_info, list); 1545 + dev_pm_opp_remove(hba->dev, clki->min_freq); 1546 + dev_pm_opp_remove(hba->dev, clki->max_freq); 1547 + } 1607 1548 } 1608 1549 1609 1550 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) ··· 1683 1616 ufshcd_resume_clkscaling(hba); 1684 1617 } else { 1685 1618 ufshcd_suspend_clkscaling(hba); 1686 - err = ufshcd_devfreq_scale(hba, true); 1619 + err = ufshcd_devfreq_scale(hba, ULONG_MAX, true); 1687 1620 if (err) 1688 1621 dev_err(hba->dev, "%s: failed to scale clocks up %d\n", 1689 1622 __func__, err); ··· 7684 7617 hba->silence_err_logs = false; 7685 7618 7686 7619 /* scale up clocks to max frequency before full reinitialization */ 7687 - ufshcd_scale_clks(hba, true); 7620 + ufshcd_scale_clks(hba, ULONG_MAX, true); 7688 7621 7689 7622 err = ufshcd_hba_enable(hba); 7690 7623 ··· 9230 9163 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, 9231 9164 clki->name, clk_get_rate(clki->clk)); 9232 9165 } 9166 + 9167 + /* Set Max. 
frequency for all clocks */ 9168 + if (hba->use_pm_opp) { 9169 + ret = ufshcd_opp_set_rate(hba, ULONG_MAX); 9170 + if (ret) { 9171 + dev_err(hba->dev, "%s: failed to set OPP: %d", __func__, 9172 + ret); 9173 + goto out; 9174 + } 9175 + } 9176 + 9233 9177 out: 9234 9178 return ret; 9235 9179 }
+4
include/ufs/ufshcd.h
··· 429 429 * @workq: workqueue to schedule devfreq suspend/resume work 430 430 * @suspend_work: worker to suspend devfreq 431 431 * @resume_work: worker to resume devfreq 432 + * @target_freq: frequency requested by devfreq framework 432 433 * @min_gear: lowest HS gear to scale down to 433 434 * @is_enabled: tracks if scaling is currently enabled or not, controlled by 434 435 * clkscale_enable sysfs node ··· 449 448 struct workqueue_struct *workq; 450 449 struct work_struct suspend_work; 451 450 struct work_struct resume_work; 451 + unsigned long target_freq; 452 452 u32 min_gear; 453 453 bool is_enabled; 454 454 bool is_allowed; ··· 864 862 * @auto_bkops_enabled: to track whether bkops is enabled in device 865 863 * @vreg_info: UFS device voltage regulator information 866 864 * @clk_list_head: UFS host controller clocks list node head 865 + * @use_pm_opp: Indicates whether OPP based scaling is used or not 867 866 * @req_abort_count: number of times ufshcd_abort() has been called 868 867 * @lanes_per_direction: number of lanes per data direction between the UFS 869 868 * controller and the UFS device. ··· 1015 1012 bool auto_bkops_enabled; 1016 1013 struct ufs_vreg_info vreg_info; 1017 1014 struct list_head clk_list_head; 1015 + bool use_pm_opp; 1018 1016 1019 1017 /* Number of requests aborts */ 1020 1018 int req_abort_count;