Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "Enhance UFS Mediatek Driver"

Peter Wang <peter.wang@mediatek.com> says:

Improves the UFS Mediatek driver by correcting clock scaling with PM
QoS, and adjusting power management flows. It addresses
shutdown/suspend race conditions, and removes redundant
functions. Support for new platforms is added with the MMIO_OTSD_CTRL
register, and MT6991 performance is optimized with multi-RTT (MRTT) and
random-access performance improvements. These changes collectively enhance driver performance,
stability, and compatibility.

Changes since v1:

1. Remove two patches that will be fixed in UFS core.
- ufs: host: mediatek: Fix runtime suspend error deadlock
- ufs: host: mediatek: Enable interrupts for MCQ mode
2. Use hba->shutting_down instead of ufshcd_is_user_access_allowed

v1:
https://patch.msgid.link/20250918104000.208856-1-peter.wang@mediatek.com

Link: https://patch.msgid.link/20250924094527.2992256-1-peter.wang@mediatek.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+115 -23
+2 -1
drivers/ufs/core/ufs-sysfs.c
··· 235 235 } 236 236 237 237 /* Convert microseconds to Auto-Hibernate Idle Timer register value */ 238 - static u32 ufshcd_us_to_ahit(unsigned int timer) 238 + u32 ufshcd_us_to_ahit(unsigned int timer) 239 239 { 240 240 unsigned int scale; 241 241 ··· 245 245 return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) | 246 246 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); 247 247 } 248 + EXPORT_SYMBOL_GPL(ufshcd_us_to_ahit); 248 249 249 250 static int ufshcd_read_hci_reg(struct ufs_hba *hba, u32 *val, unsigned int reg) 250 251 {
+2 -1
drivers/ufs/core/ufshcd.c
··· 1076 1076 * @hba: per adapter instance 1077 1077 * @on: If True, vote for perf PM QoS mode otherwise power save mode 1078 1078 */ 1079 - static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on) 1079 + void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on) 1080 1080 { 1081 1081 guard(mutex)(&hba->pm_qos_mutex); 1082 1082 ··· 1085 1085 1086 1086 cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE); 1087 1087 } 1088 + EXPORT_SYMBOL_GPL(ufshcd_pm_qos_update); 1088 1089 1089 1090 /** 1090 1091 * ufshcd_set_clk_freq - set UFS controller clock frequencies
+99 -20
drivers/ufs/host/ufs-mediatek.c
··· 279 279 ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, 280 280 REG_UFS_XOUFS_CTRL); 281 281 282 + if (host->legacy_ip_ver) 283 + return 0; 284 + 282 285 /* DDR_EN setting */ 283 286 if (host->ip_ver >= IP_VER_MT6989) { 284 287 ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8), 285 288 0x453000, REG_UFS_MMIO_OPT_CTRL_0); 286 289 } 287 290 291 + if (host->ip_ver >= IP_VER_MT6991_A0) { 292 + /* Enable multi-rtt */ 293 + ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0); 294 + /* Enable random performance improvement */ 295 + ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0); 296 + } 288 297 } 289 298 290 299 return 0; ··· 413 404 { 414 405 struct ufs_mtk_host *host = ufshcd_get_variant(hba); 415 406 416 - if (((host->ip_ver >> 16) & 0xFF) >= 0x36) { 407 + if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) { 417 408 ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); 418 409 ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); 419 410 ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); ··· 430 421 u64 timeout, time_checked; 431 422 u32 val, sm; 432 423 bool wait_idle; 424 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 433 425 434 426 /* cannot use plain ktime_get() in suspend */ 435 427 timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL; ··· 441 431 442 432 do { 443 433 time_checked = ktime_get_mono_fast_ns(); 444 - ufs_mtk_dbg_sel(hba); 445 - val = ufshcd_readl(hba, REG_UFS_PROBE); 434 + if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) { 435 + ufs_mtk_dbg_sel(hba); 436 + val = ufshcd_readl(hba, REG_UFS_PROBE); 437 + } else { 438 + val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL); 439 + val = val >> 16; 440 + } 446 441 447 442 sm = val & 0x1f; 448 443 ··· 479 464 { 480 465 ktime_t timeout, time_checked; 481 466 u32 val; 467 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 482 468 483 469 timeout = ktime_add_ms(ktime_get(), max_wait_ms); 484 470 do { 485 471 time_checked = ktime_get(); 486 - ufs_mtk_dbg_sel(hba); 487 - val = 
ufshcd_readl(hba, REG_UFS_PROBE); 488 - val = val >> 28; 472 + 473 + if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) { 474 + ufs_mtk_dbg_sel(hba); 475 + val = ufshcd_readl(hba, REG_UFS_PROBE); 476 + val = val >> 28; 477 + } else { 478 + val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL); 479 + val = val >> 24; 480 + } 489 481 490 482 if (val == state) 491 483 return 0; ··· 1130 1108 } 1131 1109 } 1132 1110 1133 - /* Convert microseconds to Auto-Hibernate Idle Timer register value */ 1134 - static u32 ufs_mtk_us_to_ahit(unsigned int timer) 1135 - { 1136 - unsigned int scale; 1137 - 1138 - for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale) 1139 - timer /= UFSHCI_AHIBERN8_SCALE_FACTOR; 1140 - 1141 - return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) | 1142 - FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale); 1143 - } 1144 - 1145 1111 static void ufs_mtk_fix_ahit(struct ufs_hba *hba) 1146 1112 { 1147 1113 unsigned int us; ··· 1152 1142 break; 1153 1143 } 1154 1144 1155 - hba->ahit = ufs_mtk_us_to_ahit(us); 1145 + hba->ahit = ufshcd_us_to_ahit(us); 1156 1146 } 1157 1147 1158 1148 ufs_mtk_setup_clk_gating(hba); ··· 1341 1331 return true; 1342 1332 } 1343 1333 1334 + static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba) 1335 + { 1336 + int i; 1337 + u32 value; 1338 + u32 cnt, att, min; 1339 + struct attr_min { 1340 + u32 attr; 1341 + u32 min_value; 1342 + } pa_min_sync_length[] = { 1343 + {PA_TXHSG1SYNCLENGTH, 0x48}, 1344 + {PA_TXHSG2SYNCLENGTH, 0x48}, 1345 + {PA_TXHSG3SYNCLENGTH, 0x48}, 1346 + {PA_TXHSG4SYNCLENGTH, 0x48}, 1347 + {PA_TXHSG5SYNCLENGTH, 0x48} 1348 + }; 1349 + 1350 + cnt = sizeof(pa_min_sync_length) / sizeof(struct attr_min); 1351 + for (i = 0; i < cnt; i++) { 1352 + att = pa_min_sync_length[i].attr; 1353 + min = pa_min_sync_length[i].min_value; 1354 + ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value); 1355 + if (value < min) 1356 + ufshcd_dme_set(hba, UIC_ARG_MIB(att), min); 1357 + 1358 + ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), 
&value); 1359 + if (value < min) 1360 + ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min); 1361 + } 1362 + } 1363 + 1344 1364 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, 1345 1365 const struct ufs_pa_layer_attr *dev_max_params, 1346 1366 struct ufs_pa_layer_attr *dev_req_params) ··· 1394 1354 } 1395 1355 1396 1356 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { 1357 + ufs_mtk_adjust_sync_length(hba); 1358 + 1397 1359 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); 1398 1360 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); 1399 1361 ··· 1660 1618 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) 1661 1619 { 1662 1620 int err; 1621 + u32 val; 1622 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 1663 1623 1664 1624 err = ufshcd_hba_enable(hba); 1665 1625 if (err) 1666 1626 return err; 1667 1627 1668 1628 err = ufs_mtk_unipro_set_lpm(hba, false); 1669 - if (err) 1629 + if (err) { 1630 + if (host->ip_ver < IP_VER_MT6899) { 1631 + ufs_mtk_dbg_sel(hba); 1632 + val = ufshcd_readl(hba, REG_UFS_PROBE); 1633 + } else { 1634 + val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL); 1635 + } 1636 + ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val); 1637 + val = ufshcd_readl(hba, REG_INTERRUPT_STATUS); 1638 + ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val); 1670 1639 return err; 1640 + } 1671 1641 1672 1642 err = ufshcd_uic_hibern8_exit(hba); 1673 1643 if (err) ··· 1797 1743 { 1798 1744 int err; 1799 1745 struct arm_smccc_res res; 1746 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 1800 1747 1801 1748 if (status == PRE_CHANGE) { 1802 1749 if (ufshcd_is_auto_hibern8_supported(hba)) ··· 1827 1772 1828 1773 ufs_mtk_sram_pwr_ctrl(false, res); 1829 1774 1775 + /* Release pm_qos/clk if in scale-up mode during suspend */ 1776 + if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) { 1777 + ufshcd_pm_qos_update(hba, false); 1778 + _ufs_mtk_clk_scale(hba, false); 1779 + } else if 
((!ufshcd_is_clkscaling_supported(hba) && 1780 + hba->pwr_info.gear_rx >= UFS_HS_G5)) { 1781 + _ufs_mtk_clk_scale(hba, false); 1782 + } 1783 + 1830 1784 return 0; 1831 1785 fail: 1832 1786 /* ··· 1851 1787 { 1852 1788 int err; 1853 1789 struct arm_smccc_res res; 1790 + struct ufs_mtk_host *host = ufshcd_get_variant(hba); 1854 1791 1855 1792 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) 1856 1793 ufs_mtk_dev_vreg_set_lpm(hba, false); ··· 1861 1796 err = ufs_mtk_mphy_power_on(hba, true); 1862 1797 if (err) 1863 1798 goto fail; 1799 + 1800 + /* Request pm_qos/clk if in scale-up mode after resume */ 1801 + if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) { 1802 + ufshcd_pm_qos_update(hba, true); 1803 + _ufs_mtk_clk_scale(hba, true); 1804 + } else if ((!ufshcd_is_clkscaling_supported(hba) && 1805 + hba->pwr_info.gear_rx >= UFS_HS_G5)) { 1806 + _ufs_mtk_clk_scale(hba, true); 1807 + } 1864 1808 1865 1809 if (ufshcd_is_link_hibern8(hba)) { 1866 1810 err = ufs_mtk_link_set_hpm(hba); ··· 2443 2369 struct ufs_hba *hba = dev_get_drvdata(dev); 2444 2370 struct arm_smccc_res res; 2445 2371 int ret; 2372 + 2373 + if (hba->shutting_down) { 2374 + ret = -EBUSY; 2375 + goto out; 2376 + } 2446 2377 2447 2378 ret = ufshcd_system_suspend(dev); 2448 2379 if (ret)
+4
drivers/ufs/host/ufs-mediatek.h
··· 20 20 #define MCQ_MULTI_INTR_EN BIT(2) 21 21 #define MCQ_CMB_INTR_EN BIT(3) 22 22 #define MCQ_AH8 BIT(4) 23 + #define MON_EN BIT(5) 24 + #define MRTT_EN BIT(25) 25 + #define RDN_PFM_IMPV_DIS BIT(28) 23 26 24 27 #define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN) 25 28 ··· 31 28 */ 32 29 #define REG_UFS_XOUFS_CTRL 0x140 33 30 #define REG_UFS_REFCLK_CTRL 0x144 31 + #define REG_UFS_UFS_MMIO_OTSD_CTRL 0x14C 34 32 #define REG_UFS_MMIO_OPT_CTRL_0 0x160 35 33 #define REG_UFS_EXTREG 0x2100 36 34 #define REG_UFS_MPHYCTRL 0x2200
+2
include/ufs/ufshcd.h
··· 1489 1489 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, 1490 1490 const u16 *other_mask, u16 set, u16 clr); 1491 1491 void ufshcd_force_error_recovery(struct ufs_hba *hba); 1492 + void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on); 1493 + u32 ufshcd_us_to_ahit(unsigned int timer); 1492 1494 1493 1495 #endif /* End of Header */
+6 -1
include/ufs/unipro.h
··· 111 111 #define PA_TXLINKSTARTUPHS 0x1544 112 112 #define PA_AVAILRXDATALANES 0x1540 113 113 #define PA_MINRXTRAILINGCLOCKS 0x1543 114 + #define PA_TXHSG1SYNCLENGTH 0x1552 115 + #define PA_TXHSG2SYNCLENGTH 0x1554 116 + #define PA_TXHSG3SYNCLENGTH 0x1556 114 117 #define PA_LOCAL_TX_LCC_ENABLE 0x155E 115 118 #define PA_ACTIVETXDATALANES 0x1560 116 119 #define PA_CONNECTEDTXDATALANES 0x1561 ··· 163 160 #define PA_PACPFRAMECOUNT 0x15C0 164 161 #define PA_PACPERRORCOUNT 0x15C1 165 162 #define PA_PHYTESTCONTROL 0x15C2 166 - #define PA_TXHSADAPTTYPE 0x15D4 163 + #define PA_TXHSG4SYNCLENGTH 0x15D0 164 + #define PA_TXHSADAPTTYPE 0x15D4 165 + #define PA_TXHSG5SYNCLENGTH 0x15D6 167 166 168 167 /* Adpat type for PA_TXHSADAPTTYPE attribute */ 169 168 #define PA_REFRESH_ADAPT 0x00