Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "ufs: Add support for AMD Versal Gen2 UFS"

Ajay Neeli <ajay.neeli@amd.com> says:

This patch series adds support for the UFS driver on the AMD Versal
Gen 2 SoC. It includes:

- Device tree bindings and driver implementation.

- Secure read support for retrieving the UFS calibration
values.

The UFS host driver is based upon the Synopsys DesignWare (DWC) UFS
architecture, utilizing the existing UFSHCD_DWC and UFSHCD_PLATFORM
drivers.

Link: https://patch.msgid.link/20251021113003.13650-1-ajay.neeli@amd.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+912 -1
+61
Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/ufs/amd,versal2-ufs.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: AMD Versal Gen 2 UFS Host Controller 8 + 9 + maintainers: 10 + - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com> 11 + 12 + allOf: 13 + - $ref: ufs-common.yaml 14 + 15 + properties: 16 + compatible: 17 + const: amd,versal2-ufs 18 + 19 + reg: 20 + maxItems: 1 21 + 22 + clocks: 23 + maxItems: 1 24 + 25 + clock-names: 26 + items: 27 + - const: core 28 + 29 + power-domains: 30 + maxItems: 1 31 + 32 + resets: 33 + maxItems: 2 34 + 35 + reset-names: 36 + items: 37 + - const: host 38 + - const: phy 39 + 40 + required: 41 + - reg 42 + - clocks 43 + - clock-names 44 + - resets 45 + - reset-names 46 + 47 + unevaluatedProperties: false 48 + 49 + examples: 50 + - | 51 + #include <dt-bindings/interrupt-controller/arm-gic.h> 52 + ufs@f10b0000 { 53 + compatible = "amd,versal2-ufs"; 54 + reg = <0xf10b0000 0x1000>; 55 + clocks = <&ufs_core_clk>; 56 + clock-names = "core"; 57 + resets = <&scmi_reset 4>, <&scmi_reset 35>; 58 + reset-names = "host", "phy"; 59 + interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; 60 + freq-table-hz = <0 0>; 61 + };
+7
MAINTAINERS
··· 26339 26339 F: Documentation/scsi/ufs.rst 26340 26340 F: drivers/ufs/core/ 26341 26341 26342 + UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER AMD VERSAL2 26343 + M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com> 26344 + M: Ajay Neeli <ajay.neeli@amd.com> 26345 + S: Maintained 26346 + F: Documentation/devicetree/bindings/ufs/amd,versal2-ufs.yaml 26347 + F: drivers/ufs/host/ufs-amd-versal2.c 26348 + 26342 26349 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS 26343 26350 M: Pedro Sousa <pedrom.sousa@synopsys.com> 26344 26351 L: linux-scsi@vger.kernel.org
+1 -1
drivers/firmware/xilinx/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # Makefile for Xilinx firmwares 3 3 4 - obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o 4 + obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o 5 5 obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
+118
drivers/firmware/xilinx/zynqmp-ufs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Firmware Layer for UFS APIs 4 + * 5 + * Copyright (C) 2025 Advanced Micro Devices, Inc. 6 + */ 7 + 8 + #include <linux/firmware/xlnx-zynqmp.h> 9 + #include <linux/module.h> 10 + 11 + /* Register Node IDs */ 12 + #define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */ 13 + #define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */ 14 + 15 + /* Register Offsets for PMC IOU SLCR */ 16 + #define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */ 17 + #define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */ 18 + 19 + /* Masks for SRAM Control and Status Register */ 20 + #define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */ 21 + #define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */ 22 + #define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */ 23 + 24 + /* Mask to check M-PHY TX-RX configuration readiness */ 25 + #define TX_RX_CFG_RDY_MASK GENMASK(3, 0) 26 + 27 + /* Register Offsets for EFUSE Cache */ 28 + #define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */ 29 + 30 + /** 31 + * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness 32 + * @is_ready: Store output status (true/false) 33 + * 34 + * Return: Returns 0 on success or error value on failure. 35 + */ 36 + int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) 37 + { 38 + u32 regval; 39 + int ret; 40 + 41 + if (!is_ready) 42 + return -EINVAL; 43 + 44 + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval); 45 + if (ret) 46 + return ret; 47 + 48 + regval &= TX_RX_CFG_RDY_MASK; 49 + if (regval) 50 + *is_ready = true; 51 + else 52 + *is_ready = false; 53 + 54 + return ret; 55 + } 56 + EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready); 57 + 58 + /** 59 + * zynqmp_pm_is_sram_init_done - check SRAM initialization 60 + * @is_done: Store output status (true/false) 61 + * 62 + * Return: Returns 0 on success or error value on failure. 
63 + */ 64 + int zynqmp_pm_is_sram_init_done(bool *is_done) 65 + { 66 + u32 regval; 67 + int ret; 68 + 69 + if (!is_done) 70 + return -EINVAL; 71 + 72 + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval); 73 + if (ret) 74 + return ret; 75 + 76 + regval &= SRAM_CSR_INIT_DONE_MASK; 77 + if (regval) 78 + *is_done = true; 79 + else 80 + *is_done = false; 81 + 82 + return ret; 83 + } 84 + EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done); 85 + 86 + /** 87 + * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control 88 + * 89 + * Return: Returns 0 on success or error value on failure. 90 + */ 91 + int zynqmp_pm_set_sram_bypass(void) 92 + { 93 + u32 sram_csr; 94 + int ret; 95 + 96 + ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr); 97 + if (ret) 98 + return ret; 99 + 100 + sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK; 101 + sram_csr |= SRAM_CSR_BYPASS_MASK; 102 + 103 + return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, 104 + GENMASK(2, 1), sram_csr); 105 + } 106 + EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass); 107 + 108 + /** 109 + * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values 110 + * @val: Store the calibration value 111 + * 112 + * Return: Returns 0 on success or error value on failure. 113 + */ 114 + int zynqmp_pm_get_ufs_calibration_values(u32 *val) 115 + { 116 + return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val); 117 + } 118 + EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
+46
drivers/firmware/xilinx/zynqmp.c
··· 1617 1617 } 1618 1618 1619 1619 /** 1620 + * zynqmp_pm_sec_read_reg - PM call to securely read from given offset 1621 + * of the node 1622 + * @node_id: Node Id of the device 1623 + * @offset: Offset to be used (20-bit) 1624 + * @ret_value: Output data read from the given offset after 1625 + * firmware access policy is successfully enforced 1626 + * 1627 + * Return: Returns 0 on success or error value on failure 1628 + */ 1629 + int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) 1630 + { 1631 + u32 ret_payload[PAYLOAD_ARG_CNT]; 1632 + u32 count = 1; 1633 + int ret; 1634 + 1635 + if (!ret_value) 1636 + return -EINVAL; 1637 + 1638 + ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG, 1639 + offset, count); 1640 + 1641 + *ret_value = ret_payload[1]; 1642 + 1643 + return ret; 1644 + } 1645 + EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg); 1646 + 1647 + /** 1648 + * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset 1649 + * of the node 1650 + * @node_id: Node Id of the device 1651 + * @offset: Offset to be used (20-bit) 1652 + * @mask: Mask to be used 1653 + * @value: Value to be written 1654 + * 1655 + * Return: Returns 0 on success or error value on failure 1656 + */ 1657 + int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask, 1658 + u32 value) 1659 + { 1660 + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG, 1661 + offset, mask, value); 1662 + } 1663 + EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg); 1664 + 1665 + /** 1620 1666 * zynqmp_pm_set_sd_config - PM call to set value of SD config registers 1621 1667 * @node: SD node ID 1622 1668 * @config: The config type of SD registers
+13
drivers/ufs/host/Kconfig
··· 154 154 155 155 Select this if you have UFS controller on Rockchip chipset. 156 156 If unsure, say N. 157 + 158 + config SCSI_UFS_AMD_VERSAL2 159 + tristate "AMD Versal Gen 2 UFS controller platform driver" 160 + depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST) 161 + help 162 + This selects the AMD Versal Gen 2 specific additions on top of 163 + the UFSHCD DWC and UFSHCD platform driver. UFS host on AMD 164 + Versal Gen 2 needs some vendor specific configurations like PHY 165 + and vendor specific register accesses before accessing the 166 + hardware. 167 + 168 + Select this if you have UFS controller on AMD Versal Gen 2 SoC. 169 + If unsure, say N.
+1
drivers/ufs/host/Makefile
··· 13 13 obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o 14 14 obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o 15 15 obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o 16 + obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o
+564
drivers/ufs/host/ufs-amd-versal2.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2025 Advanced Micro Devices, Inc. 4 + * 5 + * Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com> 6 + */ 7 + 8 + #include <linux/clk.h> 9 + #include <linux/delay.h> 10 + #include <linux/firmware/xlnx-zynqmp.h> 11 + #include <linux/irqreturn.h> 12 + #include <linux/module.h> 13 + #include <linux/of.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/reset.h> 16 + #include <ufs/unipro.h> 17 + 18 + #include "ufshcd-dwc.h" 19 + #include "ufshcd-pltfrm.h" 20 + #include "ufshci-dwc.h" 21 + 22 + /* PHY modes */ 23 + #define UFSHCD_DWC_PHY_MODE_ROM 0 24 + 25 + #define MPHY_FAST_RX_AFE_CAL BIT(2) 26 + #define MPHY_FW_CALIB_CFG_VAL BIT(8) 27 + 28 + #define MPHY_RX_OVRD_EN BIT(3) 29 + #define MPHY_RX_OVRD_VAL BIT(2) 30 + #define MPHY_RX_ACK_MASK BIT(0) 31 + 32 + #define TIMEOUT_MICROSEC 1000000 33 + 34 + struct ufs_versal2_host { 35 + struct ufs_hba *hba; 36 + struct reset_control *rstc; 37 + struct reset_control *rstphy; 38 + u32 phy_mode; 39 + unsigned long host_clk; 40 + u8 attcompval0; 41 + u8 attcompval1; 42 + u8 ctlecompval0; 43 + u8 ctlecompval1; 44 + }; 45 + 46 + static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val) 47 + { 48 + static struct ufshcd_dme_attr_val phy_write_attrs[] = { 49 + { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL }, 50 + { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL }, 51 + { UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL }, 52 + { UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL }, 53 + { UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL }, 54 + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } 55 + }; 56 + 57 + phy_write_attrs[0].mib_val = (u8)addr; 58 + phy_write_attrs[1].mib_val = (u8)(addr >> 8); 59 + phy_write_attrs[2].mib_val = (u8)val; 60 + phy_write_attrs[3].mib_val = (u8)(val >> 8); 61 + 62 + return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs)); 63 + } 64 + 65 + static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 
*val) 66 + { 67 + u32 mib_val; 68 + int ret; 69 + static struct ufshcd_dme_attr_val phy_read_attrs[] = { 70 + { UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL }, 71 + { UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL }, 72 + { UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL }, 73 + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } 74 + }; 75 + 76 + phy_read_attrs[0].mib_val = (u8)addr; 77 + phy_read_attrs[1].mib_val = (u8)(addr >> 8); 78 + 79 + ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs)); 80 + if (ret) 81 + return ret; 82 + 83 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val); 84 + if (ret) 85 + return ret; 86 + 87 + *val = mib_val; 88 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val); 89 + if (ret) 90 + return ret; 91 + 92 + *val |= (mib_val << 8); 93 + 94 + return 0; 95 + } 96 + 97 + static int ufs_versal2_enable_phy(struct ufs_hba *hba) 98 + { 99 + u32 offset, reg; 100 + int ret; 101 + 102 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0); 103 + if (ret) 104 + return ret; 105 + 106 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1); 107 + if (ret) 108 + return ret; 109 + 110 + /* Check Tx/Rx FSM states */ 111 + for (offset = 0; offset < 2; offset++) { 112 + u32 time_left, mibsel; 113 + 114 + time_left = TIMEOUT_MICROSEC; 115 + mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset)); 116 + do { 117 + ret = ufshcd_dme_get(hba, mibsel, &reg); 118 + if (ret) 119 + return ret; 120 + 121 + if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP || 122 + reg == TX_STATE_LSBURST) 123 + break; 124 + 125 + time_left--; 126 + usleep_range(1, 5); 127 + } while (time_left); 128 + 129 + if (!time_left) { 130 + dev_err(hba->dev, "Invalid Tx FSM state.\n"); 131 + return -ETIMEDOUT; 132 + } 133 + 134 + time_left = TIMEOUT_MICROSEC; 135 + mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset)); 136 + do { 137 + ret = ufshcd_dme_get(hba, mibsel, &reg); 138 + if (ret) 139 + return 
ret; 140 + 141 + if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP || 142 + reg == RX_STATE_LSBURST) 143 + break; 144 + 145 + time_left--; 146 + usleep_range(1, 5); 147 + } while (time_left); 148 + 149 + if (!time_left) { 150 + dev_err(hba->dev, "Invalid Rx FSM state.\n"); 151 + return -ETIMEDOUT; 152 + } 153 + } 154 + 155 + return 0; 156 + } 157 + 158 + static int ufs_versal2_setup_phy(struct ufs_hba *hba) 159 + { 160 + struct ufs_versal2_host *host = ufshcd_get_variant(hba); 161 + int ret; 162 + u32 reg; 163 + 164 + /* Bypass RX-AFE offset calibrations (ATT/CTLE) */ 165 + ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg); 166 + if (ret) 167 + return ret; 168 + 169 + reg |= MPHY_FAST_RX_AFE_CAL; 170 + ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg); 171 + if (ret) 172 + return ret; 173 + 174 + ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg); 175 + if (ret) 176 + return ret; 177 + 178 + reg |= MPHY_FAST_RX_AFE_CAL; 179 + ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg); 180 + if (ret) 181 + return ret; 182 + 183 + /* Program ATT and CTLE compensation values */ 184 + if (host->attcompval0) { 185 + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0); 186 + if (ret) 187 + return ret; 188 + } 189 + 190 + if (host->attcompval1) { 191 + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1); 192 + if (ret) 193 + return ret; 194 + } 195 + 196 + if (host->ctlecompval0) { 197 + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0); 198 + if (ret) 199 + return ret; 200 + } 201 + 202 + if (host->ctlecompval1) { 203 + ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1); 204 + if (ret) 205 + return ret; 206 + } 207 + 208 + ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg); 209 + if (ret) 210 + return ret; 211 + 212 + reg |= MPHY_FW_CALIB_CFG_VAL; 213 + ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg); 214 + if (ret) 215 + return 
ret; 216 + 217 + ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg); 218 + if (ret) 219 + return ret; 220 + 221 + reg |= MPHY_FW_CALIB_CFG_VAL; 222 + return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg); 223 + } 224 + 225 + static int ufs_versal2_phy_init(struct ufs_hba *hba) 226 + { 227 + struct ufs_versal2_host *host = ufshcd_get_variant(hba); 228 + u32 time_left; 229 + bool is_ready; 230 + int ret; 231 + static const struct ufshcd_dme_attr_val rmmi_attrs[] = { 232 + { UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL }, 233 + { UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL }, 234 + { UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL }, 235 + { UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL } 236 + }; 237 + 238 + /* Wait for Tx/Rx config_rdy */ 239 + time_left = TIMEOUT_MICROSEC; 240 + do { 241 + time_left--; 242 + ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready); 243 + if (ret) 244 + return ret; 245 + 246 + if (!is_ready) 247 + break; 248 + 249 + usleep_range(1, 5); 250 + } while (time_left); 251 + 252 + if (!time_left) { 253 + dev_err(hba->dev, "Tx/Rx configuration signal busy.\n"); 254 + return -ETIMEDOUT; 255 + } 256 + 257 + ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs)); 258 + if (ret) 259 + return ret; 260 + 261 + ret = reset_control_deassert(host->rstphy); 262 + if (ret) { 263 + dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret); 264 + return ret; 265 + } 266 + 267 + /* Wait for SRAM init done */ 268 + time_left = TIMEOUT_MICROSEC; 269 + do { 270 + time_left--; 271 + ret = zynqmp_pm_is_sram_init_done(&is_ready); 272 + if (ret) 273 + return ret; 274 + 275 + if (is_ready) 276 + break; 277 + 278 + usleep_range(1, 5); 279 + } while (time_left); 280 + 281 + if (!time_left) { 282 + dev_err(hba->dev, "SRAM initialization failed.\n"); 283 + return -ETIMEDOUT; 284 + } 285 + 286 + ret = ufs_versal2_setup_phy(hba); 287 + if (ret) 288 + return ret; 289 + 290 + return ufs_versal2_enable_phy(hba); 291 + } 292 + 293 
+ static int ufs_versal2_init(struct ufs_hba *hba) 294 + { 295 + struct ufs_versal2_host *host; 296 + struct device *dev = hba->dev; 297 + struct ufs_clk_info *clki; 298 + int ret; 299 + u32 cal; 300 + 301 + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 302 + if (!host) 303 + return -ENOMEM; 304 + 305 + host->hba = hba; 306 + ufshcd_set_variant(hba, host); 307 + 308 + host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM; 309 + 310 + list_for_each_entry(clki, &hba->clk_list_head, list) { 311 + if (!strcmp(clki->name, "core")) 312 + host->host_clk = clk_get_rate(clki->clk); 313 + } 314 + 315 + host->rstc = devm_reset_control_get_exclusive(dev, "host"); 316 + if (IS_ERR(host->rstc)) { 317 + dev_err(dev, "failed to get reset ctrl: host\n"); 318 + return PTR_ERR(host->rstc); 319 + } 320 + 321 + host->rstphy = devm_reset_control_get_exclusive(dev, "phy"); 322 + if (IS_ERR(host->rstphy)) { 323 + dev_err(dev, "failed to get reset ctrl: phy\n"); 324 + return PTR_ERR(host->rstphy); 325 + } 326 + 327 + ret = reset_control_assert(host->rstc); 328 + if (ret) { 329 + dev_err(hba->dev, "host reset assert failed, err = %d\n", ret); 330 + return ret; 331 + } 332 + 333 + ret = reset_control_assert(host->rstphy); 334 + if (ret) { 335 + dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret); 336 + return ret; 337 + } 338 + 339 + ret = zynqmp_pm_set_sram_bypass(); 340 + if (ret) { 341 + dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret); 342 + return ret; 343 + } 344 + 345 + ret = reset_control_deassert(host->rstc); 346 + if (ret) 347 + dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret); 348 + 349 + ret = zynqmp_pm_get_ufs_calibration_values(&cal); 350 + if (ret) { 351 + dev_err(dev, "failed to read calibration values\n"); 352 + return ret; 353 + } 354 + 355 + host->attcompval0 = (u8)cal; 356 + host->attcompval1 = (u8)(cal >> 8); 357 + host->ctlecompval0 = (u8)(cal >> 16); 358 + host->ctlecompval1 = (u8)(cal >> 24); 359 + 360 + hba->quirks |= 
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING; 361 + 362 + return 0; 363 + } 364 + 365 + static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba, 366 + enum ufs_notify_change_status status) 367 + { 368 + int ret = 0; 369 + 370 + if (status == PRE_CHANGE) { 371 + ret = ufs_versal2_phy_init(hba); 372 + if (ret) 373 + dev_err(hba->dev, "Phy init failed (%d)\n", ret); 374 + } 375 + 376 + return ret; 377 + } 378 + 379 + static int ufs_versal2_link_startup_notify(struct ufs_hba *hba, 380 + enum ufs_notify_change_status status) 381 + { 382 + struct ufs_versal2_host *host = ufshcd_get_variant(hba); 383 + int ret = 0; 384 + 385 + switch (status) { 386 + case PRE_CHANGE: 387 + if (host->host_clk) 388 + ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV); 389 + 390 + break; 391 + case POST_CHANGE: 392 + ret = ufshcd_dwc_link_startup_notify(hba, status); 393 + break; 394 + default: 395 + ret = -EINVAL; 396 + break; 397 + } 398 + 399 + return ret; 400 + } 401 + 402 + static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req) 403 + { 404 + u32 time_left, reg, lane; 405 + int ret; 406 + 407 + for (lane = 0; lane < activelanes; lane++) { 408 + time_left = TIMEOUT_MICROSEC; 409 + ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg); 410 + if (ret) 411 + return ret; 412 + 413 + reg |= MPHY_RX_OVRD_EN; 414 + if (rx_req) 415 + reg |= MPHY_RX_OVRD_VAL; 416 + else 417 + reg &= ~MPHY_RX_OVRD_VAL; 418 + 419 + ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg); 420 + if (ret) 421 + return ret; 422 + 423 + do { 424 + ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg); 425 + if (ret) 426 + return ret; 427 + 428 + reg &= MPHY_RX_ACK_MASK; 429 + if (reg == rx_req) 430 + break; 431 + 432 + time_left--; 433 + usleep_range(1, 5); 434 + } while (time_left); 435 + 436 + if (!time_left) { 437 + dev_err(hba->dev, "Invalid Rx Ack value.\n"); 438 + return -ETIMEDOUT; 439 + } 440 + } 441 + 442 + return 0; 443 + } 444 + 445 + static 
int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, 446 + const struct ufs_pa_layer_attr *dev_max_params, 447 + struct ufs_pa_layer_attr *dev_req_params) 448 + { 449 + struct ufs_versal2_host *host = ufshcd_get_variant(hba); 450 + u32 lane, reg, rate = 0; 451 + int ret = 0; 452 + 453 + if (status == PRE_CHANGE) { 454 + memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr)); 455 + 456 + /* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */ 457 + if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 && 458 + !host->ctlecompval1) { 459 + dev_req_params->pwr_rx = SLOW_MODE; 460 + dev_req_params->pwr_tx = SLOW_MODE; 461 + return 0; 462 + } 463 + 464 + if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE) 465 + return 0; 466 + 467 + if (dev_req_params->hs_rate == PA_HS_MODE_B) 468 + rate = 1; 469 + 470 + /* Select the rate */ 471 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate); 472 + if (ret) 473 + return ret; 474 + 475 + ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1); 476 + if (ret) 477 + return ret; 478 + 479 + ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1); 480 + if (ret) 481 + return ret; 482 + 483 + ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0); 484 + if (ret) 485 + return ret; 486 + 487 + /* Remove rx_req override */ 488 + for (lane = 0; lane < dev_req_params->lane_tx; lane++) { 489 + ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg); 490 + if (ret) 491 + return ret; 492 + 493 + reg &= ~MPHY_RX_OVRD_EN; 494 + ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg); 495 + if (ret) 496 + return ret; 497 + } 498 + 499 + if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2) 500 + ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx, 501 + PA_INITIAL_ADAPT); 502 + } 503 + 504 + return ret; 505 + } 506 + 507 + static struct ufs_hba_variant_ops 
ufs_versal2_hba_vops = { 508 + .name = "ufs-versal2-pltfm", 509 + .init = ufs_versal2_init, 510 + .link_startup_notify = ufs_versal2_link_startup_notify, 511 + .hce_enable_notify = ufs_versal2_hce_enable_notify, 512 + .pwr_change_notify = ufs_versal2_pwr_change_notify, 513 + }; 514 + 515 + static const struct of_device_id ufs_versal2_pltfm_match[] = { 516 + { 517 + .compatible = "amd,versal2-ufs", 518 + .data = &ufs_versal2_hba_vops, 519 + }, 520 + { }, 521 + }; 522 + MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match); 523 + 524 + static int ufs_versal2_probe(struct platform_device *pdev) 525 + { 526 + struct device *dev = &pdev->dev; 527 + int ret; 528 + 529 + /* Perform generic probe */ 530 + ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops); 531 + if (ret) 532 + dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret); 533 + 534 + return ret; 535 + } 536 + 537 + static void ufs_versal2_remove(struct platform_device *pdev) 538 + { 539 + struct ufs_hba *hba = platform_get_drvdata(pdev); 540 + 541 + pm_runtime_get_sync(&(pdev)->dev); 542 + ufshcd_remove(hba); 543 + } 544 + 545 + static const struct dev_pm_ops ufs_versal2_pm_ops = { 546 + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) 547 + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) 548 + }; 549 + 550 + static struct platform_driver ufs_versal2_pltfm = { 551 + .probe = ufs_versal2_probe, 552 + .remove = ufs_versal2_remove, 553 + .driver = { 554 + .name = "ufshcd-versal2", 555 + .pm = &ufs_versal2_pm_ops, 556 + .of_match_table = of_match_ptr(ufs_versal2_pltfm_match), 557 + }, 558 + }; 559 + 560 + module_platform_driver(ufs_versal2_pltfm); 561 + 562 + MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>"); 563 + MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver"); 564 + MODULE_LICENSE("GPL");
+46
drivers/ufs/host/ufshcd-dwc.h
··· 12 12 13 13 #include <ufs/ufshcd.h> 14 14 15 + /* RMMI Attributes */ 16 + #define CBREFCLKCTRL2 0x8132 17 + #define CBCRCTRL 0x811F 18 + #define CBC10DIRECTCONF2 0x810E 19 + #define CBRATESEL 0x8114 20 + #define CBCREGADDRLSB 0x8116 21 + #define CBCREGADDRMSB 0x8117 22 + #define CBCREGWRLSB 0x8118 23 + #define CBCREGWRMSB 0x8119 24 + #define CBCREGRDLSB 0x811A 25 + #define CBCREGRDMSB 0x811B 26 + #define CBCREGRDWRSEL 0x811C 27 + 28 + #define CBREFREFCLK_GATE_OVR_EN BIT(7) 29 + 30 + /* M-PHY Attributes */ 31 + #define MTX_FSM_STATE 0x41 32 + #define MRX_FSM_STATE 0xC1 33 + 34 + /* M-PHY registers */ 35 + #define RX_OVRD_IN_1(n) (0x3006 + ((n) * 0x100)) 36 + #define RX_PCS_OUT(n) (0x300F + ((n) * 0x100)) 37 + #define FAST_FLAGS(n) (0x401C + ((n) * 0x100)) 38 + #define RX_AFE_ATT_IDAC(n) (0x4000 + ((n) * 0x100)) 39 + #define RX_AFE_CTLE_IDAC(n) (0x4001 + ((n) * 0x100)) 40 + #define FW_CALIB_CCFG(n) (0x404D + ((n) * 0x100)) 41 + 42 + /* Tx/Rx FSM state */ 43 + enum rx_fsm_state { 44 + RX_STATE_DISABLED = 0, 45 + RX_STATE_HIBERN8 = 1, 46 + RX_STATE_SLEEP = 2, 47 + RX_STATE_STALL = 3, 48 + RX_STATE_LSBURST = 4, 49 + RX_STATE_HSBURST = 5, 50 + }; 51 + 52 + enum tx_fsm_state { 53 + TX_STATE_DISABLED = 0, 54 + TX_STATE_HIBERN8 = 1, 55 + TX_STATE_SLEEP = 2, 56 + TX_STATE_STALL = 3, 57 + TX_STATE_LSBURST = 4, 58 + TX_STATE_HSBURST = 5, 59 + }; 60 + 15 61 struct ufshcd_dme_attr_val { 16 62 u32 attr_sel; 17 63 u32 mib_val;
+38
include/linux/firmware/xlnx-zynqmp-ufs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Firmware layer for UFS APIs. 4 + * 5 + * Copyright (c) 2025 Advanced Micro Devices, Inc. 6 + */ 7 + 8 + #ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__ 9 + #define __FIRMWARE_XLNX_ZYNQMP_UFS_H__ 10 + 11 + #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) 12 + int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready); 13 + int zynqmp_pm_is_sram_init_done(bool *is_done); 14 + int zynqmp_pm_set_sram_bypass(void); 15 + int zynqmp_pm_get_ufs_calibration_values(u32 *val); 16 + #else 17 + static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready) 18 + { 19 + return -ENODEV; 20 + } 21 + 22 + static inline int zynqmp_pm_is_sram_init_done(bool *is_done) 23 + { 24 + return -ENODEV; 25 + } 26 + 27 + static inline int zynqmp_pm_set_sram_bypass(void) 28 + { 29 + return -ENODEV; 30 + } 31 + 32 + static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val) 33 + { 34 + return -ENODEV; 35 + } 36 + #endif 37 + 38 + #endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */
+16
include/linux/firmware/xlnx-zynqmp.h
··· 16 16 #include <linux/types.h> 17 17 18 18 #include <linux/err.h> 19 + #include <linux/firmware/xlnx-zynqmp-ufs.h> 19 20 20 21 #define ZYNQMP_PM_VERSION_MAJOR 1 21 22 #define ZYNQMP_PM_VERSION_MINOR 0 ··· 242 241 IOCTL_GET_FEATURE_CONFIG = 27, 243 242 /* IOCTL for Secure Read/Write Interface */ 244 243 IOCTL_READ_REG = 28, 244 + IOCTL_MASK_WRITE_REG = 29, 245 245 /* Dynamic SD/GEM configuration */ 246 246 IOCTL_SET_SD_CONFIG = 30, 247 247 IOCTL_SET_GEM_CONFIG = 31, ··· 621 619 int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); 622 620 int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); 623 621 int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); 622 + int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value); 623 + int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, 624 + u32 mask, u32 value); 624 625 int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); 625 626 int zynqmp_pm_force_pwrdwn(const u32 target, 626 627 const enum zynqmp_pm_request_ack ack); ··· 917 912 const bool set_addr, 918 913 const u64 address, 919 914 const enum zynqmp_pm_request_ack ack) 915 + { 916 + return -ENODEV; 917 + } 918 + 919 + static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value) 920 + { 921 + return -ENODEV; 922 + } 923 + 924 + static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, 925 + u32 mask, u32 value) 920 926 { 921 927 return -ENODEV; 922 928 }
+1
include/ufs/unipro.h
··· 179 179 #define VS_POWERSTATE 0xD083 180 180 #define VS_MPHYCFGUPDT 0xD085 181 181 #define VS_DEBUGOMC 0xD09E 182 + #define VS_MPHYDISABLE 0xD0C1 182 183 183 184 #define PA_GRANULARITY_MIN_VAL 1 184 185 #define PA_GRANULARITY_MAX_VAL 6