Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'phy-for-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy

Pull phy updates from Vinod Koul:
"New Support
- Samsung Exynos gs101 drd combo phy
- Qualcomm SC8180x USB uniphy, IPQ9574 QMP PCIe phy
- Airoha EN7581 PCIe phy
- Freescale i.MX8Q HSIO SerDes phy
- Starfive jh7110 dphy tx

Updates:
- Resume support for j721e-wiz driver
- Updates to Exynos usbdrd driver
- Support for optional power domains in g12a usb2-phy driver
- Debugfs support and updates to zynqmp driver"

* tag 'phy-for-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy: (56 commits)
phy: airoha: Add dtime and Rx AEQ IO registers
dt-bindings: phy: airoha: Add dtime and Rx AEQ IO registers
dt-bindings: phy: rockchip-emmc-phy: Convert to dtschema
dt-bindings: phy: qcom,qmp-usb: fix spelling error
phy: exynos5-usbdrd: support Exynos USBDRD 3.1 combo phy (HS & SS)
phy: exynos5-usbdrd: convert Vbus supplies to regulator_bulk
phy: exynos5-usbdrd: convert (phy) register access clock to clk_bulk
phy: exynos5-usbdrd: convert core clocks to clk_bulk
phy: exynos5-usbdrd: support isolating HS and SS ports independently
dt-bindings: phy: samsung,usb3-drd-phy: add gs101 compatible
phy: core: Fix documentation of of_phy_get
phy: starfive: Correct the dphy configure process
phy: zynqmp: Add debugfs support
phy: zynqmp: Take the phy mutex in xlate
phy: zynqmp: Only wait for PLL lock "primary" instances
phy: zynqmp: Store instance instead of type
phy: zynqmp: Enable reference clock correctly
phy: cadence-torrent: Check return value on register read
phy: Fix the cacography in phy-exynos5250-usb2.c
phy: phy-rockchip-samsung-hdptx: Select CONFIG_MFD_SYSCON
...

+4926 -455
+69
Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/airoha,en7581-pcie-phy.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Airoha EN7581 PCI-Express PHY 8 + 9 + maintainers: 10 + - Lorenzo Bianconi <lorenzo@kernel.org> 11 + 12 + description: 13 + The PCIe PHY supports physical layer functionality for PCIe Gen2/Gen3 port. 14 + 15 + properties: 16 + compatible: 17 + const: airoha,en7581-pcie-phy 18 + 19 + reg: 20 + items: 21 + - description: PCIE analog base address 22 + - description: PCIE lane0 base address 23 + - description: PCIE lane1 base address 24 + - description: PCIE lane0 detection time base address 25 + - description: PCIE lane1 detection time base address 26 + - description: PCIE Rx AEQ base address 27 + 28 + reg-names: 29 + items: 30 + - const: csr-2l 31 + - const: pma0 32 + - const: pma1 33 + - const: p0-xr-dtime 34 + - const: p1-xr-dtime 35 + - const: rx-aeq 36 + 37 + "#phy-cells": 38 + const: 0 39 + 40 + required: 41 + - compatible 42 + - reg 43 + - reg-names 44 + - "#phy-cells" 45 + 46 + additionalProperties: false 47 + 48 + examples: 49 + - | 50 + #include <dt-bindings/phy/phy.h> 51 + 52 + soc { 53 + #address-cells = <2>; 54 + #size-cells = <2>; 55 + 56 + phy@11e80000 { 57 + compatible = "airoha,en7581-pcie-phy"; 58 + #phy-cells = <0>; 59 + reg = <0x0 0x1fa5a000 0x0 0xfff>, 60 + <0x0 0x1fa5b000 0x0 0xfff>, 61 + <0x0 0x1fa5c000 0x0 0xfff>, 62 + <0x0 0x1fc10044 0x0 0x4>, 63 + <0x0 0x1fc30044 0x0 0x4>, 64 + <0x0 0x1fc15030 0x0 0x104>; 65 + reg-names = "csr-2l", "pma0", "pma1", 66 + "p0-xr-dtime", "p1-xr-dtime", 67 + "rx-aeq"; 68 + }; 69 + };
+3
Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
··· 41 41 Phandle to a regulator that provides power to the PHY. This 42 42 regulator will be managed during the PHY power on/off sequence. 43 43 44 + power-domains: 45 + maxItems: 1 46 + 44 47 required: 45 48 - compatible 46 49 - reg
+164
Documentation/devicetree/bindings/phy/fsl,imx8qm-hsio.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/fsl,imx8qm-hsio.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Freescale i.MX8QM SoC series High Speed IO(HSIO) SERDES PHY 8 + 9 + maintainers: 10 + - Richard Zhu <hongxing.zhu@nxp.com> 11 + 12 + properties: 13 + compatible: 14 + enum: 15 + - fsl,imx8qm-hsio 16 + - fsl,imx8qxp-hsio 17 + reg: 18 + items: 19 + - description: Base address and length of the PHY block 20 + - description: HSIO control and status registers(CSR) of the PHY 21 + - description: HSIO CSR of the controller bound to the PHY 22 + - description: HSIO CSR for MISC 23 + 24 + reg-names: 25 + items: 26 + - const: reg 27 + - const: phy 28 + - const: ctrl 29 + - const: misc 30 + 31 + "#phy-cells": 32 + const: 3 33 + description: 34 + The first defines lane index. 35 + The second defines the type of the PHY refer to the include phy.h. 36 + The third defines the controller index, indicated which controller 37 + is bound to the lane. 38 + 39 + clocks: 40 + minItems: 5 41 + maxItems: 14 42 + 43 + clock-names: 44 + minItems: 5 45 + maxItems: 14 46 + 47 + fsl,hsio-cfg: 48 + description: | 49 + Specifies the use case of the HSIO module in the hardware design. 50 + Regarding the design of i.MX8QM HSIO subsystem, HSIO module can be 51 + configured as following three use cases. 
52 + +---------------------------------------+ 53 + | | i.MX8QM | 54 + |------------------|--------------------| 55 + | | Lane0| Lane1| Lane2| 56 + |------------------|------|------|------| 57 + | pciea-x2-sata | PCIEA| PCIEA| SATA | 58 + |------------------|------|------|------| 59 + | pciea-x2-pcieb | PCIEA| PCIEA| PCIEB| 60 + |------------------|------|------|------| 61 + | pciea-pcieb-sata | PCIEA| PCIEB| SATA | 62 + +---------------------------------------+ 63 + $ref: /schemas/types.yaml#/definitions/string 64 + enum: [ pciea-x2-sata, pciea-x2-pcieb, pciea-pcieb-sata] 65 + default: pciea-pcieb-sata 66 + 67 + fsl,refclk-pad-mode: 68 + description: 69 + Specifies the mode of the refclk pad used. INPUT(PHY refclock is 70 + provided externally via the refclk pad) or OUTPUT(PHY refclock is 71 + derived from SoC internal source and provided on the refclk pad). 72 + This property not exists means unused(PHY refclock is derived from 73 + SoC internal source). 74 + $ref: /schemas/types.yaml#/definitions/string 75 + enum: [ input, output, unused ] 76 + default: unused 77 + 78 + power-domains: 79 + minItems: 1 80 + maxItems: 2 81 + 82 + required: 83 + - compatible 84 + - reg 85 + - reg-names 86 + - "#phy-cells" 87 + - clocks 88 + - clock-names 89 + - fsl,hsio-cfg 90 + 91 + allOf: 92 + - if: 93 + properties: 94 + compatible: 95 + contains: 96 + enum: 97 + - fsl,imx8qxp-hsio 98 + then: 99 + properties: 100 + clock-names: 101 + items: 102 + - const: pclk0 103 + - const: apb_pclk0 104 + - const: phy0_crr 105 + - const: ctl0_crr 106 + - const: misc_crr 107 + power-domains: 108 + maxItems: 1 109 + 110 + - if: 111 + properties: 112 + compatible: 113 + contains: 114 + enum: 115 + - fsl,imx8qm-hsio 116 + then: 117 + properties: 118 + clock-names: 119 + items: 120 + - const: pclk0 121 + - const: pclk1 122 + - const: apb_pclk0 123 + - const: apb_pclk1 124 + - const: pclk2 125 + - const: epcs_tx 126 + - const: epcs_rx 127 + - const: apb_pclk2 128 + - const: phy0_crr 129 + - const: 
phy1_crr 130 + - const: ctl0_crr 131 + - const: ctl1_crr 132 + - const: ctl2_crr 133 + - const: misc_crr 134 + power-domains: 135 + minItems: 2 136 + 137 + additionalProperties: false 138 + 139 + examples: 140 + - | 141 + #include <dt-bindings/clock/imx8-clock.h> 142 + #include <dt-bindings/clock/imx8-lpcg.h> 143 + #include <dt-bindings/firmware/imx/rsrc.h> 144 + #include <dt-bindings/phy/phy-imx8-pcie.h> 145 + 146 + phy@5f1a0000 { 147 + compatible = "fsl,imx8qxp-hsio"; 148 + reg = <0x5f1a0000 0x10000>, 149 + <0x5f120000 0x10000>, 150 + <0x5f140000 0x10000>, 151 + <0x5f160000 0x10000>; 152 + reg-names = "reg", "phy", "ctrl", "misc"; 153 + clocks = <&phyx1_lpcg IMX_LPCG_CLK_0>, 154 + <&phyx1_lpcg IMX_LPCG_CLK_4>, 155 + <&phyx1_crr1_lpcg IMX_LPCG_CLK_4>, 156 + <&pcieb_crr3_lpcg IMX_LPCG_CLK_4>, 157 + <&misc_crr5_lpcg IMX_LPCG_CLK_4>; 158 + clock-names = "pclk0", "apb_pclk0", "phy0_crr", "ctl0_crr", "misc_crr"; 159 + power-domains = <&pd IMX_SC_R_SERDES_1>; 160 + #phy-cells = <3>; 161 + fsl,hsio-cfg = "pciea-pcieb-sata"; 162 + fsl,refclk-pad-mode = "input"; 163 + }; 164 + ...
+6
Documentation/devicetree/bindings/phy/marvell,armada-cp110-utmi-phy.yaml
··· 41 41 Phandle to the system controller node 42 42 $ref: /schemas/types.yaml#/definitions/phandle 43 43 44 + swap-dx-lanes: 45 + $ref: /schemas/types.yaml#/definitions/uint32-array 46 + description: | 47 + Specifies the ports which will swap the differential-pair (D+/D-), 48 + default is not-swapped. 49 + 44 50 # Required child nodes: 45 51 46 52 patternProperties:
+2
Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
··· 19 19 - qcom,ipq6018-qmp-pcie-phy 20 20 - qcom,ipq8074-qmp-gen3-pcie-phy 21 21 - qcom,ipq8074-qmp-pcie-phy 22 + - qcom,ipq9574-qmp-gen3x1-pcie-phy 23 + - qcom,ipq9574-qmp-gen3x2-pcie-phy 22 24 23 25 reg: 24 26 items:
+1 -6
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
··· 91 91 "#clock-cells": true 92 92 93 93 clock-output-names: 94 - minItems: 1 95 - maxItems: 2 94 + maxItems: 1 96 95 97 96 "#phy-cells": 98 97 const: 0 ··· 221 222 - qcom,sm8650-qmp-gen4x2-pcie-phy 222 223 then: 223 224 properties: 224 - clock-output-names: 225 - minItems: 2 226 225 "#clock-cells": 227 226 const: 1 228 227 else: 229 228 properties: 230 - clock-output-names: 231 - maxItems: 1 232 229 "#clock-cells": 233 230 const: 0 234 231
+4 -1
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
··· 20 20 - qcom,ipq8074-qmp-usb3-phy 21 21 - qcom,ipq9574-qmp-usb3-phy 22 22 - qcom,msm8996-qmp-usb3-phy 23 - - com,qdu1000-qmp-usb3-uni-phy 23 + - qcom,qdu1000-qmp-usb3-uni-phy 24 24 - qcom,sa8775p-qmp-usb3-uni-phy 25 + - qcom,sc8180x-qmp-usb3-uni-phy 25 26 - qcom,sc8280xp-qmp-usb3-uni-phy 26 27 - qcom,sdm845-qmp-usb3-uni-phy 27 28 - qcom,sdx55-qmp-usb3-uni-phy ··· 113 112 enum: 114 113 - qcom,qdu1000-qmp-usb3-uni-phy 115 114 - qcom,sa8775p-qmp-usb3-uni-phy 115 + - qcom,sc8180x-qmp-usb3-uni-phy 116 116 - qcom,sc8280xp-qmp-usb3-uni-phy 117 117 - qcom,sm8150-qmp-usb3-uni-phy 118 118 - qcom,sm8250-qmp-usb3-uni-phy ··· 154 152 contains: 155 153 enum: 156 154 - qcom,sa8775p-qmp-usb3-uni-phy 155 + - qcom,sc8180x-qmp-usb3-uni-phy 157 156 - qcom,sc8280xp-qmp-usb3-uni-phy 158 157 - qcom,x1e80100-qmp-usb3-uni-phy 159 158 then:
+2
Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
··· 15 15 contains: 16 16 enum: 17 17 - qcom,usb-hs-phy-apq8064 18 + - qcom,usb-hs-phy-msm8660 18 19 - qcom,usb-hs-phy-msm8960 19 20 then: 20 21 properties: ··· 42 41 - enum: 43 42 - qcom,usb-hs-phy-apq8064 44 43 - qcom,usb-hs-phy-msm8226 44 + - qcom,usb-hs-phy-msm8660 45 45 - qcom,usb-hs-phy-msm8916 46 46 - qcom,usb-hs-phy-msm8960 47 47 - qcom,usb-hs-phy-msm8974
+64
Documentation/devicetree/bindings/phy/rockchip,rk3399-emmc-phy.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/rockchip,rk3399-emmc-phy.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Rockchip EMMC PHY 8 + 9 + maintainers: 10 + - Heiko Stuebner <heiko@sntech.de> 11 + 12 + properties: 13 + compatible: 14 + const: rockchip,rk3399-emmc-phy 15 + 16 + reg: 17 + maxItems: 1 18 + 19 + clocks: 20 + maxItems: 1 21 + 22 + clock-names: 23 + const: emmcclk 24 + 25 + drive-impedance-ohm: 26 + $ref: /schemas/types.yaml#/definitions/uint32 27 + description: 28 + Specifies the drive impedance in Ohm. 29 + enum: [33, 40, 50, 66, 100] 30 + default: 50 31 + 32 + rockchip,enable-strobe-pulldown: 33 + type: boolean 34 + description: | 35 + Enable internal pull-down for the strobe 36 + line. If not set, pull-down is not used. 37 + 38 + rockchip,output-tapdelay-select: 39 + $ref: /schemas/types.yaml#/definitions/uint32 40 + description: 41 + Specifies the phyctrl_otapdlysec register. 42 + default: 0x4 43 + maximum: 0xf 44 + 45 + "#phy-cells": 46 + const: 0 47 + 48 + required: 49 + - compatible 50 + - reg 51 + - "#phy-cells" 52 + 53 + additionalProperties: false 54 + 55 + examples: 56 + - | 57 + phy@f780 { 58 + compatible = "rockchip,rk3399-emmc-phy"; 59 + reg = <0xf780 0x20>; 60 + clocks = <&sdhci>; 61 + clock-names = "emmcclk"; 62 + drive-impedance-ohm = <50>; 63 + #phy-cells = <0>; 64 + };
-43
Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
··· 1 - Rockchip EMMC PHY 2 - ----------------------- 3 - 4 - Required properties: 5 - - compatible: rockchip,rk3399-emmc-phy 6 - - #phy-cells: must be 0 7 - - reg: PHY register address offset and length in "general 8 - register files" 9 - 10 - Optional properties: 11 - - clock-names: Should contain "emmcclk". Although this is listed as optional 12 - (because most boards can get basic functionality without having 13 - access to it), it is strongly suggested. 14 - See ../clock/clock-bindings.txt for details. 15 - - clocks: Should have a phandle to the card clock exported by the SDHCI driver. 16 - - drive-impedance-ohm: Specifies the drive impedance in Ohm. 17 - Possible values are 33, 40, 50, 66 and 100. 18 - If not set, the default value of 50 will be applied. 19 - - rockchip,enable-strobe-pulldown: Enable internal pull-down for the strobe 20 - line. If not set, pull-down is not used. 21 - - rockchip,output-tapdelay-select: Specifies the phyctrl_otapdlysec register. 22 - If not set, the register defaults to 0x4. 23 - Maximum value 0xf. 24 - 25 - Example: 26 - 27 - 28 - grf: syscon@ff770000 { 29 - compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd"; 30 - #address-cells = <1>; 31 - #size-cells = <1>; 32 - 33 - ... 34 - 35 - emmcphy: phy@f780 { 36 - compatible = "rockchip,rk3399-emmc-phy"; 37 - reg = <0xf780 0x20>; 38 - clocks = <&sdhci>; 39 - clock-names = "emmcclk"; 40 - drive-impedance-ohm = <50>; 41 - #phy-cells = <0>; 42 - }; 43 - };
+75 -2
Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
··· 25 25 properties: 26 26 compatible: 27 27 enum: 28 + - google,gs101-usb31drd-phy 28 29 - samsung,exynos5250-usbdrd-phy 29 30 - samsung,exynos5420-usbdrd-phy 30 31 - samsung,exynos5433-usbdrd-phy ··· 58 57 the OF graph bindings specified. 59 58 60 59 reg: 61 - maxItems: 1 60 + minItems: 1 61 + maxItems: 3 62 + 63 + reg-names: 64 + minItems: 1 65 + items: 66 + - const: phy 67 + - const: pcs 68 + - const: pma 62 69 63 70 samsung,pmu-syscon: 64 71 $ref: /schemas/types.yaml#/definitions/phandle ··· 81 72 description: 82 73 VBUS Boost 5V power source. 83 74 75 + pll-supply: 76 + description: Power supply for the USB PLL. 77 + dvdd-usb20-supply: 78 + description: DVDD power supply for the USB 2.0 phy. 79 + vddh-usb20-supply: 80 + description: VDDh power supply for the USB 2.0 phy. 81 + vdd33-usb20-supply: 82 + description: 3.3V power supply for the USB 2.0 phy. 83 + vdda-usbdp-supply: 84 + description: VDDa power supply for the USB DP phy. 85 + vddh-usbdp-supply: 86 + description: VDDh power supply for the USB DP phy. 
87 + 84 88 required: 85 89 - compatible 86 90 - clocks ··· 103 81 - samsung,pmu-syscon 104 82 105 83 allOf: 84 + - if: 85 + properties: 86 + compatible: 87 + contains: 88 + const: google,gs101-usb31drd-phy 89 + then: 90 + properties: 91 + clocks: 92 + items: 93 + - description: Gate of main PHY clock 94 + - description: Gate of PHY reference clock 95 + - description: Gate of control interface AXI clock 96 + - description: Gate of control interface APB clock 97 + - description: Gate of SCL APB clock 98 + clock-names: 99 + items: 100 + - const: phy 101 + - const: ref 102 + - const: ctrl_aclk 103 + - const: ctrl_pclk 104 + - const: scl_pclk 105 + reg: 106 + minItems: 3 107 + reg-names: 108 + minItems: 3 109 + required: 110 + - reg-names 111 + - pll-supply 112 + - dvdd-usb20-supply 113 + - vddh-usb20-supply 114 + - vdd33-usb20-supply 115 + - vdda-usbdp-supply 116 + - vddh-usbdp-supply 117 + 106 118 - if: 107 119 properties: 108 120 compatible: ··· 156 100 - const: phy_utmi 157 101 - const: phy_pipe 158 102 - const: itp 159 - else: 103 + reg: 104 + maxItems: 1 105 + reg-names: 106 + maxItems: 1 107 + 108 + - if: 109 + properties: 110 + compatible: 111 + contains: 112 + enum: 113 + - samsung,exynos5250-usbdrd-phy 114 + - samsung,exynos5420-usbdrd-phy 115 + - samsung,exynos850-usbdrd-phy 116 + then: 160 117 properties: 161 118 clocks: 162 119 minItems: 2 ··· 178 109 items: 179 110 - const: phy 180 111 - const: ref 112 + reg: 113 + maxItems: 1 114 + reg-names: 115 + maxItems: 1 181 116 182 117 additionalProperties: false 183 118
+68
Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/starfive,jh7110-dphy-tx.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Starfive SoC MIPI D-PHY Tx Controller 8 + 9 + maintainers: 10 + - Keith Zhao <keith.zhao@starfivetech.com> 11 + - Shengyang Chen <shengyang.chen@starfivetech.com> 12 + 13 + description: 14 + The Starfive SoC uses the MIPI DSI D-PHY based on M31 IP to transfer 15 + DSI data. 16 + 17 + properties: 18 + compatible: 19 + const: starfive,jh7110-dphy-tx 20 + 21 + reg: 22 + maxItems: 1 23 + 24 + clocks: 25 + maxItems: 1 26 + 27 + clock-names: 28 + items: 29 + - const: txesc 30 + 31 + resets: 32 + items: 33 + - description: MIPITX_DPHY_SYS reset 34 + 35 + reset-names: 36 + items: 37 + - const: sys 38 + 39 + power-domains: 40 + maxItems: 1 41 + 42 + "#phy-cells": 43 + const: 0 44 + 45 + required: 46 + - compatible 47 + - reg 48 + - clocks 49 + - clock-names 50 + - resets 51 + - reset-names 52 + - power-domains 53 + - "#phy-cells" 54 + 55 + additionalProperties: false 56 + 57 + examples: 58 + - | 59 + phy@295e0000 { 60 + compatible = "starfive,jh7110-dphy-tx"; 61 + reg = <0x295e0000 0x10000>; 62 + clocks = <&voutcrg 14>; 63 + clock-names = "txesc"; 64 + resets = <&syscrg 10>; 65 + reset-names = "sys"; 66 + power-domains = <&aon_syscon 0>; 67 + #phy-cells = <0>; 68 + };
+13 -3
Documentation/devicetree/bindings/soc/rockchip/grf.yaml
··· 176 176 Documentation/devicetree/bindings/phy/rockchip-pcie-phy.txt 177 177 178 178 patternProperties: 179 - "phy@[0-9a-f]+$": 180 - description: 181 - Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt 179 + "^phy@[0-9a-f]+$": 180 + type: object 181 + $ref: /schemas/phy/rockchip,rk3399-emmc-phy.yaml# 182 + unevaluatedProperties: false 182 183 183 184 - if: 184 185 properties: ··· 290 289 <&cru PCLK_VIO_GRF>; 291 290 clock-names = "dphy-ref", "dphy-cfg", "grf"; 292 291 power-domains = <&power RK3399_PD_VIO>; 292 + #phy-cells = <0>; 293 + }; 294 + 295 + phy@f780 { 296 + compatible = "rockchip,rk3399-emmc-phy"; 297 + reg = <0xf780 0x20>; 298 + clocks = <&sdhci>; 299 + clock-names = "emmcclk"; 300 + drive-impedance-ohm = <50>; 293 301 #phy-cells = <0>; 294 302 }; 295 303
+15
MAINTAINERS
··· 702 702 F: Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml 703 703 F: drivers/net/ethernet/mediatek/airoha_eth.c 704 704 705 + AIROHA PCIE PHY DRIVER 706 + M: Lorenzo Bianconi <lorenzo@kernel.org> 707 + L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 708 + S: Maintained 709 + F: Documentation/devicetree/bindings/phy/airoha,en7581-pcie-phy.yaml 710 + F: drivers/phy/phy-airoha-pcie-regs.h 711 + F: drivers/phy/phy-airoha-pcie.c 712 + 705 713 AIROHA SPI SNFI DRIVER 706 714 M: Lorenzo Bianconi <lorenzo@kernel.org> 707 715 M: Ray Liu <ray.liu@airoha.com> ··· 21689 21681 S: Supported 21690 21682 F: Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml 21691 21683 F: drivers/phy/starfive/phy-jh7110-dphy-rx.c 21684 + 21685 + STARFIVE JH7110 DPHY TX DRIVER 21686 + M: Keith Zhao <keith.zhao@starfivetech.com> 21687 + M: Shengyang Chen <shengyang.chen@starfivetech.com> 21688 + S: Supported 21689 + F: Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-tx.yaml 21690 + F: drivers/phy/starfive/phy-jh7110-dphy-tx.c 21692 21691 21693 21692 STARFIVE JH7110 MMC/SD/SDIO DRIVER 21694 21693 M: William Qiu <william.qiu@starfivetech.com>
+10
drivers/phy/Kconfig
··· 72 72 functional modes using gpios and sets the attribute max link 73 73 rate, for CAN drivers. 74 74 75 + config PHY_AIROHA_PCIE 76 + tristate "Airoha PCIe-PHY Driver" 77 + depends on ARCH_AIROHA || COMPILE_TEST 78 + depends on OF 79 + select GENERIC_PHY 80 + help 81 + Say Y here to add support for Airoha PCIe PHY driver. 82 + This driver create the basic PHY instance and provides initialize 83 + callback for PCIe GEN3 port. 84 + 75 85 source "drivers/phy/allwinner/Kconfig" 76 86 source "drivers/phy/amlogic/Kconfig" 77 87 source "drivers/phy/broadcom/Kconfig"
+1
drivers/phy/Makefile
··· 10 10 obj-$(CONFIG_PHY_XGENE) += phy-xgene.o 11 11 obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o 12 12 obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o 13 + obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o 13 14 obj-y += allwinner/ \ 14 15 amlogic/ \ 15 16 broadcom/ \
+1
drivers/phy/broadcom/phy-bcm-ns-usb2.c
··· 162 162 }; 163 163 module_platform_driver(bcm_ns_usb2_driver); 164 164 165 + MODULE_DESCRIPTION("Broadcom Northstar USB 2.0 PHY Driver"); 165 166 MODULE_LICENSE("GPL v2");
+1
drivers/phy/broadcom/phy-bcm-ns-usb3.c
··· 240 240 241 241 mdio_module_driver(bcm_ns_usb3_mdio_driver); 242 242 243 + MODULE_DESCRIPTION("Broadcom Northstar USB 3.0 PHY Driver"); 243 244 MODULE_LICENSE("GPL v2"); 244 245 MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table);
+162 -45
drivers/phy/cadence/phy-cadence-torrent.c
··· 360 360 enum cdns_torrent_ref_clk ref_clk1_rate; 361 361 struct cdns_torrent_inst phys[MAX_NUM_LANES]; 362 362 int nsubnodes; 363 + int already_configured; 363 364 const struct cdns_torrent_data *init_data; 364 365 struct regmap *regmap_common_cdb; 365 366 struct regmap *regmap_phy_pcs_common_cdb; ··· 1157 1156 ret = regmap_read_poll_timeout(regmap, PHY_PMA_XCVR_POWER_STATE_ACK, 1158 1157 read_val, (read_val & mask) == value, 0, 1159 1158 POLL_TIMEOUT_US); 1159 + if (ret) 1160 + return ret; 1161 + 1160 1162 cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, 0x00000000); 1161 1163 ndelay(100); 1162 1164 ··· 1598 1594 struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); 1599 1595 int ret; 1600 1596 1597 + if (cdns_phy->already_configured) 1598 + return 0; 1599 + 1601 1600 ret = cdns_torrent_dp_verify_config(inst, &opts->dp); 1602 1601 if (ret) { 1603 1602 dev_err(&phy->dev, "invalid params for phy configure\n"); ··· 1635 1628 struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); 1636 1629 u32 read_val; 1637 1630 int ret; 1631 + 1632 + if (cdns_phy->already_configured) { 1633 + /* Give 5ms to 10ms delay for the PIPE clock to be stable */ 1634 + usleep_range(5000, 10000); 1635 + return 0; 1636 + } 1638 1637 1639 1638 if (cdns_phy->nsubnodes == 1) { 1640 1639 /* Take the PHY lane group out of reset */ ··· 2320 2307 u32 num_regs; 2321 2308 int i, j; 2322 2309 2310 + if (cdns_phy->already_configured) 2311 + return 0; 2312 + 2323 2313 if (cdns_phy->nsubnodes > 1) { 2324 2314 if (phy_type == TYPE_DP) 2325 2315 return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy); ··· 2457 2441 .configure = cdns_torrent_dp_configure, 2458 2442 .power_on = cdns_torrent_phy_on, 2459 2443 .power_off = cdns_torrent_phy_off, 2460 - .owner = THIS_MODULE, 2461 - }; 2462 - 2463 - static int cdns_torrent_noop_phy_on(struct phy *phy) 2464 - { 2465 - /* Give 5ms to 10ms delay for the PIPE clock to be stable */ 2466 - usleep_range(5000, 10000); 2467 - 
2468 - return 0; 2469 - } 2470 - 2471 - static const struct phy_ops noop_ops = { 2472 - .power_on = cdns_torrent_noop_phy_on, 2473 2444 .owner = THIS_MODULE, 2474 2445 }; 2475 2446 ··· 2681 2678 return 0; 2682 2679 } 2683 2680 2684 - static int cdns_torrent_reset(struct cdns_torrent_phy *cdns_phy) 2681 + static int cdns_torrent_of_get_reset(struct cdns_torrent_phy *cdns_phy) 2685 2682 { 2686 2683 struct device *dev = cdns_phy->dev; 2687 2684 ··· 2702 2699 return 0; 2703 2700 } 2704 2701 2702 + static int cdns_torrent_of_get_clk(struct cdns_torrent_phy *cdns_phy) 2703 + { 2704 + /* refclk: Input reference clock for PLL0 */ 2705 + cdns_phy->clk = devm_clk_get(cdns_phy->dev, "refclk"); 2706 + if (IS_ERR(cdns_phy->clk)) 2707 + return dev_err_probe(cdns_phy->dev, PTR_ERR(cdns_phy->clk), 2708 + "phy ref clock not found\n"); 2709 + 2710 + /* refclk1: Input reference clock for PLL1 */ 2711 + cdns_phy->clk1 = devm_clk_get_optional(cdns_phy->dev, "pll1_refclk"); 2712 + if (IS_ERR(cdns_phy->clk1)) 2713 + return dev_err_probe(cdns_phy->dev, PTR_ERR(cdns_phy->clk1), 2714 + "phy PLL1 ref clock not found\n"); 2715 + 2716 + return 0; 2717 + } 2718 + 2705 2719 static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy) 2706 2720 { 2707 - struct device *dev = cdns_phy->dev; 2708 2721 unsigned long ref_clk1_rate; 2709 2722 unsigned long ref_clk_rate; 2710 2723 int ret; 2711 - 2712 - /* refclk: Input reference clock for PLL0 */ 2713 - cdns_phy->clk = devm_clk_get(dev, "refclk"); 2714 - if (IS_ERR(cdns_phy->clk)) { 2715 - dev_err(dev, "phy ref clock not found\n"); 2716 - return PTR_ERR(cdns_phy->clk); 2717 - } 2718 2724 2719 2725 ret = clk_prepare_enable(cdns_phy->clk); 2720 2726 if (ret) { ··· 2754 2742 default: 2755 2743 dev_err(cdns_phy->dev, "Invalid ref clock rate\n"); 2756 2744 ret = -EINVAL; 2757 - goto disable_clk; 2758 - } 2759 - 2760 - /* refclk1: Input reference clock for PLL1 */ 2761 - cdns_phy->clk1 = devm_clk_get_optional(dev, "pll1_refclk"); 2762 - if 
(IS_ERR(cdns_phy->clk1)) { 2763 - dev_err(dev, "phy PLL1 ref clock not found\n"); 2764 - ret = PTR_ERR(cdns_phy->clk1); 2765 2745 goto disable_clk; 2766 2746 } 2767 2747 ··· 2811 2807 struct device_node *child; 2812 2808 int ret, subnodes, node = 0, i; 2813 2809 u32 total_num_lanes = 0; 2814 - int already_configured; 2815 2810 u8 init_dp_regmap = 0; 2816 2811 u32 phy_type; 2817 2812 ··· 2849 2846 if (ret) 2850 2847 return ret; 2851 2848 2852 - regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &already_configured); 2849 + ret = cdns_torrent_of_get_reset(cdns_phy); 2850 + if (ret) 2851 + goto clk_cleanup; 2853 2852 2854 - if (!already_configured) { 2855 - ret = cdns_torrent_reset(cdns_phy); 2856 - if (ret) 2857 - goto clk_cleanup; 2853 + ret = cdns_torrent_of_get_clk(cdns_phy); 2854 + if (ret) 2855 + goto clk_cleanup; 2858 2856 2857 + regmap_field_read(cdns_phy->phy_pma_cmn_ctrl_1, &cdns_phy->already_configured); 2858 + 2859 + if (!cdns_phy->already_configured) { 2859 2860 ret = cdns_torrent_clk(cdns_phy); 2860 2861 if (ret) 2861 2862 goto clk_cleanup; ··· 2939 2932 of_property_read_u32(child, "cdns,ssc-mode", 2940 2933 &cdns_phy->phys[node].ssc_mode); 2941 2934 2942 - if (!already_configured) 2943 - gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops); 2944 - else 2945 - gphy = devm_phy_create(dev, child, &noop_ops); 2935 + gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops); 2946 2936 if (IS_ERR(gphy)) { 2947 2937 ret = PTR_ERR(gphy); 2948 2938 goto put_child; ··· 3022 3018 goto put_lnk_rst; 3023 3019 } 3024 3020 3025 - if (cdns_phy->nsubnodes > 1 && !already_configured) { 3021 + if (cdns_phy->nsubnodes > 1 && !cdns_phy->already_configured) { 3026 3022 ret = cdns_torrent_phy_configure_multilink(cdns_phy); 3027 3023 if (ret) 3028 3024 goto put_lnk_rst; ··· 3077 3073 clk_disable_unprepare(cdns_phy->clk); 3078 3074 cdns_torrent_clk_cleanup(cdns_phy); 3079 3075 } 3076 + 3077 + /* SGMII and QSGMII link configuration */ 3078 + static struct cdns_reg_pairs 
sgmii_qsgmii_link_cmn_regs[] = { 3079 + {0x0002, PHY_PLL_CFG} 3080 + }; 3081 + 3082 + static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = { 3083 + {0x0003, XCVR_DIAG_HSCLK_DIV}, 3084 + {0x0113, XCVR_DIAG_PLLDRC_CTRL} 3085 + }; 3086 + 3087 + static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = { 3088 + .reg_pairs = sgmii_qsgmii_link_cmn_regs, 3089 + .num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs), 3090 + }; 3091 + 3092 + static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = { 3093 + .reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs, 3094 + .num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs), 3095 + }; 3096 + 3097 + static int cdns_torrent_phy_suspend_noirq(struct device *dev) 3098 + { 3099 + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(dev); 3100 + int i; 3101 + 3102 + reset_control_assert(cdns_phy->phy_rst); 3103 + reset_control_assert(cdns_phy->apb_rst); 3104 + for (i = 0; i < cdns_phy->nsubnodes; i++) 3105 + reset_control_assert(cdns_phy->phys[i].lnk_rst); 3106 + 3107 + if (cdns_phy->already_configured) 3108 + cdns_phy->already_configured = 0; 3109 + else { 3110 + clk_disable_unprepare(cdns_phy->clk1); 3111 + clk_disable_unprepare(cdns_phy->clk); 3112 + } 3113 + 3114 + return 0; 3115 + } 3116 + 3117 + static int cdns_torrent_phy_resume_noirq(struct device *dev) 3118 + { 3119 + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(dev); 3120 + int node = cdns_phy->nsubnodes; 3121 + int ret, i; 3122 + 3123 + ret = cdns_torrent_clk(cdns_phy); 3124 + if (ret) 3125 + return ret; 3126 + 3127 + /* Enable APB */ 3128 + reset_control_deassert(cdns_phy->apb_rst); 3129 + 3130 + if (cdns_phy->nsubnodes > 1) { 3131 + ret = cdns_torrent_phy_configure_multilink(cdns_phy); 3132 + if (ret) 3133 + goto put_lnk_rst; 3134 + } 3135 + 3136 + return 0; 3137 + 3138 + put_lnk_rst: 3139 + for (i = 0; i < node; i++) 3140 + reset_control_assert(cdns_phy->phys[i].lnk_rst); 3141 + reset_control_assert(cdns_phy->apb_rst); 3142 + 3143 + 
clk_disable_unprepare(cdns_phy->clk1); 3144 + clk_disable_unprepare(cdns_phy->clk); 3145 + 3146 + return ret; 3147 + } 3148 + 3149 + static DEFINE_NOIRQ_DEV_PM_OPS(cdns_torrent_phy_pm_ops, 3150 + cdns_torrent_phy_suspend_noirq, 3151 + cdns_torrent_phy_resume_noirq); 3080 3152 3081 3153 /* USB and DP link configuration */ 3082 3154 static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = { ··· 4123 4043 {0x04A2, TX_PSC_A2}, 4124 4044 {0x04A2, TX_PSC_A3}, 4125 4045 {0x0000, TX_TXCC_CPOST_MULT_00}, 4126 - {0x00B3, DRV_DIAG_TX_DRV} 4046 + {0x00B3, DRV_DIAG_TX_DRV}, 4047 + {0x0002, XCVR_DIAG_PSC_OVRD} 4127 4048 }; 4128 4049 4129 4050 static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { ··· 4133 4052 {0x04A2, TX_PSC_A3}, 4134 4053 {0x0000, TX_TXCC_CPOST_MULT_00}, 4135 4054 {0x00B3, DRV_DIAG_TX_DRV}, 4136 - {0x4000, XCVR_DIAG_RXCLK_CTRL}, 4055 + {0x0002, XCVR_DIAG_PSC_OVRD}, 4056 + {0x4000, XCVR_DIAG_RXCLK_CTRL} 4137 4057 }; 4138 4058 4139 4059 static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { ··· 4301 4219 {0x04A2, TX_PSC_A3}, 4302 4220 {0x0000, TX_TXCC_CPOST_MULT_00}, 4303 4221 {0x0011, TX_TXCC_MGNFS_MULT_100}, 4304 - {0x0003, DRV_DIAG_TX_DRV} 4222 + {0x0003, DRV_DIAG_TX_DRV}, 4223 + {0x0002, XCVR_DIAG_PSC_OVRD} 4305 4224 }; 4306 4225 4307 4226 static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { ··· 4312 4229 {0x0000, TX_TXCC_CPOST_MULT_00}, 4313 4230 {0x0011, TX_TXCC_MGNFS_MULT_100}, 4314 4231 {0x0003, DRV_DIAG_TX_DRV}, 4315 - {0x4000, XCVR_DIAG_RXCLK_CTRL}, 4232 + {0x0002, XCVR_DIAG_PSC_OVRD}, 4233 + {0x4000, XCVR_DIAG_RXCLK_CTRL} 4316 4234 }; 4317 4235 4318 4236 static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { ··· 4625 4541 4626 4542 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals}, 4627 4543 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals}, 4544 + {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_link_cmn_vals}, 4628 4545 
{CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals}, 4629 4546 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals}, 4630 4547 4631 4548 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals}, 4632 4549 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals}, 4550 + {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_SGMII), &sgmii_qsgmii_link_cmn_vals}, 4633 4551 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals}, 4634 4552 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &usxgmii_sgmii_link_cmn_vals}, 4635 4553 ··· 4661 4575 4662 4576 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals}, 4663 4577 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals}, 4578 + {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_xcvr_diag_ln_vals}, 4664 4579 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals}, 4665 4580 {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals}, 4666 4581 4667 4582 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals}, 4668 4583 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals}, 4584 + {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_SGMII), &sgmii_qsgmii_xcvr_diag_ln_vals}, 4669 4585 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals}, 4670 4586 {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USXGMII), &sgmii_usxgmii_xcvr_diag_ln_vals}, 4671 4587 ··· 4723 4635 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, 4724 4636 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals}, 4725 4637 4638 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals}, 4639 + 4726 4640 
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals}, 4727 4641 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, 4728 4642 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, ··· 4734 4644 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals}, 4735 4645 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, 4736 4646 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals}, 4647 + 4648 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals}, 4737 4649 4738 4650 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals}, 4739 4651 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, ··· 4805 4713 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, 4806 4714 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, 4807 4715 4716 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals}, 4717 + 4808 4718 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals}, 4809 4719 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, 4810 4720 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals}, ··· 4816 4722 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, 4817 4723 {CDNS_TORRENT_KEY(CLK_100_MHZ, 
CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, 4818 4724 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, 4725 + 4726 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, 4819 4727 4820 4728 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, 4821 4729 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals}, ··· 4887 4791 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 4888 4792 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 4889 4793 4794 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 4795 + 4890 4796 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 4891 4797 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 4892 4798 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, ··· 4898 4800 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 4899 4801 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 4900 4802 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 4803 + 4804 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 4901 4805 4902 4806 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 4903 4807 {CDNS_TORRENT_KEY(CLK_100_MHZ, 
CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, ··· 5005 4905 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5006 4906 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5007 4907 4908 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 4909 + 5008 4910 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5009 4911 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5010 4912 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, ··· 5016 4914 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5017 4915 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5018 4916 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 4917 + 4918 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5019 4919 5020 4920 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5021 4921 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, ··· 5121 5017 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, 5122 5018 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals}, 5123 5019 5020 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals}, 5021 + 5124 
5022 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals}, 5125 5023 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, 5126 5024 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals}, ··· 5132 5026 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals}, 5133 5027 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, 5134 5028 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals}, 5029 + 5030 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals}, 5135 5031 5136 5032 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals}, 5137 5033 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals}, ··· 5203 5095 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5204 5096 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5205 5097 5098 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5099 + 5206 5100 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5207 5101 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, 5208 5102 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, ··· 5214 5104 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5215 5105 
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5216 5106 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5107 + 5108 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5217 5109 5218 5110 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, 5219 5111 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals}, ··· 5285 5173 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 5286 5174 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 5287 5175 5176 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_QSGMII, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 5177 + 5288 5178 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 5289 5179 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, 5290 5180 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals}, ··· 5296 5182 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 5297 5183 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 5298 5184 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 5185 + 5186 + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_SGMII, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 5299 5187 5300 5188 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, 
5301 5189 {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals}, ··· 5391 5275 .driver = { 5392 5276 .name = "cdns-torrent-phy", 5393 5277 .of_match_table = cdns_torrent_phy_of_match, 5278 + .pm = pm_sleep_ptr(&cdns_torrent_phy_pm_ops), 5394 5279 } 5395 5280 }; 5396 5281 module_platform_driver(cdns_torrent_phy_driver);
+8 -1
drivers/phy/freescale/Kconfig
··· 35 35 Enable this to add support for the PCIE PHY as found on 36 36 i.MX8M family of SOCs. 37 37 38 + config PHY_FSL_IMX8QM_HSIO 39 + tristate "Freescale i.MX8QM HSIO PHY" 40 + depends on OF && HAS_IOMEM 41 + select GENERIC_PHY 42 + help 43 + Enable this to add support for the HSIO PHY as found on 44 + i.MX8QM family of SOCs. 45 + 38 46 config PHY_FSL_SAMSUNG_HDMI_PHY 39 47 tristate "Samsung HDMI PHY support" 40 48 depends on OF && HAS_IOMEM && COMMON_CLK 41 49 help 42 50 Enable this to add support for the Samsung HDMI PHY in i.MX8MP. 43 - 44 51 endif 45 52 46 53 config PHY_FSL_LYNX_28G
+1
drivers/phy/freescale/Makefile
··· 3 3 obj-$(CONFIG_PHY_MIXEL_LVDS_PHY) += phy-fsl-imx8qm-lvds-phy.o 4 4 obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o 5 5 obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o 6 + obj-$(CONFIG_PHY_FSL_IMX8QM_HSIO) += phy-fsl-imx8qm-hsio.o 6 7 obj-$(CONFIG_PHY_FSL_LYNX_28G) += phy-fsl-lynx-28g.o 7 8 obj-$(CONFIG_PHY_FSL_SAMSUNG_HDMI_PHY) += phy-fsl-samsung-hdmi.o
+611
drivers/phy/freescale/phy-fsl-imx8qm-hsio.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright 2024 NXP 4 + */ 5 + 6 + #include <linux/bitfield.h> 7 + #include <linux/clk.h> 8 + #include <linux/delay.h> 9 + #include <linux/io.h> 10 + #include <linux/iopoll.h> 11 + #include <linux/module.h> 12 + #include <linux/of.h> 13 + #include <linux/pci_regs.h> 14 + #include <linux/phy/phy.h> 15 + #include <linux/phy/pcie.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/regmap.h> 18 + 19 + #include <dt-bindings/phy/phy.h> 20 + #include <dt-bindings/phy/phy-imx8-pcie.h> 21 + 22 + #define MAX_NUM_LANE 3 23 + #define LANE_NUM_CLKS 5 24 + 25 + /* Parameters for the waiting for PCIe PHY PLL to lock */ 26 + #define PHY_INIT_WAIT_USLEEP_MAX 10 27 + #define PHY_INIT_WAIT_TIMEOUT (1000 * PHY_INIT_WAIT_USLEEP_MAX) 28 + 29 + /* i.MX8Q HSIO registers */ 30 + #define HSIO_CTRL0 0x0 31 + #define HSIO_APB_RSTN_0 BIT(0) 32 + #define HSIO_APB_RSTN_1 BIT(1) 33 + #define HSIO_PIPE_RSTN_0_MASK GENMASK(25, 24) 34 + #define HSIO_PIPE_RSTN_1_MASK GENMASK(27, 26) 35 + #define HSIO_MODE_MASK GENMASK(20, 17) 36 + #define HSIO_MODE_PCIE 0x0 37 + #define HSIO_MODE_SATA 0x4 38 + #define HSIO_DEVICE_TYPE_MASK GENMASK(27, 24) 39 + #define HSIO_EPCS_TXDEEMP BIT(5) 40 + #define HSIO_EPCS_TXDEEMP_SEL BIT(6) 41 + #define HSIO_EPCS_PHYRESET_N BIT(7) 42 + #define HSIO_RESET_N BIT(12) 43 + 44 + #define HSIO_IOB_RXENA BIT(0) 45 + #define HSIO_IOB_TXENA BIT(1) 46 + #define HSIO_IOB_A_0_TXOE BIT(2) 47 + #define HSIO_IOB_A_0_M1M0_2 BIT(4) 48 + #define HSIO_IOB_A_0_M1M0_MASK GENMASK(4, 3) 49 + #define HSIO_PHYX1_EPCS_SEL BIT(12) 50 + #define HSIO_PCIE_AB_SELECT BIT(13) 51 + 52 + #define HSIO_PHY_STS0 0x4 53 + #define HSIO_LANE0_TX_PLL_LOCK BIT(4) 54 + #define HSIO_LANE1_TX_PLL_LOCK BIT(12) 55 + 56 + #define HSIO_CTRL2 0x8 57 + #define HSIO_LTSSM_ENABLE BIT(4) 58 + #define HSIO_BUTTON_RST_N BIT(21) 59 + #define HSIO_PERST_N BIT(22) 60 + #define HSIO_POWER_UP_RST_N BIT(23) 61 + 62 + #define HSIO_PCIE_STS0 0xc 63 + #define 
HSIO_PM_REQ_CORE_RST BIT(19) 64 + 65 + #define HSIO_REG48_PMA_STATUS 0x30 66 + #define HSIO_REG48_PMA_RDY BIT(7) 67 + 68 + struct imx_hsio_drvdata { 69 + int lane_num; 70 + }; 71 + 72 + struct imx_hsio_lane { 73 + u32 ctrl_index; 74 + u32 ctrl_off; 75 + u32 idx; 76 + u32 phy_off; 77 + u32 phy_type; 78 + const char * const *clk_names; 79 + struct clk_bulk_data clks[LANE_NUM_CLKS]; 80 + struct imx_hsio_priv *priv; 81 + struct phy *phy; 82 + enum phy_mode phy_mode; 83 + }; 84 + 85 + struct imx_hsio_priv { 86 + void __iomem *base; 87 + struct device *dev; 88 + struct mutex lock; 89 + const char *hsio_cfg; 90 + const char *refclk_pad; 91 + u32 open_cnt; 92 + struct regmap *phy; 93 + struct regmap *ctrl; 94 + struct regmap *misc; 95 + const struct imx_hsio_drvdata *drvdata; 96 + struct imx_hsio_lane lane[MAX_NUM_LANE]; 97 + }; 98 + 99 + static const char * const lan0_pcie_clks[] = {"apb_pclk0", "pclk0", "ctl0_crr", 100 + "phy0_crr", "misc_crr"}; 101 + static const char * const lan1_pciea_clks[] = {"apb_pclk1", "pclk1", "ctl0_crr", 102 + "phy0_crr", "misc_crr"}; 103 + static const char * const lan1_pcieb_clks[] = {"apb_pclk1", "pclk1", "ctl1_crr", 104 + "phy0_crr", "misc_crr"}; 105 + static const char * const lan2_pcieb_clks[] = {"apb_pclk2", "pclk2", "ctl1_crr", 106 + "phy1_crr", "misc_crr"}; 107 + static const char * const lan2_sata_clks[] = {"pclk2", "epcs_tx", "epcs_rx", 108 + "phy1_crr", "misc_crr"}; 109 + 110 + static const struct regmap_config regmap_config = { 111 + .reg_bits = 32, 112 + .val_bits = 32, 113 + .reg_stride = 4, 114 + }; 115 + 116 + static int imx_hsio_init(struct phy *phy) 117 + { 118 + int ret, i; 119 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 120 + struct imx_hsio_priv *priv = lane->priv; 121 + struct device *dev = priv->dev; 122 + 123 + /* Assign clocks refer to different modes */ 124 + switch (lane->phy_type) { 125 + case PHY_TYPE_PCIE: 126 + lane->phy_mode = PHY_MODE_PCIE; 127 + if (lane->ctrl_index == 0) { /* PCIEA */ 128 + 
lane->ctrl_off = 0; 129 + lane->phy_off = 0; 130 + 131 + for (i = 0; i < LANE_NUM_CLKS; i++) { 132 + if (lane->idx == 0) 133 + lane->clks[i].id = lan0_pcie_clks[i]; 134 + else 135 + lane->clks[i].id = lan1_pciea_clks[i]; 136 + } 137 + } else { /* PCIEB */ 138 + if (lane->idx == 0) { /* i.MX8QXP */ 139 + lane->ctrl_off = 0; 140 + lane->phy_off = 0; 141 + } else { 142 + /* 143 + * On i.MX8QM, only second or third lane can be 144 + * bound to PCIEB. 145 + */ 146 + lane->ctrl_off = SZ_64K; 147 + if (lane->idx == 1) 148 + lane->phy_off = 0; 149 + else /* the third lane is bound to PCIEB */ 150 + lane->phy_off = SZ_64K; 151 + } 152 + 153 + for (i = 0; i < LANE_NUM_CLKS; i++) { 154 + if (lane->idx == 1) 155 + lane->clks[i].id = lan1_pcieb_clks[i]; 156 + else if (lane->idx == 2) 157 + lane->clks[i].id = lan2_pcieb_clks[i]; 158 + else /* i.MX8QXP only has PCIEB, idx is 0 */ 159 + lane->clks[i].id = lan0_pcie_clks[i]; 160 + } 161 + } 162 + break; 163 + case PHY_TYPE_SATA: 164 + /* On i.MX8QM, only the third lane can be bound to SATA */ 165 + lane->phy_mode = PHY_MODE_SATA; 166 + lane->ctrl_off = SZ_128K; 167 + lane->phy_off = SZ_64K; 168 + 169 + for (i = 0; i < LANE_NUM_CLKS; i++) 170 + lane->clks[i].id = lan2_sata_clks[i]; 171 + break; 172 + default: 173 + return -EINVAL; 174 + } 175 + 176 + /* Fetch clocks and enable them */ 177 + ret = devm_clk_bulk_get(dev, LANE_NUM_CLKS, lane->clks); 178 + if (ret) 179 + return ret; 180 + ret = clk_bulk_prepare_enable(LANE_NUM_CLKS, lane->clks); 181 + if (ret) 182 + return ret; 183 + 184 + /* allow the clocks to stabilize */ 185 + usleep_range(200, 500); 186 + return 0; 187 + } 188 + 189 + static int imx_hsio_exit(struct phy *phy) 190 + { 191 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 192 + 193 + clk_bulk_disable_unprepare(LANE_NUM_CLKS, lane->clks); 194 + 195 + return 0; 196 + } 197 + 198 + static void imx_hsio_pcie_phy_resets(struct phy *phy) 199 + { 200 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 201 + struct 
imx_hsio_priv *priv = lane->priv; 202 + 203 + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 204 + HSIO_BUTTON_RST_N); 205 + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 206 + HSIO_PERST_N); 207 + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 208 + HSIO_POWER_UP_RST_N); 209 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 210 + HSIO_BUTTON_RST_N); 211 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 212 + HSIO_PERST_N); 213 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 214 + HSIO_POWER_UP_RST_N); 215 + 216 + if (lane->idx == 1) { 217 + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, 218 + HSIO_APB_RSTN_1); 219 + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, 220 + HSIO_PIPE_RSTN_1_MASK); 221 + } else { 222 + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, 223 + HSIO_APB_RSTN_0); 224 + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, 225 + HSIO_PIPE_RSTN_0_MASK); 226 + } 227 + } 228 + 229 + static void imx_hsio_sata_phy_resets(struct phy *phy) 230 + { 231 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 232 + struct imx_hsio_priv *priv = lane->priv; 233 + 234 + /* clear PHY RST, then set it */ 235 + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, 236 + HSIO_EPCS_PHYRESET_N); 237 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, 238 + HSIO_EPCS_PHYRESET_N); 239 + 240 + /* CTRL RST: SET -> delay 1 us -> CLEAR -> SET */ 241 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, HSIO_RESET_N); 242 + udelay(1); 243 + regmap_clear_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, 244 + HSIO_RESET_N); 245 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, HSIO_RESET_N); 246 + } 247 + 248 + static void imx_hsio_configure_clk_pad(struct phy *phy) 249 + { 250 + bool pll = false; 251 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 252 + struct imx_hsio_priv *priv = lane->priv; 253 + 254 + if (strncmp(priv->refclk_pad, "output", 6) == 0) { 255 
+ pll = true; 256 + regmap_update_bits(priv->misc, HSIO_CTRL0, 257 + HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_MASK, 258 + HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_2); 259 + } else { 260 + regmap_update_bits(priv->misc, HSIO_CTRL0, 261 + HSIO_IOB_A_0_TXOE | HSIO_IOB_A_0_M1M0_MASK, 262 + 0); 263 + } 264 + 265 + regmap_update_bits(priv->misc, HSIO_CTRL0, HSIO_IOB_RXENA, 266 + pll ? 0 : HSIO_IOB_RXENA); 267 + regmap_update_bits(priv->misc, HSIO_CTRL0, HSIO_IOB_TXENA, 268 + pll ? HSIO_IOB_TXENA : 0); 269 + } 270 + 271 + static void imx_hsio_pre_set(struct phy *phy) 272 + { 273 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 274 + struct imx_hsio_priv *priv = lane->priv; 275 + 276 + if (strncmp(priv->hsio_cfg, "pciea-x2-pcieb", 14) == 0) { 277 + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PCIE_AB_SELECT); 278 + } else if (strncmp(priv->hsio_cfg, "pciea-x2-sata", 13) == 0) { 279 + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PHYX1_EPCS_SEL); 280 + } else if (strncmp(priv->hsio_cfg, "pciea-pcieb-sata", 16) == 0) { 281 + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PCIE_AB_SELECT); 282 + regmap_set_bits(priv->misc, HSIO_CTRL0, HSIO_PHYX1_EPCS_SEL); 283 + } 284 + 285 + imx_hsio_configure_clk_pad(phy); 286 + } 287 + 288 + static int imx_hsio_pcie_power_on(struct phy *phy) 289 + { 290 + int ret; 291 + u32 val, addr, cond; 292 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 293 + struct imx_hsio_priv *priv = lane->priv; 294 + 295 + imx_hsio_pcie_phy_resets(phy); 296 + 297 + /* Toggle apb_pclk to make sure PM_REQ_CORE_RST is cleared. 
*/ 298 + clk_disable_unprepare(lane->clks[0].clk); 299 + mdelay(1); 300 + ret = clk_prepare_enable(lane->clks[0].clk); 301 + if (ret) { 302 + dev_err(priv->dev, "unable to enable phy apb_pclk\n"); 303 + return ret; 304 + } 305 + 306 + addr = lane->ctrl_off + HSIO_PCIE_STS0; 307 + cond = HSIO_PM_REQ_CORE_RST; 308 + ret = regmap_read_poll_timeout(priv->ctrl, addr, val, 309 + (val & cond) == 0, 310 + PHY_INIT_WAIT_USLEEP_MAX, 311 + PHY_INIT_WAIT_TIMEOUT); 312 + if (ret) 313 + dev_err(priv->dev, "HSIO_PM_REQ_CORE_RST is set\n"); 314 + return ret; 315 + } 316 + 317 + static int imx_hsio_sata_power_on(struct phy *phy) 318 + { 319 + int ret; 320 + u32 val, cond; 321 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 322 + struct imx_hsio_priv *priv = lane->priv; 323 + 324 + regmap_set_bits(priv->phy, lane->phy_off + HSIO_CTRL0, HSIO_APB_RSTN_0); 325 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, 326 + HSIO_EPCS_TXDEEMP); 327 + regmap_set_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, 328 + HSIO_EPCS_TXDEEMP_SEL); 329 + 330 + imx_hsio_sata_phy_resets(phy); 331 + 332 + cond = HSIO_REG48_PMA_RDY; 333 + ret = read_poll_timeout(readb, val, ((val & cond) == cond), 334 + PHY_INIT_WAIT_USLEEP_MAX, 335 + PHY_INIT_WAIT_TIMEOUT, false, 336 + priv->base + HSIO_REG48_PMA_STATUS); 337 + if (ret) 338 + dev_err(priv->dev, "PHY calibration is timeout\n"); 339 + else 340 + dev_dbg(priv->dev, "PHY calibration is done\n"); 341 + 342 + return ret; 343 + } 344 + 345 + static int imx_hsio_power_on(struct phy *phy) 346 + { 347 + int ret; 348 + u32 val, cond; 349 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 350 + struct imx_hsio_priv *priv = lane->priv; 351 + 352 + scoped_guard(mutex, &priv->lock) { 353 + if (!priv->open_cnt) 354 + imx_hsio_pre_set(phy); 355 + priv->open_cnt++; 356 + } 357 + 358 + if (lane->phy_mode == PHY_MODE_PCIE) 359 + ret = imx_hsio_pcie_power_on(phy); 360 + else /* SATA */ 361 + ret = imx_hsio_sata_power_on(phy); 362 + if (ret) 363 + return ret; 364 + 
365 + /* Polling to check the PHY is ready or not. */ 366 + if (lane->idx == 1) 367 + cond = HSIO_LANE1_TX_PLL_LOCK; 368 + else 369 + /* 370 + * Except the phy_off, the bit-offset of lane2 is same to lane0. 371 + * Merge the lane0 and lane2 bit-operations together. 372 + */ 373 + cond = HSIO_LANE0_TX_PLL_LOCK; 374 + 375 + ret = regmap_read_poll_timeout(priv->phy, lane->phy_off + HSIO_PHY_STS0, 376 + val, ((val & cond) == cond), 377 + PHY_INIT_WAIT_USLEEP_MAX, 378 + PHY_INIT_WAIT_TIMEOUT); 379 + if (ret) { 380 + dev_err(priv->dev, "IMX8Q PHY%d PLL lock timeout\n", lane->idx); 381 + return ret; 382 + } 383 + dev_dbg(priv->dev, "IMX8Q PHY%d PLL is locked\n", lane->idx); 384 + 385 + return ret; 386 + } 387 + 388 + static int imx_hsio_power_off(struct phy *phy) 389 + { 390 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 391 + struct imx_hsio_priv *priv = lane->priv; 392 + 393 + scoped_guard(mutex, &priv->lock) { 394 + priv->open_cnt--; 395 + if (priv->open_cnt == 0) { 396 + regmap_clear_bits(priv->misc, HSIO_CTRL0, 397 + HSIO_PCIE_AB_SELECT); 398 + regmap_clear_bits(priv->misc, HSIO_CTRL0, 399 + HSIO_PHYX1_EPCS_SEL); 400 + 401 + if (lane->phy_mode == PHY_MODE_PCIE) { 402 + regmap_clear_bits(priv->ctrl, 403 + lane->ctrl_off + HSIO_CTRL2, 404 + HSIO_BUTTON_RST_N); 405 + regmap_clear_bits(priv->ctrl, 406 + lane->ctrl_off + HSIO_CTRL2, 407 + HSIO_PERST_N); 408 + regmap_clear_bits(priv->ctrl, 409 + lane->ctrl_off + HSIO_CTRL2, 410 + HSIO_POWER_UP_RST_N); 411 + } else { 412 + regmap_clear_bits(priv->ctrl, 413 + lane->ctrl_off + HSIO_CTRL0, 414 + HSIO_EPCS_TXDEEMP); 415 + regmap_clear_bits(priv->ctrl, 416 + lane->ctrl_off + HSIO_CTRL0, 417 + HSIO_EPCS_TXDEEMP_SEL); 418 + regmap_clear_bits(priv->ctrl, 419 + lane->ctrl_off + HSIO_CTRL0, 420 + HSIO_RESET_N); 421 + } 422 + 423 + if (lane->idx == 1) { 424 + regmap_clear_bits(priv->phy, 425 + lane->phy_off + HSIO_CTRL0, 426 + HSIO_APB_RSTN_1); 427 + regmap_clear_bits(priv->phy, 428 + lane->phy_off + HSIO_CTRL0, 429 + 
HSIO_PIPE_RSTN_1_MASK); 430 + } else { 431 + /* 432 + * Except the phy_off, the bit-offset of lane2 is same 433 + * to lane0. Merge the lane0 and lane2 bit-operations 434 + * together. 435 + */ 436 + regmap_clear_bits(priv->phy, 437 + lane->phy_off + HSIO_CTRL0, 438 + HSIO_APB_RSTN_0); 439 + regmap_clear_bits(priv->phy, 440 + lane->phy_off + HSIO_CTRL0, 441 + HSIO_PIPE_RSTN_0_MASK); 442 + } 443 + } 444 + } 445 + 446 + return 0; 447 + } 448 + 449 + static int imx_hsio_set_mode(struct phy *phy, enum phy_mode mode, 450 + int submode) 451 + { 452 + u32 val; 453 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 454 + struct imx_hsio_priv *priv = lane->priv; 455 + 456 + if (lane->phy_mode != mode) 457 + return -EINVAL; 458 + 459 + val = (mode == PHY_MODE_PCIE) ? HSIO_MODE_PCIE : HSIO_MODE_SATA; 460 + val = FIELD_PREP(HSIO_MODE_MASK, val); 461 + regmap_update_bits(priv->phy, lane->phy_off + HSIO_CTRL0, 462 + HSIO_MODE_MASK, val); 463 + 464 + switch (submode) { 465 + case PHY_MODE_PCIE_RC: 466 + val = FIELD_PREP(HSIO_DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT); 467 + break; 468 + case PHY_MODE_PCIE_EP: 469 + val = FIELD_PREP(HSIO_DEVICE_TYPE_MASK, PCI_EXP_TYPE_ENDPOINT); 470 + break; 471 + default: /* Support only PCIe EP and RC now. */ 472 + return 0; 473 + } 474 + if (submode) 475 + regmap_update_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL0, 476 + HSIO_DEVICE_TYPE_MASK, val); 477 + 478 + return 0; 479 + } 480 + 481 + static int imx_hsio_set_speed(struct phy *phy, int speed) 482 + { 483 + struct imx_hsio_lane *lane = phy_get_drvdata(phy); 484 + struct imx_hsio_priv *priv = lane->priv; 485 + 486 + regmap_update_bits(priv->ctrl, lane->ctrl_off + HSIO_CTRL2, 487 + HSIO_LTSSM_ENABLE, 488 + speed ? 
HSIO_LTSSM_ENABLE : 0); 489 + return 0; 490 + } 491 + 492 + static const struct phy_ops imx_hsio_ops = { 493 + .init = imx_hsio_init, 494 + .exit = imx_hsio_exit, 495 + .power_on = imx_hsio_power_on, 496 + .power_off = imx_hsio_power_off, 497 + .set_mode = imx_hsio_set_mode, 498 + .set_speed = imx_hsio_set_speed, 499 + .owner = THIS_MODULE, 500 + }; 501 + 502 + static const struct imx_hsio_drvdata imx8qxp_hsio_drvdata = { 503 + .lane_num = 0x1, 504 + }; 505 + 506 + static const struct imx_hsio_drvdata imx8qm_hsio_drvdata = { 507 + .lane_num = 0x3, 508 + }; 509 + 510 + static const struct of_device_id imx_hsio_of_match[] = { 511 + {.compatible = "fsl,imx8qm-hsio", .data = &imx8qm_hsio_drvdata}, 512 + {.compatible = "fsl,imx8qxp-hsio", .data = &imx8qxp_hsio_drvdata}, 513 + { }, 514 + }; 515 + MODULE_DEVICE_TABLE(of, imx_hsio_of_match); 516 + 517 + static struct phy *imx_hsio_xlate(struct device *dev, 518 + const struct of_phandle_args *args) 519 + { 520 + struct imx_hsio_priv *priv = dev_get_drvdata(dev); 521 + int idx = args->args[0]; 522 + int phy_type = args->args[1]; 523 + int ctrl_index = args->args[2]; 524 + 525 + if (idx < 0 || idx >= priv->drvdata->lane_num) 526 + return ERR_PTR(-EINVAL); 527 + priv->lane[idx].idx = idx; 528 + priv->lane[idx].phy_type = phy_type; 529 + priv->lane[idx].ctrl_index = ctrl_index; 530 + 531 + return priv->lane[idx].phy; 532 + } 533 + 534 + static int imx_hsio_probe(struct platform_device *pdev) 535 + { 536 + int i; 537 + void __iomem *off; 538 + struct device *dev = &pdev->dev; 539 + struct device_node *np = dev->of_node; 540 + struct imx_hsio_priv *priv; 541 + struct phy_provider *provider; 542 + 543 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 544 + if (!priv) 545 + return -ENOMEM; 546 + priv->dev = &pdev->dev; 547 + priv->drvdata = of_device_get_match_data(dev); 548 + 549 + /* Get HSIO configuration mode */ 550 + if (of_property_read_string(np, "fsl,hsio-cfg", &priv->hsio_cfg)) 551 + priv->hsio_cfg = 
"pciea-pcieb-sata"; 552 + /* Get PHY refclk pad mode */ 553 + if (of_property_read_string(np, "fsl,refclk-pad-mode", 554 + &priv->refclk_pad)) 555 + priv->refclk_pad = NULL; 556 + 557 + priv->base = devm_platform_ioremap_resource(pdev, 0); 558 + if (IS_ERR(priv->base)) 559 + return PTR_ERR(priv->base); 560 + 561 + off = devm_platform_ioremap_resource_byname(pdev, "phy"); 562 + priv->phy = devm_regmap_init_mmio(dev, off, &regmap_config); 563 + if (IS_ERR(priv->phy)) 564 + return dev_err_probe(dev, PTR_ERR(priv->phy), 565 + "unable to find phy csr registers\n"); 566 + 567 + off = devm_platform_ioremap_resource_byname(pdev, "ctrl"); 568 + priv->ctrl = devm_regmap_init_mmio(dev, off, &regmap_config); 569 + if (IS_ERR(priv->ctrl)) 570 + return dev_err_probe(dev, PTR_ERR(priv->ctrl), 571 + "unable to find ctrl csr registers\n"); 572 + 573 + off = devm_platform_ioremap_resource_byname(pdev, "misc"); 574 + priv->misc = devm_regmap_init_mmio(dev, off, &regmap_config); 575 + if (IS_ERR(priv->misc)) 576 + return dev_err_probe(dev, PTR_ERR(priv->misc), 577 + "unable to find misc csr registers\n"); 578 + 579 + for (i = 0; i < priv->drvdata->lane_num; i++) { 580 + struct imx_hsio_lane *lane = &priv->lane[i]; 581 + struct phy *phy; 582 + 583 + phy = devm_phy_create(&pdev->dev, NULL, &imx_hsio_ops); 584 + if (IS_ERR(phy)) 585 + return PTR_ERR(phy); 586 + 587 + lane->priv = priv; 588 + lane->phy = phy; 589 + lane->idx = i; 590 + phy_set_drvdata(phy, lane); 591 + } 592 + 593 + dev_set_drvdata(dev, priv); 594 + dev_set_drvdata(&pdev->dev, priv); 595 + 596 + provider = devm_of_phy_provider_register(&pdev->dev, imx_hsio_xlate); 597 + 598 + return PTR_ERR_OR_ZERO(provider); 599 + } 600 + 601 + static struct platform_driver imx_hsio_driver = { 602 + .probe = imx_hsio_probe, 603 + .driver = { 604 + .name = "imx8qm-hsio-phy", 605 + .of_match_table = imx_hsio_of_match, 606 + } 607 + }; 608 + module_platform_driver(imx_hsio_driver); 609 + 610 + MODULE_DESCRIPTION("FSL IMX8QM HSIO SERDES PHY 
driver"); 611 + MODULE_LICENSE("GPL");
+494
drivers/phy/phy-airoha-pcie-regs.h
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *
 * Register map for the Airoha EN7581 PCIe PHY.
 *
 * Convention: each REG_* macro is a byte offset inside one of the PHY
 * register blocks (the CSR_2L analog lane block, the PMA0/PMA1 blocks,
 * the Tx/Rx detection-time word, the Rx AEQ block); the CSR_2L_PXP_* /
 * PCIE_* macros listed under a REG_* are BIT()/GENMASK() fields of that
 * register.
 */

#ifndef _PHY_AIROHA_PCIE_H
#define _PHY_AIROHA_PCIE_H

/* CSR_2L: analog two-lane control/status block */
#define REG_CSR_2L_CMN				0x0000
#define CSR_2L_PXP_CMN_LANE_EN			BIT(0)
#define CSR_2L_PXP_CMN_TRIM_MASK		GENMASK(28, 24)

#define REG_CSR_2L_JCPLL_IB_EXT			0x0004
/* NOTE(review): bitfield carries a REG_ prefix; kept as-is, callers rely on the name */
#define REG_CSR_2L_JCPLL_LPF_SHCK_EN		BIT(8)
#define CSR_2L_PXP_JCPLL_CHP_IBIAS		GENMASK(21, 16)
#define CSR_2L_PXP_JCPLL_CHP_IOFST		GENMASK(29, 24)

#define REG_CSR_2L_JCPLL_LPF_BR			0x0008
#define CSR_2L_PXP_JCPLL_LPF_BR			GENMASK(4, 0)
#define CSR_2L_PXP_JCPLL_LPF_BC			GENMASK(12, 8)
#define CSR_2L_PXP_JCPLL_LPF_BP			GENMASK(20, 16)
#define CSR_2L_PXP_JCPLL_LPF_BWR		GENMASK(28, 24)

#define REG_CSR_2L_JCPLL_LPF_BWC		0x000c
#define CSR_2L_PXP_JCPLL_LPF_BWC		GENMASK(4, 0)
#define CSR_2L_PXP_JCPLL_KBAND_CODE		GENMASK(23, 16)
#define CSR_2L_PXP_JCPLL_KBAND_DIV		GENMASK(26, 24)

#define REG_CSR_2L_JCPLL_KBAND_KFC		0x0010
#define CSR_2L_PXP_JCPLL_KBAND_KFC		GENMASK(1, 0)
#define CSR_2L_PXP_JCPLL_KBAND_KF		GENMASK(9, 8)
#define CSR_2L_PXP_JCPLL_KBAND_KS		GENMASK(17, 16)
#define CSR_2L_PXP_JCPLL_POSTDIV_EN		BIT(24)

#define REG_CSR_2L_JCPLL_MMD_PREDIV_MODE	0x0014
#define CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE	GENMASK(1, 0)
#define CSR_2L_PXP_JCPLL_POSTDIV_D2		BIT(16)
#define CSR_2L_PXP_JCPLL_POSTDIV_D5		BIT(24)

/* NOTE(review): register offset despite the missing REG_ prefix; used as a register by the driver */
#define CSR_2L_PXP_JCPLL_MONCK			0x0018
#define CSR_2L_PXP_JCPLL_REFIN_DIV		GENMASK(25, 24)

#define REG_CSR_2L_JCPLL_RST_DLY		0x001c
#define CSR_2L_PXP_JCPLL_RST_DLY		GENMASK(2, 0)
#define CSR_2L_PXP_JCPLL_RST			BIT(8)
#define CSR_2L_PXP_JCPLL_SDM_DI_EN		BIT(16)
#define CSR_2L_PXP_JCPLL_SDM_DI_LS		GENMASK(25, 24)

#define REG_CSR_2L_JCPLL_SDM_IFM		0x0020
#define CSR_2L_PXP_JCPLL_SDM_IFM		BIT(0)

#define REG_CSR_2L_JCPLL_SDM_HREN		0x0024
#define CSR_2L_PXP_JCPLL_SDM_HREN		BIT(0)
#define CSR_2L_PXP_JCPLL_TCL_AMP_EN		BIT(8)
#define CSR_2L_PXP_JCPLL_TCL_AMP_GAIN		GENMASK(18, 16)
#define CSR_2L_PXP_JCPLL_TCL_AMP_VREF		GENMASK(28, 24)

#define REG_CSR_2L_JCPLL_TCL_CMP		0x0028
#define CSR_2L_PXP_JCPLL_TCL_LPF_EN		BIT(16)
#define CSR_2L_PXP_JCPLL_TCL_LPF_BW		GENMASK(26, 24)

#define REG_CSR_2L_JCPLL_VCODIV			0x002c
#define CSR_2L_PXP_JCPLL_VCO_CFIX		GENMASK(9, 8)
#define CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN		BIT(16)
#define CSR_2L_PXP_JCPLL_VCO_SCAPWR		GENMASK(26, 24)

#define REG_CSR_2L_JCPLL_VCO_TCLVAR		0x0030
#define CSR_2L_PXP_JCPLL_VCO_TCLVAR		GENMASK(2, 0)

#define REG_CSR_2L_JCPLL_SSC			0x0038
#define CSR_2L_PXP_JCPLL_SSC_EN			BIT(0)
#define CSR_2L_PXP_JCPLL_SSC_PHASE_INI		BIT(8)
#define CSR_2L_PXP_JCPLL_SSC_TRI_EN		BIT(16)

#define REG_CSR_2L_JCPLL_SSC_DELTA1		0x003c
#define CSR_2L_PXP_JCPLL_SSC_DELTA1		GENMASK(15, 0)
#define CSR_2L_PXP_JCPLL_SSC_DELTA		GENMASK(31, 16)

#define REG_CSR_2L_JCPLL_SSC_PERIOD		0x0040
#define CSR_2L_PXP_JCPLL_SSC_PERIOD		GENMASK(15, 0)

#define REG_CSR_2L_JCPLL_TCL_VTP_EN		0x004c
#define CSR_2L_PXP_JCPLL_SPARE_LOW		GENMASK(31, 24)

#define REG_CSR_2L_JCPLL_TCL_KBAND_VREF		0x0050
#define CSR_2L_PXP_JCPLL_TCL_KBAND_VREF		GENMASK(4, 0)
#define CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN	BIT(24)

#define REG_CSR_2L_750M_SYS_CK			0x0054
#define CSR_2L_PXP_TXPLL_LPF_SHCK_EN		BIT(16)
#define CSR_2L_PXP_TXPLL_CHP_IBIAS		GENMASK(29, 24)

#define REG_CSR_2L_TXPLL_CHP_IOFST		0x0058
#define CSR_2L_PXP_TXPLL_CHP_IOFST		GENMASK(5, 0)
#define CSR_2L_PXP_TXPLL_LPF_BR			GENMASK(12, 8)
#define CSR_2L_PXP_TXPLL_LPF_BC			GENMASK(20, 16)
#define CSR_2L_PXP_TXPLL_LPF_BP			GENMASK(28, 24)

#define REG_CSR_2L_TXPLL_LPF_BWR		0x005c
#define CSR_2L_PXP_TXPLL_LPF_BWR		GENMASK(4, 0)
#define CSR_2L_PXP_TXPLL_LPF_BWC		GENMASK(12, 8)
#define CSR_2L_PXP_TXPLL_KBAND_CODE		GENMASK(31, 24)

#define REG_CSR_2L_TXPLL_KBAND_DIV		0x0060
#define CSR_2L_PXP_TXPLL_KBAND_DIV		GENMASK(2, 0)
#define CSR_2L_PXP_TXPLL_KBAND_KFC		GENMASK(9, 8)
#define CSR_2L_PXP_TXPLL_KBAND_KF		GENMASK(17, 16)
/* NOTE(review): lowercase "txpll" kept for source compatibility with callers */
#define CSR_2L_PXP_txpll_KBAND_KS		GENMASK(25, 24)

#define REG_CSR_2L_TXPLL_POSTDIV		0x0064
#define CSR_2L_PXP_TXPLL_POSTDIV_EN		BIT(0)
#define CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE	GENMASK(9, 8)
#define CSR_2L_PXP_TXPLL_PHY_CK1_EN		BIT(24)

#define REG_CSR_2L_TXPLL_PHY_CK2		0x0068
#define CSR_2L_PXP_TXPLL_REFIN_INTERNAL		BIT(24)

#define REG_CSR_2L_TXPLL_REFIN_DIV		0x006c
#define CSR_2L_PXP_TXPLL_REFIN_DIV		GENMASK(1, 0)
#define CSR_2L_PXP_TXPLL_RST_DLY		GENMASK(10, 8)
#define CSR_2L_PXP_TXPLL_PLL_RSTB		BIT(16)

#define REG_CSR_2L_TXPLL_SDM_DI_LS		0x0070
#define CSR_2L_PXP_TXPLL_SDM_DI_LS		GENMASK(1, 0)
#define CSR_2L_PXP_TXPLL_SDM_IFM		BIT(8)
#define CSR_2L_PXP_TXPLL_SDM_ORD		GENMASK(25, 24)

#define REG_CSR_2L_TXPLL_SDM_OUT		0x0074
#define CSR_2L_PXP_TXPLL_TCL_AMP_EN		BIT(16)
#define CSR_2L_PXP_TXPLL_TCL_AMP_GAIN		GENMASK(26, 24)

#define REG_CSR_2L_TXPLL_TCL_AMP_VREF		0x0078
#define CSR_2L_PXP_TXPLL_TCL_AMP_VREF		GENMASK(4, 0)
#define CSR_2L_PXP_TXPLL_TCL_LPF_EN		BIT(24)

#define REG_CSR_2L_TXPLL_TCL_LPF_BW		0x007c
#define CSR_2L_PXP_TXPLL_TCL_LPF_BW		GENMASK(2, 0)
#define CSR_2L_PXP_TXPLL_VCO_CFIX		GENMASK(17, 16)
#define CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN		BIT(24)

#define REG_CSR_2L_TXPLL_VCO_SCAPWR		0x0080
#define CSR_2L_PXP_TXPLL_VCO_SCAPWR		GENMASK(2, 0)

#define REG_CSR_2L_TXPLL_SSC			0x0084
#define CSR_2L_PXP_TXPLL_SSC_EN			BIT(0)
#define CSR_2L_PXP_TXPLL_SSC_PHASE_INI		BIT(8)

#define REG_CSR_2L_TXPLL_SSC_DELTA1		0x0088
#define CSR_2L_PXP_TXPLL_SSC_DELTA1		GENMASK(15, 0)
#define CSR_2L_PXP_TXPLL_SSC_DELTA		GENMASK(31, 16)

#define REG_CSR_2L_TXPLL_SSC_PERIOD		0x008c
/* NOTE(review): lowercase "txpll" kept for source compatibility with callers */
#define CSR_2L_PXP_txpll_SSC_PERIOD		GENMASK(15, 0)

#define REG_CSR_2L_TXPLL_VTP			0x0090
#define CSR_2L_PXP_TXPLL_VTP_EN			BIT(0)

#define REG_CSR_2L_TXPLL_TCL_VTP		0x0098
#define CSR_2L_PXP_TXPLL_SPARE_L		GENMASK(31, 24)

#define REG_CSR_2L_TXPLL_TCL_KBAND_VREF		0x009c
#define CSR_2L_PXP_TXPLL_TCL_KBAND_VREF		GENMASK(4, 0)
#define CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN	BIT(24)

#define REG_CSR_2L_TXPLL_POSTDIV_D256		0x00a0
#define CSR_2L_PXP_CLKTX0_AMP			GENMASK(10, 8)
#define CSR_2L_PXP_CLKTX0_OFFSET		GENMASK(17, 16)
#define CSR_2L_PXP_CLKTX0_SR			GENMASK(25, 24)

#define REG_CSR_2L_CLKTX0_FORCE_OUT1		0x00a4
#define CSR_2L_PXP_CLKTX0_HZ			BIT(8)
#define CSR_2L_PXP_CLKTX0_IMP_SEL		GENMASK(20, 16)
#define CSR_2L_PXP_CLKTX1_AMP			GENMASK(26, 24)

#define REG_CSR_2L_CLKTX1_OFFSET		0x00a8
#define CSR_2L_PXP_CLKTX1_OFFSET		GENMASK(1, 0)
#define CSR_2L_PXP_CLKTX1_SR			GENMASK(9, 8)
#define CSR_2L_PXP_CLKTX1_HZ			BIT(24)

#define REG_CSR_2L_CLKTX1_IMP_SEL		0x00ac
#define CSR_2L_PXP_CLKTX1_IMP_SEL		GENMASK(4, 0)

#define REG_CSR_2L_PLL_CMN_RESERVE0		0x00b0
#define CSR_2L_PXP_PLL_RESERVE_MASK		GENMASK(15, 0)

#define REG_CSR_2L_TX0_CKLDO			0x00cc
#define CSR_2L_PXP_TX0_CKLDO_EN			BIT(0)
#define CSR_2L_PXP_TX0_DMEDGEGEN_EN		BIT(24)

#define REG_CSR_2L_TX1_CKLDO			0x00e8
#define CSR_2L_PXP_TX1_CKLDO_EN			BIT(0)
#define CSR_2L_PXP_TX1_DMEDGEGEN_EN		BIT(24)

#define REG_CSR_2L_TX1_MULTLANE			0x00ec
#define CSR_2L_PXP_TX1_MULTLANE_EN		BIT(0)

#define REG_CSR_2L_RX0_REV0			0x00fc
#define CSR_2L_PXP_VOS_PNINV			GENMASK(3, 2)
#define CSR_2L_PXP_FE_GAIN_NORMAL_MODE		GENMASK(6, 4)
#define CSR_2L_PXP_FE_GAIN_TRAIN_MODE		GENMASK(10, 8)

#define REG_CSR_2L_RX0_PHYCK_DIV		0x0100
#define CSR_2L_PXP_RX0_PHYCK_SEL		GENMASK(9, 8)
#define CSR_2L_PXP_RX0_PHYCK_RSTB		BIT(16)
#define CSR_2L_PXP_RX0_TDC_CK_SEL		BIT(24)

#define REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV	0x0104
#define CSR_2L_PXP_CDR0_PD_EDGE_DISABLE		BIT(8)

#define REG_CSR_2L_CDR0_LPF_RATIO		0x0110
#define CSR_2L_PXP_CDR0_LPF_TOP_LIM		GENMASK(26, 8)

#define REG_CSR_2L_CDR0_PR_INJ_MODE		0x011c
#define CSR_2L_PXP_CDR0_INJ_FORCE_OFF		BIT(24)

#define REG_CSR_2L_CDR0_PR_BETA_DAC		0x0120
#define CSR_2L_PXP_CDR0_PR_BETA_SEL		GENMASK(19, 16)
#define CSR_2L_PXP_CDR0_PR_KBAND_DIV		GENMASK(26, 24)

#define REG_CSR_2L_CDR0_PR_VREG_IBAND		0x0124
#define CSR_2L_PXP_CDR0_PR_VREG_IBAND		GENMASK(2, 0)
#define CSR_2L_PXP_CDR0_PR_VREG_CKBUF		GENMASK(10, 8)

#define REG_CSR_2L_CDR0_PR_CKREF_DIV		0x0128
#define CSR_2L_PXP_CDR0_PR_CKREF_DIV		GENMASK(1, 0)

#define REG_CSR_2L_CDR0_PR_MONCK		0x012c
#define CSR_2L_PXP_CDR0_PR_MONCK_ENABLE		BIT(0)
#define CSR_2L_PXP_CDR0_PR_RESERVE0		GENMASK(19, 16)

#define REG_CSR_2L_CDR0_PR_COR_HBW		0x0130
#define CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON		BIT(8)
#define CSR_2L_PXP_CDR0_PR_CKREF_DIV1		GENMASK(17, 16)

#define REG_CSR_2L_CDR0_PR_MONPI		0x0134
#define CSR_2L_PXP_CDR0_PR_XFICK_EN		BIT(8)

#define REG_CSR_2L_RX0_SIGDET_DCTEST		0x0140
#define CSR_2L_PXP_RX0_SIGDET_LPF_CTRL		GENMASK(9, 8)
#define CSR_2L_PXP_RX0_SIGDET_PEAK		GENMASK(25, 24)

#define REG_CSR_2L_RX0_SIGDET_VTH_SEL		0x0144
#define CSR_2L_PXP_RX0_SIGDET_VTH_SEL		GENMASK(4, 0)
#define CSR_2L_PXP_RX0_FE_VB_EQ1_EN		BIT(24)

#define REG_CSR_2L_PXP_RX0_FE_VB_EQ2		0x0148
#define CSR_2L_PXP_RX0_FE_VB_EQ2_EN		BIT(0)
#define CSR_2L_PXP_RX0_FE_VB_EQ3_EN		BIT(8)
#define CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB		BIT(16)

#define REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS	0x0158
#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS		GENMASK(29, 24)

#define REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS		0x015c
#define CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS		GENMASK(5, 0)
#define CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS		GENMASK(13, 8)

#define REG_CSR_2L_RX1_REV0			0x01b4

#define REG_CSR_2L_RX1_PHYCK_DIV		0x01b8
#define CSR_2L_PXP_RX1_PHYCK_SEL		GENMASK(9, 8)
#define CSR_2L_PXP_RX1_PHYCK_RSTB		BIT(16)
#define CSR_2L_PXP_RX1_TDC_CK_SEL		BIT(24)

#define REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV	0x01bc
#define CSR_2L_PXP_CDR1_PD_EDGE_DISABLE		BIT(8)

#define REG_CSR_2L_CDR1_PR_BETA_DAC		0x01d8
#define CSR_2L_PXP_CDR1_PR_BETA_SEL		GENMASK(19, 16)
#define CSR_2L_PXP_CDR1_PR_KBAND_DIV		GENMASK(26, 24)

#define REG_CSR_2L_CDR1_PR_MONCK		0x01e4
#define CSR_2L_PXP_CDR1_PR_MONCK_ENABLE		BIT(0)
#define CSR_2L_PXP_CDR1_PR_RESERVE0		GENMASK(19, 16)

#define REG_CSR_2L_CDR1_LPF_RATIO		0x01c8
#define CSR_2L_PXP_CDR1_LPF_TOP_LIM		GENMASK(26, 8)

#define REG_CSR_2L_CDR1_PR_INJ_MODE		0x01d4
#define CSR_2L_PXP_CDR1_INJ_FORCE_OFF		BIT(24)

#define REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL	0x01dc
#define CSR_2L_PXP_CDR1_PR_VREG_IBAND		GENMASK(2, 0)
#define CSR_2L_PXP_CDR1_PR_VREG_CKBUF		GENMASK(10, 8)

#define REG_CSR_2L_CDR1_PR_CKREF_DIV		0x01e0
#define CSR_2L_PXP_CDR1_PR_CKREF_DIV		GENMASK(1, 0)

#define REG_CSR_2L_CDR1_PR_COR_HBW		0x01e8
#define CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON		BIT(8)
#define CSR_2L_PXP_CDR1_PR_CKREF_DIV1		GENMASK(17, 16)

#define REG_CSR_2L_CDR1_PR_MONPI		0x01ec
#define CSR_2L_PXP_CDR1_PR_XFICK_EN		BIT(8)

#define REG_CSR_2L_RX1_DAC_RANGE_EYE		0x01f4
#define CSR_2L_PXP_RX1_SIGDET_LPF_CTRL		GENMASK(25, 24)

#define REG_CSR_2L_RX1_SIGDET_NOVTH		0x01f8
#define CSR_2L_PXP_RX1_SIGDET_PEAK		GENMASK(9, 8)
#define CSR_2L_PXP_RX1_SIGDET_VTH_SEL		GENMASK(20, 16)

#define REG_CSR_2L_RX1_FE_VB_EQ1		0x0200
#define CSR_2L_PXP_RX1_FE_VB_EQ1_EN		BIT(0)
#define CSR_2L_PXP_RX1_FE_VB_EQ2_EN		BIT(8)
#define CSR_2L_PXP_RX1_FE_VB_EQ3_EN		BIT(16)
#define CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB		BIT(24)

#define REG_CSR_2L_RX1_OSCAL_VGA1IOS		0x0214
#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS		GENMASK(5, 0)
#define CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS		GENMASK(13, 8)
#define CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS		GENMASK(21, 16)

/* PMA: per-lane physical medium attachment blocks (same layout for pma0/pma1) */
#define REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1	0x0004
#define PCIE_LCPLL_MAN_PWDB			BIT(0)

#define REG_PCIE_PMA_SEQUENCE_DISB_CTRL1	0x010c
#define PCIE_DISB_RX_SDCAL_EN			BIT(0)

#define REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1	0x0114
#define PCIE_FORCE_RX_SDCAL_EN			BIT(0)

#define REG_PCIE_PMA_SS_RX_FREQ_DET1		0x014c
#define PCIE_PLL_FT_LOCK_CYCLECNT		GENMASK(15, 0)
#define PCIE_PLL_FT_UNLOCK_CYCLECNT		GENMASK(31, 16)

#define REG_PCIE_PMA_SS_RX_FREQ_DET2		0x0150
#define PCIE_LOCK_TARGET_BEG			GENMASK(15, 0)
#define PCIE_LOCK_TARGET_END			GENMASK(31, 16)

#define REG_PCIE_PMA_SS_RX_FREQ_DET3		0x0154
#define PCIE_UNLOCK_TARGET_BEG			GENMASK(15, 0)
#define PCIE_UNLOCK_TARGET_END			GENMASK(31, 16)

#define REG_PCIE_PMA_SS_RX_FREQ_DET4		0x0158
#define PCIE_FREQLOCK_DET_EN			GENMASK(2, 0)
#define PCIE_LOCK_LOCKTH			GENMASK(11, 8)
#define PCIE_UNLOCK_LOCKTH			GENMASK(15, 12)

#define REG_PCIE_PMA_SS_RX_CAL1			0x0160
#define REG_PCIE_PMA_SS_RX_CAL2			0x0164
#define PCIE_CAL_OUT_OS				GENMASK(11, 8)

#define REG_PCIE_PMA_SS_RX_SIGDET0		0x0168
#define PCIE_SIGDET_WIN_NONVLD_TIMES		GENMASK(28, 24)

#define REG_PCIE_PMA_TX_RESET			0x0260
#define PCIE_TX_TOP_RST				BIT(0)
#define PCIE_TX_CAL_RST				BIT(8)

#define REG_PCIE_PMA_RX_FORCE_MODE0		0x0294
#define PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL	GENMASK(1, 0)

#define REG_PCIE_PMA_SS_DA_XPON_PWDB0		0x034c
#define PCIE_DA_XPON_CDR_PR_PWDB		BIT(8)

#define REG_PCIE_PMA_SW_RESET			0x0460
#define PCIE_SW_RX_FIFO_RST			BIT(0)
#define PCIE_SW_RX_RST				BIT(1)
#define PCIE_SW_TX_RST				BIT(2)
#define PCIE_SW_PMA_RST				BIT(3)
#define PCIE_SW_ALLPCS_RST			BIT(4)
#define PCIE_SW_REF_RST				BIT(5)
#define PCIE_SW_TX_FIFO_RST			BIT(6)
#define PCIE_SW_XFI_TXPCS_RST			BIT(7)
#define PCIE_SW_XFI_RXPCS_RST			BIT(8)
#define PCIE_SW_XFI_RXPCS_BIST_RST		BIT(9)
#define PCIE_SW_HSG_TXPCS_RST			BIT(10)
#define PCIE_SW_HSG_RXPCS_RST			BIT(11)
/* all software reset bits of REG_PCIE_PMA_SW_RESET combined */
#define PCIE_PMA_SW_RST		(PCIE_SW_RX_FIFO_RST | \
				 PCIE_SW_RX_RST | \
				 PCIE_SW_TX_RST | \
				 PCIE_SW_PMA_RST | \
				 PCIE_SW_ALLPCS_RST | \
				 PCIE_SW_REF_RST | \
				 PCIE_SW_TX_FIFO_RST | \
				 PCIE_SW_XFI_TXPCS_RST | \
				 PCIE_SW_XFI_RXPCS_RST | \
				 PCIE_SW_XFI_RXPCS_BIST_RST | \
				 PCIE_SW_HSG_TXPCS_RST | \
				 PCIE_SW_HSG_RXPCS_RST)

#define REG_PCIE_PMA_RO_RX_FREQDET		0x0530
#define PCIE_RO_FBCK_LOCK			BIT(0)
#define PCIE_RO_FL_OUT				GENMASK(31, 16)

#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC	0x0794
#define PCIE_FORCE_DA_PXP_CDR_PR_IDAC		GENMASK(10, 0)
#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC	BIT(16)
#define PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW	BIT(24)

#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW	0x0798
#define PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW		GENMASK(30, 0)

#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS	0x079c
#define PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW	BIT(16)

#define REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW	0x0800
#define PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW		GENMASK(30, 0)

#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB	0x081c
#define PCIE_FORCE_DA_PXP_CDR_PD_PWDB		BIT(0)
#define PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB	BIT(8)

#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C	0x0820
#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN	BIT(0)
#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN	BIT(8)
#define PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN	BIT(16)
#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN	BIT(24)

#define REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB	0x0824
#define PCIE_FORCE_DA_PXP_CDR_PR_PWDB		BIT(16)
#define PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB	BIT(24)

#define REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT	0x0828
#define PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN	BIT(0)
#define PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN	BIT(8)
#define PCIE_FORCE_DA_PXP_JCPLL_EN		BIT(16)
#define PCIE_FORCE_SEL_DA_PXP_JCPLL_EN		BIT(24)

/* 0x0084c == 0x084c; extra leading zero only, fits between 0x0828 and 0x0854 */
#define REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST	0x0084c
#define PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB	BIT(16)
#define PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB	BIT(24)

#define REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT	0x0854
#define PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN	BIT(0)
#define PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN	BIT(8)
#define PCIE_FORCE_DA_PXP_TXPLL_EN		BIT(16)
#define PCIE_FORCE_SEL_DA_PXP_TXPLL_EN		BIT(24)

#define REG_PCIE_PMA_SCAN_MODE			0x0884
#define PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN	BIT(0)
#define PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN	BIT(8)

#define REG_PCIE_PMA_DIG_RESERVE_13		0x08bc
#define PCIE_FLL_IDAC_PCIEG1			GENMASK(10, 0)
#define PCIE_FLL_IDAC_PCIEG2			GENMASK(26, 16)

#define REG_PCIE_PMA_DIG_RESERVE_14		0x08c0
#define PCIE_FLL_IDAC_PCIEG3			GENMASK(10, 0)
#define PCIE_FLL_LOAD_EN			BIT(16)

#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL	0x088c
#define PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL	GENMASK(1, 0)
#define PCIE_FORCE_SEL_DA_PXP_RX_FE_GAIN_CTRL	BIT(8)

#define REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB	0x0894
#define PCIE_FORCE_DA_PXP_RX_FE_PWDB		BIT(0)
#define PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB	BIT(8)

#define REG_PCIE_PMA_DIG_RESERVE_12		0x08b8
#define PCIE_FORCE_PMA_RX_SPEED			GENMASK(7, 4)
/* NOTE(review): BIT(7) overlaps PCIE_FORCE_PMA_RX_SPEED GENMASK(7, 4) — verify against datasheet */
#define PCIE_FORCE_SEL_PMA_RX_SPEED		BIT(7)

#define REG_PCIE_PMA_DIG_RESERVE_17		0x08e0

#define REG_PCIE_PMA_DIG_RESERVE_18		0x08e4
#define PCIE_PXP_RX_VTH_SEL_PCIE_G1		GENMASK(4, 0)
#define PCIE_PXP_RX_VTH_SEL_PCIE_G2		GENMASK(12, 8)
#define PCIE_PXP_RX_VTH_SEL_PCIE_G3		GENMASK(20, 16)

#define REG_PCIE_PMA_DIG_RESERVE_19		0x08e8
#define PCIE_PCP_RX_REV0_PCIE_GEN1		GENMASK(31, 16)

#define REG_PCIE_PMA_DIG_RESERVE_20		0x08ec
#define PCIE_PCP_RX_REV0_PCIE_GEN2		GENMASK(15, 0)
#define PCIE_PCP_RX_REV0_PCIE_GEN3		GENMASK(31, 16)

#define REG_PCIE_PMA_DIG_RESERVE_21		0x08f0
#define REG_PCIE_PMA_DIG_RESERVE_22		0x08f4
#define REG_PCIE_PMA_DIG_RESERVE_27		0x0908
#define REG_PCIE_PMA_DIG_RESERVE_30		0x0914

/* DTIME: single Tx/Rx detection-time configuration word per port */
#define REG_PCIE_PEXTP_DIG_GLB44		0x00
#define PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL	GENMASK(7, 0)
#define PCIE_XTP_RXDET_EN_STB_T_SEL		GENMASK(15, 8)
#define PCIE_XTP_RXDET_FINISH_STB_T_SEL		GENMASK(23, 16)
#define PCIE_XTP_TXPD_TX_DATA_EN_DLY		GENMASK(27, 24)
#define PCIE_XTP_TXPD_RXDET_DONE_CDT		BIT(28)
#define PCIE_XTP_RXDET_LATCH_STB_T_SEL		GENMASK(31, 29)

/* RX AEQ: Rx adaptive-equalization training block (per-port registers) */
#define REG_PCIE_PEXTP_DIG_LN_RX30_P0		0x0000
#define PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT	GENMASK(7, 0)
#define PCIE_XTP_LN_RX_PDOWN_T2RLB_DIG_EN	BIT(8)
#define PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT	GENMASK(31, 16)

#define REG_PCIE_PEXTP_DIG_LN_RX30_P1		0x0100

#endif /* _PHY_AIROHA_PCIE_H */
+1286
drivers/phy/phy-airoha-pcie.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2024 AIROHA Inc 4 + * Author: Lorenzo Bianconi <lorenzo@kernel.org> 5 + */ 6 + 7 + #include <linux/bitfield.h> 8 + #include <linux/delay.h> 9 + #include <linux/io.h> 10 + #include <linux/module.h> 11 + #include <linux/of.h> 12 + #include <linux/phy/phy.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/slab.h> 15 + 16 + #include "phy-airoha-pcie-regs.h" 17 + 18 + #define LEQ_LEN_CTRL_MAX_VAL 7 19 + #define FREQ_LOCK_MAX_ATTEMPT 10 20 + 21 + enum airoha_pcie_port_gen { 22 + PCIE_PORT_GEN1 = 1, 23 + PCIE_PORT_GEN2, 24 + PCIE_PORT_GEN3, 25 + }; 26 + 27 + /** 28 + * struct airoha_pcie_phy - PCIe phy driver main structure 29 + * @dev: pointer to device 30 + * @phy: pointer to generic phy 31 + * @csr_2l: Analogic lane IO mapped register base address 32 + * @pma0: IO mapped register base address of PMA0-PCIe 33 + * @pma1: IO mapped register base address of PMA1-PCIe 34 + * @p0_xr_dtime: IO mapped register base address of port0 Tx-Rx detection time 35 + * @p1_xr_dtime: IO mapped register base address of port1 Tx-Rx detection time 36 + * @rx_aeq: IO mapped register base address of Rx AEQ training 37 + */ 38 + struct airoha_pcie_phy { 39 + struct device *dev; 40 + struct phy *phy; 41 + void __iomem *csr_2l; 42 + void __iomem *pma0; 43 + void __iomem *pma1; 44 + void __iomem *p0_xr_dtime; 45 + void __iomem *p1_xr_dtime; 46 + void __iomem *rx_aeq; 47 + }; 48 + 49 + static void airoha_phy_clear_bits(void __iomem *reg, u32 mask) 50 + { 51 + u32 val = readl(reg) & ~mask; 52 + 53 + writel(val, reg); 54 + } 55 + 56 + static void airoha_phy_set_bits(void __iomem *reg, u32 mask) 57 + { 58 + u32 val = readl(reg) | mask; 59 + 60 + writel(val, reg); 61 + } 62 + 63 + static void airoha_phy_update_bits(void __iomem *reg, u32 mask, u32 val) 64 + { 65 + u32 tmp = readl(reg); 66 + 67 + tmp &= ~mask; 68 + tmp |= val & mask; 69 + writel(tmp, reg); 70 + } 71 + 72 + #define airoha_phy_update_field(reg, mask, 
val) \ 73 + do { \ 74 + BUILD_BUG_ON_MSG(!__builtin_constant_p((mask)), \ 75 + "mask is not constant"); \ 76 + airoha_phy_update_bits((reg), (mask), \ 77 + FIELD_PREP((mask), (val))); \ 78 + } while (0) 79 + 80 + #define airoha_phy_csr_2l_clear_bits(pcie_phy, reg, mask) \ 81 + airoha_phy_clear_bits((pcie_phy)->csr_2l + (reg), (mask)) 82 + #define airoha_phy_csr_2l_set_bits(pcie_phy, reg, mask) \ 83 + airoha_phy_set_bits((pcie_phy)->csr_2l + (reg), (mask)) 84 + #define airoha_phy_csr_2l_update_field(pcie_phy, reg, mask, val) \ 85 + airoha_phy_update_field((pcie_phy)->csr_2l + (reg), (mask), (val)) 86 + #define airoha_phy_pma0_clear_bits(pcie_phy, reg, mask) \ 87 + airoha_phy_clear_bits((pcie_phy)->pma0 + (reg), (mask)) 88 + #define airoha_phy_pma1_clear_bits(pcie_phy, reg, mask) \ 89 + airoha_phy_clear_bits((pcie_phy)->pma1 + (reg), (mask)) 90 + #define airoha_phy_pma0_set_bits(pcie_phy, reg, mask) \ 91 + airoha_phy_set_bits((pcie_phy)->pma0 + (reg), (mask)) 92 + #define airoha_phy_pma1_set_bits(pcie_phy, reg, mask) \ 93 + airoha_phy_set_bits((pcie_phy)->pma1 + (reg), (mask)) 94 + #define airoha_phy_pma0_update_field(pcie_phy, reg, mask, val) \ 95 + airoha_phy_update_field((pcie_phy)->pma0 + (reg), (mask), (val)) 96 + #define airoha_phy_pma1_update_field(pcie_phy, reg, mask, val) \ 97 + airoha_phy_update_field((pcie_phy)->pma1 + (reg), (mask), (val)) 98 + 99 + static void 100 + airoha_phy_init_lane0_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy, 101 + enum airoha_pcie_port_gen gen) 102 + { 103 + u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941; 104 + u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 
26000 : 32767; 105 + u32 pr_idac, val, cdr_pr_idac_tmp = 0; 106 + int i; 107 + 108 + airoha_phy_pma0_set_bits(pcie_phy, 109 + REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1, 110 + PCIE_LCPLL_MAN_PWDB); 111 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, 112 + PCIE_LOCK_TARGET_BEG, 113 + fl_out_target - 100); 114 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, 115 + PCIE_LOCK_TARGET_END, 116 + fl_out_target + 100); 117 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, 118 + PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt); 119 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, 120 + PCIE_LOCK_LOCKTH, 0x3); 121 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, 122 + PCIE_UNLOCK_TARGET_BEG, 123 + fl_out_target - 100); 124 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, 125 + PCIE_UNLOCK_TARGET_END, 126 + fl_out_target + 100); 127 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, 128 + PCIE_PLL_FT_UNLOCK_CYCLECNT, 129 + lock_cyclecnt); 130 + airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, 131 + PCIE_UNLOCK_LOCKTH, 0x3); 132 + 133 + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE, 134 + CSR_2L_PXP_CDR0_INJ_FORCE_OFF); 135 + 136 + airoha_phy_pma0_set_bits(pcie_phy, 137 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 138 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); 139 + airoha_phy_pma0_set_bits(pcie_phy, 140 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 141 + PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN); 142 + airoha_phy_pma0_set_bits(pcie_phy, 143 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 144 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); 145 + airoha_phy_pma0_clear_bits(pcie_phy, 146 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 147 + PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN); 148 + airoha_phy_pma0_set_bits(pcie_phy, 149 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 150 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); 151 + 152 + 
airoha_phy_pma0_set_bits(pcie_phy, 153 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 154 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); 155 + airoha_phy_pma0_clear_bits(pcie_phy, 156 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 157 + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); 158 + airoha_phy_pma0_set_bits(pcie_phy, 159 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 160 + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); 161 + 162 + for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) { 163 + airoha_phy_pma0_update_field(pcie_phy, 164 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 165 + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8); 166 + airoha_phy_pma0_clear_bits(pcie_phy, 167 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 168 + PCIE_FREQLOCK_DET_EN); 169 + airoha_phy_pma0_update_field(pcie_phy, 170 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 171 + PCIE_FREQLOCK_DET_EN, 0x3); 172 + 173 + usleep_range(10000, 15000); 174 + 175 + val = FIELD_GET(PCIE_RO_FL_OUT, 176 + readl(pcie_phy->pma0 + 177 + REG_PCIE_PMA_RO_RX_FREQDET)); 178 + if (val > fl_out_target) 179 + cdr_pr_idac_tmp = i << 8; 180 + } 181 + 182 + for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) { 183 + pr_idac = cdr_pr_idac_tmp | (0x1 << i); 184 + airoha_phy_pma0_update_field(pcie_phy, 185 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 186 + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac); 187 + airoha_phy_pma0_clear_bits(pcie_phy, 188 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 189 + PCIE_FREQLOCK_DET_EN); 190 + airoha_phy_pma0_update_field(pcie_phy, 191 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 192 + PCIE_FREQLOCK_DET_EN, 0x3); 193 + 194 + usleep_range(10000, 15000); 195 + 196 + val = FIELD_GET(PCIE_RO_FL_OUT, 197 + readl(pcie_phy->pma0 + 198 + REG_PCIE_PMA_RO_RX_FREQDET)); 199 + if (val < fl_out_target) 200 + pr_idac &= ~(0x1 << i); 201 + 202 + cdr_pr_idac_tmp = pr_idac; 203 + } 204 + 205 + airoha_phy_pma0_update_field(pcie_phy, 206 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 207 + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, 208 + cdr_pr_idac_tmp); 209 + 210 + for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) { 211 + u32 val; 212 + 213 + 
airoha_phy_pma0_clear_bits(pcie_phy, 214 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 215 + PCIE_FREQLOCK_DET_EN); 216 + airoha_phy_pma0_update_field(pcie_phy, 217 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 218 + PCIE_FREQLOCK_DET_EN, 0x3); 219 + 220 + usleep_range(10000, 15000); 221 + 222 + val = readl(pcie_phy->pma0 + REG_PCIE_PMA_RO_RX_FREQDET); 223 + if (val & PCIE_RO_FBCK_LOCK) 224 + break; 225 + } 226 + 227 + /* turn off force mode and update band values */ 228 + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_INJ_MODE, 229 + CSR_2L_PXP_CDR0_INJ_FORCE_OFF); 230 + 231 + airoha_phy_pma0_clear_bits(pcie_phy, 232 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 233 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); 234 + airoha_phy_pma0_clear_bits(pcie_phy, 235 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 236 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); 237 + airoha_phy_pma0_clear_bits(pcie_phy, 238 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 239 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); 240 + airoha_phy_pma0_clear_bits(pcie_phy, 241 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 242 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); 243 + if (gen == PCIE_PORT_GEN3) { 244 + airoha_phy_pma0_update_field(pcie_phy, 245 + REG_PCIE_PMA_DIG_RESERVE_14, 246 + PCIE_FLL_IDAC_PCIEG3, 247 + cdr_pr_idac_tmp); 248 + } else { 249 + airoha_phy_pma0_update_field(pcie_phy, 250 + REG_PCIE_PMA_DIG_RESERVE_13, 251 + PCIE_FLL_IDAC_PCIEG1, 252 + cdr_pr_idac_tmp); 253 + airoha_phy_pma0_update_field(pcie_phy, 254 + REG_PCIE_PMA_DIG_RESERVE_13, 255 + PCIE_FLL_IDAC_PCIEG2, 256 + cdr_pr_idac_tmp); 257 + } 258 + } 259 + 260 + static void 261 + airoha_phy_init_lane1_rx_fw_pre_calib(struct airoha_pcie_phy *pcie_phy, 262 + enum airoha_pcie_port_gen gen) 263 + { 264 + u32 fl_out_target = gen == PCIE_PORT_GEN3 ? 41600 : 41941; 265 + u32 lock_cyclecnt = gen == PCIE_PORT_GEN3 ? 
26000 : 32767; 266 + u32 pr_idac, val, cdr_pr_idac_tmp = 0; 267 + int i; 268 + 269 + airoha_phy_pma1_set_bits(pcie_phy, 270 + REG_PCIE_PMA_SS_LCPLL_PWCTL_SETTING_1, 271 + PCIE_LCPLL_MAN_PWDB); 272 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, 273 + PCIE_LOCK_TARGET_BEG, 274 + fl_out_target - 100); 275 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET2, 276 + PCIE_LOCK_TARGET_END, 277 + fl_out_target + 100); 278 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, 279 + PCIE_PLL_FT_LOCK_CYCLECNT, lock_cyclecnt); 280 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, 281 + PCIE_LOCK_LOCKTH, 0x3); 282 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, 283 + PCIE_UNLOCK_TARGET_BEG, 284 + fl_out_target - 100); 285 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET3, 286 + PCIE_UNLOCK_TARGET_END, 287 + fl_out_target + 100); 288 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET1, 289 + PCIE_PLL_FT_UNLOCK_CYCLECNT, 290 + lock_cyclecnt); 291 + airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_FREQ_DET4, 292 + PCIE_UNLOCK_LOCKTH, 0x3); 293 + 294 + airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE, 295 + CSR_2L_PXP_CDR1_INJ_FORCE_OFF); 296 + 297 + airoha_phy_pma1_set_bits(pcie_phy, 298 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 299 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); 300 + airoha_phy_pma1_set_bits(pcie_phy, 301 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 302 + PCIE_FORCE_DA_PXP_CDR_PR_LPF_R_EN); 303 + airoha_phy_pma1_set_bits(pcie_phy, 304 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 305 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); 306 + airoha_phy_pma1_clear_bits(pcie_phy, 307 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 308 + PCIE_FORCE_DA_PXP_CDR_PR_LPF_C_EN); 309 + airoha_phy_pma1_set_bits(pcie_phy, 310 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 311 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); 312 + 
airoha_phy_pma1_set_bits(pcie_phy, 313 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 314 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); 315 + airoha_phy_pma1_clear_bits(pcie_phy, 316 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 317 + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); 318 + airoha_phy_pma1_set_bits(pcie_phy, 319 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 320 + PCIE_FORCE_DA_PXP_CDR_PR_PWDB); 321 + 322 + for (i = 0; i < LEQ_LEN_CTRL_MAX_VAL; i++) { 323 + airoha_phy_pma1_update_field(pcie_phy, 324 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 325 + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, i << 8); 326 + airoha_phy_pma1_clear_bits(pcie_phy, 327 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 328 + PCIE_FREQLOCK_DET_EN); 329 + airoha_phy_pma1_update_field(pcie_phy, 330 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 331 + PCIE_FREQLOCK_DET_EN, 0x3); 332 + 333 + usleep_range(10000, 15000); 334 + 335 + val = FIELD_GET(PCIE_RO_FL_OUT, 336 + readl(pcie_phy->pma1 + 337 + REG_PCIE_PMA_RO_RX_FREQDET)); 338 + if (val > fl_out_target) 339 + cdr_pr_idac_tmp = i << 8; 340 + } 341 + 342 + for (i = LEQ_LEN_CTRL_MAX_VAL; i >= 0; i--) { 343 + pr_idac = cdr_pr_idac_tmp | (0x1 << i); 344 + airoha_phy_pma1_update_field(pcie_phy, 345 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 346 + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, pr_idac); 347 + airoha_phy_pma1_clear_bits(pcie_phy, 348 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 349 + PCIE_FREQLOCK_DET_EN); 350 + airoha_phy_pma1_update_field(pcie_phy, 351 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 352 + PCIE_FREQLOCK_DET_EN, 0x3); 353 + 354 + usleep_range(10000, 15000); 355 + 356 + val = FIELD_GET(PCIE_RO_FL_OUT, 357 + readl(pcie_phy->pma1 + 358 + REG_PCIE_PMA_RO_RX_FREQDET)); 359 + if (val < fl_out_target) 360 + pr_idac &= ~(0x1 << i); 361 + 362 + cdr_pr_idac_tmp = pr_idac; 363 + } 364 + 365 + airoha_phy_pma1_update_field(pcie_phy, 366 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 367 + PCIE_FORCE_DA_PXP_CDR_PR_IDAC, 368 + cdr_pr_idac_tmp); 369 + 370 + for (i = 0; i < FREQ_LOCK_MAX_ATTEMPT; i++) { 371 + u32 val; 372 + 373 + 
airoha_phy_pma1_clear_bits(pcie_phy, 374 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 375 + PCIE_FREQLOCK_DET_EN); 376 + airoha_phy_pma1_update_field(pcie_phy, 377 + REG_PCIE_PMA_SS_RX_FREQ_DET4, 378 + PCIE_FREQLOCK_DET_EN, 0x3); 379 + 380 + usleep_range(10000, 15000); 381 + 382 + val = readl(pcie_phy->pma1 + REG_PCIE_PMA_RO_RX_FREQDET); 383 + if (val & PCIE_RO_FBCK_LOCK) 384 + break; 385 + } 386 + 387 + /* turn off force mode and update band values */ 388 + airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_INJ_MODE, 389 + CSR_2L_PXP_CDR1_INJ_FORCE_OFF); 390 + 391 + airoha_phy_pma1_clear_bits(pcie_phy, 392 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 393 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_R_EN); 394 + airoha_phy_pma1_clear_bits(pcie_phy, 395 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_LPF_C, 396 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_LPF_C_EN); 397 + airoha_phy_pma1_clear_bits(pcie_phy, 398 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_PIEYE_PWDB, 399 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_PWDB); 400 + airoha_phy_pma1_clear_bits(pcie_phy, 401 + REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC, 402 + PCIE_FORCE_SEL_DA_PXP_CDR_PR_IDAC); 403 + if (gen == PCIE_PORT_GEN3) { 404 + airoha_phy_pma1_update_field(pcie_phy, 405 + REG_PCIE_PMA_DIG_RESERVE_14, 406 + PCIE_FLL_IDAC_PCIEG3, 407 + cdr_pr_idac_tmp); 408 + } else { 409 + airoha_phy_pma1_update_field(pcie_phy, 410 + REG_PCIE_PMA_DIG_RESERVE_13, 411 + PCIE_FLL_IDAC_PCIEG1, 412 + cdr_pr_idac_tmp); 413 + airoha_phy_pma1_update_field(pcie_phy, 414 + REG_PCIE_PMA_DIG_RESERVE_13, 415 + PCIE_FLL_IDAC_PCIEG2, 416 + cdr_pr_idac_tmp); 417 + } 418 + } 419 + 420 + static void airoha_pcie_phy_init_default(struct airoha_pcie_phy *pcie_phy) 421 + { 422 + airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CMN, 423 + CSR_2L_PXP_CMN_TRIM_MASK, 0x10); 424 + writel(0xcccbcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_21); 425 + writel(0xcccb, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_22); 426 + writel(0xcccbcccb, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_21); 427 + writel(0xcccb, 
/* NOTE(review): the first four lines are the tail of a function that begins
 * before this chunk (its head is not visible here) — left untouched.
 */
		    pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_22);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CMN,
				   CSR_2L_PXP_CMN_LANE_EN);
}

/* Program the CLKTX0/CLKTX1 reference-clock output pads: amplitude (AMP),
 * DC offset, impedance select (IMP_SEL), slew rate (SR, cleared) and the
 * Hi-Z control (HZ, cleared = outputs driven), plus a reserved PLL field.
 * Values are vendor-provided magic for the EN7581.
 */
static void airoha_pcie_phy_init_clk_out(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_TXPLL_POSTDIV_D256,
				       CSR_2L_PXP_CLKTX0_AMP, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_CLKTX0_FORCE_OUT1,
				       CSR_2L_PXP_CLKTX1_AMP, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_TXPLL_POSTDIV_D256,
				       CSR_2L_PXP_CLKTX0_OFFSET, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
				       CSR_2L_PXP_CLKTX1_OFFSET, 0x2);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX0_FORCE_OUT1,
				     CSR_2L_PXP_CLKTX0_HZ);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
				     CSR_2L_PXP_CLKTX1_HZ);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_CLKTX0_FORCE_OUT1,
				       CSR_2L_PXP_CLKTX0_IMP_SEL, 0x12);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CLKTX1_IMP_SEL,
				       CSR_2L_PXP_CLKTX1_IMP_SEL, 0x12);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV_D256,
				     CSR_2L_PXP_CLKTX0_SR);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CLKTX1_OFFSET,
				     CSR_2L_PXP_CLKTX1_SR);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_PLL_CMN_RESERVE0,
				       CSR_2L_PXP_PLL_RESERVE_MASK, 0xdd);
}

/* Assert the PMA software resets (RXPCS/REF/RX and TX) on both lanes.
 * The resets are only set here; release happens later in the init flow.
 */
static void airoha_pcie_phy_init_csr_2l(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
				 PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
				 PCIE_SW_RX_RST);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
				 PCIE_SW_XFI_RXPCS_RST | PCIE_SW_REF_RST |
				 PCIE_SW_RX_RST);
	/* NOTE(review): the mask below ORs in REG_PCIE_PMA_TX_RESET, which by
	 * naming convention is a register *offset*, not a bit mask (compare
	 * the PCIE_SW_* masks above). A PCIE_TX_*_RST bit macro was probably
	 * intended — verify against the EN7581 datasheet before changing.
	 */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
				 PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_TX_RESET,
				 PCIE_TX_TOP_RST | REG_PCIE_PMA_TX_RESET);
}

/* Basic Rx path setup for both lanes: raw reserve-register value, CDR
 * phase-rotator clock enable, PD edge path enable (EDGE_DISABLE cleared)
 * and Rx PHY clock source select.
 */
static void airoha_pcie_phy_init_rx(struct airoha_pcie_phy *pcie_phy)
{
	/* Vendor magic value for the Rx digital reserve register */
	writel(0x2a00090b, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_17);
	writel(0x2a00090b, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_17);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONPI,
				   CSR_2L_PXP_CDR0_PR_XFICK_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONPI,
				   CSR_2L_PXP_CDR1_PR_XFICK_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy,
				     REG_CSR_2L_CDR0_PD_PICAL_CKD8_INV,
				     CSR_2L_PXP_CDR0_PD_EDGE_DISABLE);
	airoha_phy_csr_2l_clear_bits(pcie_phy,
				     REG_CSR_2L_CDR1_PD_PICAL_CKD8_INV,
				     CSR_2L_PXP_CDR1_PD_EDGE_DISABLE);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
				       CSR_2L_PXP_RX0_PHYCK_SEL, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
				       CSR_2L_PXP_RX1_PHYCK_SEL, 0x1);
}

/* Configure the JCPLL (no SSC yet — spread spectrum is enabled afterwards
 * by airoha_pcie_phy_init_ssc_jcpll()): force-disable the PLL, program
 * loop filter / charge pump / VCO / K-band calibration parameters and the
 * SDM PCW (0x50000000), then force-enable PLL and clock output on both
 * lanes. Statement order follows the vendor bring-up sequence — do not
 * reorder.
 */
static void airoha_pcie_phy_init_jcpll(struct airoha_pcie_phy *pcie_phy)
{
	/* Take manual control of JCPLL enable and force it off while
	 * parameters are being programmed.
	 */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
	airoha_phy_pma0_clear_bits(pcie_phy,
				   REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				   PCIE_FORCE_DA_PXP_JCPLL_EN);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
	airoha_phy_pma1_clear_bits(pcie_phy,
				   REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				   PCIE_FORCE_DA_PXP_JCPLL_EN);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_VTP_EN,
				       CSR_2L_PXP_JCPLL_SPARE_LOW, 0x20);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
				   CSR_2L_PXP_JCPLL_RST);
	/* SSC delta cleared here; re-programmed when SSC is enabled later */
	writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_JCPLL_SSC_DELTA1);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
				     CSR_2L_PXP_JCPLL_SSC_PERIOD);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
				     CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
				     CSR_2L_PXP_JCPLL_SSC_TRI_EN);
	/* Loop filter coefficients (BR/BP/BC/BWC/BWR) — vendor values */
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
				       CSR_2L_PXP_JCPLL_LPF_BR, 0xa);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
				       CSR_2L_PXP_JCPLL_LPF_BP, 0xc);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
				       CSR_2L_PXP_JCPLL_LPF_BC, 0x1f);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
				       CSR_2L_PXP_JCPLL_LPF_BWC, 0x1e);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BR,
				       CSR_2L_PXP_JCPLL_LPF_BWR, 0xa);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
				       CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
				       0x1);
	/* NOTE(review): CSR_2L_PXP_JCPLL_MONCK is passed as the *register*
	 * argument while every other call here uses a REG_-prefixed macro;
	 * REG_CSR_2L_JCPLL_MONCK (or similar) was probably intended —
	 * verify against the register map.
	 */
	airoha_phy_csr_2l_clear_bits(pcie_phy, CSR_2L_PXP_JCPLL_MONCK,
				     CSR_2L_PXP_JCPLL_REFIN_DIV);

	/* Force the SDM phase-control word (PCW) to 0x50000000 on both lanes */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_VOS,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_SDM_PCW);
	airoha_phy_pma0_update_field(pcie_phy,
				     REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
				     PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
				     0x50000000);
	airoha_phy_pma1_update_field(pcie_phy,
				     REG_PCIE_PMA_FORCE_DA_PXP_JCPLL_SDM_PCW,
				     PCIE_FORCE_DA_PXP_JCPLL_SDM_PCW,
				     0x50000000);

	/* Post dividers, reset delay, charge pump and VCO configuration */
	airoha_phy_csr_2l_set_bits(pcie_phy,
				   REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
				   CSR_2L_PXP_JCPLL_POSTDIV_D5);
	airoha_phy_csr_2l_set_bits(pcie_phy,
				   REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
				   CSR_2L_PXP_JCPLL_POSTDIV_D2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
				       CSR_2L_PXP_JCPLL_RST_DLY, 0x4);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
				     CSR_2L_PXP_JCPLL_SDM_DI_LS);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
				     CSR_2L_PXP_JCPLL_VCO_KBAND_MEAS_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
				     CSR_2L_PXP_JCPLL_CHP_IOFST);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
				       CSR_2L_PXP_JCPLL_CHP_IBIAS, 0xc);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_JCPLL_MMD_PREDIV_MODE,
				       CSR_2L_PXP_JCPLL_MMD_PREDIV_MODE,
				       0x1);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
				   CSR_2L_PXP_JCPLL_VCO_HALFLSB_EN);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
				       CSR_2L_PXP_JCPLL_VCO_CFIX, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCODIV,
				       CSR_2L_PXP_JCPLL_VCO_SCAPWR, 0x4);
	/* NOTE(review): mask argument uses a REG_-prefixed macro
	 * (REG_CSR_2L_JCPLL_LPF_SHCK_EN); a CSR_2L_PXP_* bit mask was
	 * probably intended — verify.
	 */
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_IB_EXT,
				     REG_CSR_2L_JCPLL_LPF_SHCK_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
				   CSR_2L_PXP_JCPLL_POSTDIV_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
				     CSR_2L_PXP_JCPLL_KBAND_KFC);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
				       CSR_2L_PXP_JCPLL_KBAND_KF, 0x3);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_KBAND_KFC,
				     CSR_2L_PXP_JCPLL_KBAND_KS);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
				       CSR_2L_PXP_JCPLL_KBAND_DIV, 0x1);

	/* K-band load forced off — lane0 only in the original sequence;
	 * presumably intentional, but note the lane1 counterpart is absent.
	 */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_KBAND_LOAD_EN);
	airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SCAN_MODE,
				   PCIE_FORCE_DA_PXP_JCPLL_KBAND_LOAD_EN);

	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_LPF_BWC,
				       CSR_2L_PXP_JCPLL_KBAND_CODE, 0xe4);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
				   CSR_2L_PXP_JCPLL_TCL_AMP_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
				   CSR_2L_PXP_JCPLL_TCL_LPF_EN);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_JCPLL_TCL_KBAND_VREF,
				       CSR_2L_PXP_JCPLL_TCL_KBAND_VREF, 0xf);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
				       CSR_2L_PXP_JCPLL_TCL_AMP_GAIN, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
				       CSR_2L_PXP_JCPLL_TCL_AMP_VREF, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_TCL_CMP,
				       CSR_2L_PXP_JCPLL_TCL_LPF_BW, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_VCO_TCLVAR,
				       CSR_2L_PXP_JCPLL_VCO_TCLVAR, 0x3);

	/* Parameters programmed: force-enable clock output and the PLL
	 * itself on both lanes.
	 */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_CKOUT_EN);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_JCPLL_CKOUT_EN);
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_JCPLL_EN);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_JCPLL_EN);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_PXP_JCPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_JCPLL_EN);
}

/* Configure the Tx PLL: force it off, program reference/loop-filter/VCO/
 * K-band parameters and the SDM PCW (0xc800000), then force-enable the PLL
 * and its clock output on both lanes. Several registers are written more
 * than once (e.g. TXPLL_SSC_DELTA1 zeroed twice, VCO_CFIX and
 * REFIN_INTERNAL repeated) — this mirrors the vendor bring-up sequence;
 * presumably intentional, do not "deduplicate" without hardware testing.
 * The lowercase CSR_2L_PXP_txpll_* macros are a naming quirk of the
 * register header, not different registers.
 */
static void airoha_pcie_phy_txpll(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
	airoha_phy_pma0_clear_bits(pcie_phy,
				   REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				   PCIE_FORCE_DA_PXP_TXPLL_EN);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
	airoha_phy_pma1_clear_bits(pcie_phy,
				   REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				   PCIE_FORCE_DA_PXP_TXPLL_EN);

	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
				   CSR_2L_PXP_TXPLL_PLL_RSTB);
	writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC_PERIOD,
				     CSR_2L_PXP_txpll_SSC_PERIOD);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
				       CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
				       CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
				     CSR_2L_PXP_TXPLL_REFIN_DIV);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
				       CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);

	/* Force the Tx PLL SDM phase-control word on both lanes */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
	airoha_phy_pma0_update_field(pcie_phy,
				     REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
				     PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
				     0xc800000);
	airoha_phy_pma1_update_field(pcie_phy,
				     REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_SDM_PCW,
				     PCIE_FORCE_DA_PXP_TXPLL_SDM_PCW,
				     0xc800000);

	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
				     CSR_2L_PXP_TXPLL_SDM_IFM);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
				     CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
				       CSR_2L_PXP_TXPLL_RST_DLY, 0x4);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
				     CSR_2L_PXP_TXPLL_SDM_DI_LS);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_DI_LS,
				       CSR_2L_PXP_TXPLL_SDM_ORD, 0x3);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
				     CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
	writel(0x0, pcie_phy->csr_2l + REG_CSR_2L_TXPLL_SSC_DELTA1);
	/* Loop filter coefficients (BP/BC/BR) and charge pump */
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
				       CSR_2L_PXP_TXPLL_LPF_BP, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
				       CSR_2L_PXP_TXPLL_LPF_BC, 0x18);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
				       CSR_2L_PXP_TXPLL_LPF_BR, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_CHP_IOFST,
				       CSR_2L_PXP_TXPLL_CHP_IOFST, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_750M_SYS_CK,
				       CSR_2L_PXP_TXPLL_CHP_IBIAS, 0x2d);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_VTP,
				       CSR_2L_PXP_TXPLL_SPARE_L, 0x1);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
				     CSR_2L_PXP_TXPLL_LPF_BWC);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
				     CSR_2L_PXP_TXPLL_MMD_PREDIV_MODE);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_REFIN_DIV,
				     CSR_2L_PXP_TXPLL_REFIN_DIV);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
				   CSR_2L_PXP_TXPLL_VCO_HALFLSB_EN);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_VCO_SCAPWR,
				       CSR_2L_PXP_TXPLL_VCO_SCAPWR, 0x7);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
				       CSR_2L_PXP_TXPLL_VCO_CFIX, 0x3);

	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PR_IDAC,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_SDM_PCW);

	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
				     CSR_2L_PXP_TXPLL_SSC_PHASE_INI);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
				     CSR_2L_PXP_TXPLL_LPF_BWR);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
				   CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
				     CSR_2L_PXP_TXPLL_VCO_KBAND_MEAS_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_VTP,
				     CSR_2L_PXP_TXPLL_VTP_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
				     CSR_2L_PXP_TXPLL_PHY_CK1_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_PHY_CK2,
				   CSR_2L_PXP_TXPLL_REFIN_INTERNAL);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_SSC,
				     CSR_2L_PXP_TXPLL_SSC_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_750M_SYS_CK,
				     CSR_2L_PXP_TXPLL_LPF_SHCK_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_POSTDIV,
				     CSR_2L_PXP_TXPLL_POSTDIV_EN);
	/* K-band calibration parameters */
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
				     CSR_2L_PXP_TXPLL_KBAND_KFC);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
				       CSR_2L_PXP_TXPLL_KBAND_KF, 0x3);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
				       CSR_2L_PXP_txpll_KBAND_KS, 0x1);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_KBAND_DIV,
				       CSR_2L_PXP_TXPLL_KBAND_DIV, 0x4);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_LPF_BWR,
				       CSR_2L_PXP_TXPLL_KBAND_CODE, 0xe4);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
				   CSR_2L_PXP_TXPLL_TCL_AMP_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TXPLL_TCL_AMP_VREF,
				   CSR_2L_PXP_TXPLL_TCL_LPF_EN);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_TXPLL_TCL_KBAND_VREF,
				       CSR_2L_PXP_TXPLL_TCL_KBAND_VREF, 0xf);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_SDM_OUT,
				       CSR_2L_PXP_TXPLL_TCL_AMP_GAIN, 0x3);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_TXPLL_TCL_AMP_VREF,
				       CSR_2L_PXP_TXPLL_TCL_AMP_VREF, 0xb);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_TXPLL_TCL_LPF_BW,
				       CSR_2L_PXP_TXPLL_TCL_LPF_BW, 0x3);

	/* Force-enable Tx PLL clock output, then the PLL, on both lanes */
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_CKOUT_EN);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_TXPLL_CKOUT_EN);
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_TXPLL_EN);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_SEL_DA_PXP_TXPLL_EN);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_TXPLL_CKOUT,
				 PCIE_FORCE_DA_PXP_TXPLL_EN);
}

/* Enable spread-spectrum clocking on the JCPLL: program delta/period,
 * then turn on SSC and the SDM. Runs after the plain (no-SSC) JCPLL
 * configuration in airoha_pcie_phy_init_jcpll().
 */
static void airoha_pcie_phy_init_ssc_jcpll(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
				       CSR_2L_PXP_JCPLL_SSC_DELTA1, 0x106);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_DELTA1,
				       CSR_2L_PXP_JCPLL_SSC_DELTA, 0x106);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_JCPLL_SSC_PERIOD,
				       CSR_2L_PXP_JCPLL_SSC_PERIOD, 0x31b);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
				   CSR_2L_PXP_JCPLL_SSC_PHASE_INI);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
				   CSR_2L_PXP_JCPLL_SSC_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_IFM,
				   CSR_2L_PXP_JCPLL_SDM_IFM);
	/* NOTE(review): the mask below is the REG_-prefixed register macro
	 * itself (REG_CSR_2L_JCPLL_SDM_HREN); CSR_2L_PXP_JCPLL_SDM_HREN was
	 * probably intended — verify.
	 */
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SDM_HREN,
				   REG_CSR_2L_JCPLL_SDM_HREN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_RST_DLY,
				     CSR_2L_PXP_JCPLL_SDM_DI_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
				   CSR_2L_PXP_JCPLL_SSC_TRI_EN);
}

/* Lane 0 Rx signal-detect configuration: force the CDR LDO on, program
 * per-generation Rx reference values, signal-detect threshold/peak/LPF,
 * and pulse the Rx signal-detect calibration (FORCE_RX_SDCAL_EN set for
 * 150-200 us, then cleared).
 */
static void
airoha_pcie_phy_set_rxlan0_signal_detect(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
				   CSR_2L_PXP_CDR0_PR_LDO_FORCE_ON);

	/* let the forced LDO settle before touching the Rx path */
	usleep_range(100, 200);

	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
				     PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
				     PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
				     PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);

	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
				       CSR_2L_PXP_RX0_SIGDET_PEAK, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
				       CSR_2L_PXP_RX0_SIGDET_VTH_SEL, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
				       CSR_2L_PXP_VOS_PNINV, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_SIGDET_DCTEST,
				       CSR_2L_PXP_RX0_SIGDET_LPF_CTRL, 0x1);

	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
				     PCIE_CAL_OUT_OS, 0x0);

	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
				   CSR_2L_PXP_RX0_FE_VCM_GEN_PWDB);

	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
				 PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
	airoha_phy_pma0_update_field(pcie_phy,
				     REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
				     PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
				     PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
				     PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
	airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
				   PCIE_DISB_RX_SDCAL_EN);

	/* pulse the signal-detect calibration */
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
				 PCIE_FORCE_RX_SDCAL_EN);
	usleep_range(150, 200);
	airoha_phy_pma0_clear_bits(pcie_phy,
				   REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
				   PCIE_FORCE_RX_SDCAL_EN);
}

/* Lane 1 counterpart of airoha_pcie_phy_set_rxlan0_signal_detect().
 * Same sequence on the pma1/CDR1/RX1 register set; note lane1 sources
 * SIGDET_PEAK from REG_CSR_2L_RX1_SIGDET_NOVTH and LPF_CTRL from
 * REG_CSR_2L_RX1_DAC_RANGE_EYE (different registers than lane 0).
 */
static void
airoha_pcie_phy_set_rxlan1_signal_detect(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
				   CSR_2L_PXP_CDR1_PR_LDO_FORCE_ON);

	usleep_range(100, 200);

	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_19,
				     PCIE_PCP_RX_REV0_PCIE_GEN1, 0x18b0);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
				     PCIE_PCP_RX_REV0_PCIE_GEN2, 0x18b0);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_20,
				     PCIE_PCP_RX_REV0_PCIE_GEN3, 0x1030);

	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
				       CSR_2L_PXP_RX1_SIGDET_PEAK, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_SIGDET_NOVTH,
				       CSR_2L_PXP_RX1_SIGDET_VTH_SEL, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
				       CSR_2L_PXP_VOS_PNINV, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_DAC_RANGE_EYE,
				       CSR_2L_PXP_RX1_SIGDET_LPF_CTRL, 0x1);

	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_CAL2,
				     PCIE_CAL_OUT_OS, 0x0);

	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
				   CSR_2L_PXP_RX1_FE_VCM_GEN_PWDB);

	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
				 PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
	airoha_phy_pma1_update_field(pcie_phy,
				     REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_GAIN_CTRL,
				     PCIE_FORCE_DA_PXP_RX_FE_GAIN_CTRL, 0x3);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_RX_FORCE_MODE0,
				     PCIE_FORCE_DA_XPON_RX_FE_GAIN_CTRL, 0x1);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_SS_RX_SIGDET0,
				     PCIE_SIGDET_WIN_NONVLD_TIMES, 0x3);
	airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SEQUENCE_DISB_CTRL1,
				   PCIE_DISB_RX_SDCAL_EN);

	/* pulse the signal-detect calibration */
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
				 PCIE_FORCE_RX_SDCAL_EN);
	usleep_range(150, 200);
	airoha_phy_pma1_clear_bits(pcie_phy,
				   REG_PCIE_PMA_CTRL_SEQUENCE_FORCE_CTRL1,
				   PCIE_FORCE_RX_SDCAL_EN);
}

/* Power up the Rx data path on both lanes: force signal-detect, CDR PD
 * and front-end power-down-bar bits on, release Rx PHY clocks, assert the
 * PMA software resets, enable the front-end VB equalizer stages and set
 * the front-end gain for normal and training modes.
 */
static void airoha_pcie_phy_set_rxflow(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
				 PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
				 PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_RX_SCAN_RST,
				 PCIE_FORCE_DA_PXP_RX_SIGDET_PWDB |
				 PCIE_FORCE_SEL_DA_PXP_RX_SIGDET_PWDB);

	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
				 PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
				 PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
	airoha_phy_pma0_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
				 PCIE_FORCE_DA_PXP_RX_FE_PWDB |
				 PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_CDR_PD_PWDB,
				 PCIE_FORCE_DA_PXP_CDR_PD_PWDB |
				 PCIE_FORCE_SEL_DA_PXP_CDR_PD_PWDB);
	airoha_phy_pma1_set_bits(pcie_phy,
				 REG_PCIE_PMA_FORCE_DA_PXP_RX_FE_PWDB,
				 PCIE_FORCE_DA_PXP_RX_FE_PWDB |
				 PCIE_FORCE_SEL_DA_PXP_RX_FE_PWDB);

	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_PHYCK_DIV,
				   CSR_2L_PXP_RX0_PHYCK_RSTB |
				   CSR_2L_PXP_RX0_TDC_CK_SEL);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_PHYCK_DIV,
				   CSR_2L_PXP_RX1_PHYCK_RSTB |
				   CSR_2L_PXP_RX1_TDC_CK_SEL);

	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
				 PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
				 PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
				 PCIE_SW_TX_FIFO_RST);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
				 PCIE_SW_RX_FIFO_RST | PCIE_SW_TX_RST |
				 PCIE_SW_PMA_RST | PCIE_SW_ALLPCS_RST |
				 PCIE_SW_TX_FIFO_RST);

	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_PXP_RX0_FE_VB_EQ2,
				   CSR_2L_PXP_RX0_FE_VB_EQ2_EN |
				   CSR_2L_PXP_RX0_FE_VB_EQ3_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX0_SIGDET_VTH_SEL,
				   CSR_2L_PXP_RX0_FE_VB_EQ1_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_RX1_FE_VB_EQ1,
				   CSR_2L_PXP_RX1_FE_VB_EQ1_EN |
				   CSR_2L_PXP_RX1_FE_VB_EQ2_EN |
				   CSR_2L_PXP_RX1_FE_VB_EQ3_EN);

	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
				       CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX0_REV0,
				       CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
				       CSR_2L_PXP_FE_GAIN_NORMAL_MODE, 0x4);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_REV0,
				       CSR_2L_PXP_FE_GAIN_TRAIN_MODE, 0x4);
}

/* Configure the CDR phase rotators of both lanes: LDO regulator settings,
 * reference-clock dividers (cleared), LPF top limit, beta select and
 * K-band divider.
 */
static void airoha_pcie_phy_set_pr(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
				       CSR_2L_PXP_CDR0_PR_VREG_IBAND, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_VREG_IBAND,
				       CSR_2L_PXP_CDR0_PR_VREG_CKBUF, 0x5);

	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_CKREF_DIV,
				     CSR_2L_PXP_CDR0_PR_CKREF_DIV);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_COR_HBW,
				     CSR_2L_PXP_CDR0_PR_CKREF_DIV1);

	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
				       CSR_2L_PXP_CDR1_PR_VREG_IBAND, 0x5);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_CDR1_PR_VREG_IBAND_VAL,
				       CSR_2L_PXP_CDR1_PR_VREG_CKBUF, 0x5);

	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_CKREF_DIV,
				     CSR_2L_PXP_CDR1_PR_CKREF_DIV);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_COR_HBW,
				     CSR_2L_PXP_CDR1_PR_CKREF_DIV1);

	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_LPF_RATIO,
				       CSR_2L_PXP_CDR0_LPF_TOP_LIM, 0x20000);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_LPF_RATIO,
				       CSR_2L_PXP_CDR1_LPF_TOP_LIM, 0x20000);

	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
				       CSR_2L_PXP_CDR0_PR_BETA_SEL, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
				       CSR_2L_PXP_CDR1_PR_BETA_SEL, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_BETA_DAC,
				       CSR_2L_PXP_CDR0_PR_KBAND_DIV, 0x4);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_BETA_DAC,
				       CSR_2L_PXP_CDR1_PR_KBAND_DIV, 0x4);
}

/* Tx path enables: clock LDO and DM edge generator on both lanes;
 * multi-lane mode is disabled on lane 1.
 */
static void airoha_pcie_phy_set_txflow(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
				   CSR_2L_PXP_TX0_CKLDO_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
				   CSR_2L_PXP_TX1_CKLDO_EN);

	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX0_CKLDO,
				   CSR_2L_PXP_TX0_DMEDGEGEN_EN);
	airoha_phy_csr_2l_set_bits(pcie_phy, REG_CSR_2L_TX1_CKLDO,
				   CSR_2L_PXP_TX1_DMEDGEGEN_EN);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_TX1_MULTLANE,
				     CSR_2L_PXP_TX1_MULTLANE_EN);
}

/* Rx mode setup for both lanes: raw reserve value, per-generation Rx
 * threshold selects, CDR monitor clock disable and offset-calibration
 * (OSCAL) bias values for the VGA stages.
 */
static void airoha_pcie_phy_set_rx_mode(struct airoha_pcie_phy *pcie_phy)
{
	writel(0x804000, pcie_phy->pma0 + REG_PCIE_PMA_DIG_RESERVE_27);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
				     PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
				     PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
				     PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);
	/* NOTE(review): raw magic mask 0x77700 — the only unnamed bit mask
	 * in this file; a named macro would be preferable. Verify meaning
	 * against the datasheet.
	 */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
				 0x77700);

	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
				     CSR_2L_PXP_CDR0_PR_MONCK_ENABLE);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR0_PR_MONCK,
				       CSR_2L_PXP_CDR0_PR_RESERVE0, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_PXP_RX0_OSCAL_CTLE1IOS,
				       CSR_2L_PXP_RX0_PR_OSCAL_VGA1IOS, 0x19);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
				       CSR_2L_PXP_RX0_PR_OSCAL_VGA1VOS, 0x19);
	airoha_phy_csr_2l_update_field(pcie_phy,
				       REG_CSR_2L_PXP_RX0_OSCA_VGA1VOS,
				       CSR_2L_PXP_RX0_PR_OSCAL_VGA2IOS, 0x14);

	writel(0x804000, pcie_phy->pma1 + REG_PCIE_PMA_DIG_RESERVE_27);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
				     PCIE_PXP_RX_VTH_SEL_PCIE_G1, 0x5);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
				     PCIE_PXP_RX_VTH_SEL_PCIE_G2, 0x5);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_18,
				     PCIE_PXP_RX_VTH_SEL_PCIE_G3, 0x5);

	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_30,
				 0x77700);

	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
				     CSR_2L_PXP_CDR1_PR_MONCK_ENABLE);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_CDR1_PR_MONCK,
				       CSR_2L_PXP_CDR1_PR_RESERVE0, 0x2);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
				       CSR_2L_PXP_RX1_PR_OSCAL_VGA1IOS, 0x19);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
				       CSR_2L_PXP_RX1_PR_OSCAL_VGA1VOS, 0x19);
	airoha_phy_csr_2l_update_field(pcie_phy, REG_CSR_2L_RX1_OSCAL_VGA1IOS,
				       CSR_2L_PXP_RX1_PR_OSCAL_VGA2IOS, 0x14);
}

/* Run the Rx firmware pre-calibration ("K flow") on both lanes: first
 * with the Rx speed forced to 0xa for Gen3, then with the force released
 * for Gen2.
 */
static void airoha_pcie_phy_load_kflow(struct airoha_pcie_phy *pcie_phy)
{
	airoha_phy_pma0_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
				     PCIE_FORCE_PMA_RX_SPEED, 0xa);
	airoha_phy_pma1_update_field(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
				     PCIE_FORCE_PMA_RX_SPEED, 0xa);
	airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);
	airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN3);

	airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
				   PCIE_FORCE_PMA_RX_SPEED);
	airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_12,
				   PCIE_FORCE_PMA_RX_SPEED);
	usleep_range(100, 200);

	airoha_phy_init_lane0_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
	airoha_phy_init_lane1_rx_fw_pre_calib(pcie_phy, PCIE_PORT_GEN2);
}

/**
 * airoha_pcie_phy_init() - Initialize the phy
 * @phy: the phy to be initialized
 *
 * Initialize the phy registers.
 * The hardware settings will be reset during suspend, it should be
 * reinitialized when the consumer calls phy_init() again on resume.
 */
static int airoha_pcie_phy_init(struct phy *phy)
{
	struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);
	u32 val;

	/* Setup Tx-Rx detection time */
	val = FIELD_PREP(PCIE_XTP_RXDET_VCM_OFF_STB_T_SEL, 0x33) |
	      FIELD_PREP(PCIE_XTP_RXDET_EN_STB_T_SEL, 0x1) |
	      FIELD_PREP(PCIE_XTP_RXDET_FINISH_STB_T_SEL, 0x2) |
	      FIELD_PREP(PCIE_XTP_TXPD_TX_DATA_EN_DLY, 0x3) |
	      FIELD_PREP(PCIE_XTP_RXDET_LATCH_STB_T_SEL, 0x1);
	writel(val, pcie_phy->p0_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
	writel(val, pcie_phy->p1_xr_dtime + REG_PCIE_PEXTP_DIG_GLB44);
	/* Setup Rx AEQ training time */
	val = FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_L1P2_EXIT_WAIT, 0x32) |
	      FIELD_PREP(PCIE_XTP_LN_RX_PDOWN_E0_AEQEN_WAIT, 0x5050);
	writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P0);
	writel(val, pcie_phy->rx_aeq + REG_PCIE_PEXTP_DIG_LN_RX30_P1);

	/* enable load FLL-K flow */
	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
				 PCIE_FLL_LOAD_EN);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_DIG_RESERVE_14,
				 PCIE_FLL_LOAD_EN);

	airoha_pcie_phy_init_default(pcie_phy);
	airoha_pcie_phy_init_clk_out(pcie_phy);
	airoha_pcie_phy_init_csr_2l(pcie_phy);

	usleep_range(100, 200);

	airoha_pcie_phy_init_rx(pcie_phy);
	/* phase 1, no ssc for K TXPLL */
	airoha_pcie_phy_init_jcpll(pcie_phy);

	usleep_range(500, 600);

	/* TX PLL settings */
	airoha_pcie_phy_txpll(pcie_phy);

	usleep_range(200, 300);

	/* SSC JCPLL setting */
	airoha_pcie_phy_init_ssc_jcpll(pcie_phy);

	usleep_range(100, 200);

	/* Rx lan0 signal detect */
	airoha_pcie_phy_set_rxlan0_signal_detect(pcie_phy);
	/* Rx lan1 signal detect */
	airoha_pcie_phy_set_rxlan1_signal_detect(pcie_phy);
	/* RX FLOW */
	airoha_pcie_phy_set_rxflow(pcie_phy);

	usleep_range(100, 200);

	airoha_pcie_phy_set_pr(pcie_phy);
	/* TX FLOW */
	airoha_pcie_phy_set_txflow(pcie_phy);

	usleep_range(100, 200);
	/* RX mode setting */
	airoha_pcie_phy_set_rx_mode(pcie_phy);
	/* Load K-Flow */
	airoha_pcie_phy_load_kflow(pcie_phy);
	/* toggle the CDR phase-rotator power-down-bar: off, wait, on */
	airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
				   PCIE_DA_XPON_CDR_PR_PWDB);
	airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
				   PCIE_DA_XPON_CDR_PR_PWDB);

	usleep_range(100, 200);

	airoha_phy_pma0_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
				 PCIE_DA_XPON_CDR_PR_PWDB);
	airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0,
				 PCIE_DA_XPON_CDR_PR_PWDB);

	usleep_range(100, 200);

	return 0;
}

/* phy_ops.exit callback: re-assert the PMA software reset on both lanes
 * and turn the JCPLL spread-spectrum machinery off. Always succeeds.
 */
static int airoha_pcie_phy_exit(struct phy *phy)
{
	struct airoha_pcie_phy *pcie_phy = phy_get_drvdata(phy);

	airoha_phy_pma0_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
				   PCIE_PMA_SW_RST);
	airoha_phy_pma1_clear_bits(pcie_phy, REG_PCIE_PMA_SW_RESET,
				   PCIE_PMA_SW_RST);
	airoha_phy_csr_2l_clear_bits(pcie_phy, REG_CSR_2L_JCPLL_SSC,
				     CSR_2L_PXP_JCPLL_SSC_PHASE_INI |
				     CSR_2L_PXP_JCPLL_SSC_TRI_EN |
				     CSR_2L_PXP_JCPLL_SSC_EN);

	return 0;
}

static const struct phy_ops airoha_pcie_phy_ops = {
	.init = airoha_pcie_phy_init,
	.exit = airoha_pcie_phy_exit,
	.owner = THIS_MODULE,
};

/* Probe: map the six MMIO regions named in the DT binding (csr-2l, pma0,
 * pma1, p0/p1-xr-dtime, rx-aeq), create the phy and register a single-phy
 * provider. All resources are devm-managed, so there is no explicit error
 * unwinding.
 */
static int airoha_pcie_phy_probe(struct platform_device *pdev)
{
	struct airoha_pcie_phy *pcie_phy;
	struct device *dev = &pdev->dev;
	struct phy_provider *provider;

	pcie_phy = devm_kzalloc(dev, sizeof(*pcie_phy), GFP_KERNEL);
	if (!pcie_phy)
		return -ENOMEM;

	pcie_phy->csr_2l = devm_platform_ioremap_resource_byname(pdev, "csr-2l");
	if (IS_ERR(pcie_phy->csr_2l))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->csr_2l),
				     "Failed to map phy-csr-2l base\n");

	pcie_phy->pma0 = devm_platform_ioremap_resource_byname(pdev, "pma0");
	if (IS_ERR(pcie_phy->pma0))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->pma0),
				     "Failed to map phy-pma0 base\n");

	pcie_phy->pma1 = devm_platform_ioremap_resource_byname(pdev, "pma1");
	if (IS_ERR(pcie_phy->pma1))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->pma1),
				     "Failed to map phy-pma1 base\n");

	pcie_phy->phy = devm_phy_create(dev, dev->of_node, &airoha_pcie_phy_ops);
	if (IS_ERR(pcie_phy->phy))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->phy),
				     "Failed to create PCIe phy\n");

	pcie_phy->p0_xr_dtime =
		devm_platform_ioremap_resource_byname(pdev, "p0-xr-dtime");
	if (IS_ERR(pcie_phy->p0_xr_dtime))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->p0_xr_dtime),
				     "Failed to map P0 Tx-Rx dtime base\n");

	pcie_phy->p1_xr_dtime =
		devm_platform_ioremap_resource_byname(pdev, "p1-xr-dtime");
	if (IS_ERR(pcie_phy->p1_xr_dtime))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->p1_xr_dtime),
				     "Failed to map P1 Tx-Rx dtime base\n");

	pcie_phy->rx_aeq = devm_platform_ioremap_resource_byname(pdev, "rx-aeq");
	if (IS_ERR(pcie_phy->rx_aeq))
		return dev_err_probe(dev, PTR_ERR(pcie_phy->rx_aeq),
				     "Failed to map Rx AEQ base\n");

	pcie_phy->dev = dev;
	phy_set_drvdata(pcie_phy->phy, pcie_phy);

	/* #phy-cells = 0 in the binding, so the simple xlate is sufficient */
	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(provider))
		return dev_err_probe(dev, PTR_ERR(provider),
				     "PCIe phy probe failed\n");

	return 0;
}

static const struct of_device_id airoha_pcie_phy_of_match[] = {
	{ .compatible = "airoha,en7581-pcie-phy" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, airoha_pcie_phy_of_match);

static struct platform_driver airoha_pcie_phy_driver = {
	.probe	= airoha_pcie_phy_probe,
	.driver	= {
		.name = "airoha-pcie-phy",
		.of_match_table = airoha_pcie_phy_of_match,
	},
};
module_platform_driver(airoha_pcie_phy_driver);

MODULE_DESCRIPTION("Airoha PCIe PHY driver");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("GPL");
+1 -1
drivers/phy/phy-core.c
··· 664 664 * 665 665 * Returns the phy driver, after getting a refcount to it; or 666 666 * -ENODEV if there is no such phy. The caller is responsible for 667 - * calling phy_put() to release that count. 667 + * calling of_phy_put() to release that count. 668 668 */ 669 669 struct phy *of_phy_get(struct device_node *np, const char *con_id) 670 670 {
+312 -6
drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
··· 489 489 QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), 490 490 }; 491 491 492 + static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_serdes_tbl[] = { 493 + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18), 494 + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01), 495 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31), 496 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f), 497 + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f), 498 + QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06), 499 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42), 500 + QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20), 501 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01), 502 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04), 503 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), 504 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff), 505 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f), 506 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30), 507 + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21), 508 + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68), 509 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02), 510 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa), 511 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab), 512 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14), 513 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4), 514 + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09), 515 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16), 516 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28), 517 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00), 518 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0), 519 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02), 520 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24), 521 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), 522 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x20), 523 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a), 524 + 
QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32), 525 + QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02), 526 + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07), 527 + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08), 528 + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a), 529 + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01), 530 + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53), 531 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05), 532 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55), 533 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55), 534 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29), 535 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa), 536 + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09), 537 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16), 538 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28), 539 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00), 540 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0), 541 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03), 542 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4), 543 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), 544 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), 545 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08), 546 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01), 547 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d), 548 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01), 549 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00), 550 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00), 551 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a), 552 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05), 553 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08), 554 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04), 555 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19), 556 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 0x28), 557 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 
0x90), 558 + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89), 559 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10), 560 + }; 561 + 562 + static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_serdes_tbl[] = { 563 + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CLKBUFLR_EN, 0x18), 564 + QMP_PHY_INIT_CFG(QSERDES_PLL_BIAS_EN_CTRL_BY_PSM, 0x01), 565 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x31), 566 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_IVCO, 0x0f), 567 + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TRIM, 0x0f), 568 + QMP_PHY_INIT_CFG(QSERDES_PLL_CMN_CONFIG, 0x06), 569 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP_EN, 0x42), 570 + QMP_PHY_INIT_CFG(QSERDES_PLL_RESETSM_CNTRL, 0x20), 571 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x01), 572 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_MAP, 0x04), 573 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), 574 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER1, 0xff), 575 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE_TIMER2, 0x3f), 576 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x30), 577 + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x21), 578 + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE0, 0x68), 579 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE0, 0x02), 580 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE0, 0xaa), 581 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE0, 0xab), 582 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE0, 0x14), 583 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE0, 0xd4), 584 + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE0, 0x09), 585 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE0, 0x16), 586 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE0, 0x28), 587 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE0, 0x00), 588 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE0, 0xa0), 589 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE0, 0x02), 590 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE0, 0x24), 591 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), 592 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), 593 + 
QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV, 0x0a), 594 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_SELECT, 0x32), 595 + QMP_PHY_INIT_CFG(QSERDES_PLL_SYS_CLK_CTRL, 0x02), 596 + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_BUF_ENABLE, 0x07), 597 + QMP_PHY_INIT_CFG(QSERDES_PLL_SYSCLK_EN_SEL, 0x08), 598 + QMP_PHY_INIT_CFG(QSERDES_PLL_BG_TIMER, 0x0a), 599 + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x01), 600 + QMP_PHY_INIT_CFG(QSERDES_PLL_DEC_START_MODE1, 0x53), 601 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START3_MODE1, 0x05), 602 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START2_MODE1, 0x55), 603 + QMP_PHY_INIT_CFG(QSERDES_PLL_DIV_FRAC_START1_MODE1, 0x55), 604 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP2_MODE1, 0x29), 605 + QMP_PHY_INIT_CFG(QSERDES_PLL_LOCK_CMP1_MODE1, 0xaa), 606 + QMP_PHY_INIT_CFG(QSERDES_PLL_CP_CTRL_MODE1, 0x09), 607 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_RCTRL_MODE1, 0x16), 608 + QMP_PHY_INIT_CFG(QSERDES_PLL_PLL_CCTRL_MODE1, 0x28), 609 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN1_MODE1, 0x00), 610 + QMP_PHY_INIT_CFG(QSERDES_PLL_INTEGLOOP_GAIN0_MODE1, 0xa0), 611 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE2_MODE1, 0x03), 612 + QMP_PHY_INIT_CFG(QSERDES_PLL_VCO_TUNE1_MODE1, 0xb4), 613 + QMP_PHY_INIT_CFG(QSERDES_PLL_SVS_MODE_CLK_SEL, 0x05), 614 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORE_CLK_EN, 0x00), 615 + QMP_PHY_INIT_CFG(QSERDES_PLL_CORECLK_DIV_MODE1, 0x08), 616 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_EN_CENTER, 0x01), 617 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER1, 0x7d), 618 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_PER2, 0x01), 619 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER1, 0x00), 620 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_ADJ_PER2, 0x00), 621 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE0, 0x0a), 622 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE0, 0x05), 623 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE1_MODE1, 0x08), 624 + QMP_PHY_INIT_CFG(QSERDES_PLL_SSC_STEP_SIZE2_MODE1, 0x04), 625 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE0, 0x19), 626 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_EP_DIV_MODE1, 
0x28), 627 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x90), 628 + QMP_PHY_INIT_CFG(QSERDES_PLL_HSCLK_SEL, 0x89), 629 + QMP_PHY_INIT_CFG(QSERDES_PLL_CLK_ENABLE1, 0x10), 630 + }; 631 + 632 + static const struct qmp_phy_init_tbl ipq9574_pcie_rx_tbl[] = { 633 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03), 634 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c), 635 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x14), 636 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x61), 637 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04), 638 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1e), 639 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), 640 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), 641 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x02), 642 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), 643 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70), 644 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x73), 645 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x80), 646 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0x00), 647 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x02), 648 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8), 649 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x09), 650 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1), 651 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x00), 652 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x02), 653 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xc8), 654 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x09), 655 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb1), 656 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xf0), 657 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x02), 658 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x2f), 659 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0xd3), 660 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 
0x40), 661 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), 662 + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), 663 + }; 664 + 665 + static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_tbl[] = { 666 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), 667 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), 668 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), 669 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), 670 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), 671 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), 672 + }; 673 + 674 + static const struct qmp_phy_init_tbl ipq9574_gen3x1_pcie_pcs_misc_tbl[] = { 675 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), 676 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d), 677 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), 678 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), 679 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), 680 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), 681 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x14), 682 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG1, 0x10), 683 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0b), 684 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00), 685 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58), 686 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), 687 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52), 688 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), 689 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x50), 690 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x1a), 691 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x06), 692 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03), 693 + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), 694 + }; 695 + 696 + static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_tbl[] = { 697 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), 698 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_G12S1_TXDEEMPH_M3P5DB, 0x10), 699 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), 700 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), 701 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_DCC_CAL_CONFIG, 0x01), 702 + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa), 703 + }; 704 + 705 + static const struct qmp_phy_init_tbl ipq9574_gen3x2_pcie_pcs_misc_tbl[] = { 706 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), 707 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d), 708 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), 709 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), 710 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H, 0x00), 711 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), 712 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x14), 713 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG1, 0x10), 714 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0b), 715 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_PRE, 0x00), 716 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_PRESET_P10_POST, 0x58), 717 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), 718 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1, 0x00), 719 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2, 0x52), 720 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4, 0x19), 721 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), 722 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2, 0x49), 723 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4, 0x2a), 724 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x02), 725 + 
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6, 0x03), 726 + QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), 727 + }; 728 + 492 729 static const struct qmp_phy_init_tbl sdm845_qmp_pcie_serdes_tbl[] = { 493 730 QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14), 494 731 QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), ··· 2772 2535 .rx2 = 0x1800, 2773 2536 }; 2774 2537 2538 + static const struct qmp_pcie_offsets qmp_pcie_offsets_ipq9574 = { 2539 + .serdes = 0, 2540 + .pcs = 0x1000, 2541 + .pcs_misc = 0x1400, 2542 + .tx = 0x0200, 2543 + .rx = 0x0400, 2544 + .tx2 = 0x0600, 2545 + .rx2 = 0x0800, 2546 + }; 2547 + 2775 2548 static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = { 2776 2549 .serdes = 0x1000, 2777 2550 .pcs = 0x1200, ··· 2892 2645 2893 2646 .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, 2894 2647 .phy_status = PHYSTATUS, 2648 + }; 2649 + 2650 + static const struct qmp_phy_cfg ipq9574_gen3x1_pciephy_cfg = { 2651 + .lanes = 1, 2652 + 2653 + .offsets = &qmp_pcie_offsets_v4x1, 2654 + 2655 + .tbls = { 2656 + .serdes = ipq9574_gen3x1_pcie_serdes_tbl, 2657 + .serdes_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_serdes_tbl), 2658 + .tx = ipq8074_pcie_gen3_tx_tbl, 2659 + .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl), 2660 + .rx = ipq9574_pcie_rx_tbl, 2661 + .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl), 2662 + .pcs = ipq9574_gen3x1_pcie_pcs_tbl, 2663 + .pcs_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_tbl), 2664 + .pcs_misc = ipq9574_gen3x1_pcie_pcs_misc_tbl, 2665 + .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x1_pcie_pcs_misc_tbl), 2666 + }, 2667 + .reset_list = ipq8074_pciephy_reset_l, 2668 + .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), 2669 + .vreg_list = NULL, 2670 + .num_vregs = 0, 2671 + .regs = pciephy_v4_regs_layout, 2672 + 2673 + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, 2674 + .phy_status = PHYSTATUS, 2675 + .pipe_clock_rate = 250000000, 2676 + }; 2677 + 2678 + static const struct qmp_phy_cfg ipq9574_gen3x2_pciephy_cfg = { 2679 + 
.lanes = 2, 2680 + 2681 + .offsets = &qmp_pcie_offsets_ipq9574, 2682 + 2683 + .tbls = { 2684 + .serdes = ipq9574_gen3x2_pcie_serdes_tbl, 2685 + .serdes_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_serdes_tbl), 2686 + .tx = ipq8074_pcie_gen3_tx_tbl, 2687 + .tx_num = ARRAY_SIZE(ipq8074_pcie_gen3_tx_tbl), 2688 + .rx = ipq9574_pcie_rx_tbl, 2689 + .rx_num = ARRAY_SIZE(ipq9574_pcie_rx_tbl), 2690 + .pcs = ipq9574_gen3x2_pcie_pcs_tbl, 2691 + .pcs_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_tbl), 2692 + .pcs_misc = ipq9574_gen3x2_pcie_pcs_misc_tbl, 2693 + .pcs_misc_num = ARRAY_SIZE(ipq9574_gen3x2_pcie_pcs_misc_tbl), 2694 + }, 2695 + .reset_list = ipq8074_pciephy_reset_l, 2696 + .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), 2697 + .vreg_list = NULL, 2698 + .num_vregs = 0, 2699 + .regs = pciephy_v5_regs_layout, 2700 + 2701 + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, 2702 + .phy_status = PHYSTATUS, 2703 + .pipe_clock_rate = 250000000, 2895 2704 }; 2896 2705 2897 2706 static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = { ··· 4033 3730 { 4034 3731 struct clk_fixed_rate *fixed = &qmp->aux_clk_fixed; 4035 3732 struct clk_init_data init = { }; 4036 - int ret; 3733 + char name[64]; 4037 3734 4038 - ret = of_property_read_string_index(np, "clock-output-names", 1, &init.name); 4039 - if (ret) { 4040 - dev_err(qmp->dev, "%pOFn: No clock-output-names index 1\n", np); 4041 - return ret; 4042 - } 3735 + snprintf(name, sizeof(name), "%s::phy_aux_clk", dev_name(qmp->dev)); 4043 3736 3737 + init.name = name; 4044 3738 init.ops = &clk_fixed_rate_ops; 4045 3739 4046 3740 fixed->fixed_rate = qmp->cfg->aux_clock_rate; ··· 4330 4030 }, { 4331 4031 .compatible = "qcom,ipq8074-qmp-pcie-phy", 4332 4032 .data = &ipq8074_pciephy_cfg, 4033 + }, { 4034 + .compatible = "qcom,ipq9574-qmp-gen3x1-pcie-phy", 4035 + .data = &ipq9574_gen3x1_pciephy_cfg, 4036 + }, { 4037 + .compatible = "qcom,ipq9574-qmp-gen3x2-pcie-phy", 4038 + .data = &ipq9574_gen3x2_pciephy_cfg, 4333 4039 }, { 4334 4040 .compatible = 
"qcom,msm8998-qmp-pcie-phy", 4335 4041 .data = &msm8998_pciephy_cfg,
+14
drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5.h
··· 11 11 #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG2 0x0c 12 12 #define QPHY_V5_PCS_PCIE_POWER_STATE_CONFIG4 0x14 13 13 #define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20 14 + #define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x44 15 + #define QPHY_V5_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_H 0x48 16 + #define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x4c 17 + #define QPHY_V5_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_H 0x50 14 18 #define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54 19 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG1 0x5c 20 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG2 0x60 21 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_CONFIG4 0x68 22 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG2 0x7c 23 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG4 0x84 24 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x88 25 + #define QPHY_V5_PCS_PCIE_OSC_DTCT_MODE2_CONFIG6 0x8c 15 26 #define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94 27 + #define QPHY_V5_PCS_PCIE_EQ_CONFIG1 0xa4 16 28 #define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8 29 + #define QPHY_V5_PCS_PCIE_PRESET_P10_PRE 0xc0 30 + #define QPHY_V5_PCS_PCIE_PRESET_P10_POST 0xe4 17 31 18 32 #endif
+3
drivers/phy/qualcomm/phy-qcom-qmp-qserdes-pll.h
··· 8 8 9 9 /* QMP V2 PHY for PCIE gen3 ports - QSERDES PLL registers */ 10 10 #define QSERDES_PLL_BG_TIMER 0x00c 11 + #define QSERDES_PLL_SSC_EN_CENTER 0x010 12 + #define QSERDES_PLL_SSC_ADJ_PER1 0x014 13 + #define QSERDES_PLL_SSC_ADJ_PER2 0x018 11 14 #define QSERDES_PLL_SSC_PER1 0x01c 12 15 #define QSERDES_PLL_SSC_PER2 0x020 13 16 #define QSERDES_PLL_SSC_STEP_SIZE1_MODE0 0x024
+3
drivers/phy/qualcomm/phy-qcom-qmp-usb.c
··· 2253 2253 .compatible = "qcom,sa8775p-qmp-usb3-uni-phy", 2254 2254 .data = &sa8775p_usb3_uniphy_cfg, 2255 2255 }, { 2256 + .compatible = "qcom,sc8180x-qmp-usb3-uni-phy", 2257 + .data = &sm8150_usb3_uniphy_cfg, 2258 + }, { 2256 2259 .compatible = "qcom,sc8280xp-qmp-usb3-uni-phy", 2257 2260 .data = &sc8280xp_usb3_uniphy_cfg, 2258 2261 }, {
+2
drivers/phy/rockchip/Kconfig
··· 86 86 config PHY_ROCKCHIP_SAMSUNG_HDPTX 87 87 tristate "Rockchip Samsung HDMI/eDP Combo PHY driver" 88 88 depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF 89 + depends on HAS_IOMEM 89 90 select GENERIC_PHY 91 + select MFD_SYSCON 90 92 select RATIONAL 91 93 help 92 94 Enable this to support the Rockchip HDMI/eDP Combo PHY
+872 -187
drivers/phy/samsung/phy-exynos5-usbdrd.c
··· 8 8 * Author: Vivek Gautam <gautam.vivek@samsung.com> 9 9 */ 10 10 11 + #include <linux/bitfield.h> 11 12 #include <linux/clk.h> 12 13 #include <linux/delay.h> 13 14 #include <linux/io.h> ··· 31 30 #define EXYNOS5_FSEL_19MHZ2 0x3 32 31 #define EXYNOS5_FSEL_20MHZ 0x4 33 32 #define EXYNOS5_FSEL_24MHZ 0x5 34 - #define EXYNOS5_FSEL_26MHZ 0x82 33 + #define EXYNOS5_FSEL_26MHZ 0x6 35 34 #define EXYNOS5_FSEL_50MHZ 0x7 36 35 37 36 /* Exynos5: USB 3.0 DRD PHY registers */ 38 37 #define EXYNOS5_DRD_LINKSYSTEM 0x04 39 - 38 + #define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27) 40 39 #define LINKSYSTEM_FLADJ_MASK (0x3f << 1) 41 40 #define LINKSYSTEM_FLADJ(_x) ((_x) << 1) 42 - #define LINKSYSTEM_XHCI_VERSION_CONTROL BIT(27) 43 41 44 42 #define EXYNOS5_DRD_PHYUTMI 0x08 45 - 46 43 #define PHYUTMI_OTGDISABLE BIT(6) 47 44 #define PHYUTMI_FORCESUSPEND BIT(1) 48 45 #define PHYUTMI_FORCESLEEP BIT(0) ··· 48 49 #define EXYNOS5_DRD_PHYPIPE 0x0c 49 50 50 51 #define EXYNOS5_DRD_PHYCLKRST 0x10 51 - 52 52 #define PHYCLKRST_EN_UTMISUSPEND BIT(31) 53 - 54 53 #define PHYCLKRST_SSC_REFCLKSEL_MASK (0xff << 23) 55 54 #define PHYCLKRST_SSC_REFCLKSEL(_x) ((_x) << 23) 56 - 57 55 #define PHYCLKRST_SSC_RANGE_MASK (0x03 << 21) 58 56 #define PHYCLKRST_SSC_RANGE(_x) ((_x) << 21) 59 - 60 57 #define PHYCLKRST_SSC_EN BIT(20) 61 58 #define PHYCLKRST_REF_SSP_EN BIT(19) 62 59 #define PHYCLKRST_REF_CLKDIV2 BIT(18) 63 - 64 60 #define PHYCLKRST_MPLL_MULTIPLIER_MASK (0x7f << 11) 65 61 #define PHYCLKRST_MPLL_MULTIPLIER_100MHZ_REF (0x19 << 11) 66 62 #define PHYCLKRST_MPLL_MULTIPLIER_50M_REF (0x32 << 11) 67 63 #define PHYCLKRST_MPLL_MULTIPLIER_24MHZ_REF (0x68 << 11) 68 64 #define PHYCLKRST_MPLL_MULTIPLIER_20MHZ_REF (0x7d << 11) 69 65 #define PHYCLKRST_MPLL_MULTIPLIER_19200KHZ_REF (0x02 << 11) 70 - 71 - #define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5) 72 66 #define PHYCLKRST_FSEL_PIPE_MASK (0x7 << 8) 67 + #define PHYCLKRST_FSEL_UTMI_MASK (0x7 << 5) 73 68 #define PHYCLKRST_FSEL(_x) ((_x) << 5) 74 69 #define 
PHYCLKRST_FSEL_PAD_100MHZ (0x27 << 5) 75 70 #define PHYCLKRST_FSEL_PAD_24MHZ (0x2a << 5) 76 71 #define PHYCLKRST_FSEL_PAD_20MHZ (0x31 << 5) 77 72 #define PHYCLKRST_FSEL_PAD_19_2MHZ (0x38 << 5) 78 - 79 73 #define PHYCLKRST_RETENABLEN BIT(4) 80 - 81 74 #define PHYCLKRST_REFCLKSEL_MASK (0x03 << 2) 82 75 #define PHYCLKRST_REFCLKSEL_PAD_REFCLK (0x2 << 2) 83 76 #define PHYCLKRST_REFCLKSEL_EXT_REFCLK (0x3 << 2) 84 - 85 77 #define PHYCLKRST_PORTRESET BIT(1) 86 78 #define PHYCLKRST_COMMONONN BIT(0) 87 79 ··· 90 100 #define PHYREG1_CR_ACK BIT(0) 91 101 92 102 #define EXYNOS5_DRD_PHYPARAM0 0x1c 93 - 94 103 #define PHYPARAM0_REF_USE_PAD BIT(31) 95 104 #define PHYPARAM0_REF_LOSLEVEL_MASK (0x1f << 26) 96 105 #define PHYPARAM0_REF_LOSLEVEL (0x9 << 26) 97 106 98 107 #define EXYNOS5_DRD_PHYPARAM1 0x20 99 - 100 108 #define PHYPARAM1_PCS_TXDEEMPH_MASK (0x1f << 0) 101 109 #define PHYPARAM1_PCS_TXDEEMPH (0x1c) 102 110 103 111 #define EXYNOS5_DRD_PHYTERM 0x24 104 112 105 113 #define EXYNOS5_DRD_PHYTEST 0x28 106 - 107 114 #define PHYTEST_POWERDOWN_SSP BIT(3) 108 115 #define PHYTEST_POWERDOWN_HSP BIT(2) 109 116 110 117 #define EXYNOS5_DRD_PHYADP 0x2c 111 118 112 119 #define EXYNOS5_DRD_PHYUTMICLKSEL 0x30 113 - 114 120 #define PHYUTMICLKSEL_UTMI_CLKSEL BIT(2) 115 121 116 122 #define EXYNOS5_DRD_PHYRESUME 0x34 123 + 117 124 #define EXYNOS5_DRD_LINKPORT 0x44 118 125 119 126 /* USB 3.0 DRD PHY SS Function Control Reg; accessed by CR_PORT */ ··· 134 147 135 148 /* Exynos850: USB DRD PHY registers */ 136 149 #define EXYNOS850_DRD_LINKCTRL 0x04 137 - #define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4) 150 + #define LINKCTRL_FORCE_RXELECIDLE BIT(18) 151 + #define LINKCTRL_FORCE_PHYSTATUS BIT(17) 152 + #define LINKCTRL_FORCE_PIPE_EN BIT(16) 138 153 #define LINKCTRL_FORCE_QACT BIT(8) 154 + #define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4) 155 + 156 + #define EXYNOS850_DRD_LINKPORT 0x08 157 + #define LINKPORT_HOST_NUM_U3 GENMASK(19, 16) 158 + #define LINKPORT_HOST_NUM_U2 GENMASK(15, 12) 139 159 140 
160 #define EXYNOS850_DRD_CLKRST 0x20 141 - #define CLKRST_LINK_SW_RST BIT(0) 142 - #define CLKRST_PORT_RST BIT(1) 161 + /* 162 + * On versions without SS ports (like E850), bit 3 is for the 2.0 phy (HS), 163 + * while on versions with (like gs101), bits 2 and 3 are for the 3.0 phy (SS) 164 + * and bits 12 & 13 for the 2.0 phy. 165 + */ 166 + #define CLKRST_PHY20_SW_POR BIT(13) 167 + #define CLKRST_PHY20_SW_POR_SEL BIT(12) 168 + #define CLKRST_LINK_PCLK_SEL BIT(7) 143 169 #define CLKRST_PHY_SW_RST BIT(3) 170 + #define CLKRST_PHY_RESET_SEL BIT(2) 171 + #define CLKRST_PORT_RST BIT(1) 172 + #define CLKRST_LINK_SW_RST BIT(0) 173 + 174 + #define EXYNOS850_DRD_SSPPLLCTL 0x30 175 + #define SSPPLLCTL_FSEL GENMASK(2, 0) 144 176 145 177 #define EXYNOS850_DRD_UTMI 0x50 146 - #define UTMI_FORCE_SLEEP BIT(0) 147 - #define UTMI_FORCE_SUSPEND BIT(1) 148 - #define UTMI_DM_PULLDOWN BIT(2) 149 - #define UTMI_DP_PULLDOWN BIT(3) 150 - #define UTMI_FORCE_BVALID BIT(4) 151 178 #define UTMI_FORCE_VBUSVALID BIT(5) 179 + #define UTMI_FORCE_BVALID BIT(4) 180 + #define UTMI_DP_PULLDOWN BIT(3) 181 + #define UTMI_DM_PULLDOWN BIT(2) 182 + #define UTMI_FORCE_SUSPEND BIT(1) 183 + #define UTMI_FORCE_SLEEP BIT(0) 152 184 153 185 #define EXYNOS850_DRD_HSP 0x54 154 - #define HSP_COMMONONN BIT(8) 155 - #define HSP_EN_UTMISUSPEND BIT(9) 156 - #define HSP_VBUSVLDEXT BIT(12) 157 - #define HSP_VBUSVLDEXTSEL BIT(13) 158 186 #define HSP_FSV_OUT_EN BIT(24) 187 + #define HSP_VBUSVLDEXTSEL BIT(13) 188 + #define HSP_VBUSVLDEXT BIT(12) 189 + #define HSP_EN_UTMISUSPEND BIT(9) 190 + #define HSP_COMMONONN BIT(8) 191 + 192 + #define EXYNOS850_DRD_HSPPARACON 0x58 193 + #define HSPPARACON_TXVREF GENMASK(31, 28) 194 + #define HSPPARACON_TXRISE GENMASK(25, 24) 195 + #define HSPPARACON_TXRES GENMASK(22, 21) 196 + #define HSPPARACON_TXPREEMPPULSE BIT(20) 197 + #define HSPPARACON_TXPREEMPAMP GENMASK(19, 18) 198 + #define HSPPARACON_TXHSXV GENMASK(17, 16) 199 + #define HSPPARACON_TXFSLS GENMASK(15, 12) 200 + #define 
HSPPARACON_SQRX GENMASK(10, 8) 201 + #define HSPPARACON_OTG GENMASK(6, 4) 202 + #define HSPPARACON_COMPDIS GENMASK(2, 0) 159 203 160 204 #define EXYNOS850_DRD_HSP_TEST 0x5c 161 205 #define HSP_TEST_SIDDQ BIT(24) 162 206 207 + /* Exynos9 - GS101 */ 208 + #define EXYNOS850_DRD_SECPMACTL 0x48 209 + #define SECPMACTL_PMA_ROPLL_REF_CLK_SEL GENMASK(13, 12) 210 + #define SECPMACTL_PMA_LCPLL_REF_CLK_SEL GENMASK(11, 10) 211 + #define SECPMACTL_PMA_REF_FREQ_SEL GENMASK(9, 8) 212 + #define SECPMACTL_PMA_LOW_PWR BIT(4) 213 + #define SECPMACTL_PMA_TRSV_SW_RST BIT(3) 214 + #define SECPMACTL_PMA_CMN_SW_RST BIT(2) 215 + #define SECPMACTL_PMA_INIT_SW_RST BIT(1) 216 + #define SECPMACTL_PMA_APB_SW_RST BIT(0) 217 + 218 + /* PMA registers */ 219 + #define EXYNOS9_PMA_USBDP_CMN_REG0008 0x0020 220 + #define CMN_REG0008_OVRD_AUX_EN BIT(3) 221 + #define CMN_REG0008_AUX_EN BIT(2) 222 + 223 + #define EXYNOS9_PMA_USBDP_CMN_REG00B8 0x02e0 224 + #define CMN_REG00B8_LANE_MUX_SEL_DP GENMASK(3, 0) 225 + 226 + #define EXYNOS9_PMA_USBDP_CMN_REG01C0 0x0700 227 + #define CMN_REG01C0_ANA_LCPLL_LOCK_DONE BIT(7) 228 + #define CMN_REG01C0_ANA_LCPLL_AFC_DONE BIT(6) 229 + 230 + /* these have similar register layout, for lanes 0 and 2 */ 231 + #define EXYNOS9_PMA_USBDP_TRSV_REG03C3 0x0f0c 232 + #define EXYNOS9_PMA_USBDP_TRSV_REG07C3 0x1f0c 233 + #define TRSV_REG03C3_LN0_MON_RX_CDR_AFC_DONE BIT(3) 234 + #define TRSV_REG03C3_LN0_MON_RX_CDR_CAL_DONE BIT(2) 235 + #define TRSV_REG03C3_LN0_MON_RX_CDR_FLD_PLL_MODE_DONE BIT(1) 236 + #define TRSV_REG03C3_LN0_MON_RX_CDR_LOCK_DONE BIT(0) 237 + 238 + /* TRSV_REG0413 and TRSV_REG0813 have similar register layout */ 239 + #define EXYNOS9_PMA_USBDP_TRSV_REG0413 0x104c 240 + #define TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN BIT(7) 241 + #define TRSV_REG0413_OVRD_LN1_TX_RXD_EN BIT(5) 242 + 243 + #define EXYNOS9_PMA_USBDP_TRSV_REG0813 0x204c 244 + #define TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN BIT(7) 245 + #define TRSV_REG0813_OVRD_LN3_TX_RXD_EN BIT(5) 246 + 247 + /* PCS registers 
*/ 248 + #define EXYNOS9_PCS_NS_VEC_PS1_N1 0x010c 249 + #define EXYNOS9_PCS_NS_VEC_PS2_N0 0x0110 250 + #define EXYNOS9_PCS_NS_VEC_PS3_N0 0x0118 251 + #define NS_VEC_NS_REQ GENMASK(31, 24) 252 + #define NS_VEC_ENABLE_TIMER BIT(22) 253 + #define NS_VEC_SEL_TIMEOUT GENMASK(21, 20) 254 + #define NS_VEC_INV_MASK GENMASK(19, 16) 255 + #define NS_VEC_COND_MASK GENMASK(11, 8) 256 + #define NS_VEC_EXP_COND GENMASK(3, 0) 257 + 258 + #define EXYNOS9_PCS_OUT_VEC_2 0x014c 259 + #define EXYNOS9_PCS_OUT_VEC_3 0x0150 260 + #define PCS_OUT_VEC_B9_DYNAMIC BIT(19) 261 + #define PCS_OUT_VEC_B9_SEL_OUT BIT(18) 262 + #define PCS_OUT_VEC_B8_DYNAMIC BIT(17) 263 + #define PCS_OUT_VEC_B8_SEL_OUT BIT(16) 264 + #define PCS_OUT_VEC_B7_DYNAMIC BIT(15) 265 + #define PCS_OUT_VEC_B7_SEL_OUT BIT(14) 266 + #define PCS_OUT_VEC_B6_DYNAMIC BIT(13) 267 + #define PCS_OUT_VEC_B6_SEL_OUT BIT(12) 268 + #define PCS_OUT_VEC_B5_DYNAMIC BIT(11) 269 + #define PCS_OUT_VEC_B5_SEL_OUT BIT(10) 270 + #define PCS_OUT_VEC_B4_DYNAMIC BIT(9) 271 + #define PCS_OUT_VEC_B4_SEL_OUT BIT(8) 272 + #define PCS_OUT_VEC_B3_DYNAMIC BIT(7) 273 + #define PCS_OUT_VEC_B3_SEL_OUT BIT(6) 274 + #define PCS_OUT_VEC_B2_DYNAMIC BIT(5) 275 + #define PCS_OUT_VEC_B2_SEL_OUT BIT(4) 276 + #define PCS_OUT_VEC_B1_DYNAMIC BIT(3) 277 + #define PCS_OUT_VEC_B1_SEL_OUT BIT(2) 278 + #define PCS_OUT_VEC_B0_DYNAMIC BIT(1) 279 + #define PCS_OUT_VEC_B0_SEL_OUT BIT(0) 280 + 281 + #define EXYNOS9_PCS_TIMEOUT_0 0x0170 282 + 283 + #define EXYNOS9_PCS_TIMEOUT_3 0x017c 284 + 285 + #define EXYNOS9_PCS_EBUF_PARAM 0x0304 286 + #define EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE GENMASK(29, 24) 287 + 288 + #define EXYNOS9_PCS_BACK_END_MODE_VEC 0x030c 289 + #define BACK_END_MODE_VEC_FORCE_EBUF_EMPTY_MODE BIT(1) 290 + #define BACK_END_MODE_VEC_DISABLE_DATA_MASK BIT(0) 291 + 292 + #define EXYNOS9_PCS_RX_CONTROL 0x03f0 293 + #define RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B BIT(22) 294 + 295 + #define EXYNOS9_PCS_RX_CONTROL_DEBUG 0x03f4 296 + #define RX_CONTROL_DEBUG_EN_TS_CHECK BIT(5) 
297 + #define RX_CONTROL_DEBUG_NUM_COM_FOUND GENMASK(3, 0) 298 + 299 + #define EXYNOS9_PCS_LOCAL_COEF 0x040c 300 + #define LOCAL_COEF_PMA_CENTER_COEF GENMASK(21, 16) 301 + #define LOCAL_COEF_LF GENMASK(13, 8) 302 + #define LOCAL_COEF_FS GENMASK(5, 0) 303 + 304 + #define EXYNOS9_PCS_HS_TX_COEF_MAP_0 0x0410 305 + #define HS_TX_COEF_MAP_0_SSTX_DEEMP GENMASK(17, 12) 306 + #define HS_TX_COEF_MAP_0_SSTX_LEVEL GENMASK(11, 6) 307 + #define HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT GENMASK(5, 0) 308 + 309 + 163 310 #define KHZ 1000 164 311 #define MHZ (KHZ * KHZ) 312 + 313 + #define PHY_TUNING_ENTRY_PHY(o, m, v) { \ 314 + .off = (o), \ 315 + .mask = (m), \ 316 + .val = (v), \ 317 + .region = PTR_PHY \ 318 + } 319 + 320 + #define PHY_TUNING_ENTRY_PCS(o, m, v) { \ 321 + .off = (o), \ 322 + .mask = (m), \ 323 + .val = (v), \ 324 + .region = PTR_PCS \ 325 + } 326 + 327 + #define PHY_TUNING_ENTRY_PMA(o, m, v) { \ 328 + .off = (o), \ 329 + .mask = (m), \ 330 + .val = (v), \ 331 + .region = PTR_PMA, \ 332 + } 333 + 334 + #define PHY_TUNING_ENTRY_LAST { .region = PTR_INVALID } 335 + 336 + #define for_each_phy_tune(tune) \ 337 + for (; (tune)->region != PTR_INVALID; ++(tune)) 338 + 339 + struct exynos5_usbdrd_phy_tuning { 340 + u32 off; 341 + u32 mask; 342 + u32 val; 343 + char region; 344 + #define PTR_INVALID 0 345 + #define PTR_PHY 1 346 + #define PTR_PCS 2 347 + #define PTR_PMA 3 348 + }; 349 + 350 + enum exynos5_usbdrd_phy_tuning_state { 351 + PTS_UTMI_POSTINIT, 352 + PTS_PIPE3_PREINIT, 353 + PTS_PIPE3_INIT, 354 + PTS_PIPE3_POSTINIT, 355 + PTS_PIPE3_POSTLOCK, 356 + PTS_MAX, 357 + }; 165 358 166 359 enum exynos5_usbdrd_phy_id { 167 360 EXYNOS5_DRDPHY_UTMI, ··· 354 187 355 188 struct exynos5_usbdrd_phy_config { 356 189 u32 id; 357 - void (*phy_isol)(struct phy_usb_instance *inst, u32 on); 190 + void (*phy_isol)(struct phy_usb_instance *inst, bool isolate); 358 191 void (*phy_init)(struct exynos5_usbdrd_phy *phy_drd); 359 192 unsigned int (*set_refclk)(struct phy_usb_instance *inst); 360 
193 }; 361 194 362 195 struct exynos5_usbdrd_phy_drvdata { 363 196 const struct exynos5_usbdrd_phy_config *phy_cfg; 197 + const struct exynos5_usbdrd_phy_tuning **phy_tunes; 364 198 const struct phy_ops *phy_ops; 199 + const char * const *clk_names; 200 + int n_clks; 201 + const char * const *core_clk_names; 202 + int n_core_clks; 203 + const char * const *regulator_names; 204 + int n_regulators; 365 205 u32 pmu_offset_usbdrd0_phy; 206 + u32 pmu_offset_usbdrd0_phy_ss; 366 207 u32 pmu_offset_usbdrd1_phy; 367 - bool has_common_clk_gate; 368 208 }; 369 209 370 210 /** 371 211 * struct exynos5_usbdrd_phy - driver data for USB 3.0 PHY 372 212 * @dev: pointer to device instance of this platform device 373 213 * @reg_phy: usb phy controller register memory base 374 - * @clk: phy clock for register access 375 - * @pipeclk: clock for pipe3 phy 376 - * @utmiclk: clock for utmi+ phy 377 - * @itpclk: clock for ITP generation 214 + * @reg_pcs: usb phy physical coding sublayer register memory base 215 + * @reg_pma: usb phy physical media attachment register memory base 216 + * @clks: clocks for register access 217 + * @core_clks: core clocks for phy (ref, pipe3, utmi+, ITP, etc. as required) 378 218 * @drv_data: pointer to SoC level driver data structure 379 219 * @phys: array for 'EXYNOS5_DRDPHYS_NUM' number of PHY 380 220 * instances each with its 'phy' and 'phy_cfg'. 
381 221 * @extrefclk: frequency select settings when using 'separate 382 222 * reference clocks' for SS and HS operations 383 - * @ref_clk: reference clock to PHY block from which PHY's 384 - * operational clocks are derived 385 - * @vbus: VBUS regulator for phy 386 - * @vbus_boost: Boost regulator for VBUS present on few Exynos boards 223 + * @regulators: regulators for phy 387 224 */ 388 225 struct exynos5_usbdrd_phy { 389 226 struct device *dev; 390 227 void __iomem *reg_phy; 391 - struct clk *clk; 392 - struct clk *pipeclk; 393 - struct clk *utmiclk; 394 - struct clk *itpclk; 228 + void __iomem *reg_pcs; 229 + void __iomem *reg_pma; 230 + struct clk_bulk_data *clks; 231 + struct clk_bulk_data *core_clks; 395 232 const struct exynos5_usbdrd_phy_drvdata *drv_data; 396 233 struct phy_usb_instance { 397 234 struct phy *phy; ··· 405 234 const struct exynos5_usbdrd_phy_config *phy_cfg; 406 235 } phys[EXYNOS5_DRDPHYS_NUM]; 407 236 u32 extrefclk; 408 - struct clk *ref_clk; 409 - struct regulator *vbus; 410 - struct regulator *vbus_boost; 237 + struct regulator_bulk_data *regulators; 411 238 }; 412 239 413 240 static inline ··· 456 287 } 457 288 458 289 static void exynos5_usbdrd_phy_isol(struct phy_usb_instance *inst, 459 - unsigned int on) 290 + bool isolate) 460 291 { 461 292 unsigned int val; 462 293 463 294 if (!inst->reg_pmu) 464 295 return; 465 296 466 - val = on ? 0 : EXYNOS4_PHY_ENABLE; 297 + val = isolate ? 
0 : EXYNOS4_PHY_ENABLE; 467 298 468 299 regmap_update_bits(inst->reg_pmu, inst->pmu_offset, 469 300 EXYNOS4_PHY_ENABLE, val); ··· 540 371 return reg; 541 372 } 542 373 374 + static void 375 + exynos5_usbdrd_apply_phy_tunes(struct exynos5_usbdrd_phy *phy_drd, 376 + enum exynos5_usbdrd_phy_tuning_state state) 377 + { 378 + const struct exynos5_usbdrd_phy_tuning *tune; 379 + 380 + tune = phy_drd->drv_data->phy_tunes[state]; 381 + if (!tune) 382 + return; 383 + 384 + for_each_phy_tune(tune) { 385 + void __iomem *reg_base; 386 + u32 reg = 0; 387 + 388 + switch (tune->region) { 389 + case PTR_PHY: 390 + reg_base = phy_drd->reg_phy; 391 + break; 392 + case PTR_PCS: 393 + reg_base = phy_drd->reg_pcs; 394 + break; 395 + case PTR_PMA: 396 + reg_base = phy_drd->reg_pma; 397 + break; 398 + default: 399 + dev_warn_once(phy_drd->dev, 400 + "unknown phy region %d\n", tune->region); 401 + continue; 402 + } 403 + 404 + if (~tune->mask) { 405 + reg = readl(reg_base + tune->off); 406 + reg &= ~tune->mask; 407 + } 408 + reg |= tune->val; 409 + writel(reg, reg_base + tune->off); 410 + } 411 + } 412 + 543 413 static void exynos5_usbdrd_pipe3_init(struct exynos5_usbdrd_phy *phy_drd) 544 414 { 545 415 u32 reg; ··· 592 384 reg = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); 593 385 reg &= ~PHYTEST_POWERDOWN_SSP; 594 386 writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); 387 + } 388 + 389 + static void 390 + exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(struct exynos5_usbdrd_phy *phy_drd) 391 + { 392 + void __iomem *regs_base = phy_drd->reg_phy; 393 + u32 reg; 394 + 395 + /* link pipe_clock selection to pclk of PMA */ 396 + reg = readl(regs_base + EXYNOS850_DRD_CLKRST); 397 + reg |= CLKRST_LINK_PCLK_SEL; 398 + writel(reg, regs_base + EXYNOS850_DRD_CLKRST); 399 + 400 + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); 401 + reg &= ~SECPMACTL_PMA_REF_FREQ_SEL; 402 + reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1); 403 + /* SFR reset */ 404 + reg |= (SECPMACTL_PMA_LOW_PWR | 
SECPMACTL_PMA_APB_SW_RST); 405 + reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL | 406 + SECPMACTL_PMA_LCPLL_REF_CLK_SEL); 407 + /* PMA power off */ 408 + reg |= (SECPMACTL_PMA_TRSV_SW_RST | SECPMACTL_PMA_CMN_SW_RST | 409 + SECPMACTL_PMA_INIT_SW_RST); 410 + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); 411 + 412 + udelay(1); 413 + 414 + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); 415 + reg &= ~SECPMACTL_PMA_LOW_PWR; 416 + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); 417 + 418 + udelay(1); 419 + 420 + /* release override */ 421 + reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL); 422 + reg &= ~LINKCTRL_FORCE_PIPE_EN; 423 + writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL); 424 + 425 + udelay(1); 426 + 427 + /* APB enable */ 428 + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); 429 + reg &= ~SECPMACTL_PMA_APB_SW_RST; 430 + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); 431 + } 432 + 433 + static void 434 + exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(struct exynos5_usbdrd_phy *phy_drd) 435 + { 436 + void __iomem *regs_base = phy_drd->reg_pma; 437 + u32 reg; 438 + 439 + /* lane configuration: USB on all lanes */ 440 + reg = readl(regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8); 441 + reg &= ~CMN_REG00B8_LANE_MUX_SEL_DP; 442 + writel(reg, regs_base + EXYNOS9_PMA_USBDP_CMN_REG00B8); 443 + 444 + /* 445 + * FIXME: below code supports one connector orientation only. It needs 446 + * updating once we can receive connector events. 
447 + */ 448 + /* override of TX receiver detector and comparator: lane 1 */ 449 + reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413); 450 + reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_COMP_EN; 451 + reg &= ~TRSV_REG0413_OVRD_LN1_TX_RXD_EN; 452 + writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0413); 453 + 454 + /* lane 3 */ 455 + reg = readl(regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813); 456 + reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_COMP_EN; 457 + reg |= TRSV_REG0813_OVRD_LN3_TX_RXD_EN; 458 + writel(reg, regs_base + EXYNOS9_PMA_USBDP_TRSV_REG0813); 459 + } 460 + 461 + static int 462 + exynos5_usbdrd_usbdp_g2_v4_pma_check_pll_lock(struct exynos5_usbdrd_phy *phy_drd) 463 + { 464 + static const unsigned int timeout_us = 40000; 465 + static const unsigned int sleep_us = 40; 466 + static const u32 locked = (CMN_REG01C0_ANA_LCPLL_LOCK_DONE | 467 + CMN_REG01C0_ANA_LCPLL_AFC_DONE); 468 + u32 reg; 469 + int err; 470 + 471 + err = readl_poll_timeout( 472 + phy_drd->reg_pma + EXYNOS9_PMA_USBDP_CMN_REG01C0, 473 + reg, (reg & locked) == locked, sleep_us, timeout_us); 474 + if (err) 475 + dev_err(phy_drd->dev, 476 + "timed out waiting for PLL lock: %#.8x\n", reg); 477 + 478 + return err; 479 + } 480 + 481 + static void 482 + exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(struct exynos5_usbdrd_phy *phy_drd) 483 + { 484 + static const unsigned int timeout_us = 40000; 485 + static const unsigned int sleep_us = 40; 486 + static const u32 locked = 487 + (TRSV_REG03C3_LN0_MON_RX_CDR_AFC_DONE 488 + | TRSV_REG03C3_LN0_MON_RX_CDR_CAL_DONE 489 + | TRSV_REG03C3_LN0_MON_RX_CDR_FLD_PLL_MODE_DONE 490 + | TRSV_REG03C3_LN0_MON_RX_CDR_LOCK_DONE); 491 + u32 reg; 492 + int err; 493 + 494 + err = readl_poll_timeout( 495 + phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG03C3, 496 + reg, (reg & locked) == locked, sleep_us, timeout_us); 497 + if (!err) 498 + return; 499 + 500 + dev_err(phy_drd->dev, 501 + "timed out waiting for CDR lock (l0): %#.8x, retrying\n", reg); 502 + 503 + /* based on cable orientation, 
this might be on the other phy port */ 504 + err = readl_poll_timeout( 505 + phy_drd->reg_pma + EXYNOS9_PMA_USBDP_TRSV_REG07C3, 506 + reg, (reg & locked) == locked, sleep_us, timeout_us); 507 + if (err) 508 + dev_err(phy_drd->dev, 509 + "timed out waiting for CDR lock (l2): %#.8x\n", reg); 595 510 } 596 511 597 512 static void exynos5_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) ··· 748 417 struct phy_usb_instance *inst = phy_get_drvdata(phy); 749 418 struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); 750 419 751 - ret = clk_prepare_enable(phy_drd->clk); 420 + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); 752 421 if (ret) 753 422 return ret; 754 423 ··· 793 462 794 463 writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); 795 464 796 - udelay(10); 465 + fsleep(10); 797 466 798 467 reg &= ~PHYCLKRST_PORTRESET; 799 468 writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYCLKRST); 800 469 801 - clk_disable_unprepare(phy_drd->clk); 470 + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); 802 471 803 472 return 0; 804 473 } ··· 810 479 struct phy_usb_instance *inst = phy_get_drvdata(phy); 811 480 struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); 812 481 813 - ret = clk_prepare_enable(phy_drd->clk); 482 + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); 814 483 if (ret) 815 484 return ret; 816 485 ··· 832 501 PHYTEST_POWERDOWN_HSP; 833 502 writel(reg, phy_drd->reg_phy + EXYNOS5_DRD_PHYTEST); 834 503 835 - clk_disable_unprepare(phy_drd->clk); 504 + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); 836 505 837 506 return 0; 838 507 } ··· 845 514 846 515 dev_dbg(phy_drd->dev, "Request to power_on usbdrd_phy phy\n"); 847 516 848 - clk_prepare_enable(phy_drd->ref_clk); 849 - if (!phy_drd->drv_data->has_common_clk_gate) { 850 - clk_prepare_enable(phy_drd->pipeclk); 851 - clk_prepare_enable(phy_drd->utmiclk); 852 - clk_prepare_enable(phy_drd->itpclk); 853 - } 517 + ret = 
clk_bulk_prepare_enable(phy_drd->drv_data->n_core_clks, 518 + phy_drd->core_clks); 519 + if (ret) 520 + return ret; 854 521 855 522 /* Enable VBUS supply */ 856 - if (phy_drd->vbus_boost) { 857 - ret = regulator_enable(phy_drd->vbus_boost); 858 - if (ret) { 859 - dev_err(phy_drd->dev, 860 - "Failed to enable VBUS boost supply\n"); 861 - goto fail_vbus; 862 - } 523 + ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators, 524 + phy_drd->regulators); 525 + if (ret) { 526 + dev_err(phy_drd->dev, "Failed to enable PHY regulator(s)\n"); 527 + goto fail_vbus; 863 528 } 864 529 865 - if (phy_drd->vbus) { 866 - ret = regulator_enable(phy_drd->vbus); 867 - if (ret) { 868 - dev_err(phy_drd->dev, "Failed to enable VBUS supply\n"); 869 - goto fail_vbus_boost; 870 - } 871 - } 872 - 873 - /* Power-on PHY*/ 874 - inst->phy_cfg->phy_isol(inst, 0); 530 + /* Power-on PHY */ 531 + inst->phy_cfg->phy_isol(inst, false); 875 532 876 533 return 0; 877 534 878 - fail_vbus_boost: 879 - if (phy_drd->vbus_boost) 880 - regulator_disable(phy_drd->vbus_boost); 881 - 882 535 fail_vbus: 883 - clk_disable_unprepare(phy_drd->ref_clk); 884 - if (!phy_drd->drv_data->has_common_clk_gate) { 885 - clk_disable_unprepare(phy_drd->itpclk); 886 - clk_disable_unprepare(phy_drd->utmiclk); 887 - clk_disable_unprepare(phy_drd->pipeclk); 888 - } 536 + clk_bulk_disable_unprepare(phy_drd->drv_data->n_core_clks, 537 + phy_drd->core_clks); 889 538 890 539 return ret; 891 540 } ··· 878 567 dev_dbg(phy_drd->dev, "Request to power_off usbdrd_phy phy\n"); 879 568 880 569 /* Power-off the PHY */ 881 - inst->phy_cfg->phy_isol(inst, 1); 570 + inst->phy_cfg->phy_isol(inst, true); 882 571 883 572 /* Disable VBUS supply */ 884 - if (phy_drd->vbus) 885 - regulator_disable(phy_drd->vbus); 886 - if (phy_drd->vbus_boost) 887 - regulator_disable(phy_drd->vbus_boost); 573 + regulator_bulk_disable(phy_drd->drv_data->n_regulators, 574 + phy_drd->regulators); 888 575 889 - clk_disable_unprepare(phy_drd->ref_clk); 890 - if 
(!phy_drd->drv_data->has_common_clk_gate) { 891 - clk_disable_unprepare(phy_drd->itpclk); 892 - clk_disable_unprepare(phy_drd->pipeclk); 893 - clk_disable_unprepare(phy_drd->utmiclk); 894 - } 576 + clk_bulk_disable_unprepare(phy_drd->drv_data->n_core_clks, 577 + phy_drd->core_clks); 895 578 896 579 return 0; 897 580 } ··· 1049 744 .owner = THIS_MODULE, 1050 745 }; 1051 746 747 + static void 748 + exynos5_usbdrd_usb_v3p1_pipe_override(struct exynos5_usbdrd_phy *phy_drd) 749 + { 750 + void __iomem *regs_base = phy_drd->reg_phy; 751 + u32 reg; 752 + 753 + /* force pipe3 signal for link */ 754 + reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL); 755 + reg &= ~LINKCTRL_FORCE_PHYSTATUS; 756 + reg |= LINKCTRL_FORCE_PIPE_EN | LINKCTRL_FORCE_RXELECIDLE; 757 + writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL); 758 + 759 + /* PMA disable */ 760 + reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); 761 + reg |= SECPMACTL_PMA_LOW_PWR; 762 + writel(reg, regs_base + EXYNOS850_DRD_SECPMACTL); 763 + } 764 + 1052 765 static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd) 1053 766 { 1054 767 void __iomem *regs_base = phy_drd->reg_phy; 1055 768 u32 reg; 769 + u32 ss_ports; 1056 770 1057 771 /* 1058 772 * Disable HWACG (hardware auto clock gating control). 
This will force ··· 1082 758 reg |= LINKCTRL_FORCE_QACT; 1083 759 writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL); 1084 760 761 + reg = readl(regs_base + EXYNOS850_DRD_LINKPORT); 762 + ss_ports = FIELD_GET(LINKPORT_HOST_NUM_U3, reg); 763 + 1085 764 /* Start PHY Reset (POR=high) */ 1086 765 reg = readl(regs_base + EXYNOS850_DRD_CLKRST); 766 + if (ss_ports) { 767 + reg |= CLKRST_PHY20_SW_POR; 768 + reg |= CLKRST_PHY20_SW_POR_SEL; 769 + reg |= CLKRST_PHY_RESET_SEL; 770 + } 1087 771 reg |= CLKRST_PHY_SW_RST; 1088 772 writel(reg, regs_base + EXYNOS850_DRD_CLKRST); 1089 773 ··· 1119 787 reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL; 1120 788 writel(reg, regs_base + EXYNOS850_DRD_HSP); 1121 789 790 + reg = readl(regs_base + EXYNOS850_DRD_SSPPLLCTL); 791 + reg &= ~SSPPLLCTL_FSEL; 792 + switch (phy_drd->extrefclk) { 793 + case EXYNOS5_FSEL_50MHZ: 794 + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7); 795 + break; 796 + case EXYNOS5_FSEL_26MHZ: 797 + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6); 798 + break; 799 + case EXYNOS5_FSEL_24MHZ: 800 + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2); 801 + break; 802 + case EXYNOS5_FSEL_20MHZ: 803 + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1); 804 + break; 805 + case EXYNOS5_FSEL_19MHZ2: 806 + reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0); 807 + break; 808 + default: 809 + dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n", 810 + phy_drd->extrefclk); 811 + break; 812 + } 813 + writel(reg, regs_base + EXYNOS850_DRD_SSPPLLCTL); 814 + 815 + if (phy_drd->drv_data->phy_tunes) 816 + exynos5_usbdrd_apply_phy_tunes(phy_drd, 817 + PTS_UTMI_POSTINIT); 818 + 1122 819 /* Power up PHY analog blocks */ 1123 820 reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST); 1124 821 reg &= ~HSP_TEST_SIDDQ; 1125 822 writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST); 1126 823 1127 824 /* Finish PHY reset (POR=low) */ 1128 - udelay(10); /* required before doing POR=low */ 825 + fsleep(10); /* required before doing POR=low */ 1129 826 reg = readl(regs_base + EXYNOS850_DRD_CLKRST); 
827 + if (ss_ports) { 828 + reg |= CLKRST_PHY20_SW_POR_SEL; 829 + reg &= ~CLKRST_PHY20_SW_POR; 830 + } 1130 831 reg &= ~(CLKRST_PHY_SW_RST | CLKRST_PORT_RST); 1131 832 writel(reg, regs_base + EXYNOS850_DRD_CLKRST); 1132 - udelay(75); /* required after POR=low for guaranteed PHY clock */ 833 + fsleep(75); /* required after POR=low for guaranteed PHY clock */ 1133 834 1134 835 /* Disable single ended signal out */ 1135 836 reg = readl(regs_base + EXYNOS850_DRD_HSP); 1136 837 reg &= ~HSP_FSV_OUT_EN; 1137 838 writel(reg, regs_base + EXYNOS850_DRD_HSP); 839 + 840 + if (ss_ports) 841 + exynos5_usbdrd_usb_v3p1_pipe_override(phy_drd); 1138 842 } 1139 843 1140 844 static int exynos850_usbdrd_phy_init(struct phy *phy) ··· 1179 811 struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); 1180 812 int ret; 1181 813 1182 - ret = clk_prepare_enable(phy_drd->clk); 814 + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); 1183 815 if (ret) 1184 816 return ret; 1185 817 1186 818 /* UTMI or PIPE3 specific init */ 1187 819 inst->phy_cfg->phy_init(phy_drd); 1188 820 1189 - clk_disable_unprepare(phy_drd->clk); 821 + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); 1190 822 1191 823 return 0; 1192 824 } ··· 1199 831 u32 reg; 1200 832 int ret; 1201 833 1202 - ret = clk_prepare_enable(phy_drd->clk); 834 + ret = clk_bulk_prepare_enable(phy_drd->drv_data->n_clks, phy_drd->clks); 1203 835 if (ret) 1204 836 return ret; 1205 837 ··· 1218 850 reg = readl(regs_base + EXYNOS850_DRD_CLKRST); 1219 851 reg |= CLKRST_LINK_SW_RST; 1220 852 writel(reg, regs_base + EXYNOS850_DRD_CLKRST); 1221 - udelay(10); /* required before doing POR=low */ 853 + fsleep(10); /* required before doing POR=low */ 1222 854 reg &= ~CLKRST_LINK_SW_RST; 1223 855 writel(reg, regs_base + EXYNOS850_DRD_CLKRST); 1224 856 1225 - clk_disable_unprepare(phy_drd->clk); 857 + clk_bulk_disable_unprepare(phy_drd->drv_data->n_clks, phy_drd->clks); 1226 858 1227 859 return 0; 1228 860 } ··· 1235 
867 .owner = THIS_MODULE, 1236 868 }; 1237 869 1238 - static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd) 870 + static void exynos5_usbdrd_gs101_pipe3_init(struct exynos5_usbdrd_phy *phy_drd) 1239 871 { 1240 - unsigned long ref_rate; 872 + void __iomem *regs_pma = phy_drd->reg_pma; 873 + void __iomem *regs_phy = phy_drd->reg_phy; 874 + u32 reg; 875 + 876 + exynos5_usbdrd_usbdp_g2_v4_ctrl_pma_ready(phy_drd); 877 + 878 + /* force aux off */ 879 + reg = readl(regs_pma + EXYNOS9_PMA_USBDP_CMN_REG0008); 880 + reg &= ~CMN_REG0008_AUX_EN; 881 + reg |= CMN_REG0008_OVRD_AUX_EN; 882 + writel(reg, regs_pma + EXYNOS9_PMA_USBDP_CMN_REG0008); 883 + 884 + exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_PREINIT); 885 + exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_INIT); 886 + exynos5_usbdrd_apply_phy_tunes(phy_drd, PTS_PIPE3_POSTINIT); 887 + 888 + exynos5_usbdrd_usbdp_g2_v4_pma_lane_mux_sel(phy_drd); 889 + 890 + /* reset release from port */ 891 + reg = readl(regs_phy + EXYNOS850_DRD_SECPMACTL); 892 + reg &= ~(SECPMACTL_PMA_TRSV_SW_RST | SECPMACTL_PMA_CMN_SW_RST | 893 + SECPMACTL_PMA_INIT_SW_RST); 894 + writel(reg, regs_phy + EXYNOS850_DRD_SECPMACTL); 895 + 896 + if (!exynos5_usbdrd_usbdp_g2_v4_pma_check_pll_lock(phy_drd)) 897 + exynos5_usbdrd_usbdp_g2_v4_pma_check_cdr_lock(phy_drd); 898 + } 899 + 900 + static int exynos5_usbdrd_gs101_phy_init(struct phy *phy) 901 + { 902 + struct phy_usb_instance *inst = phy_get_drvdata(phy); 903 + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); 1241 904 int ret; 1242 905 1243 - phy_drd->clk = devm_clk_get(phy_drd->dev, "phy"); 1244 - if (IS_ERR(phy_drd->clk)) { 1245 - dev_err(phy_drd->dev, "Failed to get phy clock\n"); 1246 - return PTR_ERR(phy_drd->clk); 906 + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) { 907 + /* Power-on PHY ... 
*/ 908 + ret = regulator_bulk_enable(phy_drd->drv_data->n_regulators, 909 + phy_drd->regulators); 910 + if (ret) { 911 + dev_err(phy_drd->dev, 912 + "Failed to enable PHY regulator(s)\n"); 913 + return ret; 914 + } 1247 915 } 916 + /* 917 + * ... and ungate power via PMU. Without this here, we get an SError 918 + * trying to access PMA registers 919 + */ 920 + exynos5_usbdrd_phy_isol(inst, false); 1248 921 1249 - phy_drd->ref_clk = devm_clk_get(phy_drd->dev, "ref"); 1250 - if (IS_ERR(phy_drd->ref_clk)) { 1251 - dev_err(phy_drd->dev, "Failed to get phy reference clock\n"); 1252 - return PTR_ERR(phy_drd->ref_clk); 1253 - } 1254 - ref_rate = clk_get_rate(phy_drd->ref_clk); 922 + return exynos850_usbdrd_phy_init(phy); 923 + } 1255 924 1256 - ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk); 1257 - if (ret) { 1258 - dev_err(phy_drd->dev, "Clock rate (%ld) not supported\n", 1259 - ref_rate); 925 + static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy) 926 + { 927 + struct phy_usb_instance *inst = phy_get_drvdata(phy); 928 + struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); 929 + int ret; 930 + 931 + if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI) 932 + return 0; 933 + 934 + ret = exynos850_usbdrd_phy_exit(phy); 935 + if (ret) 1260 936 return ret; 937 + 938 + exynos5_usbdrd_phy_isol(inst, true); 939 + return regulator_bulk_disable(phy_drd->drv_data->n_regulators, 940 + phy_drd->regulators); 941 + } 942 + 943 + static const struct phy_ops gs101_usbdrd_phy_ops = { 944 + .init = exynos5_usbdrd_gs101_phy_init, 945 + .exit = exynos5_usbdrd_gs101_phy_exit, 946 + .owner = THIS_MODULE, 947 + }; 948 + 949 + static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd) 950 + { 951 + int ret; 952 + struct clk *ref_clk; 953 + unsigned long ref_rate; 954 + 955 + phy_drd->clks = devm_kcalloc(phy_drd->dev, phy_drd->drv_data->n_clks, 956 + sizeof(*phy_drd->clks), GFP_KERNEL); 957 + if (!phy_drd->clks) 958 + return -ENOMEM; 959 + 960 + for (int i = 0; i < 
phy_drd->drv_data->n_clks; ++i) 961 + phy_drd->clks[i].id = phy_drd->drv_data->clk_names[i]; 962 + 963 + ret = devm_clk_bulk_get(phy_drd->dev, phy_drd->drv_data->n_clks, 964 + phy_drd->clks); 965 + if (ret) 966 + return dev_err_probe(phy_drd->dev, ret, 967 + "failed to get phy clock(s)\n"); 968 + 969 + phy_drd->core_clks = devm_kcalloc(phy_drd->dev, 970 + phy_drd->drv_data->n_core_clks, 971 + sizeof(*phy_drd->core_clks), 972 + GFP_KERNEL); 973 + if (!phy_drd->core_clks) 974 + return -ENOMEM; 975 + 976 + for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) 977 + phy_drd->core_clks[i].id = phy_drd->drv_data->core_clk_names[i]; 978 + 979 + ret = devm_clk_bulk_get(phy_drd->dev, phy_drd->drv_data->n_core_clks, 980 + phy_drd->core_clks); 981 + if (ret) 982 + return dev_err_probe(phy_drd->dev, ret, 983 + "failed to get phy core clock(s)\n"); 984 + 985 + ref_clk = NULL; 986 + for (int i = 0; i < phy_drd->drv_data->n_core_clks; ++i) { 987 + if (!strcmp(phy_drd->core_clks[i].id, "ref")) { 988 + ref_clk = phy_drd->core_clks[i].clk; 989 + break; 990 + } 1261 991 } 992 + if (!ref_clk) 993 + return dev_err_probe(phy_drd->dev, -ENODEV, 994 + "failed to find phy reference clock\n"); 1262 995 1263 - if (!phy_drd->drv_data->has_common_clk_gate) { 1264 - phy_drd->pipeclk = devm_clk_get(phy_drd->dev, "phy_pipe"); 1265 - if (IS_ERR(phy_drd->pipeclk)) { 1266 - dev_info(phy_drd->dev, 1267 - "PIPE3 phy operational clock not specified\n"); 1268 - phy_drd->pipeclk = NULL; 1269 - } 1270 - 1271 - phy_drd->utmiclk = devm_clk_get(phy_drd->dev, "phy_utmi"); 1272 - if (IS_ERR(phy_drd->utmiclk)) { 1273 - dev_info(phy_drd->dev, 1274 - "UTMI phy operational clock not specified\n"); 1275 - phy_drd->utmiclk = NULL; 1276 - } 1277 - 1278 - phy_drd->itpclk = devm_clk_get(phy_drd->dev, "itp"); 1279 - if (IS_ERR(phy_drd->itpclk)) { 1280 - dev_info(phy_drd->dev, 1281 - "ITP clock from main OSC not specified\n"); 1282 - phy_drd->itpclk = NULL; 1283 - } 1284 - } 996 + ref_rate = clk_get_rate(ref_clk); 997 
+ ret = exynos5_rate_to_clk(ref_rate, &phy_drd->extrefclk); 998 + if (ret) 999 + return dev_err_probe(phy_drd->dev, ret, 1000 + "clock rate (%ld) not supported\n", 1001 + ref_rate); 1285 1002 1286 1003 return 0; 1287 1004 } ··· 1394 941 }, 1395 942 }; 1396 943 944 + static const char * const exynos5_clk_names[] = { 945 + "phy", 946 + }; 947 + 948 + static const char * const exynos5_core_clk_names[] = { 949 + "ref", 950 + }; 951 + 952 + static const char * const exynos5433_core_clk_names[] = { 953 + "ref", "phy_pipe", "phy_utmi", "itp", 954 + }; 955 + 956 + static const char * const exynos5_regulator_names[] = { 957 + "vbus", "vbus-boost", 958 + }; 959 + 1397 960 static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = { 1398 961 .phy_cfg = phy_cfg_exynos5, 1399 962 .phy_ops = &exynos5_usbdrd_phy_ops, 1400 963 .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, 1401 964 .pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL, 1402 - .has_common_clk_gate = true, 965 + .clk_names = exynos5_clk_names, 966 + .n_clks = ARRAY_SIZE(exynos5_clk_names), 967 + .core_clk_names = exynos5_core_clk_names, 968 + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), 969 + .regulator_names = exynos5_regulator_names, 970 + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), 1403 971 }; 1404 972 1405 973 static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = { 1406 974 .phy_cfg = phy_cfg_exynos5, 1407 975 .phy_ops = &exynos5_usbdrd_phy_ops, 1408 976 .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, 1409 - .has_common_clk_gate = true, 977 + .clk_names = exynos5_clk_names, 978 + .n_clks = ARRAY_SIZE(exynos5_clk_names), 979 + .core_clk_names = exynos5_core_clk_names, 980 + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), 981 + .regulator_names = exynos5_regulator_names, 982 + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), 1410 983 }; 1411 984 1412 985 static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = { ··· 1440 961 .phy_ops 
= &exynos5_usbdrd_phy_ops, 1441 962 .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, 1442 963 .pmu_offset_usbdrd1_phy = EXYNOS5433_USBHOST30_PHY_CONTROL, 1443 - .has_common_clk_gate = false, 964 + .clk_names = exynos5_clk_names, 965 + .n_clks = ARRAY_SIZE(exynos5_clk_names), 966 + .core_clk_names = exynos5433_core_clk_names, 967 + .n_core_clks = ARRAY_SIZE(exynos5433_core_clk_names), 968 + .regulator_names = exynos5_regulator_names, 969 + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), 1444 970 }; 1445 971 1446 972 static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = { 1447 973 .phy_cfg = phy_cfg_exynos5, 1448 974 .phy_ops = &exynos5_usbdrd_phy_ops, 1449 975 .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, 1450 - .has_common_clk_gate = false, 976 + .clk_names = exynos5_clk_names, 977 + .n_clks = ARRAY_SIZE(exynos5_clk_names), 978 + .core_clk_names = exynos5433_core_clk_names, 979 + .n_core_clks = ARRAY_SIZE(exynos5433_core_clk_names), 980 + .regulator_names = exynos5_regulator_names, 981 + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), 1451 982 }; 1452 983 1453 984 static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = { 1454 985 .phy_cfg = phy_cfg_exynos850, 1455 986 .phy_ops = &exynos850_usbdrd_phy_ops, 1456 987 .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL, 1457 - .has_common_clk_gate = true, 988 + .clk_names = exynos5_clk_names, 989 + .n_clks = ARRAY_SIZE(exynos5_clk_names), 990 + .core_clk_names = exynos5_core_clk_names, 991 + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), 992 + .regulator_names = exynos5_regulator_names, 993 + .n_regulators = ARRAY_SIZE(exynos5_regulator_names), 994 + }; 995 + 996 + static const struct exynos5_usbdrd_phy_config phy_cfg_gs101[] = { 997 + { 998 + .id = EXYNOS5_DRDPHY_UTMI, 999 + .phy_isol = exynos5_usbdrd_phy_isol, 1000 + .phy_init = exynos850_usbdrd_utmi_init, 1001 + }, 1002 + { 1003 + .id = EXYNOS5_DRDPHY_PIPE3, 1004 + .phy_isol = exynos5_usbdrd_phy_isol, 1005 + 
.phy_init = exynos5_usbdrd_gs101_pipe3_init, 1006 + }, 1007 + }; 1008 + 1009 + static const struct exynos5_usbdrd_phy_tuning gs101_tunes_utmi_postinit[] = { 1010 + PHY_TUNING_ENTRY_PHY(EXYNOS850_DRD_HSPPARACON, 1011 + (HSPPARACON_TXVREF | HSPPARACON_TXRES | 1012 + HSPPARACON_TXPREEMPAMP | HSPPARACON_SQRX | 1013 + HSPPARACON_COMPDIS), 1014 + (FIELD_PREP_CONST(HSPPARACON_TXVREF, 6) | 1015 + FIELD_PREP_CONST(HSPPARACON_TXRES, 1) | 1016 + FIELD_PREP_CONST(HSPPARACON_TXPREEMPAMP, 3) | 1017 + FIELD_PREP_CONST(HSPPARACON_SQRX, 5) | 1018 + FIELD_PREP_CONST(HSPPARACON_COMPDIS, 7))), 1019 + PHY_TUNING_ENTRY_LAST 1020 + }; 1021 + 1022 + static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_preinit[] = { 1023 + /* preinit */ 1024 + /* CDR data mode exit GEN1 ON / GEN2 OFF */ 1025 + PHY_TUNING_ENTRY_PMA(0x0c8c, -1, 0xff), 1026 + PHY_TUNING_ENTRY_PMA(0x1c8c, -1, 0xff), 1027 + PHY_TUNING_ENTRY_PMA(0x0c9c, -1, 0x7d), 1028 + PHY_TUNING_ENTRY_PMA(0x1c9c, -1, 0x7d), 1029 + /* improve EDS distribution */ 1030 + PHY_TUNING_ENTRY_PMA(0x0e7c, -1, 0x06), 1031 + PHY_TUNING_ENTRY_PMA(0x09e0, -1, 0x00), 1032 + PHY_TUNING_ENTRY_PMA(0x09e4, -1, 0x36), 1033 + PHY_TUNING_ENTRY_PMA(0x1e7c, -1, 0x06), 1034 + PHY_TUNING_ENTRY_PMA(0x1e90, -1, 0x00), 1035 + PHY_TUNING_ENTRY_PMA(0x1e94, -1, 0x36), 1036 + /* improve LVCC */ 1037 + PHY_TUNING_ENTRY_PMA(0x08f0, -1, 0x30), 1038 + PHY_TUNING_ENTRY_PMA(0x18f0, -1, 0x30), 1039 + /* LFPS RX VIH shmoo hole */ 1040 + PHY_TUNING_ENTRY_PMA(0x0a08, -1, 0x0c), 1041 + PHY_TUNING_ENTRY_PMA(0x1a08, -1, 0x0c), 1042 + /* remove unrelated option for v4 phy */ 1043 + PHY_TUNING_ENTRY_PMA(0x0a0c, -1, 0x05), 1044 + PHY_TUNING_ENTRY_PMA(0x1a0c, -1, 0x05), 1045 + /* improve Gen2 LVCC */ 1046 + PHY_TUNING_ENTRY_PMA(0x00f8, -1, 0x1c), 1047 + PHY_TUNING_ENTRY_PMA(0x00fc, -1, 0x54), 1048 + /* Change Vth of RCV_DET because of TD 7.40 Polling Retry Test */ 1049 + PHY_TUNING_ENTRY_PMA(0x104c, -1, 0x07), 1050 + PHY_TUNING_ENTRY_PMA(0x204c, -1, 0x07), 1051 + /* reduce Ux 
Exit time, assuming 26MHz clock */ 1052 + /* Gen1 */ 1053 + PHY_TUNING_ENTRY_PMA(0x0ca8, -1, 0x00), 1054 + PHY_TUNING_ENTRY_PMA(0x0cac, -1, 0x04), 1055 + PHY_TUNING_ENTRY_PMA(0x1ca8, -1, 0x00), 1056 + PHY_TUNING_ENTRY_PMA(0x1cac, -1, 0x04), 1057 + /* Gen2 */ 1058 + PHY_TUNING_ENTRY_PMA(0x0cb8, -1, 0x00), 1059 + PHY_TUNING_ENTRY_PMA(0x0cbc, -1, 0x04), 1060 + PHY_TUNING_ENTRY_PMA(0x1cb8, -1, 0x00), 1061 + PHY_TUNING_ENTRY_PMA(0x1cbc, -1, 0x04), 1062 + /* RX impedance setting */ 1063 + PHY_TUNING_ENTRY_PMA(0x0bb0, 0x03, 0x01), 1064 + PHY_TUNING_ENTRY_PMA(0x0bb4, 0xf0, 0xa0), 1065 + PHY_TUNING_ENTRY_PMA(0x1bb0, 0x03, 0x01), 1066 + PHY_TUNING_ENTRY_PMA(0x1bb4, 0xf0, 0xa0), 1067 + 1068 + PHY_TUNING_ENTRY_LAST 1069 + }; 1070 + 1071 + static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_init[] = { 1072 + /* init */ 1073 + /* abnormal common pattern mask */ 1074 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_BACK_END_MODE_VEC, 1075 + BACK_END_MODE_VEC_DISABLE_DATA_MASK, 0), 1076 + /* de-serializer enabled when U2 */ 1077 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_2, PCS_OUT_VEC_B4_DYNAMIC, 1078 + PCS_OUT_VEC_B4_SEL_OUT), 1079 + /* TX Keeper Disable, Squelch on when U3 */ 1080 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_3, PCS_OUT_VEC_B7_DYNAMIC, 1081 + PCS_OUT_VEC_B7_SEL_OUT | PCS_OUT_VEC_B2_SEL_OUT), 1082 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS1_N1, -1, 1083 + (FIELD_PREP_CONST(NS_VEC_NS_REQ, 5) | 1084 + NS_VEC_ENABLE_TIMER | 1085 + FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3))), 1086 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS2_N0, -1, 1087 + (FIELD_PREP_CONST(NS_VEC_NS_REQ, 1) | 1088 + NS_VEC_ENABLE_TIMER | 1089 + FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3) | 1090 + FIELD_PREP_CONST(NS_VEC_COND_MASK, 2) | 1091 + FIELD_PREP_CONST(NS_VEC_EXP_COND, 2))), 1092 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_NS_VEC_PS3_N0, -1, 1093 + (FIELD_PREP_CONST(NS_VEC_NS_REQ, 1) | 1094 + NS_VEC_ENABLE_TIMER | 1095 + FIELD_PREP_CONST(NS_VEC_SEL_TIMEOUT, 3) | 1096 + FIELD_PREP_CONST(NS_VEC_COND_MASK, 7) | 
1097 + FIELD_PREP_CONST(NS_VEC_EXP_COND, 7))), 1098 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_TIMEOUT_0, -1, 112), 1099 + /* Block Aligner Type B */ 1100 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL, 0, 1101 + RX_CONTROL_EN_BLOCK_ALIGNER_TYPE_B), 1102 + /* Block align at TS1/TS2 for Gen2 stability (Gen2 only) */ 1103 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_RX_CONTROL_DEBUG, 1104 + RX_CONTROL_DEBUG_NUM_COM_FOUND, 1105 + (RX_CONTROL_DEBUG_EN_TS_CHECK | 1106 + /* 1107 + * increase pcs ts1 adding packet-cnt 1 --> 4 1108 + * lnx_rx_valid_rstn_delay_rise_sp/ssp : 1109 + * 19.6us(0x200) -> 15.3us(0x4) 1110 + */ 1111 + FIELD_PREP_CONST(RX_CONTROL_DEBUG_NUM_COM_FOUND, 4))), 1112 + /* Gen1 Tx DRIVER pre-shoot, de-emphasis, level ctrl */ 1113 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_HS_TX_COEF_MAP_0, 1114 + (HS_TX_COEF_MAP_0_SSTX_DEEMP | HS_TX_COEF_MAP_0_SSTX_LEVEL | 1115 + HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT), 1116 + (FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_DEEMP, 8) | 1117 + FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_LEVEL, 0xb) | 1118 + FIELD_PREP_CONST(HS_TX_COEF_MAP_0_SSTX_PRE_SHOOT, 0))), 1119 + /* Gen2 Tx DRIVER level ctrl */ 1120 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_LOCAL_COEF, 1121 + LOCAL_COEF_PMA_CENTER_COEF, 1122 + FIELD_PREP_CONST(LOCAL_COEF_PMA_CENTER_COEF, 0xb)), 1123 + /* Gen2 U1 exit LFPS duration : 900ns ~ 1.2us */ 1124 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_TIMEOUT_3, -1, 4096), 1125 + /* set skp_remove_th 0x2 -> 0x7 for avoiding retry problem. 
*/ 1126 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_EBUF_PARAM, 1127 + EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE, 1128 + FIELD_PREP_CONST(EBUF_PARAM_SKP_REMOVE_TH_EMPTY_MODE, 0x7)), 1129 + 1130 + PHY_TUNING_ENTRY_LAST 1131 + }; 1132 + 1133 + static const struct exynos5_usbdrd_phy_tuning gs101_tunes_pipe3_postlock[] = { 1134 + /* Squelch off when U3 */ 1135 + PHY_TUNING_ENTRY_PCS(EXYNOS9_PCS_OUT_VEC_3, PCS_OUT_VEC_B2_SEL_OUT, 0), 1136 + 1137 + PHY_TUNING_ENTRY_LAST 1138 + }; 1139 + 1140 + static const struct exynos5_usbdrd_phy_tuning *gs101_tunes[PTS_MAX] = { 1141 + [PTS_UTMI_POSTINIT] = gs101_tunes_utmi_postinit, 1142 + [PTS_PIPE3_PREINIT] = gs101_tunes_pipe3_preinit, 1143 + [PTS_PIPE3_INIT] = gs101_tunes_pipe3_init, 1144 + [PTS_PIPE3_POSTLOCK] = gs101_tunes_pipe3_postlock, 1145 + }; 1146 + 1147 + static const char * const gs101_clk_names[] = { 1148 + "phy", "ctrl_aclk", "ctrl_pclk", "scl_pclk", 1149 + }; 1150 + 1151 + static const char * const gs101_regulator_names[] = { 1152 + "pll", 1153 + "dvdd-usb20", "vddh-usb20", "vdd33-usb20", 1154 + "vdda-usbdp", "vddh-usbdp", 1155 + }; 1156 + 1157 + static const struct exynos5_usbdrd_phy_drvdata gs101_usbd31rd_phy = { 1158 + .phy_cfg = phy_cfg_gs101, 1159 + .phy_tunes = gs101_tunes, 1160 + .phy_ops = &gs101_usbdrd_phy_ops, 1161 + .pmu_offset_usbdrd0_phy = GS101_PHY_CTRL_USB20, 1162 + .pmu_offset_usbdrd0_phy_ss = GS101_PHY_CTRL_USBDP, 1163 + .clk_names = gs101_clk_names, 1164 + .n_clks = ARRAY_SIZE(gs101_clk_names), 1165 + .core_clk_names = exynos5_core_clk_names, 1166 + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), 1167 + .regulator_names = gs101_regulator_names, 1168 + .n_regulators = ARRAY_SIZE(gs101_regulator_names), 1458 1169 }; 1459 1170 1460 1171 static const struct of_device_id exynos5_usbdrd_phy_of_match[] = { 1461 1172 { 1173 + .compatible = "google,gs101-usb31drd-phy", 1174 + .data = &gs101_usbd31rd_phy 1175 + }, { 1462 1176 .compatible = "samsung,exynos5250-usbdrd-phy", 1463 1177 .data = &exynos5250_usbdrd_phy 1464 1178 
}, { ··· 1690 1018 dev_set_drvdata(dev, phy_drd); 1691 1019 phy_drd->dev = dev; 1692 1020 1693 - phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0); 1694 - if (IS_ERR(phy_drd->reg_phy)) 1695 - return PTR_ERR(phy_drd->reg_phy); 1696 - 1697 1021 drv_data = of_device_get_match_data(dev); 1698 1022 if (!drv_data) 1699 1023 return -EINVAL; 1700 - 1701 1024 phy_drd->drv_data = drv_data; 1702 1025 1703 - ret = exynos5_usbdrd_phy_clk_handle(phy_drd); 1704 - if (ret) { 1705 - dev_err(dev, "Failed to initialize clocks\n"); 1706 - return ret; 1026 + if (of_property_present(dev->of_node, "reg-names")) { 1027 + void __iomem *reg; 1028 + 1029 + reg = devm_platform_ioremap_resource_byname(pdev, "phy"); 1030 + if (IS_ERR(reg)) 1031 + return PTR_ERR(reg); 1032 + phy_drd->reg_phy = reg; 1033 + 1034 + reg = devm_platform_ioremap_resource_byname(pdev, "pcs"); 1035 + if (IS_ERR(reg)) 1036 + return PTR_ERR(reg); 1037 + phy_drd->reg_pcs = reg; 1038 + 1039 + reg = devm_platform_ioremap_resource_byname(pdev, "pma"); 1040 + if (IS_ERR(reg)) 1041 + return PTR_ERR(reg); 1042 + phy_drd->reg_pma = reg; 1043 + } else { 1044 + /* DTB with just a single region */ 1045 + phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0); 1046 + if (IS_ERR(phy_drd->reg_phy)) 1047 + return PTR_ERR(phy_drd->reg_phy); 1707 1048 } 1049 + 1050 + ret = exynos5_usbdrd_phy_clk_handle(phy_drd); 1051 + if (ret) 1052 + return ret; 1708 1053 1709 1054 reg_pmu = syscon_regmap_lookup_by_phandle(dev->of_node, 1710 1055 "samsung,pmu-syscon"); ··· 1739 1050 if (channel < 0) 1740 1051 dev_dbg(dev, "Not a multi-controller usbdrd phy\n"); 1741 1052 1742 - switch (channel) { 1743 - case 1: 1744 - pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd1_phy; 1745 - break; 1746 - case 0: 1747 - default: 1748 - pmu_offset = phy_drd->drv_data->pmu_offset_usbdrd0_phy; 1749 - break; 1750 - } 1751 - 1752 - /* Get Vbus regulators */ 1753 - phy_drd->vbus = devm_regulator_get(dev, "vbus"); 1754 - if (IS_ERR(phy_drd->vbus)) { 1755 - ret 
= PTR_ERR(phy_drd->vbus); 1756 - if (ret == -EPROBE_DEFER) 1757 - return ret; 1758 - 1759 - dev_warn(dev, "Failed to get VBUS supply regulator\n"); 1760 - phy_drd->vbus = NULL; 1761 - } 1762 - 1763 - phy_drd->vbus_boost = devm_regulator_get(dev, "vbus-boost"); 1764 - if (IS_ERR(phy_drd->vbus_boost)) { 1765 - ret = PTR_ERR(phy_drd->vbus_boost); 1766 - if (ret == -EPROBE_DEFER) 1767 - return ret; 1768 - 1769 - dev_warn(dev, "Failed to get VBUS boost supply regulator\n"); 1770 - phy_drd->vbus_boost = NULL; 1771 - } 1053 + /* Get regulators */ 1054 + phy_drd->regulators = devm_kcalloc(dev, 1055 + drv_data->n_regulators, 1056 + sizeof(*phy_drd->regulators), 1057 + GFP_KERNEL); 1058 + if (!phy_drd->regulators) 1059 + return ENOMEM; 1060 + regulator_bulk_set_supply_names(phy_drd->regulators, 1061 + drv_data->regulator_names, 1062 + drv_data->n_regulators); 1063 + ret = devm_regulator_bulk_get(dev, drv_data->n_regulators, 1064 + phy_drd->regulators); 1065 + if (ret) 1066 + return dev_err_probe(dev, ret, "failed to get regulators\n"); 1772 1067 1773 1068 dev_vdbg(dev, "Creating usbdrd_phy phy\n"); 1774 1069 ··· 1767 1094 phy_drd->phys[i].phy = phy; 1768 1095 phy_drd->phys[i].index = i; 1769 1096 phy_drd->phys[i].reg_pmu = reg_pmu; 1097 + switch (channel) { 1098 + case 1: 1099 + pmu_offset = drv_data->pmu_offset_usbdrd1_phy; 1100 + break; 1101 + case 0: 1102 + default: 1103 + pmu_offset = drv_data->pmu_offset_usbdrd0_phy; 1104 + if (i == EXYNOS5_DRDPHY_PIPE3 && drv_data 1105 + ->pmu_offset_usbdrd0_phy_ss) 1106 + pmu_offset = drv_data->pmu_offset_usbdrd0_phy_ss; 1107 + break; 1108 + } 1770 1109 phy_drd->phys[i].pmu_offset = pmu_offset; 1771 1110 phy_drd->phys[i].phy_cfg = &drv_data->phy_cfg[i]; 1772 1111 phy_set_drvdata(phy, &phy_drd->phys[i]);
+1 -1
drivers/phy/samsung/phy-exynos5250-usb2.c
··· 121 121 #define EXYNOS_5420_USB_ISOL_HOST_OFFSET 0x70C 122 122 #define EXYNOS_5250_USB_ISOL_ENABLE BIT(0) 123 123 124 - /* Mode swtich register */ 124 + /* Mode switch register */ 125 125 #define EXYNOS_5250_MODE_SWITCH_OFFSET 0x230 126 126 #define EXYNOS_5250_MODE_SWITCH_MASK 1 127 127 #define EXYNOS_5250_MODE_SWITCH_DEVICE 0
-5
drivers/phy/st/phy-miphy28lp.c
··· 228 228 int nphys; 229 229 }; 230 230 231 - struct miphy_initval { 232 - u16 reg; 233 - u16 val; 234 - }; 235 - 236 231 enum miphy_sata_gen { SATA_GEN1, SATA_GEN2, SATA_GEN3 }; 237 232 238 233 static char *PHY_TYPE_name[] = { "sata-up", "pcie-up", "", "usb3-up" };
+10
drivers/phy/starfive/Kconfig
··· 15 15 system. If M is selected, the module will be called 16 16 phy-jh7110-dphy-rx.ko. 17 17 18 + config PHY_STARFIVE_JH7110_DPHY_TX 19 + tristate "StarFive JH7110 D-PHY TX Support" 20 + depends on HAS_IOMEM 21 + select GENERIC_PHY 22 + select GENERIC_PHY_MIPI_DPHY 23 + help 24 + Choose this option if you have a StarFive D-PHY TX in your 25 + system. If M is selected, the module will be called 26 + phy-jh7110-dphy-tx.ko. 27 + 18 28 config PHY_STARFIVE_JH7110_PCIE 19 29 tristate "Starfive JH7110 PCIE 2.0/USB 3.0 PHY support" 20 30 depends on HAS_IOMEM
+1
drivers/phy/starfive/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_RX) += phy-jh7110-dphy-rx.o 3 + obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_TX) += phy-jh7110-dphy-tx.o 3 4 obj-$(CONFIG_PHY_STARFIVE_JH7110_PCIE) += phy-jh7110-pcie.o 4 5 obj-$(CONFIG_PHY_STARFIVE_JH7110_USB) += phy-jh7110-usb.o
-5
drivers/phy/starfive/phy-jh7110-dphy-rx.c
··· 46 46 47 47 #define STF_MAP_LANES_NUM 6 48 48 49 - struct regval { 50 - u32 addr; 51 - u32 val; 52 - }; 53 - 54 49 struct stf_dphy_info { 55 50 /** 56 51 * @maps:
+461
drivers/phy/starfive/phy-jh7110-dphy-tx.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * DPHY TX driver for the StarFive JH7110 SoC 4 + * 5 + * Copyright (C) 2023 StarFive Technology Co., Ltd. 6 + * Author: Keith Zhao <keith.zhao@starfivetech.com> 7 + * Author: Shengyang Chen <shengyang.chen@starfivetech.com> 8 + */ 9 + 10 + #include <linux/bitfield.h> 11 + #include <linux/clk.h> 12 + #include <linux/io.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/mfd/syscon.h> 15 + #include <linux/module.h> 16 + #include <linux/of.h> 17 + #include <linux/of_device.h> 18 + #include <linux/phy/phy.h> 19 + #include <linux/phy/phy-mipi-dphy.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/pm_runtime.h> 22 + #include <linux/reset.h> 23 + 24 + #define STF_DPHY_APBIFSAIF_SYSCFG(x) (x) 25 + 26 + #define STF_DPHY_AON_POWER_READY_N_ACTIVE 0 27 + #define STF_DPHY_AON_POWER_READY_N BIT(0) 28 + #define STF_DPHY_CFG_L0_SWAP_SEL GENMASK(14, 12) 29 + #define STF_DPHY_CFG_L1_SWAP_SEL GENMASK(17, 15) 30 + #define STF_DPHY_CFG_L2_SWAP_SEL GENMASK(20, 18) 31 + #define STF_DPHY_CFG_L3_SWAP_SEL GENMASK(23, 21) 32 + #define STF_DPHY_CFG_L4_SWAP_SEL GENMASK(26, 24) 33 + #define STF_DPHY_RGS_CDTX_PLL_UNLOCK BIT(18) 34 + #define STF_DPHY_RG_CDTX_L0N_HSTX_RES GENMASK(23, 19) 35 + #define STF_DPHY_RG_CDTX_L0P_HSTX_RES GENMASK(28, 24) 36 + 37 + #define STF_DPHY_RG_CDTX_L1P_HSTX_RES GENMASK(9, 5) 38 + #define STF_DPHY_RG_CDTX_L2N_HSTX_RES GENMASK(14, 10) 39 + #define STF_DPHY_RG_CDTX_L2P_HSTX_RES GENMASK(19, 15) 40 + #define STF_DPHY_RG_CDTX_L3N_HSTX_RES GENMASK(24, 20) 41 + #define STF_DPHY_RG_CDTX_L3P_HSTX_RES GENMASK(29, 25) 42 + 43 + #define STF_DPHY_RG_CDTX_L4N_HSTX_RES GENMASK(4, 0) 44 + #define STF_DPHY_RG_CDTX_L4P_HSTX_RES GENMASK(9, 5) 45 + #define STF_DPHY_RG_CDTX_PLL_FBK_FRA GENMASK(23, 0) 46 + 47 + #define STF_DPHY_RG_CDTX_PLL_FBK_INT GENMASK(8, 0) 48 + #define STF_DPHY_RG_CDTX_PLL_FM_EN BIT(9) 49 + #define STF_DPHY_RG_CDTX_PLL_LDO_STB_X2_EN BIT(10) 50 + #define STF_DPHY_RG_CDTX_PLL_PRE_DIV GENMASK(12, 
11) 51 + 52 + #define STF_DPHY_RG_CDTX_PLL_SSC_EN BIT(18) 53 + 54 + #define STF_DPHY_RG_CLANE_HS_CLK_POST_TIME GENMASK(7, 0) 55 + #define STF_DPHY_RG_CLANE_HS_CLK_PRE_TIME GENMASK(15, 8) 56 + #define STF_DPHY_RG_CLANE_HS_PRE_TIME GENMASK(23, 16) 57 + #define STF_DPHY_RG_CLANE_HS_TRAIL_TIME GENMASK(31, 24) 58 + 59 + #define STF_DPHY_RG_CLANE_HS_ZERO_TIME GENMASK(7, 0) 60 + #define STF_DPHY_RG_DLANE_HS_PRE_TIME GENMASK(15, 8) 61 + #define STF_DPHY_RG_DLANE_HS_TRAIL_TIME GENMASK(23, 16) 62 + #define STF_DPHY_RG_DLANE_HS_ZERO_TIME GENMASK(31, 24) 63 + 64 + #define STF_DPHY_RG_EXTD_CYCLE_SEL GENMASK(2, 0) 65 + #define STF_DPHY_SCFG_C_HS_PRE_ZERO_TIME GENMASK(31, 0) 66 + 67 + #define STF_DPHY_SCFG_DSI_TXREADY_ESC_SEL GENMASK(2, 1) 68 + #define STF_DPHY_SCFG_PPI_C_READY_SEL GENMASK(4, 3) 69 + 70 + #define STF_DPHY_REFCLK_IN_SEL GENMASK(28, 26) 71 + #define STF_DPHY_RESETB BIT(29) 72 + 73 + #define STF_DPHY_REFCLK_12M 1 74 + #define STF_DPHY_BITRATE_ALIGN 10000000 75 + 76 + #define STF_MAP_LANES_NUM 5 77 + 78 + #define STF_DPHY_LSHIFT_16(x) (FIELD_PREP(GENMASK(23, 16), (x))) 79 + #define STF_DPHY_LSHIFT_8(x) (FIELD_PREP(GENMASK(15, 8), (x))) 80 + 81 + #define STF_DPHY_HW_DELAY_US 200 82 + #define STF_DPHY_HW_TIMEOUT_US 5000 83 + 84 + struct stf_dphy_config { 85 + unsigned long bitrate; 86 + u32 pll_fbk_int; 87 + u32 pll_fbk_fra_val; 88 + u32 extd_cycle_sel; 89 + u32 dlane_hs_pre_time; 90 + u32 dlane_hs_zero_time; 91 + u32 dlane_hs_trail_time; 92 + u32 clane_hs_pre_time; 93 + u32 clane_hs_zero_time; 94 + u32 clane_hs_trail_time; 95 + u32 clane_hs_clk_pre_time; 96 + u32 clane_hs_clk_post_time; 97 + }; 98 + 99 + static const struct stf_dphy_config reg_configs[] = { 100 + {160000000, 0x6a, 0xaa, 0x3, 0xa, 0x17, 0x11, 0x5, 0x2b, 0xd, 0x7, 0x3d}, 101 + {170000000, 0x71, 0x55, 0x3, 0xb, 0x18, 0x11, 0x5, 0x2e, 0xd, 0x7, 0x3d}, 102 + {180000000, 0x78, 0x0, 0x3, 0xb, 0x19, 0x12, 0x6, 0x30, 0xe, 0x7, 0x3e}, 103 + {190000000, 0x7e, 0xaa, 0x3, 0xc, 0x1a, 0x12, 0x6, 0x33, 0xe, 0x7, 
0x3e}, 104 + {200000000, 0x85, 0x55, 0x3, 0xc, 0x1b, 0x13, 0x7, 0x35, 0xf, 0x7, 0x3f}, 105 + {320000000, 0x6a, 0xaa, 0x2, 0x8, 0x14, 0xf, 0x5, 0x2b, 0xd, 0x3, 0x23}, 106 + {330000000, 0x6e, 0x0, 0x2, 0x8, 0x15, 0xf, 0x5, 0x2d, 0xd, 0x3, 0x23}, 107 + {340000000, 0x71, 0x55, 0x2, 0x9, 0x15, 0xf, 0x5, 0x2e, 0xd, 0x3, 0x23}, 108 + {350000000, 0x74, 0xaa, 0x2, 0x9, 0x15, 0x10, 0x6, 0x2f, 0xe, 0x3, 0x24}, 109 + {360000000, 0x78, 0x0, 0x2, 0x9, 0x16, 0x10, 0x6, 0x30, 0xe, 0x3, 0x24}, 110 + {370000000, 0x7b, 0x55, 0x2, 0x9, 0x17, 0x10, 0x6, 0x32, 0xe, 0x3, 0x24}, 111 + {380000000, 0x7e, 0xaa, 0x2, 0xa, 0x17, 0x10, 0x6, 0x33, 0xe, 0x3, 0x24}, 112 + {390000000, 0x82, 0x0, 0x2, 0xa, 0x17, 0x11, 0x6, 0x35, 0xf, 0x3, 0x25}, 113 + {400000000, 0x85, 0x55, 0x2, 0xa, 0x18, 0x11, 0x7, 0x35, 0xf, 0x3, 0x25}, 114 + {410000000, 0x88, 0xaa, 0x2, 0xa, 0x19, 0x11, 0x7, 0x37, 0xf, 0x3, 0x25}, 115 + {420000000, 0x8c, 0x0, 0x2, 0xa, 0x19, 0x12, 0x7, 0x38, 0x10, 0x3, 0x26}, 116 + {430000000, 0x8f, 0x55, 0x2, 0xb, 0x19, 0x12, 0x7, 0x39, 0x10, 0x3, 0x26}, 117 + {440000000, 0x92, 0xaa, 0x2, 0xb, 0x1a, 0x12, 0x7, 0x3b, 0x10, 0x3, 0x26}, 118 + {450000000, 0x96, 0x0, 0x2, 0xb, 0x1b, 0x12, 0x8, 0x3c, 0x10, 0x3, 0x26}, 119 + {460000000, 0x99, 0x55, 0x2, 0xb, 0x1b, 0x13, 0x8, 0x3d, 0x11, 0x3, 0x27}, 120 + {470000000, 0x9c, 0xaa, 0x2, 0xc, 0x1b, 0x13, 0x8, 0x3e, 0x11, 0x3, 0x27}, 121 + {480000000, 0xa0, 0x27, 0x2, 0xc, 0x1c, 0x13, 0x8, 0x40, 0x11, 0x3, 0x27}, 122 + {490000000, 0xa3, 0x55, 0x2, 0xc, 0x1d, 0x14, 0x8, 0x42, 0x12, 0x3, 0x28}, 123 + {500000000, 0xa6, 0xaa, 0x2, 0xc, 0x1d, 0x14, 0x9, 0x42, 0x12, 0x3, 0x28}, 124 + {510000000, 0xaa, 0x0, 0x2, 0xc, 0x1e, 0x14, 0x9, 0x44, 0x12, 0x3, 0x28}, 125 + {520000000, 0xad, 0x55, 0x2, 0xd, 0x1e, 0x15, 0x9, 0x45, 0x13, 0x3, 0x29}, 126 + {530000000, 0xb0, 0xaa, 0x2, 0xd, 0x1e, 0x15, 0x9, 0x47, 0x13, 0x3, 0x29}, 127 + {540000000, 0xb4, 0x0, 0x2, 0xd, 0x1f, 0x15, 0x9, 0x48, 0x13, 0x3, 0x29}, 128 + {550000000, 0xb7, 0x55, 0x2, 0xd, 0x20, 0x16, 0x9, 0x4a, 0x14, 
0x3, 0x2a}, 129 + {560000000, 0xba, 0xaa, 0x2, 0xe, 0x20, 0x16, 0xa, 0x4a, 0x14, 0x3, 0x2a}, 130 + {570000000, 0xbe, 0x0, 0x2, 0xe, 0x20, 0x16, 0xa, 0x4c, 0x14, 0x3, 0x2a}, 131 + {580000000, 0xc1, 0x55, 0x2, 0xe, 0x21, 0x16, 0xa, 0x4d, 0x14, 0x3, 0x2a}, 132 + {590000000, 0xc4, 0xaa, 0x2, 0xe, 0x22, 0x17, 0xa, 0x4f, 0x15, 0x3, 0x2b}, 133 + {600000000, 0xc8, 0x0, 0x2, 0xe, 0x23, 0x17, 0xa, 0x50, 0x15, 0x3, 0x2b}, 134 + {610000000, 0xcb, 0x55, 0x2, 0xf, 0x22, 0x17, 0xb, 0x50, 0x15, 0x3, 0x2b}, 135 + {620000000, 0xce, 0xaa, 0x2, 0xf, 0x23, 0x18, 0xb, 0x52, 0x16, 0x3, 0x2c}, 136 + {630000000, 0x69, 0x0, 0x1, 0x7, 0x12, 0xd, 0x5, 0x2a, 0xc, 0x1, 0x15}, 137 + {640000000, 0x6a, 0xaa, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2b, 0xd, 0x1, 0x16}, 138 + {650000000, 0x6c, 0x55, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2c, 0xd, 0x1, 0x16}, 139 + {660000000, 0x6e, 0x0, 0x1, 0x7, 0x13, 0xe, 0x5, 0x2d, 0xd, 0x1, 0x16}, 140 + {670000000, 0x6f, 0xaa, 0x1, 0x8, 0x13, 0xe, 0x5, 0x2d, 0xd, 0x1, 0x16}, 141 + {680000000, 0x71, 0x55, 0x1, 0x8, 0x13, 0xe, 0x5, 0x2e, 0xd, 0x1, 0x16}, 142 + {690000000, 0x73, 0x0, 0x1, 0x8, 0x14, 0xe, 0x6, 0x2e, 0xd, 0x1, 0x16}, 143 + {700000000, 0x74, 0xaa, 0x1, 0x8, 0x14, 0xf, 0x6, 0x2f, 0xe, 0x1, 0x16}, 144 + {710000000, 0x76, 0x55, 0x1, 0x8, 0x14, 0xf, 0x6, 0x2f, 0xe, 0x1, 0x17}, 145 + {720000000, 0x78, 0x0, 0x1, 0x8, 0x15, 0xf, 0x6, 0x30, 0xe, 0x1, 0x17}, 146 + {730000000, 0x79, 0xaa, 0x1, 0x8, 0x15, 0xf, 0x6, 0x31, 0xe, 0x1, 0x17}, 147 + {740000000, 0x7b, 0x55, 0x1, 0x8, 0x15, 0xf, 0x6, 0x32, 0xe, 0x1, 0x17}, 148 + {750000000, 0x7d, 0x0, 0x1, 0x8, 0x16, 0xf, 0x6, 0x32, 0xe, 0x1, 0x17}, 149 + {760000000, 0x7e, 0xaa, 0x1, 0x9, 0x15, 0xf, 0x6, 0x33, 0xe, 0x1, 0x17}, 150 + {770000000, 0x80, 0x55, 0x1, 0x9, 0x15, 0x10, 0x6, 0x34, 0xf, 0x1, 0x18}, 151 + {780000000, 0x82, 0x0, 0x1, 0x9, 0x16, 0x10, 0x6, 0x35, 0xf, 0x1, 0x18,}, 152 + {790000000, 0x83, 0xaa, 0x1, 0x9, 0x16, 0x10, 0x7, 0x34, 0xf, 0x1, 0x18}, 153 + {800000000, 0x85, 0x55, 0x1, 0x9, 0x17, 0x10, 0x7, 0x35, 0xf, 0x1, 0x18}, 
154 + {810000000, 0x87, 0x0, 0x1, 0x9, 0x17, 0x10, 0x7, 0x36, 0xf, 0x1, 0x18}, 155 + {820000000, 0x88, 0xaa, 0x1, 0x9, 0x17, 0x10, 0x7, 0x37, 0xf, 0x1, 0x18}, 156 + {830000000, 0x8a, 0x55, 0x1, 0x9, 0x18, 0x10, 0x7, 0x37, 0xf, 0x1, 0x18}, 157 + {840000000, 0x8c, 0x0, 0x1, 0x9, 0x18, 0x11, 0x7, 0x38, 0x10, 0x1, 0x19}, 158 + {850000000, 0x8d, 0xaa, 0x1, 0xa, 0x17, 0x11, 0x7, 0x39, 0x10, 0x1, 0x19}, 159 + {860000000, 0x8f, 0x55, 0x1, 0xa, 0x18, 0x11, 0x7, 0x39, 0x10, 0x1, 0x19}, 160 + {870000000, 0x91, 0x0, 0x1, 0xa, 0x18, 0x11, 0x7, 0x3a, 0x10, 0x1, 0x19}, 161 + {880000000, 0x92, 0xaa, 0x1, 0xa, 0x18, 0x11, 0x7, 0x3b, 0x10, 0x1, 0x19}, 162 + {890000000, 0x94, 0x55, 0x1, 0xa, 0x19, 0x11, 0x7, 0x3c, 0x10, 0x1, 0x19}, 163 + {900000000, 0x96, 0x0, 0x1, 0xa, 0x19, 0x12, 0x8, 0x3c, 0x10, 0x1, 0x19}, 164 + {910000000, 0x97, 0xaa, 0x1, 0xa, 0x19, 0x12, 0x8, 0x3c, 0x11, 0x1, 0x1a}, 165 + {920000000, 0x99, 0x55, 0x1, 0xa, 0x1a, 0x12, 0x8, 0x3d, 0x11, 0x1, 0x1a}, 166 + {930000000, 0x9b, 0x0, 0x1, 0xa, 0x1a, 0x12, 0x8, 0x3e, 0x11, 0x1, 0x1a}, 167 + {940000000, 0x9c, 0xaa, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x3e, 0x11, 0x1, 0x1a}, 168 + {950000000, 0x9e, 0x55, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x3f, 0x11, 0x1, 0x1a}, 169 + {960000000, 0xa0, 0x0, 0x1, 0xb, 0x1a, 0x12, 0x8, 0x40, 0x11, 0x1, 0x1a}, 170 + {970000000, 0xa1, 0xaa, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x41, 0x12, 0x1, 0x1b}, 171 + {980000000, 0xa3, 0x55, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x42, 0x12, 0x1, 0x1b}, 172 + {990000000, 0xa5, 0x0, 0x1, 0xb, 0x1b, 0x13, 0x8, 0x42, 0x12, 0x1, 0x1b}, 173 + {1000000000, 0xa6, 0xaa, 0x1, 0xb, 0x1c, 0x13, 0x9, 0x42, 0x12, 0x1, 0x1b}, 174 + }; 175 + 176 + struct stf_dphy_info { 177 + /** 178 + * @maps: 179 + * 180 + * Physical lanes and logic lanes mapping table. 
181 + * 182 + * The default order is: 183 + * [data lane 0, data lane 1, data lane 2, date lane 3, clk lane] 184 + */ 185 + u8 maps[STF_MAP_LANES_NUM]; 186 + }; 187 + 188 + struct stf_dphy { 189 + struct device *dev; 190 + void __iomem *topsys; 191 + struct clk *txesc_clk; 192 + struct reset_control *sys_rst; 193 + 194 + struct phy_configure_opts_mipi_dphy config; 195 + 196 + struct phy *phy; 197 + const struct stf_dphy_info *info; 198 + }; 199 + 200 + static u32 stf_dphy_get_config_index(u32 bitrate) 201 + { 202 + u32 i; 203 + 204 + for (i = 0; i < ARRAY_SIZE(reg_configs); i++) { 205 + if (reg_configs[i].bitrate == bitrate) 206 + return i; 207 + } 208 + 209 + return 0; 210 + } 211 + 212 + static void stf_dphy_hw_reset(struct stf_dphy *dphy, int assert) 213 + { 214 + int rc; 215 + u32 status = 0; 216 + 217 + writel(FIELD_PREP(STF_DPHY_RESETB, assert), 218 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100)); 219 + 220 + if (assert) { 221 + rc = readl_poll_timeout_atomic(dphy->topsys + 222 + STF_DPHY_APBIFSAIF_SYSCFG(8), 223 + status, 224 + !(FIELD_GET(STF_DPHY_RGS_CDTX_PLL_UNLOCK, status)), 225 + STF_DPHY_HW_DELAY_US, STF_DPHY_HW_TIMEOUT_US); 226 + if (rc) 227 + dev_err(dphy->dev, "MIPI dphy-tx # PLL Locked\n"); 228 + } 229 + } 230 + 231 + static int stf_dphy_configure(struct phy *phy, union phy_configure_opts *opts) 232 + { 233 + struct stf_dphy *dphy = phy_get_drvdata(phy); 234 + const struct stf_dphy_info *info = dphy->info; 235 + const struct stf_dphy_config *p = reg_configs; 236 + unsigned long alignment = STF_DPHY_BITRATE_ALIGN; 237 + u32 bitrate = opts->mipi_dphy.hs_clk_rate; 238 + u32 tmp; 239 + u32 i; 240 + 241 + if (bitrate % alignment) 242 + bitrate += alignment - (bitrate % alignment); 243 + 244 + i = stf_dphy_get_config_index(bitrate); 245 + 246 + tmp = readl(dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(100)); 247 + tmp &= ~STF_DPHY_REFCLK_IN_SEL; 248 + tmp |= FIELD_PREP(STF_DPHY_REFCLK_IN_SEL, STF_DPHY_REFCLK_12M); 249 + writel(tmp, dphy->topsys + 
STF_DPHY_APBIFSAIF_SYSCFG(100)); 250 + 251 + writel(FIELD_PREP(STF_DPHY_RG_CDTX_L0N_HSTX_RES, 0x10) | 252 + FIELD_PREP(STF_DPHY_RG_CDTX_L0P_HSTX_RES, 0x10), 253 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(8)); 254 + 255 + writel(FIELD_PREP(STF_DPHY_RG_CDTX_L0N_HSTX_RES, 0x10) | 256 + FIELD_PREP(STF_DPHY_RG_CDTX_L2N_HSTX_RES, 0x10) | 257 + FIELD_PREP(STF_DPHY_RG_CDTX_L3N_HSTX_RES, 0x10) | 258 + FIELD_PREP(STF_DPHY_RG_CDTX_L1P_HSTX_RES, 0x10) | 259 + FIELD_PREP(STF_DPHY_RG_CDTX_L2P_HSTX_RES, 0x10) | 260 + FIELD_PREP(STF_DPHY_RG_CDTX_L3P_HSTX_RES, 0x10), 261 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(12)); 262 + 263 + writel(FIELD_PREP(STF_DPHY_RG_CDTX_L4N_HSTX_RES, 0x10) | 264 + FIELD_PREP(STF_DPHY_RG_CDTX_L4P_HSTX_RES, 0x10), 265 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(16)); 266 + 267 + /* Lane setting */ 268 + writel(FIELD_PREP(STF_DPHY_AON_POWER_READY_N, 269 + STF_DPHY_AON_POWER_READY_N_ACTIVE) | 270 + FIELD_PREP(STF_DPHY_CFG_L0_SWAP_SEL, info->maps[0]) | 271 + FIELD_PREP(STF_DPHY_CFG_L1_SWAP_SEL, info->maps[1]) | 272 + FIELD_PREP(STF_DPHY_CFG_L2_SWAP_SEL, info->maps[2]) | 273 + FIELD_PREP(STF_DPHY_CFG_L3_SWAP_SEL, info->maps[3]) | 274 + FIELD_PREP(STF_DPHY_CFG_L4_SWAP_SEL, info->maps[4]), 275 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(0)); 276 + 277 + /* PLL setting */ 278 + writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_SSC_EN, 0x0), 279 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(28)); 280 + 281 + writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_LDO_STB_X2_EN, 0x1) | 282 + FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FM_EN, 0x1) | 283 + FIELD_PREP(STF_DPHY_RG_CDTX_PLL_PRE_DIV, 0x0) | 284 + FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FBK_INT, p[i].pll_fbk_int), 285 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(24)); 286 + 287 + writel(FIELD_PREP(STF_DPHY_RG_CDTX_PLL_FBK_FRA, 288 + STF_DPHY_LSHIFT_16(p[i].pll_fbk_fra_val) | 289 + STF_DPHY_LSHIFT_8(p[i].pll_fbk_fra_val) | 290 + p[i].pll_fbk_fra_val), 291 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(20)); 292 + 293 + 
writel(FIELD_PREP(STF_DPHY_RG_EXTD_CYCLE_SEL, p[i].extd_cycle_sel), 294 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(40)); 295 + 296 + writel(FIELD_PREP(STF_DPHY_RG_DLANE_HS_PRE_TIME, p[i].dlane_hs_pre_time) | 297 + FIELD_PREP(STF_DPHY_RG_DLANE_HS_ZERO_TIME, p[i].dlane_hs_zero_time) | 298 + FIELD_PREP(STF_DPHY_RG_DLANE_HS_TRAIL_TIME, p[i].dlane_hs_trail_time) | 299 + FIELD_PREP(STF_DPHY_RG_CLANE_HS_ZERO_TIME, p[i].clane_hs_zero_time), 300 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(36)); 301 + 302 + writel(FIELD_PREP(STF_DPHY_RG_CLANE_HS_PRE_TIME, p[i].clane_hs_pre_time) | 303 + FIELD_PREP(STF_DPHY_RG_CLANE_HS_TRAIL_TIME, p[i].clane_hs_trail_time) | 304 + FIELD_PREP(STF_DPHY_RG_CLANE_HS_CLK_PRE_TIME, p[i].clane_hs_clk_pre_time) | 305 + FIELD_PREP(STF_DPHY_RG_CLANE_HS_CLK_POST_TIME, p[i].clane_hs_clk_post_time), 306 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(32)); 307 + 308 + return 0; 309 + } 310 + 311 + static int stf_dphy_init(struct phy *phy) 312 + { 313 + struct stf_dphy *dphy = phy_get_drvdata(phy); 314 + int ret; 315 + 316 + stf_dphy_hw_reset(dphy, 1); 317 + 318 + writel(FIELD_PREP(STF_DPHY_SCFG_PPI_C_READY_SEL, 0) | 319 + FIELD_PREP(STF_DPHY_SCFG_DSI_TXREADY_ESC_SEL, 0), 320 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(48)); 321 + 322 + writel(FIELD_PREP(STF_DPHY_SCFG_C_HS_PRE_ZERO_TIME, 0x30), 323 + dphy->topsys + STF_DPHY_APBIFSAIF_SYSCFG(44)); 324 + 325 + ret = clk_prepare_enable(dphy->txesc_clk); 326 + if (ret) { 327 + dev_err(dphy->dev, "Failed to prepare/enable txesc_clk\n"); 328 + return ret; 329 + } 330 + 331 + ret = reset_control_deassert(dphy->sys_rst); 332 + if (ret) { 333 + dev_err(dphy->dev, "Failed to deassert sys_rst\n"); 334 + return ret; 335 + } 336 + 337 + return 0; 338 + } 339 + 340 + static int stf_dphy_exit(struct phy *phy) 341 + { 342 + struct stf_dphy *dphy = phy_get_drvdata(phy); 343 + int ret; 344 + 345 + ret = reset_control_assert(dphy->sys_rst); 346 + if (ret) { 347 + dev_err(dphy->dev, "Failed to assert sys_rst\n"); 348 + return ret; 
349 + } 350 + 351 + clk_disable_unprepare(dphy->txesc_clk); 352 + 353 + stf_dphy_hw_reset(dphy, 0); 354 + 355 + return 0; 356 + } 357 + 358 + static int stf_dphy_power_on(struct phy *phy) 359 + { 360 + struct stf_dphy *dphy = phy_get_drvdata(phy); 361 + 362 + return pm_runtime_resume_and_get(dphy->dev); 363 + } 364 + 365 + static int stf_dphy_validate(struct phy *phy, enum phy_mode mode, int submode, 366 + union phy_configure_opts *opts) 367 + { 368 + if (mode != PHY_MODE_MIPI_DPHY) 369 + return -EINVAL; 370 + 371 + return 0; 372 + } 373 + 374 + static int stf_dphy_power_off(struct phy *phy) 375 + { 376 + struct stf_dphy *dphy = phy_get_drvdata(phy); 377 + 378 + return pm_runtime_put_sync(dphy->dev); 379 + } 380 + 381 + static const struct phy_ops stf_dphy_ops = { 382 + .power_on = stf_dphy_power_on, 383 + .power_off = stf_dphy_power_off, 384 + .init = stf_dphy_init, 385 + .exit = stf_dphy_exit, 386 + .configure = stf_dphy_configure, 387 + .validate = stf_dphy_validate, 388 + .owner = THIS_MODULE, 389 + }; 390 + 391 + static int stf_dphy_probe(struct platform_device *pdev) 392 + { 393 + struct phy_provider *phy_provider; 394 + struct stf_dphy *dphy; 395 + 396 + dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); 397 + if (!dphy) 398 + return -ENOMEM; 399 + 400 + dphy->info = of_device_get_match_data(&pdev->dev); 401 + 402 + dphy->dev = &pdev->dev; 403 + dev_set_drvdata(&pdev->dev, dphy); 404 + 405 + dphy->topsys = devm_platform_ioremap_resource(pdev, 0); 406 + if (IS_ERR(dphy->topsys)) 407 + return PTR_ERR(dphy->topsys); 408 + 409 + pm_runtime_enable(&pdev->dev); 410 + 411 + dphy->txesc_clk = devm_clk_get(&pdev->dev, "txesc"); 412 + if (IS_ERR(dphy->txesc_clk)) 413 + return dev_err_probe(&pdev->dev, PTR_ERR(dphy->txesc_clk), 414 + "Failed to get txesc clock\n"); 415 + 416 + dphy->sys_rst = devm_reset_control_get_exclusive(&pdev->dev, "sys"); 417 + if (IS_ERR(dphy->sys_rst)) 418 + return dev_err_probe(&pdev->dev, PTR_ERR(dphy->sys_rst), 419 + "Failed to get 
sys reset\n"); 420 + 421 + dphy->phy = devm_phy_create(&pdev->dev, NULL, &stf_dphy_ops); 422 + if (IS_ERR(dphy->phy)) 423 + return dev_err_probe(&pdev->dev, PTR_ERR(dphy->phy), 424 + "Failed to create phy\n"); 425 + 426 + phy_set_drvdata(dphy->phy, dphy); 427 + 428 + phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); 429 + if (IS_ERR(phy_provider)) 430 + return dev_err_probe(&pdev->dev, PTR_ERR(phy_provider), 431 + "Failed to register phy\n"); 432 + 433 + return 0; 434 + } 435 + 436 + static const struct stf_dphy_info starfive_dphy_info = { 437 + .maps = {0, 1, 2, 3, 4}, 438 + }; 439 + 440 + static const struct of_device_id stf_dphy_dt_ids[] = { 441 + { 442 + .compatible = "starfive,jh7110-dphy-tx", 443 + .data = &starfive_dphy_info, 444 + }, 445 + { /* sentinel */ }, 446 + }; 447 + MODULE_DEVICE_TABLE(of, stf_dphy_dt_ids); 448 + 449 + static struct platform_driver stf_dphy_driver = { 450 + .driver = { 451 + .name = "starfive-dphy-tx", 452 + .of_match_table = stf_dphy_dt_ids, 453 + }, 454 + .probe = stf_dphy_probe, 455 + }; 456 + module_platform_driver(stf_dphy_driver); 457 + 458 + MODULE_AUTHOR("Keith Zhao <keith.zhao@starfivetech.com>"); 459 + MODULE_AUTHOR("Shengyang Chen <shengyang.chen@starfivetech.com>"); 460 + MODULE_DESCRIPTION("StarFive JH7110 DPHY TX driver"); 461 + MODULE_LICENSE("GPL");
-1
drivers/phy/ti/phy-am654-serdes.c
··· 30 30 #define LANE_R058 0x258 31 31 #define LANE_R06c 0x26c 32 32 #define LANE_R070 0x270 33 - #define LANE_R070 0x270 34 33 #define LANE_R19C 0x39c 35 34 36 35 #define COMLANE_R004 0xa04
+83 -50
drivers/phy/ti/phy-j721e-wiz.c
··· 1076 1076 return ret; 1077 1077 } 1078 1078 1079 - static int wiz_clock_init(struct wiz *wiz, struct device_node *node) 1079 + static void wiz_clock_init(struct wiz *wiz) 1080 1080 { 1081 - const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel; 1082 - struct device *dev = wiz->dev; 1083 - struct device_node *clk_node; 1084 - const char *node_name; 1085 1081 unsigned long rate; 1086 - struct clk *clk; 1087 - int ret; 1088 - int i; 1089 1082 1090 - clk = devm_clk_get(dev, "core_ref_clk"); 1091 - if (IS_ERR(clk)) { 1092 - dev_err(dev, "core_ref_clk clock not found\n"); 1093 - ret = PTR_ERR(clk); 1094 - return ret; 1095 - } 1096 - wiz->input_clks[WIZ_CORE_REFCLK] = clk; 1097 - 1098 - rate = clk_get_rate(clk); 1099 - if (rate >= 100000000) 1083 + rate = clk_get_rate(wiz->input_clks[WIZ_CORE_REFCLK]); 1084 + if (rate >= REF_CLK_100MHZ) 1100 1085 regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x1); 1101 1086 else 1102 1087 regmap_field_write(wiz->pma_cmn_refclk_int_mode, 0x3); ··· 1105 1120 break; 1106 1121 } 1107 1122 1108 - if (wiz->data->pma_cmn_refclk1_int_mode) { 1109 - clk = devm_clk_get(dev, "core_ref1_clk"); 1110 - if (IS_ERR(clk)) { 1111 - dev_err(dev, "core_ref1_clk clock not found\n"); 1112 - ret = PTR_ERR(clk); 1113 - return ret; 1114 - } 1115 - wiz->input_clks[WIZ_CORE_REFCLK1] = clk; 1116 - 1117 - rate = clk_get_rate(clk); 1118 - if (rate >= 100000000) 1123 + if (wiz->input_clks[WIZ_CORE_REFCLK1]) { 1124 + rate = clk_get_rate(wiz->input_clks[WIZ_CORE_REFCLK1]); 1125 + if (rate >= REF_CLK_100MHZ) 1119 1126 regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x1); 1120 1127 else 1121 1128 regmap_field_write(wiz->pma_cmn_refclk1_int_mode, 0x3); 1122 1129 } 1123 1130 1124 - clk = devm_clk_get(dev, "ext_ref_clk"); 1125 - if (IS_ERR(clk)) { 1126 - dev_err(dev, "ext_ref_clk clock not found\n"); 1127 - ret = PTR_ERR(clk); 1128 - return ret; 1129 - } 1130 - wiz->input_clks[WIZ_EXT_REFCLK] = clk; 1131 - 1132 - rate = clk_get_rate(clk); 1133 - if (rate >= 
100000000) 1131 + rate = clk_get_rate(wiz->input_clks[WIZ_EXT_REFCLK]); 1132 + if (rate >= REF_CLK_100MHZ) 1134 1133 regmap_field_write(wiz->pma_cmn_refclk_mode, 0x0); 1135 1134 else 1136 1135 regmap_field_write(wiz->pma_cmn_refclk_mode, 0x2); 1136 + } 1137 + 1138 + static int wiz_clock_probe(struct wiz *wiz, struct device_node *node) 1139 + { 1140 + const struct wiz_clk_mux_sel *clk_mux_sel = wiz->clk_mux_sel; 1141 + struct device *dev = wiz->dev; 1142 + struct device_node *clk_node; 1143 + const char *node_name; 1144 + struct clk *clk; 1145 + int ret; 1146 + int i; 1147 + 1148 + clk = devm_clk_get(dev, "core_ref_clk"); 1149 + if (IS_ERR(clk)) 1150 + return dev_err_probe(dev, PTR_ERR(clk), 1151 + "core_ref_clk clock not found\n"); 1152 + 1153 + wiz->input_clks[WIZ_CORE_REFCLK] = clk; 1154 + 1155 + if (wiz->data->pma_cmn_refclk1_int_mode) { 1156 + clk = devm_clk_get(dev, "core_ref1_clk"); 1157 + if (IS_ERR(clk)) 1158 + return dev_err_probe(dev, PTR_ERR(clk), 1159 + "core_ref1_clk clock not found\n"); 1160 + 1161 + wiz->input_clks[WIZ_CORE_REFCLK1] = clk; 1162 + } 1163 + 1164 + clk = devm_clk_get(dev, "ext_ref_clk"); 1165 + if (IS_ERR(clk)) 1166 + return dev_err_probe(dev, PTR_ERR(clk), 1167 + "ext_ref_clk clock not found\n"); 1168 + 1169 + wiz->input_clks[WIZ_EXT_REFCLK] = clk; 1170 + 1171 + wiz_clock_init(wiz); 1137 1172 1138 1173 switch (wiz->type) { 1139 1174 case AM64_WIZ_10G: ··· 1162 1157 case J721S2_WIZ_10G: 1163 1158 ret = wiz_clock_register(wiz); 1164 1159 if (ret) 1165 - dev_err(dev, "Failed to register wiz clocks\n"); 1166 - return ret; 1160 + return dev_err_probe(dev, ret, "Failed to register wiz clocks\n"); 1161 + 1162 + return 0; 1167 1163 default: 1168 1164 break; 1169 1165 } ··· 1173 1167 node_name = clk_mux_sel[i].node_name; 1174 1168 clk_node = of_get_child_by_name(node, node_name); 1175 1169 if (!clk_node) { 1176 - dev_err(dev, "Unable to get %s node\n", node_name); 1177 - ret = -EINVAL; 1170 + ret = dev_err_probe(dev, -EINVAL, "Unable to get %s 
node\n", node_name); 1178 1171 goto err; 1179 1172 } 1180 1173 1181 1174 ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i], 1182 1175 clk_mux_sel[i].table); 1183 1176 if (ret) { 1184 - dev_err(dev, "Failed to register %s clock\n", 1185 - node_name); 1177 + dev_err_probe(dev, ret, "Failed to register %s clock\n", 1178 + node_name); 1186 1179 of_node_put(clk_node); 1187 1180 goto err; 1188 1181 } ··· 1193 1188 node_name = clk_div_sel[i].node_name; 1194 1189 clk_node = of_get_child_by_name(node, node_name); 1195 1190 if (!clk_node) { 1196 - dev_err(dev, "Unable to get %s node\n", node_name); 1197 - ret = -EINVAL; 1191 + ret = dev_err_probe(dev, -EINVAL, "Unable to get %s node\n", node_name); 1198 1192 goto err; 1199 1193 } 1200 1194 1201 1195 ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i], 1202 1196 clk_div_sel[i].table); 1203 1197 if (ret) { 1204 - dev_err(dev, "Failed to register %s clock\n", 1205 - node_name); 1198 + dev_err_probe(dev, ret, "Failed to register %s clock\n", 1199 + node_name); 1206 1200 of_node_put(clk_node); 1207 1201 goto err; 1208 1202 } ··· 1597 1593 goto err_get_sync; 1598 1594 } 1599 1595 1600 - ret = wiz_clock_init(wiz, node); 1596 + ret = wiz_clock_probe(wiz, node); 1601 1597 if (ret < 0) { 1602 1598 dev_warn(dev, "Failed to initialize clocks\n"); 1603 1599 goto err_get_sync; ··· 1659 1655 pm_runtime_disable(dev); 1660 1656 } 1661 1657 1658 + static int wiz_resume_noirq(struct device *dev) 1659 + { 1660 + struct device_node *node = dev->of_node; 1661 + struct wiz *wiz = dev_get_drvdata(dev); 1662 + int ret; 1663 + 1664 + /* Enable supplemental Control override if available */ 1665 + if (wiz->sup_legacy_clk_override) 1666 + regmap_field_write(wiz->sup_legacy_clk_override, 1); 1667 + 1668 + wiz_clock_init(wiz); 1669 + 1670 + ret = wiz_init(wiz); 1671 + if (ret) { 1672 + dev_err(dev, "WIZ initialization failed\n"); 1673 + goto err_wiz_init; 1674 + } 1675 + 1676 + return 0; 1677 + 1678 + err_wiz_init: 1679 + 
wiz_clock_cleanup(wiz, node); 1680 + 1681 + return ret; 1682 + } 1683 + 1684 + static DEFINE_NOIRQ_DEV_PM_OPS(wiz_pm_ops, NULL, wiz_resume_noirq); 1685 + 1662 1686 static struct platform_driver wiz_driver = { 1663 1687 .probe = wiz_probe, 1664 1688 .remove_new = wiz_remove, 1665 1689 .driver = { 1666 1690 .name = "wiz", 1667 1691 .of_match_table = wiz_id_table, 1692 + .pm = pm_sleep_ptr(&wiz_pm_ops), 1668 1693 }, 1669 1694 }; 1670 1695 module_platform_driver(wiz_driver);
+100 -98
drivers/phy/xilinx/phy-zynqmp.c
··· 13 13 */ 14 14 15 15 #include <linux/clk.h> 16 + #include <linux/debugfs.h> 16 17 #include <linux/delay.h> 17 18 #include <linux/io.h> 18 19 #include <linux/kernel.h> ··· 81 80 82 81 /* Reference clock selection parameters */ 83 82 #define L0_Ln_REF_CLK_SEL(n) (0x2860 + (n) * 4) 84 - #define L0_REF_CLK_SEL_MASK 0x8f 83 + #define L0_REF_CLK_LCL_SEL BIT(7) 84 + #define L0_REF_CLK_SEL_MASK 0x9f 85 85 86 86 /* Calibration digital logic parameters */ 87 87 #define L3_TM_CALIB_DIG19 0xec4c ··· 124 122 #define ICM_PROTOCOL_DP 0x4 125 123 #define ICM_PROTOCOL_SGMII 0x5 126 124 125 + static const char *const xpsgtr_icm_str[] = { 126 + [ICM_PROTOCOL_PD] = "none", 127 + [ICM_PROTOCOL_PCIE] = "PCIe", 128 + [ICM_PROTOCOL_SATA] = "SATA", 129 + [ICM_PROTOCOL_USB] = "USB", 130 + [ICM_PROTOCOL_DP] = "DisplayPort", 131 + [ICM_PROTOCOL_SGMII] = "SGMII", 132 + }; 133 + 127 134 /* Test Mode common reset control parameters */ 128 135 #define TM_CMN_RST 0x10018 129 136 #define TM_CMN_RST_EN 0x1 ··· 157 146 /* Total number of controllers */ 158 147 #define CONTROLLERS_PER_LANE 5 159 148 160 - /* Protocol Type parameters */ 161 - #define XPSGTR_TYPE_USB0 0 /* USB controller 0 */ 162 - #define XPSGTR_TYPE_USB1 1 /* USB controller 1 */ 163 - #define XPSGTR_TYPE_SATA_0 2 /* SATA controller lane 0 */ 164 - #define XPSGTR_TYPE_SATA_1 3 /* SATA controller lane 1 */ 165 - #define XPSGTR_TYPE_PCIE_0 4 /* PCIe controller lane 0 */ 166 - #define XPSGTR_TYPE_PCIE_1 5 /* PCIe controller lane 1 */ 167 - #define XPSGTR_TYPE_PCIE_2 6 /* PCIe controller lane 2 */ 168 - #define XPSGTR_TYPE_PCIE_3 7 /* PCIe controller lane 3 */ 169 - #define XPSGTR_TYPE_DP_0 8 /* Display Port controller lane 0 */ 170 - #define XPSGTR_TYPE_DP_1 9 /* Display Port controller lane 1 */ 171 - #define XPSGTR_TYPE_SGMII0 10 /* Ethernet SGMII controller 0 */ 172 - #define XPSGTR_TYPE_SGMII1 11 /* Ethernet SGMII controller 1 */ 173 - #define XPSGTR_TYPE_SGMII2 12 /* Ethernet SGMII controller 2 */ 174 - #define XPSGTR_TYPE_SGMII3 
13 /* Ethernet SGMII controller 3 */ 175 - 176 149 /* Timeout values */ 177 150 #define TIMEOUT_US 1000 178 151 ··· 179 184 /** 180 185 * struct xpsgtr_phy - representation of a lane 181 186 * @phy: pointer to the kernel PHY device 182 - * @type: controller which uses this lane 187 + * @instance: instance of the protocol type (such as the lane within a 188 + * protocol, or the USB/Ethernet controller) 183 189 * @lane: lane number 184 190 * @protocol: protocol in which the lane operates 185 191 * @skip_phy_init: skip phy_init() if true ··· 189 193 */ 190 194 struct xpsgtr_phy { 191 195 struct phy *phy; 192 - u8 type; 196 + u8 instance; 193 197 u8 lane; 194 198 u8 protocol; 195 199 bool skip_phy_init; ··· 304 308 struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy); 305 309 struct xpsgtr_dev *gtr_dev = gtr_phy->dev; 306 310 unsigned int timeout = TIMEOUT_US; 311 + u8 protocol = gtr_phy->protocol; 307 312 int ret; 308 313 309 314 dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n"); 310 315 316 + /* 317 + * For DP and PCIe, only the instance 0 PLL is used. Switch to that phy 318 + * so we wait on the right PLL. 
319 + */ 320 + if ((protocol == ICM_PROTOCOL_DP || protocol == ICM_PROTOCOL_PCIE) && 321 + gtr_phy->instance) { 322 + int i; 323 + 324 + for (i = 0; i < NUM_LANES; i++) { 325 + gtr_phy = &gtr_dev->phys[i]; 326 + 327 + if (gtr_phy->protocol == protocol && !gtr_phy->instance) 328 + goto got_phy; 329 + } 330 + 331 + return -EBUSY; 332 + } 333 + 334 + got_phy: 311 335 while (1) { 312 336 u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1); 313 337 ··· 346 330 347 331 if (ret == -ETIMEDOUT) 348 332 dev_err(gtr_dev->dev, 349 - "lane %u (type %u, protocol %u): PLL lock timeout\n", 350 - gtr_phy->lane, gtr_phy->type, gtr_phy->protocol); 333 + "lane %u (protocol %u, instance %u): PLL lock timeout\n", 334 + gtr_phy->lane, gtr_phy->protocol, gtr_phy->instance); 351 335 352 336 return ret; 353 337 } ··· 365 349 PLL_FREQ_MASK, ssc->pll_ref_clk); 366 350 367 351 /* Enable lane clock sharing, if required */ 368 - if (gtr_phy->refclk != gtr_phy->lane) { 369 - /* Lane3 Ref Clock Selection Register */ 352 + if (gtr_phy->refclk == gtr_phy->lane) 353 + xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane), 354 + L0_REF_CLK_SEL_MASK, L0_REF_CLK_LCL_SEL); 355 + else 370 356 xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane), 371 357 L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk); 372 - } 373 358 374 359 /* SSC step size [7:0] */ 375 360 xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB, ··· 590 573 mutex_lock(&gtr_dev->gtr_mutex); 591 574 592 575 /* Configure and enable the clock when peripheral phy_init call */ 593 - if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane])) 576 + if (clk_prepare_enable(gtr_dev->clk[gtr_phy->refclk])) 594 577 goto out; 595 578 596 579 /* Skip initialization if not required. 
*/ ··· 642 625 gtr_phy->skip_phy_init = false; 643 626 644 627 /* Ensure that disable clock only, which configure for lane */ 645 - clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]); 628 + clk_disable_unprepare(gtr_dev->clk[gtr_phy->refclk]); 646 629 647 630 return 0; 648 631 } ··· 655 638 /* Skip initialization if not required. */ 656 639 if (!xpsgtr_phy_init_required(gtr_phy)) 657 640 return ret; 658 - /* 659 - * Wait for the PLL to lock. For DP, only wait on DP0 to avoid 660 - * cumulating waits for both lanes. The user is expected to initialize 661 - * lane 0 last. 662 - */ 663 - if (gtr_phy->protocol != ICM_PROTOCOL_DP || 664 - gtr_phy->type == XPSGTR_TYPE_DP_0) 665 - ret = xpsgtr_wait_pll_lock(phy); 666 - 667 - return ret; 641 + return xpsgtr_wait_pll_lock(phy); 668 642 } 669 643 670 644 static int xpsgtr_phy_configure(struct phy *phy, union phy_configure_opts *opts) ··· 682 674 * OF Xlate Support 683 675 */ 684 676 685 - /* Set the lane type and protocol based on the PHY type and instance number. */ 677 + /* Set the lane protocol and instance based on the PHY type and instance number. 
*/ 686 678 static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type, 687 679 unsigned int phy_instance) 688 680 { 689 681 unsigned int num_phy_types; 690 - const int *phy_types; 691 682 692 683 switch (phy_type) { 693 - case PHY_TYPE_SATA: { 694 - static const int types[] = { 695 - XPSGTR_TYPE_SATA_0, 696 - XPSGTR_TYPE_SATA_1, 697 - }; 698 - 699 - phy_types = types; 700 - num_phy_types = ARRAY_SIZE(types); 684 + case PHY_TYPE_SATA: 685 + num_phy_types = 2; 701 686 gtr_phy->protocol = ICM_PROTOCOL_SATA; 702 687 break; 703 - } 704 - case PHY_TYPE_USB3: { 705 - static const int types[] = { 706 - XPSGTR_TYPE_USB0, 707 - XPSGTR_TYPE_USB1, 708 - }; 709 - 710 - phy_types = types; 711 - num_phy_types = ARRAY_SIZE(types); 688 + case PHY_TYPE_USB3: 689 + num_phy_types = 2; 712 690 gtr_phy->protocol = ICM_PROTOCOL_USB; 713 691 break; 714 - } 715 - case PHY_TYPE_DP: { 716 - static const int types[] = { 717 - XPSGTR_TYPE_DP_0, 718 - XPSGTR_TYPE_DP_1, 719 - }; 720 - 721 - phy_types = types; 722 - num_phy_types = ARRAY_SIZE(types); 692 + case PHY_TYPE_DP: 693 + num_phy_types = 2; 723 694 gtr_phy->protocol = ICM_PROTOCOL_DP; 724 695 break; 725 - } 726 - case PHY_TYPE_PCIE: { 727 - static const int types[] = { 728 - XPSGTR_TYPE_PCIE_0, 729 - XPSGTR_TYPE_PCIE_1, 730 - XPSGTR_TYPE_PCIE_2, 731 - XPSGTR_TYPE_PCIE_3, 732 - }; 733 - 734 - phy_types = types; 735 - num_phy_types = ARRAY_SIZE(types); 696 + case PHY_TYPE_PCIE: 697 + num_phy_types = 4; 736 698 gtr_phy->protocol = ICM_PROTOCOL_PCIE; 737 699 break; 738 - } 739 - case PHY_TYPE_SGMII: { 740 - static const int types[] = { 741 - XPSGTR_TYPE_SGMII0, 742 - XPSGTR_TYPE_SGMII1, 743 - XPSGTR_TYPE_SGMII2, 744 - XPSGTR_TYPE_SGMII3, 745 - }; 746 - 747 - phy_types = types; 748 - num_phy_types = ARRAY_SIZE(types); 700 + case PHY_TYPE_SGMII: 701 + num_phy_types = 4; 749 702 gtr_phy->protocol = ICM_PROTOCOL_SGMII; 750 703 break; 751 - } 752 704 default: 753 705 return -EINVAL; 754 706 } ··· 716 748 if (phy_instance >= 
num_phy_types) 717 749 return -EINVAL; 718 750 719 - gtr_phy->type = phy_types[phy_instance]; 751 + gtr_phy->instance = phy_instance; 720 752 return 0; 721 753 } 722 754 723 755 /* 724 - * Valid combinations of controllers and lanes (Interconnect Matrix). 756 + * Valid combinations of controllers and lanes (Interconnect Matrix). Each 757 + * "instance" represents one controller for a lane. For PCIe and DP, the 758 + * "instance" is the logical lane in the link. For SATA, USB, and SGMII, 759 + * the instance is the index of the controller. 760 + * 761 + * This information is only used to validate the devicetree reference, and is 762 + * not used when programming the hardware. 725 763 */ 726 764 static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = { 727 - { XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0, 728 - XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 }, 729 - { XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0, 730 - XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 }, 731 - { XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0, 732 - XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 }, 733 - { XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1, 734 - XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 } 765 + /* PCIe, SATA, USB, DP, SGMII */ 766 + { 0, 0, 0, 1, 0 }, /* Lane 0 */ 767 + { 1, 1, 0, 0, 1 }, /* Lane 1 */ 768 + { 2, 0, 0, 1, 2 }, /* Lane 2 */ 769 + { 3, 1, 1, 0, 3 }, /* Lane 3 */ 735 770 }; 736 771 737 772 /* Translate OF phandle and args to PHY instance. */ ··· 769 798 phy_type = args->args[1]; 770 799 phy_instance = args->args[2]; 771 800 801 + guard(mutex)(&gtr_phy->phy->mutex); 772 802 ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance); 773 803 if (ret < 0) { 774 804 dev_err(gtr_dev->dev, "Invalid PHY type and/or instance\n"); ··· 790 818 * is allowed to operate on the lane. 
791 819 */ 792 820 for (i = 0; i < CONTROLLERS_PER_LANE; i++) { 793 - if (icm_matrix[phy_lane][i] == gtr_phy->type) 821 + if (icm_matrix[phy_lane][i] == gtr_phy->instance) 794 822 return gtr_phy->phy; 795 823 } 796 824 797 825 return ERR_PTR(-EINVAL); 826 + } 827 + 828 + /* 829 + * DebugFS 830 + */ 831 + 832 + static int xpsgtr_status_read(struct seq_file *seq, void *data) 833 + { 834 + struct device *dev = seq->private; 835 + struct xpsgtr_phy *gtr_phy = dev_get_drvdata(dev); 836 + struct clk *clk; 837 + u32 pll_status; 838 + 839 + mutex_lock(&gtr_phy->phy->mutex); 840 + pll_status = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1); 841 + clk = gtr_phy->dev->clk[gtr_phy->refclk]; 842 + 843 + seq_printf(seq, "Lane: %u\n", gtr_phy->lane); 844 + seq_printf(seq, "Protocol: %s\n", 845 + xpsgtr_icm_str[gtr_phy->protocol]); 846 + seq_printf(seq, "Instance: %u\n", gtr_phy->instance); 847 + seq_printf(seq, "Reference clock: %u (%pC)\n", gtr_phy->refclk, clk); 848 + seq_printf(seq, "Reference rate: %lu\n", clk_get_rate(clk)); 849 + seq_printf(seq, "PLL locked: %s\n", 850 + pll_status & PLL_STATUS_LOCKED ? "yes" : "no"); 851 + 852 + mutex_unlock(&gtr_phy->phy->mutex); 853 + return 0; 798 854 } 799 855 800 856 /* ··· 974 974 975 975 gtr_phy->phy = phy; 976 976 phy_set_drvdata(phy, gtr_phy); 977 + debugfs_create_devm_seqfile(&phy->dev, "status", phy->debugfs, 978 + xpsgtr_status_read); 977 979 } 978 980 979 981 /* Register the PHY provider. */
+2
include/linux/soc/samsung/exynos-regs-pmu.h
··· 660 660 /* For Tensor GS101 */ 661 661 #define GS101_SYSIP_DAT0 (0x810) 662 662 #define GS101_SYSTEM_CONFIGURATION (0x3A00) 663 + #define GS101_PHY_CTRL_USB20 (0x3EB0) 664 + #define GS101_PHY_CTRL_USBDP (0x3EB4) 663 665 664 666 #endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */