Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'phy-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy

Pull phy updates from Vinod Koul:
"New hw support:
- Rcar usb2 support for RZ/G3S SoC
- Nuvoton MA35 SoC USB 2.0 PHY driver

Removed:
- obsolete qcom,usb-8x16-phy bindings

Updates:
- 4 lane PCIe support for Qualcomm X1E80100
- Constify structure in subsystem update
- Subsystem simplification with scoped for each OF child loop update
- YAML conversion for Qualcomm SATA phy, HiSilicon hi3798cv200-combphy
bindings"

* tag 'phy-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy: (40 commits)
phy: renesas: rcar-gen3-usb2: Add support for the RZ/G3S SoC
dt-bindings: phy: renesas,usb2-phy: Document RZ/G3S phy bindings
phy: renesas: rcar-gen3-usb2: Add support to initialize the bus
phy: ti: j721e-wiz: Simplify with scoped for each OF child loop
phy: ti: j721e-wiz: Drop OF node reference earlier for simpler code
phy: ti: gmii-sel: Simplify with dev_err_probe()
phy: ti: am654-serdes: Use scoped device node handling to simplify error paths
phy: qcom: qmp-pcie-msm8996: Simplify with scoped for each OF child loop
phy: mediatek: xsphy: Simplify with scoped for each OF child loop
phy: mediatek: tphy: Simplify with scoped for each OF child loop
phy: hisilicon: usb2: Simplify with scoped for each OF child loop
phy: cadence: sierra: Simplify with scoped for each OF child loop
phy: broadcom: brcm-sata: Simplify with scoped for each OF child loop
phy: broadcom: bcm-cygnus-pcie: Simplify with scoped for each OF child loop
phy: nuvoton: add new driver for the Nuvoton MA35 SoC USB 2.0 PHY
dt-bindings: phy: nuvoton,ma35-usb2-phy: add new bindings
phy: qcom: qmp-pcie: Configure all tables on port B PHY
phy: airoha: adjust initialization delay in airoha_pcie_phy_init()
dt-bindings: phy: socionext,uniphier: add top-level constraints
phy: qcom: qmp-pcie: Add Gen4 4-lanes mode for X1E80100
...

+1194 -796
+56
Documentation/devicetree/bindings/phy/hisilicon,hi3798cv200-combphy.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/hisilicon,hi3798cv200-combphy.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: HiSilicon STB PCIE/SATA/USB3 PHY 8 + 9 + maintainers: 10 + - Shawn Guo <shawn.guo@linaro.org> 11 + 12 + properties: 13 + compatible: 14 + const: hisilicon,hi3798cv200-combphy 15 + 16 + reg: 17 + maxItems: 1 18 + 19 + '#phy-cells': 20 + description: The cell contains the PHY mode 21 + const: 1 22 + 23 + clocks: 24 + maxItems: 1 25 + 26 + resets: 27 + maxItems: 1 28 + 29 + hisilicon,fixed-mode: 30 + description: If the phy device doesn't support mode select but a fixed mode 31 + setting, the property should be present to specify the particular mode. 32 + $ref: /schemas/types.yaml#/definitions/uint32 33 + enum: [ 1, 2, 4] # SATA, PCIE, USB3 34 + 35 + hisilicon,mode-select-bits: 36 + description: If the phy device support mode select, this property should be 37 + present to specify the register bits in peripheral controller. 38 + items: 39 + - description: register_offset 40 + - description: bit shift 41 + - description: bit mask 42 + 43 + required: 44 + - compatible 45 + - reg 46 + - '#phy-cells' 47 + - clocks 48 + - resets 49 + 50 + oneOf: 51 + - required: ['hisilicon,fixed-mode'] 52 + - required: ['hisilicon,mode-select-bits'] 53 + 54 + additionalProperties: false 55 + 56 + ...
+45
Documentation/devicetree/bindings/phy/nuvoton,ma35d1-usb2-phy.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/nuvoton,ma35d1-usb2-phy.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Nuvoton MA35D1 USB2 phy 8 + 9 + maintainers: 10 + - Hui-Ping Chen <hpchen0nvt@gmail.com> 11 + 12 + properties: 13 + compatible: 14 + enum: 15 + - nuvoton,ma35d1-usb2-phy 16 + 17 + "#phy-cells": 18 + const: 0 19 + 20 + clocks: 21 + maxItems: 1 22 + 23 + nuvoton,sys: 24 + $ref: /schemas/types.yaml#/definitions/phandle 25 + description: 26 + phandle to syscon for checking the PHY clock status. 27 + 28 + required: 29 + - compatible 30 + - "#phy-cells" 31 + - clocks 32 + - nuvoton,sys 33 + 34 + additionalProperties: false 35 + 36 + examples: 37 + - | 38 + #include <dt-bindings/clock/nuvoton,ma35d1-clk.h> 39 + 40 + usb_phy: usb-phy { 41 + compatible = "nuvoton,ma35d1-usb2-phy"; 42 + clocks = <&clk USBD_GATE>; 43 + nuvoton,sys = <&sys>; 44 + #phy-cells = <0>; 45 + };
-59
Documentation/devicetree/bindings/phy/phy-hi3798cv200-combphy.txt
··· 1 - HiSilicon STB PCIE/SATA/USB3 PHY 2 - 3 - Required properties: 4 - - compatible: Should be "hisilicon,hi3798cv200-combphy" 5 - - reg: Should be the address space for COMBPHY configuration and state 6 - registers in peripheral controller, e.g. PERI_COMBPHY0_CFG and 7 - PERI_COMBPHY0_STATE for COMBPHY0 Hi3798CV200 SoC. 8 - - #phy-cells: Should be 1. The cell number is used to select the phy mode 9 - as defined in <dt-bindings/phy/phy.h>. 10 - - clocks: The phandle to clock provider and clock specifier pair. 11 - - resets: The phandle to reset controller and reset specifier pair. 12 - 13 - Refer to phy/phy-bindings.txt for the generic PHY binding properties. 14 - 15 - Optional properties: 16 - - hisilicon,fixed-mode: If the phy device doesn't support mode select 17 - but a fixed mode setting, the property should be present to specify 18 - the particular mode. 19 - - hisilicon,mode-select-bits: If the phy device support mode select, 20 - this property should be present to specify the register bits in 21 - peripheral controller, as a 3 integers tuple: 22 - <register_offset bit_shift bit_mask>. 23 - 24 - Notes: 25 - - Between hisilicon,fixed-mode and hisilicon,mode-select-bits, one and only 26 - one of them should be present. 27 - - The device node should be a child of peripheral controller that contains 28 - COMBPHY configuration/state and PERI_CTRL register used to select PHY mode. 29 - Refer to arm/hisilicon/hisilicon.txt for the parent peripheral controller 30 - bindings. 
31 - 32 - Examples: 33 - 34 - perictrl: peripheral-controller@8a20000 { 35 - compatible = "hisilicon,hi3798cv200-perictrl", "syscon", 36 - "simple-mfd"; 37 - reg = <0x8a20000 0x1000>; 38 - #address-cells = <1>; 39 - #size-cells = <1>; 40 - ranges = <0x0 0x8a20000 0x1000>; 41 - 42 - combphy0: phy@850 { 43 - compatible = "hisilicon,hi3798cv200-combphy"; 44 - reg = <0x850 0x8>; 45 - #phy-cells = <1>; 46 - clocks = <&crg HISTB_COMBPHY0_CLK>; 47 - resets = <&crg 0x188 4>; 48 - hisilicon,fixed-mode = <PHY_TYPE_USB3>; 49 - }; 50 - 51 - combphy1: phy@858 { 52 - compatible = "hisilicon,hi3798cv200-combphy"; 53 - reg = <0x858 0x8>; 54 - #phy-cells = <1>; 55 - clocks = <&crg HISTB_COMBPHY1_CLK>; 56 - resets = <&crg 0x188 12>; 57 - hisilicon,mode-select-bits = <0x0008 11 (0x3 << 11)>; 58 - }; 59 - };
+55
Documentation/devicetree/bindings/phy/qcom,sata-phy.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/phy/qcom,sata-phy.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcomm SATA PHY Controller 8 + 9 + maintainers: 10 + - Bjorn Andersson <andersson@kernel.org> 11 + - Konrad Dybcio <konrad.dybcio@linaro.org> 12 + 13 + description: 14 + The Qualcomm SATA PHY describes on-chip SATA Physical layer controllers. 15 + 16 + properties: 17 + compatible: 18 + enum: 19 + - qcom,ipq806x-sata-phy 20 + - qcom,apq8064-sata-phy 21 + 22 + reg: 23 + maxItems: 1 24 + 25 + clocks: 26 + maxItems: 1 27 + 28 + clock-names: 29 + const: cfg 30 + 31 + '#phy-cells': 32 + const: 0 33 + 34 + required: 35 + - compatible 36 + - reg 37 + - clocks 38 + - clock-names 39 + - '#phy-cells' 40 + 41 + additionalProperties: false 42 + 43 + examples: 44 + - | 45 + #include <dt-bindings/clock/qcom,gcc-ipq806x.h> 46 + sata_phy: sata-phy@1b400000 { 47 + compatible = "qcom,ipq806x-sata-phy"; 48 + reg = <0x1b400000 0x200>; 49 + 50 + clocks = <&gcc SATA_PHY_CFG_CLK>; 51 + clock-names = "cfg"; 52 + 53 + #phy-cells = <0>; 54 + }; 55 +
+3
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
··· 40 40 - qcom,sm8650-qmp-gen4x2-pcie-phy 41 41 - qcom,x1e80100-qmp-gen3x2-pcie-phy 42 42 - qcom,x1e80100-qmp-gen4x2-pcie-phy 43 + - qcom,x1e80100-qmp-gen4x4-pcie-phy 43 44 44 45 reg: 45 46 minItems: 1 ··· 119 118 contains: 120 119 enum: 121 120 - qcom,sc8280xp-qmp-gen3x4-pcie-phy 121 + - qcom,x1e80100-qmp-gen4x4-pcie-phy 122 122 then: 123 123 properties: 124 124 reg: ··· 171 169 - qcom,sc8280xp-qmp-gen3x1-pcie-phy 172 170 - qcom,sc8280xp-qmp-gen3x2-pcie-phy 173 171 - qcom,sc8280xp-qmp-gen3x4-pcie-phy 172 + - qcom,x1e80100-qmp-gen4x4-pcie-phy 174 173 then: 175 174 properties: 176 175 clocks:
-76
Documentation/devicetree/bindings/phy/qcom,usb-8x16-phy.txt
··· 1 - Qualcomm's APQ8016/MSM8916 USB transceiver controller 2 - 3 - - compatible: 4 - Usage: required 5 - Value type: <string> 6 - Definition: Should contain "qcom,usb-8x16-phy". 7 - 8 - - reg: 9 - Usage: required 10 - Value type: <prop-encoded-array> 11 - Definition: USB PHY base address and length of the register map 12 - 13 - - clocks: 14 - Usage: required 15 - Value type: <prop-encoded-array> 16 - Definition: See clock-bindings.txt section "consumers". List of 17 - two clock specifiers for interface and core controller 18 - clocks. 19 - 20 - - clock-names: 21 - Usage: required 22 - Value type: <string> 23 - Definition: Must contain "iface" and "core" strings. 24 - 25 - - vddcx-supply: 26 - Usage: required 27 - Value type: <phandle> 28 - Definition: phandle to the regulator VDCCX supply node. 29 - 30 - - v1p8-supply: 31 - Usage: required 32 - Value type: <phandle> 33 - Definition: phandle to the regulator 1.8V supply node. 34 - 35 - - v3p3-supply: 36 - Usage: required 37 - Value type: <phandle> 38 - Definition: phandle to the regulator 3.3V supply node. 39 - 40 - - resets: 41 - Usage: required 42 - Value type: <prop-encoded-array> 43 - Definition: See reset.txt section "consumers". PHY reset specifier. 44 - 45 - - reset-names: 46 - Usage: required 47 - Value type: <string> 48 - Definition: Must contain "phy" string. 49 - 50 - - switch-gpio: 51 - Usage: optional 52 - Value type: <prop-encoded-array> 53 - Definition: Some boards are using Dual SPDT USB Switch, witch is 54 - controlled by GPIO to de/multiplex D+/D- USB lines 55 - between connectors. 
56 - 57 - Example: 58 - usb_phy: phy@78d9000 { 59 - compatible = "qcom,usb-8x16-phy"; 60 - reg = <0x78d9000 0x400>; 61 - 62 - vddcx-supply = <&pm8916_s1_corner>; 63 - v1p8-supply = <&pm8916_l7>; 64 - v3p3-supply = <&pm8916_l13>; 65 - 66 - clocks = <&gcc GCC_USB_HS_AHB_CLK>, 67 - <&gcc GCC_USB_HS_SYSTEM_CLK>; 68 - clock-names = "iface", "core"; 69 - 70 - resets = <&gcc GCC_USB2A_PHY_BCR>; 71 - reset-names = "phy"; 72 - 73 - // D+/D- lines: 1 - Routed to HUB, 0 - Device connector 74 - switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>; 75 - }; 76 -
-24
Documentation/devicetree/bindings/phy/qcom-apq8064-sata-phy.txt
··· 1 - Qualcomm APQ8064 SATA PHY Controller 2 - ------------------------------------ 3 - 4 - SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers. 5 - Each SATA PHY controller should have its own node. 6 - 7 - Required properties: 8 - - compatible: compatible list, contains "qcom,apq8064-sata-phy". 9 - - reg: offset and length of the SATA PHY register set; 10 - - #phy-cells: must be zero 11 - - clocks: a list of phandles and clock-specifier pairs, one for each entry in 12 - clock-names. 13 - - clock-names: must be "cfg" for phy config clock. 14 - 15 - Example: 16 - sata_phy: sata-phy@1b400000 { 17 - compatible = "qcom,apq8064-sata-phy"; 18 - reg = <0x1b400000 0x200>; 19 - 20 - clocks = <&gcc SATA_PHY_CFG_CLK>; 21 - clock-names = "cfg"; 22 - 23 - #phy-cells = <0>; 24 - };
-23
Documentation/devicetree/bindings/phy/qcom-ipq806x-sata-phy.txt
··· 1 - Qualcomm IPQ806x SATA PHY Controller 2 - ------------------------------------ 3 - 4 - SATA PHY nodes are defined to describe on-chip SATA Physical layer controllers. 5 - Each SATA PHY controller should have its own node. 6 - 7 - Required properties: 8 - - compatible: compatible list, contains "qcom,ipq806x-sata-phy" 9 - - reg: offset and length of the SATA PHY register set; 10 - - #phy-cells: must be zero 11 - - clocks: must be exactly one entry 12 - - clock-names: must be "cfg" 13 - 14 - Example: 15 - sata_phy: sata-phy@1b400000 { 16 - compatible = "qcom,ipq806x-sata-phy"; 17 - reg = <0x1b400000 0x200>; 18 - 19 - clocks = <&gcc SATA_PHY_CFG_CLK>; 20 - clock-names = "cfg"; 21 - 22 - #phy-cells = <0>; 23 - };
+3 -1
Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
··· 13 13 compatible: 14 14 oneOf: 15 15 - items: 16 - - const: renesas,usb2-phy-r8a77470 # RZ/G1C 16 + - enum: 17 + - renesas,usb2-phy-r8a77470 # RZ/G1C 18 + - renesas,usb2-phy-r9a08g045 # RZ/G3S 17 19 18 20 - items: 19 21 - enum:
+3
Documentation/devicetree/bindings/phy/rockchip,rk3588-hdptx-phy.yaml
··· 27 27 - const: ref 28 28 - const: apb 29 29 30 + "#clock-cells": 31 + const: 0 32 + 30 33 "#phy-cells": 31 34 const: 0 32 35
+6 -2
Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
··· 30 30 minItems: 1 31 31 maxItems: 2 32 32 33 - clock-names: true 33 + clock-names: 34 + minItems: 1 35 + maxItems: 6 34 36 35 37 resets: 36 38 minItems: 2 37 39 maxItems: 6 38 40 39 - reset-names: true 41 + reset-names: 42 + minItems: 2 43 + maxItems: 6 40 44 41 45 allOf: 42 46 - if:
+6 -2
Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
··· 31 31 minItems: 1 32 32 maxItems: 2 33 33 34 - clock-names: true 34 + clock-names: 35 + minItems: 1 36 + maxItems: 2 35 37 36 38 resets: 37 39 minItems: 1 38 40 maxItems: 2 39 41 40 - reset-names: true 42 + reset-names: 43 + minItems: 1 44 + maxItems: 2 41 45 42 46 socionext,syscon: 43 47 $ref: /schemas/types.yaml#/definitions/phandle
+5 -2
Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
··· 34 34 minItems: 2 35 35 maxItems: 3 36 36 37 - clock-names: true 37 + clock-names: 38 + minItems: 2 39 + maxItems: 3 38 40 39 41 resets: 40 42 maxItems: 2 41 43 42 - reset-names: true 44 + reset-names: 45 + maxItems: 2 43 46 44 47 vbus-supply: 45 48 description: A phandle to the regulator for USB VBUS
+5 -2
Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
··· 35 35 minItems: 2 36 36 maxItems: 3 37 37 38 - clock-names: true 38 + clock-names: 39 + minItems: 2 40 + maxItems: 3 39 41 40 42 resets: 41 43 maxItems: 2 42 44 43 - reset-names: true 45 + reset-names: 46 + maxItems: 2 44 47 45 48 vbus-supply: 46 49 description: A phandle to the regulator for USB VBUS, only for USB host
+1
drivers/phy/Kconfig
··· 95 95 source "drivers/phy/microchip/Kconfig" 96 96 source "drivers/phy/motorola/Kconfig" 97 97 source "drivers/phy/mscc/Kconfig" 98 + source "drivers/phy/nuvoton/Kconfig" 98 99 source "drivers/phy/qualcomm/Kconfig" 99 100 source "drivers/phy/ralink/Kconfig" 100 101 source "drivers/phy/realtek/Kconfig"
+1
drivers/phy/Makefile
··· 25 25 microchip/ \ 26 26 motorola/ \ 27 27 mscc/ \ 28 + nuvoton/ \ 28 29 qualcomm/ \ 29 30 ralink/ \ 30 31 realtek/ \
+6 -14
drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
··· 113 113 static int cygnus_pcie_phy_probe(struct platform_device *pdev) 114 114 { 115 115 struct device *dev = &pdev->dev; 116 - struct device_node *node = dev->of_node, *child; 116 + struct device_node *node = dev->of_node; 117 117 struct cygnus_pcie_phy_core *core; 118 118 struct phy_provider *provider; 119 119 unsigned cnt = 0; 120 - int ret; 121 120 122 121 if (of_get_child_count(node) == 0) { 123 122 dev_err(dev, "PHY no child node\n"); ··· 135 136 136 137 mutex_init(&core->lock); 137 138 138 - for_each_available_child_of_node(node, child) { 139 + for_each_available_child_of_node_scoped(node, child) { 139 140 unsigned int id; 140 141 struct cygnus_pcie_phy *p; 141 142 142 143 if (of_property_read_u32(child, "reg", &id)) { 143 144 dev_err(dev, "missing reg property for %pOFn\n", 144 145 child); 145 - ret = -EINVAL; 146 - goto put_child; 146 + return -EINVAL; 147 147 } 148 148 149 149 if (id >= MAX_NUM_PHYS) { 150 150 dev_err(dev, "invalid PHY id: %u\n", id); 151 - ret = -EINVAL; 152 - goto put_child; 151 + return -EINVAL; 153 152 } 154 153 155 154 if (core->phys[id].phy) { 156 155 dev_err(dev, "duplicated PHY id: %u\n", id); 157 - ret = -EINVAL; 158 - goto put_child; 156 + return -EINVAL; 159 157 } 160 158 161 159 p = &core->phys[id]; 162 160 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); 163 161 if (IS_ERR(p->phy)) { 164 162 dev_err(dev, "failed to create PHY\n"); 165 - ret = PTR_ERR(p->phy); 166 - goto put_child; 163 + return PTR_ERR(p->phy); 167 164 } 168 165 169 166 p->core = core; ··· 179 184 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); 180 185 181 186 return 0; 182 - put_child: 183 - of_node_put(child); 184 - return ret; 185 187 } 186 188 187 189 static const struct of_device_id cygnus_pcie_phy_match_table[] = {
+7 -14
drivers/phy/broadcom/phy-brcm-sata.c
··· 751 751 { 752 752 const char *rxaeq_mode; 753 753 struct device *dev = &pdev->dev; 754 - struct device_node *dn = dev->of_node, *child; 754 + struct device_node *dn = dev->of_node; 755 755 const struct of_device_id *of_id; 756 756 struct brcm_sata_phy *priv; 757 757 struct phy_provider *provider; 758 - int ret, count = 0; 758 + int count = 0; 759 759 760 760 if (of_get_child_count(dn) == 0) 761 761 return -ENODEV; ··· 782 782 return PTR_ERR(priv->ctrl_base); 783 783 } 784 784 785 - for_each_available_child_of_node(dn, child) { 785 + for_each_available_child_of_node_scoped(dn, child) { 786 786 unsigned int id; 787 787 struct brcm_sata_port *port; 788 788 789 789 if (of_property_read_u32(child, "reg", &id)) { 790 790 dev_err(dev, "missing reg property in node %pOFn\n", 791 791 child); 792 - ret = -EINVAL; 793 - goto put_child; 792 + return -EINVAL; 794 793 } 795 794 796 795 if (id >= MAX_PORTS) { 797 796 dev_err(dev, "invalid reg: %u\n", id); 798 - ret = -EINVAL; 799 - goto put_child; 797 + return -EINVAL; 800 798 } 801 799 if (priv->phys[id].phy) { 802 800 dev_err(dev, "already registered port %u\n", id); 803 - ret = -EINVAL; 804 - goto put_child; 801 + return -EINVAL; 805 802 } 806 803 807 804 port = &priv->phys[id]; ··· 819 822 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); 820 823 if (IS_ERR(port->phy)) { 821 824 dev_err(dev, "failed to create PHY\n"); 822 - ret = PTR_ERR(port->phy); 823 - goto put_child; 825 + return PTR_ERR(port->phy); 824 826 } 825 827 826 828 phy_set_drvdata(port->phy, port); ··· 835 839 dev_info(dev, "registered %d port(s)\n", count); 836 840 837 841 return 0; 838 - put_child: 839 - of_node_put(child); 840 - return ret; 841 842 } 842 843 843 844 static struct platform_driver brcm_sata_phy_driver = {
+46 -49
drivers/phy/cadence/phy-cadence-sierra.c
··· 310 310 }, 311 311 }; 312 312 313 - static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = { 313 + static const u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = { 314 314 [CMN_PLLLC] = { 0, 1 }, 315 315 [CMN_PLLLC1] = { 1, 0 }, 316 316 }; ··· 362 362 u32 id_value; 363 363 u8 block_offset_shift; 364 364 u8 reg_offset_shift; 365 - struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 366 - [NUM_SSC_MODE]; 367 - struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 368 - [NUM_SSC_MODE]; 369 - struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 370 - [NUM_SSC_MODE]; 371 - struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 372 - [NUM_SSC_MODE]; 365 + const struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 366 + [NUM_SSC_MODE]; 367 + const struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 368 + [NUM_SSC_MODE]; 369 + const struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 370 + [NUM_SSC_MODE]; 371 + const struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] 372 + [NUM_SSC_MODE]; 373 373 }; 374 374 375 375 struct cdns_regmap_cdb_context { ··· 539 539 struct cdns_sierra_inst *ins = phy_get_drvdata(gphy); 540 540 struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent); 541 541 const struct cdns_sierra_data *init_data = phy->init_data; 542 - struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; 542 + const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; 543 543 enum cdns_sierra_phy_type phy_type = ins->phy_type; 544 + const struct cdns_sierra_vals *phy_pma_ln_vals; 544 545 enum cdns_sierra_ssc_mode ssc = ins->ssc_mode; 545 - struct cdns_sierra_vals *phy_pma_ln_vals; 546 + const struct cdns_sierra_vals *pcs_cmn_vals; 546 547 const struct cdns_reg_pairs *reg_pairs; 547 - struct cdns_sierra_vals *pcs_cmn_vals; 548 548 struct regmap *regmap; 549 549 u32 num_regs; 550 550 int i, j; ··· 1244 1244 1245 1245 static int 
cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp) 1246 1246 { 1247 + const struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; 1247 1248 const struct cdns_sierra_data *init_data = sp->init_data; 1248 - struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals; 1249 + const struct cdns_sierra_vals *phy_pma_ln_vals; 1250 + const struct cdns_sierra_vals *pcs_cmn_vals; 1249 1251 enum cdns_sierra_phy_type phy_t1, phy_t2; 1250 - struct cdns_sierra_vals *phy_pma_ln_vals; 1251 1252 const struct cdns_reg_pairs *reg_pairs; 1252 - struct cdns_sierra_vals *pcs_cmn_vals; 1253 1253 int i, j, node, mlane, num_lanes, ret; 1254 1254 enum cdns_sierra_ssc_mode ssc; 1255 1255 struct regmap *regmap; ··· 1366 1366 unsigned int id_value; 1367 1367 int ret, node = 0; 1368 1368 void __iomem *base; 1369 - struct device_node *dn = dev->of_node, *child; 1369 + struct device_node *dn = dev->of_node; 1370 1370 1371 1371 if (of_get_child_count(dn) == 0) 1372 1372 return -ENODEV; ··· 1438 1438 1439 1439 sp->autoconf = of_property_read_bool(dn, "cdns,autoconf"); 1440 1440 1441 - for_each_available_child_of_node(dn, child) { 1441 + for_each_available_child_of_node_scoped(dn, child) { 1442 1442 struct phy *gphy; 1443 1443 1444 1444 if (!(of_node_name_eq(child, "phy") || ··· 1452 1452 dev_err(dev, "failed to get reset %s\n", 1453 1453 child->full_name); 1454 1454 ret = PTR_ERR(sp->phys[node].lnk_rst); 1455 - of_node_put(child); 1456 1455 goto put_control; 1457 1456 } 1458 1457 ··· 1460 1461 if (ret) { 1461 1462 dev_err(dev, "missing property in node %s\n", 1462 1463 child->name); 1463 - of_node_put(child); 1464 1464 reset_control_put(sp->phys[node].lnk_rst); 1465 1465 goto put_control; 1466 1466 } ··· 1473 1475 gphy = devm_phy_create(dev, child, &noop_ops); 1474 1476 if (IS_ERR(gphy)) { 1475 1477 ret = PTR_ERR(gphy); 1476 - of_node_put(child); 1477 1478 reset_control_put(sp->phys[node].lnk_rst); 1478 1479 goto put_control; 1479 1480 } ··· 1541 1544 } 1542 1545 1543 1546 /* SGMII PHY PMA lane 
configuration */ 1544 - static struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = { 1547 + static const struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = { 1545 1548 {0x9010, SIERRA_PHY_PMA_XCVR_CTRL} 1546 1549 }; 1547 1550 1548 - static struct cdns_sierra_vals sgmii_phy_pma_ln_vals = { 1551 + static const struct cdns_sierra_vals sgmii_phy_pma_ln_vals = { 1549 1552 .reg_pairs = sgmii_phy_pma_ln_regs, 1550 1553 .num_regs = ARRAY_SIZE(sgmii_phy_pma_ln_regs), 1551 1554 }; ··· 1595 1598 {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG} 1596 1599 }; 1597 1600 1598 - static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = { 1601 + static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_cmn_vals = { 1599 1602 .reg_pairs = sgmii_100_no_ssc_plllc1_opt3_cmn_regs, 1600 1603 .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_cmn_regs), 1601 1604 }; 1602 1605 1603 - static struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = { 1606 + static const struct cdns_sierra_vals sgmii_100_no_ssc_plllc1_opt3_ln_vals = { 1604 1607 .reg_pairs = sgmii_100_no_ssc_plllc1_opt3_ln_regs, 1605 1608 .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_plllc1_opt3_ln_regs), 1606 1609 }; 1607 1610 1608 1611 /* QSGMII PHY PMA lane configuration */ 1609 - static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = { 1612 + static const struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = { 1610 1613 {0x9010, SIERRA_PHY_PMA_XCVR_CTRL} 1611 1614 }; 1612 1615 1613 - static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = { 1616 + static const struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = { 1614 1617 .reg_pairs = qsgmii_phy_pma_ln_regs, 1615 1618 .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs), 1616 1619 }; ··· 1661 1664 {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG} 1662 1665 }; 1663 1666 1664 - static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = { 1667 + static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = { 1665 1668 .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs, 1666 
1669 .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs), 1667 1670 }; 1668 1671 1669 - static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = { 1672 + static const struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = { 1670 1673 .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs, 1671 1674 .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs), 1672 1675 }; 1673 1676 1674 1677 /* PCIE PHY PCS common configuration */ 1675 - static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = { 1678 + static const struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = { 1676 1679 {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1} 1677 1680 }; 1678 1681 1679 - static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = { 1682 + static const struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = { 1680 1683 .reg_pairs = pcie_phy_pcs_cmn_regs, 1681 1684 .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs), 1682 1685 }; ··· 1742 1745 {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} 1743 1746 }; 1744 1747 1745 - static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = { 1748 + static const struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = { 1746 1749 .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs, 1747 1750 .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs), 1748 1751 }; 1749 1752 1750 - static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = { 1753 + static const struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = { 1751 1754 .reg_pairs = ml_pcie_100_no_ssc_ln_regs, 1752 1755 .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs), 1753 1756 }; ··· 1807 1810 {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} 1808 1811 }; 1809 1812 1810 - static struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = { 1813 + static const struct cdns_sierra_vals ti_ml_pcie_100_no_ssc_ln_vals = { 1811 1814 .reg_pairs = ti_ml_pcie_100_no_ssc_ln_regs, 1812 1815 .num_regs = ARRAY_SIZE(ti_ml_pcie_100_no_ssc_ln_regs), 1813 1816 }; ··· 1883 1886 {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} 1884 1887 }; 1885 1888 1886 - static struct 
cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = { 1889 + static const struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = { 1887 1890 .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs, 1888 1891 .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs), 1889 1892 }; 1890 1893 1891 - static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = { 1894 + static const struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = { 1892 1895 .reg_pairs = ml_pcie_100_int_ssc_ln_regs, 1893 1896 .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs), 1894 1897 }; ··· 1951 1954 {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} 1952 1955 }; 1953 1956 1954 - static struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = { 1957 + static const struct cdns_sierra_vals ti_ml_pcie_100_int_ssc_ln_vals = { 1955 1958 .reg_pairs = ti_ml_pcie_100_int_ssc_ln_regs, 1956 1959 .num_regs = ARRAY_SIZE(ti_ml_pcie_100_int_ssc_ln_regs), 1957 1960 }; ··· 2021 2024 {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} 2022 2025 }; 2023 2026 2024 - static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = { 2027 + static const struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = { 2025 2028 .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs, 2026 2029 .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs), 2027 2030 }; 2028 2031 2029 - static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = { 2032 + static const struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = { 2030 2033 .reg_pairs = ml_pcie_100_ext_ssc_ln_regs, 2031 2034 .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs), 2032 2035 }; ··· 2089 2092 {0x0002, SIERRA_TX_RCVDET_OVRD_PREG} 2090 2093 }; 2091 2094 2092 - static struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = { 2095 + static const struct cdns_sierra_vals ti_ml_pcie_100_ext_ssc_ln_vals = { 2093 2096 .reg_pairs = ti_ml_pcie_100_ext_ssc_ln_regs, 2094 2097 .num_regs = ARRAY_SIZE(ti_ml_pcie_100_ext_ssc_ln_regs), 2095 2098 }; ··· 2149 2152 {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} 2150 2153 }; 2151 2154 
2152 - static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = { 2155 + static const struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = { 2153 2156 .reg_pairs = cdns_pcie_cmn_regs_no_ssc, 2154 2157 .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc), 2155 2158 }; 2156 2159 2157 - static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = { 2160 + static const struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = { 2158 2161 .reg_pairs = cdns_pcie_ln_regs_no_ssc, 2159 2162 .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc), 2160 2163 }; ··· 2224 2227 {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} 2225 2228 }; 2226 2229 2227 - static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = { 2230 + static const struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = { 2228 2231 .reg_pairs = cdns_pcie_cmn_regs_int_ssc, 2229 2232 .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc), 2230 2233 }; 2231 2234 2232 - static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = { 2235 + static const struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = { 2233 2236 .reg_pairs = cdns_pcie_ln_regs_int_ssc, 2234 2237 .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc), 2235 2238 }; ··· 2293 2296 {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG} 2294 2297 }; 2295 2298 2296 - static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = { 2299 + static const struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = { 2297 2300 .reg_pairs = cdns_pcie_cmn_regs_ext_ssc, 2298 2301 .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc), 2299 2302 }; 2300 2303 2301 - static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = { 2304 + static const struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = { 2302 2305 .reg_pairs = cdns_pcie_ln_regs_ext_ssc, 2303 2306 .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc), 2304 2307 }; ··· 2410 2413 {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG} 2411 2414 }; 2412 2415 2413 - static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = { 2416 + static const struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = { 2414 2417 
.reg_pairs = cdns_usb_cmn_regs_ext_ssc, 2415 2418 .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc), 2416 2419 }; 2417 2420 2418 - static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = { 2421 + static const struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = { 2419 2422 .reg_pairs = cdns_usb_ln_regs_ext_ssc, 2420 2423 .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc), 2421 2424 }; ··· 2440 2443 {0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG}, 2441 2444 }; 2442 2445 2443 - static struct cdns_sierra_vals sgmii_cmn_vals = { 2446 + static const struct cdns_sierra_vals sgmii_cmn_vals = { 2444 2447 .reg_pairs = sgmii_pma_cmn_vals, 2445 2448 .num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals), 2446 2449 }; ··· 2486 2489 {0x321F, SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG}, 2487 2490 }; 2488 2491 2489 - static struct cdns_sierra_vals sgmii_pma_ln_vals = { 2492 + static const struct cdns_sierra_vals sgmii_pma_ln_vals = { 2490 2493 .reg_pairs = sgmii_ln_regs, 2491 2494 .num_regs = ARRAY_SIZE(sgmii_ln_regs), 2492 2495 };
+368 -315
drivers/phy/cadence/phy-cadence-torrent.c
··· 285 285 CDNS_TORRENT_RECEIVED_REFCLK 286 286 }; 287 287 288 - static u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 }; 288 + static const u32 cdns_torrent_refclk_driver_mux_table[] = { 1, 0 }; 289 289 290 290 enum cdns_torrent_phy_type { 291 291 TYPE_NONE, ··· 351 351 void __iomem *sd_base; /* SD0801 registers base */ 352 352 u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ 353 353 u32 dp_pll; 354 + u32 protocol_bitmask; 354 355 struct reset_control *phy_rst; 355 356 struct reset_control *apb_rst; 356 357 struct device *dev; ··· 423 422 }; 424 423 425 424 struct cdns_torrent_vals { 426 - struct cdns_reg_pairs *reg_pairs; 425 + const struct cdns_reg_pairs *reg_pairs; 427 426 u32 num_regs; 428 427 }; 429 428 430 429 struct cdns_torrent_vals_entry { 431 430 u32 key; 432 - struct cdns_torrent_vals *vals; 431 + const struct cdns_torrent_vals *vals; 433 432 }; 434 433 435 434 struct cdns_torrent_vals_table { 436 - struct cdns_torrent_vals_entry *entries; 435 + const struct cdns_torrent_vals_entry *entries; 437 436 u32 num_entries; 438 437 }; 439 438 ··· 455 454 u8 reg_offset_shift; 456 455 }; 457 456 458 - static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl, 459 - enum cdns_torrent_ref_clk refclk0, 460 - enum cdns_torrent_ref_clk refclk1, 461 - enum cdns_torrent_phy_type link0, 462 - enum cdns_torrent_phy_type link1, 463 - enum cdns_torrent_ssc_mode ssc) 457 + static const struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl, 458 + enum cdns_torrent_ref_clk refclk0, 459 + enum cdns_torrent_ref_clk refclk1, 460 + enum cdns_torrent_phy_type link0, 461 + enum cdns_torrent_phy_type link1, 462 + enum cdns_torrent_ssc_mode ssc) 464 463 { 465 464 int i; 466 465 u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc); ··· 2307 2306 static int cdns_torrent_phy_init(struct phy *phy) 2308 2307 { 2309 2308 struct cdns_torrent_phy *cdns_phy = 
dev_get_drvdata(phy->dev.parent); 2309 + const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; 2310 2310 const struct cdns_torrent_data *init_data = cdns_phy->init_data; 2311 - struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; 2311 + const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; 2312 2312 enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; 2313 - struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; 2314 2313 struct cdns_torrent_inst *inst = phy_get_drvdata(phy); 2315 2314 enum cdns_torrent_phy_type phy_type = inst->phy_type; 2315 + const struct cdns_torrent_vals *phy_pma_cmn_vals; 2316 2316 enum cdns_torrent_ssc_mode ssc = inst->ssc_mode; 2317 - struct cdns_torrent_vals *phy_pma_cmn_vals; 2318 - struct cdns_torrent_vals *pcs_cmn_vals; 2319 - struct cdns_reg_pairs *reg_pairs; 2317 + const struct cdns_torrent_vals *pcs_cmn_vals; 2318 + const struct cdns_reg_pairs *reg_pairs; 2320 2319 struct regmap *regmap; 2321 2320 u32 num_regs; 2322 2321 int i, j; ··· 2464 2463 static 2465 2464 int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) 2466 2465 { 2466 + const struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; 2467 2467 const struct cdns_torrent_data *init_data = cdns_phy->init_data; 2468 - struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; 2468 + const struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; 2469 2469 enum cdns_torrent_ref_clk ref_clk1 = cdns_phy->ref_clk1_rate; 2470 2470 enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate; 2471 - struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; 2471 + const struct cdns_torrent_vals *phy_pma_cmn_vals; 2472 + const struct cdns_torrent_vals *pcs_cmn_vals; 2472 2473 enum cdns_torrent_phy_type phy_t1, phy_t2; 2473 - struct cdns_torrent_vals *phy_pma_cmn_vals; 2474 - struct cdns_torrent_vals *pcs_cmn_vals; 2474 + const struct cdns_reg_pairs *reg_pairs; 2475 2475 int i, j, node, mlane, num_lanes, ret; 2476 - 
struct cdns_reg_pairs *reg_pairs; 2476 + struct device *dev = cdns_phy->dev; 2477 2477 enum cdns_torrent_ssc_mode ssc; 2478 2478 struct regmap *regmap; 2479 - u32 num_regs; 2479 + u32 num_regs, num_protocols, protocol; 2480 2480 2481 - /* Maximum 2 links (subnodes) are supported */ 2482 - if (cdns_phy->nsubnodes != 2) 2481 + num_protocols = hweight32(cdns_phy->protocol_bitmask); 2482 + /* Maximum 2 protocols are supported */ 2483 + if (num_protocols > 2) { 2484 + dev_err(dev, "at most 2 protocols are supported\n"); 2483 2485 return -EINVAL; 2486 + } 2484 2487 2485 - phy_t1 = cdns_phy->phys[0].phy_type; 2486 - phy_t2 = cdns_phy->phys[1].phy_type; 2487 2488 2488 2489 /** 2489 - * First configure the PHY for first link with phy_t1. Get the array 2490 - * values as [phy_t1][phy_t2][ssc]. 2490 + * Get PHY types directly from subnodes if only 2 subnodes exist. 2491 + * It is possible for phy_t1 to be the same as phy_t2 for special 2492 + * configurations such as PCIe Multilink. 2491 2493 */ 2492 - for (node = 0; node < cdns_phy->nsubnodes; node++) { 2493 - if (node == 1) { 2494 + if (cdns_phy->nsubnodes == 2) { 2495 + phy_t1 = cdns_phy->phys[0].phy_type; 2496 + phy_t2 = cdns_phy->phys[1].phy_type; 2497 + } else { 2498 + /** 2499 + * Both PHY types / protocols should be unique. 2500 + * If they are the same, it should be expressed with either 2501 + * a) Single-Link (1 Sub-node) - handled via PHY APIs 2502 + * OR 2503 + * b) Double-Link (2 Sub-nodes) - handled above 2504 + */ 2505 + if (num_protocols != 2) { 2506 + dev_err(dev, "incorrect representation of link\n"); 2507 + return -EINVAL; 2508 + } 2509 + 2510 + phy_t1 = fns(cdns_phy->protocol_bitmask, 0); 2511 + phy_t2 = fns(cdns_phy->protocol_bitmask, 1); 2512 + } 2513 + 2514 + /** 2515 + * Configure all links with the protocol phy_t1 first followed by 2516 + * configuring all links with the protocol phy_t2. 
2517 + * 2518 + * When phy_t1 = phy_t2, it is a single protocol and configuration 2519 + * is performed with a single iteration of the protocol and multiple 2520 + * iterations over the sub-nodes (links). 2521 + * 2522 + * When phy_t1 != phy_t2, there are two protocols and configuration 2523 + * is performed by iterating over all sub-nodes matching the first 2524 + * protocol and configuring them first, followed by iterating over 2525 + * all sub-nodes matching the second protocol and configuring them 2526 + * next. 2527 + */ 2528 + for (protocol = 0; protocol < num_protocols; protocol++) { 2529 + /** 2530 + * For the case where num_protocols is 1, 2531 + * phy_t1 = phy_t2 and the swap is unnecessary. 2532 + * 2533 + * Swapping phy_t1 and phy_t2 is only required when the 2534 + * number of protocols is 2 and there are 2 or more links. 2535 + */ 2536 + if (protocol == 1) { 2494 2537 /** 2495 - * If first link with phy_t1 is configured, then 2496 - * configure the PHY for second link with phy_t2. 2538 + * If first protocol with phy_t1 is configured, then 2539 + * configure the PHY for second protocol with phy_t2. 2497 2540 * Get the array values as [phy_t2][phy_t1][ssc]. 2498 2541 */ 2499 2542 swap(phy_t1, phy_t2); 2500 2543 swap(ref_clk, ref_clk1); 2501 2544 } 2502 2545 2503 - mlane = cdns_phy->phys[node].mlane; 2504 - ssc = cdns_phy->phys[node].ssc_mode; 2505 - num_lanes = cdns_phy->phys[node].num_lanes; 2546 + for (node = 0; node < cdns_phy->nsubnodes; node++) { 2547 + if (cdns_phy->phys[node].phy_type != phy_t1) 2548 + continue; 2506 2549 2507 - /** 2508 - * PHY configuration specific registers: 2509 - * link_cmn_vals depend on combination of PHY types being 2510 - * configured and are common for both PHY types, so array 2511 - * values should be same for [phy_t1][phy_t2][ssc] and 2512 - * [phy_t2][phy_t1][ssc]. 
2513 - * xcvr_diag_vals also depend on combination of PHY types 2514 - * being configured, but these can be different for particular 2515 - * PHY type and are per lane. 2516 - */ 2517 - link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl, 2518 - CLK_ANY, CLK_ANY, 2519 - phy_t1, phy_t2, ANY_SSC); 2520 - if (link_cmn_vals) { 2521 - reg_pairs = link_cmn_vals->reg_pairs; 2522 - num_regs = link_cmn_vals->num_regs; 2523 - regmap = cdns_phy->regmap_common_cdb; 2550 + mlane = cdns_phy->phys[node].mlane; 2551 + ssc = cdns_phy->phys[node].ssc_mode; 2552 + num_lanes = cdns_phy->phys[node].num_lanes; 2524 2553 2525 2554 /** 2526 - * First array value in link_cmn_vals must be of 2527 - * PHY_PLL_CFG register 2555 + * PHY configuration specific registers: 2556 + * link_cmn_vals depend on combination of PHY types being 2557 + * configured and are common for both PHY types, so array 2558 + * values should be same for [phy_t1][phy_t2][ssc] and 2559 + * [phy_t2][phy_t1][ssc]. 2560 + * xcvr_diag_vals also depend on combination of PHY types 2561 + * being configured, but these can be different for particular 2562 + * PHY type and are per lane. 
2528 2563 */ 2529 - regmap_field_write(cdns_phy->phy_pll_cfg, 2530 - reg_pairs[0].val); 2564 + link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl, 2565 + CLK_ANY, CLK_ANY, 2566 + phy_t1, phy_t2, ANY_SSC); 2567 + if (link_cmn_vals) { 2568 + reg_pairs = link_cmn_vals->reg_pairs; 2569 + num_regs = link_cmn_vals->num_regs; 2570 + regmap = cdns_phy->regmap_common_cdb; 2531 2571 2532 - for (i = 1; i < num_regs; i++) 2533 - regmap_write(regmap, reg_pairs[i].off, 2534 - reg_pairs[i].val); 2535 - } 2572 + /** 2573 + * First array value in link_cmn_vals must be of 2574 + * PHY_PLL_CFG register 2575 + */ 2576 + regmap_field_write(cdns_phy->phy_pll_cfg, 2577 + reg_pairs[0].val); 2536 2578 2537 - xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl, 2538 - CLK_ANY, CLK_ANY, 2539 - phy_t1, phy_t2, ANY_SSC); 2540 - if (xcvr_diag_vals) { 2541 - reg_pairs = xcvr_diag_vals->reg_pairs; 2542 - num_regs = xcvr_diag_vals->num_regs; 2543 - for (i = 0; i < num_lanes; i++) { 2544 - regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; 2545 - for (j = 0; j < num_regs; j++) 2546 - regmap_write(regmap, reg_pairs[j].off, 2547 - reg_pairs[j].val); 2579 + for (i = 1; i < num_regs; i++) 2580 + regmap_write(regmap, reg_pairs[i].off, 2581 + reg_pairs[i].val); 2548 2582 } 2549 - } 2550 2583 2551 - /* PHY PCS common registers configurations */ 2552 - pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl, 2553 - CLK_ANY, CLK_ANY, 2554 - phy_t1, phy_t2, ANY_SSC); 2555 - if (pcs_cmn_vals) { 2556 - reg_pairs = pcs_cmn_vals->reg_pairs; 2557 - num_regs = pcs_cmn_vals->num_regs; 2558 - regmap = cdns_phy->regmap_phy_pcs_common_cdb; 2559 - for (i = 0; i < num_regs; i++) 2560 - regmap_write(regmap, reg_pairs[i].off, 2561 - reg_pairs[i].val); 2562 - } 2563 - 2564 - /* PHY PMA common registers configurations */ 2565 - phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl, 2566 - CLK_ANY, CLK_ANY, 2567 - phy_t1, phy_t2, ANY_SSC); 2568 - 
if (phy_pma_cmn_vals) { 2569 - reg_pairs = phy_pma_cmn_vals->reg_pairs; 2570 - num_regs = phy_pma_cmn_vals->num_regs; 2571 - regmap = cdns_phy->regmap_phy_pma_common_cdb; 2572 - for (i = 0; i < num_regs; i++) 2573 - regmap_write(regmap, reg_pairs[i].off, 2574 - reg_pairs[i].val); 2575 - } 2576 - 2577 - /* PMA common registers configurations */ 2578 - cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl, 2579 - ref_clk, ref_clk1, 2580 - phy_t1, phy_t2, ssc); 2581 - if (cmn_vals) { 2582 - reg_pairs = cmn_vals->reg_pairs; 2583 - num_regs = cmn_vals->num_regs; 2584 - regmap = cdns_phy->regmap_common_cdb; 2585 - for (i = 0; i < num_regs; i++) 2586 - regmap_write(regmap, reg_pairs[i].off, 2587 - reg_pairs[i].val); 2588 - } 2589 - 2590 - /* PMA TX lane registers configurations */ 2591 - tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl, 2592 - ref_clk, ref_clk1, 2593 - phy_t1, phy_t2, ssc); 2594 - if (tx_ln_vals) { 2595 - reg_pairs = tx_ln_vals->reg_pairs; 2596 - num_regs = tx_ln_vals->num_regs; 2597 - for (i = 0; i < num_lanes; i++) { 2598 - regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; 2599 - for (j = 0; j < num_regs; j++) 2600 - regmap_write(regmap, reg_pairs[j].off, 2601 - reg_pairs[j].val); 2584 + xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl, 2585 + CLK_ANY, CLK_ANY, 2586 + phy_t1, phy_t2, ANY_SSC); 2587 + if (xcvr_diag_vals) { 2588 + reg_pairs = xcvr_diag_vals->reg_pairs; 2589 + num_regs = xcvr_diag_vals->num_regs; 2590 + for (i = 0; i < num_lanes; i++) { 2591 + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; 2592 + for (j = 0; j < num_regs; j++) 2593 + regmap_write(regmap, reg_pairs[j].off, 2594 + reg_pairs[j].val); 2595 + } 2602 2596 } 2603 - } 2604 2597 2605 - /* PMA RX lane registers configurations */ 2606 - rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl, 2607 - ref_clk, ref_clk1, 2608 - phy_t1, phy_t2, ssc); 2609 - if (rx_ln_vals) { 2610 - reg_pairs = rx_ln_vals->reg_pairs; 2611 - 
num_regs = rx_ln_vals->num_regs; 2612 - for (i = 0; i < num_lanes; i++) { 2613 - regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane]; 2614 - for (j = 0; j < num_regs; j++) 2615 - regmap_write(regmap, reg_pairs[j].off, 2616 - reg_pairs[j].val); 2598 + /* PHY PCS common registers configurations */ 2599 + pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl, 2600 + CLK_ANY, CLK_ANY, 2601 + phy_t1, phy_t2, ANY_SSC); 2602 + if (pcs_cmn_vals) { 2603 + reg_pairs = pcs_cmn_vals->reg_pairs; 2604 + num_regs = pcs_cmn_vals->num_regs; 2605 + regmap = cdns_phy->regmap_phy_pcs_common_cdb; 2606 + for (i = 0; i < num_regs; i++) 2607 + regmap_write(regmap, reg_pairs[i].off, 2608 + reg_pairs[i].val); 2617 2609 } 2618 - } 2619 2610 2620 - if (phy_t1 == TYPE_DP) { 2621 - ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2); 2622 - if (ret) 2623 - return ret; 2624 - } 2611 + /* PHY PMA common registers configurations */ 2612 + phy_pma_cmn_vals = 2613 + cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl, 2614 + CLK_ANY, CLK_ANY, phy_t1, phy_t2, 2615 + ANY_SSC); 2616 + if (phy_pma_cmn_vals) { 2617 + reg_pairs = phy_pma_cmn_vals->reg_pairs; 2618 + num_regs = phy_pma_cmn_vals->num_regs; 2619 + regmap = cdns_phy->regmap_phy_pma_common_cdb; 2620 + for (i = 0; i < num_regs; i++) 2621 + regmap_write(regmap, reg_pairs[i].off, 2622 + reg_pairs[i].val); 2623 + } 2625 2624 2626 - reset_control_deassert(cdns_phy->phys[node].lnk_rst); 2625 + /* PMA common registers configurations */ 2626 + cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl, 2627 + ref_clk, ref_clk1, 2628 + phy_t1, phy_t2, ssc); 2629 + if (cmn_vals) { 2630 + reg_pairs = cmn_vals->reg_pairs; 2631 + num_regs = cmn_vals->num_regs; 2632 + regmap = cdns_phy->regmap_common_cdb; 2633 + for (i = 0; i < num_regs; i++) 2634 + regmap_write(regmap, reg_pairs[i].off, 2635 + reg_pairs[i].val); 2636 + } 2637 + 2638 + /* PMA TX lane registers configurations */ 2639 + tx_ln_vals = 
cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl, 2640 + ref_clk, ref_clk1, 2641 + phy_t1, phy_t2, ssc); 2642 + if (tx_ln_vals) { 2643 + reg_pairs = tx_ln_vals->reg_pairs; 2644 + num_regs = tx_ln_vals->num_regs; 2645 + for (i = 0; i < num_lanes; i++) { 2646 + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; 2647 + for (j = 0; j < num_regs; j++) 2648 + regmap_write(regmap, reg_pairs[j].off, 2649 + reg_pairs[j].val); 2650 + } 2651 + } 2652 + 2653 + /* PMA RX lane registers configurations */ 2654 + rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl, 2655 + ref_clk, ref_clk1, 2656 + phy_t1, phy_t2, ssc); 2657 + if (rx_ln_vals) { 2658 + reg_pairs = rx_ln_vals->reg_pairs; 2659 + num_regs = rx_ln_vals->num_regs; 2660 + for (i = 0; i < num_lanes; i++) { 2661 + regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane]; 2662 + for (j = 0; j < num_regs; j++) 2663 + regmap_write(regmap, reg_pairs[j].off, 2664 + reg_pairs[j].val); 2665 + } 2666 + } 2667 + 2668 + if (phy_t1 == TYPE_DP) { 2669 + ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2); 2670 + if (ret) 2671 + return ret; 2672 + } 2673 + 2674 + reset_control_deassert(cdns_phy->phys[node].lnk_rst); 2675 + } 2627 2676 } 2628 2677 2629 2678 /* Take the PHY out of reset */ ··· 2877 2826 dev_set_drvdata(dev, cdns_phy); 2878 2827 cdns_phy->dev = dev; 2879 2828 cdns_phy->init_data = data; 2829 + cdns_phy->protocol_bitmask = 0; 2880 2830 2881 2831 cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0); 2882 2832 if (IS_ERR(cdns_phy->sd_base)) ··· 3062 3010 } 3063 3011 3064 3012 cdns_phy->phys[node].phy = gphy; 3013 + cdns_phy->protocol_bitmask |= BIT(cdns_phy->phys[node].phy_type); 3065 3014 phy_set_drvdata(gphy, &cdns_phy->phys[node]); 3066 3015 3067 3016 node++; ··· 3132 3079 } 3133 3080 3134 3081 /* SGMII and QSGMII link configuration */ 3135 - static struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = { 3082 + static const struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = { 3136 3083 {0x0002, PHY_PLL_CFG} 
3137 3084 }; 3138 3085 3139 - static struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = { 3086 + static const struct cdns_reg_pairs sgmii_qsgmii_xcvr_diag_ln_regs[] = { 3140 3087 {0x0003, XCVR_DIAG_HSCLK_DIV}, 3141 3088 {0x0113, XCVR_DIAG_PLLDRC_CTRL} 3142 3089 }; 3143 3090 3144 - static struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = { 3091 + static const struct cdns_torrent_vals sgmii_qsgmii_link_cmn_vals = { 3145 3092 .reg_pairs = sgmii_qsgmii_link_cmn_regs, 3146 3093 .num_regs = ARRAY_SIZE(sgmii_qsgmii_link_cmn_regs), 3147 3094 }; 3148 3095 3149 - static struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = { 3096 + static const struct cdns_torrent_vals sgmii_qsgmii_xcvr_diag_ln_vals = { 3150 3097 .reg_pairs = sgmii_qsgmii_xcvr_diag_ln_regs, 3151 3098 .num_regs = ARRAY_SIZE(sgmii_qsgmii_xcvr_diag_ln_regs), 3152 3099 }; ··· 3208 3155 cdns_torrent_phy_resume_noirq); 3209 3156 3210 3157 /* USB and DP link configuration */ 3211 - static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = { 3158 + static const struct cdns_reg_pairs usb_dp_link_cmn_regs[] = { 3212 3159 {0x0002, PHY_PLL_CFG}, 3213 3160 {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} 3214 3161 }; 3215 3162 3216 - static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = { 3163 + static const struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = { 3217 3164 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3218 3165 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3219 3166 {0x0041, XCVR_DIAG_PLLDRC_CTRL} 3220 3167 }; 3221 3168 3222 - static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = { 3169 + static const struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = { 3223 3170 {0x0001, XCVR_DIAG_HSCLK_SEL}, 3224 3171 {0x0009, XCVR_DIAG_PLLDRC_CTRL} 3225 3172 }; 3226 3173 3227 - static struct cdns_torrent_vals usb_dp_link_cmn_vals = { 3174 + static const struct cdns_torrent_vals usb_dp_link_cmn_vals = { 3228 3175 .reg_pairs = usb_dp_link_cmn_regs, 3229 3176 .num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs), 3230 3177 }; 3231 3178 3232 - static struct 
cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = { 3179 + static const struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = { 3233 3180 .reg_pairs = usb_dp_xcvr_diag_ln_regs, 3234 3181 .num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs), 3235 3182 }; 3236 3183 3237 - static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = { 3184 + static const struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = { 3238 3185 .reg_pairs = dp_usb_xcvr_diag_ln_regs, 3239 3186 .num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs), 3240 3187 }; 3241 3188 3242 3189 /* USXGMII and SGMII/QSGMII link configuration */ 3243 - static struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = { 3190 + static const struct cdns_reg_pairs usxgmii_sgmii_link_cmn_regs[] = { 3244 3191 {0x0002, PHY_PLL_CFG}, 3245 3192 {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}, 3246 3193 {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} 3247 3194 }; 3248 3195 3249 - static struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = { 3196 + static const struct cdns_reg_pairs usxgmii_sgmii_xcvr_diag_ln_regs[] = { 3250 3197 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3251 3198 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3252 3199 {0x0001, XCVR_DIAG_PLLDRC_CTRL} 3253 3200 }; 3254 3201 3255 - static struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = { 3202 + static const struct cdns_reg_pairs sgmii_usxgmii_xcvr_diag_ln_regs[] = { 3256 3203 {0x0111, XCVR_DIAG_HSCLK_SEL}, 3257 3204 {0x0103, XCVR_DIAG_HSCLK_DIV}, 3258 3205 {0x0A9B, XCVR_DIAG_PLLDRC_CTRL} 3259 3206 }; 3260 3207 3261 - static struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = { 3208 + static const struct cdns_torrent_vals usxgmii_sgmii_link_cmn_vals = { 3262 3209 .reg_pairs = usxgmii_sgmii_link_cmn_regs, 3263 3210 .num_regs = ARRAY_SIZE(usxgmii_sgmii_link_cmn_regs), 3264 3211 }; 3265 3212 3266 - static struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = { 3213 + static const struct cdns_torrent_vals usxgmii_sgmii_xcvr_diag_ln_vals = { 3267 3214 .reg_pairs = usxgmii_sgmii_xcvr_diag_ln_regs, 3268 3215 
.num_regs = ARRAY_SIZE(usxgmii_sgmii_xcvr_diag_ln_regs), 3269 3216 }; 3270 3217 3271 - static struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = { 3218 + static const struct cdns_torrent_vals sgmii_usxgmii_xcvr_diag_ln_vals = { 3272 3219 .reg_pairs = sgmii_usxgmii_xcvr_diag_ln_regs, 3273 3220 .num_regs = ARRAY_SIZE(sgmii_usxgmii_xcvr_diag_ln_regs), 3274 3221 }; 3275 3222 3276 3223 /* Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */ 3277 - static struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { 3224 + static const struct cdns_reg_pairs ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { 3278 3225 {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0}, 3279 3226 {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0}, 3280 3227 {0x061B, CMN_PLL0_VCOCAL_INIT_TMR}, ··· 3286 3233 {0x0138, CMN_PLL0_LOCK_PLLCNT_START} 3287 3234 }; 3288 3235 3289 - static struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { 3236 + static const struct cdns_torrent_vals ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { 3290 3237 .reg_pairs = ml_usxgmii_pll0_156_25_no_ssc_cmn_regs, 3291 3238 .num_regs = ARRAY_SIZE(ml_usxgmii_pll0_156_25_no_ssc_cmn_regs), 3292 3239 }; 3293 3240 3294 3241 /* Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */ 3295 - static struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { 3242 + static const struct cdns_reg_pairs ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { 3296 3243 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, 3297 3244 {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, 3298 3245 {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, ··· 3301 3248 {0x007F, CMN_TXPDCAL_TUNE} 3302 3249 }; 3303 3250 3304 - static struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = { 3251 + static const struct cdns_torrent_vals ml_sgmii_pll1_100_no_ssc_cmn_vals = { 3305 3252 .reg_pairs = ml_sgmii_pll1_100_no_ssc_cmn_regs, 3306 3253 .num_regs = ARRAY_SIZE(ml_sgmii_pll1_100_no_ssc_cmn_regs), 3307 3254 }; 3308 3255 3309 3256 /* TI J7200, Multilink USXGMII, using PLL0, 156.25 MHz Ref clk, no SSC */ 
3310 - static struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { 3257 + static const struct cdns_reg_pairs j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs[] = { 3311 3258 {0x0014, CMN_SSM_BIAS_TMR}, 3312 3259 {0x0028, CMN_PLLSM0_PLLPRE_TMR}, 3313 3260 {0x00A4, CMN_PLLSM0_PLLLOCK_TMR}, ··· 3333 3280 {0x0138, CMN_PLL0_LOCK_PLLCNT_START} 3334 3281 }; 3335 3282 3336 - static struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { 3283 + static const struct cdns_torrent_vals j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals = { 3337 3284 .reg_pairs = j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs, 3338 3285 .num_regs = ARRAY_SIZE(j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_regs), 3339 3286 }; 3340 3287 3341 3288 /* TI J7200, Multilink SGMII/QSGMII, using PLL1, 100 MHz Ref clk, no SSC */ 3342 - static struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { 3289 + static const struct cdns_reg_pairs j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs[] = { 3343 3290 {0x0028, CMN_PLLSM1_PLLPRE_TMR}, 3344 3291 {0x00A4, CMN_PLLSM1_PLLLOCK_TMR}, 3345 3292 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, ··· 3350 3297 {0x007F, CMN_TXPDCAL_TUNE} 3351 3298 }; 3352 3299 3353 - static struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = { 3300 + static const struct cdns_torrent_vals j7200_ml_sgmii_pll1_100_no_ssc_cmn_vals = { 3354 3301 .reg_pairs = j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs, 3355 3302 .num_regs = ARRAY_SIZE(j7200_ml_sgmii_pll1_100_no_ssc_cmn_regs), 3356 3303 }; 3357 3304 3358 3305 /* PCIe and USXGMII link configuration */ 3359 - static struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = { 3306 + static const struct cdns_reg_pairs pcie_usxgmii_link_cmn_regs[] = { 3360 3307 {0x0003, PHY_PLL_CFG}, 3361 3308 {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, 3362 3309 {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, 3363 3310 {0x0400, CMN_PDIAG_PLL1_CLK_SEL_M0} 3364 3311 }; 3365 3312 3366 - static struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = { 3313 + static 
const struct cdns_reg_pairs pcie_usxgmii_xcvr_diag_ln_regs[] = { 3367 3314 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3368 3315 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3369 3316 {0x0012, XCVR_DIAG_PLLDRC_CTRL} 3370 3317 }; 3371 3318 3372 - static struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = { 3319 + static const struct cdns_reg_pairs usxgmii_pcie_xcvr_diag_ln_regs[] = { 3373 3320 {0x0011, XCVR_DIAG_HSCLK_SEL}, 3374 3321 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3375 3322 {0x0089, XCVR_DIAG_PLLDRC_CTRL} 3376 3323 }; 3377 3324 3378 - static struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = { 3325 + static const struct cdns_torrent_vals pcie_usxgmii_link_cmn_vals = { 3379 3326 .reg_pairs = pcie_usxgmii_link_cmn_regs, 3380 3327 .num_regs = ARRAY_SIZE(pcie_usxgmii_link_cmn_regs), 3381 3328 }; 3382 3329 3383 - static struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = { 3330 + static const struct cdns_torrent_vals pcie_usxgmii_xcvr_diag_ln_vals = { 3384 3331 .reg_pairs = pcie_usxgmii_xcvr_diag_ln_regs, 3385 3332 .num_regs = ARRAY_SIZE(pcie_usxgmii_xcvr_diag_ln_regs), 3386 3333 }; 3387 3334 3388 - static struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = { 3335 + static const struct cdns_torrent_vals usxgmii_pcie_xcvr_diag_ln_vals = { 3389 3336 .reg_pairs = usxgmii_pcie_xcvr_diag_ln_regs, 3390 3337 .num_regs = ARRAY_SIZE(usxgmii_pcie_xcvr_diag_ln_regs), 3391 3338 }; ··· 3393 3340 /* 3394 3341 * Multilink USXGMII, using PLL1, 156.25 MHz Ref clk, no SSC 3395 3342 */ 3396 - static struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = { 3343 + static const struct cdns_reg_pairs ml_usxgmii_pll1_156_25_no_ssc_cmn_regs[] = { 3397 3344 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, 3398 3345 {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0}, 3399 3346 {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0}, ··· 3408 3355 {0x007F, CMN_TXPDCAL_TUNE} 3409 3356 }; 3410 3357 3411 - static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = { 3358 + static const struct cdns_reg_pairs 
ml_usxgmii_156_25_no_ssc_tx_ln_regs[] = { 3412 3359 {0x00F3, TX_PSC_A0}, 3413 3360 {0x04A2, TX_PSC_A2}, 3414 3361 {0x04A2, TX_PSC_A3 }, ··· 3416 3363 {0x0000, XCVR_DIAG_PSC_OVRD} 3417 3364 }; 3418 3365 3419 - static struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = { 3366 + static const struct cdns_reg_pairs ml_usxgmii_156_25_no_ssc_rx_ln_regs[] = { 3420 3367 {0x091D, RX_PSC_A0}, 3421 3368 {0x0900, RX_PSC_A2}, 3422 3369 {0x0100, RX_PSC_A3}, ··· 3434 3381 {0x018C, RX_CDRLF_CNFG} 3435 3382 }; 3436 3383 3437 - static struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = { 3384 + static const struct cdns_torrent_vals ml_usxgmii_pll1_156_25_no_ssc_cmn_vals = { 3438 3385 .reg_pairs = ml_usxgmii_pll1_156_25_no_ssc_cmn_regs, 3439 3386 .num_regs = ARRAY_SIZE(ml_usxgmii_pll1_156_25_no_ssc_cmn_regs), 3440 3387 }; 3441 3388 3442 - static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = { 3389 + static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_tx_ln_vals = { 3443 3390 .reg_pairs = ml_usxgmii_156_25_no_ssc_tx_ln_regs, 3444 3391 .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_tx_ln_regs), 3445 3392 }; 3446 3393 3447 - static struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = { 3394 + static const struct cdns_torrent_vals ml_usxgmii_156_25_no_ssc_rx_ln_vals = { 3448 3395 .reg_pairs = ml_usxgmii_156_25_no_ssc_rx_ln_regs, 3449 3396 .num_regs = ARRAY_SIZE(ml_usxgmii_156_25_no_ssc_rx_ln_regs), 3450 3397 }; 3451 3398 3452 3399 /* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */ 3453 - static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = { 3400 + static const struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = { 3454 3401 {0x0040, PHY_PMA_CMN_CTRL1}, 3455 3402 }; 3456 3403 3457 - static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = { 3404 + static const struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = { 3458 3405 .reg_pairs = ti_usxgmii_phy_pma_cmn_regs, 3459 3406 .num_regs = 
ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs), 3460 3407 }; 3461 3408 3462 3409 /* Single USXGMII link configuration */ 3463 - static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = { 3410 + static const struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = { 3464 3411 {0x0000, PHY_PLL_CFG}, 3465 3412 {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0} 3466 3413 }; 3467 3414 3468 - static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = { 3415 + static const struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = { 3469 3416 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3470 3417 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3471 3418 {0x0001, XCVR_DIAG_PLLDRC_CTRL} 3472 3419 }; 3473 3420 3474 - static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = { 3421 + static const struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = { 3475 3422 .reg_pairs = sl_usxgmii_link_cmn_regs, 3476 3423 .num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs), 3477 3424 }; 3478 3425 3479 - static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = { 3426 + static const struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = { 3480 3427 .reg_pairs = sl_usxgmii_xcvr_diag_ln_regs, 3481 3428 .num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs), 3482 3429 }; 3483 3430 3484 3431 /* Single link USXGMII, 156.25 MHz Ref clk, no SSC */ 3485 - static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = { 3432 + static const struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = { 3486 3433 {0x0014, CMN_SSM_BIAS_TMR}, 3487 3434 {0x0028, CMN_PLLSM0_PLLPRE_TMR}, 3488 3435 {0x00A4, CMN_PLLSM0_PLLLOCK_TMR}, ··· 3520 3467 {0x0138, CMN_PLL1_LOCK_PLLCNT_START} 3521 3468 }; 3522 3469 3523 - static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = { 3470 + static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = { 3524 3471 {0x07A2, TX_RCVDET_ST_TMR}, 3525 3472 {0x00F3, TX_PSC_A0}, 3526 3473 {0x04A2, TX_PSC_A2}, ··· 3529 3476 {0x0000, XCVR_DIAG_PSC_OVRD} 3530 3477 }; 3531 3478 3532 - static struct cdns_reg_pairs 
usxgmii_156_25_no_ssc_rx_ln_regs[] = { 3479 + static const struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = { 3533 3480 {0x0014, RX_SDCAL0_INIT_TMR}, 3534 3481 {0x0062, RX_SDCAL0_ITER_TMR}, 3535 3482 {0x0014, RX_SDCAL1_INIT_TMR}, ··· 3551 3498 {0x018C, RX_CDRLF_CNFG} 3552 3499 }; 3553 3500 3554 - static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = { 3501 + static const struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = { 3555 3502 .reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs, 3556 3503 .num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs), 3557 3504 }; 3558 3505 3559 - static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = { 3506 + static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = { 3560 3507 .reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs, 3561 3508 .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs), 3562 3509 }; 3563 3510 3564 - static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = { 3511 + static const struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = { 3565 3512 .reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs, 3566 3513 .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs), 3567 3514 }; 3568 3515 3569 3516 /* PCIe and DP link configuration */ 3570 - static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = { 3517 + static const struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = { 3571 3518 {0x0003, PHY_PLL_CFG}, 3572 3519 {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, 3573 3520 {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1} 3574 3521 }; 3575 3522 3576 - static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = { 3523 + static const struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = { 3577 3524 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3578 3525 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3579 3526 {0x0012, XCVR_DIAG_PLLDRC_CTRL} 3580 3527 }; 3581 3528 3582 - static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = { 3529 + static const struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = { 3583 3530 
{0x0001, XCVR_DIAG_HSCLK_SEL}, 3584 3531 {0x0009, XCVR_DIAG_PLLDRC_CTRL} 3585 3532 }; 3586 3533 3587 - static struct cdns_torrent_vals pcie_dp_link_cmn_vals = { 3534 + static const struct cdns_torrent_vals pcie_dp_link_cmn_vals = { 3588 3535 .reg_pairs = pcie_dp_link_cmn_regs, 3589 3536 .num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs), 3590 3537 }; 3591 3538 3592 - static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = { 3539 + static const struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = { 3593 3540 .reg_pairs = pcie_dp_xcvr_diag_ln_regs, 3594 3541 .num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs), 3595 3542 }; 3596 3543 3597 - static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = { 3544 + static const struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = { 3598 3545 .reg_pairs = dp_pcie_xcvr_diag_ln_regs, 3599 3546 .num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs), 3600 3547 }; 3601 3548 3602 3549 /* DP Multilink, 100 MHz Ref clk, no SSC */ 3603 - static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = { 3550 + static const struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = { 3604 3551 {0x007F, CMN_TXPUCAL_TUNE}, 3605 3552 {0x007F, CMN_TXPDCAL_TUNE} 3606 3553 }; 3607 3554 3608 - static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = { 3555 + static const struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = { 3609 3556 {0x00FB, TX_PSC_A0}, 3610 3557 {0x04AA, TX_PSC_A2}, 3611 3558 {0x04AA, TX_PSC_A3}, 3612 3559 {0x000F, XCVR_DIAG_BIDI_CTRL} 3613 3560 }; 3614 3561 3615 - static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = { 3562 + static const struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = { 3616 3563 {0x0000, RX_PSC_A0}, 3617 3564 {0x0000, RX_PSC_A2}, 3618 3565 {0x0000, RX_PSC_A3}, ··· 3622 3569 {0x0000, RX_REE_PERGCSM_CTRL} 3623 3570 }; 3624 3571 3625 - static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = { 3572 + static const struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = { 3626 3573 .reg_pairs = dp_100_no_ssc_cmn_regs, 3627 3574 
.num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs), 3628 3575 }; 3629 3576 3630 - static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = { 3577 + static const struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = { 3631 3578 .reg_pairs = dp_100_no_ssc_tx_ln_regs, 3632 3579 .num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs), 3633 3580 }; 3634 3581 3635 - static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = { 3582 + static const struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = { 3636 3583 .reg_pairs = dp_100_no_ssc_rx_ln_regs, 3637 3584 .num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs), 3638 3585 }; 3639 3586 3640 3587 /* Single DisplayPort(DP) link configuration */ 3641 - static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = { 3588 + static const struct cdns_reg_pairs sl_dp_link_cmn_regs[] = { 3642 3589 {0x0000, PHY_PLL_CFG}, 3643 3590 }; 3644 3591 3645 - static struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = { 3592 + static const struct cdns_reg_pairs sl_dp_xcvr_diag_ln_regs[] = { 3646 3593 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3647 3594 {0x0001, XCVR_DIAG_PLLDRC_CTRL} 3648 3595 }; 3649 3596 3650 - static struct cdns_torrent_vals sl_dp_link_cmn_vals = { 3597 + static const struct cdns_torrent_vals sl_dp_link_cmn_vals = { 3651 3598 .reg_pairs = sl_dp_link_cmn_regs, 3652 3599 .num_regs = ARRAY_SIZE(sl_dp_link_cmn_regs), 3653 3600 }; 3654 3601 3655 - static struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = { 3602 + static const struct cdns_torrent_vals sl_dp_xcvr_diag_ln_vals = { 3656 3603 .reg_pairs = sl_dp_xcvr_diag_ln_regs, 3657 3604 .num_regs = ARRAY_SIZE(sl_dp_xcvr_diag_ln_regs), 3658 3605 }; 3659 3606 3660 3607 /* Single DP, 19.2 MHz Ref clk, no SSC */ 3661 - static struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = { 3608 + static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_cmn_regs[] = { 3662 3609 {0x0014, CMN_SSM_BIAS_TMR}, 3663 3610 {0x0027, CMN_PLLSM0_PLLPRE_TMR}, 3664 3611 {0x00A1, CMN_PLLSM0_PLLLOCK_TMR}, ··· 3695 3642 {0x0003, CMN_PLL1_VCOCAL_TCTRL} 
3696 3643 }; 3697 3644 3698 - static struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = { 3645 + static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_tx_ln_regs[] = { 3699 3646 {0x0780, TX_RCVDET_ST_TMR}, 3700 3647 {0x00FB, TX_PSC_A0}, 3701 3648 {0x04AA, TX_PSC_A2}, ··· 3703 3650 {0x000F, XCVR_DIAG_BIDI_CTRL} 3704 3651 }; 3705 3652 3706 - static struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = { 3653 + static const struct cdns_reg_pairs sl_dp_19_2_no_ssc_rx_ln_regs[] = { 3707 3654 {0x0000, RX_PSC_A0}, 3708 3655 {0x0000, RX_PSC_A2}, 3709 3656 {0x0000, RX_PSC_A3}, ··· 3713 3660 {0x0000, RX_REE_PERGCSM_CTRL} 3714 3661 }; 3715 3662 3716 - static struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = { 3663 + static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_cmn_vals = { 3717 3664 .reg_pairs = sl_dp_19_2_no_ssc_cmn_regs, 3718 3665 .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_cmn_regs), 3719 3666 }; 3720 3667 3721 - static struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = { 3668 + static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_tx_ln_vals = { 3722 3669 .reg_pairs = sl_dp_19_2_no_ssc_tx_ln_regs, 3723 3670 .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_tx_ln_regs), 3724 3671 }; 3725 3672 3726 - static struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = { 3673 + static const struct cdns_torrent_vals sl_dp_19_2_no_ssc_rx_ln_vals = { 3727 3674 .reg_pairs = sl_dp_19_2_no_ssc_rx_ln_regs, 3728 3675 .num_regs = ARRAY_SIZE(sl_dp_19_2_no_ssc_rx_ln_regs), 3729 3676 }; 3730 3677 3731 3678 /* Single DP, 25 MHz Ref clk, no SSC */ 3732 - static struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = { 3679 + static const struct cdns_reg_pairs sl_dp_25_no_ssc_cmn_regs[] = { 3733 3680 {0x0019, CMN_SSM_BIAS_TMR}, 3734 3681 {0x0032, CMN_PLLSM0_PLLPRE_TMR}, 3735 3682 {0x00D1, CMN_PLLSM0_PLLLOCK_TMR}, ··· 3766 3713 {0x0003, CMN_PLL1_VCOCAL_TCTRL} 3767 3714 }; 3768 3715 3769 - static struct cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = { 3716 + static const struct 
cdns_reg_pairs sl_dp_25_no_ssc_tx_ln_regs[] = { 3770 3717 {0x09C4, TX_RCVDET_ST_TMR}, 3771 3718 {0x00FB, TX_PSC_A0}, 3772 3719 {0x04AA, TX_PSC_A2}, ··· 3774 3721 {0x000F, XCVR_DIAG_BIDI_CTRL} 3775 3722 }; 3776 3723 3777 - static struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = { 3724 + static const struct cdns_reg_pairs sl_dp_25_no_ssc_rx_ln_regs[] = { 3778 3725 {0x0000, RX_PSC_A0}, 3779 3726 {0x0000, RX_PSC_A2}, 3780 3727 {0x0000, RX_PSC_A3}, ··· 3784 3731 {0x0000, RX_REE_PERGCSM_CTRL} 3785 3732 }; 3786 3733 3787 - static struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = { 3734 + static const struct cdns_torrent_vals sl_dp_25_no_ssc_cmn_vals = { 3788 3735 .reg_pairs = sl_dp_25_no_ssc_cmn_regs, 3789 3736 .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_cmn_regs), 3790 3737 }; 3791 3738 3792 - static struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = { 3739 + static const struct cdns_torrent_vals sl_dp_25_no_ssc_tx_ln_vals = { 3793 3740 .reg_pairs = sl_dp_25_no_ssc_tx_ln_regs, 3794 3741 .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_tx_ln_regs), 3795 3742 }; 3796 3743 3797 - static struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = { 3744 + static const struct cdns_torrent_vals sl_dp_25_no_ssc_rx_ln_vals = { 3798 3745 .reg_pairs = sl_dp_25_no_ssc_rx_ln_regs, 3799 3746 .num_regs = ARRAY_SIZE(sl_dp_25_no_ssc_rx_ln_regs), 3800 3747 }; 3801 3748 3802 3749 /* Single DP, 100 MHz Ref clk, no SSC */ 3803 - static struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = { 3750 + static const struct cdns_reg_pairs sl_dp_100_no_ssc_cmn_regs[] = { 3804 3751 {0x0003, CMN_PLL0_VCOCAL_TCTRL}, 3805 3752 {0x0003, CMN_PLL1_VCOCAL_TCTRL} 3806 3753 }; 3807 3754 3808 - static struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = { 3755 + static const struct cdns_reg_pairs sl_dp_100_no_ssc_tx_ln_regs[] = { 3809 3756 {0x00FB, TX_PSC_A0}, 3810 3757 {0x04AA, TX_PSC_A2}, 3811 3758 {0x04AA, TX_PSC_A3}, 3812 3759 {0x000F, XCVR_DIAG_BIDI_CTRL} 3813 3760 }; 3814 3761 3815 - static struct cdns_reg_pairs 
sl_dp_100_no_ssc_rx_ln_regs[] = { 3762 + static const struct cdns_reg_pairs sl_dp_100_no_ssc_rx_ln_regs[] = { 3816 3763 {0x0000, RX_PSC_A0}, 3817 3764 {0x0000, RX_PSC_A2}, 3818 3765 {0x0000, RX_PSC_A3}, ··· 3822 3769 {0x0000, RX_REE_PERGCSM_CTRL} 3823 3770 }; 3824 3771 3825 - static struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = { 3772 + static const struct cdns_torrent_vals sl_dp_100_no_ssc_cmn_vals = { 3826 3773 .reg_pairs = sl_dp_100_no_ssc_cmn_regs, 3827 3774 .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_cmn_regs), 3828 3775 }; 3829 3776 3830 - static struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = { 3777 + static const struct cdns_torrent_vals sl_dp_100_no_ssc_tx_ln_vals = { 3831 3778 .reg_pairs = sl_dp_100_no_ssc_tx_ln_regs, 3832 3779 .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_tx_ln_regs), 3833 3780 }; 3834 3781 3835 - static struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = { 3782 + static const struct cdns_torrent_vals sl_dp_100_no_ssc_rx_ln_vals = { 3836 3783 .reg_pairs = sl_dp_100_no_ssc_rx_ln_regs, 3837 3784 .num_regs = ARRAY_SIZE(sl_dp_100_no_ssc_rx_ln_regs), 3838 3785 }; 3839 3786 3840 3787 /* USB and SGMII/QSGMII link configuration */ 3841 - static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { 3788 + static const struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { 3842 3789 {0x0002, PHY_PLL_CFG}, 3843 3790 {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}, 3844 3791 {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} 3845 3792 }; 3846 3793 3847 - static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = { 3794 + static const struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = { 3848 3795 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3849 3796 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3850 3797 {0x0041, XCVR_DIAG_PLLDRC_CTRL} 3851 3798 }; 3852 3799 3853 - static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = { 3800 + static const struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = { 3854 3801 {0x0011, XCVR_DIAG_HSCLK_SEL}, 3855 3802 {0x0003, XCVR_DIAG_HSCLK_DIV}, 3856 3803 
{0x009B, XCVR_DIAG_PLLDRC_CTRL} 3857 3804 }; 3858 3805 3859 - static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = { 3806 + static const struct cdns_torrent_vals usb_sgmii_link_cmn_vals = { 3860 3807 .reg_pairs = usb_sgmii_link_cmn_regs, 3861 3808 .num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs), 3862 3809 }; 3863 3810 3864 - static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = { 3811 + static const struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = { 3865 3812 .reg_pairs = usb_sgmii_xcvr_diag_ln_regs, 3866 3813 .num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs), 3867 3814 }; 3868 3815 3869 - static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = { 3816 + static const struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = { 3870 3817 .reg_pairs = sgmii_usb_xcvr_diag_ln_regs, 3871 3818 .num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs), 3872 3819 }; 3873 3820 3874 3821 /* PCIe and USB Unique SSC link configuration */ 3875 - static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = { 3822 + static const struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = { 3876 3823 {0x0003, PHY_PLL_CFG}, 3877 3824 {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, 3878 3825 {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, 3879 3826 {0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0} 3880 3827 }; 3881 3828 3882 - static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = { 3829 + static const struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = { 3883 3830 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3884 3831 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3885 3832 {0x0012, XCVR_DIAG_PLLDRC_CTRL} 3886 3833 }; 3887 3834 3888 - static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = { 3835 + static const struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = { 3889 3836 {0x0011, XCVR_DIAG_HSCLK_SEL}, 3890 3837 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3891 3838 {0x00C9, XCVR_DIAG_PLLDRC_CTRL} 3892 3839 }; 3893 3840 3894 - static struct cdns_torrent_vals pcie_usb_link_cmn_vals = { 3841 + static const struct cdns_torrent_vals 
pcie_usb_link_cmn_vals = { 3895 3842 .reg_pairs = pcie_usb_link_cmn_regs, 3896 3843 .num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs), 3897 3844 }; 3898 3845 3899 - static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = { 3846 + static const struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = { 3900 3847 .reg_pairs = pcie_usb_xcvr_diag_ln_regs, 3901 3848 .num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs), 3902 3849 }; 3903 3850 3904 - static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = { 3851 + static const struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = { 3905 3852 .reg_pairs = usb_pcie_xcvr_diag_ln_regs, 3906 3853 .num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs), 3907 3854 }; 3908 3855 3909 3856 /* USB 100 MHz Ref clk, internal SSC */ 3910 - static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { 3857 + static const struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { 3911 3858 {0x0004, CMN_PLL0_DSM_DIAG_M0}, 3912 3859 {0x0004, CMN_PLL0_DSM_DIAG_M1}, 3913 3860 {0x0004, CMN_PLL1_DSM_DIAG_M0}, ··· 3960 3907 {0x007F, CMN_TXPDCAL_TUNE} 3961 3908 }; 3962 3909 3963 - static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = { 3910 + static const struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = { 3964 3911 .reg_pairs = usb_100_int_ssc_cmn_regs, 3965 3912 .num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs), 3966 3913 }; 3967 3914 3968 3915 /* Single USB link configuration */ 3969 - static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = { 3916 + static const struct cdns_reg_pairs sl_usb_link_cmn_regs[] = { 3970 3917 {0x0000, PHY_PLL_CFG}, 3971 3918 {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} 3972 3919 }; 3973 3920 3974 - static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = { 3921 + static const struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = { 3975 3922 {0x0000, XCVR_DIAG_HSCLK_SEL}, 3976 3923 {0x0001, XCVR_DIAG_HSCLK_DIV}, 3977 3924 {0x0041, XCVR_DIAG_PLLDRC_CTRL} 3978 3925 }; 3979 3926 3980 - static struct cdns_torrent_vals sl_usb_link_cmn_vals 
= { 3927 + static const struct cdns_torrent_vals sl_usb_link_cmn_vals = { 3981 3928 .reg_pairs = sl_usb_link_cmn_regs, 3982 3929 .num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs), 3983 3930 }; 3984 3931 3985 - static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = { 3932 + static const struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = { 3986 3933 .reg_pairs = sl_usb_xcvr_diag_ln_regs, 3987 3934 .num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs), 3988 3935 }; 3989 3936 3990 3937 /* USB PHY PCS common configuration */ 3991 - static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = { 3938 + static const struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = { 3992 3939 {0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0}, 3993 3940 {0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0}, 3994 3941 {0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1} 3995 3942 }; 3996 3943 3997 - static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = { 3944 + static const struct cdns_torrent_vals usb_phy_pcs_cmn_vals = { 3998 3945 .reg_pairs = usb_phy_pcs_cmn_regs, 3999 3946 .num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs), 4000 3947 }; 4001 3948 4002 3949 /* USB 100 MHz Ref clk, no SSC */ 4003 - static struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = { 3950 + static const struct cdns_reg_pairs sl_usb_100_no_ssc_cmn_regs[] = { 4004 3951 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, 4005 3952 {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, 4006 3953 {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, ··· 4010 3957 {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} 4011 3958 }; 4012 3959 4013 - static struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = { 3960 + static const struct cdns_torrent_vals sl_usb_100_no_ssc_cmn_vals = { 4014 3961 .reg_pairs = sl_usb_100_no_ssc_cmn_regs, 4015 3962 .num_regs = ARRAY_SIZE(sl_usb_100_no_ssc_cmn_regs), 4016 3963 }; 4017 3964 4018 - static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = { 3965 + static const struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = { 4019 3966 {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, 4020 3967 {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD}, 4021 
3968 {0x007F, CMN_TXPUCAL_TUNE}, 4022 3969 {0x007F, CMN_TXPDCAL_TUNE} 4023 3970 }; 4024 3971 4025 - static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { 3972 + static const struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { 4026 3973 {0x02FF, TX_PSC_A0}, 4027 3974 {0x06AF, TX_PSC_A1}, 4028 3975 {0x06AE, TX_PSC_A2}, ··· 4032 3979 {0x0003, XCVR_DIAG_PSC_OVRD} 4033 3980 }; 4034 3981 4035 - static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { 3982 + static const struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { 4036 3983 {0x0D1D, RX_PSC_A0}, 4037 3984 {0x0D1D, RX_PSC_A1}, 4038 3985 {0x0D00, RX_PSC_A2}, ··· 4055 4002 {0x0003, RX_CDRLF_CNFG3} 4056 4003 }; 4057 4004 4058 - static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = { 4005 + static const struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = { 4059 4006 .reg_pairs = usb_100_no_ssc_cmn_regs, 4060 4007 .num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs), 4061 4008 }; 4062 4009 4063 - static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = { 4010 + static const struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = { 4064 4011 .reg_pairs = usb_100_no_ssc_tx_ln_regs, 4065 4012 .num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs), 4066 4013 }; 4067 4014 4068 - static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = { 4015 + static const struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = { 4069 4016 .reg_pairs = usb_100_no_ssc_rx_ln_regs, 4070 4017 .num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs), 4071 4018 }; 4072 4019 4073 4020 /* Single link USB, 100 MHz Ref clk, internal SSC */ 4074 - static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { 4021 + static const struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { 4075 4022 {0x0004, CMN_PLL0_DSM_DIAG_M0}, 4076 4023 {0x0004, CMN_PLL1_DSM_DIAG_M0}, 4077 4024 {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, ··· 4112 4059 {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} 4113 4060 }; 4114 4061 4115 - static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = 
{ 4062 + static const struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = { 4116 4063 .reg_pairs = sl_usb_100_int_ssc_cmn_regs, 4117 4064 .num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs), 4118 4065 }; 4119 4066 4120 4067 /* PCIe and SGMII/QSGMII Unique SSC link configuration */ 4121 - static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = { 4068 + static const struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = { 4122 4069 {0x0003, PHY_PLL_CFG}, 4123 4070 {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, 4124 4071 {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, 4125 4072 {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} 4126 4073 }; 4127 4074 4128 - static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = { 4075 + static const struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = { 4129 4076 {0x0000, XCVR_DIAG_HSCLK_SEL}, 4130 4077 {0x0001, XCVR_DIAG_HSCLK_DIV}, 4131 4078 {0x0012, XCVR_DIAG_PLLDRC_CTRL} 4132 4079 }; 4133 4080 4134 - static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = { 4081 + static const struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = { 4135 4082 {0x0011, XCVR_DIAG_HSCLK_SEL}, 4136 4083 {0x0003, XCVR_DIAG_HSCLK_DIV}, 4137 4084 {0x009B, XCVR_DIAG_PLLDRC_CTRL} 4138 4085 }; 4139 4086 4140 - static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = { 4087 + static const struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = { 4141 4088 .reg_pairs = pcie_sgmii_link_cmn_regs, 4142 4089 .num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs), 4143 4090 }; 4144 4091 4145 - static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = { 4092 + static const struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = { 4146 4093 .reg_pairs = pcie_sgmii_xcvr_diag_ln_regs, 4147 4094 .num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs), 4148 4095 }; 4149 4096 4150 - static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = { 4097 + static const struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = { 4151 4098 .reg_pairs = sgmii_pcie_xcvr_diag_ln_regs, 4152 4099 .num_regs = 
ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs), 4153 4100 }; 4154 4101 4155 4102 /* SGMII 100 MHz Ref clk, no SSC */ 4156 - static struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = { 4103 + static const struct cdns_reg_pairs sl_sgmii_100_no_ssc_cmn_regs[] = { 4157 4104 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, 4158 4105 {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, 4159 4106 {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, ··· 4161 4108 {0x0003, CMN_PLL1_VCOCAL_TCTRL} 4162 4109 }; 4163 4110 4164 - static struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = { 4111 + static const struct cdns_torrent_vals sl_sgmii_100_no_ssc_cmn_vals = { 4165 4112 .reg_pairs = sl_sgmii_100_no_ssc_cmn_regs, 4166 4113 .num_regs = ARRAY_SIZE(sl_sgmii_100_no_ssc_cmn_regs), 4167 4114 }; 4168 4115 4169 - static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = { 4116 + static const struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = { 4170 4117 {0x007F, CMN_TXPUCAL_TUNE}, 4171 4118 {0x007F, CMN_TXPDCAL_TUNE} 4172 4119 }; 4173 4120 4174 - static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { 4121 + static const struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { 4175 4122 {0x00F3, TX_PSC_A0}, 4176 4123 {0x04A2, TX_PSC_A2}, 4177 4124 {0x04A2, TX_PSC_A3}, ··· 4180 4127 {0x0002, XCVR_DIAG_PSC_OVRD} 4181 4128 }; 4182 4129 4183 - static struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { 4130 + static const struct cdns_reg_pairs ti_sgmii_100_no_ssc_tx_ln_regs[] = { 4184 4131 {0x00F3, TX_PSC_A0}, 4185 4132 {0x04A2, TX_PSC_A2}, 4186 4133 {0x04A2, TX_PSC_A3}, ··· 4190 4137 {0x4000, XCVR_DIAG_RXCLK_CTRL} 4191 4138 }; 4192 4139 4193 - static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { 4140 + static const struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { 4194 4141 {0x091D, RX_PSC_A0}, 4195 4142 {0x0900, RX_PSC_A2}, 4196 4143 {0x0100, RX_PSC_A3}, ··· 4208 4155 {0x018C, RX_CDRLF_CNFG}, 4209 4156 }; 4210 4157 4211 - static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = { 4158 + static 
const struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = { 4212 4159 .reg_pairs = sgmii_100_no_ssc_cmn_regs, 4213 4160 .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs), 4214 4161 }; 4215 4162 4216 - static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = { 4163 + static const struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = { 4217 4164 .reg_pairs = sgmii_100_no_ssc_tx_ln_regs, 4218 4165 .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs), 4219 4166 }; 4220 4167 4221 - static struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = { 4168 + static const struct cdns_torrent_vals ti_sgmii_100_no_ssc_tx_ln_vals = { 4222 4169 .reg_pairs = ti_sgmii_100_no_ssc_tx_ln_regs, 4223 4170 .num_regs = ARRAY_SIZE(ti_sgmii_100_no_ssc_tx_ln_regs), 4224 4171 }; 4225 4172 4226 - static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = { 4173 + static const struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = { 4227 4174 .reg_pairs = sgmii_100_no_ssc_rx_ln_regs, 4228 4175 .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs), 4229 4176 }; 4230 4177 4231 4178 /* TI J7200, multilink SGMII */ 4232 - static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = { 4179 + static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_tx_ln_regs[] = { 4233 4180 {0x07A2, TX_RCVDET_ST_TMR}, 4234 4181 {0x00F3, TX_PSC_A0}, 4235 4182 {0x04A2, TX_PSC_A2}, ··· 4240 4187 {0x4000, XCVR_DIAG_RXCLK_CTRL} 4241 4188 }; 4242 4189 4243 - static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = { 4190 + static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_tx_ln_vals = { 4244 4191 .reg_pairs = j7200_sgmii_100_no_ssc_tx_ln_regs, 4245 4192 .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_tx_ln_regs), 4246 4193 }; 4247 4194 4248 - static struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = { 4195 + static const struct cdns_reg_pairs j7200_sgmii_100_no_ssc_rx_ln_regs[] = { 4249 4196 {0x0014, RX_SDCAL0_INIT_TMR}, 4250 4197 {0x0062, RX_SDCAL0_ITER_TMR}, 4251 4198 {0x0014, 
RX_SDCAL1_INIT_TMR}, ··· 4267 4214 {0x018C, RX_CDRLF_CNFG} 4268 4215 }; 4269 4216 4270 - static struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = { 4217 + static const struct cdns_torrent_vals j7200_sgmii_100_no_ssc_rx_ln_vals = { 4271 4218 .reg_pairs = j7200_sgmii_100_no_ssc_rx_ln_regs, 4272 4219 .num_regs = ARRAY_SIZE(j7200_sgmii_100_no_ssc_rx_ln_regs), 4273 4220 }; 4274 4221 4275 4222 /* SGMII 100 MHz Ref clk, internal SSC */ 4276 - static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { 4223 + static const struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { 4277 4224 {0x0004, CMN_PLL0_DSM_DIAG_M0}, 4278 4225 {0x0004, CMN_PLL0_DSM_DIAG_M1}, 4279 4226 {0x0004, CMN_PLL1_DSM_DIAG_M0}, ··· 4324 4271 {0x007F, CMN_TXPDCAL_TUNE} 4325 4272 }; 4326 4273 4327 - static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = { 4274 + static const struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = { 4328 4275 .reg_pairs = sgmii_100_int_ssc_cmn_regs, 4329 4276 .num_regs = ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs), 4330 4277 }; 4331 4278 4332 4279 /* QSGMII 100 MHz Ref clk, no SSC */ 4333 - static struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = { 4280 + static const struct cdns_reg_pairs sl_qsgmii_100_no_ssc_cmn_regs[] = { 4334 4281 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, 4335 4282 {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, 4336 4283 {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0}, ··· 4338 4285 {0x0003, CMN_PLL1_VCOCAL_TCTRL} 4339 4286 }; 4340 4287 4341 - static struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = { 4288 + static const struct cdns_torrent_vals sl_qsgmii_100_no_ssc_cmn_vals = { 4342 4289 .reg_pairs = sl_qsgmii_100_no_ssc_cmn_regs, 4343 4290 .num_regs = ARRAY_SIZE(sl_qsgmii_100_no_ssc_cmn_regs), 4344 4291 }; 4345 4292 4346 - static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = { 4293 + static const struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = { 4347 4294 {0x007F, CMN_TXPUCAL_TUNE}, 4348 4295 {0x007F, CMN_TXPDCAL_TUNE} 4349 4296 }; 4350 
4297 4351 - static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { 4298 + static const struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { 4352 4299 {0x00F3, TX_PSC_A0}, 4353 4300 {0x04A2, TX_PSC_A2}, 4354 4301 {0x04A2, TX_PSC_A3}, ··· 4358 4305 {0x0002, XCVR_DIAG_PSC_OVRD} 4359 4306 }; 4360 4307 4361 - static struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { 4308 + static const struct cdns_reg_pairs ti_qsgmii_100_no_ssc_tx_ln_regs[] = { 4362 4309 {0x00F3, TX_PSC_A0}, 4363 4310 {0x04A2, TX_PSC_A2}, 4364 4311 {0x04A2, TX_PSC_A3}, ··· 4369 4316 {0x4000, XCVR_DIAG_RXCLK_CTRL} 4370 4317 }; 4371 4318 4372 - static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { 4319 + static const struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { 4373 4320 {0x091D, RX_PSC_A0}, 4374 4321 {0x0900, RX_PSC_A2}, 4375 4322 {0x0100, RX_PSC_A3}, ··· 4387 4334 {0x018C, RX_CDRLF_CNFG}, 4388 4335 }; 4389 4336 4390 - static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = { 4337 + static const struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = { 4391 4338 .reg_pairs = qsgmii_100_no_ssc_cmn_regs, 4392 4339 .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs), 4393 4340 }; 4394 4341 4395 - static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = { 4342 + static const struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = { 4396 4343 .reg_pairs = qsgmii_100_no_ssc_tx_ln_regs, 4397 4344 .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs), 4398 4345 }; 4399 4346 4400 - static struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = { 4347 + static const struct cdns_torrent_vals ti_qsgmii_100_no_ssc_tx_ln_vals = { 4401 4348 .reg_pairs = ti_qsgmii_100_no_ssc_tx_ln_regs, 4402 4349 .num_regs = ARRAY_SIZE(ti_qsgmii_100_no_ssc_tx_ln_regs), 4403 4350 }; 4404 4351 4405 - static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = { 4352 + static const struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = { 4406 4353 .reg_pairs = 
qsgmii_100_no_ssc_rx_ln_regs, 4407 4354 .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs), 4408 4355 }; 4409 4356 4410 4357 /* TI J7200, multilink QSGMII */ 4411 - static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = { 4358 + static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_tx_ln_regs[] = { 4412 4359 {0x07A2, TX_RCVDET_ST_TMR}, 4413 4360 {0x00F3, TX_PSC_A0}, 4414 4361 {0x04A2, TX_PSC_A2}, ··· 4420 4367 {0x4000, XCVR_DIAG_RXCLK_CTRL} 4421 4368 }; 4422 4369 4423 - static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = { 4370 + static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_tx_ln_vals = { 4424 4371 .reg_pairs = j7200_qsgmii_100_no_ssc_tx_ln_regs, 4425 4372 .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_tx_ln_regs), 4426 4373 }; 4427 4374 4428 - static struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = { 4375 + static const struct cdns_reg_pairs j7200_qsgmii_100_no_ssc_rx_ln_regs[] = { 4429 4376 {0x0014, RX_SDCAL0_INIT_TMR}, 4430 4377 {0x0062, RX_SDCAL0_ITER_TMR}, 4431 4378 {0x0014, RX_SDCAL1_INIT_TMR}, ··· 4447 4394 {0x018C, RX_CDRLF_CNFG} 4448 4395 }; 4449 4396 4450 - static struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = { 4397 + static const struct cdns_torrent_vals j7200_qsgmii_100_no_ssc_rx_ln_vals = { 4451 4398 .reg_pairs = j7200_qsgmii_100_no_ssc_rx_ln_regs, 4452 4399 .num_regs = ARRAY_SIZE(j7200_qsgmii_100_no_ssc_rx_ln_regs), 4453 4400 }; 4454 4401 4455 4402 /* QSGMII 100 MHz Ref clk, internal SSC */ 4456 - static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { 4403 + static const struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { 4457 4404 {0x0004, CMN_PLL0_DSM_DIAG_M0}, 4458 4405 {0x0004, CMN_PLL0_DSM_DIAG_M1}, 4459 4406 {0x0004, CMN_PLL1_DSM_DIAG_M0}, ··· 4504 4451 {0x007F, CMN_TXPDCAL_TUNE} 4505 4452 }; 4506 4453 4507 - static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = { 4454 + static const struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = { 4508 
4455 .reg_pairs = qsgmii_100_int_ssc_cmn_regs, 4509 4456 .num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs), 4510 4457 }; 4511 4458 4512 4459 /* Single SGMII/QSGMII link configuration */ 4513 - static struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = { 4460 + static const struct cdns_reg_pairs sl_sgmii_link_cmn_regs[] = { 4514 4461 {0x0000, PHY_PLL_CFG}, 4515 4462 {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0} 4516 4463 }; 4517 4464 4518 - static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = { 4465 + static const struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = { 4519 4466 {0x0000, XCVR_DIAG_HSCLK_SEL}, 4520 4467 {0x0003, XCVR_DIAG_HSCLK_DIV}, 4521 4468 {0x0013, XCVR_DIAG_PLLDRC_CTRL} 4522 4469 }; 4523 4470 4524 - static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = { 4471 + static const struct cdns_torrent_vals sl_sgmii_link_cmn_vals = { 4525 4472 .reg_pairs = sl_sgmii_link_cmn_regs, 4526 4473 .num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs), 4527 4474 }; 4528 4475 4529 - static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { 4476 + static const struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { 4530 4477 .reg_pairs = sl_sgmii_xcvr_diag_ln_regs, 4531 4478 .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs), 4532 4479 }; 4533 4480 4534 4481 /* Multi link PCIe, 100 MHz Ref clk, internal SSC */ 4535 - static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { 4482 + static const struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { 4536 4483 {0x0004, CMN_PLL0_DSM_DIAG_M0}, 4537 4484 {0x0004, CMN_PLL0_DSM_DIAG_M1}, 4538 4485 {0x0004, CMN_PLL1_DSM_DIAG_M0}, ··· 4581 4528 {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} 4582 4529 }; 4583 4530 4584 - static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = { 4531 + static const struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = { 4585 4532 .reg_pairs = pcie_100_int_ssc_cmn_regs, 4586 4533 .num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs), 4587 4534 }; 4588 4535 4589 4536 /* Single link PCIe, 100 MHz Ref clk, 
internal SSC */ 4590 - static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { 4537 + static const struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { 4591 4538 {0x0004, CMN_PLL0_DSM_DIAG_M0}, 4592 4539 {0x0004, CMN_PLL0_DSM_DIAG_M1}, 4593 4540 {0x0004, CMN_PLL1_DSM_DIAG_M0}, ··· 4636 4583 {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} 4637 4584 }; 4638 4585 4639 - static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = { 4586 + static const struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = { 4640 4587 .reg_pairs = sl_pcie_100_int_ssc_cmn_regs, 4641 4588 .num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs), 4642 4589 }; 4643 4590 4644 4591 /* PCIe, 100 MHz Ref clk, no SSC & external SSC */ 4645 - static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = { 4592 + static const struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = { 4646 4593 {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0}, 4647 4594 {0x001E, CMN_PLL1_DSM_FBH_OVRD_M0}, 4648 4595 {0x000C, CMN_PLL1_DSM_FBL_OVRD_M0} 4649 4596 }; 4650 4597 4651 - static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = { 4598 + static const struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = { 4652 4599 {0x0019, RX_REE_TAP1_CLIP}, 4653 4600 {0x0019, RX_REE_TAP2TON_CLIP}, 4654 4601 {0x0001, RX_DIAG_ACYA} 4655 4602 }; 4656 4603 4657 - static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = { 4604 + static const struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = { 4658 4605 .reg_pairs = pcie_100_ext_no_ssc_cmn_regs, 4659 4606 .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs), 4660 4607 }; 4661 4608 4662 - static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = { 4609 + static const struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = { 4663 4610 .reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs, 4664 4611 .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs), 4665 4612 }; 4666 4613 4667 - static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { 4614 + static const struct 
cdns_torrent_vals_entry link_cmn_vals_entries[] = { 4668 4615 {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals}, 4669 4616 {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals}, 4670 4617 {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals}, ··· 4700 4647 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_link_cmn_vals}, 4701 4648 }; 4702 4649 4703 - static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { 4650 + static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { 4704 4651 {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals}, 4705 4652 {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals}, 4706 4653 {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals}, ··· 4736 4683 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &usxgmii_sgmii_xcvr_diag_ln_vals}, 4737 4684 }; 4738 4685 4739 - static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = { 4686 + static const struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = { 4740 4687 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals}, 4741 4688 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals}, 4742 4689 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals}, ··· 4744 4691 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals}, 4745 4692 }; 4746 4693 4747 - static struct cdns_torrent_vals_entry cmn_vals_entries[] = { 4694 + static const struct cdns_torrent_vals_entry cmn_vals_entries[] = { 4748 4695 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals}, 4749 4696 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals}, 4750 4697 ··· 4826 4773 {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_pll0_156_25_no_ssc_cmn_vals}, 4827 4774 }; 4828 4775 4829 - static 
struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { 4776 + static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { 4830 4777 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals}, 4831 4778 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals}, 4832 4779 ··· 4908 4855 {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &ml_usxgmii_156_25_no_ssc_tx_ln_vals}, 4909 4856 }; 4910 4857 4911 - static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { 4858 + static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { 4912 4859 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals}, 4913 4860 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals}, 4914 4861 ··· 5019 4966 }, 5020 4967 }; 5021 4968 5022 - static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = { 4969 + static const struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = { 5023 4970 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals}, 5024 4971 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_PCIE), &ti_usxgmii_phy_pma_cmn_vals}, 5025 4972 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_SGMII), &ti_usxgmii_phy_pma_cmn_vals}, 5026 4973 {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_QSGMII), &ti_usxgmii_phy_pma_cmn_vals}, 5027 4974 }; 5028 4975 5029 - static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { 4976 + static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { 5030 4977 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals}, 5031 4978 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals}, 5032 4979 ··· 5142 5089 }; 5143 5090 5144 5091 /* TI J7200 (Torrent SD0805) */ 
5145 - static struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { 5092 + static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { 5146 5093 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals}, 5147 5094 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals}, 5148 5095 ··· 5224 5171 {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &j7200_ml_usxgmii_pll0_156_25_no_ssc_cmn_vals}, 5225 5172 }; 5226 5173 5227 - static struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { 5174 + static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { 5228 5175 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals}, 5229 5176 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals}, 5230 5177 ··· 5306 5253 {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_100_MHZ, TYPE_USXGMII, TYPE_QSGMII, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals}, 5307 5254 }; 5308 5255 5309 - static struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { 5256 + static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { 5310 5257 {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals}, 5311 5258 {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals}, 5312 5259
+3 -9
drivers/phy/hisilicon/phy-hisi-inno-usb2.c
··· 138 138 struct device_node *np = dev->of_node; 139 139 struct hisi_inno_phy_priv *priv; 140 140 struct phy_provider *provider; 141 - struct device_node *child; 142 141 int i = 0; 143 142 int ret; 144 143 ··· 161 162 162 163 priv->type = (uintptr_t) of_device_get_match_data(dev); 163 164 164 - for_each_child_of_node(np, child) { 165 + for_each_child_of_node_scoped(np, child) { 165 166 struct reset_control *rst; 166 167 struct phy *phy; 167 168 168 169 rst = of_reset_control_get_exclusive(child, NULL); 169 - if (IS_ERR(rst)) { 170 - of_node_put(child); 170 + if (IS_ERR(rst)) 171 171 return PTR_ERR(rst); 172 - } 173 172 174 173 priv->ports[i].utmi_rst = rst; 175 174 priv->ports[i].priv = priv; 176 175 177 176 phy = devm_phy_create(dev, child, &hisi_inno_phy_ops); 178 - if (IS_ERR(phy)) { 179 - of_node_put(child); 177 + if (IS_ERR(phy)) 180 178 return PTR_ERR(phy); 181 - } 182 179 183 180 phy_set_bus_width(phy, 8); 184 181 phy_set_drvdata(phy, &priv->ports[i]); ··· 182 187 183 188 if (i >= INNO_PHY_PORT_NUM) { 184 189 dev_warn(dev, "Support %d ports in maximum\n", i); 185 - of_node_put(child); 186 190 break; 187 191 } 188 192 }
+2 -2
drivers/phy/marvell/phy-mvebu-cp110-comphy.c
··· 244 244 GEN_CONF(4, 1, PHY_MODE_USB_HOST_SS, COMPHY_FW_MODE_USB3H), 245 245 GEN_CONF(4, 1, PHY_MODE_PCIE, COMPHY_FW_MODE_PCIE), 246 246 ETH_CONF(4, 1, PHY_INTERFACE_MODE_SGMII, 0x1, COMPHY_FW_MODE_SGMII), 247 - ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, -1, COMPHY_FW_MODE_2500BASEX), 248 - ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, -1, COMPHY_FW_MODE_XFI), 247 + ETH_CONF(4, 1, PHY_INTERFACE_MODE_2500BASEX, 0x1, COMPHY_FW_MODE_2500BASEX), 248 + ETH_CONF(4, 1, PHY_INTERFACE_MODE_5GBASER, 0x1, COMPHY_FW_MODE_XFI), 249 249 ETH_CONF(4, 1, PHY_INTERFACE_MODE_10GBASER, -1, COMPHY_FW_MODE_XFI), 250 250 /* lane 5 */ 251 251 ETH_CONF(5, 1, PHY_INTERFACE_MODE_RXAUI, 0x2, COMPHY_FW_MODE_RXAUI),
+11 -19
drivers/phy/mediatek/phy-mtk-tphy.c
··· 1577 1577 { 1578 1578 struct device *dev = &pdev->dev; 1579 1579 struct device_node *np = dev->of_node; 1580 - struct device_node *child_np; 1581 1580 struct phy_provider *provider; 1582 1581 struct resource *sif_res; 1583 1582 struct mtk_tphy *tphy; 1584 1583 struct resource res; 1585 - int port, retval; 1584 + int port; 1586 1585 1587 1586 tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL); 1588 1587 if (!tphy) ··· 1622 1623 } 1623 1624 1624 1625 port = 0; 1625 - for_each_child_of_node(np, child_np) { 1626 + for_each_child_of_node_scoped(np, child_np) { 1626 1627 struct mtk_phy_instance *instance; 1627 1628 struct clk_bulk_data *clks; 1628 1629 struct device *subdev; 1629 1630 struct phy *phy; 1631 + int retval; 1630 1632 1631 1633 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); 1632 - if (!instance) { 1633 - retval = -ENOMEM; 1634 - goto put_child; 1635 - } 1634 + if (!instance) 1635 + return -ENOMEM; 1636 1636 1637 1637 tphy->phys[port] = instance; 1638 1638 1639 1639 phy = devm_phy_create(dev, child_np, &mtk_tphy_ops); 1640 1640 if (IS_ERR(phy)) { 1641 1641 dev_err(dev, "failed to create phy\n"); 1642 - retval = PTR_ERR(phy); 1643 - goto put_child; 1642 + return PTR_ERR(phy); 1644 1643 } 1645 1644 1646 1645 subdev = &phy->dev; ··· 1646 1649 if (retval) { 1647 1650 dev_err(subdev, "failed to get address resource(id-%d)\n", 1648 1651 port); 1649 - goto put_child; 1652 + return retval; 1650 1653 } 1651 1654 1652 1655 instance->port_base = devm_ioremap_resource(subdev, &res); 1653 - if (IS_ERR(instance->port_base)) { 1654 - retval = PTR_ERR(instance->port_base); 1655 - goto put_child; 1656 - } 1656 + if (IS_ERR(instance->port_base)) 1657 + return PTR_ERR(instance->port_base); 1657 1658 1658 1659 instance->phy = phy; 1659 1660 instance->index = port; ··· 1663 1668 clks[1].id = "da_ref"; /* analog clock */ 1664 1669 retval = devm_clk_bulk_get_optional(subdev, TPHY_CLKS_CNT, clks); 1665 1670 if (retval) 1666 - goto put_child; 1671 + return retval; 
1667 1672 1668 1673 retval = phy_type_syscon_get(instance, child_np); 1669 1674 if (retval) 1670 - goto put_child; 1675 + return retval; 1671 1676 } 1672 1677 1673 1678 provider = devm_of_phy_provider_register(dev, mtk_phy_xlate); 1674 1679 1675 1680 return PTR_ERR_OR_ZERO(provider); 1676 - put_child: 1677 - of_node_put(child_np); 1678 - return retval; 1679 1681 } 1680 1682 1681 1683 static struct platform_driver mtk_tphy_driver = {
+9 -18
drivers/phy/mediatek/phy-mtk-xsphy.c
··· 432 432 { 433 433 struct device *dev = &pdev->dev; 434 434 struct device_node *np = dev->of_node; 435 - struct device_node *child_np; 436 435 struct phy_provider *provider; 437 436 struct resource *glb_res; 438 437 struct mtk_xsphy *xsphy; 439 438 struct resource res; 440 - int port, retval; 439 + int port; 441 440 442 441 xsphy = devm_kzalloc(dev, sizeof(*xsphy), GFP_KERNEL); 443 442 if (!xsphy) ··· 470 471 device_property_read_u32(dev, "mediatek,src-coef", &xsphy->src_coef); 471 472 472 473 port = 0; 473 - for_each_child_of_node(np, child_np) { 474 + for_each_child_of_node_scoped(np, child_np) { 474 475 struct xsphy_instance *inst; 475 476 struct phy *phy; 477 + int retval; 476 478 477 479 inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL); 478 - if (!inst) { 479 - retval = -ENOMEM; 480 - goto put_child; 481 - } 480 + if (!inst) 481 + return -ENOMEM; 482 482 483 483 xsphy->phys[port] = inst; 484 484 485 485 phy = devm_phy_create(dev, child_np, &mtk_xsphy_ops); 486 486 if (IS_ERR(phy)) { 487 487 dev_err(dev, "failed to create phy\n"); 488 - retval = PTR_ERR(phy); 489 - goto put_child; 488 + return PTR_ERR(phy); 490 489 } 491 490 492 491 retval = of_address_to_resource(child_np, 0, &res); 493 492 if (retval) { 494 493 dev_err(dev, "failed to get address resource(id-%d)\n", 495 494 port); 496 - goto put_child; 495 + return retval; 497 496 } 498 497 499 498 inst->port_base = devm_ioremap_resource(&phy->dev, &res); 500 499 if (IS_ERR(inst->port_base)) { 501 500 dev_err(dev, "failed to remap phy regs\n"); 502 - retval = PTR_ERR(inst->port_base); 503 - goto put_child; 501 + return PTR_ERR(inst->port_base); 504 502 } 505 503 506 504 inst->phy = phy; ··· 508 512 inst->ref_clk = devm_clk_get(&phy->dev, "ref"); 509 513 if (IS_ERR(inst->ref_clk)) { 510 514 dev_err(dev, "failed to get ref_clk(id-%d)\n", port); 511 - retval = PTR_ERR(inst->ref_clk); 512 - goto put_child; 515 + return PTR_ERR(inst->ref_clk); 513 516 } 514 517 } 515 518 516 519 provider = 
devm_of_phy_provider_register(dev, mtk_phy_xlate); 517 520 return PTR_ERR_OR_ZERO(provider); 518 - 519 - put_child: 520 - of_node_put(child_np); 521 - return retval; 522 521 } 523 522 524 523 static struct platform_driver mtk_xsphy_driver = {
+12
drivers/phy/nuvoton/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # 3 + # PHY drivers for Nuvoton MA35 platforms 4 + # 5 + config PHY_MA35_USB 6 + tristate "Nuvoton MA35 USB2.0 PHY driver" 7 + depends on ARCH_MA35 || COMPILE_TEST 8 + depends on OF 9 + select GENERIC_PHY 10 + help 11 + Enable this to support the USB2.0 PHY on the Nuvoton MA35 12 + series SoCs.
+3
drivers/phy/nuvoton/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + obj-$(CONFIG_PHY_MA35_USB) += phy-ma35d1-usb2.o
+143
drivers/phy/nuvoton/phy-ma35d1-usb2.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2024 Nuvoton Technology Corp. 4 + */ 5 + #include <linux/bitfield.h> 6 + #include <linux/clk.h> 7 + #include <linux/delay.h> 8 + #include <linux/io.h> 9 + #include <linux/kernel.h> 10 + #include <linux/mfd/syscon.h> 11 + #include <linux/module.h> 12 + #include <linux/of.h> 13 + #include <linux/phy/phy.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/regmap.h> 16 + 17 + /* USB PHY Miscellaneous Control Register */ 18 + #define MA35_SYS_REG_USBPMISCR 0x60 19 + #define PHY0POR BIT(0) /* PHY Power-On Reset Control Bit */ 20 + #define PHY0SUSPEND BIT(1) /* PHY Suspend; 0: suspend, 1: operaion */ 21 + #define PHY0COMN BIT(2) /* PHY Common Block Power-Down Control */ 22 + #define PHY0DEVCKSTB BIT(10) /* PHY 60 MHz UTMI clock stable bit */ 23 + 24 + struct ma35_usb_phy { 25 + struct clk *clk; 26 + struct device *dev; 27 + struct regmap *sysreg; 28 + }; 29 + 30 + static int ma35_usb_phy_power_on(struct phy *phy) 31 + { 32 + struct ma35_usb_phy *p_phy = phy_get_drvdata(phy); 33 + unsigned int val; 34 + int ret; 35 + 36 + ret = clk_prepare_enable(p_phy->clk); 37 + if (ret < 0) { 38 + dev_err(p_phy->dev, "Failed to enable PHY clock: %d\n", ret); 39 + return ret; 40 + } 41 + 42 + regmap_read(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, &val); 43 + if (val & PHY0SUSPEND) { 44 + /* 45 + * USB PHY0 is in operation mode already 46 + * make sure USB PHY 60 MHz UTMI Interface Clock ready 47 + */ 48 + ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val, 49 + val & PHY0DEVCKSTB, 10, 1000); 50 + if (ret == 0) 51 + return 0; 52 + } 53 + 54 + /* 55 + * reset USB PHY0. 
56 + * wait until USB PHY0 60 MHz UTMI Interface Clock ready 57 + */ 58 + regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, (PHY0POR | PHY0SUSPEND)); 59 + udelay(20); 60 + 61 + /* make USB PHY0 enter operation mode */ 62 + regmap_update_bits(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, 0x7, PHY0SUSPEND); 63 + 64 + /* make sure USB PHY 60 MHz UTMI Interface Clock ready */ 65 + ret = regmap_read_poll_timeout(p_phy->sysreg, MA35_SYS_REG_USBPMISCR, val, 66 + val & PHY0DEVCKSTB, 10, 1000); 67 + if (ret == -ETIMEDOUT) { 68 + dev_err(p_phy->dev, "Check PHY clock, Timeout: %d\n", ret); 69 + clk_disable_unprepare(p_phy->clk); 70 + return ret; 71 + } 72 + 73 + return 0; 74 + } 75 + 76 + static int ma35_usb_phy_power_off(struct phy *phy) 77 + { 78 + struct ma35_usb_phy *p_phy = phy_get_drvdata(phy); 79 + 80 + clk_disable_unprepare(p_phy->clk); 81 + return 0; 82 + } 83 + 84 + static const struct phy_ops ma35_usb_phy_ops = { 85 + .power_on = ma35_usb_phy_power_on, 86 + .power_off = ma35_usb_phy_power_off, 87 + .owner = THIS_MODULE, 88 + }; 89 + 90 + static int ma35_usb_phy_probe(struct platform_device *pdev) 91 + { 92 + struct phy_provider *provider; 93 + struct ma35_usb_phy *p_phy; 94 + struct phy *phy; 95 + 96 + p_phy = devm_kzalloc(&pdev->dev, sizeof(*p_phy), GFP_KERNEL); 97 + if (!p_phy) 98 + return -ENOMEM; 99 + 100 + p_phy->dev = &pdev->dev; 101 + platform_set_drvdata(pdev, p_phy); 102 + 103 + p_phy->sysreg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "nuvoton,sys"); 104 + if (IS_ERR(p_phy->sysreg)) 105 + return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->sysreg), 106 + "Failed to get SYS registers\n"); 107 + 108 + p_phy->clk = of_clk_get(pdev->dev.of_node, 0); 109 + if (IS_ERR(p_phy->clk)) 110 + return dev_err_probe(&pdev->dev, PTR_ERR(p_phy->clk), 111 + "failed to find usb_phy clock\n"); 112 + 113 + phy = devm_phy_create(&pdev->dev, NULL, &ma35_usb_phy_ops); 114 + if (IS_ERR(phy)) 115 + return dev_err_probe(&pdev->dev, PTR_ERR(phy), "Failed to create 
PHY\n"); 116 + 117 + phy_set_drvdata(phy, p_phy); 118 + 119 + provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); 120 + if (IS_ERR(provider)) 121 + return dev_err_probe(&pdev->dev, PTR_ERR(provider), 122 + "Failed to register PHY provider\n"); 123 + return 0; 124 + } 125 + 126 + static const struct of_device_id ma35_usb_phy_of_match[] = { 127 + { .compatible = "nuvoton,ma35d1-usb2-phy", }, 128 + { }, 129 + }; 130 + MODULE_DEVICE_TABLE(of, ma35_usb_phy_of_match); 131 + 132 + static struct platform_driver ma35_usb_phy_driver = { 133 + .probe = ma35_usb_phy_probe, 134 + .driver = { 135 + .name = "ma35d1-usb2-phy", 136 + .of_match_table = ma35_usb_phy_of_match, 137 + }, 138 + }; 139 + module_platform_driver(ma35_usb_phy_driver); 140 + 141 + MODULE_DESCRIPTION("Nuvoton ma35d1 USB2.0 PHY driver"); 142 + MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>"); 143 + MODULE_LICENSE("GPL");
+5 -1
drivers/phy/phy-airoha-pcie.c
··· 18 18 #define LEQ_LEN_CTRL_MAX_VAL 7 19 19 #define FREQ_LOCK_MAX_ATTEMPT 10 20 20 21 + /* PCIe-PHY initialization time in ms needed by the hw to complete */ 22 + #define PHY_HW_INIT_TIME_MS 30 23 + 21 24 enum airoha_pcie_port_gen { 22 25 PCIE_PORT_GEN1 = 1, 23 26 PCIE_PORT_GEN2, ··· 1184 1181 airoha_phy_pma1_set_bits(pcie_phy, REG_PCIE_PMA_SS_DA_XPON_PWDB0, 1185 1182 PCIE_DA_XPON_CDR_PR_PWDB); 1186 1183 1187 - usleep_range(100, 200); 1184 + /* Wait for the PCIe PHY to complete initialization before returning */ 1185 + msleep(PHY_HW_INIT_TIME_MS); 1188 1186 1189 1187 return 0; 1190 1188 }
+20 -18
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
··· 2190 2190 void __iomem *serdes = qmp->dp_serdes; 2191 2191 const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts; 2192 2192 2193 - qmp_configure(serdes, cfg->dp_serdes_tbl, cfg->dp_serdes_tbl_num); 2193 + qmp_configure(qmp->dev, serdes, cfg->dp_serdes_tbl, 2194 + cfg->dp_serdes_tbl_num); 2194 2195 2195 2196 switch (dp_opts->link_rate) { 2196 2197 case 1620: 2197 - qmp_configure(serdes, cfg->serdes_tbl_rbr, 2198 - cfg->serdes_tbl_rbr_num); 2198 + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_rbr, 2199 + cfg->serdes_tbl_rbr_num); 2199 2200 break; 2200 2201 case 2700: 2201 - qmp_configure(serdes, cfg->serdes_tbl_hbr, 2202 - cfg->serdes_tbl_hbr_num); 2202 + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr, 2203 + cfg->serdes_tbl_hbr_num); 2203 2204 break; 2204 2205 case 5400: 2205 - qmp_configure(serdes, cfg->serdes_tbl_hbr2, 2206 - cfg->serdes_tbl_hbr2_num); 2206 + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr2, 2207 + cfg->serdes_tbl_hbr2_num); 2207 2208 break; 2208 2209 case 8100: 2209 - qmp_configure(serdes, cfg->serdes_tbl_hbr3, 2210 - cfg->serdes_tbl_hbr3_num); 2210 + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl_hbr3, 2211 + cfg->serdes_tbl_hbr3_num); 2211 2212 break; 2212 2213 default: 2213 2214 /* Other link rates aren't supported */ ··· 2808 2807 2809 2808 qmp_combo_dp_serdes_init(qmp); 2810 2809 2811 - qmp_configure_lane(tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1); 2812 - qmp_configure_lane(tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2); 2810 + qmp_configure_lane(qmp->dev, tx, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 1); 2811 + qmp_configure_lane(qmp->dev, tx2, cfg->dp_tx_tbl, cfg->dp_tx_tbl_num, 2); 2813 2812 2814 2813 /* Configure special DP tx tunings */ 2815 2814 cfg->configure_dp_tx(qmp); ··· 2851 2850 unsigned int val; 2852 2851 int ret; 2853 2852 2854 - qmp_configure(serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); 2853 + qmp_configure(qmp->dev, serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); 2855 2854 2856 2855 ret = 
clk_prepare_enable(qmp->pipe_clk); 2857 2856 if (ret) { ··· 2860 2859 } 2861 2860 2862 2861 /* Tx, Rx, and PCS configurations */ 2863 - qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 2864 - qmp_configure_lane(tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); 2862 + qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 2863 + qmp_configure_lane(qmp->dev, tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); 2865 2864 2866 - qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 2867 - qmp_configure_lane(rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); 2865 + qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 2866 + qmp_configure_lane(qmp->dev, rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); 2868 2867 2869 - qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 2868 + qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 2870 2869 2871 2870 if (pcs_usb) 2872 - qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num); 2871 + qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl, 2872 + cfg->pcs_usb_tbl_num); 2873 2873 2874 2874 if (cfg->has_pwrdn_delay) 2875 2875 usleep_range(10, 20);
+11 -8
drivers/phy/qualcomm/phy-qcom-qmp-common.h
··· 9 9 struct qmp_phy_init_tbl { 10 10 unsigned int offset; 11 11 unsigned int val; 12 + char *name; 12 13 /* 13 14 * mask of lanes for which this register is written 14 15 * for cases when second lane needs different values ··· 21 20 { \ 22 21 .offset = o, \ 23 22 .val = v, \ 23 + .name = #o, \ 24 24 .lane_mask = 0xff, \ 25 25 } 26 26 ··· 29 27 { \ 30 28 .offset = o, \ 31 29 .val = v, \ 30 + .name = #o, \ 32 31 .lane_mask = l, \ 33 32 } 34 33 35 - static inline void qmp_configure_lane(void __iomem *base, 36 - const struct qmp_phy_init_tbl tbl[], 37 - int num, 38 - u8 lane_mask) 34 + static inline void qmp_configure_lane(struct device *dev, void __iomem *base, 35 + const struct qmp_phy_init_tbl tbl[], 36 + int num, u8 lane_mask) 39 37 { 40 38 int i; 41 39 const struct qmp_phy_init_tbl *t = tbl; ··· 47 45 if (!(t->lane_mask & lane_mask)) 48 46 continue; 49 47 48 + dev_dbg(dev, "Writing Reg: %s Offset: 0x%04x Val: 0x%02x\n", 49 + t->name, t->offset, t->val); 50 50 writel(t->val, base + t->offset); 51 51 } 52 52 } 53 53 54 - static inline void qmp_configure(void __iomem *base, 55 - const struct qmp_phy_init_tbl tbl[], 56 - int num) 54 + static inline void qmp_configure(struct device *dev, void __iomem *base, 55 + const struct qmp_phy_init_tbl tbl[], int num) 57 56 { 58 - qmp_configure_lane(base, tbl, num, 0xff); 57 + qmp_configure_lane(dev, base, tbl, num, 0xff); 59 58 } 60 59 61 60 #endif
+7 -12
drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
··· 288 288 unsigned int val; 289 289 int ret; 290 290 291 - qmp_configure(serdes, serdes_tbl, serdes_tbl_num); 291 + qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num); 292 292 293 293 qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET); 294 294 qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], ··· 431 431 } 432 432 433 433 /* Tx, Rx, and PCS configurations */ 434 - qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 435 - qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 436 - qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 434 + qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 435 + qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 436 + qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 437 437 438 438 /* 439 439 * Pull out PHY from POWER DOWN state. ··· 725 725 { 726 726 struct qcom_qmp *qmp; 727 727 struct device *dev = &pdev->dev; 728 - struct device_node *child; 729 728 struct phy_provider *phy_provider; 730 729 void __iomem *serdes; 731 730 const struct qmp_phy_cfg *cfg = NULL; ··· 772 773 return -ENOMEM; 773 774 774 775 id = 0; 775 - for_each_available_child_of_node(dev->of_node, child) { 776 + for_each_available_child_of_node_scoped(dev->of_node, child) { 776 777 /* Create per-lane phy */ 777 778 ret = qmp_pcie_msm8996_create(dev, child, id, serdes, cfg); 778 779 if (ret) { 779 780 dev_err(dev, "failed to create lane%d phy, %d\n", 780 781 id, ret); 781 - goto err_node_put; 782 + return ret; 782 783 } 783 784 784 785 /* ··· 789 790 if (ret) { 790 791 dev_err(qmp->dev, 791 792 "failed to register pipe clock source\n"); 792 - goto err_node_put; 793 + return ret; 793 794 } 794 795 795 796 id++; ··· 798 799 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 799 800 800 801 return PTR_ERR_OR_ZERO(phy_provider); 801 - 802 - err_node_put: 803 - of_node_put(child); 804 - return ret; 805 802 } 806 803 807 804 static struct platform_driver 
qmp_pcie_msm8996_driver = {
+69 -14
drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
··· 1242 1242 QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f), 1243 1243 }; 1244 1244 1245 + static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl[] = { 1246 + QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x1c), 1247 + }; 1248 + 1245 1249 static const struct qmp_phy_init_tbl x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl[] = { 1246 1250 QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01), 1247 1251 QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x88), ··· 3658 3654 .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl, 3659 3655 .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl), 3660 3656 }, 3657 + 3658 + .reset_list = sdm845_pciephy_reset_l, 3659 + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), 3660 + .vreg_list = sm8550_qmp_phy_vreg_l, 3661 + .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l), 3662 + .regs = pciephy_v6_regs_layout, 3663 + 3664 + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, 3665 + .phy_status = PHYSTATUS_4_20, 3666 + .has_nocsr_reset = true, 3667 + }; 3668 + 3669 + static const struct qmp_phy_cfg x1e80100_qmp_gen4x4_pciephy_cfg = { 3670 + .lanes = 4, 3671 + 3672 + .offsets = &qmp_pcie_offsets_v6_20, 3673 + 3674 + .tbls = { 3675 + .serdes = x1e80100_qmp_gen4x2_pcie_serdes_tbl, 3676 + .serdes_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_serdes_tbl), 3677 + .tx = x1e80100_qmp_gen4x2_pcie_tx_tbl, 3678 + .tx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_tx_tbl), 3679 + .rx = x1e80100_qmp_gen4x2_pcie_rx_tbl, 3680 + .rx_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_rx_tbl), 3681 + .pcs = x1e80100_qmp_gen4x2_pcie_pcs_tbl, 3682 + .pcs_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_tbl), 3683 + .pcs_misc = x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl, 3684 + .pcs_misc_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_pcs_misc_tbl), 3685 + .ln_shrd = x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl, 3686 + .ln_shrd_num = ARRAY_SIZE(x1e80100_qmp_gen4x2_pcie_ln_shrd_tbl), 3687 + }, 3688 + 3689 + .serdes_4ln_tbl = 
x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl, 3690 + .serdes_4ln_num = ARRAY_SIZE(x1e80100_qmp_gen4x4_pcie_serdes_4ln_tbl), 3691 + 3661 3692 .reset_list = sdm845_pciephy_reset_l, 3662 3693 .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), 3663 3694 .vreg_list = sm8550_qmp_phy_vreg_l, ··· 3708 3669 { 3709 3670 const struct qmp_phy_cfg *cfg = qmp->cfg; 3710 3671 const struct qmp_pcie_offsets *offs = cfg->offsets; 3711 - void __iomem *tx3, *rx3, *tx4, *rx4; 3672 + void __iomem *serdes, *tx3, *rx3, *tx4, *rx4, *pcs, *pcs_misc, *ln_shrd; 3712 3673 3674 + serdes = qmp->port_b + offs->serdes; 3713 3675 tx3 = qmp->port_b + offs->tx; 3714 3676 rx3 = qmp->port_b + offs->rx; 3715 3677 tx4 = qmp->port_b + offs->tx2; 3716 3678 rx4 = qmp->port_b + offs->rx2; 3679 + pcs = qmp->port_b + offs->pcs; 3680 + pcs_misc = qmp->port_b + offs->pcs_misc; 3681 + ln_shrd = qmp->port_b + offs->ln_shrd; 3717 3682 3718 - qmp_configure_lane(tx3, tbls->tx, tbls->tx_num, 1); 3719 - qmp_configure_lane(rx3, tbls->rx, tbls->rx_num, 1); 3683 + qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); 3684 + qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num); 3720 3685 3721 - qmp_configure_lane(tx4, tbls->tx, tbls->tx_num, 2); 3722 - qmp_configure_lane(rx4, tbls->rx, tbls->rx_num, 2); 3686 + qmp_configure_lane(qmp->dev, tx3, tbls->tx, tbls->tx_num, 1); 3687 + qmp_configure_lane(qmp->dev, rx3, tbls->rx, tbls->rx_num, 1); 3688 + 3689 + qmp_configure_lane(qmp->dev, tx4, tbls->tx, tbls->tx_num, 2); 3690 + qmp_configure_lane(qmp->dev, rx4, tbls->rx, tbls->rx_num, 2); 3691 + 3692 + qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num); 3693 + qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); 3694 + 3695 + qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num); 3723 3696 } 3724 3697 3725 3698 static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_cfg_tbls *tbls) ··· 3749 3698 if (!tbls) 3750 3699 return; 3751 3700 3752 - 
qmp_configure(serdes, tbls->serdes, tbls->serdes_num); 3701 + qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); 3753 3702 3754 - qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1); 3755 - qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1); 3703 + qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1); 3704 + qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1); 3756 3705 3757 3706 if (cfg->lanes >= 2) { 3758 - qmp_configure_lane(tx2, tbls->tx, tbls->tx_num, 2); 3759 - qmp_configure_lane(rx2, tbls->rx, tbls->rx_num, 2); 3707 + qmp_configure_lane(qmp->dev, tx2, tbls->tx, tbls->tx_num, 2); 3708 + qmp_configure_lane(qmp->dev, rx2, tbls->rx, tbls->rx_num, 2); 3760 3709 } 3761 3710 3762 - qmp_configure(pcs, tbls->pcs, tbls->pcs_num); 3763 - qmp_configure(pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); 3711 + qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num); 3712 + qmp_configure(qmp->dev, pcs_misc, tbls->pcs_misc, tbls->pcs_misc_num); 3764 3713 3765 3714 if (cfg->lanes >= 4 && qmp->tcsr_4ln_config) { 3766 - qmp_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num); 3715 + qmp_configure(qmp->dev, serdes, cfg->serdes_4ln_tbl, 3716 + cfg->serdes_4ln_num); 3767 3717 qmp_pcie_init_port_b(qmp, tbls); 3768 3718 } 3769 3719 3770 - qmp_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num); 3720 + qmp_configure(qmp->dev, ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num); 3771 3721 } 3772 3722 3773 3723 static int qmp_pcie_init(struct phy *phy) ··· 4475 4423 }, { 4476 4424 .compatible = "qcom,x1e80100-qmp-gen4x2-pcie-phy", 4477 4425 .data = &x1e80100_qmp_gen4x2_pciephy_cfg, 4426 + }, { 4427 + .compatible = "qcom,x1e80100-qmp-gen4x4-pcie-phy", 4428 + .data = &x1e80100_qmp_gen4x4_pciephy_cfg, 4478 4429 }, 4479 4430 { }, 4480 4431 };
+6 -6
drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
··· 1527 1527 { 1528 1528 void __iomem *serdes = qmp->serdes; 1529 1529 1530 - qmp_configure(serdes, tbls->serdes, tbls->serdes_num); 1530 + qmp_configure(qmp->dev, serdes, tbls->serdes, tbls->serdes_num); 1531 1531 } 1532 1532 1533 1533 static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls) ··· 1536 1536 void __iomem *tx = qmp->tx; 1537 1537 void __iomem *rx = qmp->rx; 1538 1538 1539 - qmp_configure_lane(tx, tbls->tx, tbls->tx_num, 1); 1540 - qmp_configure_lane(rx, tbls->rx, tbls->rx_num, 1); 1539 + qmp_configure_lane(qmp->dev, tx, tbls->tx, tbls->tx_num, 1); 1540 + qmp_configure_lane(qmp->dev, rx, tbls->rx, tbls->rx_num, 1); 1541 1541 1542 1542 if (cfg->lanes >= 2) { 1543 - qmp_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2); 1544 - qmp_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2); 1543 + qmp_configure_lane(qmp->dev, qmp->tx2, tbls->tx, tbls->tx_num, 2); 1544 + qmp_configure_lane(qmp->dev, qmp->rx2, tbls->rx, tbls->rx_num, 2); 1545 1545 } 1546 1546 } 1547 1547 ··· 1549 1549 { 1550 1550 void __iomem *pcs = qmp->pcs; 1551 1551 1552 - qmp_configure(pcs, tbls->pcs, tbls->pcs_num); 1552 + qmp_configure(qmp->dev, pcs, tbls->pcs, tbls->pcs_num); 1553 1553 } 1554 1554 1555 1555 static int qmp_ufs_get_gear_overlay(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
+5 -5
drivers/phy/qualcomm/phy-qcom-qmp-usb.c
··· 1649 1649 const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; 1650 1650 int serdes_tbl_num = cfg->serdes_tbl_num; 1651 1651 1652 - qmp_configure(serdes, serdes_tbl, serdes_tbl_num); 1652 + qmp_configure(qmp->dev, serdes, serdes_tbl, serdes_tbl_num); 1653 1653 1654 1654 return 0; 1655 1655 } ··· 1730 1730 } 1731 1731 1732 1732 /* Tx, Rx, and PCS configurations */ 1733 - qmp_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 1734 - qmp_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 1733 + qmp_configure_lane(qmp->dev, tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 1734 + qmp_configure_lane(qmp->dev, rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 1735 1735 1736 - qmp_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 1736 + qmp_configure(qmp->dev, pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 1737 1737 1738 1738 if (pcs_usb) 1739 - qmp_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num); 1739 + qmp_configure(qmp->dev, pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num); 1740 1740 1741 1741 if (cfg->has_pwrdn_delay) 1742 1742 usleep_range(10, 20);
+7 -6
drivers/phy/qualcomm/phy-qcom-qmp-usbc.c
··· 526 526 unsigned int val; 527 527 int ret; 528 528 529 - qmp_configure(qmp->serdes, cfg->serdes_tbl, cfg->serdes_tbl_num); 529 + qmp_configure(qmp->dev, qmp->serdes, cfg->serdes_tbl, 530 + cfg->serdes_tbl_num); 530 531 531 532 ret = clk_prepare_enable(qmp->pipe_clk); 532 533 if (ret) { ··· 536 535 } 537 536 538 537 /* Tx, Rx, and PCS configurations */ 539 - qmp_configure_lane(qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 540 - qmp_configure_lane(qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 538 + qmp_configure_lane(qmp->dev, qmp->tx, cfg->tx_tbl, cfg->tx_tbl_num, 1); 539 + qmp_configure_lane(qmp->dev, qmp->rx, cfg->rx_tbl, cfg->rx_tbl_num, 1); 541 540 542 - qmp_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); 543 - qmp_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); 541 + qmp_configure_lane(qmp->dev, qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2); 542 + qmp_configure_lane(qmp->dev, qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2); 544 543 545 - qmp_configure(qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 544 + qmp_configure(qmp->dev, qmp->pcs, cfg->pcs_tbl, cfg->pcs_tbl_num); 546 545 547 546 /* Pull PHY out of reset state */ 548 547 qphy_clrbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+57 -3
drivers/phy/renesas/phy-rcar-gen3-usb2.c
··· 19 19 #include <linux/platform_device.h> 20 20 #include <linux/pm_runtime.h> 21 21 #include <linux/regulator/consumer.h> 22 + #include <linux/reset.h> 22 23 #include <linux/string.h> 23 24 #include <linux/usb/of.h> 24 25 #include <linux/workqueue.h> 25 26 26 27 /******* USB2.0 Host registers (original offset is +0x200) *******/ 27 28 #define USB2_INT_ENABLE 0x000 29 + #define USB2_AHB_BUS_CTR 0x008 28 30 #define USB2_USBCTR 0x00c 29 31 #define USB2_SPD_RSM_TIMSET 0x10c 30 32 #define USB2_OC_TIMSET 0x110 ··· 41 39 #define USB2_INT_ENABLE_UCOM_INTEN BIT(3) 42 40 #define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */ 43 41 #define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */ 42 + 43 + /* AHB_BUS_CTR */ 44 + #define USB2_AHB_BUS_CTR_MBL_MASK GENMASK(1, 0) 45 + #define USB2_AHB_BUS_CTR_MBL_INCR4 2 44 46 45 47 /* USBCTR */ 46 48 #define USB2_USBCTR_DIRPD BIT(2) ··· 117 111 struct extcon_dev *extcon; 118 112 struct rcar_gen3_phy rphys[NUM_OF_PHYS]; 119 113 struct regulator *vbus; 114 + struct reset_control *rstc; 120 115 struct work_struct work; 121 116 struct mutex lock; /* protects rphys[...].powered */ 122 117 enum usb_dr_mode dr_mode; ··· 132 125 struct rcar_gen3_phy_drv_data { 133 126 const struct phy_ops *phy_usb2_ops; 134 127 bool no_adp_ctrl; 128 + bool init_bus; 135 129 }; 136 130 137 131 /* ··· 583 575 .no_adp_ctrl = true, 584 576 }; 585 577 578 + static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = { 579 + .phy_usb2_ops = &rcar_gen3_phy_usb2_ops, 580 + .no_adp_ctrl = true, 581 + .init_bus = true, 582 + }; 583 + 586 584 static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = { 587 585 { 588 586 .compatible = "renesas,usb2-phy-r8a77470", ··· 609 595 { 610 596 .compatible = "renesas,rzg2l-usb2-phy", 611 597 .data = &rz_g2l_phy_usb2_data, 598 + }, 599 + { 600 + .compatible = "renesas,usb2-phy-r9a08g045", 601 + .data = &rz_g3s_phy_usb2_data, 612 602 }, 613 603 { 614 604 .compatible = "renesas,rcar-gen3-usb2-phy", ··· 668 650 
return candidate; 669 651 } 670 652 653 + static int rcar_gen3_phy_usb2_init_bus(struct rcar_gen3_chan *channel) 654 + { 655 + struct device *dev = channel->dev; 656 + int ret; 657 + u32 val; 658 + 659 + channel->rstc = devm_reset_control_array_get_shared(dev); 660 + if (IS_ERR(channel->rstc)) 661 + return PTR_ERR(channel->rstc); 662 + 663 + ret = pm_runtime_resume_and_get(dev); 664 + if (ret) 665 + return ret; 666 + 667 + ret = reset_control_deassert(channel->rstc); 668 + if (ret) 669 + goto rpm_put; 670 + 671 + val = readl(channel->base + USB2_AHB_BUS_CTR); 672 + val &= ~USB2_AHB_BUS_CTR_MBL_MASK; 673 + val |= USB2_AHB_BUS_CTR_MBL_INCR4; 674 + writel(val, channel->base + USB2_AHB_BUS_CTR); 675 + 676 + rpm_put: 677 + pm_runtime_put(dev); 678 + 679 + return ret; 680 + } 681 + 671 682 static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) 672 683 { 673 684 const struct rcar_gen3_phy_drv_data *phy_data; ··· 750 703 goto error; 751 704 } 752 705 706 + platform_set_drvdata(pdev, channel); 707 + channel->dev = dev; 708 + 709 + if (phy_data->init_bus) { 710 + ret = rcar_gen3_phy_usb2_init_bus(channel); 711 + if (ret) 712 + goto error; 713 + } 714 + 753 715 channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl; 754 716 if (phy_data->no_adp_ctrl) 755 717 channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; ··· 789 733 channel->vbus = NULL; 790 734 } 791 735 792 - platform_set_drvdata(pdev, channel); 793 - channel->dev = dev; 794 - 795 736 provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); 796 737 if (IS_ERR(provider)) { 797 738 dev_err(dev, "Failed to register PHY provider\n"); ··· 815 762 if (channel->is_otg_channel) 816 763 device_remove_file(&pdev->dev, &dev_attr_role); 817 764 765 + reset_control_assert(channel->rstc); 818 766 pm_runtime_disable(&pdev->dev); 819 767 }; 820 768
+173 -33
drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
··· 8 8 */ 9 9 #include <linux/bitfield.h> 10 10 #include <linux/clk.h> 11 + #include <linux/clk-provider.h> 11 12 #include <linux/delay.h> 12 13 #include <linux/mfd/syscon.h> 13 14 #include <linux/module.h> ··· 16 15 #include <linux/of_platform.h> 17 16 #include <linux/phy/phy.h> 18 17 #include <linux/platform_device.h> 18 + #include <linux/pm_runtime.h> 19 19 #include <linux/rational.h> 20 20 #include <linux/regmap.h> 21 21 #include <linux/reset.h> ··· 192 190 #define LN3_TX_SER_RATE_SEL_HBR2 BIT(3) 193 191 #define LN3_TX_SER_RATE_SEL_HBR3 BIT(2) 194 192 193 + #define HDMI20_MAX_RATE 600000000 194 + 195 195 struct lcpll_config { 196 196 u32 bit_rate; 197 197 u8 lcvco_mode_en; ··· 276 272 struct clk_bulk_data *clks; 277 273 int nr_clks; 278 274 struct reset_control_bulk_data rsts[RST_MAX]; 275 + 276 + /* clk provider */ 277 + struct clk_hw hw; 278 + unsigned long rate; 279 + 280 + atomic_t usage_count; 279 281 }; 280 282 281 283 static const struct ropll_config ropll_tmds_cfg[] = { ··· 769 759 struct ropll_config rc = {0}; 770 760 int i; 771 761 762 + hdptx->rate = rate * 100; 763 + 772 764 for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++) 773 765 if (rate == ropll_tmds_cfg[i].bit_rate) { 774 766 cfg = &ropll_tmds_cfg[i]; ··· 834 822 static int rk_hdptx_ropll_tmds_mode_config(struct rk_hdptx_phy *hdptx, 835 823 unsigned int rate) 836 824 { 837 - u32 val; 838 - int ret; 839 - 840 - ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val); 841 - if (ret) 842 - return ret; 843 - 844 - if (!(val & HDPTX_O_PLL_LOCK_DONE)) { 845 - ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate); 846 - if (ret) 847 - return ret; 848 - } 849 - 850 825 rk_hdptx_multi_reg_write(hdptx, rk_hdtpx_common_sb_init_seq); 851 826 852 827 regmap_write(hdptx->regmap, LNTOP_REG(0200), 0x06); ··· 855 856 return rk_hdptx_post_enable_lane(hdptx); 856 857 } 857 858 859 + static int rk_hdptx_phy_consumer_get(struct rk_hdptx_phy *hdptx, 860 + unsigned int rate) 861 + { 862 + u32 status; 863 + int ret; 864 + 
865 + if (atomic_inc_return(&hdptx->usage_count) > 1) 866 + return 0; 867 + 868 + ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status); 869 + if (ret) 870 + goto dec_usage; 871 + 872 + if (status & HDPTX_O_PLL_LOCK_DONE) 873 + dev_warn(hdptx->dev, "PLL locked by unknown consumer!\n"); 874 + 875 + if (rate) { 876 + ret = rk_hdptx_ropll_tmds_cmn_config(hdptx, rate); 877 + if (ret) 878 + goto dec_usage; 879 + } 880 + 881 + return 0; 882 + 883 + dec_usage: 884 + atomic_dec(&hdptx->usage_count); 885 + return ret; 886 + } 887 + 888 + static int rk_hdptx_phy_consumer_put(struct rk_hdptx_phy *hdptx, bool force) 889 + { 890 + u32 status; 891 + int ret; 892 + 893 + ret = atomic_dec_return(&hdptx->usage_count); 894 + if (ret > 0) 895 + return 0; 896 + 897 + if (ret < 0) { 898 + dev_warn(hdptx->dev, "Usage count underflow!\n"); 899 + ret = -EINVAL; 900 + } else { 901 + ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &status); 902 + if (!ret) { 903 + if (status & HDPTX_O_PLL_LOCK_DONE) 904 + rk_hdptx_phy_disable(hdptx); 905 + return 0; 906 + } else if (force) { 907 + return 0; 908 + } 909 + } 910 + 911 + atomic_inc(&hdptx->usage_count); 912 + return ret; 913 + } 914 + 858 915 static int rk_hdptx_phy_power_on(struct phy *phy) 859 916 { 860 917 struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy); 861 - int ret, bus_width = phy_get_bus_width(hdptx->phy); 918 + int bus_width = phy_get_bus_width(hdptx->phy); 919 + int ret; 920 + 862 921 /* 863 922 * FIXME: Temporary workaround to pass pixel_clk_rate 864 923 * from the HDMI bridge driver until phy_configure_opts_hdmi ··· 927 870 dev_dbg(hdptx->dev, "%s bus_width=%x rate=%u\n", 928 871 __func__, bus_width, rate); 929 872 930 - ret = pm_runtime_resume_and_get(hdptx->dev); 931 - if (ret) { 932 - dev_err(hdptx->dev, "Failed to resume phy: %d\n", ret); 873 + ret = rk_hdptx_phy_consumer_get(hdptx, rate); 874 + if (ret) 933 875 return ret; 934 - } 935 876 936 877 ret = rk_hdptx_ropll_tmds_mode_config(hdptx, rate); 937 878 if (ret) 938 - 
pm_runtime_put(hdptx->dev); 879 + rk_hdptx_phy_consumer_put(hdptx, true); 939 880 940 881 return ret; 941 882 } ··· 941 886 static int rk_hdptx_phy_power_off(struct phy *phy) 942 887 { 943 888 struct rk_hdptx_phy *hdptx = phy_get_drvdata(phy); 944 - u32 val; 945 - int ret; 946 889 947 - ret = regmap_read(hdptx->grf, GRF_HDPTX_STATUS, &val); 948 - if (ret == 0 && (val & HDPTX_O_PLL_LOCK_DONE)) 949 - rk_hdptx_phy_disable(hdptx); 950 - 951 - pm_runtime_put(hdptx->dev); 952 - 953 - return ret; 890 + return rk_hdptx_phy_consumer_put(hdptx, false); 954 891 } 955 892 956 893 static const struct phy_ops rk_hdptx_phy_ops = { ··· 950 903 .power_off = rk_hdptx_phy_power_off, 951 904 .owner = THIS_MODULE, 952 905 }; 906 + 907 + static struct rk_hdptx_phy *to_rk_hdptx_phy(struct clk_hw *hw) 908 + { 909 + return container_of(hw, struct rk_hdptx_phy, hw); 910 + } 911 + 912 + static int rk_hdptx_phy_clk_prepare(struct clk_hw *hw) 913 + { 914 + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); 915 + 916 + return rk_hdptx_phy_consumer_get(hdptx, hdptx->rate / 100); 917 + } 918 + 919 + static void rk_hdptx_phy_clk_unprepare(struct clk_hw *hw) 920 + { 921 + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); 922 + 923 + rk_hdptx_phy_consumer_put(hdptx, true); 924 + } 925 + 926 + static unsigned long rk_hdptx_phy_clk_recalc_rate(struct clk_hw *hw, 927 + unsigned long parent_rate) 928 + { 929 + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); 930 + 931 + return hdptx->rate; 932 + } 933 + 934 + static long rk_hdptx_phy_clk_round_rate(struct clk_hw *hw, unsigned long rate, 935 + unsigned long *parent_rate) 936 + { 937 + u32 bit_rate = rate / 100; 938 + int i; 939 + 940 + if (rate > HDMI20_MAX_RATE) 941 + return rate; 942 + 943 + for (i = 0; i < ARRAY_SIZE(ropll_tmds_cfg); i++) 944 + if (bit_rate == ropll_tmds_cfg[i].bit_rate) 945 + break; 946 + 947 + if (i == ARRAY_SIZE(ropll_tmds_cfg) && 948 + !rk_hdptx_phy_clk_pll_calc(bit_rate, NULL)) 949 + return -EINVAL; 950 + 951 + return rate; 952 + 
} 953 + 954 + static int rk_hdptx_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate, 955 + unsigned long parent_rate) 956 + { 957 + struct rk_hdptx_phy *hdptx = to_rk_hdptx_phy(hw); 958 + 959 + return rk_hdptx_ropll_tmds_cmn_config(hdptx, rate / 100); 960 + } 961 + 962 + static const struct clk_ops hdptx_phy_clk_ops = { 963 + .prepare = rk_hdptx_phy_clk_prepare, 964 + .unprepare = rk_hdptx_phy_clk_unprepare, 965 + .recalc_rate = rk_hdptx_phy_clk_recalc_rate, 966 + .round_rate = rk_hdptx_phy_clk_round_rate, 967 + .set_rate = rk_hdptx_phy_clk_set_rate, 968 + }; 969 + 970 + static int rk_hdptx_phy_clk_register(struct rk_hdptx_phy *hdptx) 971 + { 972 + struct device *dev = hdptx->dev; 973 + const char *name, *pname; 974 + struct clk *refclk; 975 + int ret, id; 976 + 977 + refclk = devm_clk_get(dev, "ref"); 978 + if (IS_ERR(refclk)) 979 + return dev_err_probe(dev, PTR_ERR(refclk), 980 + "Failed to get ref clock\n"); 981 + 982 + id = of_alias_get_id(dev->of_node, "hdptxphy"); 983 + name = id > 0 ? 
"clk_hdmiphy_pixel1" : "clk_hdmiphy_pixel0"; 984 + pname = __clk_get_name(refclk); 985 + 986 + hdptx->hw.init = CLK_HW_INIT(name, pname, &hdptx_phy_clk_ops, 987 + CLK_GET_RATE_NOCACHE); 988 + 989 + ret = devm_clk_hw_register(dev, &hdptx->hw); 990 + if (ret) 991 + return dev_err_probe(dev, ret, "Failed to register clock\n"); 992 + 993 + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &hdptx->hw); 994 + if (ret) 995 + return dev_err_probe(dev, ret, 996 + "Failed to register clk provider\n"); 997 + return 0; 998 + } 953 999 954 1000 static int rk_hdptx_phy_runtime_suspend(struct device *dev) 955 1001 { ··· 1116 976 return dev_err_probe(dev, PTR_ERR(hdptx->grf), 1117 977 "Could not get GRF syscon\n"); 1118 978 979 + ret = devm_pm_runtime_enable(dev); 980 + if (ret) 981 + return dev_err_probe(dev, ret, "Failed to enable runtime PM\n"); 982 + 1119 983 hdptx->phy = devm_phy_create(dev, NULL, &rk_hdptx_phy_ops); 1120 984 if (IS_ERR(hdptx->phy)) 1121 985 return dev_err_probe(dev, PTR_ERR(hdptx->phy), ··· 1128 984 platform_set_drvdata(pdev, hdptx); 1129 985 phy_set_drvdata(hdptx->phy, hdptx); 1130 986 phy_set_bus_width(hdptx->phy, 8); 1131 - 1132 - ret = devm_pm_runtime_enable(dev); 1133 - if (ret) 1134 - return dev_err_probe(dev, ret, "Failed to enable runtime PM\n"); 1135 987 1136 988 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 1137 989 if (IS_ERR(phy_provider)) ··· 1138 998 reset_control_deassert(hdptx->rsts[RST_CMN].rstc); 1139 999 reset_control_deassert(hdptx->rsts[RST_INIT].rstc); 1140 1000 1141 - return 0; 1001 + return rk_hdptx_phy_clk_register(hdptx); 1142 1002 } 1143 1003 1144 1004 static const struct dev_pm_ops rk_hdptx_phy_pm_ops = {
+6 -6
drivers/phy/samsung/phy-exynos5-usbdrd.c
··· 607 607 608 608 reg = readl(regs_base + EXYNOS850_DRD_SECPMACTL); 609 609 reg &= ~SECPMACTL_PMA_REF_FREQ_SEL; 610 - reg |= FIELD_PREP_CONST(SECPMACTL_PMA_REF_FREQ_SEL, 1); 610 + reg |= FIELD_PREP(SECPMACTL_PMA_REF_FREQ_SEL, 1); 611 611 /* SFR reset */ 612 612 reg |= (SECPMACTL_PMA_LOW_PWR | SECPMACTL_PMA_APB_SW_RST); 613 613 reg &= ~(SECPMACTL_PMA_ROPLL_REF_CLK_SEL | ··· 1123 1123 reg &= ~SSPPLLCTL_FSEL; 1124 1124 switch (phy_drd->extrefclk) { 1125 1125 case EXYNOS5_FSEL_50MHZ: 1126 - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 7); 1126 + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 7); 1127 1127 break; 1128 1128 case EXYNOS5_FSEL_26MHZ: 1129 - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 6); 1129 + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 6); 1130 1130 break; 1131 1131 case EXYNOS5_FSEL_24MHZ: 1132 - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 2); 1132 + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 2); 1133 1133 break; 1134 1134 case EXYNOS5_FSEL_20MHZ: 1135 - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 1); 1135 + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 1); 1136 1136 break; 1137 1137 case EXYNOS5_FSEL_19MHZ2: 1138 - reg |= FIELD_PREP_CONST(SSPPLLCTL_FSEL, 0); 1138 + reg |= FIELD_PREP(SSPPLLCTL_FSEL, 0); 1139 1139 break; 1140 1140 default: 1141 1141 dev_warn(phy_drd->dev, "unsupported ref clk: %#.2x\n",
+17 -33
drivers/phy/ti/phy-am654-serdes.c
··· 7 7 */ 8 8 9 9 #include <dt-bindings/phy/phy.h> 10 + #include <linux/cleanup.h> 10 11 #include <linux/clk.h> 11 12 #include <linux/clk-provider.h> 12 13 #include <linux/delay.h> ··· 645 644 struct device_node *node = am654_phy->of_node; 646 645 struct device *dev = am654_phy->dev; 647 646 struct serdes_am654_clk_mux *mux; 648 - struct device_node *regmap_node; 649 647 const char **parent_names; 650 648 struct clk_init_data *init; 651 649 unsigned int num_parents; ··· 652 652 const __be32 *addr; 653 653 unsigned int reg; 654 654 struct clk *clk; 655 - int ret = 0; 656 655 657 656 mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); 658 657 if (!mux) ··· 659 660 660 661 init = &mux->clk_data; 661 662 662 - regmap_node = of_parse_phandle(node, "ti,serdes-clk", 0); 663 - if (!regmap_node) { 664 - dev_err(dev, "Fail to get serdes-clk node\n"); 665 - ret = -ENODEV; 666 - goto out_put_node; 667 - } 663 + struct device_node *regmap_node __free(device_node) = 664 + of_parse_phandle(node, "ti,serdes-clk", 0); 665 + if (!regmap_node) 666 + return dev_err_probe(dev, -ENODEV, "Fail to get serdes-clk node\n"); 668 667 669 668 regmap = syscon_node_to_regmap(regmap_node->parent); 670 - if (IS_ERR(regmap)) { 671 - dev_err(dev, "Fail to get Syscon regmap\n"); 672 - ret = PTR_ERR(regmap); 673 - goto out_put_node; 674 - } 669 + if (IS_ERR(regmap)) 670 + return dev_err_probe(dev, PTR_ERR(regmap), 671 + "Fail to get Syscon regmap\n"); 675 672 676 673 num_parents = of_clk_get_parent_count(node); 677 - if (num_parents < 2) { 678 - dev_err(dev, "SERDES clock must have parents\n"); 679 - ret = -EINVAL; 680 - goto out_put_node; 681 - } 674 + if (num_parents < 2) 675 + return dev_err_probe(dev, -EINVAL, "SERDES clock must have parents\n"); 682 676 683 677 parent_names = devm_kzalloc(dev, (sizeof(char *) * num_parents), 684 678 GFP_KERNEL); 685 - if (!parent_names) { 686 - ret = -ENOMEM; 687 - goto out_put_node; 688 - } 679 + if (!parent_names) 680 + return -ENOMEM; 689 681 690 682 
of_clk_parent_fill(node, parent_names, num_parents); 691 683 692 684 addr = of_get_address(regmap_node, 0, NULL, NULL); 693 - if (!addr) { 694 - ret = -EINVAL; 695 - goto out_put_node; 696 - } 685 + if (!addr) 686 + return -EINVAL; 697 687 698 688 reg = be32_to_cpu(*addr); 699 689 ··· 698 710 mux->hw.init = init; 699 711 700 712 clk = devm_clk_register(dev, &mux->hw); 701 - if (IS_ERR(clk)) { 702 - ret = PTR_ERR(clk); 703 - goto out_put_node; 704 - } 713 + if (IS_ERR(clk)) 714 + return PTR_ERR(clk); 705 715 706 716 am654_phy->clks[clock_num] = clk; 707 717 708 - out_put_node: 709 - of_node_put(regmap_node); 710 - return ret; 718 + return 0; 711 719 } 712 720 713 721 static const struct of_device_id serdes_am654_id_table[] = {
+6 -10
drivers/phy/ti/phy-gmii-sel.c
··· 468 468 priv->regmap = syscon_node_to_regmap(node->parent); 469 469 if (IS_ERR(priv->regmap)) { 470 470 priv->regmap = device_node_to_regmap(node); 471 - if (IS_ERR(priv->regmap)) { 472 - ret = PTR_ERR(priv->regmap); 473 - dev_err(dev, "Failed to get syscon %d\n", ret); 474 - return ret; 475 - } 471 + if (IS_ERR(priv->regmap)) 472 + return dev_err_probe(dev, PTR_ERR(priv->regmap), 473 + "Failed to get syscon\n"); 476 474 priv->no_offset = true; 477 475 } 478 476 ··· 483 485 priv->phy_provider = 484 486 devm_of_phy_provider_register(dev, 485 487 phy_gmii_sel_of_xlate); 486 - if (IS_ERR(priv->phy_provider)) { 487 - ret = PTR_ERR(priv->phy_provider); 488 - dev_err(dev, "Failed to create phy provider %d\n", ret); 489 - return ret; 490 - } 488 + if (IS_ERR(priv->phy_provider)) 489 + return dev_err_probe(dev, PTR_ERR(priv->phy_provider), 490 + "Failed to create phy provider\n"); 491 491 492 492 return 0; 493 493 }
+6 -10
drivers/phy/ti/phy-j721e-wiz.c
··· 1179 1179 1180 1180 ret = wiz_mux_of_clk_register(wiz, clk_node, wiz->mux_sel_field[i], 1181 1181 clk_mux_sel[i].table); 1182 + of_node_put(clk_node); 1182 1183 if (ret) { 1183 1184 dev_err_probe(dev, ret, "Failed to register %s clock\n", 1184 1185 node_name); 1185 - of_node_put(clk_node); 1186 1186 goto err; 1187 1187 } 1188 1188 1189 - of_node_put(clk_node); 1190 1189 } 1191 1190 1192 1191 for (i = 0; i < wiz->clk_div_sel_num; i++) { ··· 1198 1199 1199 1200 ret = wiz_div_clk_register(wiz, clk_node, wiz->div_sel_field[i], 1200 1201 clk_div_sel[i].table); 1202 + of_node_put(clk_node); 1201 1203 if (ret) { 1202 1204 dev_err_probe(dev, ret, "Failed to register %s clock\n", 1203 1205 node_name); 1204 - of_node_put(clk_node); 1205 1206 goto err; 1206 1207 } 1207 - 1208 - of_node_put(clk_node); 1209 1208 } 1210 1209 1211 1210 return 0; ··· 1404 1407 1405 1408 static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz) 1406 1409 { 1407 - struct device_node *serdes, *subnode; 1410 + struct device_node *serdes; 1408 1411 1409 1412 serdes = of_get_child_by_name(dev->of_node, "serdes"); 1410 1413 if (!serdes) { ··· 1412 1415 return -EINVAL; 1413 1416 } 1414 1417 1415 - for_each_child_of_node(serdes, subnode) { 1418 + for_each_child_of_node_scoped(serdes, subnode) { 1416 1419 u32 reg, num_lanes = 1, phy_type = PHY_NONE; 1417 1420 int ret, i; 1418 1421 ··· 1422 1425 1423 1426 ret = of_property_read_u32(subnode, "reg", &reg); 1424 1427 if (ret) { 1425 - of_node_put(subnode); 1426 1428 dev_err(dev, 1427 1429 "%s: Reading \"reg\" from \"%s\" failed: %d\n", 1428 1430 __func__, subnode->name, ret); ··· 1574 1578 1575 1579 phy_reset_dev = &wiz->wiz_phy_reset_dev; 1576 1580 phy_reset_dev->dev = dev; 1577 - phy_reset_dev->ops = &wiz_phy_reset_ops, 1578 - phy_reset_dev->owner = THIS_MODULE, 1581 + phy_reset_dev->ops = &wiz_phy_reset_ops; 1582 + phy_reset_dev->owner = THIS_MODULE; 1579 1583 phy_reset_dev->of_node = node; 1580 1584 /* Reset for each of the lane and one for 
the entire SERDES */ 1581 1585 phy_reset_dev->nr_resets = num_lanes + 1;