Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drivers-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull ARM SoC driver updates from Arnd Bergmann:
"There are cleanups and minor bugfixes across several SoC specific
drivers, for Qualcomm, Samsung, NXP i.MX, AT91, Tegra, Keystone,
Renesas, ZynqMP

Noteworthy new features are:

- The op-tee firmware driver gains support for asynchronous
notifications from secure-world firmware.

- Qualcomm platforms gain support for new SoC types in various
drivers: power domain, cache controller, RPM sleep, soc-info

- Samsung SoC drivers gain support for new SoCs in ChipID and PMU, as
well as a new USIv2 driver that handles various types of serial
communication (uart, i2c, spi)

- Renesas adds support for R-Car S4-8 (R8A779F0) in multiple drivers,
as well as memory controller support for RZ/G2L (R9A07G044).

- Apple M1 gains support for the PMGR power management driver"

* tag 'drivers-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (94 commits)
soc: qcom: rpmh-rsc: Fix typo in a comment
soc: qcom: socinfo: Add SM6350 and SM7225
dt-bindings: arm: msm: Don't mark LLCC interrupt as required
dt-bindings: firmware: scm: Add SM6350 compatible
dt-bindings: arm: msm: Add LLCC for SM6350
soc: qcom: rpmhpd: Sort power-domain definitions and lists
soc: qcom: rpmhpd: Remove mx/cx relationship on sc7280
soc: qcom: rpmhpd: Rename rpmhpd struct names
soc: qcom: rpmhpd: sm8450: Add the missing .peer for sm8450_cx_ao
soc: qcom: socinfo: add SM8450 ID
soc: qcom: rpmhpd: Add SM8450 power domains
dt-bindings: power: rpmpd: Add SM8450 to rpmpd binding
soc: qcom: smem: Update max processor count
dt-bindings: arm: qcom: Document SM8450 SoC and boards
dt-bindings: firmware: scm: Add SM8450 compatible
dt-bindings: arm: cpus: Add kryo780 compatible
soc: qcom: rpmpd: Add support for sm6125
dt-bindings: qcom-rpmpd: Add sm6125 power domains
soc: qcom: aoss: constify static struct thermal_cooling_device_ops
PM: AVS: qcom-cpr: Use div64_ul instead of do_div
...

+3406 -922
+1
Documentation/devicetree/bindings/arm/cpus.yaml
··· 174 174 - qcom,kryo560 175 175 - qcom,kryo570 176 176 - qcom,kryo685 177 + - qcom,kryo780 177 178 - qcom,scorpion 178 179 179 180 enable-method:
+8
Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.yaml
··· 24 24 compatible: 25 25 const: linaro,optee-tz 26 26 27 + interrupts: 28 + maxItems: 1 29 + description: | 30 + This interrupt which is used to signal an event by the secure world 31 + software is expected to be edge-triggered. 32 + 27 33 method: 28 34 enum: [smc, hvc] 29 35 description: | ··· 48 42 49 43 examples: 50 44 - | 45 + #include <dt-bindings/interrupt-controller/arm-gic.h> 51 46 firmware { 52 47 optee { 53 48 compatible = "linaro,optee-tz"; 54 49 method = "smc"; 50 + interrupts = <GIC_SPI 187 IRQ_TYPE_EDGE_RISING>; 55 51 }; 56 52 }; 57 53
+1 -1
Documentation/devicetree/bindings/arm/msm/qcom,llcc.yaml
··· 24 24 - qcom,sc7180-llcc 25 25 - qcom,sc7280-llcc 26 26 - qcom,sdm845-llcc 27 + - qcom,sm6350-llcc 27 28 - qcom,sm8150-llcc 28 29 - qcom,sm8250-llcc 29 30 ··· 45 44 - compatible 46 45 - reg 47 46 - reg-names 48 - - interrupts 49 47 50 48 additionalProperties: false 51 49
+6
Documentation/devicetree/bindings/arm/qcom.yaml
··· 50 50 sm8150 51 51 sm8250 52 52 sm8350 53 + sm8450 53 54 54 55 The 'board' element must be one of the following strings: 55 56 ··· 257 256 - qcom,sm8350-hdk 258 257 - qcom,sm8350-mtp 259 258 - const: qcom,sm8350 259 + 260 + - items: 261 + - enum: 262 + - qcom,sm8450-qrd 263 + - const: qcom,sm8450 260 264 261 265 additionalProperties: true 262 266
+1 -1
Documentation/devicetree/bindings/arm/samsung/exynos-chipid.yaml Documentation/devicetree/bindings/soc/samsung/exynos-chipid.yaml
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 %YAML 1.2 3 3 --- 4 - $id: http://devicetree.org/schemas/arm/samsung/exynos-chipid.yaml# 4 + $id: http://devicetree.org/schemas/soc/samsung/exynos-chipid.yaml# 5 5 $schema: http://devicetree.org/meta-schemas/core.yaml# 6 6 7 7 title: Samsung Exynos SoC series Chipid driver
+3 -1
Documentation/devicetree/bindings/arm/samsung/pmu.yaml Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 %YAML 1.2 3 3 --- 4 - $id: http://devicetree.org/schemas/arm/samsung/pmu.yaml# 4 + $id: http://devicetree.org/schemas/soc/samsung/exynos-pmu.yaml# 5 5 $schema: http://devicetree.org/meta-schemas/core.yaml# 6 6 7 7 title: Samsung Exynos SoC series Power Management Unit (PMU) ··· 24 24 - samsung,exynos5420-pmu 25 25 - samsung,exynos5433-pmu 26 26 - samsung,exynos7-pmu 27 + - samsung,exynos850-pmu 27 28 - samsung-s5pv210-pmu 28 29 required: 29 30 - compatible ··· 42 41 - samsung,exynos5420-pmu 43 42 - samsung,exynos5433-pmu 44 43 - samsung,exynos7-pmu 44 + - samsung,exynos850-pmu 45 45 - samsung-s5pv210-pmu 46 46 - const: syscon 47 47
+2
Documentation/devicetree/bindings/firmware/qcom,scm.txt
··· 26 26 * "qcom,scm-sc7280" 27 27 * "qcom,scm-sdm845" 28 28 * "qcom,scm-sdx55" 29 + * "qcom,scm-sm6350" 29 30 * "qcom,scm-sm8150" 30 31 * "qcom,scm-sm8250" 31 32 * "qcom,scm-sm8350" 33 + * "qcom,scm-sm8450" 32 34 and: 33 35 * "qcom,scm" 34 36 - clocks: Specifies clocks needed by the SCM interface, if any:
+40 -14
Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
··· 24 24 25 25 properties: 26 26 compatible: 27 - items: 28 - - enum: 29 - - renesas,r8a774a1-rpc-if # RZ/G2M 30 - - renesas,r8a774b1-rpc-if # RZ/G2N 31 - - renesas,r8a774c0-rpc-if # RZ/G2E 32 - - renesas,r8a774e1-rpc-if # RZ/G2H 33 - - renesas,r8a77970-rpc-if # R-Car V3M 34 - - renesas,r8a77980-rpc-if # R-Car V3H 35 - - renesas,r8a77995-rpc-if # R-Car D3 36 - - renesas,r8a779a0-rpc-if # R-Car V3U 37 - - const: renesas,rcar-gen3-rpc-if # a generic R-Car gen3 or RZ/G2 device 27 + oneOf: 28 + - items: 29 + - enum: 30 + - renesas,r8a774a1-rpc-if # RZ/G2M 31 + - renesas,r8a774b1-rpc-if # RZ/G2N 32 + - renesas,r8a774c0-rpc-if # RZ/G2E 33 + - renesas,r8a774e1-rpc-if # RZ/G2H 34 + - renesas,r8a77970-rpc-if # R-Car V3M 35 + - renesas,r8a77980-rpc-if # R-Car V3H 36 + - renesas,r8a77995-rpc-if # R-Car D3 37 + - renesas,r8a779a0-rpc-if # R-Car V3U 38 + - const: renesas,rcar-gen3-rpc-if # a generic R-Car gen3 or RZ/G2{E,H,M,N} device 39 + 40 + - items: 41 + - enum: 42 + - renesas,r9a07g044-rpc-if # RZ/G2{L,LC} 43 + - const: renesas,rzg2l-rpc-if # RZ/G2L family 38 44 39 45 reg: 40 46 items: ··· 54 48 - const: dirmap 55 49 - const: wbuf 56 50 57 - clocks: 51 + clocks: true 52 + 53 + interrupts: 58 54 maxItems: 1 59 55 60 56 power-domains: ··· 75 67 - cfi-flash 76 68 - jedec,spi-nor 77 69 78 - unevaluatedProperties: false 79 - 80 70 required: 81 71 - compatible 82 72 - reg ··· 84 78 - resets 85 79 - '#address-cells' 86 80 - '#size-cells' 81 + 82 + if: 83 + properties: 84 + compatible: 85 + contains: 86 + enum: 87 + - renesas,rzg2l-rpc-if 88 + then: 89 + properties: 90 + clocks: 91 + items: 92 + - description: SPI Multi IO Register access clock (SPI_CLK2) 93 + - description: SPI Multi IO Main clock (SPI_CLK). 94 + 95 + else: 96 + properties: 97 + clocks: 98 + maxItems: 1 99 + 100 + unevaluatedProperties: false 87 101 88 102 examples: 89 103 - |
+3
Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
··· 24 24 - qcom,msm8994-rpmpd 25 25 - qcom,msm8996-rpmpd 26 26 - qcom,msm8998-rpmpd 27 + - qcom,qcm2290-rpmpd 27 28 - qcom,qcs404-rpmpd 28 29 - qcom,sdm660-rpmpd 29 30 - qcom,sc7180-rpmhpd ··· 33 32 - qcom,sdm845-rpmhpd 34 33 - qcom,sdx55-rpmhpd 35 34 - qcom,sm6115-rpmpd 35 + - qcom,sm6125-rpmpd 36 36 - qcom,sm6350-rpmhpd 37 37 - qcom,sm8150-rpmhpd 38 38 - qcom,sm8250-rpmhpd 39 39 - qcom,sm8350-rpmhpd 40 + - qcom,sm8450-rpmhpd 40 41 41 42 '#power-domain-cells': 42 43 const: 1
+5
Documentation/devicetree/bindings/soc/qcom/qcom-stats.yaml
··· 21 21 enum: 22 22 - qcom,rpmh-stats 23 23 - qcom,rpm-stats 24 + # For older RPM firmware versions with fixed offset for the sleep stats 25 + - qcom,apq8084-rpm-stats 26 + - qcom,msm8226-rpm-stats 27 + - qcom,msm8916-rpm-stats 28 + - qcom,msm8974-rpm-stats 24 29 25 30 reg: 26 31 maxItems: 1
+159
Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/soc/samsung/exynos-usi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Samsung's Exynos USI (Universal Serial Interface) binding 8 + 9 + maintainers: 10 + - Sam Protsenko <semen.protsenko@linaro.org> 11 + - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com> 12 + 13 + description: | 14 + USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C). 15 + USI shares almost all internal circuits within each protocol, so only one 16 + protocol can be chosen at a time. USI is modeled as a node with zero or more 17 + child nodes, each representing a serial sub-node device. The mode setting 18 + selects which particular function will be used. 19 + 20 + Refer to next bindings documentation for information on protocol subnodes that 21 + can exist under USI node: 22 + 23 + [1] Documentation/devicetree/bindings/serial/samsung_uart.yaml 24 + [2] Documentation/devicetree/bindings/i2c/i2c-exynos5.txt 25 + [3] Documentation/devicetree/bindings/spi/spi-samsung.txt 26 + 27 + properties: 28 + $nodename: 29 + pattern: "^usi@[0-9a-f]+$" 30 + 31 + compatible: 32 + enum: 33 + - samsung,exynos850-usi # for USIv2 (Exynos850, ExynosAutoV9) 34 + 35 + reg: true 36 + 37 + clocks: true 38 + 39 + clock-names: true 40 + 41 + ranges: true 42 + 43 + "#address-cells": 44 + const: 1 45 + 46 + "#size-cells": 47 + const: 1 48 + 49 + samsung,sysreg: 50 + $ref: /schemas/types.yaml#/definitions/phandle-array 51 + description: 52 + Should be phandle/offset pair. The phandle to System Register syscon node 53 + (for the same domain where this USI controller resides) and the offset 54 + of SW_CONF register for this USI controller. 55 + 56 + samsung,mode: 57 + $ref: /schemas/types.yaml#/definitions/uint32 58 + description: 59 + Selects USI function (which serial protocol to use). 
Refer to 60 + <include/dt-bindings/soc/samsung,exynos-usi.h> for valid USI mode values. 61 + 62 + samsung,clkreq-on: 63 + type: boolean 64 + description: 65 + Enable this property if underlying protocol requires the clock to be 66 + continuously provided without automatic gating. As suggested by SoC 67 + manual, it should be set in case of SPI/I2C slave, UART Rx and I2C 68 + multi-master mode. Usually this property is needed if USI mode is set 69 + to "UART". 70 + 71 + This property is optional. 72 + 73 + patternProperties: 74 + # All other properties should be child nodes 75 + "^(serial|spi|i2c)@[0-9a-f]+$": 76 + type: object 77 + description: Child node describing underlying USI serial protocol 78 + 79 + required: 80 + - compatible 81 + - ranges 82 + - "#address-cells" 83 + - "#size-cells" 84 + - samsung,sysreg 85 + - samsung,mode 86 + 87 + if: 88 + properties: 89 + compatible: 90 + contains: 91 + enum: 92 + - samsung,exynos850-usi 93 + 94 + then: 95 + properties: 96 + reg: 97 + maxItems: 1 98 + 99 + clocks: 100 + items: 101 + - description: Bus (APB) clock 102 + - description: Operating clock for UART/SPI/I2C protocol 103 + 104 + clock-names: 105 + items: 106 + - const: pclk 107 + - const: ipclk 108 + 109 + required: 110 + - reg 111 + - clocks 112 + - clock-names 113 + 114 + else: 115 + properties: 116 + reg: false 117 + clocks: false 118 + clock-names: false 119 + samsung,clkreq-on: false 120 + 121 + additionalProperties: false 122 + 123 + examples: 124 + - | 125 + #include <dt-bindings/interrupt-controller/arm-gic.h> 126 + #include <dt-bindings/soc/samsung,exynos-usi.h> 127 + 128 + usi0: usi@138200c0 { 129 + compatible = "samsung,exynos850-usi"; 130 + reg = <0x138200c0 0x20>; 131 + samsung,sysreg = <&sysreg_peri 0x1010>; 132 + samsung,mode = <USI_V2_UART>; 133 + samsung,clkreq-on; /* needed for UART mode */ 134 + #address-cells = <1>; 135 + #size-cells = <1>; 136 + ranges; 137 + clocks = <&cmu_peri 32>, <&cmu_peri 31>; 138 + clock-names = "pclk", "ipclk"; 139 
+ 140 + serial_0: serial@13820000 { 141 + compatible = "samsung,exynos850-uart"; 142 + reg = <0x13820000 0xc0>; 143 + interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>; 144 + clocks = <&cmu_peri 32>, <&cmu_peri 31>; 145 + clock-names = "uart", "clk_uart_baud0"; 146 + status = "disabled"; 147 + }; 148 + 149 + hsi2c_0: i2c@13820000 { 150 + compatible = "samsung,exynosautov9-hsi2c"; 151 + reg = <0x13820000 0xc0>; 152 + interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>; 153 + #address-cells = <1>; 154 + #size-cells = <0>; 155 + clocks = <&cmu_peri 32>, <&cmu_peri 31>; 156 + clock-names = "hsi2c_pclk", "hsi2c"; 157 + status = "disabled"; 158 + }; 159 + };
+30
Documentation/staging/tee.rst
··· 184 184 application to retrieve a list of Trusted Applications which can be registered 185 185 as devices on the TEE bus. 186 186 187 + OP-TEE notifications 188 + -------------------- 189 + 190 + There are two kinds of notifications that secure world can use to make 191 + normal world aware of some event. 192 + 193 + 1. Synchronous notifications delivered with ``OPTEE_RPC_CMD_NOTIFICATION`` 194 + using the ``OPTEE_RPC_NOTIFICATION_SEND`` parameter. 195 + 2. Asynchronous notifications delivered with a combination of a non-secure 196 + edge-triggered interrupt and a fast call from the non-secure interrupt 197 + handler. 198 + 199 + Synchronous notifications are limited by depending on RPC for delivery, 200 + this is only usable when secure world is entered with a yielding call via 201 + ``OPTEE_SMC_CALL_WITH_ARG``. This excludes such notifications from secure 202 + world interrupt handlers. 203 + 204 + An asynchronous notification is delivered via a non-secure edge-triggered 205 + interrupt to an interrupt handler registered in the OP-TEE driver. The 206 + actual notification value are retrieved with the fast call 207 + ``OPTEE_SMC_GET_ASYNC_NOTIF_VALUE``. Note that one interrupt can represent 208 + multiple notifications. 209 + 210 + One notification value ``OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF`` has a 211 + special meaning. When this value is received it means that normal world is 212 + supposed to make a yielding call ``OPTEE_MSG_CMD_DO_BOTTOM_HALF``. This 213 + call is done from the thread assisting the interrupt handler. This is a 214 + building block for OP-TEE OS in secure world to implement the top half and 215 + bottom half style of device drivers. 216 + 187 217 AMD-TEE driver 188 218 ============== 189 219
+1
MAINTAINERS
··· 2551 2551 F: Documentation/arm/samsung/ 2552 2552 F: Documentation/devicetree/bindings/arm/samsung/ 2553 2553 F: Documentation/devicetree/bindings/power/pd-samsung.yaml 2554 + F: Documentation/devicetree/bindings/soc/samsung/ 2554 2555 F: arch/arm/boot/dts/exynos* 2555 2556 F: arch/arm/boot/dts/s3c* 2556 2557 F: arch/arm/boot/dts/s5p*
+2
arch/arm/mach-at91/pm.c
··· 645 645 if (!soc_pm.data.ramc[idx]) { 646 646 pr_err("unable to map ramc[%d] cpu registers\n", idx); 647 647 ret = -ENOMEM; 648 + of_node_put(np); 648 649 goto unmap_ramc; 649 650 } 650 651 ··· 671 670 if (!soc_pm.data.ramc_phy) { 672 671 pr_err("unable to map ramc phy cpu registers\n"); 673 672 ret = -ENOMEM; 673 + of_node_put(np); 674 674 goto unmap_ramc; 675 675 } 676 676 }
+16 -2
drivers/bus/imx-weim.c
··· 21 21 unsigned int cs_stride; 22 22 unsigned int wcr_offset; 23 23 unsigned int wcr_bcm; 24 + unsigned int wcr_cont_bclk; 24 25 }; 25 26 26 27 static const struct imx_weim_devtype imx1_weim_devtype = { ··· 42 41 .cs_stride = 0x18, 43 42 .wcr_offset = 0x90, 44 43 .wcr_bcm = BIT(0), 44 + .wcr_cont_bclk = BIT(3), 45 45 }; 46 46 47 47 static const struct imx_weim_devtype imx51_weim_devtype = { ··· 208 206 if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) { 209 207 if (devtype->wcr_bcm) { 210 208 reg = readl(base + devtype->wcr_offset); 211 - writel(reg | devtype->wcr_bcm, 212 - base + devtype->wcr_offset); 209 + reg |= devtype->wcr_bcm; 210 + 211 + if (of_property_read_bool(pdev->dev.of_node, 212 + "fsl,continuous-burst-clk")) { 213 + if (devtype->wcr_cont_bclk) { 214 + reg |= devtype->wcr_cont_bclk; 215 + } else { 216 + dev_err(&pdev->dev, 217 + "continuous burst clk not supported.\n"); 218 + return -EINVAL; 219 + } 220 + } 221 + 222 + writel(reg, base + devtype->wcr_offset); 213 223 } else { 214 224 dev_err(&pdev->dev, "burst clk mode not supported.\n"); 215 225 return -EINVAL;
+44 -6
drivers/bus/tegra-gmi.c
··· 13 13 #include <linux/io.h> 14 14 #include <linux/module.h> 15 15 #include <linux/of_device.h> 16 + #include <linux/pm_runtime.h> 16 17 #include <linux/reset.h> 18 + 19 + #include <soc/tegra/common.h> 17 20 18 21 #define TEGRA_GMI_CONFIG 0x00 19 22 #define TEGRA_GMI_CONFIG_GO BIT(31) ··· 57 54 { 58 55 int err; 59 56 60 - err = clk_prepare_enable(gmi->clk); 61 - if (err < 0) { 62 - dev_err(gmi->dev, "failed to enable clock: %d\n", err); 57 + pm_runtime_enable(gmi->dev); 58 + err = pm_runtime_resume_and_get(gmi->dev); 59 + if (err) { 60 + pm_runtime_disable(gmi->dev); 63 61 return err; 64 62 } 65 63 ··· 87 83 writel(config, gmi->base + TEGRA_GMI_CONFIG); 88 84 89 85 reset_control_assert(gmi->rst); 90 - clk_disable_unprepare(gmi->clk); 86 + 87 + pm_runtime_put_sync_suspend(gmi->dev); 88 + pm_runtime_force_suspend(gmi->dev); 91 89 } 92 90 93 91 static int tegra_gmi_parse_dt(struct tegra_gmi *gmi) ··· 219 213 if (!gmi) 220 214 return -ENOMEM; 221 215 216 + platform_set_drvdata(pdev, gmi); 222 217 gmi->dev = dev; 223 218 224 219 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 239 232 return PTR_ERR(gmi->rst); 240 233 } 241 234 235 + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); 236 + if (err) 237 + return err; 238 + 242 239 err = tegra_gmi_parse_dt(gmi); 243 240 if (err) 244 241 return err; ··· 258 247 return err; 259 248 } 260 249 261 - platform_set_drvdata(pdev, gmi); 262 - 263 250 return 0; 264 251 } 265 252 ··· 270 261 271 262 return 0; 272 263 } 264 + 265 + static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev) 266 + { 267 + struct tegra_gmi *gmi = dev_get_drvdata(dev); 268 + int err; 269 + 270 + err = clk_prepare_enable(gmi->clk); 271 + if (err < 0) { 272 + dev_err(gmi->dev, "failed to enable clock: %d\n", err); 273 + return err; 274 + } 275 + 276 + return 0; 277 + } 278 + 279 + static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev) 280 + { 281 + struct tegra_gmi *gmi = dev_get_drvdata(dev); 282 + 283 + 
clk_disable_unprepare(gmi->clk); 284 + 285 + return 0; 286 + } 287 + 288 + static const struct dev_pm_ops tegra_gmi_pm = { 289 + SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume, 290 + NULL) 291 + }; 273 292 274 293 static const struct of_device_id tegra_gmi_id_table[] = { 275 294 { .compatible = "nvidia,tegra20-gmi", }, ··· 312 275 .driver = { 313 276 .name = "tegra-gmi", 314 277 .of_match_table = tegra_gmi_id_table, 278 + .pm = &tegra_gmi_pm, 315 279 }, 316 280 }; 317 281 module_platform_driver(tegra_gmi_driver);
+1 -1
drivers/firmware/ti_sci.c
··· 1759 1759 desc->num = resp->range_num; 1760 1760 desc->start_sec = resp->range_start_sec; 1761 1761 desc->num_sec = resp->range_num_sec; 1762 - }; 1762 + } 1763 1763 1764 1764 fail: 1765 1765 ti_sci_put_one_xfer(&info->minfo, xfer);
+4 -1
drivers/firmware/xilinx/zynqmp.c
··· 1434 1434 return ret; 1435 1435 1436 1436 /* Check PM API version number */ 1437 - zynqmp_pm_get_api_version(&pm_api_version); 1437 + ret = zynqmp_pm_get_api_version(&pm_api_version); 1438 + if (ret) 1439 + return ret; 1440 + 1438 1441 if (pm_api_version < ZYNQMP_PM_VERSION) { 1439 1442 panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n", 1440 1443 __func__,
+68 -39
drivers/memory/renesas-rpc-if.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/platform_device.h> 14 14 #include <linux/of.h> 15 + #include <linux/of_device.h> 15 16 #include <linux/regmap.h> 16 17 #include <linux/reset.h> 17 18 ··· 20 19 21 20 #define RPCIF_CMNCR 0x0000 /* R/W */ 22 21 #define RPCIF_CMNCR_MD BIT(31) 23 - #define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */ 24 22 #define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) 25 23 #define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) 26 24 #define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) 27 25 #define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) 28 - #define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \ 29 - RPCIF_CMNCR_MOIIO1(3) | \ 30 - RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3)) 31 - #define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */ 32 - #define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */ 26 + #define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \ 27 + RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val)) 28 + #define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */ 29 + #define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */ 33 30 #define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) 34 - #define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \ 35 - RPCIF_CMNCR_IO3FV(3)) 31 + #define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \ 32 + RPCIF_CMNCR_IO3FV(val)) 36 33 #define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) 37 34 38 35 #define RPCIF_SSLDR 0x0004 /* R/W */ ··· 125 126 #define RPCIF_SMDRENR_OPDRE BIT(4) 126 127 #define RPCIF_SMDRENR_SPIDRE BIT(0) 127 128 129 + #define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ 130 + #define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ 131 + 128 132 #define RPCIF_PHYCNT 0x007C /* R/W */ 129 133 #define RPCIF_PHYCNT_CAL BIT(31) 130 134 
#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) ··· 135 133 #define RPCIF_PHYCNT_OCT BIT(20) 136 134 #define RPCIF_PHYCNT_DDRCAL BIT(19) 137 135 #define RPCIF_PHYCNT_HS BIT(18) 138 - #define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) 136 + #define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */ 137 + #define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) /* valid for R-Car and RZ/G2{E,H,M,N} */ 139 138 #define RPCIF_PHYCNT_WBUF2 BIT(4) 140 139 #define RPCIF_PHYCNT_WBUF BIT(2) 141 140 #define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) 141 + #define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0) 142 142 143 143 #define RPCIF_PHYOFFSET1 0x0080 /* R/W */ 144 144 #define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) ··· 150 146 151 147 #define RPCIF_PHYINT 0x0088 /* R/W */ 152 148 #define RPCIF_PHYINT_WPVAL BIT(1) 153 - 154 - #define RPCIF_DIRMAP_SIZE 0x4000000 155 149 156 150 static const struct regmap_range rpcif_volatile_ranges[] = { 157 151 regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1), ··· 245 243 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); 246 244 rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); 247 245 if (IS_ERR(rpc->dirmap)) 248 - rpc->dirmap = NULL; 246 + return PTR_ERR(rpc->dirmap); 249 247 rpc->size = resource_size(res); 250 248 249 + rpc->type = (uintptr_t)of_device_get_match_data(dev); 251 250 rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); 252 251 253 252 return PTR_ERR_OR_ZERO(rpc->rstc); 254 253 } 255 254 EXPORT_SYMBOL(rpcif_sw_init); 256 255 257 - void rpcif_hw_init(struct rpcif *rpc, bool hyperflash) 256 + static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc) 257 + { 258 + regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000); 259 + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000); 260 + regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080); 261 + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022); 262 + regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080); 263 + regmap_write(rpc->regmap, 
RPCIF_PHYADD, 0x80000024); 264 + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3), 265 + RPCIF_PHYCNT_CKSEL(3)); 266 + regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030); 267 + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); 268 + } 269 + 270 + int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) 258 271 { 259 272 u32 dummy; 260 273 261 274 pm_runtime_get_sync(rpc->dev); 262 275 263 - /* 264 - * NOTE: The 0x260 are undocumented bits, but they must be set. 265 - * RPCIF_PHYCNT_STRTIM is strobe timing adjustment bits, 266 - * 0x0 : the delay is biggest, 267 - * 0x1 : the delay is 2nd biggest, 268 - * On H3 ES1.x, the value should be 0, while on others, 269 - * the value should be 7. 270 - */ 271 - regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) | 272 - RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260); 276 + if (rpc->type == RPCIF_RZ_G2L) { 277 + int ret; 273 278 274 - /* 275 - * NOTE: The 0x1511144 are undocumented bits, but they must be set 276 - * for RPCIF_PHYOFFSET1. 277 - * The 0x31 are undocumented bits, but they must be set 278 - * for RPCIF_PHYOFFSET2. 279 - */ 280 - regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 | 281 - RPCIF_PHYOFFSET1_DDRTMG(3)); 282 - regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 | 283 - RPCIF_PHYOFFSET2_OCTTMG(4)); 279 + ret = reset_control_reset(rpc->rstc); 280 + if (ret) 281 + return ret; 282 + usleep_range(200, 300); 283 + rpcif_rzg2l_timing_adjust_sdr(rpc); 284 + } 285 + 286 + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK, 287 + RPCIF_PHYCNT_PHYMEM(hyperflash ? 
3 : 0)); 288 + 289 + if (rpc->type == RPCIF_RCAR_GEN3) 290 + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, 291 + RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7)); 292 + 293 + regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3), 294 + RPCIF_PHYOFFSET1_DDRTMG(3)); 295 + regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7), 296 + RPCIF_PHYOFFSET2_OCTTMG(4)); 284 297 285 298 if (hyperflash) 286 299 regmap_update_bits(rpc->regmap, RPCIF_PHYINT, 287 300 RPCIF_PHYINT_WPVAL, 0); 288 301 289 - regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE | 290 - RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ | 291 - RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); 302 + if (rpc->type == RPCIF_RCAR_GEN3) 303 + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, 304 + RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3), 305 + RPCIF_CMNCR_MOIIO(3) | 306 + RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); 307 + else 308 + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, 309 + RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) | 310 + RPCIF_CMNCR_BSZ(3), 311 + RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) | 312 + RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); 313 + 292 314 /* Set RCF after BSZ update */ 293 315 regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); 294 316 /* Dummy read according to spec */ ··· 323 297 pm_runtime_put(rpc->dev); 324 298 325 299 rpc->bus_size = hyperflash ? 
2 : 1; 300 + 301 + return 0; 326 302 } 327 303 EXPORT_SYMBOL(rpcif_hw_init); 328 304 ··· 616 588 617 589 ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) 618 590 { 619 - loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1); 620 - size_t size = RPCIF_DIRMAP_SIZE - from; 591 + loff_t from = offs & (rpc->size - 1); 592 + size_t size = rpc->size - from; 621 593 622 594 if (len > size) 623 595 len = size; ··· 687 659 } 688 660 689 661 static const struct of_device_id rpcif_of_match[] = { 690 - { .compatible = "renesas,rcar-gen3-rpc-if", }, 662 + { .compatible = "renesas,rcar-gen3-rpc-if", .data = (void *)RPCIF_RCAR_GEN3 }, 663 + { .compatible = "renesas,rzg2l-rpc-if", .data = (void *)RPCIF_RZ_G2L }, 691 664 {}, 692 665 }; 693 666 MODULE_DEVICE_TABLE(of, rpcif_of_match);
+65 -16
drivers/mmc/host/sdhci-tegra.c
··· 15 15 #include <linux/of.h> 16 16 #include <linux/of_device.h> 17 17 #include <linux/pinctrl/consumer.h> 18 + #include <linux/pm_opp.h> 19 + #include <linux/pm_runtime.h> 18 20 #include <linux/regulator/consumer.h> 19 21 #include <linux/reset.h> 20 22 #include <linux/mmc/card.h> ··· 25 23 #include <linux/mmc/slot-gpio.h> 26 24 #include <linux/gpio/consumer.h> 27 25 #include <linux/ktime.h> 26 + 27 + #include <soc/tegra/common.h> 28 28 29 29 #include "sdhci-pltfm.h" 30 30 #include "cqhci.h" ··· 747 743 { 748 744 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 749 745 struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); 746 + struct device *dev = mmc_dev(host->mmc); 750 747 unsigned long host_clk; 748 + int err; 751 749 752 750 if (!clock) 753 751 return sdhci_set_clock(host, clock); ··· 767 761 * from clk_get_rate() is used. 768 762 */ 769 763 host_clk = tegra_host->ddr_signaling ? clock * 2 : clock; 770 - clk_set_rate(pltfm_host->clk, host_clk); 764 + 765 + err = dev_pm_opp_set_rate(dev, host_clk); 766 + if (err) 767 + dev_err(dev, "failed to set clk rate to %luHz: %d\n", 768 + host_clk, err); 769 + 771 770 tegra_host->curr_clk_rate = host_clk; 772 771 if (tegra_host->ddr_signaling) 773 772 host->max_clk = host_clk; ··· 1725 1714 "failed to get clock\n"); 1726 1715 goto err_clk_get; 1727 1716 } 1728 - clk_prepare_enable(clk); 1729 1717 pltfm_host->clk = clk; 1730 1718 1731 1719 tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, ··· 1735 1725 goto err_rst_get; 1736 1726 } 1737 1727 1738 - rc = reset_control_assert(tegra_host->rst); 1728 + rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); 1739 1729 if (rc) 1740 1730 goto err_rst_get; 1731 + 1732 + pm_runtime_enable(&pdev->dev); 1733 + rc = pm_runtime_resume_and_get(&pdev->dev); 1734 + if (rc) 1735 + goto err_pm_get; 1736 + 1737 + rc = reset_control_assert(tegra_host->rst); 1738 + if (rc) 1739 + goto err_rst_assert; 1741 1740 1742 1741 usleep_range(2000, 4000); 1743 1742 
1744 1743 rc = reset_control_deassert(tegra_host->rst); 1745 1744 if (rc) 1746 - goto err_rst_get; 1745 + goto err_rst_assert; 1747 1746 1748 1747 usleep_range(2000, 4000); 1749 1748 ··· 1764 1745 1765 1746 err_add_host: 1766 1747 reset_control_assert(tegra_host->rst); 1748 + err_rst_assert: 1749 + pm_runtime_put_sync_suspend(&pdev->dev); 1750 + err_pm_get: 1751 + pm_runtime_disable(&pdev->dev); 1767 1752 err_rst_get: 1768 - clk_disable_unprepare(pltfm_host->clk); 1769 1753 err_clk_get: 1770 1754 clk_disable_unprepare(tegra_host->tmclk); 1771 1755 err_power_req: ··· 1787 1765 1788 1766 reset_control_assert(tegra_host->rst); 1789 1767 usleep_range(2000, 4000); 1790 - clk_disable_unprepare(pltfm_host->clk); 1791 - clk_disable_unprepare(tegra_host->tmclk); 1792 1768 1769 + pm_runtime_put_sync_suspend(&pdev->dev); 1770 + pm_runtime_force_suspend(&pdev->dev); 1771 + 1772 + clk_disable_unprepare(tegra_host->tmclk); 1793 1773 sdhci_pltfm_free(pdev); 1794 1774 1795 1775 return 0; 1796 1776 } 1797 1777 1798 - #ifdef CONFIG_PM_SLEEP 1799 - static int __maybe_unused sdhci_tegra_suspend(struct device *dev) 1778 + static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev) 1800 1779 { 1801 1780 struct sdhci_host *host = dev_get_drvdata(dev); 1802 1781 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1782 + 1783 + clk_disable_unprepare(pltfm_host->clk); 1784 + 1785 + return 0; 1786 + } 1787 + 1788 + static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev) 1789 + { 1790 + struct sdhci_host *host = dev_get_drvdata(dev); 1791 + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1792 + 1793 + return clk_prepare_enable(pltfm_host->clk); 1794 + } 1795 + 1796 + #ifdef CONFIG_PM_SLEEP 1797 + static int sdhci_tegra_suspend(struct device *dev) 1798 + { 1799 + struct sdhci_host *host = dev_get_drvdata(dev); 1803 1800 int ret; 1804 1801 1805 1802 if (host->mmc->caps2 & MMC_CAP2_CQE) { ··· 1833 1792 return ret; 1834 1793 } 1835 1794 1836 - 
clk_disable_unprepare(pltfm_host->clk); 1795 + ret = pm_runtime_force_suspend(dev); 1796 + if (ret) { 1797 + sdhci_resume_host(host); 1798 + cqhci_resume(host->mmc); 1799 + return ret; 1800 + } 1801 + 1837 1802 return 0; 1838 1803 } 1839 1804 1840 - static int __maybe_unused sdhci_tegra_resume(struct device *dev) 1805 + static int sdhci_tegra_resume(struct device *dev) 1841 1806 { 1842 1807 struct sdhci_host *host = dev_get_drvdata(dev); 1843 - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1844 1808 int ret; 1845 1809 1846 - ret = clk_prepare_enable(pltfm_host->clk); 1810 + ret = pm_runtime_force_resume(dev); 1847 1811 if (ret) 1848 1812 return ret; 1849 1813 ··· 1867 1821 suspend_host: 1868 1822 sdhci_suspend_host(host); 1869 1823 disable_clk: 1870 - clk_disable_unprepare(pltfm_host->clk); 1824 + pm_runtime_force_suspend(dev); 1871 1825 return ret; 1872 1826 } 1873 1827 #endif 1874 1828 1875 - static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend, 1876 - sdhci_tegra_resume); 1829 + static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = { 1830 + SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume, 1831 + NULL) 1832 + SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume) 1833 + }; 1877 1834 1878 1835 static struct platform_driver sdhci_tegra_driver = { 1879 1836 .driver = {
+3 -1
drivers/mtd/hyperbus/rpc-if.c
··· 130 130 131 131 rpcif_enable_rpm(&hyperbus->rpc); 132 132 133 - rpcif_hw_init(&hyperbus->rpc, true); 133 + error = rpcif_hw_init(&hyperbus->rpc, true); 134 + if (error) 135 + return error; 134 136 135 137 hyperbus->hbdev.map.size = hyperbus->rpc.size; 136 138 hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
+50 -8
drivers/mtd/nand/raw/tegra_nand.c
··· 17 17 #include <linux/mtd/rawnand.h> 18 18 #include <linux/of.h> 19 19 #include <linux/platform_device.h> 20 + #include <linux/pm_runtime.h> 20 21 #include <linux/reset.h> 22 + 23 + #include <soc/tegra/common.h> 21 24 22 25 #define COMMAND 0x00 23 26 #define COMMAND_GO BIT(31) ··· 1154 1151 return -ENOMEM; 1155 1152 1156 1153 ctrl->dev = &pdev->dev; 1154 + platform_set_drvdata(pdev, ctrl); 1157 1155 nand_controller_init(&ctrl->controller); 1158 1156 ctrl->controller.ops = &tegra_nand_controller_ops; 1159 1157 ··· 1170 1166 if (IS_ERR(ctrl->clk)) 1171 1167 return PTR_ERR(ctrl->clk); 1172 1168 1173 - err = clk_prepare_enable(ctrl->clk); 1169 + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); 1170 + if (err) 1171 + return err; 1172 + 1173 + /* 1174 + * This driver doesn't support active power management yet, 1175 + * so we will simply keep device resumed. 1176 + */ 1177 + pm_runtime_enable(&pdev->dev); 1178 + err = pm_runtime_resume_and_get(&pdev->dev); 1174 1179 if (err) 1175 1180 return err; 1176 1181 1177 1182 err = reset_control_reset(rst); 1178 1183 if (err) { 1179 1184 dev_err(ctrl->dev, "Failed to reset HW: %d\n", err); 1180 - goto err_disable_clk; 1185 + goto err_put_pm; 1181 1186 } 1182 1187 1183 1188 writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD); ··· 1201 1188 dev_name(&pdev->dev), ctrl); 1202 1189 if (err) { 1203 1190 dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err); 1204 - goto err_disable_clk; 1191 + goto err_put_pm; 1205 1192 } 1206 1193 1207 1194 writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL); 1208 1195 1209 1196 err = tegra_nand_chips_init(ctrl->dev, ctrl); 1210 1197 if (err) 1211 - goto err_disable_clk; 1212 - 1213 - platform_set_drvdata(pdev, ctrl); 1198 + goto err_put_pm; 1214 1199 1215 1200 return 0; 1216 1201 1217 - err_disable_clk: 1218 - clk_disable_unprepare(ctrl->clk); 1202 + err_put_pm: 1203 + pm_runtime_put_sync_suspend(ctrl->dev); 1204 + pm_runtime_force_suspend(ctrl->dev); 1219 1205 
return err; 1220 1206 } 1221 1207 ··· 1231 1219 1232 1220 nand_cleanup(chip); 1233 1221 1222 + pm_runtime_put_sync_suspend(ctrl->dev); 1223 + pm_runtime_force_suspend(ctrl->dev); 1224 + 1225 + return 0; 1226 + } 1227 + 1228 + static int __maybe_unused tegra_nand_runtime_resume(struct device *dev) 1229 + { 1230 + struct tegra_nand_controller *ctrl = dev_get_drvdata(dev); 1231 + int err; 1232 + 1233 + err = clk_prepare_enable(ctrl->clk); 1234 + if (err) { 1235 + dev_err(dev, "Failed to enable clock: %d\n", err); 1236 + return err; 1237 + } 1238 + 1239 + return 0; 1240 + } 1241 + 1242 + static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev) 1243 + { 1244 + struct tegra_nand_controller *ctrl = dev_get_drvdata(dev); 1245 + 1234 1246 clk_disable_unprepare(ctrl->clk); 1235 1247 1236 1248 return 0; 1237 1249 } 1250 + 1251 + static const struct dev_pm_ops tegra_nand_pm = { 1252 + SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume, 1253 + NULL) 1254 + }; 1238 1255 1239 1256 static const struct of_device_id tegra_nand_of_match[] = { 1240 1257 { .compatible = "nvidia,tegra20-nand" }, ··· 1275 1234 .driver = { 1276 1235 .name = "tegra-nand", 1277 1236 .of_match_table = tegra_nand_of_match, 1237 + .pm = &tegra_nand_pm, 1278 1238 }, 1279 1239 .probe = tegra_nand_probe, 1280 1240 .remove = tegra_nand_remove,
+64 -18
drivers/pwm/pwm-tegra.c
··· 42 42 #include <linux/module.h> 43 43 #include <linux/of.h> 44 44 #include <linux/of_device.h> 45 + #include <linux/pm_opp.h> 45 46 #include <linux/pwm.h> 46 47 #include <linux/platform_device.h> 47 48 #include <linux/pinctrl/consumer.h> 49 + #include <linux/pm_runtime.h> 48 50 #include <linux/slab.h> 49 51 #include <linux/reset.h> 52 + 53 + #include <soc/tegra/common.h> 50 54 51 55 #define PWM_ENABLE (1 << 31) 52 56 #define PWM_DUTY_WIDTH 8 ··· 149 145 required_clk_rate = 150 146 (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; 151 147 152 - err = clk_set_rate(pc->clk, required_clk_rate); 148 + err = dev_pm_opp_set_rate(pc->dev, required_clk_rate); 153 149 if (err < 0) 154 150 return -EINVAL; 155 151 ··· 185 181 * before writing the register. Otherwise, keep it enabled. 186 182 */ 187 183 if (!pwm_is_enabled(pwm)) { 188 - err = clk_prepare_enable(pc->clk); 189 - if (err < 0) 184 + err = pm_runtime_resume_and_get(pc->dev); 185 + if (err) 190 186 return err; 191 187 } else 192 188 val |= PWM_ENABLE; ··· 197 193 * If the PWM is not enabled, turn the clock off again to save power. 
198 194 */ 199 195 if (!pwm_is_enabled(pwm)) 200 - clk_disable_unprepare(pc->clk); 196 + pm_runtime_put(pc->dev); 201 197 202 198 return 0; 203 199 } ··· 208 204 int rc = 0; 209 205 u32 val; 210 206 211 - rc = clk_prepare_enable(pc->clk); 212 - if (rc < 0) 207 + rc = pm_runtime_resume_and_get(pc->dev); 208 + if (rc) 213 209 return rc; 214 210 215 211 val = pwm_readl(pc, pwm->hwpwm); ··· 228 224 val &= ~PWM_ENABLE; 229 225 pwm_writel(pc, pwm->hwpwm, val); 230 226 231 - clk_disable_unprepare(pc->clk); 227 + pm_runtime_put_sync(pc->dev); 232 228 } 233 229 234 230 static const struct pwm_ops tegra_pwm_ops = { ··· 260 256 if (IS_ERR(pwm->clk)) 261 257 return PTR_ERR(pwm->clk); 262 258 259 + ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); 260 + if (ret) 261 + return ret; 262 + 263 + pm_runtime_enable(&pdev->dev); 264 + ret = pm_runtime_resume_and_get(&pdev->dev); 265 + if (ret) 266 + return ret; 267 + 263 268 /* Set maximum frequency of the IP */ 264 - ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency); 269 + ret = dev_pm_opp_set_rate(pwm->dev, pwm->soc->max_frequency); 265 270 if (ret < 0) { 266 271 dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret); 267 - return ret; 272 + goto put_pm; 268 273 } 269 274 270 275 /* ··· 291 278 if (IS_ERR(pwm->rst)) { 292 279 ret = PTR_ERR(pwm->rst); 293 280 dev_err(&pdev->dev, "Reset control is not found: %d\n", ret); 294 - return ret; 281 + goto put_pm; 295 282 } 296 283 297 284 reset_control_deassert(pwm->rst); ··· 304 291 if (ret < 0) { 305 292 dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); 306 293 reset_control_assert(pwm->rst); 307 - return ret; 294 + goto put_pm; 308 295 } 309 296 297 + pm_runtime_put(&pdev->dev); 298 + 310 299 return 0; 300 + put_pm: 301 + pm_runtime_put_sync_suspend(&pdev->dev); 302 + pm_runtime_force_suspend(&pdev->dev); 303 + return ret; 311 304 } 312 305 313 306 static int tegra_pwm_remove(struct platform_device *pdev) ··· 324 305 325 306 reset_control_assert(pc->rst); 326 
307 308 + pm_runtime_force_suspend(&pdev->dev); 309 + 327 310 return 0; 328 311 } 329 312 330 - #ifdef CONFIG_PM_SLEEP 331 - static int tegra_pwm_suspend(struct device *dev) 313 + static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev) 332 314 { 333 - return pinctrl_pm_select_sleep_state(dev); 315 + struct tegra_pwm_chip *pc = dev_get_drvdata(dev); 316 + int err; 317 + 318 + clk_disable_unprepare(pc->clk); 319 + 320 + err = pinctrl_pm_select_sleep_state(dev); 321 + if (err) { 322 + clk_prepare_enable(pc->clk); 323 + return err; 324 + } 325 + 326 + return 0; 334 327 } 335 328 336 - static int tegra_pwm_resume(struct device *dev) 329 + static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev) 337 330 { 338 - return pinctrl_pm_select_default_state(dev); 331 + struct tegra_pwm_chip *pc = dev_get_drvdata(dev); 332 + int err; 333 + 334 + err = pinctrl_pm_select_default_state(dev); 335 + if (err) 336 + return err; 337 + 338 + err = clk_prepare_enable(pc->clk); 339 + if (err) { 340 + pinctrl_pm_select_sleep_state(dev); 341 + return err; 342 + } 343 + 344 + return 0; 339 345 } 340 - #endif 341 346 342 347 static const struct tegra_pwm_soc tegra20_pwm_soc = { 343 348 .num_channels = 4, ··· 387 344 MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); 388 345 389 346 static const struct dev_pm_ops tegra_pwm_pm_ops = { 390 - SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume) 347 + SET_RUNTIME_PM_OPS(tegra_pwm_runtime_suspend, tegra_pwm_runtime_resume, 348 + NULL) 349 + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 350 + pm_runtime_force_resume) 391 351 }; 392 352 393 353 static struct platform_driver tegra_pwm_driver = {
+1
drivers/soc/Kconfig
··· 3 3 4 4 source "drivers/soc/actions/Kconfig" 5 5 source "drivers/soc/amlogic/Kconfig" 6 + source "drivers/soc/apple/Kconfig" 6 7 source "drivers/soc/aspeed/Kconfig" 7 8 source "drivers/soc/atmel/Kconfig" 8 9 source "drivers/soc/bcm/Kconfig"
+1
drivers/soc/Makefile
··· 4 4 # 5 5 6 6 obj-$(CONFIG_ARCH_ACTIONS) += actions/ 7 + obj-$(CONFIG_ARCH_APPLE) += apple/ 7 8 obj-y += aspeed/ 8 9 obj-$(CONFIG_ARCH_AT91) += atmel/ 9 10 obj-y += bcm/
+22
drivers/soc/apple/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + 3 + if ARCH_APPLE || COMPILE_TEST 4 + 5 + menu "Apple SoC drivers" 6 + 7 + config APPLE_PMGR_PWRSTATE 8 + bool "Apple SoC PMGR power state control" 9 + depends on PM 10 + select REGMAP 11 + select MFD_SYSCON 12 + select PM_GENERIC_DOMAINS 13 + select RESET_CONTROLLER 14 + default ARCH_APPLE 15 + help 16 + The PMGR block in Apple SoCs provides high-level power state 17 + controls for SoC devices. This driver manages them through the 18 + generic power domain framework, and also provides reset support. 19 + 20 + endmenu 21 + 22 + endif
+2
drivers/soc/apple/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o
+324
drivers/soc/apple/apple-pmgr-pwrstate.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only OR MIT 2 + /* 3 + * Apple SoC PMGR device power state driver 4 + * 5 + * Copyright The Asahi Linux Contributors 6 + */ 7 + 8 + #include <linux/bitops.h> 9 + #include <linux/bitfield.h> 10 + #include <linux/err.h> 11 + #include <linux/of.h> 12 + #include <linux/of_address.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/pm_domain.h> 15 + #include <linux/regmap.h> 16 + #include <linux/mfd/syscon.h> 17 + #include <linux/reset-controller.h> 18 + #include <linux/module.h> 19 + 20 + #define APPLE_PMGR_RESET BIT(31) 21 + #define APPLE_PMGR_AUTO_ENABLE BIT(28) 22 + #define APPLE_PMGR_PS_AUTO GENMASK(27, 24) 23 + #define APPLE_PMGR_PS_MIN GENMASK(19, 16) 24 + #define APPLE_PMGR_PARENT_OFF BIT(11) 25 + #define APPLE_PMGR_DEV_DISABLE BIT(10) 26 + #define APPLE_PMGR_WAS_CLKGATED BIT(9) 27 + #define APPLE_PMGR_WAS_PWRGATED BIT(8) 28 + #define APPLE_PMGR_PS_ACTUAL GENMASK(7, 4) 29 + #define APPLE_PMGR_PS_TARGET GENMASK(3, 0) 30 + 31 + #define APPLE_PMGR_FLAGS (APPLE_PMGR_WAS_CLKGATED | APPLE_PMGR_WAS_PWRGATED) 32 + 33 + #define APPLE_PMGR_PS_ACTIVE 0xf 34 + #define APPLE_PMGR_PS_CLKGATE 0x4 35 + #define APPLE_PMGR_PS_PWRGATE 0x0 36 + 37 + #define APPLE_PMGR_PS_SET_TIMEOUT 100 38 + #define APPLE_PMGR_RESET_TIME 1 39 + 40 + struct apple_pmgr_ps { 41 + struct device *dev; 42 + struct generic_pm_domain genpd; 43 + struct reset_controller_dev rcdev; 44 + struct regmap *regmap; 45 + u32 offset; 46 + u32 min_state; 47 + }; 48 + 49 + #define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd) 50 + #define rcdev_to_apple_pmgr_ps(_rcdev) container_of(_rcdev, struct apple_pmgr_ps, rcdev) 51 + 52 + static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool auto_enable) 53 + { 54 + int ret; 55 + struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd); 56 + u32 reg; 57 + 58 + ret = regmap_read(ps->regmap, ps->offset, &reg); 59 + if (ret < 0) 60 + return ret; 61 + 62 + /* Resets 
are synchronous, and only work if the device is powered and clocked. */ 63 + if (reg & APPLE_PMGR_RESET && pstate != APPLE_PMGR_PS_ACTIVE) 64 + dev_err(ps->dev, "PS %s: powering off with RESET active\n", 65 + genpd->name); 66 + 67 + reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET); 68 + reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate); 69 + 70 + dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg); 71 + 72 + regmap_write(ps->regmap, ps->offset, reg); 73 + 74 + ret = regmap_read_poll_timeout_atomic( 75 + ps->regmap, ps->offset, reg, 76 + (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1, 77 + APPLE_PMGR_PS_SET_TIMEOUT); 78 + if (ret < 0) 79 + dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n", 80 + genpd->name, pstate, reg); 81 + 82 + if (auto_enable) { 83 + /* Not all devices implement this; this is a no-op where not implemented. */ 84 + reg &= ~APPLE_PMGR_FLAGS; 85 + reg |= APPLE_PMGR_AUTO_ENABLE; 86 + regmap_write(ps->regmap, ps->offset, reg); 87 + } 88 + 89 + return ret; 90 + } 91 + 92 + static bool apple_pmgr_ps_is_active(struct apple_pmgr_ps *ps) 93 + { 94 + u32 reg = 0; 95 + 96 + regmap_read(ps->regmap, ps->offset, &reg); 97 + /* 98 + * We consider domains as active if they are actually on, or if they have auto-PM 99 + * enabled and the intended target is on. 
100 + */ 101 + return (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == APPLE_PMGR_PS_ACTIVE || 102 + (FIELD_GET(APPLE_PMGR_PS_TARGET, reg) == APPLE_PMGR_PS_ACTIVE && 103 + reg & APPLE_PMGR_AUTO_ENABLE)); 104 + } 105 + 106 + static int apple_pmgr_ps_power_on(struct generic_pm_domain *genpd) 107 + { 108 + return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_ACTIVE, true); 109 + } 110 + 111 + static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd) 112 + { 113 + return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_PWRGATE, false); 114 + } 115 + 116 + static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) 117 + { 118 + struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev); 119 + 120 + mutex_lock(&ps->genpd.mlock); 121 + 122 + if (ps->genpd.status == GENPD_STATE_OFF) 123 + dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset); 124 + 125 + dev_dbg(ps->dev, "PS 0x%x: assert reset\n", ps->offset); 126 + /* Quiesce device before asserting reset */ 127 + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 128 + APPLE_PMGR_DEV_DISABLE); 129 + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 130 + APPLE_PMGR_RESET); 131 + 132 + mutex_unlock(&ps->genpd.mlock); 133 + 134 + return 0; 135 + } 136 + 137 + static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) 138 + { 139 + struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev); 140 + 141 + mutex_lock(&ps->genpd.mlock); 142 + 143 + dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset); 144 + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0); 145 + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 0); 146 + 147 + if (ps->genpd.status == GENPD_STATE_OFF) 148 + dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset); 149 + 150 + mutex_unlock(&ps->genpd.mlock); 151 + 152 + return 0; 153 + 
} 154 + 155 + static int apple_pmgr_reset_reset(struct reset_controller_dev *rcdev, unsigned long id) 156 + { 157 + int ret; 158 + 159 + ret = apple_pmgr_reset_assert(rcdev, id); 160 + if (ret) 161 + return ret; 162 + 163 + usleep_range(APPLE_PMGR_RESET_TIME, 2 * APPLE_PMGR_RESET_TIME); 164 + 165 + return apple_pmgr_reset_deassert(rcdev, id); 166 + } 167 + 168 + static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned long id) 169 + { 170 + struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev); 171 + u32 reg = 0; 172 + 173 + regmap_read(ps->regmap, ps->offset, &reg); 174 + 175 + return !!(reg & APPLE_PMGR_RESET); 176 + } 177 + 178 + const struct reset_control_ops apple_pmgr_reset_ops = { 179 + .assert = apple_pmgr_reset_assert, 180 + .deassert = apple_pmgr_reset_deassert, 181 + .reset = apple_pmgr_reset_reset, 182 + .status = apple_pmgr_reset_status, 183 + }; 184 + 185 + static int apple_pmgr_reset_xlate(struct reset_controller_dev *rcdev, 186 + const struct of_phandle_args *reset_spec) 187 + { 188 + return 0; 189 + } 190 + 191 + static int apple_pmgr_ps_probe(struct platform_device *pdev) 192 + { 193 + struct device *dev = &pdev->dev; 194 + struct device_node *node = dev->of_node; 195 + struct apple_pmgr_ps *ps; 196 + struct regmap *regmap; 197 + struct of_phandle_iterator it; 198 + int ret; 199 + const char *name; 200 + bool active; 201 + 202 + regmap = syscon_node_to_regmap(node->parent); 203 + if (IS_ERR(regmap)) 204 + return PTR_ERR(regmap); 205 + 206 + ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL); 207 + if (!ps) 208 + return -ENOMEM; 209 + 210 + ps->dev = dev; 211 + ps->regmap = regmap; 212 + 213 + ret = of_property_read_string(node, "label", &name); 214 + if (ret < 0) { 215 + dev_err(dev, "missing label property\n"); 216 + return ret; 217 + } 218 + 219 + ret = of_property_read_u32(node, "reg", &ps->offset); 220 + if (ret < 0) { 221 + dev_err(dev, "missing reg property\n"); 222 + return ret; 223 + } 224 + 225 + ps->genpd.name = 
name; 226 + ps->genpd.power_on = apple_pmgr_ps_power_on; 227 + ps->genpd.power_off = apple_pmgr_ps_power_off; 228 + 229 + ret = of_property_read_u32(node, "apple,min-state", &ps->min_state); 230 + if (ret == 0 && ps->min_state <= APPLE_PMGR_PS_ACTIVE) 231 + regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN, 232 + FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state)); 233 + 234 + active = apple_pmgr_ps_is_active(ps); 235 + if (of_property_read_bool(node, "apple,always-on")) { 236 + ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON; 237 + if (!active) { 238 + dev_warn(dev, "always-on domain %s is not on at boot\n", name); 239 + /* Turn it on so pm_genpd_init does not fail */ 240 + active = apple_pmgr_ps_power_on(&ps->genpd) == 0; 241 + } 242 + } 243 + 244 + /* Turn on auto-PM if the domain is already on */ 245 + if (active) 246 + regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_AUTO_ENABLE, 247 + APPLE_PMGR_AUTO_ENABLE); 248 + 249 + ret = pm_genpd_init(&ps->genpd, NULL, !active); 250 + if (ret < 0) { 251 + dev_err(dev, "pm_genpd_init failed\n"); 252 + return ret; 253 + } 254 + 255 + ret = of_genpd_add_provider_simple(node, &ps->genpd); 256 + if (ret < 0) { 257 + dev_err(dev, "of_genpd_add_provider_simple failed\n"); 258 + return ret; 259 + } 260 + 261 + of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) { 262 + struct of_phandle_args parent, child; 263 + 264 + parent.np = it.node; 265 + parent.args_count = of_phandle_iterator_args(&it, parent.args, MAX_PHANDLE_ARGS); 266 + child.np = node; 267 + child.args_count = 0; 268 + ret = of_genpd_add_subdomain(&parent, &child); 269 + 270 + if (ret == -EPROBE_DEFER) { 271 + of_node_put(parent.np); 272 + goto err_remove; 273 + } else if (ret < 0) { 274 + dev_err(dev, "failed to add to parent domain: %d (%s -> %s)\n", 275 + ret, it.node->name, node->name); 276 + of_node_put(parent.np); 277 + goto err_remove; 278 + } 279 + } 280 + 281 + /* 282 + * Do not participate in 
regular PM; parent power domains are handled via the 283 + * genpd hierarchy. 284 + */ 285 + pm_genpd_remove_device(dev); 286 + 287 + ps->rcdev.owner = THIS_MODULE; 288 + ps->rcdev.nr_resets = 1; 289 + ps->rcdev.ops = &apple_pmgr_reset_ops; 290 + ps->rcdev.of_node = dev->of_node; 291 + ps->rcdev.of_reset_n_cells = 0; 292 + ps->rcdev.of_xlate = apple_pmgr_reset_xlate; 293 + 294 + ret = devm_reset_controller_register(dev, &ps->rcdev); 295 + if (ret < 0) 296 + goto err_remove; 297 + 298 + return 0; 299 + err_remove: 300 + of_genpd_del_provider(node); 301 + pm_genpd_remove(&ps->genpd); 302 + return ret; 303 + } 304 + 305 + static const struct of_device_id apple_pmgr_ps_of_match[] = { 306 + { .compatible = "apple,pmgr-pwrstate" }, 307 + {} 308 + }; 309 + 310 + MODULE_DEVICE_TABLE(of, apple_pmgr_ps_of_match); 311 + 312 + static struct platform_driver apple_pmgr_ps_driver = { 313 + .probe = apple_pmgr_ps_probe, 314 + .driver = { 315 + .name = "apple-pmgr-pwrstate", 316 + .of_match_table = apple_pmgr_ps_of_match, 317 + }, 318 + }; 319 + 320 + MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); 321 + MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs"); 322 + MODULE_LICENSE("GPL v2"); 323 + 324 + module_platform_driver(apple_pmgr_ps_driver);
+28 -1
drivers/soc/imx/gpcv2.c
··· 377 377 } 378 378 } 379 379 380 - pm_runtime_put(domain->dev); 380 + pm_runtime_put_sync_suspend(domain->dev); 381 381 382 382 return 0; 383 383 ··· 734 734 .map = IMX8MM_VPUH1_A53_DOMAIN, 735 735 }, 736 736 .pgc = BIT(IMX8MM_PGC_VPUH1), 737 + .keep_clocks = true, 737 738 }, 738 739 739 740 [IMX8MM_POWER_DOMAIN_DISPMIX] = { ··· 841 840 .hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN, 842 841 }, 843 842 .pgc = BIT(IMX8MN_PGC_GPUMIX), 843 + .keep_clocks = true, 844 + }, 845 + 846 + [IMX8MN_POWER_DOMAIN_DISPMIX] = { 847 + .genpd = { 848 + .name = "dispmix", 849 + }, 850 + .bits = { 851 + .pxx = IMX8MN_DISPMIX_SW_Pxx_REQ, 852 + .map = IMX8MN_DISPMIX_A53_DOMAIN, 853 + .hskreq = IMX8MN_DISPMIX_HSK_PWRDNREQN, 854 + .hskack = IMX8MN_DISPMIX_HSK_PWRDNACKN, 855 + }, 856 + .pgc = BIT(IMX8MN_PGC_DISPMIX), 857 + .keep_clocks = true, 858 + }, 859 + 860 + [IMX8MN_POWER_DOMAIN_MIPI] = { 861 + .genpd = { 862 + .name = "mipi", 863 + }, 864 + .bits = { 865 + .pxx = IMX8MN_MIPI_SW_Pxx_REQ, 866 + .map = IMX8MN_MIPI_A53_DOMAIN, 867 + }, 868 + .pgc = BIT(IMX8MN_PGC_MIPI), 844 869 }, 845 870 }; 846 871
+76 -1
drivers/soc/imx/imx8m-blk-ctrl.c
··· 14 14 #include <linux/clk.h> 15 15 16 16 #include <dt-bindings/power/imx8mm-power.h> 17 + #include <dt-bindings/power/imx8mn-power.h> 17 18 18 19 #define BLK_SFT_RSTN 0x0 19 20 #define BLK_CLK_EN 0x4 ··· 518 517 .num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data), 519 518 }; 520 519 520 + 521 + static int imx8mn_disp_power_notifier(struct notifier_block *nb, 522 + unsigned long action, void *data) 523 + { 524 + struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl, 525 + power_nb); 526 + 527 + if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF) 528 + return NOTIFY_OK; 529 + 530 + /* Enable bus clock and deassert bus reset */ 531 + regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8)); 532 + regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8)); 533 + 534 + /* 535 + * On power up we have no software backchannel to the GPC to 536 + * wait for the ADB handshake to happen, so we just delay for a 537 + * bit. On power down the GPC driver waits for the handshake. 538 + */ 539 + if (action == GENPD_NOTIFY_ON) 540 + udelay(5); 541 + 542 + 543 + return NOTIFY_OK; 544 + } 545 + 546 + static const struct imx8m_blk_ctrl_domain_data imx8mn_disp_blk_ctl_domain_data[] = { 547 + [IMX8MN_DISPBLK_PD_MIPI_DSI] = { 548 + .name = "dispblk-mipi-dsi", 549 + .clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", }, 550 + .num_clks = 2, 551 + .gpc_name = "mipi-dsi", 552 + .rst_mask = BIT(0) | BIT(1), 553 + .clk_mask = BIT(0) | BIT(1), 554 + .mipi_phy_rst_mask = BIT(17), 555 + }, 556 + [IMX8MN_DISPBLK_PD_MIPI_CSI] = { 557 + .name = "dispblk-mipi-csi", 558 + .clk_names = (const char *[]){ "csi-aclk", "csi-pclk" }, 559 + .num_clks = 2, 560 + .gpc_name = "mipi-csi", 561 + .rst_mask = BIT(2) | BIT(3), 562 + .clk_mask = BIT(2) | BIT(3), 563 + .mipi_phy_rst_mask = BIT(16), 564 + }, 565 + [IMX8MN_DISPBLK_PD_LCDIF] = { 566 + .name = "dispblk-lcdif", 567 + .clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", }, 568 + .num_clks = 3, 569 + .gpc_name = 
"lcdif", 570 + .rst_mask = BIT(4) | BIT(5), 571 + .clk_mask = BIT(4) | BIT(5), 572 + }, 573 + [IMX8MN_DISPBLK_PD_ISI] = { 574 + .name = "dispblk-isi", 575 + .clk_names = (const char *[]){ "disp_axi", "disp_apb", "disp_axi_root", 576 + "disp_apb_root"}, 577 + .num_clks = 4, 578 + .gpc_name = "isi", 579 + .rst_mask = BIT(6) | BIT(7), 580 + .clk_mask = BIT(6) | BIT(7), 581 + }, 582 + }; 583 + 584 + static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = { 585 + .max_reg = 0x84, 586 + .power_notifier_fn = imx8mn_disp_power_notifier, 587 + .domains = imx8mn_disp_blk_ctl_domain_data, 588 + .num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data), 589 + }; 590 + 521 591 static const struct of_device_id imx8m_blk_ctrl_of_match[] = { 522 592 { 523 593 .compatible = "fsl,imx8mm-vpu-blk-ctrl", ··· 596 524 }, { 597 525 .compatible = "fsl,imx8mm-disp-blk-ctrl", 598 526 .data = &imx8mm_disp_blk_ctl_dev_data 599 - } ,{ 527 + }, { 528 + .compatible = "fsl,imx8mn-disp-blk-ctrl", 529 + .data = &imx8mn_disp_blk_ctl_dev_data 530 + }, { 600 531 /* Sentinel */ 601 532 } 602 533 };
+1 -1
drivers/soc/qcom/cpr.c
··· 1010 1010 return corner->uV; 1011 1011 1012 1012 temp = f_diff * (uV_high - uV_low); 1013 - do_div(temp, f_high - f_low); 1013 + temp = div64_ul(temp, f_high - f_low); 1014 1014 1015 1015 /* 1016 1016 * max_volt_scale has units of uV/MHz while freq values
+28
drivers/soc/qcom/llcc-qcom.c
··· 195 195 { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 196 196 }; 197 197 198 + static const struct llcc_slice_config sm8350_data[] = { 199 + { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 }, 200 + { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 201 + { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, 202 + { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 203 + { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 204 + { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 205 + { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 206 + { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 }, 207 + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, 208 + { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 209 + { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 }, 210 + { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 211 + { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 212 + { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 }, 213 + { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 }, 214 + { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, 215 + { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 216 + { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, 217 + { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, 218 + }; 219 + 198 220 static const struct qcom_llcc_config sc7180_cfg = { 199 221 .sct_data = sc7180_data, 200 222 .size = ARRAY_SIZE(sc7180_data), ··· 248 226 static const struct qcom_llcc_config sm8250_cfg = { 249 227 .sct_data = sm8250_data, 250 228 .size = ARRAY_SIZE(sm8250_data), 229 + }; 230 + 231 + static const struct qcom_llcc_config sm8350_cfg = { 232 + .sct_data = sm8350_data, 233 + .size = ARRAY_SIZE(sm8350_data), 251 234 }; 252 235 253 236 static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; ··· 671 644 { .compatible = 
"qcom,sm6350-llcc", .data = &sm6350_cfg }, 672 645 { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg }, 673 646 { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg }, 647 + { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg }, 674 648 { } 675 649 }; 676 650
+1 -1
drivers/soc/qcom/qcom_aoss.c
··· 352 352 return ret; 353 353 } 354 354 355 - static struct thermal_cooling_device_ops qmp_cooling_device_ops = { 355 + static const struct thermal_cooling_device_ops qmp_cooling_device_ops = { 356 356 .get_max_state = qmp_cdev_get_max_state, 357 357 .get_cur_state = qmp_cdev_get_cur_state, 358 358 .set_cur_state = qmp_cdev_set_cur_state,
+13
drivers/soc/qcom/qcom_stats.c
··· 237 237 .subsystem_stats_in_smem = false, 238 238 }; 239 239 240 + /* Older RPM firmwares have the stats at a fixed offset instead */ 241 + static const struct stats_config rpm_data_dba0 = { 242 + .stats_offset = 0xdba0, 243 + .num_records = 2, 244 + .appended_stats_avail = true, 245 + .dynamic_offset = false, 246 + .subsystem_stats_in_smem = false, 247 + }; 248 + 240 249 static const struct stats_config rpmh_data = { 241 250 .stats_offset = 0x48, 242 251 .num_records = 3, ··· 255 246 }; 256 247 257 248 static const struct of_device_id qcom_stats_table[] = { 249 + { .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 }, 250 + { .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 }, 251 + { .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 }, 252 + { .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 }, 258 253 { .compatible = "qcom,rpm-stats", .data = &rpm_data }, 259 254 { .compatible = "qcom,rpmh-stats", .data = &rpmh_data }, 260 255 { }
+1 -1
drivers/soc/qcom/qmi_interface.c
··· 96 96 * @node: id of the dying node 97 97 * 98 98 * Signals the client that all previously registered services on this node are 99 - * now gone and then calls the bye callback to allow the client client further 99 + * now gone and then calls the bye callback to allow the client further 100 100 * cleaning up resources associated with this remote. 101 101 */ 102 102 static void qmi_recv_bye(struct qmi_handle *qmi,
+1 -1
drivers/soc/qcom/rpmh-rsc.c
··· 691 691 * @drv: The controller. 692 692 * @msg: The data to be written to the controller. 693 693 * 694 - * This should only be called for for sleep/wake state, never active-only 694 + * This should only be called for sleep/wake state, never active-only 695 695 * state. 696 696 * 697 697 * The caller must ensure that no other RPMH actions are happening and the
+195 -139
drivers/soc/qcom/rpmhpd.c
··· 63 63 64 64 static DEFINE_MUTEX(rpmhpd_lock); 65 65 66 - /* SDM845 RPMH powerdomains */ 66 + /* RPMH powerdomains */ 67 67 68 - static struct rpmhpd sdm845_ebi = { 68 + static struct rpmhpd cx_ao; 69 + static struct rpmhpd mx; 70 + static struct rpmhpd mx_ao; 71 + static struct rpmhpd cx = { 72 + .pd = { .name = "cx", }, 73 + .peer = &cx_ao, 74 + .res_name = "cx.lvl", 75 + }; 76 + 77 + static struct rpmhpd cx_ao = { 78 + .pd = { .name = "cx_ao", }, 79 + .active_only = true, 80 + .peer = &cx, 81 + .res_name = "cx.lvl", 82 + }; 83 + 84 + static struct rpmhpd cx_ao_w_mx_parent; 85 + static struct rpmhpd cx_w_mx_parent = { 86 + .pd = { .name = "cx", }, 87 + .peer = &cx_ao_w_mx_parent, 88 + .parent = &mx.pd, 89 + .res_name = "cx.lvl", 90 + }; 91 + 92 + static struct rpmhpd cx_ao_w_mx_parent = { 93 + .pd = { .name = "cx_ao", }, 94 + .active_only = true, 95 + .peer = &cx_w_mx_parent, 96 + .parent = &mx_ao.pd, 97 + .res_name = "cx.lvl", 98 + }; 99 + 100 + static struct rpmhpd ebi = { 69 101 .pd = { .name = "ebi", }, 70 102 .res_name = "ebi.lvl", 71 103 }; 72 104 73 - static struct rpmhpd sdm845_lmx = { 74 - .pd = { .name = "lmx", }, 75 - .res_name = "lmx.lvl", 76 - }; 77 - 78 - static struct rpmhpd sdm845_lcx = { 79 - .pd = { .name = "lcx", }, 80 - .res_name = "lcx.lvl", 81 - }; 82 - 83 - static struct rpmhpd sdm845_gfx = { 105 + static struct rpmhpd gfx = { 84 106 .pd = { .name = "gfx", }, 85 107 .res_name = "gfx.lvl", 86 108 }; 87 109 88 - static struct rpmhpd sdm845_mss = { 110 + static struct rpmhpd lcx = { 111 + .pd = { .name = "lcx", }, 112 + .res_name = "lcx.lvl", 113 + }; 114 + 115 + static struct rpmhpd lmx = { 116 + .pd = { .name = "lmx", }, 117 + .res_name = "lmx.lvl", 118 + }; 119 + 120 + static struct rpmhpd mmcx_ao; 121 + static struct rpmhpd mmcx = { 122 + .pd = { .name = "mmcx", }, 123 + .peer = &mmcx_ao, 124 + .res_name = "mmcx.lvl", 125 + }; 126 + 127 + static struct rpmhpd mmcx_ao = { 128 + .pd = { .name = "mmcx_ao", }, 129 + .active_only = true, 130 
+ .peer = &mmcx, 131 + .res_name = "mmcx.lvl", 132 + }; 133 + 134 + static struct rpmhpd mmcx_ao_w_cx_parent; 135 + static struct rpmhpd mmcx_w_cx_parent = { 136 + .pd = { .name = "mmcx", }, 137 + .peer = &mmcx_ao_w_cx_parent, 138 + .parent = &cx.pd, 139 + .res_name = "mmcx.lvl", 140 + }; 141 + 142 + static struct rpmhpd mmcx_ao_w_cx_parent = { 143 + .pd = { .name = "mmcx_ao", }, 144 + .active_only = true, 145 + .peer = &mmcx_w_cx_parent, 146 + .parent = &cx_ao.pd, 147 + .res_name = "mmcx.lvl", 148 + }; 149 + 150 + static struct rpmhpd mss = { 89 151 .pd = { .name = "mss", }, 90 152 .res_name = "mss.lvl", 91 153 }; 92 154 93 - static struct rpmhpd sdm845_mx_ao; 94 - static struct rpmhpd sdm845_mx = { 155 + static struct rpmhpd mx_ao; 156 + static struct rpmhpd mx = { 95 157 .pd = { .name = "mx", }, 96 - .peer = &sdm845_mx_ao, 158 + .peer = &mx_ao, 97 159 .res_name = "mx.lvl", 98 160 }; 99 161 100 - static struct rpmhpd sdm845_mx_ao = { 162 + static struct rpmhpd mx_ao = { 101 163 .pd = { .name = "mx_ao", }, 102 164 .active_only = true, 103 - .peer = &sdm845_mx, 165 + .peer = &mx, 104 166 .res_name = "mx.lvl", 105 167 }; 106 168 107 - static struct rpmhpd sdm845_cx_ao; 108 - static struct rpmhpd sdm845_cx = { 109 - .pd = { .name = "cx", }, 110 - .peer = &sdm845_cx_ao, 111 - .parent = &sdm845_mx.pd, 112 - .res_name = "cx.lvl", 169 + static struct rpmhpd mxc_ao; 170 + static struct rpmhpd mxc = { 171 + .pd = { .name = "mxc", }, 172 + .peer = &mxc_ao, 173 + .res_name = "mxc.lvl", 113 174 }; 114 175 115 - static struct rpmhpd sdm845_cx_ao = { 116 - .pd = { .name = "cx_ao", }, 176 + static struct rpmhpd mxc_ao = { 177 + .pd = { .name = "mxc_ao", }, 117 178 .active_only = true, 118 - .peer = &sdm845_cx, 119 - .parent = &sdm845_mx_ao.pd, 120 - .res_name = "cx.lvl", 179 + .peer = &mxc, 180 + .res_name = "mxc.lvl", 121 181 }; 122 182 183 + /* SDM845 RPMH powerdomains */ 123 184 static struct rpmhpd *sdm845_rpmhpds[] = { 124 - [SDM845_EBI] = &sdm845_ebi, 125 - [SDM845_MX] = 
&sdm845_mx, 126 - [SDM845_MX_AO] = &sdm845_mx_ao, 127 - [SDM845_CX] = &sdm845_cx, 128 - [SDM845_CX_AO] = &sdm845_cx_ao, 129 - [SDM845_LMX] = &sdm845_lmx, 130 - [SDM845_LCX] = &sdm845_lcx, 131 - [SDM845_GFX] = &sdm845_gfx, 132 - [SDM845_MSS] = &sdm845_mss, 185 + [SDM845_CX] = &cx_w_mx_parent, 186 + [SDM845_CX_AO] = &cx_ao_w_mx_parent, 187 + [SDM845_EBI] = &ebi, 188 + [SDM845_GFX] = &gfx, 189 + [SDM845_LCX] = &lcx, 190 + [SDM845_LMX] = &lmx, 191 + [SDM845_MSS] = &mss, 192 + [SDM845_MX] = &mx, 193 + [SDM845_MX_AO] = &mx_ao, 133 194 }; 134 195 135 196 static const struct rpmhpd_desc sdm845_desc = { ··· 200 139 201 140 /* SDX55 RPMH powerdomains */ 202 141 static struct rpmhpd *sdx55_rpmhpds[] = { 203 - [SDX55_MSS] = &sdm845_mss, 204 - [SDX55_MX] = &sdm845_mx, 205 - [SDX55_CX] = &sdm845_cx, 142 + [SDX55_CX] = &cx_w_mx_parent, 143 + [SDX55_MSS] = &mss, 144 + [SDX55_MX] = &mx, 206 145 }; 207 146 208 147 static const struct rpmhpd_desc sdx55_desc = { ··· 212 151 213 152 /* SM6350 RPMH powerdomains */ 214 153 static struct rpmhpd *sm6350_rpmhpds[] = { 215 - [SM6350_CX] = &sdm845_cx, 216 - [SM6350_GFX] = &sdm845_gfx, 217 - [SM6350_LCX] = &sdm845_lcx, 218 - [SM6350_LMX] = &sdm845_lmx, 219 - [SM6350_MSS] = &sdm845_mss, 220 - [SM6350_MX] = &sdm845_mx, 154 + [SM6350_CX] = &cx_w_mx_parent, 155 + [SM6350_GFX] = &gfx, 156 + [SM6350_LCX] = &lcx, 157 + [SM6350_LMX] = &lmx, 158 + [SM6350_MSS] = &mss, 159 + [SM6350_MX] = &mx, 221 160 }; 222 161 223 162 static const struct rpmhpd_desc sm6350_desc = { ··· 226 165 }; 227 166 228 167 /* SM8150 RPMH powerdomains */ 229 - 230 - static struct rpmhpd sm8150_mmcx_ao; 231 - static struct rpmhpd sm8150_mmcx = { 232 - .pd = { .name = "mmcx", }, 233 - .peer = &sm8150_mmcx_ao, 234 - .res_name = "mmcx.lvl", 235 - }; 236 - 237 - static struct rpmhpd sm8150_mmcx_ao = { 238 - .pd = { .name = "mmcx_ao", }, 239 - .active_only = true, 240 - .peer = &sm8150_mmcx, 241 - .res_name = "mmcx.lvl", 242 - }; 243 - 244 168 static struct rpmhpd *sm8150_rpmhpds[] = { 
245 - [SM8150_MSS] = &sdm845_mss, 246 - [SM8150_EBI] = &sdm845_ebi, 247 - [SM8150_LMX] = &sdm845_lmx, 248 - [SM8150_LCX] = &sdm845_lcx, 249 - [SM8150_GFX] = &sdm845_gfx, 250 - [SM8150_MX] = &sdm845_mx, 251 - [SM8150_MX_AO] = &sdm845_mx_ao, 252 - [SM8150_CX] = &sdm845_cx, 253 - [SM8150_CX_AO] = &sdm845_cx_ao, 254 - [SM8150_MMCX] = &sm8150_mmcx, 255 - [SM8150_MMCX_AO] = &sm8150_mmcx_ao, 169 + [SM8150_CX] = &cx_w_mx_parent, 170 + [SM8150_CX_AO] = &cx_ao_w_mx_parent, 171 + [SM8150_EBI] = &ebi, 172 + [SM8150_GFX] = &gfx, 173 + [SM8150_LCX] = &lcx, 174 + [SM8150_LMX] = &lmx, 175 + [SM8150_MMCX] = &mmcx, 176 + [SM8150_MMCX_AO] = &mmcx_ao, 177 + [SM8150_MSS] = &mss, 178 + [SM8150_MX] = &mx, 179 + [SM8150_MX_AO] = &mx_ao, 256 180 }; 257 181 258 182 static const struct rpmhpd_desc sm8150_desc = { ··· 245 199 .num_pds = ARRAY_SIZE(sm8150_rpmhpds), 246 200 }; 247 201 202 + /* SM8250 RPMH powerdomains */ 248 203 static struct rpmhpd *sm8250_rpmhpds[] = { 249 - [SM8250_CX] = &sdm845_cx, 250 - [SM8250_CX_AO] = &sdm845_cx_ao, 251 - [SM8250_EBI] = &sdm845_ebi, 252 - [SM8250_GFX] = &sdm845_gfx, 253 - [SM8250_LCX] = &sdm845_lcx, 254 - [SM8250_LMX] = &sdm845_lmx, 255 - [SM8250_MMCX] = &sm8150_mmcx, 256 - [SM8250_MMCX_AO] = &sm8150_mmcx_ao, 257 - [SM8250_MX] = &sdm845_mx, 258 - [SM8250_MX_AO] = &sdm845_mx_ao, 204 + [SM8250_CX] = &cx_w_mx_parent, 205 + [SM8250_CX_AO] = &cx_ao_w_mx_parent, 206 + [SM8250_EBI] = &ebi, 207 + [SM8250_GFX] = &gfx, 208 + [SM8250_LCX] = &lcx, 209 + [SM8250_LMX] = &lmx, 210 + [SM8250_MMCX] = &mmcx, 211 + [SM8250_MMCX_AO] = &mmcx_ao, 212 + [SM8250_MX] = &mx, 213 + [SM8250_MX_AO] = &mx_ao, 259 214 }; 260 215 261 216 static const struct rpmhpd_desc sm8250_desc = { ··· 265 218 }; 266 219 267 220 /* SM8350 Power domains */ 268 - static struct rpmhpd sm8350_mxc_ao; 269 - static struct rpmhpd sm8350_mxc = { 270 - .pd = { .name = "mxc", }, 271 - .peer = &sm8350_mxc_ao, 272 - .res_name = "mxc.lvl", 273 - }; 274 - 275 - static struct rpmhpd sm8350_mxc_ao = { 276 - .pd = { 
.name = "mxc_ao", }, 277 - .active_only = true, 278 - .peer = &sm8350_mxc, 279 - .res_name = "mxc.lvl", 280 - }; 281 - 282 221 static struct rpmhpd *sm8350_rpmhpds[] = { 283 - [SM8350_CX] = &sdm845_cx, 284 - [SM8350_CX_AO] = &sdm845_cx_ao, 285 - [SM8350_EBI] = &sdm845_ebi, 286 - [SM8350_GFX] = &sdm845_gfx, 287 - [SM8350_LCX] = &sdm845_lcx, 288 - [SM8350_LMX] = &sdm845_lmx, 289 - [SM8350_MMCX] = &sm8150_mmcx, 290 - [SM8350_MMCX_AO] = &sm8150_mmcx_ao, 291 - [SM8350_MX] = &sdm845_mx, 292 - [SM8350_MX_AO] = &sdm845_mx_ao, 293 - [SM8350_MXC] = &sm8350_mxc, 294 - [SM8350_MXC_AO] = &sm8350_mxc_ao, 295 - [SM8350_MSS] = &sdm845_mss, 222 + [SM8350_CX] = &cx_w_mx_parent, 223 + [SM8350_CX_AO] = &cx_ao_w_mx_parent, 224 + [SM8350_EBI] = &ebi, 225 + [SM8350_GFX] = &gfx, 226 + [SM8350_LCX] = &lcx, 227 + [SM8350_LMX] = &lmx, 228 + [SM8350_MMCX] = &mmcx, 229 + [SM8350_MMCX_AO] = &mmcx_ao, 230 + [SM8350_MSS] = &mss, 231 + [SM8350_MX] = &mx, 232 + [SM8350_MX_AO] = &mx_ao, 233 + [SM8350_MXC] = &mxc, 234 + [SM8350_MXC_AO] = &mxc_ao, 296 235 }; 297 236 298 237 static const struct rpmhpd_desc sm8350_desc = { ··· 286 253 .num_pds = ARRAY_SIZE(sm8350_rpmhpds), 287 254 }; 288 255 256 + /* SM8450 RPMH powerdomains */ 257 + static struct rpmhpd *sm8450_rpmhpds[] = { 258 + [SM8450_CX] = &cx, 259 + [SM8450_CX_AO] = &cx_ao, 260 + [SM8450_EBI] = &ebi, 261 + [SM8450_GFX] = &gfx, 262 + [SM8450_LCX] = &lcx, 263 + [SM8450_LMX] = &lmx, 264 + [SM8450_MMCX] = &mmcx_w_cx_parent, 265 + [SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent, 266 + [SM8450_MSS] = &mss, 267 + [SM8450_MX] = &mx, 268 + [SM8450_MX_AO] = &mx_ao, 269 + [SM8450_MXC] = &mxc, 270 + [SM8450_MXC_AO] = &mxc_ao, 271 + }; 272 + 273 + static const struct rpmhpd_desc sm8450_desc = { 274 + .rpmhpds = sm8450_rpmhpds, 275 + .num_pds = ARRAY_SIZE(sm8450_rpmhpds), 276 + }; 277 + 289 278 /* SC7180 RPMH powerdomains */ 290 279 static struct rpmhpd *sc7180_rpmhpds[] = { 291 - [SC7180_CX] = &sdm845_cx, 292 - [SC7180_CX_AO] = &sdm845_cx_ao, 293 - [SC7180_GFX] = 
&sdm845_gfx, 294 - [SC7180_MX] = &sdm845_mx, 295 - [SC7180_MX_AO] = &sdm845_mx_ao, 296 - [SC7180_LMX] = &sdm845_lmx, 297 - [SC7180_LCX] = &sdm845_lcx, 298 - [SC7180_MSS] = &sdm845_mss, 280 + [SC7180_CX] = &cx_w_mx_parent, 281 + [SC7180_CX_AO] = &cx_ao_w_mx_parent, 282 + [SC7180_GFX] = &gfx, 283 + [SC7180_LCX] = &lcx, 284 + [SC7180_LMX] = &lmx, 285 + [SC7180_MSS] = &mss, 286 + [SC7180_MX] = &mx, 287 + [SC7180_MX_AO] = &mx_ao, 299 288 }; 300 289 301 290 static const struct rpmhpd_desc sc7180_desc = { ··· 327 272 328 273 /* SC7280 RPMH powerdomains */ 329 274 static struct rpmhpd *sc7280_rpmhpds[] = { 330 - [SC7280_CX] = &sdm845_cx, 331 - [SC7280_CX_AO] = &sdm845_cx_ao, 332 - [SC7280_EBI] = &sdm845_ebi, 333 - [SC7280_GFX] = &sdm845_gfx, 334 - [SC7280_MX] = &sdm845_mx, 335 - [SC7280_MX_AO] = &sdm845_mx_ao, 336 - [SC7280_LMX] = &sdm845_lmx, 337 - [SC7280_LCX] = &sdm845_lcx, 338 - [SC7280_MSS] = &sdm845_mss, 275 + [SC7280_CX] = &cx, 276 + [SC7280_CX_AO] = &cx_ao, 277 + [SC7280_EBI] = &ebi, 278 + [SC7280_GFX] = &gfx, 279 + [SC7280_LCX] = &lcx, 280 + [SC7280_LMX] = &lmx, 281 + [SC7280_MSS] = &mss, 282 + [SC7280_MX] = &mx, 283 + [SC7280_MX_AO] = &mx_ao, 339 284 }; 340 285 341 286 static const struct rpmhpd_desc sc7280_desc = { ··· 345 290 346 291 /* SC8180x RPMH powerdomains */ 347 292 static struct rpmhpd *sc8180x_rpmhpds[] = { 348 - [SC8180X_CX] = &sdm845_cx, 349 - [SC8180X_CX_AO] = &sdm845_cx_ao, 350 - [SC8180X_EBI] = &sdm845_ebi, 351 - [SC8180X_GFX] = &sdm845_gfx, 352 - [SC8180X_LCX] = &sdm845_lcx, 353 - [SC8180X_LMX] = &sdm845_lmx, 354 - [SC8180X_MMCX] = &sm8150_mmcx, 355 - [SC8180X_MMCX_AO] = &sm8150_mmcx_ao, 356 - [SC8180X_MSS] = &sdm845_mss, 357 - [SC8180X_MX] = &sdm845_mx, 358 - [SC8180X_MX_AO] = &sdm845_mx_ao, 293 + [SC8180X_CX] = &cx_w_mx_parent, 294 + [SC8180X_CX_AO] = &cx_ao_w_mx_parent, 295 + [SC8180X_EBI] = &ebi, 296 + [SC8180X_GFX] = &gfx, 297 + [SC8180X_LCX] = &lcx, 298 + [SC8180X_LMX] = &lmx, 299 + [SC8180X_MMCX] = &mmcx, 300 + [SC8180X_MMCX_AO] = 
&mmcx_ao, 301 + [SC8180X_MSS] = &mss, 302 + [SC8180X_MX] = &mx, 303 + [SC8180X_MX_AO] = &mx_ao, 359 304 }; 360 305 361 306 static const struct rpmhpd_desc sc8180x_desc = { ··· 373 318 { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, 374 319 { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, 375 320 { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc }, 321 + { .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc }, 376 322 { } 377 323 }; 378 324 MODULE_DEVICE_TABLE(of, rpmhpd_match_table);
+41 -1
drivers/soc/qcom/rpmpd.c
··· 102 102 const bool active_only; 103 103 unsigned int corner; 104 104 bool enabled; 105 - const char *res_name; 106 105 const int res_type; 107 106 const int res_id; 108 107 struct qcom_smd_rpm *rpm; ··· 395 396 .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, 396 397 }; 397 398 399 + /* sm6125 RPM Power domains */ 400 + DEFINE_RPMPD_PAIR(sm6125, vddcx, vddcx_ao, RWCX, LEVEL, 0); 401 + DEFINE_RPMPD_VFL(sm6125, vddcx_vfl, RWCX, 0); 402 + 403 + DEFINE_RPMPD_PAIR(sm6125, vddmx, vddmx_ao, RWMX, LEVEL, 0); 404 + DEFINE_RPMPD_VFL(sm6125, vddmx_vfl, RWMX, 0); 405 + 406 + static struct rpmpd *sm6125_rpmpds[] = { 407 + [SM6125_VDDCX] = &sm6125_vddcx, 408 + [SM6125_VDDCX_AO] = &sm6125_vddcx_ao, 409 + [SM6125_VDDCX_VFL] = &sm6125_vddcx_vfl, 410 + [SM6125_VDDMX] = &sm6125_vddmx, 411 + [SM6125_VDDMX_AO] = &sm6125_vddmx_ao, 412 + [SM6125_VDDMX_VFL] = &sm6125_vddmx_vfl, 413 + }; 414 + 415 + static const struct rpmpd_desc sm6125_desc = { 416 + .rpmpds = sm6125_rpmpds, 417 + .num_pds = ARRAY_SIZE(sm6125_rpmpds), 418 + .max_state = RPM_SMD_LEVEL_BINNING, 419 + }; 420 + 421 + static struct rpmpd *qcm2290_rpmpds[] = { 422 + [QCM2290_VDDCX] = &sm6115_vddcx, 423 + [QCM2290_VDDCX_AO] = &sm6115_vddcx_ao, 424 + [QCM2290_VDDCX_VFL] = &sm6115_vddcx_vfl, 425 + [QCM2290_VDDMX] = &sm6115_vddmx, 426 + [QCM2290_VDDMX_AO] = &sm6115_vddmx_ao, 427 + [QCM2290_VDDMX_VFL] = &sm6115_vddmx_vfl, 428 + [QCM2290_VDD_LPI_CX] = &sm6115_vdd_lpi_cx, 429 + [QCM2290_VDD_LPI_MX] = &sm6115_vdd_lpi_mx, 430 + }; 431 + 432 + static const struct rpmpd_desc qcm2290_desc = { 433 + .rpmpds = qcm2290_rpmpds, 434 + .num_pds = ARRAY_SIZE(qcm2290_rpmpds), 435 + .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, 436 + }; 437 + 398 438 static const struct of_device_id rpmpd_match_table[] = { 399 439 { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, 400 440 { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, ··· 443 405 { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc }, 444 406 { .compatible = 
"qcom,msm8996-rpmpd", .data = &msm8996_desc }, 445 407 { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc }, 408 + { .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc }, 446 409 { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, 447 410 { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc }, 448 411 { .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc }, 412 + { .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc }, 449 413 { } 450 414 }; 451 415 MODULE_DEVICE_TABLE(of, rpmpd_match_table);
+1 -1
drivers/soc/qcom/smem.c
··· 85 85 #define SMEM_GLOBAL_HOST 0xfffe 86 86 87 87 /* Max number of processors/hosts in a system */ 88 - #define SMEM_HOST_COUNT 14 88 + #define SMEM_HOST_COUNT 15 89 89 90 90 /** 91 91 * struct smem_proc_comm - proc_comm communication struct (legacy)
+3
drivers/soc/qcom/socinfo.c
··· 313 313 { 421, "IPQ6000" }, 314 314 { 422, "IPQ6010" }, 315 315 { 425, "SC7180" }, 316 + { 434, "SM6350" }, 316 317 { 453, "IPQ6005" }, 317 318 { 455, "QRB5165" }, 319 + { 457, "SM8450" }, 320 + { 459, "SM7225" }, 318 321 }; 319 322 320 323 static const char *socinfo_machine(struct device *dev, unsigned int id)
+15
drivers/soc/renesas/Kconfig
··· 235 235 This enables support for the Renesas R-Car M3-W+ SoC. 236 236 This includes different gradings like R-Car M3e and M3e-2G. 237 237 238 + config ARCH_R8A779F0 239 + bool "ARM64 Platform support for R-Car S4-8" 240 + select ARCH_RCAR_GEN3 241 + select SYSC_R8A779F0 242 + help 243 + This enables support for the Renesas R-Car S4-8 SoC. 244 + 238 245 config ARCH_R8A77980 239 246 bool "ARM64 Platform support for R-Car V3H" 240 247 select ARCH_RCAR_GEN3 ··· 304 297 config SYSC_RCAR 305 298 bool "System Controller support for R-Car" if COMPILE_TEST 306 299 300 + config SYSC_RCAR_GEN4 301 + bool "System Controller support for R-Car Gen4" if COMPILE_TEST 302 + 307 303 config SYSC_R8A77995 308 304 bool "System Controller support for R-Car D3" if COMPILE_TEST 309 305 select SYSC_RCAR ··· 347 337 bool "System Controller support for R-Car M3-W+" if COMPILE_TEST 348 338 select SYSC_RCAR 349 339 340 + config SYSC_R8A779F0 341 + bool "System Controller support for R-Car S4-8" if COMPILE_TEST 342 + select SYSC_RCAR_GEN4 343 + 350 344 config SYSC_R8A7792 351 345 bool "System Controller support for R-Car V2H" if COMPILE_TEST 352 346 select SYSC_RCAR ··· 365 351 366 352 config SYSC_R8A779A0 367 353 bool "System Controller support for R-Car V3U" if COMPILE_TEST 354 + select SYSC_RCAR_GEN4 368 355 369 356 config SYSC_RMOBILE 370 357 bool "System Controller support for R-Mobile" if COMPILE_TEST
+2
drivers/soc/renesas/Makefile
··· 25 25 obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o 26 26 obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o 27 27 obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o 28 + obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o 28 29 ifdef CONFIG_SMP 29 30 obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o 30 31 endif ··· 33 32 # Family 34 33 obj-$(CONFIG_RST_RCAR) += rcar-rst.o 35 34 obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o 35 + obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o 36 36 obj-$(CONFIG_SYSC_RMOBILE) += rmobile-sysc.o
+3 -377
drivers/soc/renesas/r8a779a0-sysc.c
··· 21 21 22 22 #include <dt-bindings/power/r8a779a0-sysc.h> 23 23 24 - /* 25 - * Power Domain flags 26 - */ 27 - #define PD_CPU BIT(0) /* Area contains main CPU core */ 28 - #define PD_SCU BIT(1) /* Area contains SCU and L2 cache */ 29 - #define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */ 24 + #include "rcar-gen4-sysc.h" 30 25 31 - #define PD_CPU_NOCR PD_CPU | PD_NO_CR /* CPU area lacks CR */ 32 - #define PD_ALWAYS_ON PD_NO_CR /* Always-on area */ 33 - 34 - /* 35 - * Description of a Power Area 36 - */ 37 - struct r8a779a0_sysc_area { 38 - const char *name; 39 - u8 pdr; /* PDRn */ 40 - int parent; /* -1 if none */ 41 - unsigned int flags; /* See PD_* */ 42 - }; 43 - 44 - /* 45 - * SoC-specific Power Area Description 46 - */ 47 - struct r8a779a0_sysc_info { 48 - const struct r8a779a0_sysc_area *areas; 49 - unsigned int num_areas; 50 - }; 51 - 52 - static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = { 26 + static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = { 53 27 { "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, 54 28 { "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU }, 55 29 { "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU }, ··· 70 96 { "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 }, 71 97 }; 72 98 73 - static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = { 99 + const struct rcar_gen4_sysc_info r8a779a0_sysc_info __initconst = { 74 100 .areas = r8a779a0_areas, 75 101 .num_areas = ARRAY_SIZE(r8a779a0_areas), 76 102 }; 77 - 78 - /* SYSC Common */ 79 - #define SYSCSR 0x000 /* SYSC Status Register */ 80 - #define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */ 81 - #define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */ 82 - #define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */ 83 - #define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */ 84 - #define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask 
Register */ 85 - 86 - /* Power Domain Registers */ 87 - #define PDRSR(n) (0x1000 + ((n) * 0x40)) 88 - #define PDRONCR(n) (0x1004 + ((n) * 0x40)) 89 - #define PDROFFCR(n) (0x1008 + ((n) * 0x40)) 90 - #define PDRESR(n) (0x100C + ((n) * 0x40)) 91 - 92 - /* PWRON/PWROFF */ 93 - #define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */ 94 - 95 - /* PDRESR */ 96 - #define PDRESR_ERR BIT(0) 97 - 98 - /* PDRSR */ 99 - #define PDRSR_OFF BIT(0) /* Power-OFF state */ 100 - #define PDRSR_ON BIT(4) /* Power-ON state */ 101 - #define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */ 102 - #define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */ 103 - 104 - #define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */ 105 - 106 - #define SYSCSR_TIMEOUT 10000 107 - #define SYSCSR_DELAY_US 10 108 - 109 - #define PDRESR_RETRIES 1000 110 - #define PDRESR_DELAY_US 10 111 - 112 - #define SYSCISR_TIMEOUT 10000 113 - #define SYSCISR_DELAY_US 10 114 - 115 - #define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32) 116 - 117 - static void __iomem *r8a779a0_sysc_base; 118 - static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */ 119 - 120 - static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on) 121 - { 122 - unsigned int reg_offs; 123 - u32 val; 124 - int ret; 125 - 126 - if (on) 127 - reg_offs = PDRONCR(pdr); 128 - else 129 - reg_offs = PDROFFCR(pdr); 130 - 131 - /* Wait until SYSC is ready to accept a power request */ 132 - ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val, 133 - (val & SYSCSR_BUSY) == SYSCSR_BUSY, 134 - SYSCSR_DELAY_US, SYSCSR_TIMEOUT); 135 - if (ret < 0) 136 - return -EAGAIN; 137 - 138 - /* Submit power shutoff or power resume request */ 139 - iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs); 140 - 141 - return 0; 142 - } 143 - 144 - static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask) 145 - { 146 - u32 val; 147 - int ret; 148 - 149 - iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx)); 150 - 151 - 
ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx), 152 - val, !(val & isr_mask), 153 - SYSCISR_DELAY_US, SYSCISR_TIMEOUT); 154 - if (ret < 0) { 155 - pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__); 156 - return -EIO; 157 - } 158 - 159 - return 0; 160 - } 161 - 162 - static int r8a779a0_sysc_power(u8 pdr, bool on) 163 - { 164 - unsigned int isr_mask; 165 - unsigned int reg_idx, bit_idx; 166 - unsigned int status; 167 - unsigned long flags; 168 - int ret = 0; 169 - u32 val; 170 - int k; 171 - 172 - spin_lock_irqsave(&r8a779a0_sysc_lock, flags); 173 - 174 - reg_idx = pdr / NUM_DOMAINS_EACH_REG; 175 - bit_idx = pdr % NUM_DOMAINS_EACH_REG; 176 - 177 - isr_mask = BIT(bit_idx); 178 - 179 - /* 180 - * The interrupt source needs to be enabled, but masked, to prevent the 181 - * CPU from receiving it. 182 - */ 183 - iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask, 184 - r8a779a0_sysc_base + SYSCIER(reg_idx)); 185 - iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask, 186 - r8a779a0_sysc_base + SYSCIMR(reg_idx)); 187 - 188 - ret = clear_irq_flags(reg_idx, isr_mask); 189 - if (ret) 190 - goto out; 191 - 192 - /* Submit power shutoff or resume request until it was accepted */ 193 - for (k = 0; k < PDRESR_RETRIES; k++) { 194 - ret = r8a779a0_sysc_pwr_on_off(pdr, on); 195 - if (ret) 196 - goto out; 197 - 198 - status = ioread32(r8a779a0_sysc_base + PDRESR(pdr)); 199 - if (!(status & PDRESR_ERR)) 200 - break; 201 - 202 - udelay(PDRESR_DELAY_US); 203 - } 204 - 205 - if (k == PDRESR_RETRIES) { 206 - ret = -EIO; 207 - goto out; 208 - } 209 - 210 - /* Wait until the power shutoff or resume request has completed * */ 211 - ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx), 212 - val, (val & isr_mask), 213 - SYSCISR_DELAY_US, SYSCISR_TIMEOUT); 214 - if (ret < 0) { 215 - ret = -EIO; 216 - goto out; 217 - } 218 - 219 - /* Clear interrupt flags */ 220 - ret = clear_irq_flags(reg_idx, isr_mask); 
221 - if (ret) 222 - goto out; 223 - 224 - out: 225 - spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags); 226 - 227 - pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off", 228 - pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret); 229 - return ret; 230 - } 231 - 232 - static bool r8a779a0_sysc_power_is_off(u8 pdr) 233 - { 234 - unsigned int st; 235 - 236 - st = ioread32(r8a779a0_sysc_base + PDRSR(pdr)); 237 - 238 - if (st & PDRSR_OFF) 239 - return true; 240 - 241 - return false; 242 - } 243 - 244 - struct r8a779a0_sysc_pd { 245 - struct generic_pm_domain genpd; 246 - u8 pdr; 247 - unsigned int flags; 248 - char name[]; 249 - }; 250 - 251 - static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d) 252 - { 253 - return container_of(d, struct r8a779a0_sysc_pd, genpd); 254 - } 255 - 256 - static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd) 257 - { 258 - struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd); 259 - 260 - pr_debug("%s: %s\n", __func__, genpd->name); 261 - return r8a779a0_sysc_power(pd->pdr, false); 262 - } 263 - 264 - static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd) 265 - { 266 - struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd); 267 - 268 - pr_debug("%s: %s\n", __func__, genpd->name); 269 - return r8a779a0_sysc_power(pd->pdr, true); 270 - } 271 - 272 - static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd) 273 - { 274 - struct generic_pm_domain *genpd = &pd->genpd; 275 - const char *name = pd->genpd.name; 276 - int error; 277 - 278 - if (pd->flags & PD_CPU) { 279 - /* 280 - * This domain contains a CPU core and therefore it should 281 - * only be turned off if the CPU is not in use. 
282 - */ 283 - pr_debug("PM domain %s contains %s\n", name, "CPU"); 284 - genpd->flags |= GENPD_FLAG_ALWAYS_ON; 285 - } else if (pd->flags & PD_SCU) { 286 - /* 287 - * This domain contains an SCU and cache-controller, and 288 - * therefore it should only be turned off if the CPU cores are 289 - * not in use. 290 - */ 291 - pr_debug("PM domain %s contains %s\n", name, "SCU"); 292 - genpd->flags |= GENPD_FLAG_ALWAYS_ON; 293 - } else if (pd->flags & PD_NO_CR) { 294 - /* 295 - * This domain cannot be turned off. 296 - */ 297 - genpd->flags |= GENPD_FLAG_ALWAYS_ON; 298 - } 299 - 300 - if (!(pd->flags & (PD_CPU | PD_SCU))) { 301 - /* Enable Clock Domain for I/O devices */ 302 - genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; 303 - genpd->attach_dev = cpg_mssr_attach_dev; 304 - genpd->detach_dev = cpg_mssr_detach_dev; 305 - } 306 - 307 - genpd->power_off = r8a779a0_sysc_pd_power_off; 308 - genpd->power_on = r8a779a0_sysc_pd_power_on; 309 - 310 - if (pd->flags & (PD_CPU | PD_NO_CR)) { 311 - /* Skip CPUs (handled by SMP code) and areas without control */ 312 - pr_debug("%s: Not touching %s\n", __func__, genpd->name); 313 - goto finalize; 314 - } 315 - 316 - if (!r8a779a0_sysc_power_is_off(pd->pdr)) { 317 - pr_debug("%s: %s is already powered\n", __func__, genpd->name); 318 - goto finalize; 319 - } 320 - 321 - r8a779a0_sysc_power(pd->pdr, true); 322 - 323 - finalize: 324 - error = pm_genpd_init(genpd, &simple_qos_governor, false); 325 - if (error) 326 - pr_err("Failed to init PM domain %s: %d\n", name, error); 327 - 328 - return error; 329 - } 330 - 331 - static const struct of_device_id r8a779a0_sysc_matches[] __initconst = { 332 - { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info }, 333 - { /* sentinel */ } 334 - }; 335 - 336 - struct r8a779a0_pm_domains { 337 - struct genpd_onecell_data onecell_data; 338 - struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1]; 339 - }; 340 - 341 - static struct genpd_onecell_data 
*r8a779a0_sysc_onecell_data; 342 - 343 - static int __init r8a779a0_sysc_pd_init(void) 344 - { 345 - const struct r8a779a0_sysc_info *info; 346 - const struct of_device_id *match; 347 - struct r8a779a0_pm_domains *domains; 348 - struct device_node *np; 349 - void __iomem *base; 350 - unsigned int i; 351 - int error; 352 - 353 - np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match); 354 - if (!np) 355 - return -ENODEV; 356 - 357 - info = match->data; 358 - 359 - base = of_iomap(np, 0); 360 - if (!base) { 361 - pr_warn("%pOF: Cannot map regs\n", np); 362 - error = -ENOMEM; 363 - goto out_put; 364 - } 365 - 366 - r8a779a0_sysc_base = base; 367 - 368 - domains = kzalloc(sizeof(*domains), GFP_KERNEL); 369 - if (!domains) { 370 - error = -ENOMEM; 371 - goto out_put; 372 - } 373 - 374 - domains->onecell_data.domains = domains->domains; 375 - domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains); 376 - r8a779a0_sysc_onecell_data = &domains->onecell_data; 377 - 378 - for (i = 0; i < info->num_areas; i++) { 379 - const struct r8a779a0_sysc_area *area = &info->areas[i]; 380 - struct r8a779a0_sysc_pd *pd; 381 - size_t n; 382 - 383 - if (!area->name) { 384 - /* Skip NULLified area */ 385 - continue; 386 - } 387 - 388 - n = strlen(area->name) + 1; 389 - pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); 390 - if (!pd) { 391 - error = -ENOMEM; 392 - goto out_put; 393 - } 394 - 395 - memcpy(pd->name, area->name, n); 396 - pd->genpd.name = pd->name; 397 - pd->pdr = area->pdr; 398 - pd->flags = area->flags; 399 - 400 - error = r8a779a0_sysc_pd_setup(pd); 401 - if (error) 402 - goto out_put; 403 - 404 - domains->domains[area->pdr] = &pd->genpd; 405 - 406 - if (area->parent < 0) 407 - continue; 408 - 409 - error = pm_genpd_add_subdomain(domains->domains[area->parent], 410 - &pd->genpd); 411 - if (error) { 412 - pr_warn("Failed to add PM subdomain %s to parent %u\n", 413 - area->name, area->parent); 414 - goto out_put; 415 - } 416 - } 417 - 418 - error = 
of_genpd_add_provider_onecell(np, &domains->onecell_data); 419 - 420 - out_put: 421 - of_node_put(np); 422 - return error; 423 - } 424 - early_initcall(r8a779a0_sysc_pd_init);
+47
drivers/soc/renesas/r8a779f0-sysc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Renesas R-Car S4-8 System Controller 4 + * 5 + * Copyright (C) 2021 Renesas Electronics Corp. 6 + */ 7 + 8 + #include <linux/bits.h> 9 + #include <linux/clk/renesas.h> 10 + #include <linux/delay.h> 11 + #include <linux/err.h> 12 + #include <linux/io.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/kernel.h> 15 + #include <linux/mm.h> 16 + #include <linux/of_address.h> 17 + #include <linux/pm_domain.h> 18 + #include <linux/slab.h> 19 + #include <linux/spinlock.h> 20 + #include <linux/types.h> 21 + 22 + #include <dt-bindings/power/r8a779f0-sysc.h> 23 + 24 + #include "rcar-gen4-sysc.h" 25 + 26 + static struct rcar_gen4_sysc_area r8a779f0_areas[] __initdata = { 27 + { "always-on", R8A779F0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, 28 + { "a3e0", R8A779F0_PD_A3E0, R8A779F0_PD_ALWAYS_ON, PD_SCU }, 29 + { "a3e1", R8A779F0_PD_A3E1, R8A779F0_PD_ALWAYS_ON, PD_SCU }, 30 + { "a2e0d0", R8A779F0_PD_A2E0D0, R8A779F0_PD_A3E0, PD_SCU }, 31 + { "a2e0d1", R8A779F0_PD_A2E0D1, R8A779F0_PD_A3E0, PD_SCU }, 32 + { "a2e1d0", R8A779F0_PD_A2E1D0, R8A779F0_PD_A3E1, PD_SCU }, 33 + { "a2e1d1", R8A779F0_PD_A2E1D1, R8A779F0_PD_A3E1, PD_SCU }, 34 + { "a1e0d0c0", R8A779F0_PD_A1E0D0C0, R8A779F0_PD_A2E0D0, PD_CPU_NOCR }, 35 + { "a1e0d0c1", R8A779F0_PD_A1E0D0C1, R8A779F0_PD_A2E0D0, PD_CPU_NOCR }, 36 + { "a1e0d1c0", R8A779F0_PD_A1E0D1C0, R8A779F0_PD_A2E0D1, PD_CPU_NOCR }, 37 + { "a1e0d1c1", R8A779F0_PD_A1E0D1C1, R8A779F0_PD_A2E0D1, PD_CPU_NOCR }, 38 + { "a1e1d0c0", R8A779F0_PD_A1E1D0C0, R8A779F0_PD_A2E1D0, PD_CPU_NOCR }, 39 + { "a1e1d0c1", R8A779F0_PD_A1E1D0C1, R8A779F0_PD_A2E1D0, PD_CPU_NOCR }, 40 + { "a1e1d1c0", R8A779F0_PD_A1E1D1C0, R8A779F0_PD_A2E1D1, PD_CPU_NOCR }, 41 + { "a1e1d1c1", R8A779F0_PD_A1E1D1C1, R8A779F0_PD_A2E1D1, PD_CPU_NOCR }, 42 + }; 43 + 44 + const struct rcar_gen4_sysc_info r8a779f0_sysc_info __initconst = { 45 + .areas = r8a779f0_areas, 46 + .num_areas = ARRAY_SIZE(r8a779f0_areas), 47 + };
+376
drivers/soc/renesas/rcar-gen4-sysc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * R-Car Gen4 SYSC Power management support 4 + * 5 + * Copyright (C) 2021 Renesas Electronics Corp. 6 + */ 7 + 8 + #include <linux/bits.h> 9 + #include <linux/clk/renesas.h> 10 + #include <linux/delay.h> 11 + #include <linux/err.h> 12 + #include <linux/io.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/kernel.h> 15 + #include <linux/mm.h> 16 + #include <linux/of_address.h> 17 + #include <linux/pm_domain.h> 18 + #include <linux/slab.h> 19 + #include <linux/spinlock.h> 20 + #include <linux/types.h> 21 + 22 + #include "rcar-gen4-sysc.h" 23 + 24 + /* SYSC Common */ 25 + #define SYSCSR 0x000 /* SYSC Status Register */ 26 + #define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */ 27 + #define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */ 28 + #define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */ 29 + #define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */ 30 + #define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */ 31 + 32 + /* Power Domain Registers */ 33 + #define PDRSR(n) (0x1000 + ((n) * 0x40)) 34 + #define PDRONCR(n) (0x1004 + ((n) * 0x40)) 35 + #define PDROFFCR(n) (0x1008 + ((n) * 0x40)) 36 + #define PDRESR(n) (0x100C + ((n) * 0x40)) 37 + 38 + /* PWRON/PWROFF */ 39 + #define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */ 40 + 41 + /* PDRESR */ 42 + #define PDRESR_ERR BIT(0) 43 + 44 + /* PDRSR */ 45 + #define PDRSR_OFF BIT(0) /* Power-OFF state */ 46 + #define PDRSR_ON BIT(4) /* Power-ON state */ 47 + #define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */ 48 + #define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */ 49 + 50 + #define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */ 51 + 52 + #define SYSCSR_TIMEOUT 10000 53 + #define SYSCSR_DELAY_US 10 54 + 55 + #define PDRESR_RETRIES 1000 56 + #define PDRESR_DELAY_US 10 57 + 58 + #define SYSCISR_TIMEOUT 10000 59 + #define 
SYSCISR_DELAY_US 10 60 + 61 + #define RCAR_GEN4_PD_ALWAYS_ON 64 62 + #define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32) 63 + 64 + static void __iomem *rcar_gen4_sysc_base; 65 + static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */ 66 + 67 + static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on) 68 + { 69 + unsigned int reg_offs; 70 + u32 val; 71 + int ret; 72 + 73 + if (on) 74 + reg_offs = PDRONCR(pdr); 75 + else 76 + reg_offs = PDROFFCR(pdr); 77 + 78 + /* Wait until SYSC is ready to accept a power request */ 79 + ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val, 80 + (val & SYSCSR_BUSY) == SYSCSR_BUSY, 81 + SYSCSR_DELAY_US, SYSCSR_TIMEOUT); 82 + if (ret < 0) 83 + return -EAGAIN; 84 + 85 + /* Submit power shutoff or power resume request */ 86 + iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs); 87 + 88 + return 0; 89 + } 90 + 91 + static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask) 92 + { 93 + u32 val; 94 + int ret; 95 + 96 + iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx)); 97 + 98 + ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx), 99 + val, !(val & isr_mask), 100 + SYSCISR_DELAY_US, SYSCISR_TIMEOUT); 101 + if (ret < 0) { 102 + pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__); 103 + return -EIO; 104 + } 105 + 106 + return 0; 107 + } 108 + 109 + static int rcar_gen4_sysc_power(u8 pdr, bool on) 110 + { 111 + unsigned int isr_mask; 112 + unsigned int reg_idx, bit_idx; 113 + unsigned int status; 114 + unsigned long flags; 115 + int ret = 0; 116 + u32 val; 117 + int k; 118 + 119 + spin_lock_irqsave(&rcar_gen4_sysc_lock, flags); 120 + 121 + reg_idx = pdr / NUM_DOMAINS_EACH_REG; 122 + bit_idx = pdr % NUM_DOMAINS_EACH_REG; 123 + 124 + isr_mask = BIT(bit_idx); 125 + 126 + /* 127 + * The interrupt source needs to be enabled, but masked, to prevent the 128 + * CPU from receiving it. 
129 + */ 130 + iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask, 131 + rcar_gen4_sysc_base + SYSCIER(reg_idx)); 132 + iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask, 133 + rcar_gen4_sysc_base + SYSCIMR(reg_idx)); 134 + 135 + ret = clear_irq_flags(reg_idx, isr_mask); 136 + if (ret) 137 + goto out; 138 + 139 + /* Submit power shutoff or resume request until it was accepted */ 140 + for (k = 0; k < PDRESR_RETRIES; k++) { 141 + ret = rcar_gen4_sysc_pwr_on_off(pdr, on); 142 + if (ret) 143 + goto out; 144 + 145 + status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr)); 146 + if (!(status & PDRESR_ERR)) 147 + break; 148 + 149 + udelay(PDRESR_DELAY_US); 150 + } 151 + 152 + if (k == PDRESR_RETRIES) { 153 + ret = -EIO; 154 + goto out; 155 + } 156 + 157 + /* Wait until the power shutoff or resume request has completed * */ 158 + ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx), 159 + val, (val & isr_mask), 160 + SYSCISR_DELAY_US, SYSCISR_TIMEOUT); 161 + if (ret < 0) { 162 + ret = -EIO; 163 + goto out; 164 + } 165 + 166 + /* Clear interrupt flags */ 167 + ret = clear_irq_flags(reg_idx, isr_mask); 168 + if (ret) 169 + goto out; 170 + 171 + out: 172 + spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags); 173 + 174 + pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? 
"on" : "off", 175 + pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret); 176 + return ret; 177 + } 178 + 179 + static bool rcar_gen4_sysc_power_is_off(u8 pdr) 180 + { 181 + unsigned int st; 182 + 183 + st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr)); 184 + 185 + if (st & PDRSR_OFF) 186 + return true; 187 + 188 + return false; 189 + } 190 + 191 + struct rcar_gen4_sysc_pd { 192 + struct generic_pm_domain genpd; 193 + u8 pdr; 194 + unsigned int flags; 195 + char name[]; 196 + }; 197 + 198 + static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d) 199 + { 200 + return container_of(d, struct rcar_gen4_sysc_pd, genpd); 201 + } 202 + 203 + static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd) 204 + { 205 + struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd); 206 + 207 + pr_debug("%s: %s\n", __func__, genpd->name); 208 + return rcar_gen4_sysc_power(pd->pdr, false); 209 + } 210 + 211 + static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd) 212 + { 213 + struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd); 214 + 215 + pr_debug("%s: %s\n", __func__, genpd->name); 216 + return rcar_gen4_sysc_power(pd->pdr, true); 217 + } 218 + 219 + static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd) 220 + { 221 + struct generic_pm_domain *genpd = &pd->genpd; 222 + const char *name = pd->genpd.name; 223 + int error; 224 + 225 + if (pd->flags & PD_CPU) { 226 + /* 227 + * This domain contains a CPU core and therefore it should 228 + * only be turned off if the CPU is not in use. 229 + */ 230 + pr_debug("PM domain %s contains %s\n", name, "CPU"); 231 + genpd->flags |= GENPD_FLAG_ALWAYS_ON; 232 + } else if (pd->flags & PD_SCU) { 233 + /* 234 + * This domain contains an SCU and cache-controller, and 235 + * therefore it should only be turned off if the CPU cores are 236 + * not in use. 
237 + */ 238 + pr_debug("PM domain %s contains %s\n", name, "SCU"); 239 + genpd->flags |= GENPD_FLAG_ALWAYS_ON; 240 + } else if (pd->flags & PD_NO_CR) { 241 + /* 242 + * This domain cannot be turned off. 243 + */ 244 + genpd->flags |= GENPD_FLAG_ALWAYS_ON; 245 + } 246 + 247 + if (!(pd->flags & (PD_CPU | PD_SCU))) { 248 + /* Enable Clock Domain for I/O devices */ 249 + genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; 250 + genpd->attach_dev = cpg_mssr_attach_dev; 251 + genpd->detach_dev = cpg_mssr_detach_dev; 252 + } 253 + 254 + genpd->power_off = rcar_gen4_sysc_pd_power_off; 255 + genpd->power_on = rcar_gen4_sysc_pd_power_on; 256 + 257 + if (pd->flags & (PD_CPU | PD_NO_CR)) { 258 + /* Skip CPUs (handled by SMP code) and areas without control */ 259 + pr_debug("%s: Not touching %s\n", __func__, genpd->name); 260 + goto finalize; 261 + } 262 + 263 + if (!rcar_gen4_sysc_power_is_off(pd->pdr)) { 264 + pr_debug("%s: %s is already powered\n", __func__, genpd->name); 265 + goto finalize; 266 + } 267 + 268 + rcar_gen4_sysc_power(pd->pdr, true); 269 + 270 + finalize: 271 + error = pm_genpd_init(genpd, &simple_qos_governor, false); 272 + if (error) 273 + pr_err("Failed to init PM domain %s: %d\n", name, error); 274 + 275 + return error; 276 + } 277 + 278 + static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = { 279 + #ifdef CONFIG_SYSC_R8A779A0 280 + { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info }, 281 + #endif 282 + #ifdef CONFIG_SYSC_R8A779F0 283 + { .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info }, 284 + #endif 285 + { /* sentinel */ } 286 + }; 287 + 288 + struct rcar_gen4_pm_domains { 289 + struct genpd_onecell_data onecell_data; 290 + struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1]; 291 + }; 292 + 293 + static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data; 294 + 295 + static int __init rcar_gen4_sysc_pd_init(void) 296 + { 297 + const struct rcar_gen4_sysc_info *info; 298 + 
const struct of_device_id *match; 299 + struct rcar_gen4_pm_domains *domains; 300 + struct device_node *np; 301 + void __iomem *base; 302 + unsigned int i; 303 + int error; 304 + 305 + np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match); 306 + if (!np) 307 + return -ENODEV; 308 + 309 + info = match->data; 310 + 311 + base = of_iomap(np, 0); 312 + if (!base) { 313 + pr_warn("%pOF: Cannot map regs\n", np); 314 + error = -ENOMEM; 315 + goto out_put; 316 + } 317 + 318 + rcar_gen4_sysc_base = base; 319 + 320 + domains = kzalloc(sizeof(*domains), GFP_KERNEL); 321 + if (!domains) { 322 + error = -ENOMEM; 323 + goto out_put; 324 + } 325 + 326 + domains->onecell_data.domains = domains->domains; 327 + domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains); 328 + rcar_gen4_sysc_onecell_data = &domains->onecell_data; 329 + 330 + for (i = 0; i < info->num_areas; i++) { 331 + const struct rcar_gen4_sysc_area *area = &info->areas[i]; 332 + struct rcar_gen4_sysc_pd *pd; 333 + size_t n; 334 + 335 + if (!area->name) { 336 + /* Skip NULLified area */ 337 + continue; 338 + } 339 + 340 + n = strlen(area->name) + 1; 341 + pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); 342 + if (!pd) { 343 + error = -ENOMEM; 344 + goto out_put; 345 + } 346 + 347 + memcpy(pd->name, area->name, n); 348 + pd->genpd.name = pd->name; 349 + pd->pdr = area->pdr; 350 + pd->flags = area->flags; 351 + 352 + error = rcar_gen4_sysc_pd_setup(pd); 353 + if (error) 354 + goto out_put; 355 + 356 + domains->domains[area->pdr] = &pd->genpd; 357 + 358 + if (area->parent < 0) 359 + continue; 360 + 361 + error = pm_genpd_add_subdomain(domains->domains[area->parent], 362 + &pd->genpd); 363 + if (error) { 364 + pr_warn("Failed to add PM subdomain %s to parent %u\n", 365 + area->name, area->parent); 366 + goto out_put; 367 + } 368 + } 369 + 370 + error = of_genpd_add_provider_onecell(np, &domains->onecell_data); 371 + 372 + out_put: 373 + of_node_put(np); 374 + return error; 375 + } 376 + 
early_initcall(rcar_gen4_sysc_pd_init);
+43
drivers/soc/renesas/rcar-gen4-sysc.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * R-Car Gen4 System Controller 4 + * 5 + * Copyright (C) 2021 Renesas Electronics Corp. 6 + */ 7 + #ifndef __SOC_RENESAS_RCAR_GEN4_SYSC_H__ 8 + #define __SOC_RENESAS_RCAR_GEN4_SYSC_H__ 9 + 10 + #include <linux/types.h> 11 + 12 + /* 13 + * Power Domain flags 14 + */ 15 + #define PD_CPU BIT(0) /* Area contains main CPU core */ 16 + #define PD_SCU BIT(1) /* Area contains SCU and L2 cache */ 17 + #define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */ 18 + 19 + #define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */ 20 + #define PD_ALWAYS_ON PD_NO_CR /* Always-on area */ 21 + 22 + /* 23 + * Description of a Power Area 24 + */ 25 + struct rcar_gen4_sysc_area { 26 + const char *name; 27 + u8 pdr; /* PDRn */ 28 + int parent; /* -1 if none */ 29 + unsigned int flags; /* See PD_* */ 30 + }; 31 + 32 + /* 33 + * SoC-specific Power Area Description 34 + */ 35 + struct rcar_gen4_sysc_info { 36 + const struct rcar_gen4_sysc_area *areas; 37 + unsigned int num_areas; 38 + }; 39 + 40 + extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info; 41 + extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info; 42 + 43 + #endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
+44 -6
drivers/soc/renesas/rcar-rst.c
··· 13 13 #define WDTRSTCR_RESET 0xA55A0002 14 14 #define WDTRSTCR 0x0054 15 15 16 + #define CR7BAR 0x0070 17 + #define CR7BAREN BIT(4) 18 + #define CR7BAR_MASK 0xFFFC0000 19 + 20 + static void __iomem *rcar_rst_base; 21 + static u32 saved_mode __initdata; 22 + static int (*rcar_rst_set_rproc_boot_addr_func)(u64 boot_addr); 23 + 16 24 static int rcar_rst_enable_wdt_reset(void __iomem *base) 17 25 { 18 26 iowrite32(WDTRSTCR_RESET, base + WDTRSTCR); 19 27 return 0; 20 28 } 21 29 30 + /* 31 + * Most of the R-Car Gen3 SoCs have an ARM Realtime Core. 32 + * Firmware boot address has to be set in CR7BAR before 33 + * starting the realtime core. 34 + * Boot address must be aligned on a 256k boundary. 35 + */ 36 + static int rcar_rst_set_gen3_rproc_boot_addr(u64 boot_addr) 37 + { 38 + if (boot_addr & ~(u64)CR7BAR_MASK) { 39 + pr_err("Invalid boot address got %llx\n", boot_addr); 40 + return -EINVAL; 41 + } 42 + 43 + iowrite32(boot_addr, rcar_rst_base + CR7BAR); 44 + iowrite32(boot_addr | CR7BAREN, rcar_rst_base + CR7BAR); 45 + 46 + return 0; 47 + } 48 + 22 49 struct rst_config { 23 50 unsigned int modemr; /* Mode Monitoring Register Offset */ 24 51 int (*configure)(void __iomem *base); /* Platform specific config */ 52 + int (*set_rproc_boot_addr)(u64 boot_addr); 25 53 }; 26 54 27 55 static const struct rst_config rcar_rst_gen1 __initconst = { ··· 63 35 64 36 static const struct rst_config rcar_rst_gen3 __initconst = { 65 37 .modemr = 0x60, 38 + .set_rproc_boot_addr = rcar_rst_set_gen3_rproc_boot_addr, 66 39 }; 67 40 68 - static const struct rst_config rcar_rst_r8a779a0 __initconst = { 41 + static const struct rst_config rcar_rst_gen4 __initconst = { 69 42 .modemr = 0x00, /* MODEMR0 and it has CPG related bits */ 70 43 }; 71 44 ··· 100 71 { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 }, 101 72 { .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 }, 102 73 { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 }, 103 - /* R-Car V3U */ 104 
- { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 }, 74 + /* R-Car Gen4 */ 75 + { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 }, 76 + { .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 }, 105 77 { /* sentinel */ } 106 78 }; 107 - 108 - static void __iomem *rcar_rst_base __initdata; 109 - static u32 saved_mode __initdata; 110 79 111 80 static int __init rcar_rst_init(void) 112 81 { ··· 127 100 128 101 rcar_rst_base = base; 129 102 cfg = match->data; 103 + rcar_rst_set_rproc_boot_addr_func = cfg->set_rproc_boot_addr; 104 + 130 105 saved_mode = ioread32(base + cfg->modemr); 131 106 if (cfg->configure) { 132 107 error = cfg->configure(base); ··· 159 130 *mode = saved_mode; 160 131 return 0; 161 132 } 133 + 134 + int rcar_rst_set_rproc_boot_addr(u64 boot_addr) 135 + { 136 + if (!rcar_rst_set_rproc_boot_addr_func) 137 + return -EIO; 138 + 139 + return rcar_rst_set_rproc_boot_addr_func(boot_addr); 140 + } 141 + EXPORT_SYMBOL_GPL(rcar_rst_set_rproc_boot_addr);
+69 -60
drivers/soc/renesas/renesas-soc.c
··· 33 33 .reg = 0xfff00044, /* PRR (Product Register) */ 34 34 }; 35 35 36 + static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = { 37 + .name = "R-Car Gen4", 38 + }; 39 + 36 40 static const struct renesas_family fam_rmobile __initconst __maybe_unused = { 37 41 .name = "R-Mobile", 38 42 .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */ ··· 218 214 .id = 0x59, 219 215 }; 220 216 217 + static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = { 218 + .family = &fam_rcar_gen4, 219 + .id = 0x5a, 220 + }; 221 + 221 222 static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = { 222 223 .family = &fam_shmobile, 223 224 .id = 0x37, ··· 328 319 #ifdef CONFIG_ARCH_R8A779A0 329 320 { .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u }, 330 321 #endif 322 + #ifdef CONFIG_ARCH_R8A779F0 323 + { .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 }, 324 + #endif 331 325 #if defined(CONFIG_ARCH_R9A07G044) 332 326 { .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l }, 333 327 #endif ··· 340 328 { /* sentinel */ } 341 329 }; 342 330 331 + struct renesas_id { 332 + unsigned int offset; 333 + u32 mask; 334 + }; 335 + 336 + static const struct renesas_id id_bsid __initconst = { 337 + .offset = 0, 338 + .mask = 0xff0000, 339 + /* 340 + * TODO: Upper 4 bits of BSID are for chip version, but the format is 341 + * not known at this time so we don't know how to specify eshi and eslo 342 + */ 343 + }; 344 + 345 + static const struct renesas_id id_rzg2l __initconst = { 346 + .offset = 0xa04, 347 + .mask = 0xfffffff, 348 + }; 349 + 350 + static const struct renesas_id id_prr __initconst = { 351 + .offset = 0, 352 + .mask = 0xff00, 353 + }; 354 + 355 + static const struct of_device_id renesas_ids[] __initconst = { 356 + { .compatible = "renesas,bsid", .data = &id_bsid }, 357 + { .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l }, 358 + { .compatible = "renesas,prr", .data = &id_prr }, 359 + { /* 
sentinel */ } 360 + }; 361 + 343 362 static int __init renesas_soc_init(void) 344 363 { 345 364 struct soc_device_attribute *soc_dev_attr; 365 + unsigned int product, eshi = 0, eslo; 346 366 const struct renesas_family *family; 347 367 const struct of_device_id *match; 348 368 const struct renesas_soc *soc; 369 + const struct renesas_id *id; 349 370 void __iomem *chipid = NULL; 350 371 struct soc_device *soc_dev; 351 372 struct device_node *np; 352 - unsigned int product, eshi = 0, eslo; 373 + const char *soc_id; 353 374 354 375 match = of_match_node(renesas_socs, of_root); 355 376 if (!match) 356 377 return -ENODEV; 357 378 379 + soc_id = strchr(match->compatible, ',') + 1; 358 380 soc = match->data; 359 381 family = soc->family; 360 382 361 - np = of_find_compatible_node(NULL, NULL, "renesas,bsid"); 383 + np = of_find_matching_node_and_match(NULL, renesas_ids, &match); 362 384 if (np) { 363 - chipid = of_iomap(np, 0); 364 - of_node_put(np); 365 - 366 - if (chipid) { 367 - product = readl(chipid); 368 - iounmap(chipid); 369 - 370 - if (soc->id && ((product >> 16) & 0xff) != soc->id) { 371 - pr_warn("SoC mismatch (product = 0x%x)\n", 372 - product); 373 - return -ENODEV; 374 - } 375 - } 376 - 377 - /* 378 - * TODO: Upper 4 bits of BSID are for chip version, but the 379 - * format is not known at this time so we don't know how to 380 - * specify eshi and eslo 381 - */ 382 - 383 - goto done; 384 - } 385 - 386 - np = of_find_compatible_node(NULL, NULL, "renesas,r9a07g044-sysc"); 387 - if (np) { 388 - chipid = of_iomap(np, 0); 389 - of_node_put(np); 390 - 391 - if (chipid) { 392 - product = readl(chipid + 0x0a04); 393 - iounmap(chipid); 394 - 395 - if (soc->id && (product & 0xfffffff) != soc->id) { 396 - pr_warn("SoC mismatch (product = 0x%x)\n", 397 - product); 398 - return -ENODEV; 399 - } 400 - } 401 - 402 - goto done; 403 - } 404 - 405 - /* Try PRR first, then hardcoded fallback */ 406 - np = of_find_compatible_node(NULL, NULL, "renesas,prr"); 407 - if (np) { 385 + 
id = match->data; 408 386 chipid = of_iomap(np, 0); 409 387 of_node_put(np); 410 388 } else if (soc->id && family->reg) { 389 + /* Try hardcoded CCCR/PRR fallback */ 390 + id = &id_prr; 411 391 chipid = ioremap(family->reg, 4); 412 392 } 393 + 413 394 if (chipid) { 414 - product = readl(chipid); 395 + product = readl(chipid + id->offset); 415 396 iounmap(chipid); 416 - /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ 417 - if ((product & 0x7fff) == 0x5210) 418 - product ^= 0x11; 419 - /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ 420 - if ((product & 0x7fff) == 0x5211) 421 - product ^= 0x12; 422 - if (soc->id && ((product >> 8) & 0xff) != soc->id) { 397 + 398 + if (id == &id_prr) { 399 + /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ 400 + if ((product & 0x7fff) == 0x5210) 401 + product ^= 0x11; 402 + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ 403 + if ((product & 0x7fff) == 0x5211) 404 + product ^= 0x12; 405 + 406 + eshi = ((product >> 4) & 0x0f) + 1; 407 + eslo = product & 0xf; 408 + } 409 + 410 + if (soc->id && 411 + ((product & id->mask) >> __ffs(id->mask)) != soc->id) { 423 412 pr_warn("SoC mismatch (product = 0x%x)\n", product); 424 413 return -ENODEV; 425 414 } 426 - eshi = ((product >> 4) & 0x0f) + 1; 427 - eslo = product & 0xf; 428 415 } 429 416 430 - done: 431 417 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 432 418 if (!soc_dev_attr) 433 419 return -ENOMEM; ··· 435 425 of_node_put(np); 436 426 437 427 soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL); 438 - soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1, 439 - GFP_KERNEL); 428 + soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL); 440 429 if (eshi) 441 430 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", eshi, 442 431 eslo);
+14
drivers/soc/samsung/Kconfig
··· 23 23 Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage. 24 24 This driver can also be built as module (exynos_chipid). 25 25 26 + config EXYNOS_USI 27 + tristate "Exynos USI (Universal Serial Interface) driver" 28 + default ARCH_EXYNOS && ARM64 29 + depends on ARCH_EXYNOS || COMPILE_TEST 30 + select MFD_SYSCON 31 + help 32 + Enable support for USI block. USI (Universal Serial Interface) is an 33 + IP-core found in modern Samsung Exynos SoCs, like Exynos850 and 34 + ExynosAutoV9. USI block can be configured to provide one of the 35 + following serial protocols: UART, SPI or High Speed I2C. 36 + 37 + This driver allows one to configure USI for desired protocol, which 38 + is usually done in USI node in Device Tree. 39 + 26 40 config EXYNOS_PMU 27 41 bool "Exynos PMU controller driver" if COMPILE_TEST 28 42 depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
+2
drivers/soc/samsung/Makefile
··· 4 4 obj-$(CONFIG_EXYNOS_CHIPID) += exynos_chipid.o 5 5 exynos_chipid-y += exynos-chipid.o exynos-asv.o 6 6 7 + obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o 8 + 7 9 obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o 8 10 9 11 obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
+3
drivers/soc/samsung/exynos-chipid.c
··· 42 42 unsigned int id; 43 43 } soc_ids[] = { 44 44 /* List ordered by SoC name */ 45 + /* Compatible with: samsung,exynos4210-chipid */ 45 46 { "EXYNOS3250", 0xE3472000 }, 46 47 { "EXYNOS4210", 0x43200000 }, /* EVT0 revision */ 47 48 { "EXYNOS4210", 0x43210000 }, ··· 56 55 { "EXYNOS5440", 0xE5440000 }, 57 56 { "EXYNOS5800", 0xE5422000 }, 58 57 { "EXYNOS7420", 0xE7420000 }, 58 + /* Compatible with: samsung,exynos850-chipid */ 59 + { "EXYNOS7885", 0xE7885000 }, 59 60 { "EXYNOS850", 0xE3830000 }, 60 61 { "EXYNOSAUTOV9", 0xAAA80000 }, 61 62 };
+2
drivers/soc/samsung/exynos-pmu.c
··· 94 94 .compatible = "samsung,exynos5433-pmu", 95 95 }, { 96 96 .compatible = "samsung,exynos7-pmu", 97 + }, { 98 + .compatible = "samsung,exynos850-pmu", 97 99 }, 98 100 { /*sentinel*/ }, 99 101 };
+285
drivers/soc/samsung/exynos-usi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2021 Linaro Ltd. 4 + * Author: Sam Protsenko <semen.protsenko@linaro.org> 5 + * 6 + * Samsung Exynos USI driver (Universal Serial Interface). 7 + */ 8 + 9 + #include <linux/clk.h> 10 + #include <linux/mfd/syscon.h> 11 + #include <linux/module.h> 12 + #include <linux/of.h> 13 + #include <linux/of_platform.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/regmap.h> 16 + 17 + #include <dt-bindings/soc/samsung,exynos-usi.h> 18 + 19 + /* USIv2: System Register: SW_CONF register bits */ 20 + #define USI_V2_SW_CONF_NONE 0x0 21 + #define USI_V2_SW_CONF_UART BIT(0) 22 + #define USI_V2_SW_CONF_SPI BIT(1) 23 + #define USI_V2_SW_CONF_I2C BIT(2) 24 + #define USI_V2_SW_CONF_MASK (USI_V2_SW_CONF_UART | USI_V2_SW_CONF_SPI | \ 25 + USI_V2_SW_CONF_I2C) 26 + 27 + /* USIv2: USI register offsets */ 28 + #define USI_CON 0x04 29 + #define USI_OPTION 0x08 30 + 31 + /* USIv2: USI register bits */ 32 + #define USI_CON_RESET BIT(0) 33 + #define USI_OPTION_CLKREQ_ON BIT(1) 34 + #define USI_OPTION_CLKSTOP_ON BIT(2) 35 + 36 + enum exynos_usi_ver { 37 + USI_VER2 = 2, 38 + }; 39 + 40 + struct exynos_usi_variant { 41 + enum exynos_usi_ver ver; /* USI IP-core version */ 42 + unsigned int sw_conf_mask; /* SW_CONF mask for all protocols */ 43 + size_t min_mode; /* first index in exynos_usi_modes[] */ 44 + size_t max_mode; /* last index in exynos_usi_modes[] */ 45 + size_t num_clks; /* number of clocks to assert */ 46 + const char * const *clk_names; /* clock names to assert */ 47 + }; 48 + 49 + struct exynos_usi { 50 + struct device *dev; 51 + void __iomem *regs; /* USI register map */ 52 + struct clk_bulk_data *clks; /* USI clocks */ 53 + 54 + size_t mode; /* current USI SW_CONF mode index */ 55 + bool clkreq_on; /* always provide clock to IP */ 56 + 57 + /* System Register */ 58 + struct regmap *sysreg; /* System Register map */ 59 + unsigned int sw_conf; /* SW_CONF register offset in sysreg */ 60 + 61 + const 
struct exynos_usi_variant *data; 62 + }; 63 + 64 + struct exynos_usi_mode { 65 + const char *name; /* mode name */ 66 + unsigned int val; /* mode register value */ 67 + }; 68 + 69 + static const struct exynos_usi_mode exynos_usi_modes[] = { 70 + [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE }, 71 + [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART }, 72 + [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI }, 73 + [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C }, 74 + }; 75 + 76 + static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" }; 77 + static const struct exynos_usi_variant exynos850_usi_data = { 78 + .ver = USI_VER2, 79 + .sw_conf_mask = USI_V2_SW_CONF_MASK, 80 + .min_mode = USI_V2_NONE, 81 + .max_mode = USI_V2_I2C, 82 + .num_clks = ARRAY_SIZE(exynos850_usi_clk_names), 83 + .clk_names = exynos850_usi_clk_names, 84 + }; 85 + 86 + static const struct of_device_id exynos_usi_dt_match[] = { 87 + { 88 + .compatible = "samsung,exynos850-usi", 89 + .data = &exynos850_usi_data, 90 + }, 91 + { } /* sentinel */ 92 + }; 93 + MODULE_DEVICE_TABLE(of, exynos_usi_dt_match); 94 + 95 + /** 96 + * exynos_usi_set_sw_conf - Set USI block configuration mode 97 + * @usi: USI driver object 98 + * @mode: Mode index 99 + * 100 + * Select underlying serial protocol (UART/SPI/I2C) in USI IP-core. 101 + * 102 + * Return: 0 on success, or negative error code on failure. 
103 + */ 104 + static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode) 105 + { 106 + unsigned int val; 107 + int ret; 108 + 109 + if (mode < usi->data->min_mode || mode > usi->data->max_mode) 110 + return -EINVAL; 111 + 112 + val = exynos_usi_modes[mode].val; 113 + ret = regmap_update_bits(usi->sysreg, usi->sw_conf, 114 + usi->data->sw_conf_mask, val); 115 + if (ret) 116 + return ret; 117 + 118 + usi->mode = mode; 119 + dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name); 120 + 121 + return 0; 122 + } 123 + 124 + /** 125 + * exynos_usi_enable - Initialize USI block 126 + * @usi: USI driver object 127 + * 128 + * USI IP-core start state is "reset" (on startup and after CPU resume). This 129 + * routine enables the USI block by clearing the reset flag. It also configures 130 + * HWACG behavior (needed e.g. for UART Rx). It should be performed before 131 + * underlying protocol becomes functional. 132 + * 133 + * Return: 0 on success, or negative error code on failure. 
134 + */ 135 + static int exynos_usi_enable(const struct exynos_usi *usi) 136 + { 137 + u32 val; 138 + int ret; 139 + 140 + ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks); 141 + if (ret) 142 + return ret; 143 + 144 + /* Enable USI block */ 145 + val = readl(usi->regs + USI_CON); 146 + val &= ~USI_CON_RESET; 147 + writel(val, usi->regs + USI_CON); 148 + udelay(1); 149 + 150 + /* Continuously provide the clock to USI IP w/o gating */ 151 + if (usi->clkreq_on) { 152 + val = readl(usi->regs + USI_OPTION); 153 + val &= ~USI_OPTION_CLKSTOP_ON; 154 + val |= USI_OPTION_CLKREQ_ON; 155 + writel(val, usi->regs + USI_OPTION); 156 + } 157 + 158 + clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks); 159 + 160 + return ret; 161 + } 162 + 163 + static int exynos_usi_configure(struct exynos_usi *usi) 164 + { 165 + int ret; 166 + 167 + ret = exynos_usi_set_sw_conf(usi, usi->mode); 168 + if (ret) 169 + return ret; 170 + 171 + if (usi->data->ver == USI_VER2) 172 + return exynos_usi_enable(usi); 173 + 174 + return 0; 175 + } 176 + 177 + static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi) 178 + { 179 + int ret; 180 + u32 mode; 181 + 182 + ret = of_property_read_u32(np, "samsung,mode", &mode); 183 + if (ret) 184 + return ret; 185 + if (mode < usi->data->min_mode || mode > usi->data->max_mode) 186 + return -EINVAL; 187 + usi->mode = mode; 188 + 189 + usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg"); 190 + if (IS_ERR(usi->sysreg)) 191 + return PTR_ERR(usi->sysreg); 192 + 193 + ret = of_property_read_u32_index(np, "samsung,sysreg", 1, 194 + &usi->sw_conf); 195 + if (ret) 196 + return ret; 197 + 198 + usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on"); 199 + 200 + return 0; 201 + } 202 + 203 + static int exynos_usi_get_clocks(struct exynos_usi *usi) 204 + { 205 + const size_t num = usi->data->num_clks; 206 + struct device *dev = usi->dev; 207 + size_t i; 208 + 209 + if (num == 0) 210 + return 0; 211 + 212 + 
usi->clks = devm_kcalloc(dev, num, sizeof(*usi->clks), GFP_KERNEL); 213 + if (!usi->clks) 214 + return -ENOMEM; 215 + 216 + for (i = 0; i < num; ++i) 217 + usi->clks[i].id = usi->data->clk_names[i]; 218 + 219 + return devm_clk_bulk_get(dev, num, usi->clks); 220 + } 221 + 222 + static int exynos_usi_probe(struct platform_device *pdev) 223 + { 224 + struct device *dev = &pdev->dev; 225 + struct device_node *np = dev->of_node; 226 + struct exynos_usi *usi; 227 + int ret; 228 + 229 + usi = devm_kzalloc(dev, sizeof(*usi), GFP_KERNEL); 230 + if (!usi) 231 + return -ENOMEM; 232 + 233 + usi->dev = dev; 234 + platform_set_drvdata(pdev, usi); 235 + 236 + usi->data = of_device_get_match_data(dev); 237 + if (!usi->data) 238 + return -EINVAL; 239 + 240 + ret = exynos_usi_parse_dt(np, usi); 241 + if (ret) 242 + return ret; 243 + 244 + ret = exynos_usi_get_clocks(usi); 245 + if (ret) 246 + return ret; 247 + 248 + if (usi->data->ver == USI_VER2) { 249 + usi->regs = devm_platform_ioremap_resource(pdev, 0); 250 + if (IS_ERR(usi->regs)) 251 + return PTR_ERR(usi->regs); 252 + } 253 + 254 + ret = exynos_usi_configure(usi); 255 + if (ret) 256 + return ret; 257 + 258 + /* Make it possible to embed protocol nodes into USI np */ 259 + return of_platform_populate(np, NULL, NULL, dev); 260 + } 261 + 262 + static int __maybe_unused exynos_usi_resume_noirq(struct device *dev) 263 + { 264 + struct exynos_usi *usi = dev_get_drvdata(dev); 265 + 266 + return exynos_usi_configure(usi); 267 + } 268 + 269 + static const struct dev_pm_ops exynos_usi_pm = { 270 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, exynos_usi_resume_noirq) 271 + }; 272 + 273 + static struct platform_driver exynos_usi_driver = { 274 + .driver = { 275 + .name = "exynos-usi", 276 + .pm = &exynos_usi_pm, 277 + .of_match_table = exynos_usi_dt_match, 278 + }, 279 + .probe = exynos_usi_probe, 280 + }; 281 + module_platform_driver(exynos_usi_driver); 282 + 283 + MODULE_DESCRIPTION("Samsung USI driver"); 284 + MODULE_AUTHOR("Sam Protsenko 
<semen.protsenko@linaro.org>"); 285 + MODULE_LICENSE("GPL");
+26 -3
drivers/soc/tegra/common.c
··· 10 10 #include <linux/export.h> 11 11 #include <linux/of.h> 12 12 #include <linux/pm_opp.h> 13 + #include <linux/pm_runtime.h> 13 14 14 15 #include <soc/tegra/common.h> 15 16 #include <soc/tegra/fuse.h> ··· 44 43 { 45 44 unsigned long rate; 46 45 struct clk *clk; 46 + bool rpm_enabled; 47 47 int err; 48 48 49 49 clk = devm_clk_get(dev, NULL); ··· 59 57 return -EINVAL; 60 58 } 61 59 60 + /* 61 + * Runtime PM of the device must be enabled in order to set up 62 + * GENPD's performance properly because GENPD core checks whether 63 + * device is suspended and this check doesn't work while RPM is 64 + * disabled. This makes sure the OPP vote below gets cached in 65 + * GENPD for the device. Instead, the vote is done the next time 66 + * the device gets runtime resumed. 67 + */ 68 + rpm_enabled = pm_runtime_enabled(dev); 69 + if (!rpm_enabled) 70 + pm_runtime_enable(dev); 71 + 72 + /* should never happen in practice */ 73 + if (!pm_runtime_enabled(dev)) { 74 + dev_WARN(dev, "failed to enable runtime PM\n"); 75 + pm_runtime_disable(dev); 76 + return -EINVAL; 77 + } 78 + 62 79 /* first dummy rate-setting initializes voltage vote */ 63 80 err = dev_pm_opp_set_rate(dev, rate); 81 + 82 + if (!rpm_enabled) 83 + pm_runtime_disable(dev); 84 + 64 85 if (err) { 65 86 dev_err(dev, "failed to initialize OPP clock: %d\n", err); 66 87 return err; ··· 136 111 */ 137 112 err = devm_pm_opp_of_add_table(dev); 138 113 if (err) { 139 - if (err == -ENODEV) 140 - dev_err_once(dev, "OPP table not found, please update device-tree\n"); 141 - else 114 + if (err != -ENODEV) 142 115 dev_err(dev, "failed to add OPP table: %d\n", err); 143 116 144 117 return err;
+40 -11
drivers/soc/tegra/fuse/fuse-tegra.c
··· 14 14 #include <linux/of_address.h> 15 15 #include <linux/platform_device.h> 16 16 #include <linux/pm_runtime.h> 17 + #include <linux/reset.h> 17 18 #include <linux/slab.h> 18 19 #include <linux/sys_soc.h> 19 20 ··· 182 181 }, 183 182 }; 184 183 184 + static void tegra_fuse_restore(void *base) 185 + { 186 + fuse->clk = NULL; 187 + fuse->base = base; 188 + } 189 + 185 190 static int tegra_fuse_probe(struct platform_device *pdev) 186 191 { 187 192 void __iomem *base = fuse->base; ··· 195 188 struct resource *res; 196 189 int err; 197 190 191 + err = devm_add_action(&pdev->dev, tegra_fuse_restore, base); 192 + if (err) 193 + return err; 194 + 198 195 /* take over the memory region from the early initialization */ 199 196 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 200 197 fuse->phys = res->start; 201 198 fuse->base = devm_ioremap_resource(&pdev->dev, res); 202 199 if (IS_ERR(fuse->base)) { 203 200 err = PTR_ERR(fuse->base); 204 - fuse->base = base; 205 201 return err; 206 202 } 207 203 ··· 214 204 dev_err(&pdev->dev, "failed to get FUSE clock: %ld", 215 205 PTR_ERR(fuse->clk)); 216 206 217 - fuse->base = base; 218 207 return PTR_ERR(fuse->clk); 219 208 } 220 209 221 210 platform_set_drvdata(pdev, fuse); 222 211 fuse->dev = &pdev->dev; 223 212 224 - pm_runtime_enable(&pdev->dev); 213 + err = devm_pm_runtime_enable(&pdev->dev); 214 + if (err) 215 + return err; 225 216 226 217 if (fuse->soc->probe) { 227 218 err = fuse->soc->probe(fuse); 228 219 if (err < 0) 229 - goto restore; 220 + return err; 230 221 } 231 222 232 223 memset(&nvmem, 0, sizeof(nvmem)); ··· 251 240 err = PTR_ERR(fuse->nvmem); 252 241 dev_err(&pdev->dev, "failed to register NVMEM device: %d\n", 253 242 err); 254 - goto restore; 243 + return err; 244 + } 245 + 246 + fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse"); 247 + if (IS_ERR(fuse->rst)) { 248 + err = PTR_ERR(fuse->rst); 249 + dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n", 250 + fuse->rst); 251 + return err; 252 
+ } 253 + 254 + /* 255 + * FUSE clock is enabled at a boot time, hence this resume/suspend 256 + * disables the clock besides the h/w resetting. 257 + */ 258 + err = pm_runtime_resume_and_get(&pdev->dev); 259 + if (err) 260 + return err; 261 + 262 + err = reset_control_reset(fuse->rst); 263 + pm_runtime_put(&pdev->dev); 264 + 265 + if (err < 0) { 266 + dev_err(&pdev->dev, "failed to reset FUSE: %d\n", err); 267 + return err; 255 268 } 256 269 257 270 /* release the early I/O memory mapping */ 258 271 iounmap(base); 259 272 260 273 return 0; 261 - 262 - restore: 263 - fuse->clk = NULL; 264 - fuse->base = base; 265 - pm_runtime_disable(&pdev->dev); 266 - return err; 267 274 } 268 275 269 276 static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev)
+30 -3
drivers/soc/tegra/fuse/fuse-tegra20.c
··· 94 94 return of_device_is_compatible(np, "nvidia,tegra20-apbdma"); 95 95 } 96 96 97 + static void tegra20_fuse_release_channel(void *data) 98 + { 99 + struct tegra_fuse *fuse = data; 100 + 101 + dma_release_channel(fuse->apbdma.chan); 102 + fuse->apbdma.chan = NULL; 103 + } 104 + 105 + static void tegra20_fuse_free_coherent(void *data) 106 + { 107 + struct tegra_fuse *fuse = data; 108 + 109 + dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt, 110 + fuse->apbdma.phys); 111 + fuse->apbdma.virt = NULL; 112 + fuse->apbdma.phys = 0x0; 113 + } 114 + 97 115 static int tegra20_fuse_probe(struct tegra_fuse *fuse) 98 116 { 99 117 dma_cap_mask_t mask; 118 + int err; 100 119 101 120 dma_cap_zero(mask); 102 121 dma_cap_set(DMA_SLAVE, mask); ··· 124 105 if (!fuse->apbdma.chan) 125 106 return -EPROBE_DEFER; 126 107 108 + err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel, 109 + fuse); 110 + if (err) 111 + return err; 112 + 127 113 fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32), 128 114 &fuse->apbdma.phys, 129 115 GFP_KERNEL); 130 - if (!fuse->apbdma.virt) { 131 - dma_release_channel(fuse->apbdma.chan); 116 + if (!fuse->apbdma.virt) 132 117 return -ENOMEM; 133 - } 118 + 119 + err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent, 120 + fuse); 121 + if (err) 122 + return err; 134 123 135 124 fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 136 125 fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+1
drivers/soc/tegra/fuse/fuse.h
··· 43 43 void __iomem *base; 44 44 phys_addr_t phys; 45 45 struct clk *clk; 46 + struct reset_control *rst; 46 47 47 48 u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset); 48 49 u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+33 -8
drivers/soc/tegra/pmc.c
··· 1064 1064 return tegra_powergate_remove_clamping(id); 1065 1065 } 1066 1066 1067 - static int tegra_pmc_restart_notify(struct notifier_block *this, 1068 - unsigned long action, void *data) 1067 + static void tegra_pmc_program_reboot_reason(const char *cmd) 1069 1068 { 1070 - const char *cmd = data; 1071 1069 u32 value; 1072 1070 1073 1071 value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0); ··· 1083 1085 } 1084 1086 1085 1087 tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0); 1088 + } 1089 + 1090 + static int tegra_pmc_reboot_notify(struct notifier_block *this, 1091 + unsigned long action, void *data) 1092 + { 1093 + if (action == SYS_RESTART) 1094 + tegra_pmc_program_reboot_reason(data); 1095 + 1096 + return NOTIFY_DONE; 1097 + } 1098 + 1099 + static struct notifier_block tegra_pmc_reboot_notifier = { 1100 + .notifier_call = tegra_pmc_reboot_notify, 1101 + }; 1102 + 1103 + static int tegra_pmc_restart_notify(struct notifier_block *this, 1104 + unsigned long action, void *data) 1105 + { 1106 + u32 value; 1086 1107 1087 1108 /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */ 1088 1109 value = tegra_pmc_readl(pmc, PMC_CNTRL); ··· 1370 1353 if (!genpd) 1371 1354 return -ENOMEM; 1372 1355 1373 - genpd->name = np->name; 1356 + genpd->name = "core"; 1374 1357 genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state; 1375 1358 genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state; 1376 1359 ··· 2907 2890 goto cleanup_sysfs; 2908 2891 } 2909 2892 2893 + err = devm_register_reboot_notifier(&pdev->dev, 2894 + &tegra_pmc_reboot_notifier); 2895 + if (err) { 2896 + dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", 2897 + err); 2898 + goto cleanup_debugfs; 2899 + } 2900 + 2910 2901 err = register_restart_handler(&tegra_pmc_restart_handler); 2911 2902 if (err) { 2912 2903 dev_err(&pdev->dev, "unable to register restart handler, %d\n", ··· 2988 2963 2989 2964 static const char * const 
tegra20_powergates[] = { 2990 2965 [TEGRA_POWERGATE_CPU] = "cpu", 2991 - [TEGRA_POWERGATE_3D] = "3d", 2966 + [TEGRA_POWERGATE_3D] = "td", 2992 2967 [TEGRA_POWERGATE_VENC] = "venc", 2993 2968 [TEGRA_POWERGATE_VDEC] = "vdec", 2994 2969 [TEGRA_POWERGATE_PCIE] = "pcie", ··· 3096 3071 3097 3072 static const char * const tegra30_powergates[] = { 3098 3073 [TEGRA_POWERGATE_CPU] = "cpu0", 3099 - [TEGRA_POWERGATE_3D] = "3d0", 3074 + [TEGRA_POWERGATE_3D] = "td", 3100 3075 [TEGRA_POWERGATE_VENC] = "venc", 3101 3076 [TEGRA_POWERGATE_VDEC] = "vdec", 3102 3077 [TEGRA_POWERGATE_PCIE] = "pcie", ··· 3108 3083 [TEGRA_POWERGATE_CPU2] = "cpu2", 3109 3084 [TEGRA_POWERGATE_CPU3] = "cpu3", 3110 3085 [TEGRA_POWERGATE_CELP] = "celp", 3111 - [TEGRA_POWERGATE_3D1] = "3d1", 3086 + [TEGRA_POWERGATE_3D1] = "td2", 3112 3087 }; 3113 3088 3114 3089 static const u8 tegra30_cpu_powergates[] = { ··· 3157 3132 3158 3133 static const char * const tegra114_powergates[] = { 3159 3134 [TEGRA_POWERGATE_CPU] = "crail", 3160 - [TEGRA_POWERGATE_3D] = "3d", 3135 + [TEGRA_POWERGATE_3D] = "td", 3161 3136 [TEGRA_POWERGATE_VENC] = "venc", 3162 3137 [TEGRA_POWERGATE_VDEC] = "vdec", 3163 3138 [TEGRA_POWERGATE_MPE] = "mpe",
+99
drivers/soc/tegra/regulators-tegra20.c
··· 16 16 #include <linux/regulator/coupler.h> 17 17 #include <linux/regulator/driver.h> 18 18 #include <linux/regulator/machine.h> 19 + #include <linux/suspend.h> 19 20 21 + #include <soc/tegra/fuse.h> 20 22 #include <soc/tegra/pmc.h> 21 23 22 24 struct tegra_regulator_coupler { ··· 27 25 struct regulator_dev *cpu_rdev; 28 26 struct regulator_dev *rtc_rdev; 29 27 struct notifier_block reboot_notifier; 28 + struct notifier_block suspend_notifier; 30 29 int core_min_uV, cpu_min_uV; 31 30 bool sys_reboot_mode_req; 32 31 bool sys_reboot_mode; 32 + bool sys_suspend_mode_req; 33 + bool sys_suspend_mode; 33 34 }; 34 35 35 36 static inline struct tegra_regulator_coupler * ··· 110 105 return 150000; 111 106 } 112 107 108 + static int tegra20_cpu_nominal_uV(void) 109 + { 110 + switch (tegra_sku_info.soc_speedo_id) { 111 + case 0: 112 + return 1100000; 113 + case 1: 114 + return 1025000; 115 + default: 116 + return 1125000; 117 + } 118 + } 119 + 120 + static int tegra20_core_nominal_uV(void) 121 + { 122 + switch (tegra_sku_info.soc_speedo_id) { 123 + default: 124 + return 1225000; 125 + case 2: 126 + return 1300000; 127 + } 128 + } 129 + 113 130 static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra, 114 131 struct regulator_dev *core_rdev, 115 132 struct regulator_dev *rtc_rdev, ··· 170 143 PM_SUSPEND_ON); 171 144 if (err) 172 145 return err; 146 + 147 + /* prepare voltage level for suspend */ 148 + if (tegra->sys_suspend_mode) 149 + core_min_uV = clamp(tegra20_core_nominal_uV(), 150 + core_min_uV, core_max_uV); 173 151 174 152 core_uV = regulator_get_voltage_rdev(core_rdev); 175 153 if (core_uV < 0) ··· 311 279 if (tegra->sys_reboot_mode) 312 280 cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV); 313 281 282 + /* prepare voltage level for suspend */ 283 + if (tegra->sys_suspend_mode) 284 + cpu_min_uV = clamp(tegra20_cpu_nominal_uV(), 285 + cpu_min_uV, cpu_max_uV); 286 + 314 287 if (cpu_min_uV > cpu_uV) { 315 288 err = tegra20_core_rtc_update(tegra, 
core_rdev, rtc_rdev, 316 289 cpu_uV, cpu_min_uV); ··· 357 320 } 358 321 359 322 tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req); 323 + tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req); 360 324 361 325 if (rdev == cpu_rdev) 362 326 return tegra20_cpu_voltage_update(tegra, cpu_rdev, ··· 370 332 pr_err("changing %s voltage not permitted\n", rdev_get_name(rtc_rdev)); 371 333 372 334 return -EPERM; 335 + } 336 + 337 + static int tegra20_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra, 338 + bool sys_suspend_mode) 339 + { 340 + int err; 341 + 342 + if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev) 343 + return 0; 344 + 345 + /* 346 + * All power domains are enabled early during resume from suspend 347 + * by GENPD core. Domains like VENC may require a higher voltage 348 + * when enabled during resume from suspend. This also prepares 349 + * hardware for resuming from LP0. 350 + */ 351 + 352 + WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode); 353 + 354 + err = regulator_sync_voltage_rdev(tegra->cpu_rdev); 355 + if (err) 356 + return err; 357 + 358 + err = regulator_sync_voltage_rdev(tegra->core_rdev); 359 + if (err) 360 + return err; 361 + 362 + return 0; 363 + } 364 + 365 + static int tegra20_regulator_suspend(struct notifier_block *notifier, 366 + unsigned long mode, void *arg) 367 + { 368 + struct tegra_regulator_coupler *tegra; 369 + int ret = 0; 370 + 371 + tegra = container_of(notifier, struct tegra_regulator_coupler, 372 + suspend_notifier); 373 + 374 + switch (mode) { 375 + case PM_HIBERNATION_PREPARE: 376 + case PM_RESTORE_PREPARE: 377 + case PM_SUSPEND_PREPARE: 378 + ret = tegra20_regulator_prepare_suspend(tegra, true); 379 + break; 380 + 381 + case PM_POST_HIBERNATION: 382 + case PM_POST_RESTORE: 383 + case PM_POST_SUSPEND: 384 + ret = tegra20_regulator_prepare_suspend(tegra, false); 385 + break; 386 + } 387 + 388 + if (ret) 389 + pr_err("failed to prepare regulators: %d\n", ret); 390 + 391 + 
return notifier_from_errno(ret); 373 392 } 374 393 375 394 static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra, ··· 539 444 .balance_voltage = tegra20_regulator_balance_voltage, 540 445 }, 541 446 .reboot_notifier.notifier_call = tegra20_regulator_reboot, 447 + .suspend_notifier.notifier_call = tegra20_regulator_suspend, 542 448 }; 543 449 544 450 static int __init tegra_regulator_coupler_init(void) ··· 550 454 return 0; 551 455 552 456 err = register_reboot_notifier(&tegra20_coupler.reboot_notifier); 457 + WARN_ON(err); 458 + 459 + err = register_pm_notifier(&tegra20_coupler.suspend_notifier); 553 460 WARN_ON(err); 554 461 555 462 return regulator_coupler_register(&tegra20_coupler.coupler);
+122
drivers/soc/tegra/regulators-tegra30.c
··· 16 16 #include <linux/regulator/coupler.h> 17 17 #include <linux/regulator/driver.h> 18 18 #include <linux/regulator/machine.h> 19 + #include <linux/suspend.h> 19 20 20 21 #include <soc/tegra/fuse.h> 21 22 #include <soc/tegra/pmc.h> ··· 26 25 struct regulator_dev *core_rdev; 27 26 struct regulator_dev *cpu_rdev; 28 27 struct notifier_block reboot_notifier; 28 + struct notifier_block suspend_notifier; 29 29 int core_min_uV, cpu_min_uV; 30 30 bool sys_reboot_mode_req; 31 31 bool sys_reboot_mode; 32 + bool sys_suspend_mode_req; 33 + bool sys_suspend_mode; 32 34 }; 33 35 34 36 static inline struct tegra_regulator_coupler * ··· 117 113 return -EINVAL; 118 114 } 119 115 116 + static int tegra30_cpu_nominal_uV(void) 117 + { 118 + switch (tegra_sku_info.cpu_speedo_id) { 119 + case 10 ... 11: 120 + return 850000; 121 + 122 + case 9: 123 + return 912000; 124 + 125 + case 1 ... 3: 126 + case 7 ... 8: 127 + return 1050000; 128 + 129 + default: 130 + return 1125000; 131 + 132 + case 4 ... 6: 133 + case 12 ... 
13: 134 + return 1237000; 135 + } 136 + } 137 + 138 + static int tegra30_core_nominal_uV(void) 139 + { 140 + switch (tegra_sku_info.soc_speedo_id) { 141 + case 0: 142 + return 1200000; 143 + 144 + case 1: 145 + if (tegra_sku_info.cpu_speedo_id != 7 && 146 + tegra_sku_info.cpu_speedo_id != 8) 147 + return 1200000; 148 + 149 + fallthrough; 150 + 151 + case 2: 152 + if (tegra_sku_info.cpu_speedo_id != 13) 153 + return 1300000; 154 + 155 + return 1350000; 156 + 157 + default: 158 + return 1250000; 159 + } 160 + } 161 + 120 162 static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra, 121 163 struct regulator_dev *cpu_rdev, 122 164 struct regulator_dev *core_rdev) ··· 218 168 if (err) 219 169 return err; 220 170 171 + /* prepare voltage level for suspend */ 172 + if (tegra->sys_suspend_mode) 173 + core_min_uV = clamp(tegra30_core_nominal_uV(), 174 + core_min_uV, core_max_uV); 175 + 221 176 core_uV = regulator_get_voltage_rdev(core_rdev); 222 177 if (core_uV < 0) 223 178 return core_uV; ··· 277 222 /* restore boot voltage level */ 278 223 if (tegra->sys_reboot_mode) 279 224 cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV); 225 + 226 + /* prepare voltage level for suspend */ 227 + if (tegra->sys_suspend_mode) 228 + cpu_min_uV = clamp(tegra30_cpu_nominal_uV(), 229 + cpu_min_uV, cpu_max_uV); 280 230 281 231 if (core_min_limited_uV > core_uV) { 282 232 pr_err("core voltage constraint violated: %d %d %d\n", ··· 352 292 } 353 293 354 294 tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req); 295 + tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req); 355 296 356 297 return tegra30_voltage_update(tegra, cpu_rdev, core_rdev); 298 + } 299 + 300 + static int tegra30_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra, 301 + bool sys_suspend_mode) 302 + { 303 + int err; 304 + 305 + if (!tegra->core_rdev || !tegra->cpu_rdev) 306 + return 0; 307 + 308 + /* 309 + * All power domains are enabled early during resume from suspend 310 + * by 
GENPD core. Domains like VENC may require a higher voltage 311 + * when enabled during resume from suspend. This also prepares 312 + * hardware for resuming from LP0. 313 + */ 314 + 315 + WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode); 316 + 317 + err = regulator_sync_voltage_rdev(tegra->cpu_rdev); 318 + if (err) 319 + return err; 320 + 321 + err = regulator_sync_voltage_rdev(tegra->core_rdev); 322 + if (err) 323 + return err; 324 + 325 + return 0; 326 + } 327 + 328 + static int tegra30_regulator_suspend(struct notifier_block *notifier, 329 + unsigned long mode, void *arg) 330 + { 331 + struct tegra_regulator_coupler *tegra; 332 + int ret = 0; 333 + 334 + tegra = container_of(notifier, struct tegra_regulator_coupler, 335 + suspend_notifier); 336 + 337 + switch (mode) { 338 + case PM_HIBERNATION_PREPARE: 339 + case PM_RESTORE_PREPARE: 340 + case PM_SUSPEND_PREPARE: 341 + ret = tegra30_regulator_prepare_suspend(tegra, true); 342 + break; 343 + 344 + case PM_POST_HIBERNATION: 345 + case PM_POST_RESTORE: 346 + case PM_POST_SUSPEND: 347 + ret = tegra30_regulator_prepare_suspend(tegra, false); 348 + break; 349 + } 350 + 351 + if (ret) 352 + pr_err("failed to prepare regulators: %d\n", ret); 353 + 354 + return notifier_from_errno(ret); 357 355 } 358 356 359 357 static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra, ··· 513 395 .balance_voltage = tegra30_regulator_balance_voltage, 514 396 }, 515 397 .reboot_notifier.notifier_call = tegra30_regulator_reboot, 398 + .suspend_notifier.notifier_call = tegra30_regulator_suspend, 516 399 }; 517 400 518 401 static int __init tegra_regulator_coupler_init(void) ··· 524 405 return 0; 525 406 526 407 err = register_reboot_notifier(&tegra30_coupler.reboot_notifier); 408 + WARN_ON(err); 409 + 410 + err = register_pm_notifier(&tegra30_coupler.suspend_notifier); 527 411 WARN_ON(err); 528 412 529 413 return regulator_coupler_register(&tegra30_coupler.coupler);
+2 -1
drivers/soc/ti/k3-socinfo.c
··· 40 40 { 0xBB5A, "AM65X" }, 41 41 { 0xBB64, "J721E" }, 42 42 { 0xBB6D, "J7200" }, 43 - { 0xBB38, "AM64X" } 43 + { 0xBB38, "AM64X" }, 44 + { 0xBB75, "J721S2"}, 44 45 }; 45 46 46 47 static int
+10 -10
drivers/soc/ti/knav_dma.c
··· 646 646 } 647 647 648 648 dma->reg_global = pktdma_get_regs(dma, node, 0, &size); 649 - if (!dma->reg_global) 650 - return -ENODEV; 649 + if (IS_ERR(dma->reg_global)) 650 + return PTR_ERR(dma->reg_global); 651 651 if (size < sizeof(struct reg_global)) { 652 652 dev_err(kdev->dev, "bad size %pa for global regs\n", &size); 653 653 return -ENODEV; 654 654 } 655 655 656 656 dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size); 657 - if (!dma->reg_tx_chan) 658 - return -ENODEV; 657 + if (IS_ERR(dma->reg_tx_chan)) 658 + return PTR_ERR(dma->reg_tx_chan); 659 659 660 660 max_tx_chan = size / sizeof(struct reg_chan); 661 661 dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size); 662 - if (!dma->reg_rx_chan) 663 - return -ENODEV; 662 + if (IS_ERR(dma->reg_rx_chan)) 663 + return PTR_ERR(dma->reg_rx_chan); 664 664 665 665 max_rx_chan = size / sizeof(struct reg_chan); 666 666 dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size); 667 - if (!dma->reg_tx_sched) 668 - return -ENODEV; 667 + if (IS_ERR(dma->reg_tx_sched)) 668 + return PTR_ERR(dma->reg_tx_sched); 669 669 670 670 max_tx_sched = size / sizeof(struct reg_tx_sched); 671 671 dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size); 672 - if (!dma->reg_rx_flow) 673 - return -ENODEV; 672 + if (IS_ERR(dma->reg_rx_flow)) 673 + return PTR_ERR(dma->reg_rx_flow); 674 674 675 675 max_rx_flow = size / sizeof(struct reg_rx_flow); 676 676 dma->rx_priority = DMA_PRIO_DEFAULT;
+1 -1
drivers/soc/ti/pruss.c
··· 129 129 130 130 clks_np = of_get_child_by_name(cfg_node, "clocks"); 131 131 if (!clks_np) { 132 - dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np); 132 + dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node); 133 133 return -ENODEV; 134 134 } 135 135
+51 -40
drivers/soc/xilinx/zynqmp_pm_domains.c
··· 20 20 #include <linux/firmware/xlnx-zynqmp.h> 21 21 22 22 #define ZYNQMP_NUM_DOMAINS (100) 23 - /* Flag stating if PM nodes mapped to the PM domain has been requested */ 24 - #define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0) 25 23 26 24 static int min_capability; 27 25 ··· 27 29 * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain 28 30 * @gpd: Generic power domain 29 31 * @node_id: PM node ID corresponding to device inside PM domain 30 - * @flags: ZynqMP PM domain flags 32 + * @requested: The PM node mapped to the PM domain has been requested 31 33 */ 32 34 struct zynqmp_pm_domain { 33 35 struct generic_pm_domain gpd; 34 36 u32 node_id; 35 - u8 flags; 37 + bool requested; 36 38 }; 39 + 40 + #define to_zynqmp_pm_domain(pm_domain) \ 41 + container_of(pm_domain, struct zynqmp_pm_domain, gpd) 37 42 38 43 /** 39 44 * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source ··· 72 71 */ 73 72 static int zynqmp_gpd_power_on(struct generic_pm_domain *domain) 74 73 { 74 + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); 75 75 int ret; 76 - struct zynqmp_pm_domain *pd; 77 76 78 - pd = container_of(domain, struct zynqmp_pm_domain, gpd); 79 77 ret = zynqmp_pm_set_requirement(pd->node_id, 80 78 ZYNQMP_PM_CAPABILITY_ACCESS, 81 79 ZYNQMP_PM_MAX_QOS, 82 80 ZYNQMP_PM_REQUEST_ACK_BLOCKING); 83 81 if (ret) { 84 - pr_err("%s() %s set requirement for node %d failed: %d\n", 85 - __func__, domain->name, pd->node_id, ret); 82 + dev_err(&domain->dev, 83 + "failed to set requirement to 0x%x for PM node id %d: %d\n", 84 + ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret); 86 85 return ret; 87 86 } 88 87 89 - pr_debug("%s() Powered on %s domain\n", __func__, domain->name); 88 + dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n", 89 + ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id); 90 + 90 91 return 0; 91 92 } 92 93 ··· 103 100 */ 104 101 static int zynqmp_gpd_power_off(struct generic_pm_domain *domain) 105 102 { 103 + struct zynqmp_pm_domain *pd 
= to_zynqmp_pm_domain(domain); 106 104 int ret; 107 105 struct pm_domain_data *pdd, *tmp; 108 - struct zynqmp_pm_domain *pd; 109 106 u32 capabilities = min_capability; 110 107 bool may_wakeup; 111 108 112 - pd = container_of(domain, struct zynqmp_pm_domain, gpd); 113 - 114 109 /* If domain is already released there is nothing to be done */ 115 - if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) { 116 - pr_debug("%s() %s domain is already released\n", 117 - __func__, domain->name); 110 + if (!pd->requested) { 111 + dev_dbg(&domain->dev, "PM node id %d is already released\n", 112 + pd->node_id); 118 113 return 0; 119 114 } 120 115 ··· 129 128 130 129 ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0, 131 130 ZYNQMP_PM_REQUEST_ACK_NO); 132 - /** 133 - * If powering down of any node inside this domain fails, 134 - * report and return the error 135 - */ 136 131 if (ret) { 137 - pr_err("%s() %s set requirement for node %d failed: %d\n", 138 - __func__, domain->name, pd->node_id, ret); 132 + dev_err(&domain->dev, 133 + "failed to set requirement to 0x%x for PM node id %d: %d\n", 134 + capabilities, pd->node_id, ret); 139 135 return ret; 140 136 } 141 137 142 - pr_debug("%s() Powered off %s domain\n", __func__, domain->name); 138 + dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n", 139 + capabilities, pd->node_id); 140 + 143 141 return 0; 144 142 } 145 143 ··· 152 152 static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain, 153 153 struct device *dev) 154 154 { 155 + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); 156 + struct device_link *link; 155 157 int ret; 156 - struct zynqmp_pm_domain *pd; 157 158 158 - pd = container_of(domain, struct zynqmp_pm_domain, gpd); 159 + link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY); 160 + if (!link) 161 + dev_dbg(&domain->dev, "failed to create device link for %s\n", 162 + dev_name(dev)); 159 163 160 164 /* If this is not the first device to attach there is nothing to do 
*/ 161 165 if (domain->device_count) ··· 167 163 168 164 ret = zynqmp_pm_request_node(pd->node_id, 0, 0, 169 165 ZYNQMP_PM_REQUEST_ACK_BLOCKING); 170 - /* If requesting a node fails print and return the error */ 171 166 if (ret) { 172 - pr_err("%s() %s request failed for node %d: %d\n", 173 - __func__, domain->name, pd->node_id, ret); 167 + dev_err(&domain->dev, "%s request failed for node %d: %d\n", 168 + domain->name, pd->node_id, ret); 174 169 return ret; 175 170 } 176 171 177 - pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED; 172 + pd->requested = true; 178 173 179 - pr_debug("%s() %s attached to %s domain\n", __func__, 180 - dev_name(dev), domain->name); 174 + dev_dbg(&domain->dev, "%s requested PM node id %d\n", 175 + dev_name(dev), pd->node_id); 176 + 181 177 return 0; 182 178 } 183 179 ··· 189 185 static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain, 190 186 struct device *dev) 191 187 { 188 + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); 192 189 int ret; 193 - struct zynqmp_pm_domain *pd; 194 - 195 - pd = container_of(domain, struct zynqmp_pm_domain, gpd); 196 190 197 191 /* If this is not the last device to detach there is nothing to do */ 198 192 if (domain->device_count) 199 193 return; 200 194 201 195 ret = zynqmp_pm_release_node(pd->node_id); 202 - /* If releasing a node fails print the error and return */ 203 196 if (ret) { 204 - pr_err("%s() %s release failed for node %d: %d\n", 205 - __func__, domain->name, pd->node_id, ret); 197 + dev_err(&domain->dev, "failed to release PM node id %d: %d\n", 198 + pd->node_id, ret); 206 199 return; 207 200 } 208 201 209 - pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED; 202 + pd->requested = false; 210 203 211 - pr_debug("%s() %s detached from %s domain\n", __func__, 212 - dev_name(dev), domain->name); 204 + dev_dbg(&domain->dev, "%s released PM node id %d\n", 205 + dev_name(dev), pd->node_id); 213 206 } 214 207 215 208 static struct generic_pm_domain *zynqmp_gpd_xlate ··· 216 215 unsigned int i, idx 
= genpdspec->args[0]; 217 216 struct zynqmp_pm_domain *pd; 218 217 219 - pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd); 218 + pd = to_zynqmp_pm_domain(genpd_data->domains[0]); 220 219 221 220 if (genpdspec->args_count != 1) 222 221 return ERR_PTR(-EINVAL); ··· 300 299 return 0; 301 300 } 302 301 302 + static void zynqmp_gpd_sync_state(struct device *dev) 303 + { 304 + int ret; 305 + 306 + ret = zynqmp_pm_init_finalize(); 307 + if (ret) 308 + dev_warn(dev, "failed to release power management to firmware\n"); 309 + } 310 + 303 311 static struct platform_driver zynqmp_power_domain_driver = { 304 312 .driver = { 305 313 .name = "zynqmp_power_controller", 314 + .sync_state = zynqmp_gpd_sync_state, 306 315 }, 307 316 .probe = zynqmp_gpd_probe, 308 317 .remove = zynqmp_gpd_remove,
-1
drivers/soc/xilinx/zynqmp_power.c
··· 178 178 u32 pm_api_version; 179 179 struct mbox_client *client; 180 180 181 - zynqmp_pm_init_finalize(); 182 181 zynqmp_pm_get_api_version(&pm_api_version); 183 182 184 183 /* Check PM API version number */
+3 -1
drivers/spi/spi-rpc-if.c
··· 156 156 ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD; 157 157 ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; 158 158 159 - rpcif_hw_init(rpc, false); 159 + error = rpcif_hw_init(rpc, false); 160 + if (error) 161 + return error; 160 162 161 163 error = spi_register_controller(ctlr); 162 164 if (error) {
+8 -1
drivers/spi/spi-tegra20-slink.c
··· 18 18 #include <linux/kthread.h> 19 19 #include <linux/module.h> 20 20 #include <linux/platform_device.h> 21 + #include <linux/pm_opp.h> 21 22 #include <linux/pm_runtime.h> 22 23 #include <linux/of.h> 23 24 #include <linux/of_device.h> 24 25 #include <linux/reset.h> 25 26 #include <linux/spi/spi.h> 27 + 28 + #include <soc/tegra/common.h> 26 29 27 30 #define SLINK_COMMAND 0x000 28 31 #define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) ··· 683 680 bits_per_word = t->bits_per_word; 684 681 speed = t->speed_hz; 685 682 if (speed != tspi->cur_speed) { 686 - clk_set_rate(tspi->clk, speed * 4); 683 + dev_pm_opp_set_rate(tspi->dev, speed * 4); 687 684 tspi->cur_speed = speed; 688 685 } 689 686 ··· 1068 1065 ret = PTR_ERR(tspi->rst); 1069 1066 goto exit_free_master; 1070 1067 } 1068 + 1069 + ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); 1070 + if (ret) 1071 + goto exit_free_master; 1071 1072 1072 1073 tspi->max_buf_size = SLINK_FIFO_DEPTH << 2; 1073 1074 tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+52 -11
drivers/staging/media/tegra-vde/vde.c
··· 20 20 #include <linux/slab.h> 21 21 #include <linux/uaccess.h> 22 22 23 + #include <soc/tegra/common.h> 23 24 #include <soc/tegra/pmc.h> 24 25 25 26 #include "uapi.h" ··· 921 920 struct tegra_vde *vde = dev_get_drvdata(dev); 922 921 int err; 923 922 924 - err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC); 925 - if (err) { 926 - dev_err(dev, "Failed to power down HW: %d\n", err); 927 - return err; 923 + if (!dev->pm_domain) { 924 + err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC); 925 + if (err) { 926 + dev_err(dev, "Failed to power down HW: %d\n", err); 927 + return err; 928 + } 928 929 } 929 930 930 931 clk_disable_unprepare(vde->clk); 932 + reset_control_release(vde->rst); 933 + reset_control_release(vde->rst_mc); 931 934 932 935 return 0; 933 936 } ··· 941 936 struct tegra_vde *vde = dev_get_drvdata(dev); 942 937 int err; 943 938 944 - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC, 945 - vde->clk, vde->rst); 939 + err = reset_control_acquire(vde->rst_mc); 946 940 if (err) { 947 - dev_err(dev, "Failed to power up HW : %d\n", err); 941 + dev_err(dev, "Failed to acquire mc reset: %d\n", err); 948 942 return err; 949 943 } 950 944 945 + err = reset_control_acquire(vde->rst); 946 + if (err) { 947 + dev_err(dev, "Failed to acquire reset: %d\n", err); 948 + goto release_mc_reset; 949 + } 950 + 951 + if (!dev->pm_domain) { 952 + err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC, 953 + vde->clk, vde->rst); 954 + if (err) { 955 + dev_err(dev, "Failed to power up HW : %d\n", err); 956 + goto release_reset; 957 + } 958 + } else { 959 + /* 960 + * tegra_powergate_sequence_power_up() leaves clocks enabled, 961 + * while GENPD not. 
962 + */ 963 + err = clk_prepare_enable(vde->clk); 964 + if (err) { 965 + dev_err(dev, "Failed to enable clock: %d\n", err); 966 + goto release_reset; 967 + } 968 + } 969 + 951 970 return 0; 971 + 972 + release_reset: 973 + reset_control_release(vde->rst); 974 + release_mc_reset: 975 + reset_control_release(vde->rst_mc); 976 + 977 + return err; 952 978 } 953 979 954 980 static int tegra_vde_probe(struct platform_device *pdev) ··· 1037 1001 return err; 1038 1002 } 1039 1003 1040 - vde->rst = devm_reset_control_get(dev, NULL); 1004 + vde->rst = devm_reset_control_get_exclusive_released(dev, NULL); 1041 1005 if (IS_ERR(vde->rst)) { 1042 1006 err = PTR_ERR(vde->rst); 1043 1007 dev_err(dev, "Could not get VDE reset %d\n", err); 1044 1008 return err; 1045 1009 } 1046 1010 1047 - vde->rst_mc = devm_reset_control_get_optional(dev, "mc"); 1011 + vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc"); 1048 1012 if (IS_ERR(vde->rst_mc)) { 1049 1013 err = PTR_ERR(vde->rst_mc); 1050 1014 dev_err(dev, "Could not get MC reset %d\n", err); ··· 1059 1023 dev_name(dev), vde); 1060 1024 if (err) { 1061 1025 dev_err(dev, "Could not request IRQ %d\n", err); 1026 + return err; 1027 + } 1028 + 1029 + err = devm_tegra_core_dev_init_opp_table_common(dev); 1030 + if (err) { 1031 + dev_err(dev, "Could initialize OPP table %d\n", err); 1062 1032 return err; 1063 1033 } 1064 1034 ··· 1175 1133 * On some devices bootloader isn't ready to a power-gated VDE on 1176 1134 * a warm-reboot, machine will hang in that case. 1177 1135 */ 1178 - if (pm_runtime_status_suspended(&pdev->dev)) 1179 - tegra_vde_runtime_resume(&pdev->dev); 1136 + pm_runtime_get_sync(&pdev->dev); 1180 1137 } 1181 1138 1182 1139 static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
+1
drivers/tee/optee/Makefile
··· 2 2 obj-$(CONFIG_OPTEE) += optee.o 3 3 optee-objs += core.o 4 4 optee-objs += call.o 5 + optee-objs += notif.o 5 6 optee-objs += rpc.o 6 7 optee-objs += supp.o 7 8 optee-objs += device.o
+1 -1
drivers/tee/optee/core.c
··· 157 157 /* Unregister OP-TEE specific client devices on TEE bus */ 158 158 optee_unregister_devices(); 159 159 160 + optee_notif_uninit(optee); 160 161 /* 161 162 * The two devices have to be unregistered before we can free the 162 163 * other resources. ··· 166 165 tee_device_unregister(optee->teedev); 167 166 168 167 tee_shm_pool_free(optee->pool); 169 - optee_wait_queue_exit(&optee->wait_queue); 170 168 optee_supp_uninit(&optee->supp); 171 169 mutex_destroy(&optee->call_queue.mutex); 172 170 }
+5 -1
drivers/tee/optee/ffa_abi.c
··· 855 855 mutex_init(&optee->ffa.mutex); 856 856 mutex_init(&optee->call_queue.mutex); 857 857 INIT_LIST_HEAD(&optee->call_queue.waiters); 858 - optee_wait_queue_init(&optee->wait_queue); 859 858 optee_supp_init(&optee->supp); 860 859 ffa_dev_set_drvdata(ffa_dev, optee); 860 + rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE); 861 + if (rc) { 862 + optee_ffa_remove(ffa_dev); 863 + return rc; 864 + } 861 865 862 866 rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 863 867 if (rc) {
+125
drivers/tee/optee/notif.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2015-2021, Linaro Limited 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/arm-smccc.h> 9 + #include <linux/errno.h> 10 + #include <linux/slab.h> 11 + #include <linux/spinlock.h> 12 + #include <linux/tee_drv.h> 13 + #include "optee_private.h" 14 + 15 + struct notif_entry { 16 + struct list_head link; 17 + struct completion c; 18 + u_int key; 19 + }; 20 + 21 + static bool have_key(struct optee *optee, u_int key) 22 + { 23 + struct notif_entry *entry; 24 + 25 + list_for_each_entry(entry, &optee->notif.db, link) 26 + if (entry->key == key) 27 + return true; 28 + 29 + return false; 30 + } 31 + 32 + int optee_notif_wait(struct optee *optee, u_int key) 33 + { 34 + unsigned long flags; 35 + struct notif_entry *entry; 36 + int rc = 0; 37 + 38 + if (key > optee->notif.max_key) 39 + return -EINVAL; 40 + 41 + entry = kmalloc(sizeof(*entry), GFP_KERNEL); 42 + if (!entry) 43 + return -ENOMEM; 44 + init_completion(&entry->c); 45 + entry->key = key; 46 + 47 + spin_lock_irqsave(&optee->notif.lock, flags); 48 + 49 + /* 50 + * If the bit is already set it means that the key has already 51 + * been posted and we must not wait. 52 + */ 53 + if (test_bit(key, optee->notif.bitmap)) { 54 + clear_bit(key, optee->notif.bitmap); 55 + goto out; 56 + } 57 + 58 + /* 59 + * Check if someone is already waiting for this key. If there is 60 + * it's a programming error. 61 + */ 62 + if (have_key(optee, key)) { 63 + rc = -EBUSY; 64 + goto out; 65 + } 66 + 67 + list_add_tail(&entry->link, &optee->notif.db); 68 + 69 + /* 70 + * Unlock temporarily and wait for completion. 
71 + */ 72 + spin_unlock_irqrestore(&optee->notif.lock, flags); 73 + wait_for_completion(&entry->c); 74 + spin_lock_irqsave(&optee->notif.lock, flags); 75 + 76 + list_del(&entry->link); 77 + out: 78 + spin_unlock_irqrestore(&optee->notif.lock, flags); 79 + 80 + kfree(entry); 81 + 82 + return rc; 83 + } 84 + 85 + int optee_notif_send(struct optee *optee, u_int key) 86 + { 87 + unsigned long flags; 88 + struct notif_entry *entry; 89 + 90 + if (key > optee->notif.max_key) 91 + return -EINVAL; 92 + 93 + spin_lock_irqsave(&optee->notif.lock, flags); 94 + 95 + list_for_each_entry(entry, &optee->notif.db, link) 96 + if (entry->key == key) { 97 + complete(&entry->c); 98 + goto out; 99 + } 100 + 101 + /* Only set the bit in case there where nobody waiting */ 102 + set_bit(key, optee->notif.bitmap); 103 + out: 104 + spin_unlock_irqrestore(&optee->notif.lock, flags); 105 + 106 + return 0; 107 + } 108 + 109 + int optee_notif_init(struct optee *optee, u_int max_key) 110 + { 111 + spin_lock_init(&optee->notif.lock); 112 + INIT_LIST_HEAD(&optee->notif.db); 113 + optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL); 114 + if (!optee->notif.bitmap) 115 + return -ENOMEM; 116 + 117 + optee->notif.max_key = max_key; 118 + 119 + return 0; 120 + } 121 + 122 + void optee_notif_uninit(struct optee *optee) 123 + { 124 + kfree(optee->notif.bitmap); 125 + }
+9
drivers/tee/optee/optee_msg.h
··· 318 318 * [in] param[0].u.rmem.shm_ref holds shared memory reference 319 319 * [in] param[0].u.rmem.offs 0 320 320 * [in] param[0].u.rmem.size 0 321 + * 322 + * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing 323 + * of a driver. 324 + * 325 + * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is 326 + * normal world unable to process asynchronous notifications. Typically 327 + * used when the driver is shut down. 321 328 */ 322 329 #define OPTEE_MSG_CMD_OPEN_SESSION 0 323 330 #define OPTEE_MSG_CMD_INVOKE_COMMAND 1 ··· 332 325 #define OPTEE_MSG_CMD_CANCEL 3 333 326 #define OPTEE_MSG_CMD_REGISTER_SHM 4 334 327 #define OPTEE_MSG_CMD_UNREGISTER_SHM 5 328 + #define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6 329 + #define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7 335 330 #define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 336 331 337 332 #endif /* _OPTEE_MSG_H */
+20 -8
drivers/tee/optee/optee_private.h
··· 28 28 29 29 #define TEEC_ORIGIN_COMMS 0x00000002 30 30 31 + /* 32 + * This value should be larger than the number threads in secure world to 33 + * meet the need from secure world. The number of threads in secure world 34 + * are usually not even close to 255 so we should be safe for now. 35 + */ 36 + #define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255 37 + 31 38 typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, 32 39 unsigned long, unsigned long, unsigned long, 33 40 unsigned long, unsigned long, ··· 51 44 struct list_head waiters; 52 45 }; 53 46 54 - struct optee_wait_queue { 55 - /* Serializes access to this struct */ 56 - struct mutex mu; 47 + struct optee_notif { 48 + u_int max_key; 49 + struct tee_context *ctx; 50 + /* Serializes access to the elements below in this struct */ 51 + spinlock_t lock; 57 52 struct list_head db; 53 + u_long *bitmap; 58 54 }; 59 55 60 56 /** ··· 89 79 optee_invoke_fn *invoke_fn; 90 80 void *memremaped_shm; 91 81 u32 sec_caps; 82 + unsigned int notif_irq; 92 83 }; 93 84 94 85 /** ··· 140 129 * @smc: specific to SMC ABI 141 130 * @ffa: specific to FF-A ABI 142 131 * @call_queue: queue of threads waiting to call @invoke_fn 143 - * @wait_queue: queue of threads from secure world waiting for a 144 - * secure world sync object 132 + * @notif: notification synchronization struct 145 133 * @supp: supplicant synchronization struct for RPC to supplicant 146 134 * @pool: shared memory pool 147 135 * @rpc_arg_count: If > 0 number of RPC parameters to make room for ··· 157 147 struct optee_ffa ffa; 158 148 }; 159 149 struct optee_call_queue call_queue; 160 - struct optee_wait_queue wait_queue; 150 + struct optee_notif notif; 161 151 struct optee_supp supp; 162 152 struct tee_shm_pool *pool; 163 153 unsigned int rpc_arg_count; ··· 195 185 size_t num_entries; 196 186 }; 197 187 198 - void optee_wait_queue_init(struct optee_wait_queue *wq); 199 - void optee_wait_queue_exit(struct optee_wait_queue *wq); 188 + int 
optee_notif_init(struct optee *optee, u_int max_key); 189 + void optee_notif_uninit(struct optee *optee); 190 + int optee_notif_wait(struct optee *optee, u_int key); 191 + int optee_notif_send(struct optee *optee, u_int key); 200 192 201 193 u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, 202 194 struct tee_param *param);
+17 -14
drivers/tee/optee/optee_rpc_cmd.h
··· 28 28 #define OPTEE_RPC_CMD_GET_TIME 3 29 29 30 30 /* 31 - * Wait queue primitive, helper for secure world to implement a wait queue. 31 + * Notification from/to secure world. 32 32 * 33 - * If secure world needs to wait for a secure world mutex it issues a sleep 34 - * request instead of spinning in secure world. Conversely is a wakeup 35 - * request issued when a secure world mutex with a thread waiting thread is 36 - * unlocked. 33 + * If secure world needs to wait for something, for instance a mutex, it 34 + * does a notification wait request instead of spinning in secure world. 35 + * Conversely can a synchronous notification can be sent when a secure 36 + * world mutex with a thread waiting thread is unlocked. 37 37 * 38 - * Waiting on a key 39 - * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP 40 - * [in] value[0].b Wait key 38 + * This interface can also be used to wait for a asynchronous notification 39 + * which instead is sent via a non-secure interrupt. 41 40 * 42 - * Waking up a key 43 - * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP 44 - * [in] value[0].b Wakeup key 41 + * Waiting on notification 42 + * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT 43 + * [in] value[0].b notification value 44 + * 45 + * Sending a synchronous notification 46 + * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND 47 + * [in] value[0].b notification value 45 48 */ 46 - #define OPTEE_RPC_CMD_WAIT_QUEUE 4 47 - #define OPTEE_RPC_WAIT_QUEUE_SLEEP 0 48 - #define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1 49 + #define OPTEE_RPC_CMD_NOTIFICATION 4 50 + #define OPTEE_RPC_NOTIFICATION_WAIT 0 51 + #define OPTEE_RPC_NOTIFICATION_SEND 1 49 52 50 53 /* 51 54 * Suspend execution
+73 -2
drivers/tee/optee/optee_smc.h
··· 107 107 /* 108 108 * Call with struct optee_msg_arg as argument 109 109 * 110 + * When calling this function normal world has a few responsibilities: 111 + * 1. It must be able to handle eventual RPCs 112 + * 2. Non-secure interrupts should not be masked 113 + * 3. If asynchronous notifications has been negotiated successfully, then 114 + * asynchronous notifications should be unmasked during this call. 115 + * 110 116 * Call register usage: 111 117 * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG 112 118 * a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg ··· 201 195 * Normal return register usage: 202 196 * a0 OPTEE_SMC_RETURN_OK 203 197 * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_* 204 - * a2-7 Preserved 198 + * a2 The maximum secure world notification number 199 + * a3-7 Preserved 205 200 * 206 201 * Error return register usage: 207 202 * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world ··· 225 218 #define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3) 226 219 /* Secure world supports Shared Memory with a NULL reference */ 227 220 #define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4) 221 + /* Secure world supports asynchronous notification of normal world */ 222 + #define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5) 228 223 229 224 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 230 225 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ ··· 235 226 struct optee_smc_exchange_capabilities_result { 236 227 unsigned long status; 237 228 unsigned long capabilities; 229 + unsigned long max_notif_value; 238 230 unsigned long reserved0; 239 - unsigned long reserved1; 240 231 }; 241 232 242 233 /* ··· 327 318 #define OPTEE_SMC_FUNCID_GET_THREAD_COUNT 15 328 319 #define OPTEE_SMC_GET_THREAD_COUNT \ 329 320 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT) 321 + 322 + /* 323 + * Inform OP-TEE that normal world is able to receive asynchronous 324 + * notifications. 
325 + * 326 + * Call requests usage: 327 + * a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF 328 + * a1-6 Not used 329 + * a7 Hypervisor Client ID register 330 + * 331 + * Normal return register usage: 332 + * a0 OPTEE_SMC_RETURN_OK 333 + * a1-7 Preserved 334 + * 335 + * Not supported return register usage: 336 + * a0 OPTEE_SMC_RETURN_ENOTAVAIL 337 + * a1-7 Preserved 338 + */ 339 + #define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16 340 + #define OPTEE_SMC_ENABLE_ASYNC_NOTIF \ 341 + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF) 342 + 343 + /* 344 + * Retrieve a value of notifications pending since the last call of this 345 + * function. 346 + * 347 + * OP-TEE keeps a record of all posted values. When an interrupt is 348 + * received which indicates that there are posted values this function 349 + * should be called until all pended values have been retrieved. When a 350 + * value is retrieved, it's cleared from the record in secure world. 351 + * 352 + * Call requests usage: 353 + * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE 354 + * a1-6 Not used 355 + * a7 Hypervisor Client ID register 356 + * 357 + * Normal return register usage: 358 + * a0 OPTEE_SMC_RETURN_OK 359 + * a1 value 360 + * a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is 361 + * valid, else 0 if no values where pending 362 + * a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is 363 + * pending, else 0. 364 + * Bit[31:2]: MBZ 365 + * a3-7 Preserved 366 + * 367 + * Not supported return register usage: 368 + * a0 OPTEE_SMC_RETURN_ENOTAVAIL 369 + * a1-7 Preserved 370 + */ 371 + #define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0) 372 + #define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1) 373 + 374 + /* 375 + * Notification that OP-TEE expects a yielding call to do some bottom half 376 + * work in a driver. 
377 + */ 378 + #define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0 379 + 380 + #define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17 381 + #define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \ 382 + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE) 330 383 331 384 /* 332 385 * Resume from RPC (for example after processing a foreign interrupt)
+7 -64
drivers/tee/optee/rpc.c
··· 12 12 #include "optee_private.h" 13 13 #include "optee_rpc_cmd.h" 14 14 15 - struct wq_entry { 16 - struct list_head link; 17 - struct completion c; 18 - u32 key; 19 - }; 20 - 21 - void optee_wait_queue_init(struct optee_wait_queue *priv) 22 - { 23 - mutex_init(&priv->mu); 24 - INIT_LIST_HEAD(&priv->db); 25 - } 26 - 27 - void optee_wait_queue_exit(struct optee_wait_queue *priv) 28 - { 29 - mutex_destroy(&priv->mu); 30 - } 31 - 32 15 static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg) 33 16 { 34 17 struct timespec64 ts; ··· 127 144 } 128 145 #endif 129 146 130 - static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) 131 - { 132 - struct wq_entry *w; 133 - 134 - mutex_lock(&wq->mu); 135 - 136 - list_for_each_entry(w, &wq->db, link) 137 - if (w->key == key) 138 - goto out; 139 - 140 - w = kmalloc(sizeof(*w), GFP_KERNEL); 141 - if (w) { 142 - init_completion(&w->c); 143 - w->key = key; 144 - list_add_tail(&w->link, &wq->db); 145 - } 146 - out: 147 - mutex_unlock(&wq->mu); 148 - return w; 149 - } 150 - 151 - static void wq_sleep(struct optee_wait_queue *wq, u32 key) 152 - { 153 - struct wq_entry *w = wq_entry_get(wq, key); 154 - 155 - if (w) { 156 - wait_for_completion(&w->c); 157 - mutex_lock(&wq->mu); 158 - list_del(&w->link); 159 - mutex_unlock(&wq->mu); 160 - kfree(w); 161 - } 162 - } 163 - 164 - static void wq_wakeup(struct optee_wait_queue *wq, u32 key) 165 - { 166 - struct wq_entry *w = wq_entry_get(wq, key); 167 - 168 - if (w) 169 - complete(&w->c); 170 - } 171 - 172 147 static void handle_rpc_func_cmd_wq(struct optee *optee, 173 148 struct optee_msg_arg *arg) 174 149 { ··· 138 197 goto bad; 139 198 140 199 switch (arg->params[0].u.value.a) { 141 - case OPTEE_RPC_WAIT_QUEUE_SLEEP: 142 - wq_sleep(&optee->wait_queue, arg->params[0].u.value.b); 200 + case OPTEE_RPC_NOTIFICATION_WAIT: 201 + if (optee_notif_wait(optee, arg->params[0].u.value.b)) 202 + goto bad; 143 203 break; 144 - case OPTEE_RPC_WAIT_QUEUE_WAKEUP: 145 - 
wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b); 204 + case OPTEE_RPC_NOTIFICATION_SEND: 205 + if (optee_notif_send(optee, arg->params[0].u.value.b)) 206 + goto bad; 146 207 break; 147 208 default: 148 209 goto bad; ··· 262 319 case OPTEE_RPC_CMD_GET_TIME: 263 320 handle_rpc_func_cmd_get_time(arg); 264 321 break; 265 - case OPTEE_RPC_CMD_WAIT_QUEUE: 322 + case OPTEE_RPC_CMD_NOTIFICATION: 266 323 handle_rpc_func_cmd_wq(optee, arg); 267 324 break; 268 325 case OPTEE_RPC_CMD_SUSPEND:
+206 -31
drivers/tee/optee/smc_abi.c
··· 8 8 9 9 #include <linux/arm-smccc.h> 10 10 #include <linux/errno.h> 11 + #include <linux/interrupt.h> 11 12 #include <linux/io.h> 12 - #include <linux/sched.h> 13 + #include <linux/irqdomain.h> 13 14 #include <linux/mm.h> 14 15 #include <linux/module.h> 15 16 #include <linux/of.h> 17 + #include <linux/of_irq.h> 16 18 #include <linux/of_platform.h> 17 19 #include <linux/platform_device.h> 20 + #include <linux/sched.h> 18 21 #include <linux/slab.h> 19 22 #include <linux/string.h> 20 23 #include <linux/tee_drv.h> ··· 38 35 * 2. Low level support functions to register shared memory in secure world 39 36 * 3. Dynamic shared memory pool based on alloc_pages() 40 37 * 4. Do a normal scheduled call into secure world 41 - * 5. Driver initialization. 38 + * 5. Asynchronous notification 39 + * 6. Driver initialization. 42 40 */ 43 41 44 42 #define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES ··· 881 877 return rc; 882 878 } 883 879 880 + static int simple_call_with_arg(struct tee_context *ctx, u32 cmd) 881 + { 882 + struct optee_msg_arg *msg_arg; 883 + struct tee_shm *shm; 884 + 885 + shm = optee_get_msg_arg(ctx, 0, &msg_arg); 886 + if (IS_ERR(shm)) 887 + return PTR_ERR(shm); 888 + 889 + msg_arg->cmd = cmd; 890 + optee_smc_do_call_with_arg(ctx, shm); 891 + 892 + tee_shm_free(shm); 893 + return 0; 894 + } 895 + 896 + static int optee_smc_do_bottom_half(struct tee_context *ctx) 897 + { 898 + return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF); 899 + } 900 + 901 + static int optee_smc_stop_async_notif(struct tee_context *ctx) 902 + { 903 + return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF); 904 + } 905 + 884 906 /* 885 - * 5. Driver initialization 907 + * 5. 
Asynchronous notification 908 + */ 909 + 910 + static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid, 911 + bool *value_pending) 912 + { 913 + struct arm_smccc_res res; 914 + 915 + invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res); 916 + 917 + if (res.a0) 918 + return 0; 919 + *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID); 920 + *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING); 921 + return res.a1; 922 + } 923 + 924 + static irqreturn_t notif_irq_handler(int irq, void *dev_id) 925 + { 926 + struct optee *optee = dev_id; 927 + bool do_bottom_half = false; 928 + bool value_valid; 929 + bool value_pending; 930 + u32 value; 931 + 932 + do { 933 + value = get_async_notif_value(optee->smc.invoke_fn, 934 + &value_valid, &value_pending); 935 + if (!value_valid) 936 + break; 937 + 938 + if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF) 939 + do_bottom_half = true; 940 + else 941 + optee_notif_send(optee, value); 942 + } while (value_pending); 943 + 944 + if (do_bottom_half) 945 + return IRQ_WAKE_THREAD; 946 + return IRQ_HANDLED; 947 + } 948 + 949 + static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id) 950 + { 951 + struct optee *optee = dev_id; 952 + 953 + optee_smc_do_bottom_half(optee->notif.ctx); 954 + 955 + return IRQ_HANDLED; 956 + } 957 + 958 + static int optee_smc_notif_init_irq(struct optee *optee, u_int irq) 959 + { 960 + struct tee_context *ctx; 961 + int rc; 962 + 963 + ctx = teedev_open(optee->teedev); 964 + if (IS_ERR(ctx)) 965 + return PTR_ERR(ctx); 966 + 967 + optee->notif.ctx = ctx; 968 + rc = request_threaded_irq(irq, notif_irq_handler, 969 + notif_irq_thread_fn, 970 + 0, "optee_notification", optee); 971 + if (rc) 972 + goto err_close_ctx; 973 + 974 + optee->smc.notif_irq = irq; 975 + 976 + return 0; 977 + 978 + err_close_ctx: 979 + teedev_close_context(optee->notif.ctx); 980 + optee->notif.ctx = NULL; 981 + 982 + return rc; 983 + } 984 + 985 + static void 
optee_smc_notif_uninit_irq(struct optee *optee) 986 + { 987 + if (optee->notif.ctx) { 988 + optee_smc_stop_async_notif(optee->notif.ctx); 989 + if (optee->smc.notif_irq) { 990 + free_irq(optee->smc.notif_irq, optee); 991 + irq_dispose_mapping(optee->smc.notif_irq); 992 + } 993 + 994 + /* 995 + * The thread normally working with optee->notif.ctx was 996 + * stopped with free_irq() above. 997 + * 998 + * Note we're not using teedev_close_context() or 999 + * tee_client_close_context() since we have already called 1000 + * tee_device_put() while initializing to avoid a circular 1001 + * reference counting. 1002 + */ 1003 + teedev_close_context(optee->notif.ctx); 1004 + } 1005 + } 1006 + 1007 + /* 1008 + * 6. Driver initialization 886 1009 * 887 - * During driver inititialization is secure world probed to find out which 1010 + * During driver initialization is secure world probed to find out which 888 1011 * features it supports so the driver can be initialized with a matching 889 1012 * configuration. This involves for instance support for dynamic shared 890 1013 * memory instead of a static memory carvout. 
··· 1083 952 .from_msg_param = optee_from_msg_param, 1084 953 }; 1085 954 955 + static int enable_async_notif(optee_invoke_fn *invoke_fn) 956 + { 957 + struct arm_smccc_res res; 958 + 959 + invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res); 960 + 961 + if (res.a0) 962 + return -EINVAL; 963 + return 0; 964 + } 965 + 1086 966 static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn) 1087 967 { 1088 968 struct arm_smccc_res res; ··· 1143 1001 } 1144 1002 1145 1003 static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn, 1146 - u32 *sec_caps) 1004 + u32 *sec_caps, u32 *max_notif_value) 1147 1005 { 1148 1006 union { 1149 1007 struct arm_smccc_res smccc; ··· 1166 1024 return false; 1167 1025 1168 1026 *sec_caps = res.result.capabilities; 1027 + if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) 1028 + *max_notif_value = res.result.max_notif_value; 1029 + else 1030 + *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE; 1031 + 1169 1032 return true; 1170 1033 } 1171 1034 ··· 1335 1188 */ 1336 1189 optee_disable_shm_cache(optee); 1337 1190 1191 + optee_smc_notif_uninit_irq(optee); 1192 + 1338 1193 optee_remove_common(optee); 1339 1194 1340 1195 if (optee->smc.memremaped_shm) ··· 1366 1217 struct optee *optee = NULL; 1367 1218 void *memremaped_shm = NULL; 1368 1219 struct tee_device *teedev; 1220 + u32 max_notif_value; 1369 1221 u32 sec_caps; 1370 1222 int rc; 1371 1223 ··· 1386 1236 return -EINVAL; 1387 1237 } 1388 1238 1389 - if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) { 1239 + if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps, 1240 + &max_notif_value)) { 1390 1241 pr_warn("capabilities mismatch\n"); 1391 1242 return -EINVAL; 1392 1243 } ··· 1410 1259 optee = kzalloc(sizeof(*optee), GFP_KERNEL); 1411 1260 if (!optee) { 1412 1261 rc = -ENOMEM; 1413 - goto err; 1262 + goto err_free_pool; 1414 1263 } 1415 1264 1416 1265 optee->ops = &optee_ops; ··· 1420 1269 teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, 
optee); 1421 1270 if (IS_ERR(teedev)) { 1422 1271 rc = PTR_ERR(teedev); 1423 - goto err; 1272 + goto err_free_optee; 1424 1273 } 1425 1274 optee->teedev = teedev; 1426 1275 1427 1276 teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); 1428 1277 if (IS_ERR(teedev)) { 1429 1278 rc = PTR_ERR(teedev); 1430 - goto err; 1279 + goto err_unreg_teedev; 1431 1280 } 1432 1281 optee->supp_teedev = teedev; 1433 1282 1434 1283 rc = tee_device_register(optee->teedev); 1435 1284 if (rc) 1436 - goto err; 1285 + goto err_unreg_supp_teedev; 1437 1286 1438 1287 rc = tee_device_register(optee->supp_teedev); 1439 1288 if (rc) 1440 - goto err; 1289 + goto err_unreg_supp_teedev; 1441 1290 1442 1291 mutex_init(&optee->call_queue.mutex); 1443 1292 INIT_LIST_HEAD(&optee->call_queue.waiters); 1444 - optee_wait_queue_init(&optee->wait_queue); 1445 1293 optee_supp_init(&optee->supp); 1446 1294 optee->smc.memremaped_shm = memremaped_shm; 1447 1295 optee->pool = pool; 1296 + 1297 + platform_set_drvdata(pdev, optee); 1298 + rc = optee_notif_init(optee, max_notif_value); 1299 + if (rc) 1300 + goto err_supp_uninit; 1301 + 1302 + if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) { 1303 + unsigned int irq; 1304 + 1305 + rc = platform_get_irq(pdev, 0); 1306 + if (rc < 0) { 1307 + pr_err("platform_get_irq: ret %d\n", rc); 1308 + goto err_notif_uninit; 1309 + } 1310 + irq = rc; 1311 + 1312 + rc = optee_smc_notif_init_irq(optee, irq); 1313 + if (rc) { 1314 + irq_dispose_mapping(irq); 1315 + goto err_notif_uninit; 1316 + } 1317 + enable_async_notif(optee->smc.invoke_fn); 1318 + pr_info("Asynchronous notifications enabled\n"); 1319 + } 1448 1320 1449 1321 /* 1450 1322 * Ensure that there are no pre-existing shm objects before enabling ··· 1483 1309 if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 1484 1310 pr_info("dynamic shared memory is enabled\n"); 1485 1311 1486 - platform_set_drvdata(pdev, optee); 1487 - 1488 1312 rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 1489 - if (rc) { 1490 
- optee_smc_remove(pdev); 1491 - return rc; 1492 - } 1313 + if (rc) 1314 + goto err_disable_shm_cache; 1493 1315 1494 1316 pr_info("initialized driver\n"); 1495 1317 return 0; 1496 - err: 1497 - if (optee) { 1498 - /* 1499 - * tee_device_unregister() is safe to call even if the 1500 - * devices hasn't been registered with 1501 - * tee_device_register() yet. 1502 - */ 1503 - tee_device_unregister(optee->supp_teedev); 1504 - tee_device_unregister(optee->teedev); 1505 - kfree(optee); 1506 - } 1507 - if (pool) 1508 - tee_shm_pool_free(pool); 1318 + 1319 + err_disable_shm_cache: 1320 + optee_disable_shm_cache(optee); 1321 + optee_smc_notif_uninit_irq(optee); 1322 + optee_unregister_devices(); 1323 + err_notif_uninit: 1324 + optee_notif_uninit(optee); 1325 + err_supp_uninit: 1326 + optee_supp_uninit(&optee->supp); 1327 + mutex_destroy(&optee->call_queue.mutex); 1328 + err_unreg_supp_teedev: 1329 + tee_device_unregister(optee->supp_teedev); 1330 + err_unreg_teedev: 1331 + tee_device_unregister(optee->teedev); 1332 + err_free_optee: 1333 + kfree(optee); 1334 + err_free_pool: 1335 + tee_shm_pool_free(pool); 1509 1336 if (memremaped_shm) 1510 1337 memunmap(memremaped_shm); 1511 1338 return rc;
+7 -3
drivers/tee/tee_core.c
··· 43 43 static struct class *tee_class; 44 44 static dev_t tee_devt; 45 45 46 - static struct tee_context *teedev_open(struct tee_device *teedev) 46 + struct tee_context *teedev_open(struct tee_device *teedev) 47 47 { 48 48 int rc; 49 49 struct tee_context *ctx; ··· 70 70 return ERR_PTR(rc); 71 71 72 72 } 73 + EXPORT_SYMBOL_GPL(teedev_open); 73 74 74 75 void teedev_ctx_get(struct tee_context *ctx) 75 76 { ··· 97 96 kref_put(&ctx->refcount, teedev_ctx_release); 98 97 } 99 98 100 - static void teedev_close_context(struct tee_context *ctx) 99 + void teedev_close_context(struct tee_context *ctx) 101 100 { 102 - tee_device_put(ctx->teedev); 101 + struct tee_device *teedev = ctx->teedev; 102 + 103 103 teedev_ctx_put(ctx); 104 + tee_device_put(teedev); 104 105 } 106 + EXPORT_SYMBOL_GPL(teedev_close_context); 105 107 106 108 static int tee_open(struct inode *inode, struct file *filp) 107 109 {
+46 -7
drivers/usb/chipidea/ci_hdrc_tegra.c
··· 7 7 #include <linux/io.h> 8 8 #include <linux/module.h> 9 9 #include <linux/of_device.h> 10 + #include <linux/pm_runtime.h> 10 11 #include <linux/reset.h> 11 12 12 13 #include <linux/usb.h> ··· 15 14 #include <linux/usb/hcd.h> 16 15 #include <linux/usb/of.h> 17 16 #include <linux/usb/phy.h> 17 + 18 + #include <soc/tegra/common.h> 18 19 19 20 #include "../host/ehci.h" 20 21 ··· 281 278 if (!usb) 282 279 return -ENOMEM; 283 280 281 + platform_set_drvdata(pdev, usb); 282 + 284 283 soc = of_device_get_match_data(&pdev->dev); 285 284 if (!soc) { 286 285 dev_err(&pdev->dev, "failed to match OF data\n"); ··· 301 296 return err; 302 297 } 303 298 304 - err = clk_prepare_enable(usb->clk); 305 - if (err < 0) { 306 - dev_err(&pdev->dev, "failed to enable clock: %d\n", err); 299 + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); 300 + if (err) 307 301 return err; 308 - } 302 + 303 + pm_runtime_enable(&pdev->dev); 304 + err = pm_runtime_resume_and_get(&pdev->dev); 305 + if (err) 306 + return err; 309 307 310 308 if (device_property_present(&pdev->dev, "nvidia,needs-double-reset")) 311 309 usb->needs_double_reset = true; ··· 327 319 err = usb_phy_init(usb->phy); 328 320 if (err) 329 321 goto fail_power_off; 330 - 331 - platform_set_drvdata(pdev, usb); 332 322 333 323 /* setup and register ChipIdea HDRC device */ 334 324 usb->soc = soc; ··· 356 350 phy_shutdown: 357 351 usb_phy_shutdown(usb->phy); 358 352 fail_power_off: 359 - clk_disable_unprepare(usb->clk); 353 + pm_runtime_put_sync_suspend(&pdev->dev); 354 + pm_runtime_force_suspend(&pdev->dev); 355 + 360 356 return err; 361 357 } 362 358 ··· 368 360 369 361 ci_hdrc_remove_device(usb->dev); 370 362 usb_phy_shutdown(usb->phy); 363 + 364 + pm_runtime_put_sync_suspend(&pdev->dev); 365 + pm_runtime_force_suspend(&pdev->dev); 366 + 367 + return 0; 368 + } 369 + 370 + static int __maybe_unused tegra_usb_runtime_resume(struct device *dev) 371 + { 372 + struct tegra_usb *usb = dev_get_drvdata(dev); 373 + int err; 374 + 
375 + err = clk_prepare_enable(usb->clk); 376 + if (err < 0) { 377 + dev_err(dev, "failed to enable clock: %d\n", err); 378 + return err; 379 + } 380 + 381 + return 0; 382 + } 383 + 384 + static int __maybe_unused tegra_usb_runtime_suspend(struct device *dev) 385 + { 386 + struct tegra_usb *usb = dev_get_drvdata(dev); 387 + 371 388 clk_disable_unprepare(usb->clk); 372 389 373 390 return 0; 374 391 } 375 392 393 + static const struct dev_pm_ops tegra_usb_pm = { 394 + SET_RUNTIME_PM_OPS(tegra_usb_runtime_suspend, tegra_usb_runtime_resume, 395 + NULL) 396 + }; 397 + 376 398 static struct platform_driver tegra_usb_driver = { 377 399 .driver = { 378 400 .name = "tegra-usb", 379 401 .of_match_table = tegra_usb_of_match, 402 + .pm = &tegra_usb_pm, 380 403 }, 381 404 .probe = tegra_usb_probe, 382 405 .remove = tegra_usb_remove,
+64
include/dt-bindings/clock/r8a779f0-cpg-mssr.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0 or MIT) */ 2 + /* 3 + * Copyright (C) 2021 Renesas Electronics Corp. 4 + */ 5 + #ifndef __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ 6 + #define __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ 7 + 8 + #include <dt-bindings/clock/renesas-cpg-mssr.h> 9 + 10 + /* r8a779f0 CPG Core Clocks */ 11 + 12 + #define R8A779F0_CLK_ZX 0 13 + #define R8A779F0_CLK_ZS 1 14 + #define R8A779F0_CLK_ZT 2 15 + #define R8A779F0_CLK_ZTR 3 16 + #define R8A779F0_CLK_S0D2 4 17 + #define R8A779F0_CLK_S0D3 5 18 + #define R8A779F0_CLK_S0D4 6 19 + #define R8A779F0_CLK_S0D2_MM 7 20 + #define R8A779F0_CLK_S0D3_MM 8 21 + #define R8A779F0_CLK_S0D4_MM 9 22 + #define R8A779F0_CLK_S0D2_RT 10 23 + #define R8A779F0_CLK_S0D3_RT 11 24 + #define R8A779F0_CLK_S0D4_RT 12 25 + #define R8A779F0_CLK_S0D6_RT 13 26 + #define R8A779F0_CLK_S0D3_PER 14 27 + #define R8A779F0_CLK_S0D6_PER 15 28 + #define R8A779F0_CLK_S0D12_PER 16 29 + #define R8A779F0_CLK_S0D24_PER 17 30 + #define R8A779F0_CLK_S0D2_HSC 18 31 + #define R8A779F0_CLK_S0D3_HSC 19 32 + #define R8A779F0_CLK_S0D4_HSC 20 33 + #define R8A779F0_CLK_S0D6_HSC 21 34 + #define R8A779F0_CLK_S0D12_HSC 22 35 + #define R8A779F0_CLK_S0D2_CC 23 36 + #define R8A779F0_CLK_CL 24 37 + #define R8A779F0_CLK_CL16M 25 38 + #define R8A779F0_CLK_CL16M_MM 26 39 + #define R8A779F0_CLK_CL16M_RT 27 40 + #define R8A779F0_CLK_CL16M_PER 28 41 + #define R8A779F0_CLK_CL16M_HSC 29 42 + #define R8A779F0_CLK_Z0 30 43 + #define R8A779F0_CLK_Z1 31 44 + #define R8A779F0_CLK_ZB3 32 45 + #define R8A779F0_CLK_ZB3D2 33 46 + #define R8A779F0_CLK_ZB3D4 34 47 + #define R8A779F0_CLK_SD0H 35 48 + #define R8A779F0_CLK_SD0 36 49 + #define R8A779F0_CLK_RPC 37 50 + #define R8A779F0_CLK_RPCD2 38 51 + #define R8A779F0_CLK_MSO 39 52 + #define R8A779F0_CLK_SASYNCRT 40 53 + #define R8A779F0_CLK_SASYNCPERD1 41 54 + #define R8A779F0_CLK_SASYNCPERD2 42 55 + #define R8A779F0_CLK_SASYNCPERD4 43 56 + #define R8A779F0_CLK_DBGSOC_HSC 44 57 + #define R8A779F0_CLK_RSW2 45 58 + #define 
R8A779F0_CLK_OSC 46 59 + #define R8A779F0_CLK_ZR 47 60 + #define R8A779F0_CLK_CPEX 48 61 + #define R8A779F0_CLK_CBFUSA 49 62 + #define R8A779F0_CLK_R 50 63 + 64 + #endif /* __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ */
+5
include/dt-bindings/power/imx8mn-power.h
··· 12 12 #define IMX8MN_POWER_DOMAIN_DISPMIX 3 13 13 #define IMX8MN_POWER_DOMAIN_MIPI 4 14 14 15 + #define IMX8MN_DISPBLK_PD_MIPI_DSI 0 16 + #define IMX8MN_DISPBLK_PD_MIPI_CSI 1 17 + #define IMX8MN_DISPBLK_PD_LCDIF 2 18 + #define IMX8MN_DISPBLK_PD_ISI 3 19 + 15 20 #endif
+33
include/dt-bindings/power/qcom-rpmpd.h
··· 68 68 #define SM8350_MXC_AO 11 69 69 #define SM8350_MSS 12 70 70 71 + /* SM8450 Power Domain Indexes */ 72 + #define SM8450_CX 0 73 + #define SM8450_CX_AO 1 74 + #define SM8450_EBI 2 75 + #define SM8450_GFX 3 76 + #define SM8450_LCX 4 77 + #define SM8450_LMX 5 78 + #define SM8450_MMCX 6 79 + #define SM8450_MMCX_AO 7 80 + #define SM8450_MX 8 81 + #define SM8450_MX_AO 9 82 + #define SM8450_MXC 10 83 + #define SM8450_MXC_AO 11 84 + #define SM8450_MSS 12 85 + 71 86 /* SC7180 Power Domain Indexes */ 72 87 #define SC7180_CX 0 73 88 #define SC7180_CX_AO 1 ··· 233 218 #define SM6115_VDDMX_VFL 5 234 219 #define SM6115_VDD_LPI_CX 6 235 220 #define SM6115_VDD_LPI_MX 7 221 + 222 + /* SM6125 Power Domains */ 223 + #define SM6125_VDDCX 0 224 + #define SM6125_VDDCX_AO 1 225 + #define SM6125_VDDCX_VFL 2 226 + #define SM6125_VDDMX 3 227 + #define SM6125_VDDMX_AO 4 228 + #define SM6125_VDDMX_VFL 5 229 + 230 + /* QCM2290 Power Domains */ 231 + #define QCM2290_VDDCX 0 232 + #define QCM2290_VDDCX_AO 1 233 + #define QCM2290_VDDCX_VFL 2 234 + #define QCM2290_VDDMX 3 235 + #define QCM2290_VDDMX_AO 4 236 + #define QCM2290_VDDMX_VFL 5 237 + #define QCM2290_VDD_LPI_CX 6 238 + #define QCM2290_VDD_LPI_MX 7 236 239 237 240 /* RPM SMD Power Domain performance levels */ 238 241 #define RPM_SMD_LEVEL_RETENTION 16
+30
include/dt-bindings/power/r8a779f0-sysc.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0 or MIT) */ 2 + /* 3 + * Copyright (C) 2021 Renesas Electronics Corp. 4 + */ 5 + #ifndef __DT_BINDINGS_POWER_R8A779F0_SYSC_H__ 6 + #define __DT_BINDINGS_POWER_R8A779F0_SYSC_H__ 7 + 8 + /* 9 + * These power domain indices match the Power Domain Register Numbers (PDR) 10 + */ 11 + 12 + #define R8A779F0_PD_A1E0D0C0 0 13 + #define R8A779F0_PD_A1E0D0C1 1 14 + #define R8A779F0_PD_A1E0D1C0 2 15 + #define R8A779F0_PD_A1E0D1C1 3 16 + #define R8A779F0_PD_A1E1D0C0 4 17 + #define R8A779F0_PD_A1E1D0C1 5 18 + #define R8A779F0_PD_A1E1D1C0 6 19 + #define R8A779F0_PD_A1E1D1C1 7 20 + #define R8A779F0_PD_A2E0D0 16 21 + #define R8A779F0_PD_A2E0D1 17 22 + #define R8A779F0_PD_A2E1D0 18 23 + #define R8A779F0_PD_A2E1D1 19 24 + #define R8A779F0_PD_A3E0 20 25 + #define R8A779F0_PD_A3E1 21 26 + 27 + /* Always-on power area */ 28 + #define R8A779F0_PD_ALWAYS_ON 64 29 + 30 + #endif /* __DT_BINDINGS_POWER_R8A779A0_SYSC_H__*/
+17
include/dt-bindings/soc/samsung,exynos-usi.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 + /* 3 + * Copyright (c) 2021 Linaro Ltd. 4 + * Author: Sam Protsenko <semen.protsenko@linaro.org> 5 + * 6 + * Device Tree bindings for Samsung Exynos USI (Universal Serial Interface). 7 + */ 8 + 9 + #ifndef __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H 10 + #define __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H 11 + 12 + #define USI_V2_NONE 0 13 + #define USI_V2_UART 1 14 + #define USI_V2_SPI 2 15 + #define USI_V2_I2C 3 16 + 17 + #endif /* __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H */
+3
include/linux/soc/qcom/llcc-qcom.h
··· 33 33 #define LLCC_MODPE 29 34 34 #define LLCC_APTCM 30 35 35 #define LLCC_WRCACHE 31 36 + #define LLCC_CVPFW 32 37 + #define LLCC_CPUSS1 33 38 + #define LLCC_CPUHWT 36 36 39 37 40 /** 38 41 * struct llcc_slice_desc - Cache slice descriptor
+2
include/linux/soc/renesas/rcar-rst.h
··· 4 4 5 5 #ifdef CONFIG_RST_RCAR 6 6 int rcar_rst_read_mode_pins(u32 *mode); 7 + int rcar_rst_set_rproc_boot_addr(u64 boot_addr); 7 8 #else 8 9 static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; } 10 + static inline int rcar_rst_set_rproc_boot_addr(u64 boot_addr) { return -ENODEV; } 9 11 #endif 10 12 11 13 #endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
+14
include/linux/tee_drv.h
··· 587 587 #define to_tee_client_driver(d) \ 588 588 container_of(d, struct tee_client_driver, driver) 589 589 590 + /** 591 + * teedev_open() - Open a struct tee_device 592 + * @teedev: Device to open 593 + * 594 + * @return a pointer to struct tee_context on success or an ERR_PTR on failure. 595 + */ 596 + struct tee_context *teedev_open(struct tee_device *teedev); 597 + 598 + /** 599 + * teedev_close_context() - closes a struct tee_context 600 + * @ctx: The struct tee_context to close 601 + */ 602 + void teedev_close_context(struct tee_context *ctx); 603 + 590 604 #endif /*__TEE_DRV_H*/
+7 -1
include/memory/renesas-rpc-if.h
··· 57 57 } data; 58 58 }; 59 59 60 + enum rpcif_type { 61 + RPCIF_RCAR_GEN3, 62 + RPCIF_RZ_G2L, 63 + }; 64 + 60 65 struct rpcif { 61 66 struct device *dev; 62 67 void __iomem *base; ··· 69 64 struct regmap *regmap; 70 65 struct reset_control *rstc; 71 66 size_t size; 67 + enum rpcif_type type; 72 68 enum rpcif_data_dir dir; 73 69 u8 bus_size; 74 70 void *buffer; ··· 84 78 }; 85 79 86 80 int rpcif_sw_init(struct rpcif *rpc, struct device *dev); 87 - void rpcif_hw_init(struct rpcif *rpc, bool hyperflash); 81 + int rpcif_hw_init(struct rpcif *rpc, bool hyperflash); 88 82 void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, 89 83 size_t *len); 90 84 int rpcif_manual_xfer(struct rpcif *rpc);
+15
include/soc/tegra/common.h
··· 39 39 } 40 40 #endif 41 41 42 + static inline int 43 + devm_tegra_core_dev_init_opp_table_common(struct device *dev) 44 + { 45 + struct tegra_core_opp_params opp_params = {}; 46 + int err; 47 + 48 + opp_params.init_state = true; 49 + 50 + err = devm_tegra_core_dev_init_opp_table(dev, &opp_params); 51 + if (err != -ENODEV) 52 + return err; 53 + 54 + return 0; 55 + } 56 + 42 57 #endif /* __SOC_TEGRA_COMMON_H__ */