Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'soc-drivers-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull SoC driver updates from Arnd Bergmann:
"Lots of platform specific updates for Qualcomm SoCs, including a new
TEE subsystem driver for the Qualcomm QTEE firmware interface.

Added support for the Apple A11 SoC in drivers that are shared with
the M1/M2 series, among more updates for those.

Smaller platform specific driver updates for Renesas, ASpeed,
Broadcom, Nvidia, Mediatek, Amlogic, TI, Allwinner, and Freescale
SoCs.

Driver updates in the cache controller, memory controller and reset
controller subsystems.

SCMI firmware updates to add more features and improve robustness.
This includes support for having multiple SCMI providers in a single
system.

TEE subsystem support for protected DMA-bufs, allowing hardware to
access memory areas that are managed by the kernel but remain
inaccessible from the CPU in EL1/EL0"

* tag 'soc-drivers-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (139 commits)
soc/fsl/qbman: Use for_each_online_cpu() instead of for_each_cpu()
soc: fsl: qe: Drop legacy-of-mm-gpiochip.h header from GPIO driver
soc: fsl: qe: Change GPIO driver to a proper platform driver
tee: fix register_shm_helper()
pmdomain: apple: Add "apple,t8103-pmgr-pwrstate"
dt-bindings: spmi: Add Apple A11 and T2 compatible
serial: qcom-geni: Load UART qup Firmware from linux side
spi: geni-qcom: Load spi qup Firmware from linux side
i2c: qcom-geni: Load i2c qup Firmware from linux side
soc: qcom: geni-se: Add support to load QUP SE Firmware via Linux subsystem
soc: qcom: geni-se: Cleanup register defines and update copyright
dt-bindings: qcom: se-common: Add QUP Peripheral-specific properties for I2C, SPI, and SERIAL bus
Documentation: tee: Add Qualcomm TEE driver
tee: qcom: enable TEE_IOC_SHM_ALLOC ioctl
tee: qcom: add primordial object
tee: add Qualcomm TEE driver
tee: increase TEE_MAX_ARG_SIZE to 4096
tee: add TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF
tee: add TEE_IOCTL_PARAM_ATTR_TYPE_UBUF
tee: add close_context to TEE driver operation
...

+9434 -647
+20 -13
Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
··· 20 20 pattern: "^power-management@[0-9a-f]+$" 21 21 22 22 compatible: 23 - items: 24 - - enum: 25 - - apple,s5l8960x-pmgr 26 - - apple,t7000-pmgr 27 - - apple,s8000-pmgr 28 - - apple,t8010-pmgr 29 - - apple,t8015-pmgr 30 - - apple,t8103-pmgr 31 - - apple,t8112-pmgr 32 - - apple,t6000-pmgr 33 - - const: apple,pmgr 34 - - const: syscon 35 - - const: simple-mfd 23 + oneOf: 24 + - items: 25 + - enum: 26 + # Do not add additional SoC to this list. 27 + - apple,s5l8960x-pmgr 28 + - apple,t7000-pmgr 29 + - apple,s8000-pmgr 30 + - apple,t8010-pmgr 31 + - apple,t8015-pmgr 32 + - apple,t8103-pmgr 33 + - apple,t8112-pmgr 34 + - apple,t6000-pmgr 35 + - const: apple,pmgr 36 + - const: syscon 37 + - const: simple-mfd 38 + - items: 39 + - const: apple,t6020-pmgr 40 + - const: apple,t8103-pmgr 41 + - const: syscon 42 + - const: simple-mfd 36 43 37 44 reg: 38 45 maxItems: 1
+5 -1
Documentation/devicetree/bindings/cache/andestech,ax45mp-cache.yaml
··· 47 47 const: 2 48 48 49 49 cache-sets: 50 - const: 1024 50 + enum: [1024, 2048] 51 51 52 52 cache-size: 53 53 enum: [131072, 262144, 524288, 1048576, 2097152] ··· 81 81 const: 2048 82 82 cache-size: 83 83 const: 2097152 84 + else: 85 + properties: 86 + cache-sets: 87 + const: 1024 84 88 85 89 examples: 86 90 - |
+11 -6
Documentation/devicetree/bindings/clock/apple,nco.yaml
··· 19 19 20 20 properties: 21 21 compatible: 22 - items: 23 - - enum: 24 - - apple,t6000-nco 25 - - apple,t8103-nco 26 - - apple,t8112-nco 27 - - const: apple,nco 22 + oneOf: 23 + - items: 24 + - const: apple,t6020-nco 25 + - const: apple,t8103-nco 26 + - items: 27 + - enum: 28 + # Do not add additional SoC to this list. 29 + - apple,t6000-nco 30 + - apple,t8103-nco 31 + - apple,t8112-nco 32 + - const: apple,nco 28 33 29 34 clocks: 30 35 description:
+3
Documentation/devicetree/bindings/cpufreq/apple,cluster-cpufreq.yaml
··· 35 35 - const: apple,t7000-cluster-cpufreq 36 36 - const: apple,s5l8960x-cluster-cpufreq 37 37 - const: apple,s5l8960x-cluster-cpufreq 38 + - items: 39 + - const: apple,t6020-cluster-cpufreq 40 + - const: apple,t8112-cluster-cpufreq 38 41 39 42 reg: 40 43 maxItems: 1
+11 -6
Documentation/devicetree/bindings/dma/apple,admac.yaml
··· 22 22 23 23 properties: 24 24 compatible: 25 - items: 26 - - enum: 27 - - apple,t6000-admac 28 - - apple,t8103-admac 29 - - apple,t8112-admac 30 - - const: apple,admac 25 + oneOf: 26 + - items: 27 + - const: apple,t6020-admac 28 + - const: apple,t8103-admac 29 + - items: 30 + - enum: 31 + # Do not add additional SoC to this list. 32 + - apple,t6000-admac 33 + - apple,t8103-admac 34 + - apple,t8112-admac 35 + - const: apple,admac 31 36 32 37 reg: 33 38 maxItems: 1
+1 -1
Documentation/devicetree/bindings/firmware/arm,scmi.yaml
··· 27 27 28 28 properties: 29 29 $nodename: 30 - const: scmi 30 + pattern: '^scmi(-[0-9]+)?$' 31 31 32 32 compatible: 33 33 oneOf:
+3
Documentation/devicetree/bindings/firmware/qcom,scm.yaml
··· 36 36 - qcom,scm-msm8226 37 37 - qcom,scm-msm8660 38 38 - qcom,scm-msm8916 39 + - qcom,scm-msm8937 39 40 - qcom,scm-msm8953 40 41 - qcom,scm-msm8960 41 42 - qcom,scm-msm8974 ··· 135 134 - qcom,scm-msm8226 136 135 - qcom,scm-msm8660 137 136 - qcom,scm-msm8916 137 + - qcom,scm-msm8937 138 138 - qcom,scm-msm8953 139 139 - qcom,scm-msm8960 140 140 - qcom,scm-msm8974 ··· 179 177 - qcom,scm-mdm9607 180 178 - qcom,scm-msm8226 181 179 - qcom,scm-msm8916 180 + - qcom,scm-msm8937 182 181 - qcom,scm-msm8953 183 182 - qcom,scm-msm8974 184 183 - qcom,scm-msm8976
+6
Documentation/devicetree/bindings/gpu/apple,agx.yaml
··· 16 16 - apple,agx-g13g 17 17 - apple,agx-g13s 18 18 - apple,agx-g14g 19 + - apple,agx-g14s 19 20 - items: 20 21 - enum: 21 22 - apple,agx-g13c 22 23 - apple,agx-g13d 23 24 - const: apple,agx-g13s 25 + - items: 26 + - enum: 27 + - apple,agx-g14c 28 + - apple,agx-g14d 29 + - const: apple,agx-g14s 24 30 25 31 reg: 26 32 items:
+1
Documentation/devicetree/bindings/i2c/qcom,i2c-geni-qcom.yaml
··· 75 75 76 76 allOf: 77 77 - $ref: /schemas/i2c/i2c-controller.yaml# 78 + - $ref: /schemas/soc/qcom/qcom,se-common-props.yaml# 78 79 - if: 79 80 properties: 80 81 compatible:
+1
Documentation/devicetree/bindings/interrupt-controller/apple,aic2.yaml
··· 34 34 - enum: 35 35 - apple,t8112-aic 36 36 - apple,t6000-aic 37 + - apple,t6020-aic 37 38 - const: apple,aic2 38 39 39 40 interrupt-controller: true
+9 -5
Documentation/devicetree/bindings/iommu/apple,dart.yaml
··· 22 22 23 23 properties: 24 24 compatible: 25 - enum: 26 - - apple,t8103-dart 27 - - apple,t8103-usb4-dart 28 - - apple,t8110-dart 29 - - apple,t6000-dart 25 + oneOf: 26 + - enum: 27 + - apple,t8103-dart 28 + - apple,t8103-usb4-dart 29 + - apple,t8110-dart 30 + - apple,t6000-dart 31 + - items: 32 + - const: apple,t6020-dart 33 + - const: apple,t8110-dart 30 34 31 35 reg: 32 36 maxItems: 1
+4 -1
Documentation/devicetree/bindings/iommu/apple,sart.yaml
··· 30 30 compatible: 31 31 oneOf: 32 32 - items: 33 - - const: apple,t8112-sart 33 + - enum: 34 + - apple,t6020-sart 35 + - apple,t8112-sart 34 36 - const: apple,t6000-sart 35 37 - enum: 36 38 - apple,t6000-sart 39 + - apple,t8015-sart 37 40 - apple,t8103-sart 38 41 39 42 reg:
+8
Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
··· 31 31 - apple,t8103-asc-mailbox 32 32 - apple,t8112-asc-mailbox 33 33 - apple,t6000-asc-mailbox 34 + - apple,t6020-asc-mailbox 34 35 - const: apple,asc-mailbox-v4 36 + 37 + - description: 38 + An older ASC mailbox interface found on T2 and A11 that is also 39 + used for the NVMe coprocessor and the system management 40 + controller. 41 + items: 42 + - const: apple,t8015-asc-mailbox 35 43 36 44 - description: 37 45 M3 mailboxes are an older variant with a slightly different MMIO
+4
Documentation/devicetree/bindings/memory-controllers/brcm,brcmstb-memc-ddr.yaml
··· 42 42 items: 43 43 - const: brcm,brcmstb-memc-ddr-rev-b.1.x 44 44 - const: brcm,brcmstb-memc-ddr 45 + - description: Revision 0.x controllers 46 + items: 47 + - const: brcm,brcmstb-memc-ddr-rev-a.0.0 48 + - const: brcm,brcmstb-memc-ddr 45 49 46 50 reg: 47 51 maxItems: 1
+11
Documentation/devicetree/bindings/memory-controllers/nvidia,tegra210-emc.yaml
··· 33 33 items: 34 34 - description: EMC general interrupt 35 35 36 + "#interconnect-cells": 37 + const: 0 38 + 36 39 memory-region: 37 40 maxItems: 1 38 41 description: ··· 46 43 $ref: /schemas/types.yaml#/definitions/phandle 47 44 description: 48 45 phandle of the memory controller node 46 + 47 + operating-points-v2: 48 + description: 49 + Should contain freqs and voltages and opp-supported-hw property, which 50 + is a bitfield indicating SoC speedo ID mask. 49 51 50 52 required: 51 53 - compatible ··· 87 79 interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>; 88 80 memory-region = <&emc_table>; 89 81 nvidia,memory-controller = <&mc>; 82 + operating-points-v2 = <&dvfs_opp_table>; 83 + 84 + #interconnect-cells = <0>; 90 85 };
+11 -6
Documentation/devicetree/bindings/mfd/apple,smc.yaml
··· 15 15 16 16 properties: 17 17 compatible: 18 - items: 19 - - enum: 20 - - apple,t6000-smc 21 - - apple,t8103-smc 22 - - apple,t8112-smc 23 - - const: apple,smc 18 + oneOf: 19 + - items: 20 + - const: apple,t6020-smc 21 + - const: apple,t8103-smc 22 + - items: 23 + - enum: 24 + # Do not add additional SoC to this list. 25 + - apple,t6000-smc 26 + - apple,t8103-smc 27 + - apple,t8112-smc 28 + - const: apple,smc 24 29 25 30 reg: 26 31 items:
+1
Documentation/devicetree/bindings/net/bluetooth/brcm,bcm4377-bluetooth.yaml
··· 23 23 - pci14e4,5fa0 # BCM4377 24 24 - pci14e4,5f69 # BCM4378 25 25 - pci14e4,5f71 # BCM4387 26 + - pci14e4,5f72 # BCM4388 26 27 27 28 reg: 28 29 maxItems: 1
+1
Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
··· 53 53 - pci14e4,4488 # BCM4377 54 54 - pci14e4,4425 # BCM4378 55 55 - pci14e4,4433 # BCM4387 56 + - pci14e4,4434 # BCM4388 56 57 - pci14e4,449d # BCM43752 57 58 58 59 reg:
+18 -12
Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
··· 11 11 12 12 properties: 13 13 compatible: 14 - items: 15 - - enum: 16 - - apple,t8103-nvme-ans2 17 - - apple,t8112-nvme-ans2 18 - - apple,t6000-nvme-ans2 19 - - const: apple,nvme-ans2 14 + oneOf: 15 + - const: apple,t8015-nvme-ans2 16 + - items: 17 + - const: apple,t6020-nvme-ans2 18 + - const: apple,t8103-nvme-ans2 19 + - items: 20 + - enum: 21 + # Do not add additional SoC to this list. 22 + - apple,t8103-nvme-ans2 23 + - apple,t8112-nvme-ans2 24 + - apple,t6000-nvme-ans2 25 + - const: apple,nvme-ans2 20 26 21 27 reg: 22 28 items: ··· 73 67 compatible: 74 68 contains: 75 69 enum: 76 - - apple,t8103-nvme-ans2 77 - - apple,t8112-nvme-ans2 70 + - apple,t6000-nvme-ans2 71 + - apple,t6020-nvme-ans2 78 72 then: 79 73 properties: 80 74 power-domains: 81 - maxItems: 2 75 + minItems: 3 82 76 power-domain-names: 83 - maxItems: 2 77 + minItems: 3 84 78 else: 85 79 properties: 86 80 power-domains: 87 - minItems: 3 81 + maxItems: 2 88 82 power-domain-names: 89 - minItems: 3 83 + maxItems: 2 90 84 91 85 required: 92 86 - compatible
+16 -11
Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
··· 16 16 17 17 properties: 18 18 compatible: 19 - items: 20 - - enum: 21 - - apple,s5l8960x-pinctrl 22 - - apple,t7000-pinctrl 23 - - apple,s8000-pinctrl 24 - - apple,t8010-pinctrl 25 - - apple,t8015-pinctrl 26 - - apple,t8103-pinctrl 27 - - apple,t8112-pinctrl 28 - - apple,t6000-pinctrl 29 - - const: apple,pinctrl 19 + oneOf: 20 + - items: 21 + - const: apple,t6020-pinctrl 22 + - const: apple,t8103-pinctrl 23 + - items: 24 + # Do not add additional SoC to this list. 25 + - enum: 26 + - apple,s5l8960x-pinctrl 27 + - apple,t7000-pinctrl 28 + - apple,s8000-pinctrl 29 + - apple,t8010-pinctrl 30 + - apple,t8015-pinctrl 31 + - apple,t8103-pinctrl 32 + - apple,t8112-pinctrl 33 + - apple,t6000-pinctrl 34 + - const: apple,pinctrl 30 35 31 36 reg: 32 37 maxItems: 1
+16 -11
Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
··· 29 29 30 30 properties: 31 31 compatible: 32 - items: 33 - - enum: 34 - - apple,s5l8960x-pmgr-pwrstate 35 - - apple,t7000-pmgr-pwrstate 36 - - apple,s8000-pmgr-pwrstate 37 - - apple,t8010-pmgr-pwrstate 38 - - apple,t8015-pmgr-pwrstate 39 - - apple,t8103-pmgr-pwrstate 40 - - apple,t8112-pmgr-pwrstate 41 - - apple,t6000-pmgr-pwrstate 42 - - const: apple,pmgr-pwrstate 32 + oneOf: 33 + - items: 34 + - enum: 35 + # Do not add additional SoC to this list. 36 + - apple,s5l8960x-pmgr-pwrstate 37 + - apple,t7000-pmgr-pwrstate 38 + - apple,s8000-pmgr-pwrstate 39 + - apple,t8010-pmgr-pwrstate 40 + - apple,t8015-pmgr-pwrstate 41 + - apple,t8103-pmgr-pwrstate 42 + - apple,t8112-pmgr-pwrstate 43 + - apple,t6000-pmgr-pwrstate 44 + - const: apple,pmgr-pwrstate 45 + - items: 46 + - const: apple,t6020-pmgr-pwrstate 47 + - const: apple,t8103-pmgr-pwrstate 43 48 44 49 reg: 45 50 maxItems: 1
+3 -1
Documentation/devicetree/bindings/reset/brcm,bcm6345-reset.yaml
··· 13 13 14 14 properties: 15 15 compatible: 16 - const: brcm,bcm6345-reset 16 + enum: 17 + - brcm,bcm6345-reset 18 + - brcm,bcm63xx-ephy-ctrl 17 19 18 20 reg: 19 21 maxItems: 1
+1
Documentation/devicetree/bindings/serial/qcom,serial-geni-qcom.yaml
··· 12 12 13 13 allOf: 14 14 - $ref: /schemas/serial/serial.yaml# 15 + - $ref: /schemas/soc/qcom/qcom,se-common-props.yaml# 15 16 16 17 properties: 17 18 compatible:
+1 -1
Documentation/devicetree/bindings/soc/qcom/qcom,rpmh-rsc.yaml
··· 28 28 SLEEP - Triggered by F/W 29 29 WAKE - Triggered by F/W 30 30 CONTROL - Triggered by F/W 31 - See also:: <dt-bindings/soc/qcom,rpmh-rsc.h> 31 + See also: <dt-bindings/soc/qcom,rpmh-rsc.h> 32 32 33 33 The order in which they are described in the DT, should match the hardware 34 34 configuration.
+26
Documentation/devicetree/bindings/soc/qcom/qcom,se-common-props.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/soc/qcom/qcom,se-common-props.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: QUP Peripheral-specific properties for I2C, SPI and SERIAL bus 8 + 9 + description: 10 + The Generic Interface (GENI) based Qualcomm Universal Peripheral (QUP) is 11 + a programmable module that supports a wide range of serial interfaces 12 + such as UART, SPI, I2C, I3C, etc. This defines the common properties used 13 + across QUP-supported peripherals. 14 + 15 + maintainers: 16 + - Mukesh Kumar Savaliya <mukesh.savaliya@oss.qualcomm.com> 17 + - Viken Dadhaniya <viken.dadhaniya@oss.qualcomm.com> 18 + 19 + properties: 20 + qcom,enable-gsi-dma: 21 + $ref: /schemas/types.yaml#/definitions/flag 22 + description: 23 + Configure the Serial Engine (SE) to transfer data in QCOM GPI DMA mode. 24 + By default, FIFO mode (PIO/CPU DMA) will be selected. 25 + 26 + additionalProperties: true
+1
Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
··· 36 36 - items: 37 37 - enum: 38 38 - google,gs101-usi 39 + - samsung,exynos2200-usi 39 40 - samsung,exynosautov9-usi 40 41 - samsung,exynosautov920-usi 41 42 - const: samsung,exynos850-usi
+11 -6
Documentation/devicetree/bindings/sound/apple,mca.yaml
··· 19 19 20 20 properties: 21 21 compatible: 22 - items: 23 - - enum: 24 - - apple,t6000-mca 25 - - apple,t8103-mca 26 - - apple,t8112-mca 27 - - const: apple,mca 22 + oneOf: 23 + - items: 24 + - const: apple,t6020-mca 25 + - const: apple,t8103-mca 26 + - items: 27 + - enum: 28 + # Do not add additional SoC to this list. 29 + - apple,t6000-mca 30 + - apple,t8103-mca 31 + - apple,t8112-mca 32 + - const: apple,mca 28 33 29 34 reg: 30 35 items:
+10 -6
Documentation/devicetree/bindings/spi/apple,spi.yaml
··· 14 14 15 15 properties: 16 16 compatible: 17 - items: 18 - - enum: 19 - - apple,t8103-spi 20 - - apple,t8112-spi 21 - - apple,t6000-spi 22 - - const: apple,spi 17 + oneOf: 18 + - items: 19 + - const: apple,t6020-spi 20 + - const: apple,t8103-spi 21 + - items: 22 + - enum: 23 + - apple,t8103-spi 24 + - apple,t8112-spi 25 + - apple,t6000-spi 26 + - const: apple,spi 23 27 24 28 reg: 25 29 maxItems: 1
+1
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.yaml
··· 25 25 26 26 allOf: 27 27 - $ref: /schemas/spi/spi-controller.yaml# 28 + - $ref: /schemas/soc/qcom/qcom,se-common-props.yaml# 28 29 29 30 properties: 30 31 compatible:
+14 -6
Documentation/devicetree/bindings/spmi/apple,spmi.yaml
··· 16 16 17 17 properties: 18 18 compatible: 19 - items: 20 - - enum: 21 - - apple,t8103-spmi 22 - - apple,t6000-spmi 23 - - apple,t8112-spmi 24 - - const: apple,spmi 19 + oneOf: 20 + - items: 21 + - enum: 22 + - apple,t6020-spmi 23 + - apple,t8012-spmi 24 + - apple,t8015-spmi 25 + - const: apple,t8103-spmi 26 + - items: 27 + - enum: 28 + # Do not add additional SoC to this list. 29 + - apple,t8103-spmi 30 + - apple,t6000-spmi 31 + - apple,t8112-spmi 32 + - const: apple,spmi 25 33 26 34 reg: 27 35 maxItems: 1
+1
Documentation/devicetree/bindings/sram/qcom,imem.yaml
··· 18 18 items: 19 19 - enum: 20 20 - qcom,apq8064-imem 21 + - qcom,ipq5424-imem 21 22 - qcom,msm8226-imem 22 23 - qcom,msm8974-imem 23 24 - qcom,msm8976-imem
+16 -11
Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
··· 14 14 15 15 properties: 16 16 compatible: 17 - items: 18 - - enum: 19 - - apple,s5l8960x-wdt 20 - - apple,t7000-wdt 21 - - apple,s8000-wdt 22 - - apple,t8010-wdt 23 - - apple,t8015-wdt 24 - - apple,t8103-wdt 25 - - apple,t8112-wdt 26 - - apple,t6000-wdt 27 - - const: apple,wdt 17 + oneOf: 18 + - items: 19 + - const: apple,t6020-wdt 20 + - const: apple,t8103-wdt 21 + - items: 22 + - enum: 23 + # Do not add additional SoC to this list. 24 + - apple,s5l8960x-wdt 25 + - apple,t7000-wdt 26 + - apple,s8000-wdt 27 + - apple,t8010-wdt 28 + - apple,t8015-wdt 29 + - apple,t8103-wdt 30 + - apple,t8112-wdt 31 + - apple,t6000-wdt 32 + - const: apple,wdt 28 33 29 34 reg: 30 35 maxItems: 1
+1
Documentation/tee/index.rst
··· 11 11 op-tee 12 12 amd-tee 13 13 ts-tee 14 + qtee 14 15 15 16 .. only:: subproject and html 16 17
+96
Documentation/tee/qtee.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + ============================================= 4 + QTEE (Qualcomm Trusted Execution Environment) 5 + ============================================= 6 + 7 + The QTEE driver handles communication with Qualcomm TEE [1]. 8 + 9 + The lowest level of communication with QTEE builds on the ARM SMC Calling 10 + Convention (SMCCC) [2], which is the foundation for QTEE's Secure Channel 11 + Manager (SCM) [3] used internally by the driver. 12 + 13 + In a QTEE-based system, services are represented as objects with a series of 14 + operations that can be called to produce results, including other objects. 15 + 16 + When an object is hosted within QTEE, executing its operations is referred 17 + to as "direct invocation". QTEE can also invoke objects hosted in the non-secure 18 + world using a method known as "callback request". 19 + 20 + The SCM provides two functions to support direct invocation and callback requests: 21 + 22 + - QCOM_SCM_SMCINVOKE_INVOKE: Used for direct invocation. It can return either 23 + a result or initiate a callback request. 24 + - QCOM_SCM_SMCINVOKE_CB_RSP: Used to submit a response to a callback request 25 + triggered by a previous direct invocation. 26 + 27 + The QTEE Transport Message [4] is stacked on top of the SCM driver functions. 28 + 29 + A message consists of two buffers shared with QTEE: inbound and outbound 30 + buffers. The inbound buffer is used for direct invocation, and the outbound 31 + buffer is used to make callback requests. 
This picture shows the contents of 32 + a QTEE transport message:: 33 + 34 + +---------------------+ 35 + | v 36 + +-----------------+-------+-------+------+--------------------------+ 37 + | qcomtee_msg_ |object | buffer | | 38 + | object_invoke | id | offset, size | | (inbound buffer) 39 + +-----------------+-------+--------------+--------------------------+ 40 + <---- header -----><---- arguments ------><- in/out buffer payload -> 41 + 42 + +-----------+ 43 + | v 44 + +-----------------+-------+-------+------+----------------------+ 45 + | qcomtee_msg_ |object | buffer | | 46 + | callback | id | offset, size | | (outbound buffer) 47 + +-----------------+-------+--------------+----------------------+ 48 + 49 + Each buffer is started with a header and array of arguments. 50 + 51 + QTEE Transport Message supports four types of arguments: 52 + 53 + - Input Object (IO) is an object parameter to the current invocation 54 + or callback request. 55 + - Output Object (OO) is an object parameter from the current invocation 56 + or callback request. 57 + - Input Buffer (IB) is (offset, size) pair to the inbound or outbound region 58 + to store parameter to the current invocation or callback request. 59 + - Output Buffer (OB) is (offset, size) pair to the inbound or outbound region 60 + to store parameter from the current invocation or callback request. 
61 + 62 + Picture of the relationship between the different components in the QTEE 63 + architecture:: 64 + 65 + User space Kernel Secure world 66 + ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~ 67 + +--------+ +----------+ +--------------+ 68 + | Client | |callback | | Trusted | 69 + +--------+ |server | | Application | 70 + /\ +----------+ +--------------+ 71 + || +----------+ /\ /\ 72 + || |callback | || || 73 + || |server | || \/ 74 + || +----------+ || +--------------+ 75 + || /\ || | TEE Internal | 76 + || || || | API | 77 + \/ \/ \/ +--------+--------+ +--------------+ 78 + +---------------------+ | TEE | QTEE | | QTEE | 79 + | libqcomtee [5] | | subsys | driver | | Trusted OS | 80 + +-------+-------------+--+----+-------+----+-------------+--------------+ 81 + | Generic TEE API | | QTEE MSG | 82 + | IOCTL (TEE_IOC_*) | | SMCCC (QCOM_SCM_SMCINVOKE_*) | 83 + +-----------------------------+ +---------------------------------+ 84 + 85 + References 86 + ========== 87 + 88 + [1] https://docs.qualcomm.com/bundle/publicresource/topics/80-70015-11/qualcomm-trusted-execution-environment.html 89 + 90 + [2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html 91 + 92 + [3] drivers/firmware/qcom/qcom_scm.c 93 + 94 + [4] drivers/tee/qcomtee/qcomtee_msg.h 95 + 96 + [5] https://github.com/quic/quic-teec
+8 -1
MAINTAINERS
··· 21038 21038 F: drivers/net/ethernet/qualcomm/rmnet/ 21039 21039 F: include/linux/if_rmnet.h 21040 21040 21041 + QUALCOMM TEE (QCOMTEE) DRIVER 21042 + M: Amirreza Zarrabi <amirreza.zarrabi@oss.qualcomm.com> 21043 + L: linux-arm-msm@vger.kernel.org 21044 + S: Maintained 21045 + F: Documentation/tee/qtee.rst 21046 + F: drivers/tee/qcomtee/ 21047 + 21041 21048 QUALCOMM TRUST ZONE MEMORY ALLOCATOR 21042 21049 M: Bartosz Golaszewski <bartosz.golaszewski@linaro.org> 21043 21050 L: linux-arm-msm@vger.kernel.org ··· 21746 21739 RESET CONTROLLER FRAMEWORK 21747 21740 M: Philipp Zabel <p.zabel@pengutronix.de> 21748 21741 S: Maintained 21749 - T: git git://git.pengutronix.de/git/pza/linux 21742 + T: git https://git.pengutronix.de/git/pza/linux.git 21750 21743 F: Documentation/devicetree/bindings/reset/ 21751 21744 F: Documentation/driver-api/reset.rst 21752 21745 F: drivers/reset/
+37 -14
arch/arm64/Kconfig.platforms
··· 138 138 help 139 139 This enables support for ARMv8 based Samsung Exynos SoC family. 140 140 141 - config ARCH_SPARX5 142 - bool "Microchip Sparx5 SoC family" 143 - select PINCTRL 144 - select DW_APB_TIMER_OF 145 - help 146 - This enables support for the Microchip Sparx5 ARMv8-based 147 - SoC family of TSN-capable gigabit switches. 148 - 149 - The SparX-5 Ethernet switch family provides a rich set of 150 - switching features such as advanced TCAM-based VLAN and QoS 151 - processing enabling delivery of differentiated services, and 152 - security through TCAM-based frame processing using versatile 153 - content aware processor (VCAP). 154 - 155 141 config ARCH_K3 156 142 bool "Texas Instruments Inc. K3 multicore SoC architecture" 157 143 select SOC_TI ··· 178 192 help 179 193 This enables support for the arm64 based Amlogic SoCs 180 194 such as the s905, S905X/D, S912, A113X/D or S905X/D2 195 + 196 + menu "Microchip SoC support" 197 + 198 + config ARCH_MICROCHIP 199 + bool 200 + 201 + config ARCH_LAN969X 202 + bool "Microchip LAN969X SoC family" 203 + select PINCTRL 204 + select DW_APB_TIMER_OF 205 + select ARCH_MICROCHIP 206 + help 207 + This enables support for the Microchip LAN969X ARMv8-based 208 + SoC family of TSN-capable gigabit switches. 209 + 210 + The LAN969X Ethernet switch family provides a rich set of 211 + switching features such as advanced TCAM-based VLAN and QoS 212 + processing enabling delivery of differentiated services, and 213 + security through TCAM-based frame processing using versatile 214 + content aware processor (VCAP). 215 + 216 + config ARCH_SPARX5 217 + bool "Microchip Sparx5 SoC family" 218 + select PINCTRL 219 + select DW_APB_TIMER_OF 220 + select ARCH_MICROCHIP 221 + help 222 + This enables support for the Microchip Sparx5 ARMv8-based 223 + SoC family of TSN-capable gigabit switches. 
224 + 225 + The SparX-5 Ethernet switch family provides a rich set of 226 + switching features such as advanced TCAM-based VLAN and QoS 227 + processing enabling delivery of differentiated services, and 228 + security through TCAM-based frame processing using versatile 229 + content aware processor (VCAP). 230 + 231 + endmenu 181 232 182 233 config ARCH_MMP 183 234 bool "Marvell MMP SoC Family"
-1
arch/powerpc/platforms/Kconfig
··· 232 232 bool "QE GPIO support" 233 233 depends on QUICC_ENGINE 234 234 select GPIOLIB 235 - select OF_GPIO_MM_GPIOCHIP 236 235 help 237 236 Say Y here if you're going to use hardware that connects to the 238 237 QE GPIOs.
+6 -3
drivers/bus/fsl-mc/fsl-mc-bus.c
··· 176 176 { 177 177 struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); 178 178 179 - return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor, 180 - mc_dev->obj_desc.type); 179 + return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor, 180 + mc_dev->obj_desc.type); 181 181 } 182 182 static DEVICE_ATTR_RO(modalias); 183 183 ··· 203 203 { 204 204 struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); 205 205 206 - return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override); 206 + return sysfs_emit(buf, "%s\n", mc_dev->driver_override); 207 207 } 208 208 static DEVICE_ATTR_RW(driver_override); 209 209 ··· 1104 1104 * Get physical address of MC portal for the root DPRC: 1105 1105 */ 1106 1106 plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1107 + if (!plat_res) 1108 + return -EINVAL; 1109 + 1107 1110 mc_portal_phys_addr = plat_res->start; 1108 1111 mc_portal_size = resource_size(plat_res); 1109 1112 mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
+4 -4
drivers/cache/sifive_ccache.c
··· 151 151 if (!len) 152 152 return; 153 153 154 - mb(); 154 + mb(); /* complete earlier memory accesses before the cache flush */ 155 155 for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end; 156 156 line += SIFIVE_CCACHE_LINE_SIZE) { 157 157 #ifdef CONFIG_32BIT 158 - writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32); 158 + writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32); 159 159 #else 160 - writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64); 160 + writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64); 161 161 #endif 162 - mb(); 163 162 } 163 + mb(); /* issue later memory accesses after the cache flush */ 164 164 } 165 165 166 166 static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
+1 -1
drivers/char/hw_random/Kconfig
··· 77 77 78 78 config HW_RANDOM_ATMEL 79 79 tristate "Atmel Random Number Generator support" 80 - depends on (ARCH_AT91 || COMPILE_TEST) 80 + depends on (ARCH_MICROCHIP || COMPILE_TEST) 81 81 default HW_RANDOM 82 82 help 83 83 This driver provides kernel-side support for the Random Number
+995 -27
drivers/clk/clk-rp1.c
··· 368 368 struct clk_divider div; 369 369 }; 370 370 371 + static struct rp1_clk_desc *clk_audio_core; 372 + static struct rp1_clk_desc *clk_audio; 373 + static struct rp1_clk_desc *clk_i2s; 374 + static struct clk_hw *clk_xosc; 375 + 371 376 static inline 372 377 void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val) 373 378 { ··· 480 475 struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw); 481 476 struct rp1_clockman *clockman = pll_core->clockman; 482 477 const struct rp1_pll_core_data *data = pll_core->data; 483 - unsigned long calc_rate; 484 478 u32 fbdiv_int, fbdiv_frac; 485 479 486 480 /* Disable dividers to start with. */ ··· 488 484 clockman_write(clockman, data->fbdiv_frac_reg, 0); 489 485 spin_unlock(&clockman->regs_lock); 490 486 491 - calc_rate = get_pll_core_divider(hw, rate, parent_rate, 492 - &fbdiv_int, &fbdiv_frac); 487 + get_pll_core_divider(hw, rate, parent_rate, 488 + &fbdiv_int, &fbdiv_frac); 493 489 494 490 spin_lock(&clockman->regs_lock); 495 491 clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD); ··· 500 496 /* Check that reference frequency is no greater than VCO / 16. 
*/ 501 497 if (WARN_ON_ONCE(parent_rate > (rate / 16))) 502 498 return -ERANGE; 503 - 504 - pll_core->cached_rate = calc_rate; 505 499 506 500 spin_lock(&clockman->regs_lock); 507 501 /* Don't need to divide ref unless parent_rate > (output freq / 16) */ ··· 532 530 return calc_rate; 533 531 } 534 532 535 - static long rp1_pll_core_round_rate(struct clk_hw *hw, unsigned long rate, 536 - unsigned long *parent_rate) 533 + static int rp1_pll_core_determine_rate(struct clk_hw *hw, 534 + struct clk_rate_request *req) 537 535 { 538 536 u32 fbdiv_int, fbdiv_frac; 539 537 540 - return get_pll_core_divider(hw, rate, *parent_rate, 541 - &fbdiv_int, &fbdiv_frac); 538 + req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate, 539 + &fbdiv_int, 540 + &fbdiv_frac); 541 + 542 + return 0; 542 543 } 543 544 544 545 static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate, ··· 619 614 return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2); 620 615 } 621 616 622 - static long rp1_pll_round_rate(struct clk_hw *hw, unsigned long rate, 623 - unsigned long *parent_rate) 617 + static int rp1_pll_determine_rate(struct clk_hw *hw, 618 + struct clk_rate_request *req) 624 619 { 620 + struct clk_hw *clk_audio_hw = &clk_audio->hw; 625 621 u32 div1, div2; 626 622 627 - get_pll_prim_dividers(rate, *parent_rate, &div1, &div2); 623 + if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate) 624 + req->best_parent_rate = clk_audio_core->cached_rate; 628 625 629 - return DIV_ROUND_CLOSEST(*parent_rate, div1 * div2); 626 + get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2); 627 + 628 + req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2); 629 + 630 + return 0; 630 631 } 631 632 632 633 static int rp1_pll_ph_is_on(struct clk_hw *hw) ··· 682 671 return parent_rate / data->fixed_divider; 683 672 } 684 673 685 - static long rp1_pll_ph_round_rate(struct clk_hw *hw, unsigned long rate, 686 - unsigned long *parent_rate) 674 + 
static int rp1_pll_ph_determine_rate(struct clk_hw *hw, 675 + struct clk_rate_request *req) 687 676 { 688 677 struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw); 689 678 const struct rp1_pll_ph_data *data = pll_ph->data; 690 679 691 - return *parent_rate / data->fixed_divider; 680 + req->rate = req->best_parent_rate / data->fixed_divider; 681 + 682 + return 0; 692 683 } 693 684 694 685 static int rp1_pll_divider_is_on(struct clk_hw *hw) ··· 767 754 return clk_divider_ops.recalc_rate(hw, parent_rate); 768 755 } 769 756 770 - static long rp1_pll_divider_round_rate(struct clk_hw *hw, 771 - unsigned long rate, 772 - unsigned long *parent_rate) 757 + static int rp1_pll_divider_determine_rate(struct clk_hw *hw, 758 + struct clk_rate_request *req) 773 759 { 774 - return clk_divider_ops.round_rate(hw, rate, parent_rate); 760 + req->rate = clk_divider_ops.determine_rate(hw, req); 761 + 762 + return 0; 775 763 } 776 764 777 765 static int rp1_clock_is_on(struct clk_hw *hw) ··· 978 964 return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff); 979 965 } 980 966 967 + static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw, 968 + unsigned long target_rate, 969 + int *pdiv_prim, int *pdiv_clk) 970 + { 971 + static const int prim_divs[] = { 972 + 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16, 973 + 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49, 974 + }; 975 + const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc); 976 + const unsigned long core_min = xosc_rate * 16; 977 + const unsigned long core_max = 2400000000; 978 + int best_div_prim = 1, best_div_clk = 1; 979 + unsigned long best_rate = core_max + 1; 980 + unsigned long core_rate = 0; 981 + int div_int, div_frac; 982 + u64 div; 983 + int i; 984 + 985 + /* Given the target rate, choose a set of divisors/multipliers */ 986 + for (i = 0; i < ARRAY_SIZE(prim_divs); i++) { 987 + int div_prim = prim_divs[i]; 988 + int div_clk; 989 + 990 + for (div_clk = 1; div_clk <= 256; div_clk++) { 991 + 
core_rate = target_rate * div_clk * div_prim; 992 + if (core_rate >= core_min) { 993 + if (core_rate < best_rate) { 994 + best_rate = core_rate; 995 + best_div_prim = div_prim; 996 + best_div_clk = div_clk; 997 + } 998 + break; 999 + } 1000 + } 1001 + } 1002 + 1003 + if (best_rate < core_max) { 1004 + div = ((best_rate << 24) + xosc_rate / 2) / xosc_rate; 1005 + div_int = div >> 24; 1006 + div_frac = div % (1 << 24); 1007 + core_rate = (xosc_rate * ((div_int << 24) + div_frac) + (1 << 23)) >> 24; 1008 + } else { 1009 + core_rate = 0; 1010 + } 1011 + 1012 + if (pdiv_prim) 1013 + *pdiv_prim = best_div_prim; 1014 + if (pdiv_clk) 1015 + *pdiv_clk = best_div_clk; 1016 + 1017 + return core_rate; 1018 + } 1019 + 981 1020 static void rp1_clock_choose_div_and_prate(struct clk_hw *hw, 982 1021 int parent_idx, 983 1022 unsigned long rate, ··· 1039 972 { 1040 973 struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw); 1041 974 const struct rp1_clock_data *data = clock->data; 975 + struct clk_hw *clk_audio_hw = &clk_audio->hw; 976 + struct clk_hw *clk_i2s_hw = &clk_i2s->hw; 1042 977 struct clk_hw *parent; 1043 978 u32 div; 1044 979 u64 tmp; 1045 980 1046 981 parent = clk_hw_get_parent_by_index(hw, parent_idx); 982 + 983 + if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) { 984 + *prate = clk_audio->cached_rate; 985 + *calc_rate = rate; 986 + return; 987 + } 988 + 989 + if (hw == clk_i2s_hw && parent == clk_audio_hw) { 990 + unsigned long core_rate, audio_rate, i2s_rate; 991 + int div_prim, div_clk; 992 + 993 + core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk); 994 + audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim); 995 + i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk); 996 + clk_audio_core->cached_rate = core_rate; 997 + clk_audio->cached_rate = audio_rate; 998 + clk_i2s->cached_rate = i2s_rate; 999 + *prate = audio_rate; 1000 + *calc_rate = i2s_rate; 1001 + return; 1002 + } 1047 1003 1048 1004 *prate = 
clk_hw_get_rate(parent); 1049 1005 div = rp1_clock_choose_div(rate, *prate, data); ··· 1152 1062 return 0; 1153 1063 } 1154 1064 1065 + static int rp1_varsrc_set_rate(struct clk_hw *hw, 1066 + unsigned long rate, unsigned long parent_rate) 1067 + { 1068 + struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw); 1069 + 1070 + /* 1071 + * "varsrc" exists purely to let clock dividers know the frequency 1072 + * of an externally-managed clock source (such as MIPI DSI byte-clock) 1073 + * which may change at run-time as a side-effect of some other driver. 1074 + */ 1075 + clock->cached_rate = rate; 1076 + return 0; 1077 + } 1078 + 1079 + static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw, 1080 + unsigned long parent_rate) 1081 + { 1082 + struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw); 1083 + 1084 + return clock->cached_rate; 1085 + } 1086 + 1087 + static int rp1_varsrc_determine_rate(struct clk_hw *hw, 1088 + struct clk_rate_request *req) 1089 + { 1090 + return 0; 1091 + } 1092 + 1155 1093 static const struct clk_ops rp1_pll_core_ops = { 1156 1094 .is_prepared = rp1_pll_core_is_on, 1157 1095 .prepare = rp1_pll_core_on, 1158 1096 .unprepare = rp1_pll_core_off, 1159 1097 .set_rate = rp1_pll_core_set_rate, 1160 1098 .recalc_rate = rp1_pll_core_recalc_rate, 1161 - .round_rate = rp1_pll_core_round_rate, 1099 + .determine_rate = rp1_pll_core_determine_rate, 1162 1100 }; 1163 1101 1164 1102 static const struct clk_ops rp1_pll_ops = { 1165 1103 .set_rate = rp1_pll_set_rate, 1166 1104 .recalc_rate = rp1_pll_recalc_rate, 1167 - .round_rate = rp1_pll_round_rate, 1105 + .determine_rate = rp1_pll_determine_rate, 1168 1106 }; 1169 1107 1170 1108 static const struct clk_ops rp1_pll_ph_ops = { ··· 1200 1082 .prepare = rp1_pll_ph_on, 1201 1083 .unprepare = rp1_pll_ph_off, 1202 1084 .recalc_rate = rp1_pll_ph_recalc_rate, 1203 - .round_rate = rp1_pll_ph_round_rate, 1085 + .determine_rate = rp1_pll_ph_determine_rate, 1204 1086 }; 1205 1087 
1206 1088 static const struct clk_ops rp1_pll_divider_ops = { ··· 1209 1091 .unprepare = rp1_pll_divider_off, 1210 1092 .set_rate = rp1_pll_divider_set_rate, 1211 1093 .recalc_rate = rp1_pll_divider_recalc_rate, 1212 - .round_rate = rp1_pll_divider_round_rate, 1094 + .determine_rate = rp1_pll_divider_determine_rate, 1213 1095 }; 1214 1096 1215 1097 static const struct clk_ops rp1_clk_ops = { ··· 1222 1104 .set_rate_and_parent = rp1_clock_set_rate_and_parent, 1223 1105 .set_rate = rp1_clock_set_rate, 1224 1106 .determine_rate = rp1_clock_determine_rate, 1107 + }; 1108 + 1109 + static const struct clk_ops rp1_varsrc_ops = { 1110 + .set_rate = rp1_varsrc_set_rate, 1111 + .recalc_rate = rp1_varsrc_recalc_rate, 1112 + .determine_rate = rp1_varsrc_determine_rate, 1225 1113 }; 1226 1114 1227 1115 static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman, ··· 1365 1241 ) 1366 1242 ); 1367 1243 1244 + static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL( 1245 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1246 + "pll_audio", 1247 + (const struct clk_parent_data[]) { 1248 + { .hw = &pll_audio_core_desc.hw } 1249 + }, 1250 + &rp1_pll_ops, 1251 + CLK_SET_RATE_PARENT 1252 + ), 1253 + CLK_DATA(rp1_pll_data, 1254 + .ctrl_reg = PLL_AUDIO_PRIM, 1255 + .fc0_src = FC_NUM(4, 2), 1256 + ) 1257 + ); 1258 + 1259 + static struct rp1_clk_desc pll_video_desc = REGISTER_PLL( 1260 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1261 + "pll_video", 1262 + (const struct clk_parent_data[]) { 1263 + { .hw = &pll_video_core_desc.hw } 1264 + }, 1265 + &rp1_pll_ops, 1266 + 0 1267 + ), 1268 + CLK_DATA(rp1_pll_data, 1269 + .ctrl_reg = PLL_VIDEO_PRIM, 1270 + .fc0_src = FC_NUM(3, 2), 1271 + ) 1272 + ); 1273 + 1368 1274 static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV( 1369 1275 .hw.init = CLK_HW_INIT_PARENTS_DATA( 1370 1276 "pll_sys_sec", ··· 1410 1256 ) 1411 1257 ); 1412 1258 1259 + static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV( 1260 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 
1261 + "pll_video_sec", 1262 + (const struct clk_parent_data[]) { 1263 + { .hw = &pll_video_core_desc.hw } 1264 + }, 1265 + &rp1_pll_divider_ops, 1266 + 0 1267 + ), 1268 + CLK_DATA(rp1_pll_data, 1269 + .ctrl_reg = PLL_VIDEO_SEC, 1270 + .fc0_src = FC_NUM(5, 3), 1271 + ) 1272 + ); 1273 + 1274 + static const struct clk_parent_data clk_eth_tsu_parents[] = { 1275 + { .index = 0 }, 1276 + { .hw = &pll_video_sec_desc.hw }, 1277 + { .index = -1 }, 1278 + { .index = -1 }, 1279 + { .index = -1 }, 1280 + { .index = -1 }, 1281 + { .index = -1 }, 1282 + { .index = -1 }, 1283 + }; 1284 + 1413 1285 static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK( 1414 1286 .hw.init = CLK_HW_INIT_PARENTS_DATA( 1415 1287 "clk_eth_tsu", 1416 - (const struct clk_parent_data[]) { { .index = 0 } }, 1288 + clk_eth_tsu_parents, 1417 1289 &rp1_clk_ops, 1418 1290 0 1419 1291 ), 1420 1292 CLK_DATA(rp1_clock_data, 1421 1293 .num_std_parents = 0, 1422 - .num_aux_parents = 1, 1294 + .num_aux_parents = 8, 1423 1295 .ctrl_reg = CLK_ETH_TSU_CTRL, 1424 1296 .div_int_reg = CLK_ETH_TSU_DIV_INT, 1425 1297 .sel_reg = CLK_ETH_TSU_SEL, ··· 1458 1278 static const struct clk_parent_data clk_eth_parents[] = { 1459 1279 { .hw = &pll_sys_sec_desc.div.hw }, 1460 1280 { .hw = &pll_sys_desc.hw }, 1281 + { .hw = &pll_video_sec_desc.hw }, 1461 1282 }; 1462 1283 1463 1284 static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK( ··· 1470 1289 ), 1471 1290 CLK_DATA(rp1_clock_data, 1472 1291 .num_std_parents = 0, 1473 - .num_aux_parents = 2, 1292 + .num_aux_parents = 3, 1474 1293 .ctrl_reg = CLK_ETH_CTRL, 1475 1294 .div_int_reg = CLK_ETH_DIV_INT, 1476 1295 .sel_reg = CLK_ETH_SEL, ··· 1523 1342 ) 1524 1343 ); 1525 1344 1345 + static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL( 1346 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1347 + "pll_audio_pri_ph", 1348 + (const struct clk_parent_data[]) { 1349 + { .hw = &pll_audio_desc.hw } 1350 + }, 1351 + &rp1_pll_ph_ops, 1352 + 0 1353 + ), 1354 + CLK_DATA(rp1_pll_ph_data, 1355 
+ .ph_reg = PLL_AUDIO_PRIM, 1356 + .fixed_divider = 2, 1357 + .phase = RP1_PLL_PHASE_0, 1358 + .fc0_src = FC_NUM(5, 1), 1359 + ) 1360 + ); 1361 + 1362 + static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL( 1363 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1364 + "pll_video_pri_ph", 1365 + (const struct clk_parent_data[]) { 1366 + { .hw = &pll_video_desc.hw } 1367 + }, 1368 + &rp1_pll_ph_ops, 1369 + 0 1370 + ), 1371 + CLK_DATA(rp1_pll_ph_data, 1372 + .ph_reg = PLL_VIDEO_PRIM, 1373 + .fixed_divider = 2, 1374 + .phase = RP1_PLL_PHASE_0, 1375 + .fc0_src = FC_NUM(4, 3), 1376 + ) 1377 + ); 1378 + 1379 + static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV( 1380 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1381 + "pll_audio_sec", 1382 + (const struct clk_parent_data[]) { 1383 + { .hw = &pll_audio_core_desc.hw } 1384 + }, 1385 + &rp1_pll_divider_ops, 1386 + 0 1387 + ), 1388 + CLK_DATA(rp1_pll_data, 1389 + .ctrl_reg = PLL_AUDIO_SEC, 1390 + .fc0_src = FC_NUM(6, 2), 1391 + ) 1392 + ); 1393 + 1394 + static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV( 1395 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1396 + "pll_audio_tern", 1397 + (const struct clk_parent_data[]) { 1398 + { .hw = &pll_audio_core_desc.hw } 1399 + }, 1400 + &rp1_pll_divider_ops, 1401 + 0 1402 + ), 1403 + CLK_DATA(rp1_pll_data, 1404 + .ctrl_reg = PLL_AUDIO_TERN, 1405 + .fc0_src = FC_NUM(6, 2), 1406 + ) 1407 + ); 1408 + 1409 + static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK( 1410 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1411 + "clk_slow_sys", 1412 + (const struct clk_parent_data[]) { { .index = 0 } }, 1413 + &rp1_clk_ops, 1414 + CLK_IS_CRITICAL 1415 + ), 1416 + CLK_DATA(rp1_clock_data, 1417 + .num_std_parents = 1, 1418 + .num_aux_parents = 0, 1419 + .ctrl_reg = CLK_SLOW_SYS_CTRL, 1420 + .div_int_reg = CLK_SLOW_SYS_DIV_INT, 1421 + .sel_reg = CLK_SLOW_SYS_SEL, 1422 + .div_int_max = DIV_INT_8BIT_MAX, 1423 + .max_freq = 50 * HZ_PER_MHZ, 1424 + .fc0_src = FC_NUM(1, 4), 1425 + 
.clk_src_mask = 0x1, 1426 + ) 1427 + ); 1428 + 1429 + static const struct clk_parent_data clk_dma_parents[] = { 1430 + { .hw = &pll_sys_pri_ph_desc.hw }, 1431 + { .hw = &pll_video_desc.hw }, 1432 + { .index = 0 }, 1433 + }; 1434 + 1435 + static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK( 1436 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1437 + "clk_dma", 1438 + clk_dma_parents, 1439 + &rp1_clk_ops, 1440 + 0 1441 + ), 1442 + CLK_DATA(rp1_clock_data, 1443 + .num_std_parents = 0, 1444 + .num_aux_parents = 3, 1445 + .ctrl_reg = CLK_DMA_CTRL, 1446 + .div_int_reg = CLK_DMA_DIV_INT, 1447 + .sel_reg = CLK_DMA_SEL, 1448 + .div_int_max = DIV_INT_8BIT_MAX, 1449 + .max_freq = 100 * HZ_PER_MHZ, 1450 + .fc0_src = FC_NUM(2, 2), 1451 + ) 1452 + ); 1453 + 1454 + static const struct clk_parent_data clk_uart_parents[] = { 1455 + { .hw = &pll_sys_pri_ph_desc.hw }, 1456 + { .hw = &pll_video_desc.hw }, 1457 + { .index = 0 }, 1458 + }; 1459 + 1460 + static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK( 1461 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1462 + "clk_uart", 1463 + clk_uart_parents, 1464 + &rp1_clk_ops, 1465 + 0 1466 + ), 1467 + CLK_DATA(rp1_clock_data, 1468 + .num_std_parents = 0, 1469 + .num_aux_parents = 3, 1470 + .ctrl_reg = CLK_UART_CTRL, 1471 + .div_int_reg = CLK_UART_DIV_INT, 1472 + .sel_reg = CLK_UART_SEL, 1473 + .div_int_max = DIV_INT_8BIT_MAX, 1474 + .max_freq = 100 * HZ_PER_MHZ, 1475 + .fc0_src = FC_NUM(6, 7), 1476 + ) 1477 + ); 1478 + 1479 + static const struct clk_parent_data clk_pwm0_parents[] = { 1480 + { .index = -1 }, 1481 + { .hw = &pll_video_sec_desc.hw }, 1482 + { .index = 0 }, 1483 + }; 1484 + 1485 + static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK( 1486 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1487 + "clk_pwm0", 1488 + clk_pwm0_parents, 1489 + &rp1_clk_ops, 1490 + 0 1491 + ), 1492 + CLK_DATA(rp1_clock_data, 1493 + .num_std_parents = 0, 1494 + .num_aux_parents = 3, 1495 + .ctrl_reg = CLK_PWM0_CTRL, 1496 + .div_int_reg = CLK_PWM0_DIV_INT, 1497 + 
.div_frac_reg = CLK_PWM0_DIV_FRAC, 1498 + .sel_reg = CLK_PWM0_SEL, 1499 + .div_int_max = DIV_INT_16BIT_MAX, 1500 + .max_freq = 76800 * HZ_PER_KHZ, 1501 + .fc0_src = FC_NUM(0, 5), 1502 + ) 1503 + ); 1504 + 1505 + static const struct clk_parent_data clk_pwm1_parents[] = { 1506 + { .index = -1 }, 1507 + { .hw = &pll_video_sec_desc.hw }, 1508 + { .index = 0 }, 1509 + }; 1510 + 1511 + static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK( 1512 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1513 + "clk_pwm1", 1514 + clk_pwm1_parents, 1515 + &rp1_clk_ops, 1516 + 0 1517 + ), 1518 + CLK_DATA(rp1_clock_data, 1519 + .num_std_parents = 0, 1520 + .num_aux_parents = 3, 1521 + .ctrl_reg = CLK_PWM1_CTRL, 1522 + .div_int_reg = CLK_PWM1_DIV_INT, 1523 + .div_frac_reg = CLK_PWM1_DIV_FRAC, 1524 + .sel_reg = CLK_PWM1_SEL, 1525 + .div_int_max = DIV_INT_16BIT_MAX, 1526 + .max_freq = 76800 * HZ_PER_KHZ, 1527 + .fc0_src = FC_NUM(1, 5), 1528 + ) 1529 + ); 1530 + 1531 + static const struct clk_parent_data clk_audio_in_parents[] = { 1532 + { .index = -1 }, 1533 + { .index = -1 }, 1534 + { .index = -1 }, 1535 + { .hw = &pll_video_sec_desc.hw }, 1536 + { .index = 0 }, 1537 + }; 1538 + 1539 + static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK( 1540 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1541 + "clk_audio_in", 1542 + clk_audio_in_parents, 1543 + &rp1_clk_ops, 1544 + 0 1545 + ), 1546 + CLK_DATA(rp1_clock_data, 1547 + .num_std_parents = 0, 1548 + .num_aux_parents = 5, 1549 + .ctrl_reg = CLK_AUDIO_IN_CTRL, 1550 + .div_int_reg = CLK_AUDIO_IN_DIV_INT, 1551 + .sel_reg = CLK_AUDIO_IN_SEL, 1552 + .div_int_max = DIV_INT_8BIT_MAX, 1553 + .max_freq = 76800 * HZ_PER_KHZ, 1554 + .fc0_src = FC_NUM(2, 5), 1555 + ) 1556 + ); 1557 + 1558 + static const struct clk_parent_data clk_audio_out_parents[] = { 1559 + { .index = -1 }, 1560 + { .index = -1 }, 1561 + { .hw = &pll_video_sec_desc.hw }, 1562 + { .index = 0 }, 1563 + }; 1564 + 1565 + static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK( 1566 + 
.hw.init = CLK_HW_INIT_PARENTS_DATA( 1567 + "clk_audio_out", 1568 + clk_audio_out_parents, 1569 + &rp1_clk_ops, 1570 + 0 1571 + ), 1572 + CLK_DATA(rp1_clock_data, 1573 + .num_std_parents = 0, 1574 + .num_aux_parents = 4, 1575 + .ctrl_reg = CLK_AUDIO_OUT_CTRL, 1576 + .div_int_reg = CLK_AUDIO_OUT_DIV_INT, 1577 + .sel_reg = CLK_AUDIO_OUT_SEL, 1578 + .div_int_max = DIV_INT_8BIT_MAX, 1579 + .max_freq = 153600 * HZ_PER_KHZ, 1580 + .fc0_src = FC_NUM(3, 5), 1581 + ) 1582 + ); 1583 + 1584 + static const struct clk_parent_data clk_i2s_parents[] = { 1585 + { .index = 0 }, 1586 + { .hw = &pll_audio_desc.hw }, 1587 + { .hw = &pll_audio_sec_desc.hw }, 1588 + }; 1589 + 1590 + static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK( 1591 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1592 + "clk_i2s", 1593 + clk_i2s_parents, 1594 + &rp1_clk_ops, 1595 + CLK_SET_RATE_PARENT 1596 + ), 1597 + CLK_DATA(rp1_clock_data, 1598 + .num_std_parents = 0, 1599 + .num_aux_parents = 3, 1600 + .ctrl_reg = CLK_I2S_CTRL, 1601 + .div_int_reg = CLK_I2S_DIV_INT, 1602 + .sel_reg = CLK_I2S_SEL, 1603 + .div_int_max = DIV_INT_8BIT_MAX, 1604 + .max_freq = 50 * HZ_PER_MHZ, 1605 + .fc0_src = FC_NUM(4, 4), 1606 + ) 1607 + ); 1608 + 1609 + static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK( 1610 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1611 + "clk_mipi0_cfg", 1612 + (const struct clk_parent_data[]) { { .index = 0 } }, 1613 + &rp1_clk_ops, 1614 + 0 1615 + ), 1616 + CLK_DATA(rp1_clock_data, 1617 + .num_std_parents = 0, 1618 + .num_aux_parents = 1, 1619 + .ctrl_reg = CLK_MIPI0_CFG_CTRL, 1620 + .div_int_reg = CLK_MIPI0_CFG_DIV_INT, 1621 + .sel_reg = CLK_MIPI0_CFG_SEL, 1622 + .div_int_max = DIV_INT_8BIT_MAX, 1623 + .max_freq = 50 * HZ_PER_MHZ, 1624 + .fc0_src = FC_NUM(4, 5), 1625 + ) 1626 + ); 1627 + 1628 + static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK( 1629 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1630 + "clk_mipi1_cfg", 1631 + (const struct clk_parent_data[]) { { .index = 0 } }, 1632 + &rp1_clk_ops, 
1633 + 0 1634 + ), 1635 + CLK_DATA(rp1_clock_data, 1636 + .num_std_parents = 0, 1637 + .num_aux_parents = 1, 1638 + .ctrl_reg = CLK_MIPI1_CFG_CTRL, 1639 + .div_int_reg = CLK_MIPI1_CFG_DIV_INT, 1640 + .sel_reg = CLK_MIPI1_CFG_SEL, 1641 + .div_int_max = DIV_INT_8BIT_MAX, 1642 + .max_freq = 50 * HZ_PER_MHZ, 1643 + .fc0_src = FC_NUM(5, 6), 1644 + .clk_src_mask = 0x1, 1645 + ) 1646 + ); 1647 + 1648 + static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK( 1649 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1650 + "clk_adc", 1651 + (const struct clk_parent_data[]) { { .index = 0 } }, 1652 + &rp1_clk_ops, 1653 + 0 1654 + ), 1655 + CLK_DATA(rp1_clock_data, 1656 + .num_std_parents = 0, 1657 + .num_aux_parents = 1, 1658 + .ctrl_reg = CLK_ADC_CTRL, 1659 + .div_int_reg = CLK_ADC_DIV_INT, 1660 + .sel_reg = CLK_ADC_SEL, 1661 + .div_int_max = DIV_INT_8BIT_MAX, 1662 + .max_freq = 50 * HZ_PER_MHZ, 1663 + .fc0_src = FC_NUM(5, 5), 1664 + ) 1665 + ); 1666 + 1667 + static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK( 1668 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1669 + "clk_sdio_timer", 1670 + (const struct clk_parent_data[]) { { .index = 0 } }, 1671 + &rp1_clk_ops, 1672 + 0 1673 + ), 1674 + CLK_DATA(rp1_clock_data, 1675 + .num_std_parents = 0, 1676 + .num_aux_parents = 1, 1677 + .ctrl_reg = CLK_SDIO_TIMER_CTRL, 1678 + .div_int_reg = CLK_SDIO_TIMER_DIV_INT, 1679 + .sel_reg = CLK_SDIO_TIMER_SEL, 1680 + .div_int_max = DIV_INT_8BIT_MAX, 1681 + .max_freq = 50 * HZ_PER_MHZ, 1682 + .fc0_src = FC_NUM(3, 4), 1683 + ) 1684 + ); 1685 + 1686 + static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK( 1687 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1688 + "clk_sdio_alt_src", 1689 + (const struct clk_parent_data[]) { 1690 + { .hw = &pll_sys_desc.hw } 1691 + }, 1692 + &rp1_clk_ops, 1693 + 0 1694 + ), 1695 + CLK_DATA(rp1_clock_data, 1696 + .num_std_parents = 0, 1697 + .num_aux_parents = 1, 1698 + .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL, 1699 + .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT, 1700 + .sel_reg = 
CLK_SDIO_ALT_SRC_SEL, 1701 + .div_int_max = DIV_INT_8BIT_MAX, 1702 + .max_freq = 200 * HZ_PER_MHZ, 1703 + .fc0_src = FC_NUM(5, 4), 1704 + ) 1705 + ); 1706 + 1707 + static const struct clk_parent_data clk_dpi_parents[] = { 1708 + { .hw = &pll_sys_desc.hw }, 1709 + { .hw = &pll_video_sec_desc.hw }, 1710 + { .hw = &pll_video_desc.hw }, 1711 + { .index = -1 }, 1712 + { .index = -1 }, 1713 + { .index = -1 }, 1714 + { .index = -1 }, 1715 + { .index = -1 }, 1716 + }; 1717 + 1718 + static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK( 1719 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1720 + "clk_dpi", 1721 + clk_dpi_parents, 1722 + &rp1_clk_ops, 1723 + CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */ 1724 + ), 1725 + CLK_DATA(rp1_clock_data, 1726 + .num_std_parents = 0, 1727 + .num_aux_parents = 8, 1728 + .ctrl_reg = VIDEO_CLK_DPI_CTRL, 1729 + .div_int_reg = VIDEO_CLK_DPI_DIV_INT, 1730 + .sel_reg = VIDEO_CLK_DPI_SEL, 1731 + .div_int_max = DIV_INT_8BIT_MAX, 1732 + .max_freq = 200 * HZ_PER_MHZ, 1733 + .fc0_src = FC_NUM(1, 6), 1734 + ) 1735 + ); 1736 + 1737 + static const struct clk_parent_data clk_gp0_parents[] = { 1738 + { .index = 0 }, 1739 + { .index = -1 }, 1740 + { .index = -1 }, 1741 + { .index = -1 }, 1742 + { .index = -1 }, 1743 + { .index = -1 }, 1744 + { .hw = &pll_sys_desc.hw }, 1745 + { .index = -1 }, 1746 + { .index = -1 }, 1747 + { .index = -1 }, 1748 + { .hw = &clk_i2s_desc.hw }, 1749 + { .hw = &clk_adc_desc.hw }, 1750 + { .index = -1 }, 1751 + { .index = -1 }, 1752 + { .index = -1 }, 1753 + { .hw = &clk_sys_desc.hw }, 1754 + }; 1755 + 1756 + static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK( 1757 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1758 + "clk_gp0", 1759 + clk_gp0_parents, 1760 + &rp1_clk_ops, 1761 + 0 1762 + ), 1763 + CLK_DATA(rp1_clock_data, 1764 + .num_std_parents = 0, 1765 + .num_aux_parents = 16, 1766 + .oe_mask = BIT(0), 1767 + .ctrl_reg = CLK_GP0_CTRL, 1768 + .div_int_reg = CLK_GP0_DIV_INT, 1769 + .div_frac_reg = CLK_GP0_DIV_FRAC, 1770 + 
.sel_reg = CLK_GP0_SEL, 1771 + .div_int_max = DIV_INT_16BIT_MAX, 1772 + .max_freq = 100 * HZ_PER_MHZ, 1773 + .fc0_src = FC_NUM(0, 1), 1774 + ) 1775 + ); 1776 + 1777 + static const struct clk_parent_data clk_gp1_parents[] = { 1778 + { .hw = &clk_sdio_timer_desc.hw }, 1779 + { .index = -1 }, 1780 + { .index = -1 }, 1781 + { .index = -1 }, 1782 + { .index = -1 }, 1783 + { .index = -1 }, 1784 + { .hw = &pll_sys_pri_ph_desc.hw }, 1785 + { .index = -1 }, 1786 + { .index = -1 }, 1787 + { .index = -1 }, 1788 + { .hw = &clk_adc_desc.hw }, 1789 + { .hw = &clk_dpi_desc.hw }, 1790 + { .hw = &clk_pwm0_desc.hw }, 1791 + { .index = -1 }, 1792 + { .index = -1 }, 1793 + { .index = -1 }, 1794 + }; 1795 + 1796 + static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK( 1797 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1798 + "clk_gp1", 1799 + clk_gp1_parents, 1800 + &rp1_clk_ops, 1801 + 0 1802 + ), 1803 + CLK_DATA(rp1_clock_data, 1804 + .num_std_parents = 0, 1805 + .num_aux_parents = 16, 1806 + .oe_mask = BIT(1), 1807 + .ctrl_reg = CLK_GP1_CTRL, 1808 + .div_int_reg = CLK_GP1_DIV_INT, 1809 + .div_frac_reg = CLK_GP1_DIV_FRAC, 1810 + .sel_reg = CLK_GP1_SEL, 1811 + .div_int_max = DIV_INT_16BIT_MAX, 1812 + .max_freq = 100 * HZ_PER_MHZ, 1813 + .fc0_src = FC_NUM(1, 1), 1814 + ) 1815 + ); 1816 + 1817 + static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK( 1818 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1819 + "clksrc_mipi0_dsi_byteclk", 1820 + (const struct clk_parent_data[]) { { .index = 0 } }, 1821 + &rp1_varsrc_ops, 1822 + 0 1823 + ), 1824 + CLK_DATA(rp1_clock_data, 1825 + .num_std_parents = 1, 1826 + .num_aux_parents = 0, 1827 + ) 1828 + ); 1829 + 1830 + static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK( 1831 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1832 + "clksrc_mipi1_dsi_byteclk", 1833 + (const struct clk_parent_data[]) { { .index = 0 } }, 1834 + &rp1_varsrc_ops, 1835 + 0 1836 + ), 1837 + CLK_DATA(rp1_clock_data, 1838 + .num_std_parents = 1, 1839 + 
.num_aux_parents = 0, 1840 + ) 1841 + ); 1842 + 1843 + static const struct clk_parent_data clk_mipi0_dpi_parents[] = { 1844 + { .hw = &pll_sys_desc.hw }, 1845 + { .hw = &pll_video_sec_desc.hw }, 1846 + { .hw = &pll_video_desc.hw }, 1847 + { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw }, 1848 + { .index = -1 }, 1849 + { .index = -1 }, 1850 + { .index = -1 }, 1851 + { .index = -1 }, 1852 + }; 1853 + 1854 + static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK( 1855 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1856 + "clk_mipi0_dpi", 1857 + clk_mipi0_dpi_parents, 1858 + &rp1_clk_ops, 1859 + CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */ 1860 + ), 1861 + CLK_DATA(rp1_clock_data, 1862 + .num_std_parents = 0, 1863 + .num_aux_parents = 8, 1864 + .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL, 1865 + .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT, 1866 + .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC, 1867 + .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL, 1868 + .div_int_max = DIV_INT_8BIT_MAX, 1869 + .max_freq = 200 * HZ_PER_MHZ, 1870 + .fc0_src = FC_NUM(2, 6), 1871 + ) 1872 + ); 1873 + 1874 + static const struct clk_parent_data clk_mipi1_dpi_parents[] = { 1875 + { .hw = &pll_sys_desc.hw }, 1876 + { .hw = &pll_video_sec_desc.hw }, 1877 + { .hw = &pll_video_desc.hw }, 1878 + { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw }, 1879 + { .index = -1 }, 1880 + { .index = -1 }, 1881 + { .index = -1 }, 1882 + { .index = -1 }, 1883 + }; 1884 + 1885 + static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK( 1886 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1887 + "clk_mipi1_dpi", 1888 + clk_mipi1_dpi_parents, 1889 + &rp1_clk_ops, 1890 + CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */ 1891 + ), 1892 + CLK_DATA(rp1_clock_data, 1893 + .num_std_parents = 0, 1894 + .num_aux_parents = 8, 1895 + .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL, 1896 + .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT, 1897 + .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC, 1898 + .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL, 1899 + .div_int_max = 
DIV_INT_8BIT_MAX, 1900 + .max_freq = 200 * HZ_PER_MHZ, 1901 + .fc0_src = FC_NUM(3, 6), 1902 + ) 1903 + ); 1904 + 1905 + static const struct clk_parent_data clk_gp2_parents[] = { 1906 + { .hw = &clk_sdio_alt_src_desc.hw }, 1907 + { .index = -1 }, 1908 + { .index = -1 }, 1909 + { .index = -1 }, 1910 + { .index = -1 }, 1911 + { .index = -1 }, 1912 + { .hw = &pll_sys_sec_desc.hw }, 1913 + { .index = -1 }, 1914 + { .hw = &pll_video_desc.hw }, 1915 + { .hw = &clk_audio_in_desc.hw }, 1916 + { .hw = &clk_dpi_desc.hw }, 1917 + { .hw = &clk_pwm0_desc.hw }, 1918 + { .hw = &clk_pwm1_desc.hw }, 1919 + { .hw = &clk_mipi0_dpi_desc.hw }, 1920 + { .hw = &clk_mipi1_cfg_desc.hw }, 1921 + { .hw = &clk_sys_desc.hw }, 1922 + }; 1923 + 1924 + static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK( 1925 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1926 + "clk_gp2", 1927 + clk_gp2_parents, 1928 + &rp1_clk_ops, 1929 + 0 1930 + ), 1931 + CLK_DATA(rp1_clock_data, 1932 + .num_std_parents = 0, 1933 + .num_aux_parents = 16, 1934 + .oe_mask = BIT(2), 1935 + .ctrl_reg = CLK_GP2_CTRL, 1936 + .div_int_reg = CLK_GP2_DIV_INT, 1937 + .div_frac_reg = CLK_GP2_DIV_FRAC, 1938 + .sel_reg = CLK_GP2_SEL, 1939 + .div_int_max = DIV_INT_16BIT_MAX, 1940 + .max_freq = 100 * HZ_PER_MHZ, 1941 + .fc0_src = FC_NUM(2, 1), 1942 + ) 1943 + ); 1944 + 1945 + static const struct clk_parent_data clk_gp3_parents[] = { 1946 + { .index = 0 }, 1947 + { .index = -1 }, 1948 + { .index = -1 }, 1949 + { .index = -1 }, 1950 + { .index = -1 }, 1951 + { .index = -1 }, 1952 + { .index = -1 }, 1953 + { .index = -1 }, 1954 + { .hw = &pll_video_pri_ph_desc.hw }, 1955 + { .hw = &clk_audio_out_desc.hw }, 1956 + { .index = -1 }, 1957 + { .index = -1 }, 1958 + { .hw = &clk_mipi1_dpi_desc.hw }, 1959 + { .index = -1 }, 1960 + { .index = -1 }, 1961 + { .index = -1 }, 1962 + }; 1963 + 1964 + static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK( 1965 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 1966 + "clk_gp3", 1967 + clk_gp3_parents, 1968 + &rp1_clk_ops, 
1969 + 0 1970 + ), 1971 + CLK_DATA(rp1_clock_data, 1972 + .num_std_parents = 0, 1973 + .num_aux_parents = 16, 1974 + .oe_mask = BIT(3), 1975 + .ctrl_reg = CLK_GP3_CTRL, 1976 + .div_int_reg = CLK_GP3_DIV_INT, 1977 + .div_frac_reg = CLK_GP3_DIV_FRAC, 1978 + .sel_reg = CLK_GP3_SEL, 1979 + .div_int_max = DIV_INT_16BIT_MAX, 1980 + .max_freq = 100 * HZ_PER_MHZ, 1981 + .fc0_src = FC_NUM(3, 1), 1982 + ) 1983 + ); 1984 + 1985 + static const struct clk_parent_data clk_gp4_parents[] = { 1986 + { .index = 0 }, 1987 + { .index = -1 }, 1988 + { .index = -1 }, 1989 + { .index = -1 }, 1990 + { .index = -1 }, 1991 + { .index = -1 }, 1992 + { .index = -1 }, 1993 + { .hw = &pll_video_sec_desc.hw }, 1994 + { .index = -1 }, 1995 + { .index = -1 }, 1996 + { .index = -1 }, 1997 + { .hw = &clk_mipi0_cfg_desc.hw }, 1998 + { .hw = &clk_uart_desc.hw }, 1999 + { .index = -1 }, 2000 + { .index = -1 }, 2001 + { .hw = &clk_sys_desc.hw }, 2002 + }; 2003 + 2004 + static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK( 2005 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 2006 + "clk_gp4", 2007 + clk_gp4_parents, 2008 + &rp1_clk_ops, 2009 + 0 2010 + ), 2011 + CLK_DATA(rp1_clock_data, 2012 + .num_std_parents = 0, 2013 + .num_aux_parents = 16, 2014 + .oe_mask = BIT(4), 2015 + .ctrl_reg = CLK_GP4_CTRL, 2016 + .div_int_reg = CLK_GP4_DIV_INT, 2017 + .div_frac_reg = CLK_GP4_DIV_FRAC, 2018 + .sel_reg = CLK_GP4_SEL, 2019 + .div_int_max = DIV_INT_16BIT_MAX, 2020 + .max_freq = 100 * HZ_PER_MHZ, 2021 + .fc0_src = FC_NUM(4, 1), 2022 + ) 2023 + ); 2024 + 2025 + static const struct clk_parent_data clk_vec_parents[] = { 2026 + { .hw = &pll_sys_pri_ph_desc.hw }, 2027 + { .hw = &pll_video_sec_desc.hw }, 2028 + { .hw = &pll_video_desc.hw }, 2029 + { .index = -1 }, 2030 + { .index = -1 }, 2031 + { .index = -1 }, 2032 + { .index = -1 }, 2033 + { .index = -1 }, 2034 + }; 2035 + 2036 + static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK( 2037 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 2038 + "clk_vec", 2039 + clk_vec_parents, 2040 
+ &rp1_clk_ops, 2041 + CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */ 2042 + ), 2043 + CLK_DATA(rp1_clock_data, 2044 + .num_std_parents = 0, 2045 + .num_aux_parents = 8, 2046 + .ctrl_reg = VIDEO_CLK_VEC_CTRL, 2047 + .div_int_reg = VIDEO_CLK_VEC_DIV_INT, 2048 + .sel_reg = VIDEO_CLK_VEC_SEL, 2049 + .div_int_max = DIV_INT_8BIT_MAX, 2050 + .max_freq = 108 * HZ_PER_MHZ, 2051 + .fc0_src = FC_NUM(0, 6), 2052 + ) 2053 + ); 2054 + 2055 + static const struct clk_parent_data clk_gp5_parents[] = { 2056 + { .index = 0 }, 2057 + { .index = -1 }, 2058 + { .index = -1 }, 2059 + { .index = -1 }, 2060 + { .index = -1 }, 2061 + { .index = -1 }, 2062 + { .index = -1 }, 2063 + { .hw = &pll_video_sec_desc.hw }, 2064 + { .hw = &clk_eth_tsu_desc.hw }, 2065 + { .index = -1 }, 2066 + { .hw = &clk_vec_desc.hw }, 2067 + { .index = -1 }, 2068 + { .index = -1 }, 2069 + { .index = -1 }, 2070 + { .index = -1 }, 2071 + { .index = -1 }, 2072 + }; 2073 + 2074 + static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK( 2075 + .hw.init = CLK_HW_INIT_PARENTS_DATA( 2076 + "clk_gp5", 2077 + clk_gp5_parents, 2078 + &rp1_clk_ops, 2079 + 0 2080 + ), 2081 + CLK_DATA(rp1_clock_data, 2082 + .num_std_parents = 0, 2083 + .num_aux_parents = 16, 2084 + .oe_mask = BIT(5), 2085 + .ctrl_reg = CLK_GP5_CTRL, 2086 + .div_int_reg = CLK_GP5_DIV_INT, 2087 + .div_frac_reg = CLK_GP5_DIV_FRAC, 2088 + .sel_reg = CLK_GP5_SEL, 2089 + .div_int_max = DIV_INT_16BIT_MAX, 2090 + .max_freq = 100 * HZ_PER_MHZ, 2091 + .fc0_src = FC_NUM(5, 1), 2092 + ) 2093 + ); 2094 + 1526 2095 static struct rp1_clk_desc *const clk_desc_array[] = { 1527 2096 [RP1_PLL_SYS_CORE] = &pll_sys_core_desc, 1528 2097 [RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc, ··· 2283 1352 [RP1_CLK_SYS] = &clk_sys_desc, 2284 1353 [RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc, 2285 1354 [RP1_PLL_SYS_SEC] = &pll_sys_sec_desc, 1355 + [RP1_PLL_AUDIO] = &pll_audio_desc, 1356 + [RP1_PLL_VIDEO] = &pll_video_desc, 1357 + [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc, 1358 + 
[RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc, 1359 + [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc, 1360 + [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc, 1361 + [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc, 1362 + [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc, 1363 + [RP1_CLK_DMA] = &clk_dma_desc, 1364 + [RP1_CLK_UART] = &clk_uart_desc, 1365 + [RP1_CLK_PWM0] = &clk_pwm0_desc, 1366 + [RP1_CLK_PWM1] = &clk_pwm1_desc, 1367 + [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc, 1368 + [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc, 1369 + [RP1_CLK_I2S] = &clk_i2s_desc, 1370 + [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc, 1371 + [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc, 1372 + [RP1_CLK_ADC] = &clk_adc_desc, 1373 + [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc, 1374 + [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc, 1375 + [RP1_CLK_GP0] = &clk_gp0_desc, 1376 + [RP1_CLK_GP1] = &clk_gp1_desc, 1377 + [RP1_CLK_GP2] = &clk_gp2_desc, 1378 + [RP1_CLK_GP3] = &clk_gp3_desc, 1379 + [RP1_CLK_GP4] = &clk_gp4_desc, 1380 + [RP1_CLK_GP5] = &clk_gp5_desc, 1381 + [RP1_CLK_VEC] = &clk_vec_desc, 1382 + [RP1_CLK_DPI] = &clk_dpi_desc, 1383 + [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc, 1384 + [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc, 1385 + [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc, 1386 + [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc, 2286 1387 }; 2287 1388 2288 1389 static const struct regmap_range rp1_reg_ranges[] = { ··· 2428 1465 if (desc && desc->clk_register && desc->data) 2429 1466 hws[i] = desc->clk_register(clockman, desc); 2430 1467 } 1468 + 1469 + clk_audio_core = &pll_audio_core_desc; 1470 + clk_audio = &pll_audio_desc; 1471 + clk_i2s = &clk_i2s_desc; 1472 + clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0); 2431 1473 2432 1474 platform_set_drvdata(pdev, clockman); 2433 1475
+1 -1
drivers/crypto/Kconfig
··· 439 439 440 440 config CRYPTO_DEV_ATMEL_AES 441 441 tristate "Support for Atmel AES hw accelerator" 442 - depends on ARCH_AT91 || COMPILE_TEST 442 + depends on ARCH_MICROCHIP || COMPILE_TEST 443 443 select CRYPTO_AES 444 444 select CRYPTO_AEAD 445 445 select CRYPTO_SKCIPHER
+4
drivers/dma-buf/dma-heap.c
··· 11 11 #include <linux/dma-buf.h> 12 12 #include <linux/dma-heap.h> 13 13 #include <linux/err.h> 14 + #include <linux/export.h> 14 15 #include <linux/list.h> 15 16 #include <linux/nospec.h> 16 17 #include <linux/syscalls.h> ··· 203 202 { 204 203 return heap->priv; 205 204 } 205 + EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP"); 206 206 207 207 /** 208 208 * dma_heap_get_name - get heap name ··· 216 214 { 217 215 return heap->name; 218 216 } 217 + EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP"); 219 218 220 219 /** 221 220 * dma_heap_add - adds a heap to dmabuf heaps ··· 306 303 kfree(heap); 307 304 return err_ret; 308 305 } 306 + EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP"); 309 307 310 308 static char *dma_heap_devnode(const struct device *dev, umode_t *mode) 311 309 {
+6 -7
drivers/firmware/arm_scmi/bus.c
··· 401 401 402 402 static void __scmi_device_destroy(struct scmi_device *scmi_dev) 403 403 { 404 - pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n", 405 - of_node_full_name(scmi_dev->dev.parent->of_node), 404 + pr_debug("(%pOF) Destroying SCMI device '%s' for protocol 0x%x (%s)\n", 405 + scmi_dev->dev.parent->of_node, 406 406 dev_name(&scmi_dev->dev), scmi_dev->protocol_id, 407 407 scmi_dev->name); 408 408 ··· 474 474 if (retval) 475 475 goto put_dev; 476 476 477 - pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n", 478 - of_node_full_name(parent->of_node), 479 - dev_name(&scmi_dev->dev), protocol, name); 477 + pr_debug("(%pOF) Created SCMI device '%s' for protocol 0x%x (%s)\n", 478 + parent->of_node, dev_name(&scmi_dev->dev), protocol, name); 480 479 481 480 return scmi_dev; 482 481 put_dev: ··· 492 493 493 494 sdev = __scmi_device_create(np, parent, protocol, name); 494 495 if (!sdev) 495 - pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n", 496 - of_node_full_name(parent->of_node), protocol, name); 496 + pr_err("(%pOF) Failed to create device for protocol 0x%x (%s)\n", 497 + parent->of_node, protocol, name); 497 498 498 499 return sdev; 499 500 }
+10 -5
drivers/firmware/arm_scmi/quirks.c
··· 71 71 */ 72 72 73 73 #include <linux/ctype.h> 74 + #include <linux/cleanup.h> 74 75 #include <linux/device.h> 75 76 #include <linux/export.h> 76 77 #include <linux/hashtable.h> ··· 90 89 struct scmi_quirk { 91 90 bool enabled; 92 91 const char *name; 93 - char *vendor; 94 - char *sub_vendor_id; 95 - char *impl_ver_range; 92 + const char *vendor; 93 + const char *sub_vendor_id; 94 + const char *impl_ver_range; 96 95 u32 start_range; 97 96 u32 end_range; 98 97 struct static_key_false *key; ··· 218 217 219 218 static int scmi_quirk_range_parse(struct scmi_quirk *quirk) 220 219 { 221 - const char *last, *first = quirk->impl_ver_range; 220 + const char *last, *first __free(kfree) = NULL; 222 221 size_t len; 223 222 char *sep; 224 223 int ret; ··· 229 228 if (!len) 230 229 return 0; 231 230 231 + first = kmemdup(quirk->impl_ver_range, len + 1, GFP_KERNEL); 232 + if (!first) 233 + return -ENOMEM; 234 + 232 235 last = first + len - 1; 233 - sep = strchr(quirk->impl_ver_range, '-'); 236 + sep = strchr(first, '-'); 234 237 if (sep) 235 238 *sep = '\0'; 236 239
+3 -4
drivers/firmware/arm_scmi/transports/mailbox.c
··· 127 127 (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) || 128 128 (num_mb == 4 && num_sh != 2)) { 129 129 dev_warn(cdev, 130 - "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n", 131 - of_node_full_name(np), num_mb, num_sh); 130 + "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n", 131 + np, num_mb, num_sh); 132 132 return -EINVAL; 133 133 } 134 134 ··· 140 140 of_parse_phandle(np, "shmem", 1); 141 141 142 142 if (!np_tx || !np_rx || np_tx == np_rx) { 143 - dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", 144 - of_node_full_name(np)); 143 + dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n", np); 145 144 ret = -EINVAL; 146 145 } 147 146 }
+1 -1
drivers/firmware/arm_scmi/transports/optee.c
··· 498 498 mutex_unlock(&channel->mu); 499 499 } 500 500 501 - static struct scmi_transport_ops scmi_optee_ops = { 501 + static const struct scmi_transport_ops scmi_optee_ops = { 502 502 .chan_available = scmi_optee_chan_available, 503 503 .chan_setup = scmi_optee_chan_setup, 504 504 .chan_free = scmi_optee_chan_free,
+3
drivers/firmware/arm_scmi/transports/virtio.c
··· 871 871 /* Ensure initialized scmi_vdev is visible */ 872 872 smp_store_mb(scmi_vdev, vdev); 873 873 874 + /* Set device ready */ 875 + virtio_device_ready(vdev); 876 + 874 877 ret = platform_driver_register(&scmi_virtio_driver); 875 878 if (ret) { 876 879 vdev->priv = NULL;
+111
drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
··· 25 25 enum scmi_imx_misc_protocol_cmd { 26 26 SCMI_IMX_MISC_CTRL_SET = 0x3, 27 27 SCMI_IMX_MISC_CTRL_GET = 0x4, 28 + SCMI_IMX_MISC_DISCOVER_BUILD_INFO = 0x6, 28 29 SCMI_IMX_MISC_CTRL_NOTIFY = 0x8, 30 + SCMI_IMX_MISC_CFG_INFO_GET = 0xC, 31 + SCMI_IMX_MISC_BOARD_INFO = 0xE, 29 32 }; 30 33 31 34 struct scmi_imx_misc_info { ··· 66 63 struct scmi_imx_misc_ctrl_get_out { 67 64 __le32 num; 68 65 __le32 val[]; 66 + }; 67 + 68 + struct scmi_imx_misc_buildinfo_out { 69 + __le32 buildnum; 70 + __le32 buildcommit; 71 + #define MISC_MAX_BUILDDATE 16 72 + u8 builddate[MISC_MAX_BUILDDATE]; 73 + #define MISC_MAX_BUILDTIME 16 74 + u8 buildtime[MISC_MAX_BUILDTIME]; 75 + }; 76 + 77 + struct scmi_imx_misc_board_info_out { 78 + __le32 attributes; 79 + #define MISC_MAX_BRDNAME 16 80 + u8 brdname[MISC_MAX_BRDNAME]; 81 + }; 82 + 83 + struct scmi_imx_misc_cfg_info_out { 84 + __le32 msel; 85 + #define MISC_MAX_CFGNAME 16 86 + u8 cfgname[MISC_MAX_CFGNAME]; 69 87 }; 70 88 71 89 static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph, ··· 296 272 return ret; 297 273 } 298 274 275 + static int scmi_imx_misc_build_info_discover(const struct scmi_protocol_handle *ph) 276 + { 277 + char date[MISC_MAX_BUILDDATE], time[MISC_MAX_BUILDTIME]; 278 + struct scmi_imx_misc_buildinfo_out *out; 279 + struct scmi_xfer *t; 280 + int ret; 281 + 282 + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_DISCOVER_BUILD_INFO, 0, 283 + sizeof(*out), &t); 284 + if (ret) 285 + return ret; 286 + 287 + ret = ph->xops->do_xfer(ph, t); 288 + if (!ret) { 289 + out = t->rx.buf; 290 + strscpy(date, out->builddate, MISC_MAX_BUILDDATE); 291 + strscpy(time, out->buildtime, MISC_MAX_BUILDTIME); 292 + dev_info(ph->dev, "SM Version\t= Build %u, Commit %08x %s %s\n", 293 + le32_to_cpu(out->buildnum), le32_to_cpu(out->buildcommit), 294 + date, time); 295 + } 296 + 297 + ph->xops->xfer_put(ph, t); 298 + 299 + return ret; 300 + } 301 + 302 + static int scmi_imx_misc_board_info(const struct scmi_protocol_handle 
*ph) 303 + { 304 + struct scmi_imx_misc_board_info_out *out; 305 + char name[MISC_MAX_BRDNAME]; 306 + struct scmi_xfer *t; 307 + int ret; 308 + 309 + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_BOARD_INFO, 0, sizeof(*out), &t); 310 + if (ret) 311 + return ret; 312 + 313 + ret = ph->xops->do_xfer(ph, t); 314 + if (!ret) { 315 + out = t->rx.buf; 316 + strscpy(name, out->brdname, MISC_MAX_BRDNAME); 317 + dev_info(ph->dev, "Board\t\t= %s, attr=0x%08x\n", 318 + name, le32_to_cpu(out->attributes)); 319 + } 320 + 321 + ph->xops->xfer_put(ph, t); 322 + 323 + return ret; 324 + } 325 + 326 + static int scmi_imx_misc_cfg_info_get(const struct scmi_protocol_handle *ph) 327 + { 328 + struct scmi_imx_misc_cfg_info_out *out; 329 + char name[MISC_MAX_CFGNAME]; 330 + struct scmi_xfer *t; 331 + int ret; 332 + 333 + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CFG_INFO_GET, 0, sizeof(*out), &t); 334 + if (ret) 335 + return ret; 336 + 337 + ret = ph->xops->do_xfer(ph, t); 338 + if (!ret) { 339 + out = t->rx.buf; 340 + strscpy(name, out->cfgname, MISC_MAX_CFGNAME); 341 + dev_info(ph->dev, "SM Config\t= %s, mSel = %u\n", 342 + name, le32_to_cpu(out->msel)); 343 + } 344 + 345 + ph->xops->xfer_put(ph, t); 346 + 347 + return ret; 348 + } 349 + 299 350 static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = { 300 351 .misc_ctrl_set = scmi_imx_misc_ctrl_set, 301 352 .misc_ctrl_get = scmi_imx_misc_ctrl_get, ··· 396 297 397 298 ret = scmi_imx_misc_attributes_get(ph, minfo); 398 299 if (ret) 300 + return ret; 301 + 302 + ret = scmi_imx_misc_build_info_discover(ph); 303 + if (ret && ret != -EOPNOTSUPP) 304 + return ret; 305 + 306 + ret = scmi_imx_misc_board_info(ph); 307 + if (ret && ret != -EOPNOTSUPP) 308 + return ret; 309 + 310 + ret = scmi_imx_misc_cfg_info_get(ph); 311 + if (ret && ret != -EOPNOTSUPP) 399 312 return ret; 400 313 401 314 return ph->set_priv(ph, minfo, version);
+25
drivers/firmware/arm_scmi/vendors/imx/imx95.rst
··· 1660 1660 |Name |Description | 1661 1661 +--------------------+---------------------------------------------------------+ 1662 1662 |int32 status |SUCCESS: system log return | 1663 + | |NOT_SUPPORTED: system log not available | 1663 1664 +--------------------+---------------------------------------------------------+ 1664 1665 |uint32 numLogflags |Descriptor for the log data returned by this call. | 1665 1666 | |Bits[31:20] Number of remaining log words. | ··· 1669 1668 | |call | 1670 1669 +--------------------+---------------------------------------------------------+ 1671 1670 |uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags| 1671 + +--------------------+---------------------------------------------------------+ 1672 + 1673 + MISC_BOARD_INFO 1674 + ~~~~~~~~~~~~~~~ 1675 + 1676 + message_id: 0xE 1677 + protocol_id: 0x84 1678 + 1679 + +--------------------+---------------------------------------------------------+ 1680 + |Return values | 1681 + +--------------------+---------------------------------------------------------+ 1682 + |Name |Description | 1683 + +--------------------+---------------------------------------------------------+ 1684 + |int32 status |SUCCESS: config name return | 1685 + | |NOT_SUPPORTED: name not available | 1686 + +--------------------+---------------------------------------------------------+ 1687 + |uint32 attributes |Board-specific attributes reserved for future expansion | 1688 + | |without breaking backwards compatibility. The firmware | 1689 + | |sets the value to 0 | 1690 + +--------------------+---------------------------------------------------------+ 1691 + |uint8 boardname[16] |Board name. NULL terminated ASCII string, up to 16 bytes | 1692 + | |in length. This is System Manager(SM) firmware-exported | 1693 + | |board-name and may not align with the board name in the | 1694 + | |device tree. 
| 1672 1695 +--------------------+---------------------------------------------------------+ 1673 1696 1674 1697 NEGOTIATE_PROTOCOL_VERSION
+1 -1
drivers/firmware/arm_scmi/voltage.c
··· 393 393 return vinfo->num_domains; 394 394 } 395 395 396 - static struct scmi_voltage_proto_ops voltage_proto_ops = { 396 + static const struct scmi_voltage_proto_ops voltage_proto_ops = { 397 397 .num_domains_get = scmi_voltage_domains_num_get, 398 398 .info_get = scmi_voltage_info_get, 399 399 .config_set = scmi_voltage_config_set,
+1 -1
drivers/firmware/broadcom/bcm47xx_sprom.c
··· 404 404 ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb); 405 405 ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb); 406 406 } 407 - #undef ENTRY /* It's specififc, uses local variable, don't use it (again). */ 407 + #undef ENTRY /* It's specific, uses local variable, don't use it (again). */ 408 408 409 409 static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom, 410 410 const char *prefix, bool fallback)
+1 -1
drivers/firmware/meson/Kconfig
··· 5 5 config MESON_SM 6 6 tristate "Amlogic Secure Monitor driver" 7 7 depends on ARCH_MESON || COMPILE_TEST 8 - default y 8 + default ARCH_MESON 9 9 depends on ARM64_4K_PAGES 10 10 help 11 11 Say y here to enable the Amlogic secure monitor driver
+6 -1
drivers/firmware/meson/meson_sm.c
··· 232 232 struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node) 233 233 { 234 234 struct platform_device *pdev = of_find_device_by_node(sm_node); 235 + struct meson_sm_firmware *fw; 235 236 236 237 if (!pdev) 237 238 return NULL; 238 239 239 - return platform_get_drvdata(pdev); 240 + fw = platform_get_drvdata(pdev); 241 + 242 + put_device(&pdev->dev); 243 + 244 + return fw; 240 245 } 241 246 EXPORT_SYMBOL_GPL(meson_sm_get); 242 247
+124 -1
drivers/firmware/qcom/qcom_scm.c
··· 1119 1119 if (ret) { 1120 1120 dev_err(__scm->dev, 1121 1121 "Assign memory protection call failed %d\n", ret); 1122 - return -EINVAL; 1122 + return ret; 1123 1123 } 1124 1124 1125 1125 *srcvm = next_vm; ··· 1994 1994 { .compatible = "asus,vivobook-s15" }, 1995 1995 { .compatible = "asus,zenbook-a14-ux3407qa" }, 1996 1996 { .compatible = "asus,zenbook-a14-ux3407ra" }, 1997 + { .compatible = "dell,inspiron-14-plus-7441" }, 1998 + { .compatible = "dell,latitude-7455" }, 1997 1999 { .compatible = "dell,xps13-9345" }, 1998 2000 { .compatible = "hp,elitebook-ultra-g1q" }, 1999 2001 { .compatible = "hp,omnibook-x14" }, 2000 2002 { .compatible = "huawei,gaokun3" }, 2001 2003 { .compatible = "lenovo,flex-5g" }, 2004 + { .compatible = "lenovo,thinkbook-16" }, 2002 2005 { .compatible = "lenovo,thinkpad-t14s" }, 2003 2006 { .compatible = "lenovo,thinkpad-x13s", }, 2004 2007 { .compatible = "lenovo,yoga-slim7x" }, ··· 2009 2006 { .compatible = "microsoft,blackrock" }, 2010 2007 { .compatible = "microsoft,romulus13", }, 2011 2008 { .compatible = "microsoft,romulus15", }, 2009 + { .compatible = "qcom,hamoa-iot-evk" }, 2012 2010 { .compatible = "qcom,sc8180x-primus" }, 2013 2011 { .compatible = "qcom,x1e001de-devkit" }, 2014 2012 { .compatible = "qcom,x1e80100-crd" }, ··· 2096 2092 } 2097 2093 2098 2094 #endif /* CONFIG_QCOM_QSEECOM */ 2095 + 2096 + /** 2097 + * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object. 2098 + * @inbuf: start address of memory area used for inbound buffer. 2099 + * @inbuf_size: size of the memory area used for inbound buffer. 2100 + * @outbuf: start address of memory area used for outbound buffer. 2101 + * @outbuf_size: size of the memory area used for outbound buffer. 2102 + * @result: result of QTEE object invocation. 2103 + * @response_type: response type returned by QTEE. 2104 + * 2105 + * @response_type determines how the contents of @inbuf and @outbuf 2106 + * should be processed. 2107 + * 2108 + * Return: On success, return 0 or <0 on failure. 
2109 + */ 2110 + int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size, 2111 + phys_addr_t outbuf, size_t outbuf_size, 2112 + u64 *result, u64 *response_type) 2113 + { 2114 + struct qcom_scm_desc desc = { 2115 + .svc = QCOM_SCM_SVC_SMCINVOKE, 2116 + .cmd = QCOM_SCM_SMCINVOKE_INVOKE, 2117 + .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2118 + .args[0] = inbuf, 2119 + .args[1] = inbuf_size, 2120 + .args[2] = outbuf, 2121 + .args[3] = outbuf_size, 2122 + .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL, 2123 + QCOM_SCM_RW, QCOM_SCM_VAL), 2124 + }; 2125 + struct qcom_scm_res res; 2126 + int ret; 2127 + 2128 + ret = qcom_scm_call(__scm->dev, &desc, &res); 2129 + if (ret) 2130 + return ret; 2131 + 2132 + if (response_type) 2133 + *response_type = res.result[0]; 2134 + 2135 + if (result) 2136 + *result = res.result[1]; 2137 + 2138 + return 0; 2139 + } 2140 + EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc); 2141 + 2142 + /** 2143 + * qcom_scm_qtee_callback_response() - Submit response for callback request. 2144 + * @buf: start address of memory area used for outbound buffer. 2145 + * @buf_size: size of the memory area used for outbound buffer. 2146 + * @result: Result of QTEE object invocation. 2147 + * @response_type: Response type returned by QTEE. 2148 + * 2149 + * @response_type determines how the contents of @buf should be processed. 2150 + * 2151 + * Return: On success, return 0 or <0 on failure. 
2152 + */ 2153 + int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size, 2154 + u64 *result, u64 *response_type) 2155 + { 2156 + struct qcom_scm_desc desc = { 2157 + .svc = QCOM_SCM_SVC_SMCINVOKE, 2158 + .cmd = QCOM_SCM_SMCINVOKE_CB_RSP, 2159 + .owner = ARM_SMCCC_OWNER_TRUSTED_OS, 2160 + .args[0] = buf, 2161 + .args[1] = buf_size, 2162 + .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL), 2163 + }; 2164 + struct qcom_scm_res res; 2165 + int ret; 2166 + 2167 + ret = qcom_scm_call(__scm->dev, &desc, &res); 2168 + if (ret) 2169 + return ret; 2170 + 2171 + if (response_type) 2172 + *response_type = res.result[0]; 2173 + 2174 + if (result) 2175 + *result = res.result[1]; 2176 + 2177 + return 0; 2178 + } 2179 + EXPORT_SYMBOL(qcom_scm_qtee_callback_response); 2180 + 2181 + static void qcom_scm_qtee_free(void *data) 2182 + { 2183 + struct platform_device *qtee_dev = data; 2184 + 2185 + platform_device_unregister(qtee_dev); 2186 + } 2187 + 2188 + static void qcom_scm_qtee_init(struct qcom_scm *scm) 2189 + { 2190 + struct platform_device *qtee_dev; 2191 + u64 result, response_type; 2192 + int ret; 2193 + 2194 + /* 2195 + * Probe for smcinvoke support. This will fail due to invalid buffers, 2196 + * but first, it checks whether the call is supported in QTEE syscall 2197 + * handler. If it is not supported, -EIO is returned. 2198 + */ 2199 + ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type); 2200 + if (ret == -EIO) 2201 + return; 2202 + 2203 + /* Setup QTEE interface device. 
*/ 2204 + qtee_dev = platform_device_register_data(scm->dev, "qcomtee", 2205 + PLATFORM_DEVID_NONE, NULL, 0); 2206 + if (IS_ERR(qtee_dev)) 2207 + return; 2208 + 2209 + devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev); 2210 + } 2099 2211 2100 2212 /** 2101 2213 * qcom_scm_is_available() - Checks if SCM is available ··· 2444 2324 */ 2445 2325 ret = qcom_scm_qseecom_init(scm); 2446 2326 WARN(ret < 0, "failed to initialize qseecom: %d\n", ret); 2327 + 2328 + /* Initialize the QTEE object interface. */ 2329 + qcom_scm_qtee_init(scm); 2447 2330 2448 2331 return 0; 2449 2332 }
+7
drivers/firmware/qcom/qcom_scm.h
··· 156 156 #define QCOM_SCM_SVC_GPU 0x28 157 157 #define QCOM_SCM_SVC_GPU_INIT_REGS 0x01 158 158 159 + /* ARM_SMCCC_OWNER_TRUSTED_OS calls */ 160 + 161 + #define QCOM_SCM_SVC_SMCINVOKE 0x06 162 + #define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00 163 + #define QCOM_SCM_SMCINVOKE_CB_RSP 0x01 164 + #define QCOM_SCM_SMCINVOKE_INVOKE 0x02 165 + 159 166 /* common error codes */ 160 167 #define QCOM_SCM_V2_EBUSY -12 161 168 #define QCOM_SCM_ENOMEM -5
+53 -11
drivers/firmware/qcom/qcom_tzmem.c
··· 77 77 78 78 /* List of machines that are known to not support SHM bridge correctly. */ 79 79 static const char *const qcom_tzmem_blacklist[] = { 80 + "qcom,sc7180", /* hang in rmtfs memory assignment */ 80 81 "qcom,sc8180x", 81 82 "qcom,sdm670", /* failure in GPU firmware loading */ 82 83 "qcom,sdm845", /* reset in rmtfs memory assignment */ ··· 110 109 return 0; 111 110 } 112 111 113 - static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) 112 + /** 113 + * qcom_tzmem_shm_bridge_create() - Create a SHM bridge. 114 + * @paddr: Physical address of the memory to share. 115 + * @size: Size of the memory to share. 116 + * @handle: Handle to the SHM bridge. 117 + * 118 + * On platforms that support SHM bridge, this function creates a SHM bridge 119 + * for the given memory region with QTEE. The handle returned by this function 120 + * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge. 121 + * 122 + * Return: On success, returns 0; on failure, returns < 0. 123 + */ 124 + int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle) 114 125 { 115 126 u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags; 116 127 int ret; ··· 130 117 if (!qcom_tzmem_using_shm_bridge) 131 118 return 0; 132 119 133 - pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; 134 - ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW; 135 - size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT); 120 + pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW; 121 + ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW; 122 + size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT); 123 + 124 + ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm, 125 + size_and_flags, QCOM_SCM_VMID_HLOS, 126 + handle); 127 + if (ret) { 128 + dev_err(qcom_tzmem_dev, 129 + "SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n", 130 + ret, &paddr, size); 131 + 132 + return ret; 133 + } 134 + 135 + return 0; 136 + } 137 + EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create); 
138 + 139 + /** 140 + * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge. 141 + * @handle: Handle to the SHM bridge. 142 + * 143 + * On platforms that support SHM bridge, this function deletes the SHM bridge 144 + * for the given memory region. The handle must be the same as the one 145 + * returned by qcom_tzmem_shm_bridge_create(). 146 + */ 147 + void qcom_tzmem_shm_bridge_delete(u64 handle) 148 + { 149 + if (qcom_tzmem_using_shm_bridge) 150 + qcom_scm_shm_bridge_delete(handle); 151 + } 152 + EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete); 153 + 154 + static int qcom_tzmem_init_area(struct qcom_tzmem_area *area) 155 + { 156 + int ret; 136 157 137 158 u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL); 138 159 if (!handle) 139 160 return -ENOMEM; 140 161 141 - ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm, 142 - size_and_flags, QCOM_SCM_VMID_HLOS, 143 - handle); 162 + ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle); 144 163 if (ret) 145 164 return ret; 146 165 ··· 185 140 { 186 141 u64 *handle = area->priv; 187 142 188 - if (!qcom_tzmem_using_shm_bridge) 189 - return; 190 - 191 - qcom_scm_shm_bridge_delete(*handle); 143 + qcom_tzmem_shm_bridge_delete(*handle); 192 144 kfree(handle); 193 145 } 194 146
+20 -5
drivers/firmware/samsung/exynos-acpm-pmic.c
··· 4 4 * Copyright 2020 Google LLC. 5 5 * Copyright 2024 Linaro Ltd. 6 6 */ 7 + #include <linux/array_size.h> 7 8 #include <linux/bitfield.h> 9 + #include <linux/errno.h> 8 10 #include <linux/firmware/samsung/exynos-acpm-protocol.h> 9 11 #include <linux/ktime.h> 10 12 #include <linux/types.h> ··· 34 32 ACPM_PMIC_BULK_READ, 35 33 ACPM_PMIC_BULK_WRITE, 36 34 }; 35 + 36 + static const int acpm_pmic_linux_errmap[] = { 37 + [0] = 0, /* ACPM_PMIC_SUCCESS */ 38 + [1] = -EACCES, /* Read register can't be accessed or issues to access it. */ 39 + [2] = -EACCES, /* Write register can't be accessed or issues to access it. */ 40 + }; 41 + 42 + static int acpm_pmic_to_linux_err(int err) 43 + { 44 + if (err >= 0 && err < ARRAY_SIZE(acpm_pmic_linux_errmap)) 45 + return acpm_pmic_linux_errmap[err]; 46 + return -EIO; 47 + } 37 48 38 49 static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i) 39 50 { ··· 94 79 95 80 *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]); 96 81 97 - return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); 82 + return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1])); 98 83 } 99 84 100 85 static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan, ··· 125 110 if (ret) 126 111 return ret; 127 112 128 - ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); 113 + ret = acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1])); 129 114 if (ret) 130 115 return ret; 131 116 ··· 165 150 if (ret) 166 151 return ret; 167 152 168 - return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); 153 + return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1])); 169 154 } 170 155 171 156 static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan, ··· 205 190 if (ret) 206 191 return ret; 207 192 208 - return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); 193 + return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1])); 209 194 } 210 195 211 196 static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 
chan, ··· 235 220 if (ret) 236 221 return ret; 237 222 238 - return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]); 223 + return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1])); 239 224 }
+55 -2
drivers/firmware/ti_sci.c
··· 2015 2015 return ret; 2016 2016 } 2017 2017 2018 + /** 2019 + * ti_sci_cmd_lpm_abort() - Abort entry to LPM by clearing selection of LPM to enter 2020 + * @dev: Device pointer corresponding to the SCI entity 2021 + * 2022 + * Return: 0 if all went well, else returns appropriate error value. 2023 + */ 2024 + static int ti_sci_cmd_lpm_abort(struct device *dev) 2025 + { 2026 + struct ti_sci_info *info = dev_get_drvdata(dev); 2027 + struct ti_sci_msg_hdr *req; 2028 + struct ti_sci_msg_hdr *resp; 2029 + struct ti_sci_xfer *xfer; 2030 + int ret = 0; 2031 + 2032 + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT, 2033 + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, 2034 + sizeof(*req), sizeof(*resp)); 2035 + if (IS_ERR(xfer)) { 2036 + ret = PTR_ERR(xfer); 2037 + dev_err(dev, "Message alloc failed(%d)\n", ret); 2038 + return ret; 2039 + } 2040 + req = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2041 + 2042 + ret = ti_sci_do_xfer(info, xfer); 2043 + if (ret) { 2044 + dev_err(dev, "Mbox send fail %d\n", ret); 2045 + goto fail; 2046 + } 2047 + 2048 + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; 2049 + 2050 + if (!ti_sci_is_response_ack(resp)) 2051 + ret = -ENODEV; 2052 + 2053 + fail: 2054 + ti_sci_put_one_xfer(&info->minfo, xfer); 2055 + 2056 + return ret; 2057 + } 2058 + 2018 2059 static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) 2019 2060 { 2020 2061 struct ti_sci_info *info; ··· 3780 3739 return 0; 3781 3740 } 3782 3741 3742 + static void __maybe_unused ti_sci_pm_complete(struct device *dev) 3743 + { 3744 + struct ti_sci_info *info = dev_get_drvdata(dev); 3745 + 3746 + if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) { 3747 + if (ti_sci_cmd_lpm_abort(dev)) 3748 + dev_err(dev, "LPM clear selection failed.\n"); 3749 + } 3750 + } 3751 + 3783 3752 static const struct dev_pm_ops ti_sci_pm_ops = { 3784 3753 #ifdef CONFIG_PM_SLEEP 3785 3754 .suspend = ti_sci_suspend, 3786 3755 .suspend_noirq = ti_sci_suspend_noirq, 3787 3756 .resume_noirq = ti_sci_resume_noirq, 3757 
+ .complete = ti_sci_pm_complete, 3788 3758 #endif 3789 3759 }; 3790 3760 ··· 3928 3876 } 3929 3877 3930 3878 ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps); 3931 - dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n", 3879 + dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n", 3932 3880 info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "", 3933 3881 info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "", 3934 - info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "" 3882 + info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "", 3883 + info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "" 3935 3884 ); 3936 3885 3937 3886 ti_sci_setup_ops(info);
+3
drivers/firmware/ti_sci.h
··· 42 42 #define TI_SCI_MSG_SET_IO_ISOLATION 0x0307 43 43 #define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309 44 44 #define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A 45 + #define TI_SCI_MSG_LPM_ABORT 0x0311 45 46 46 47 /* Resource Management Requests */ 47 48 #define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500 ··· 148 147 * MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported) 149 148 * MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM 150 149 * MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM 150 + * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM 151 151 * 152 152 * Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS 153 153 * providing currently available SOC/firmware capabilities. SoC that don't ··· 159 157 #define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0) 160 158 #define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4) 161 159 #define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5) 160 + #define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9) 162 161 #define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1) 163 162 u64 fw_caps; 164 163 } __packed;
+1 -1
drivers/i2c/busses/Kconfig
··· 415 415 416 416 config I2C_AT91 417 417 tristate "Atmel AT91 I2C Two-Wire interface (TWI)" 418 - depends on ARCH_AT91 || COMPILE_TEST 418 + depends on ARCH_MICROCHIP || COMPILE_TEST 419 419 help 420 420 This supports the use of the I2C interface on Atmel AT91 421 421 processors.
+7 -1
drivers/i2c/busses/i2c-qcom-geni.c
··· 870 870 goto err_clk; 871 871 } 872 872 proto = geni_se_read_proto(&gi2c->se); 873 - if (proto != GENI_SE_I2C) { 873 + if (proto == GENI_SE_INVALID_PROTO) { 874 + ret = geni_load_se_firmware(&gi2c->se, GENI_SE_I2C); 875 + if (ret) { 876 + dev_err_probe(dev, ret, "i2c firmware load failed ret: %d\n", ret); 877 + goto err_resources; 878 + } 879 + } else if (proto != GENI_SE_I2C) { 874 880 ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto); 875 881 goto err_resources; 876 882 }
+2 -2
drivers/media/platform/qcom/venus/firmware.c
··· 136 136 ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, 137 137 mem_va, *mem_phys, *mem_size, NULL); 138 138 else 139 - ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID, 140 - mem_va, *mem_phys, *mem_size, NULL); 139 + ret = qcom_mdt_load_no_init(dev, mdt, fwname, mem_va, 140 + *mem_phys, *mem_size, NULL); 141 141 142 142 memunmap(mem_va); 143 143 err_release_fw:
+4 -6
drivers/memory/samsung/exynos-srom.c
··· 121 121 return -ENOMEM; 122 122 123 123 srom->dev = dev; 124 - srom->reg_base = of_iomap(np, 0); 125 - if (!srom->reg_base) { 124 + srom->reg_base = devm_platform_ioremap_resource(pdev, 0); 125 + if (IS_ERR(srom->reg_base)) { 126 126 dev_err(&pdev->dev, "iomap of exynos srom controller failed\n"); 127 - return -ENOMEM; 127 + return PTR_ERR(srom->reg_base); 128 128 } 129 129 130 130 platform_set_drvdata(pdev, srom); 131 131 132 132 srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets, 133 133 ARRAY_SIZE(exynos_srom_offsets)); 134 - if (!srom->reg_offset) { 135 - iounmap(srom->reg_base); 134 + if (!srom->reg_offset) 136 135 return -ENOMEM; 137 - } 138 136 139 137 for_each_child_of_node(np, child) { 140 138 if (exynos_srom_configure_bank(srom, child)) {
+1 -1
drivers/memory/stm32_omm.c
··· 238 238 if (mux & CR_MUXEN) { 239 239 ret = of_property_read_u32(dev->of_node, "st,omm-req2ack-ns", 240 240 &req2ack); 241 - if (!ret && !req2ack) { 241 + if (!ret && req2ack) { 242 242 req2ack = DIV_ROUND_UP(req2ack, NSEC_PER_SEC / clk_rate_max) - 1; 243 243 244 244 if (req2ack > 256)
+73 -73
drivers/memory/tegra/tegra210.c
··· 9 9 10 10 static const struct tegra_mc_client tegra210_mc_clients[] = { 11 11 { 12 - .id = 0x00, 12 + .id = TEGRA210_MC_PTCR, 13 13 .name = "ptcr", 14 14 .swgroup = TEGRA_SWGROUP_PTC, 15 15 }, { 16 - .id = 0x01, 16 + .id = TEGRA210_MC_DISPLAY0A, 17 17 .name = "display0a", 18 18 .swgroup = TEGRA_SWGROUP_DC, 19 19 .regs = { ··· 29 29 }, 30 30 }, 31 31 }, { 32 - .id = 0x02, 32 + .id = TEGRA210_MC_DISPLAY0AB, 33 33 .name = "display0ab", 34 34 .swgroup = TEGRA_SWGROUP_DCB, 35 35 .regs = { ··· 45 45 }, 46 46 }, 47 47 }, { 48 - .id = 0x03, 48 + .id = TEGRA210_MC_DISPLAY0B, 49 49 .name = "display0b", 50 50 .swgroup = TEGRA_SWGROUP_DC, 51 51 .regs = { ··· 61 61 }, 62 62 }, 63 63 }, { 64 - .id = 0x04, 64 + .id = TEGRA210_MC_DISPLAY0BB, 65 65 .name = "display0bb", 66 66 .swgroup = TEGRA_SWGROUP_DCB, 67 67 .regs = { ··· 77 77 }, 78 78 }, 79 79 }, { 80 - .id = 0x05, 80 + .id = TEGRA210_MC_DISPLAY0C, 81 81 .name = "display0c", 82 82 .swgroup = TEGRA_SWGROUP_DC, 83 83 .regs = { ··· 93 93 }, 94 94 }, 95 95 }, { 96 - .id = 0x06, 96 + .id = TEGRA210_MC_DISPLAY0CB, 97 97 .name = "display0cb", 98 98 .swgroup = TEGRA_SWGROUP_DCB, 99 99 .regs = { ··· 109 109 }, 110 110 }, 111 111 }, { 112 - .id = 0x0e, 112 + .id = TEGRA210_MC_AFIR, 113 113 .name = "afir", 114 114 .swgroup = TEGRA_SWGROUP_AFI, 115 115 .regs = { ··· 125 125 }, 126 126 }, 127 127 }, { 128 - .id = 0x0f, 128 + .id = TEGRA210_MC_AVPCARM7R, 129 129 .name = "avpcarm7r", 130 130 .swgroup = TEGRA_SWGROUP_AVPC, 131 131 .regs = { ··· 141 141 }, 142 142 }, 143 143 }, { 144 - .id = 0x10, 144 + .id = TEGRA210_MC_DISPLAYHC, 145 145 .name = "displayhc", 146 146 .swgroup = TEGRA_SWGROUP_DC, 147 147 .regs = { ··· 157 157 }, 158 158 }, 159 159 }, { 160 - .id = 0x11, 160 + .id = TEGRA210_MC_DISPLAYHCB, 161 161 .name = "displayhcb", 162 162 .swgroup = TEGRA_SWGROUP_DCB, 163 163 .regs = { ··· 173 173 }, 174 174 }, 175 175 }, { 176 - .id = 0x15, 176 + .id = TEGRA210_MC_HDAR, 177 177 .name = "hdar", 178 178 .swgroup = TEGRA_SWGROUP_HDA, 179 
179 .regs = { ··· 189 189 }, 190 190 }, 191 191 }, { 192 - .id = 0x16, 192 + .id = TEGRA210_MC_HOST1XDMAR, 193 193 .name = "host1xdmar", 194 194 .swgroup = TEGRA_SWGROUP_HC, 195 195 .regs = { ··· 205 205 }, 206 206 }, 207 207 }, { 208 - .id = 0x17, 208 + .id = TEGRA210_MC_HOST1XR, 209 209 .name = "host1xr", 210 210 .swgroup = TEGRA_SWGROUP_HC, 211 211 .regs = { ··· 221 221 }, 222 222 }, 223 223 }, { 224 - .id = 0x1c, 224 + .id = TEGRA210_MC_NVENCSRD, 225 225 .name = "nvencsrd", 226 226 .swgroup = TEGRA_SWGROUP_NVENC, 227 227 .regs = { ··· 237 237 }, 238 238 }, 239 239 }, { 240 - .id = 0x1d, 240 + .id = TEGRA210_MC_PPCSAHBDMAR, 241 241 .name = "ppcsahbdmar", 242 242 .swgroup = TEGRA_SWGROUP_PPCS, 243 243 .regs = { ··· 253 253 }, 254 254 }, 255 255 }, { 256 - .id = 0x1e, 256 + .id = TEGRA210_MC_PPCSAHBSLVR, 257 257 .name = "ppcsahbslvr", 258 258 .swgroup = TEGRA_SWGROUP_PPCS, 259 259 .regs = { ··· 269 269 }, 270 270 }, 271 271 }, { 272 - .id = 0x1f, 272 + .id = TEGRA210_MC_SATAR, 273 273 .name = "satar", 274 274 .swgroup = TEGRA_SWGROUP_SATA, 275 275 .regs = { ··· 285 285 }, 286 286 }, 287 287 }, { 288 - .id = 0x27, 288 + .id = TEGRA210_MC_MPCORER, 289 289 .name = "mpcorer", 290 290 .swgroup = TEGRA_SWGROUP_MPCORE, 291 291 .regs = { ··· 297 297 }, 298 298 }, 299 299 }, { 300 - .id = 0x2b, 300 + .id = TEGRA210_MC_NVENCSWR, 301 301 .name = "nvencswr", 302 302 .swgroup = TEGRA_SWGROUP_NVENC, 303 303 .regs = { ··· 313 313 }, 314 314 }, 315 315 }, { 316 - .id = 0x31, 316 + .id = TEGRA210_MC_AFIW, 317 317 .name = "afiw", 318 318 .swgroup = TEGRA_SWGROUP_AFI, 319 319 .regs = { ··· 329 329 }, 330 330 }, 331 331 }, { 332 - .id = 0x32, 332 + .id = TEGRA210_MC_AVPCARM7W, 333 333 .name = "avpcarm7w", 334 334 .swgroup = TEGRA_SWGROUP_AVPC, 335 335 .regs = { ··· 345 345 }, 346 346 }, 347 347 }, { 348 - .id = 0x35, 348 + .id = TEGRA210_MC_HDAW, 349 349 .name = "hdaw", 350 350 .swgroup = TEGRA_SWGROUP_HDA, 351 351 .regs = { ··· 361 361 }, 362 362 }, 363 363 }, { 364 - .id = 0x36, 
364 + .id = TEGRA210_MC_HOST1XW, 365 365 .name = "host1xw", 366 366 .swgroup = TEGRA_SWGROUP_HC, 367 367 .regs = { ··· 377 377 }, 378 378 }, 379 379 }, { 380 - .id = 0x39, 380 + .id = TEGRA210_MC_MPCOREW, 381 381 .name = "mpcorew", 382 382 .swgroup = TEGRA_SWGROUP_MPCORE, 383 383 .regs = { ··· 389 389 }, 390 390 }, 391 391 }, { 392 - .id = 0x3b, 392 + .id = TEGRA210_MC_PPCSAHBDMAW, 393 393 .name = "ppcsahbdmaw", 394 394 .swgroup = TEGRA_SWGROUP_PPCS, 395 395 .regs = { ··· 405 405 }, 406 406 }, 407 407 }, { 408 - .id = 0x3c, 408 + .id = TEGRA210_MC_PPCSAHBSLVW, 409 409 .name = "ppcsahbslvw", 410 410 .swgroup = TEGRA_SWGROUP_PPCS, 411 411 .regs = { ··· 421 421 }, 422 422 }, 423 423 }, { 424 - .id = 0x3d, 424 + .id = TEGRA210_MC_SATAW, 425 425 .name = "sataw", 426 426 .swgroup = TEGRA_SWGROUP_SATA, 427 427 .regs = { ··· 437 437 }, 438 438 }, 439 439 }, { 440 - .id = 0x44, 440 + .id = TEGRA210_MC_ISPRA, 441 441 .name = "ispra", 442 442 .swgroup = TEGRA_SWGROUP_ISP2, 443 443 .regs = { ··· 453 453 }, 454 454 }, 455 455 }, { 456 - .id = 0x46, 456 + .id = TEGRA210_MC_ISPWA, 457 457 .name = "ispwa", 458 458 .swgroup = TEGRA_SWGROUP_ISP2, 459 459 .regs = { ··· 469 469 }, 470 470 }, 471 471 }, { 472 - .id = 0x47, 472 + .id = TEGRA210_MC_ISPWB, 473 473 .name = "ispwb", 474 474 .swgroup = TEGRA_SWGROUP_ISP2, 475 475 .regs = { ··· 485 485 }, 486 486 }, 487 487 }, { 488 - .id = 0x4a, 488 + .id = TEGRA210_MC_XUSB_HOSTR, 489 489 .name = "xusb_hostr", 490 490 .swgroup = TEGRA_SWGROUP_XUSB_HOST, 491 491 .regs = { ··· 501 501 }, 502 502 }, 503 503 }, { 504 - .id = 0x4b, 504 + .id = TEGRA210_MC_XUSB_HOSTW, 505 505 .name = "xusb_hostw", 506 506 .swgroup = TEGRA_SWGROUP_XUSB_HOST, 507 507 .regs = { ··· 517 517 }, 518 518 }, 519 519 }, { 520 - .id = 0x4c, 520 + .id = TEGRA210_MC_XUSB_DEVR, 521 521 .name = "xusb_devr", 522 522 .swgroup = TEGRA_SWGROUP_XUSB_DEV, 523 523 .regs = { ··· 533 533 }, 534 534 }, 535 535 }, { 536 - .id = 0x4d, 536 + .id = TEGRA210_MC_XUSB_DEVW, 537 537 .name = 
"xusb_devw", 538 538 .swgroup = TEGRA_SWGROUP_XUSB_DEV, 539 539 .regs = { ··· 549 549 }, 550 550 }, 551 551 }, { 552 - .id = 0x4e, 552 + .id = TEGRA210_MC_ISPRAB, 553 553 .name = "isprab", 554 554 .swgroup = TEGRA_SWGROUP_ISP2B, 555 555 .regs = { ··· 565 565 }, 566 566 }, 567 567 }, { 568 - .id = 0x50, 568 + .id = TEGRA210_MC_ISPWAB, 569 569 .name = "ispwab", 570 570 .swgroup = TEGRA_SWGROUP_ISP2B, 571 571 .regs = { ··· 581 581 }, 582 582 }, 583 583 }, { 584 - .id = 0x51, 584 + .id = TEGRA210_MC_ISPWBB, 585 585 .name = "ispwbb", 586 586 .swgroup = TEGRA_SWGROUP_ISP2B, 587 587 .regs = { ··· 597 597 }, 598 598 }, 599 599 }, { 600 - .id = 0x54, 600 + .id = TEGRA210_MC_TSECSRD, 601 601 .name = "tsecsrd", 602 602 .swgroup = TEGRA_SWGROUP_TSEC, 603 603 .regs = { ··· 613 613 }, 614 614 }, 615 615 }, { 616 - .id = 0x55, 616 + .id = TEGRA210_MC_TSECSWR, 617 617 .name = "tsecswr", 618 618 .swgroup = TEGRA_SWGROUP_TSEC, 619 619 .regs = { ··· 629 629 }, 630 630 }, 631 631 }, { 632 - .id = 0x56, 632 + .id = TEGRA210_MC_A9AVPSCR, 633 633 .name = "a9avpscr", 634 634 .swgroup = TEGRA_SWGROUP_A9AVP, 635 635 .regs = { ··· 645 645 }, 646 646 }, 647 647 }, { 648 - .id = 0x57, 648 + .id = TEGRA210_MC_A9AVPSCW, 649 649 .name = "a9avpscw", 650 650 .swgroup = TEGRA_SWGROUP_A9AVP, 651 651 .regs = { ··· 661 661 }, 662 662 }, 663 663 }, { 664 - .id = 0x58, 664 + .id = TEGRA210_MC_GPUSRD, 665 665 .name = "gpusrd", 666 666 .swgroup = TEGRA_SWGROUP_GPU, 667 667 .regs = { ··· 678 678 }, 679 679 }, 680 680 }, { 681 - .id = 0x59, 681 + .id = TEGRA210_MC_GPUSWR, 682 682 .name = "gpuswr", 683 683 .swgroup = TEGRA_SWGROUP_GPU, 684 684 .regs = { ··· 695 695 }, 696 696 }, 697 697 }, { 698 - .id = 0x5a, 698 + .id = TEGRA210_MC_DISPLAYT, 699 699 .name = "displayt", 700 700 .swgroup = TEGRA_SWGROUP_DC, 701 701 .regs = { ··· 711 711 }, 712 712 }, 713 713 }, { 714 - .id = 0x60, 714 + .id = TEGRA210_MC_SDMMCRA, 715 715 .name = "sdmmcra", 716 716 .swgroup = TEGRA_SWGROUP_SDMMC1A, 717 717 .regs = { ··· 727 727 
}, 728 728 }, 729 729 }, { 730 - .id = 0x61, 730 + .id = TEGRA210_MC_SDMMCRAA, 731 731 .name = "sdmmcraa", 732 732 .swgroup = TEGRA_SWGROUP_SDMMC2A, 733 733 .regs = { ··· 743 743 }, 744 744 }, 745 745 }, { 746 - .id = 0x62, 746 + .id = TEGRA210_MC_SDMMCR, 747 747 .name = "sdmmcr", 748 748 .swgroup = TEGRA_SWGROUP_SDMMC3A, 749 749 .regs = { ··· 759 759 }, 760 760 }, 761 761 }, { 762 - .id = 0x63, 762 + .id = TEGRA210_MC_SDMMCRAB, 763 763 .swgroup = TEGRA_SWGROUP_SDMMC4A, 764 764 .name = "sdmmcrab", 765 765 .regs = { ··· 775 775 }, 776 776 }, 777 777 }, { 778 - .id = 0x64, 778 + .id = TEGRA210_MC_SDMMCWA, 779 779 .name = "sdmmcwa", 780 780 .swgroup = TEGRA_SWGROUP_SDMMC1A, 781 781 .regs = { ··· 791 791 }, 792 792 }, 793 793 }, { 794 - .id = 0x65, 794 + .id = TEGRA210_MC_SDMMCWAA, 795 795 .name = "sdmmcwaa", 796 796 .swgroup = TEGRA_SWGROUP_SDMMC2A, 797 797 .regs = { ··· 807 807 }, 808 808 }, 809 809 }, { 810 - .id = 0x66, 810 + .id = TEGRA210_MC_SDMMCW, 811 811 .name = "sdmmcw", 812 812 .swgroup = TEGRA_SWGROUP_SDMMC3A, 813 813 .regs = { ··· 823 823 }, 824 824 }, 825 825 }, { 826 - .id = 0x67, 826 + .id = TEGRA210_MC_SDMMCWAB, 827 827 .name = "sdmmcwab", 828 828 .swgroup = TEGRA_SWGROUP_SDMMC4A, 829 829 .regs = { ··· 839 839 }, 840 840 }, 841 841 }, { 842 - .id = 0x6c, 842 + .id = TEGRA210_MC_VICSRD, 843 843 .name = "vicsrd", 844 844 .swgroup = TEGRA_SWGROUP_VIC, 845 845 .regs = { ··· 855 855 }, 856 856 }, 857 857 }, { 858 - .id = 0x6d, 858 + .id = TEGRA210_MC_VICSWR, 859 859 .name = "vicswr", 860 860 .swgroup = TEGRA_SWGROUP_VIC, 861 861 .regs = { ··· 871 871 }, 872 872 }, 873 873 }, { 874 - .id = 0x72, 874 + .id = TEGRA210_MC_VIW, 875 875 .name = "viw", 876 876 .swgroup = TEGRA_SWGROUP_VI, 877 877 .regs = { ··· 887 887 }, 888 888 }, 889 889 }, { 890 - .id = 0x73, 890 + .id = TEGRA210_MC_DISPLAYD, 891 891 .name = "displayd", 892 892 .swgroup = TEGRA_SWGROUP_DC, 893 893 .regs = { ··· 903 903 }, 904 904 }, 905 905 }, { 906 - .id = 0x78, 906 + .id = 
TEGRA210_MC_NVDECSRD, 907 907 .name = "nvdecsrd", 908 908 .swgroup = TEGRA_SWGROUP_NVDEC, 909 909 .regs = { ··· 919 919 }, 920 920 }, 921 921 }, { 922 - .id = 0x79, 922 + .id = TEGRA210_MC_NVDECSWR, 923 923 .name = "nvdecswr", 924 924 .swgroup = TEGRA_SWGROUP_NVDEC, 925 925 .regs = { ··· 935 935 }, 936 936 }, 937 937 }, { 938 - .id = 0x7a, 938 + .id = TEGRA210_MC_APER, 939 939 .name = "aper", 940 940 .swgroup = TEGRA_SWGROUP_APE, 941 941 .regs = { ··· 951 951 }, 952 952 }, 953 953 }, { 954 - .id = 0x7b, 954 + .id = TEGRA210_MC_APEW, 955 955 .name = "apew", 956 956 .swgroup = TEGRA_SWGROUP_APE, 957 957 .regs = { ··· 967 967 }, 968 968 }, 969 969 }, { 970 - .id = 0x7e, 970 + .id = TEGRA210_MC_NVJPGRD, 971 971 .name = "nvjpgsrd", 972 972 .swgroup = TEGRA_SWGROUP_NVJPG, 973 973 .regs = { ··· 983 983 }, 984 984 }, 985 985 }, { 986 - .id = 0x7f, 986 + .id = TEGRA210_MC_NVJPGWR, 987 987 .name = "nvjpgswr", 988 988 .swgroup = TEGRA_SWGROUP_NVJPG, 989 989 .regs = { ··· 999 999 }, 1000 1000 }, 1001 1001 }, { 1002 - .id = 0x80, 1002 + .id = TEGRA210_MC_SESRD, 1003 1003 .name = "sesrd", 1004 1004 .swgroup = TEGRA_SWGROUP_SE, 1005 1005 .regs = { ··· 1015 1015 }, 1016 1016 }, 1017 1017 }, { 1018 - .id = 0x81, 1018 + .id = TEGRA210_MC_SESRD, 1019 1019 .name = "seswr", 1020 1020 .swgroup = TEGRA_SWGROUP_SE, 1021 1021 .regs = { ··· 1031 1031 }, 1032 1032 }, 1033 1033 }, { 1034 - .id = 0x82, 1034 + .id = TEGRA210_MC_AXIAPR, 1035 1035 .name = "axiapr", 1036 1036 .swgroup = TEGRA_SWGROUP_AXIAP, 1037 1037 .regs = { ··· 1047 1047 }, 1048 1048 }, 1049 1049 }, { 1050 - .id = 0x83, 1050 + .id = TEGRA210_MC_AXIAPW, 1051 1051 .name = "axiapw", 1052 1052 .swgroup = TEGRA_SWGROUP_AXIAP, 1053 1053 .regs = { ··· 1063 1063 }, 1064 1064 }, 1065 1065 }, { 1066 - .id = 0x84, 1066 + .id = TEGRA210_MC_ETRR, 1067 1067 .name = "etrr", 1068 1068 .swgroup = TEGRA_SWGROUP_ETR, 1069 1069 .regs = { ··· 1079 1079 }, 1080 1080 }, 1081 1081 }, { 1082 - .id = 0x85, 1082 + .id = TEGRA210_MC_ETRR, 1083 1083 .name 
= "etrw", 1084 1084 .swgroup = TEGRA_SWGROUP_ETR, 1085 1085 .regs = { ··· 1095 1095 }, 1096 1096 }, 1097 1097 }, { 1098 - .id = 0x86, 1098 + .id = TEGRA210_MC_TSECSRDB, 1099 1099 .name = "tsecsrdb", 1100 1100 .swgroup = TEGRA_SWGROUP_TSECB, 1101 1101 .regs = { ··· 1111 1111 }, 1112 1112 }, 1113 1113 }, { 1114 - .id = 0x87, 1114 + .id = TEGRA210_MC_TSECSWRB, 1115 1115 .name = "tsecswrb", 1116 1116 .swgroup = TEGRA_SWGROUP_TSECB, 1117 1117 .regs = { ··· 1127 1127 }, 1128 1128 }, 1129 1129 }, { 1130 - .id = 0x88, 1130 + .id = TEGRA210_MC_GPUSRD2, 1131 1131 .name = "gpusrd2", 1132 1132 .swgroup = TEGRA_SWGROUP_GPU, 1133 1133 .regs = { ··· 1144 1144 }, 1145 1145 }, 1146 1146 }, { 1147 - .id = 0x89, 1147 + .id = TEGRA210_MC_GPUSWR2, 1148 1148 .name = "gpuswr2", 1149 1149 .swgroup = TEGRA_SWGROUP_GPU, 1150 1150 .regs = {
+1 -1
drivers/mfd/Kconfig
··· 139 139 config MFD_AT91_USART 140 140 tristate "AT91 USART Driver" 141 141 select MFD_CORE 142 - depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST 142 + depends on ARCH_MICROCHIP || COMPILE_TEST 143 143 help 144 144 Select this to get support for AT91 USART IP. This is a wrapper 145 145 over at91-usart-serial driver and usart-spi-driver. Only one function
+1 -1
drivers/net/wireless/ath/ath12k/ahb.c
··· 414 414 goto err_fw2; 415 415 } 416 416 417 - ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys, 417 + ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys, 418 418 mem_size, &mem_phys); 419 419 if (ret) { 420 420 ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+137 -60
drivers/nvme/host/apple.c
··· 35 35 #include "nvme.h" 36 36 37 37 #define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC 38 - #define APPLE_ANS_MAX_QUEUE_DEPTH 64 39 38 40 39 #define APPLE_ANS_COPROC_CPU_CONTROL 0x44 41 40 #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4) ··· 73 74 */ 74 75 #define APPLE_NVME_AQ_DEPTH 2 75 76 #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1) 77 + 78 + #define APPLE_NVME_IOSQES 7 76 79 77 80 /* 78 81 * These can be higher, but we need to ensure that any command doesn't ··· 143 142 u32 __iomem *sq_db; 144 143 u32 __iomem *cq_db; 145 144 145 + u16 sq_tail; 146 146 u16 cq_head; 147 147 u8 cq_phase; 148 148 ··· 168 166 struct scatterlist *sg; 169 167 }; 170 168 169 + struct apple_nvme_hw { 170 + bool has_lsq_nvmmu; 171 + u32 max_queue_depth; 172 + }; 173 + 171 174 struct apple_nvme { 172 175 struct device *dev; 173 176 174 177 void __iomem *mmio_coproc; 175 178 void __iomem *mmio_nvme; 179 + const struct apple_nvme_hw *hw; 176 180 177 181 struct device **pd_dev; 178 182 struct device_link **pd_link; ··· 223 215 224 216 static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) 225 217 { 226 - if (q->is_adminq) 218 + struct apple_nvme *anv = queue_to_apple_nvme(q); 219 + 220 + if (q->is_adminq && anv->hw->has_lsq_nvmmu) 227 221 return APPLE_NVME_AQ_DEPTH; 228 222 229 - return APPLE_ANS_MAX_QUEUE_DEPTH; 223 + return anv->hw->max_queue_depth; 230 224 } 231 225 232 226 static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size) ··· 290 280 "NVMMU TCB invalidation failed\n"); 291 281 } 292 282 293 - static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, 283 + static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q, 284 + struct nvme_command *cmd) 285 + { 286 + struct apple_nvme *anv = queue_to_apple_nvme(q); 287 + 288 + spin_lock_irq(&anv->lock); 289 + 290 + if (q->is_adminq) 291 + memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd)); 292 + else 293 + memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES), 
294 + cmd, sizeof(*cmd)); 295 + 296 + if (++q->sq_tail == anv->hw->max_queue_depth) 297 + q->sq_tail = 0; 298 + 299 + writel(q->sq_tail, q->sq_db); 300 + spin_unlock_irq(&anv->lock); 301 + } 302 + 303 + 304 + static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q, 294 305 struct nvme_command *cmd) 295 306 { 296 307 struct apple_nvme *anv = queue_to_apple_nvme(q); ··· 621 590 __u16 command_id = READ_ONCE(cqe->command_id); 622 591 struct request *req; 623 592 624 - apple_nvmmu_inval(q, command_id); 593 + if (anv->hw->has_lsq_nvmmu) 594 + apple_nvmmu_inval(q, command_id); 625 595 626 596 req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id); 627 597 if (unlikely(!req)) { ··· 717 685 c.create_cq.opcode = nvme_admin_create_cq; 718 686 c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr); 719 687 c.create_cq.cqid = cpu_to_le16(1); 720 - c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); 688 + c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1); 721 689 c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED); 722 690 c.create_cq.irq_vector = cpu_to_le16(0); 723 691 ··· 745 713 c.create_sq.opcode = nvme_admin_create_sq; 746 714 c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr); 747 715 c.create_sq.sqid = cpu_to_le16(1); 748 - c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); 716 + c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1); 749 717 c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG); 750 718 c.create_sq.cqid = cpu_to_le16(1); 751 719 ··· 797 765 } 798 766 799 767 nvme_start_request(req); 800 - apple_nvme_submit_cmd(q, cmnd); 768 + 769 + if (anv->hw->has_lsq_nvmmu) 770 + apple_nvme_submit_cmd_t8103(q, cmnd); 771 + else 772 + apple_nvme_submit_cmd_t8015(q, cmnd); 773 + 801 774 return BLK_STS_OK; 802 775 803 776 out_free_cmd: ··· 1007 970 static void apple_nvme_init_queue(struct apple_nvme_queue *q) 1008 971 { 1009 972 unsigned int depth = 
apple_nvme_queue_depth(q); 973 + struct apple_nvme *anv = queue_to_apple_nvme(q); 1010 974 1011 975 q->cq_head = 0; 1012 976 q->cq_phase = 1; 1013 - memset(q->tcbs, 0, 1014 - APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb)); 977 + if (anv->hw->has_lsq_nvmmu) 978 + memset(q->tcbs, 0, anv->hw->max_queue_depth 979 + * sizeof(struct apple_nvmmu_tcb)); 1015 980 memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); 1016 981 WRITE_ONCE(q->enabled, true); 1017 982 wmb(); /* ensure the first interrupt sees the initialization */ ··· 1108 1069 1109 1070 dma_set_max_seg_size(anv->dev, 0xffffffff); 1110 1071 1111 - /* 1112 - * Enable NVMMU and linear submission queues. 1113 - * While we could keep those disabled and pretend this is slightly 1114 - * more common NVMe controller we'd still need some quirks (e.g. 1115 - * sq entries will be 128 bytes) and Apple might drop support for 1116 - * that mode in the future. 1117 - */ 1118 - writel(APPLE_ANS_LINEAR_SQ_EN, 1119 - anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); 1072 + if (anv->hw->has_lsq_nvmmu) { 1073 + /* 1074 + * Enable NVMMU and linear submission queues which is required 1075 + * since T6000. 
1076 + */ 1077 + writel(APPLE_ANS_LINEAR_SQ_EN, 1078 + anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); 1120 1079 1121 - /* Allow as many pending command as possible for both queues */ 1122 - writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16), 1123 - anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL); 1080 + /* Allow as many pending command as possible for both queues */ 1081 + writel(anv->hw->max_queue_depth 1082 + | (anv->hw->max_queue_depth << 16), anv->mmio_nvme 1083 + + APPLE_ANS_MAX_PEND_CMDS_CTRL); 1124 1084 1125 - /* Setup the NVMMU for the maximum admin and IO queue depth */ 1126 - writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1, 1127 - anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); 1085 + /* Setup the NVMMU for the maximum admin and IO queue depth */ 1086 + writel(anv->hw->max_queue_depth - 1, 1087 + anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); 1128 1088 1129 - /* 1130 - * This is probably a chicken bit: without it all commands where any PRP 1131 - * is set to zero (including those that don't use that field) fail and 1132 - * the co-processor complains about "completed with err BAD_CMD-" or 1133 - * a "NULL_PRP_PTR_ERR" in the syslog 1134 - */ 1135 - writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & 1136 - ~APPLE_ANS_PRP_NULL_CHECK, 1137 - anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); 1089 + /* 1090 + * This is probably a chicken bit: without it all commands 1091 + * where any PRP is set to zero (including those that don't use 1092 + * that field) fail and the co-processor complains about 1093 + * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the 1094 + * syslog 1095 + */ 1096 + writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & 1097 + ~APPLE_ANS_PRP_NULL_CHECK, 1098 + anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); 1099 + } 1138 1100 1139 1101 /* Setup the admin queue */ 1140 - aqa = APPLE_NVME_AQ_DEPTH - 1; 1102 + if (anv->hw->has_lsq_nvmmu) 1103 + aqa = APPLE_NVME_AQ_DEPTH - 1; 1104 + else 1105 + aqa = anv->hw->max_queue_depth - 1; 1141 1106 aqa |= aqa 
<< 16; 1142 1107 writel(aqa, anv->mmio_nvme + NVME_REG_AQA); 1143 1108 writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ); 1144 1109 writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ); 1145 1110 1146 - /* Setup NVMMU for both queues */ 1147 - writeq(anv->adminq.tcb_dma_addr, 1148 - anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); 1149 - writeq(anv->ioq.tcb_dma_addr, 1150 - anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); 1111 + if (anv->hw->has_lsq_nvmmu) { 1112 + /* Setup NVMMU for both queues */ 1113 + writeq(anv->adminq.tcb_dma_addr, 1114 + anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); 1115 + writeq(anv->ioq.tcb_dma_addr, 1116 + anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); 1117 + } 1151 1118 1152 1119 anv->ctrl.sqsize = 1153 - APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */ 1120 + anv->hw->max_queue_depth - 1; /* 0's based queue depth */ 1154 1121 anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP); 1155 1122 1156 1123 dev_dbg(anv->dev, "Enabling controller now"); ··· 1327 1282 * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which 1328 1283 * must be marked as reserved in the IO queue. 
1329 1284 */ 1330 - anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; 1331 - anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1; 1285 + if (anv->hw->has_lsq_nvmmu) 1286 + anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; 1287 + anv->tagset.queue_depth = anv->hw->max_queue_depth - 1; 1332 1288 anv->tagset.timeout = NVME_IO_TIMEOUT; 1333 1289 anv->tagset.numa_node = NUMA_NO_NODE; 1334 1290 anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); ··· 1353 1307 struct apple_nvme_queue *q) 1354 1308 { 1355 1309 unsigned int depth = apple_nvme_queue_depth(q); 1310 + size_t iosq_size; 1356 1311 1357 1312 q->cqes = dmam_alloc_coherent(anv->dev, 1358 1313 depth * sizeof(struct nvme_completion), ··· 1361 1314 if (!q->cqes) 1362 1315 return -ENOMEM; 1363 1316 1364 - q->sqes = dmam_alloc_coherent(anv->dev, 1365 - depth * sizeof(struct nvme_command), 1317 + if (anv->hw->has_lsq_nvmmu) 1318 + iosq_size = depth * sizeof(struct nvme_command); 1319 + else 1320 + iosq_size = depth << APPLE_NVME_IOSQES; 1321 + 1322 + q->sqes = dmam_alloc_coherent(anv->dev, iosq_size, 1366 1323 &q->sq_dma_addr, GFP_KERNEL); 1367 1324 if (!q->sqes) 1368 1325 return -ENOMEM; 1369 1326 1370 - /* 1371 - * We need the maximum queue depth here because the NVMMU only has a 1372 - * single depth configuration shared between both queues. 1373 - */ 1374 - q->tcbs = dmam_alloc_coherent(anv->dev, 1375 - APPLE_ANS_MAX_QUEUE_DEPTH * 1376 - sizeof(struct apple_nvmmu_tcb), 1377 - &q->tcb_dma_addr, GFP_KERNEL); 1378 - if (!q->tcbs) 1379 - return -ENOMEM; 1327 + if (anv->hw->has_lsq_nvmmu) { 1328 + /* 1329 + * We need the maximum queue depth here because the NVMMU only 1330 + * has a single depth configuration shared between both queues. 
1331 + */ 1332 + q->tcbs = dmam_alloc_coherent(anv->dev, 1333 + anv->hw->max_queue_depth * 1334 + sizeof(struct apple_nvmmu_tcb), 1335 + &q->tcb_dma_addr, GFP_KERNEL); 1336 + if (!q->tcbs) 1337 + return -ENOMEM; 1338 + } 1380 1339 1381 1340 /* 1382 1341 * initialize phase to make sure the allocated and empty memory ··· 1466 1413 anv->adminq.is_adminq = true; 1467 1414 platform_set_drvdata(pdev, anv); 1468 1415 1416 + anv->hw = of_device_get_match_data(&pdev->dev); 1417 + if (!anv->hw) { 1418 + ret = -ENODEV; 1419 + goto put_dev; 1420 + } 1421 + 1469 1422 ret = apple_nvme_attach_genpd(anv); 1470 1423 if (ret < 0) { 1471 1424 dev_err_probe(dev, ret, "Failed to attach power domains"); ··· 1503 1444 goto put_dev; 1504 1445 } 1505 1446 1506 - anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; 1507 - anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; 1508 - anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; 1509 - anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; 1447 + if (anv->hw->has_lsq_nvmmu) { 1448 + anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; 1449 + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; 1450 + anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; 1451 + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; 1452 + } else { 1453 + anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS; 1454 + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; 1455 + anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8; 1456 + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; 1457 + } 1510 1458 1511 1459 anv->sart = devm_apple_sart_get(dev); 1512 1460 if (IS_ERR(anv->sart)) { ··· 1691 1625 static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend, 1692 1626 apple_nvme_resume); 1693 1627 1628 + static const struct apple_nvme_hw apple_nvme_t8015_hw = { 1629 + .has_lsq_nvmmu = false, 1630 + .max_queue_depth = 16, 1631 + }; 1632 + 1633 + static const struct apple_nvme_hw apple_nvme_t8103_hw = { 1634 + 
.has_lsq_nvmmu = true, 1635 + .max_queue_depth = 64, 1636 + }; 1637 + 1694 1638 static const struct of_device_id apple_nvme_of_match[] = { 1695 - { .compatible = "apple,nvme-ans2" }, 1639 + { .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw }, 1640 + { .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw }, 1696 1641 {}, 1697 1642 }; 1698 1643 MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
+1
drivers/pinctrl/pinctrl-apple-gpio.c
··· 515 515 } 516 516 517 517 static const struct of_device_id apple_gpio_pinctrl_of_match[] = { 518 + { .compatible = "apple,t8103-pinctrl", }, 518 519 { .compatible = "apple,pinctrl", }, 519 520 { } 520 521 };
+1
drivers/pmdomain/apple/pmgr-pwrstate.c
··· 306 306 } 307 307 308 308 static const struct of_device_id apple_pmgr_ps_of_match[] = { 309 + { .compatible = "apple,t8103-pmgr-pwrstate" }, 309 310 { .compatible = "apple,pmgr-pwrstate" }, 310 311 {} 311 312 };
+1 -1
drivers/remoteproc/qcom_q6v5_adsp.c
··· 317 317 struct qcom_adsp *adsp = rproc->priv; 318 318 int ret; 319 319 320 - ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0, 320 + ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 321 321 adsp->mem_region, adsp->mem_phys, 322 322 adsp->mem_size, &adsp->mem_reloc); 323 323 if (ret)
+3 -4
drivers/remoteproc/qcom_q6v5_pas.c
··· 242 242 goto release_dtb_firmware; 243 243 244 244 ret = qcom_mdt_load_no_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name, 245 - pas->dtb_pas_id, pas->dtb_mem_region, 246 - pas->dtb_mem_phys, pas->dtb_mem_size, 247 - &pas->dtb_mem_reloc); 245 + pas->dtb_mem_region, pas->dtb_mem_phys, 246 + pas->dtb_mem_size, &pas->dtb_mem_reloc); 248 247 if (ret) 249 248 goto release_dtb_metadata; 250 249 } ··· 306 307 if (ret) 307 308 goto disable_px_supply; 308 309 309 - ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id, 310 + ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware, 310 311 pas->mem_region, pas->mem_phys, pas->mem_size, 311 312 &pas->mem_reloc); 312 313 if (ret)
+1 -1
drivers/remoteproc/qcom_q6v5_wcss.c
··· 757 757 int ret; 758 758 759 759 ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware, 760 - 0, wcss->mem_region, wcss->mem_phys, 760 + wcss->mem_region, wcss->mem_phys, 761 761 wcss->mem_size, &wcss->mem_reloc); 762 762 if (ret) 763 763 return ret;
+7
drivers/reset/Kconfig
··· 22 22 This option enables support for the external reset functions for 23 23 peripheral PHYs on the Altera Arria10 System Resource Chip. 24 24 25 + config RESET_ASPEED 26 + tristate "ASPEED Reset Driver" 27 + depends on ARCH_ASPEED || COMPILE_TEST 28 + select AUXILIARY_BUS 29 + help 30 + This enables the reset controller driver for AST2700. 31 + 25 32 config RESET_ATH79 26 33 bool "AR71xx Reset Driver" if COMPILE_TEST 27 34 default ATH79
+1
drivers/reset/Makefile
··· 6 6 obj-y += sti/ 7 7 obj-y += tegra/ 8 8 obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o 9 + obj-$(CONFIG_RESET_ASPEED) += reset-aspeed.o 9 10 obj-$(CONFIG_RESET_ATH79) += reset-ath79.o 10 11 obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o 11 12 obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
+253
drivers/reset/reset-aspeed.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Copyright (c) 2024 ASPEED Technology Inc. 4 + */ 5 + 6 + #include <linux/auxiliary_bus.h> 7 + #include <linux/cleanup.h> 8 + #include <linux/device.h> 9 + #include <linux/io.h> 10 + #include <linux/module.h> 11 + #include <linux/reset-controller.h> 12 + #include <linux/slab.h> 13 + 14 + #include <dt-bindings/reset/aspeed,ast2700-scu.h> 15 + 16 + #define SCU0_RESET_CTRL1 0x200 17 + #define SCU0_RESET_CTRL2 0x220 18 + #define SCU1_RESET_CTRL1 0x200 19 + #define SCU1_RESET_CTRL2 0x220 20 + #define SCU1_PCIE3_CTRL 0x908 21 + 22 + struct ast2700_reset_signal { 23 + bool dedicated_clr; /* dedicated reset clr offset */ 24 + u32 offset, bit; 25 + }; 26 + 27 + struct aspeed_reset_info { 28 + unsigned int nr_resets; 29 + const struct ast2700_reset_signal *signal; 30 + }; 31 + 32 + struct aspeed_reset { 33 + struct reset_controller_dev rcdev; 34 + struct aspeed_reset_info *info; 35 + spinlock_t lock; /* Protect read-modify-write cycle */ 36 + void __iomem *base; 37 + }; 38 + 39 + static const struct ast2700_reset_signal ast2700_reset0_signals[] = { 40 + [SCU0_RESET_SDRAM] = { true, SCU0_RESET_CTRL1, BIT(0) }, 41 + [SCU0_RESET_DDRPHY] = { true, SCU0_RESET_CTRL1, BIT(1) }, 42 + [SCU0_RESET_RSA] = { true, SCU0_RESET_CTRL1, BIT(2) }, 43 + [SCU0_RESET_SHA3] = { true, SCU0_RESET_CTRL1, BIT(3) }, 44 + [SCU0_RESET_HACE] = { true, SCU0_RESET_CTRL1, BIT(4) }, 45 + [SCU0_RESET_SOC] = { true, SCU0_RESET_CTRL1, BIT(5) }, 46 + [SCU0_RESET_VIDEO] = { true, SCU0_RESET_CTRL1, BIT(6) }, 47 + [SCU0_RESET_2D] = { true, SCU0_RESET_CTRL1, BIT(7) }, 48 + [SCU0_RESET_PCIS] = { true, SCU0_RESET_CTRL1, BIT(8) }, 49 + [SCU0_RESET_RVAS0] = { true, SCU0_RESET_CTRL1, BIT(9) }, 50 + [SCU0_RESET_RVAS1] = { true, SCU0_RESET_CTRL1, BIT(10) }, 51 + [SCU0_RESET_SM3] = { true, SCU0_RESET_CTRL1, BIT(11) }, 52 + [SCU0_RESET_SM4] = { true, SCU0_RESET_CTRL1, BIT(12) }, 53 + [SCU0_RESET_CRT0] = { true, SCU0_RESET_CTRL1, BIT(13) }, 54 + [SCU0_RESET_ECC] 
= { true, SCU0_RESET_CTRL1, BIT(14) }, 55 + [SCU0_RESET_DP_PCI] = { true, SCU0_RESET_CTRL1, BIT(15) }, 56 + [SCU0_RESET_UFS] = { true, SCU0_RESET_CTRL1, BIT(16) }, 57 + [SCU0_RESET_EMMC] = { true, SCU0_RESET_CTRL1, BIT(17) }, 58 + [SCU0_RESET_PCIE1RST] = { true, SCU0_RESET_CTRL1, BIT(18) }, 59 + [SCU0_RESET_PCIE1RSTOE] = { true, SCU0_RESET_CTRL1, BIT(19) }, 60 + [SCU0_RESET_PCIE0RST] = { true, SCU0_RESET_CTRL1, BIT(20) }, 61 + [SCU0_RESET_PCIE0RSTOE] = { true, SCU0_RESET_CTRL1, BIT(21) }, 62 + [SCU0_RESET_JTAG] = { true, SCU0_RESET_CTRL1, BIT(22) }, 63 + [SCU0_RESET_MCTP0] = { true, SCU0_RESET_CTRL1, BIT(23) }, 64 + [SCU0_RESET_MCTP1] = { true, SCU0_RESET_CTRL1, BIT(24) }, 65 + [SCU0_RESET_XDMA0] = { true, SCU0_RESET_CTRL1, BIT(25) }, 66 + [SCU0_RESET_XDMA1] = { true, SCU0_RESET_CTRL1, BIT(26) }, 67 + [SCU0_RESET_H2X1] = { true, SCU0_RESET_CTRL1, BIT(27) }, 68 + [SCU0_RESET_DP] = { true, SCU0_RESET_CTRL1, BIT(28) }, 69 + [SCU0_RESET_DP_MCU] = { true, SCU0_RESET_CTRL1, BIT(29) }, 70 + [SCU0_RESET_SSP] = { true, SCU0_RESET_CTRL1, BIT(30) }, 71 + [SCU0_RESET_H2X0] = { true, SCU0_RESET_CTRL1, BIT(31) }, 72 + [SCU0_RESET_PORTA_VHUB] = { true, SCU0_RESET_CTRL2, BIT(0) }, 73 + [SCU0_RESET_PORTA_PHY3] = { true, SCU0_RESET_CTRL2, BIT(1) }, 74 + [SCU0_RESET_PORTA_XHCI] = { true, SCU0_RESET_CTRL2, BIT(2) }, 75 + [SCU0_RESET_PORTB_VHUB] = { true, SCU0_RESET_CTRL2, BIT(3) }, 76 + [SCU0_RESET_PORTB_PHY3] = { true, SCU0_RESET_CTRL2, BIT(4) }, 77 + [SCU0_RESET_PORTB_XHCI] = { true, SCU0_RESET_CTRL2, BIT(5) }, 78 + [SCU0_RESET_PORTA_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(6) }, 79 + [SCU0_RESET_PORTB_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(7) }, 80 + [SCU0_RESET_UHCI] = { true, SCU0_RESET_CTRL2, BIT(8) }, 81 + [SCU0_RESET_TSP] = { true, SCU0_RESET_CTRL2, BIT(9) }, 82 + [SCU0_RESET_E2M0] = { true, SCU0_RESET_CTRL2, BIT(10) }, 83 + [SCU0_RESET_E2M1] = { true, SCU0_RESET_CTRL2, BIT(11) }, 84 + [SCU0_RESET_VLINK] = { true, SCU0_RESET_CTRL2, BIT(12) }, 85 + }; 86 + 87 + static 
const struct ast2700_reset_signal ast2700_reset1_signals[] = { 88 + [SCU1_RESET_LPC0] = { true, SCU1_RESET_CTRL1, BIT(0) }, 89 + [SCU1_RESET_LPC1] = { true, SCU1_RESET_CTRL1, BIT(1) }, 90 + [SCU1_RESET_MII] = { true, SCU1_RESET_CTRL1, BIT(2) }, 91 + [SCU1_RESET_PECI] = { true, SCU1_RESET_CTRL1, BIT(3) }, 92 + [SCU1_RESET_PWM] = { true, SCU1_RESET_CTRL1, BIT(4) }, 93 + [SCU1_RESET_MAC0] = { true, SCU1_RESET_CTRL1, BIT(5) }, 94 + [SCU1_RESET_MAC1] = { true, SCU1_RESET_CTRL1, BIT(6) }, 95 + [SCU1_RESET_MAC2] = { true, SCU1_RESET_CTRL1, BIT(7) }, 96 + [SCU1_RESET_ADC] = { true, SCU1_RESET_CTRL1, BIT(8) }, 97 + [SCU1_RESET_SD] = { true, SCU1_RESET_CTRL1, BIT(9) }, 98 + [SCU1_RESET_ESPI0] = { true, SCU1_RESET_CTRL1, BIT(10) }, 99 + [SCU1_RESET_ESPI1] = { true, SCU1_RESET_CTRL1, BIT(11) }, 100 + [SCU1_RESET_JTAG1] = { true, SCU1_RESET_CTRL1, BIT(12) }, 101 + [SCU1_RESET_SPI0] = { true, SCU1_RESET_CTRL1, BIT(13) }, 102 + [SCU1_RESET_SPI1] = { true, SCU1_RESET_CTRL1, BIT(14) }, 103 + [SCU1_RESET_SPI2] = { true, SCU1_RESET_CTRL1, BIT(15) }, 104 + [SCU1_RESET_I3C0] = { true, SCU1_RESET_CTRL1, BIT(16) }, 105 + [SCU1_RESET_I3C1] = { true, SCU1_RESET_CTRL1, BIT(17) }, 106 + [SCU1_RESET_I3C2] = { true, SCU1_RESET_CTRL1, BIT(18) }, 107 + [SCU1_RESET_I3C3] = { true, SCU1_RESET_CTRL1, BIT(19) }, 108 + [SCU1_RESET_I3C4] = { true, SCU1_RESET_CTRL1, BIT(20) }, 109 + [SCU1_RESET_I3C5] = { true, SCU1_RESET_CTRL1, BIT(21) }, 110 + [SCU1_RESET_I3C6] = { true, SCU1_RESET_CTRL1, BIT(22) }, 111 + [SCU1_RESET_I3C7] = { true, SCU1_RESET_CTRL1, BIT(23) }, 112 + [SCU1_RESET_I3C8] = { true, SCU1_RESET_CTRL1, BIT(24) }, 113 + [SCU1_RESET_I3C9] = { true, SCU1_RESET_CTRL1, BIT(25) }, 114 + [SCU1_RESET_I3C10] = { true, SCU1_RESET_CTRL1, BIT(26) }, 115 + [SCU1_RESET_I3C11] = { true, SCU1_RESET_CTRL1, BIT(27) }, 116 + [SCU1_RESET_I3C12] = { true, SCU1_RESET_CTRL1, BIT(28) }, 117 + [SCU1_RESET_I3C13] = { true, SCU1_RESET_CTRL1, BIT(29) }, 118 + [SCU1_RESET_I3C14] = { true, SCU1_RESET_CTRL1, BIT(30) }, 
119 + [SCU1_RESET_I3C15] = { true, SCU1_RESET_CTRL1, BIT(31) }, 120 + [SCU1_RESET_MCU0] = { true, SCU1_RESET_CTRL2, BIT(0) }, 121 + [SCU1_RESET_MCU1] = { true, SCU1_RESET_CTRL2, BIT(1) }, 122 + [SCU1_RESET_H2A_SPI1] = { true, SCU1_RESET_CTRL2, BIT(2) }, 123 + [SCU1_RESET_H2A_SPI2] = { true, SCU1_RESET_CTRL2, BIT(3) }, 124 + [SCU1_RESET_UART0] = { true, SCU1_RESET_CTRL2, BIT(4) }, 125 + [SCU1_RESET_UART1] = { true, SCU1_RESET_CTRL2, BIT(5) }, 126 + [SCU1_RESET_UART2] = { true, SCU1_RESET_CTRL2, BIT(6) }, 127 + [SCU1_RESET_UART3] = { true, SCU1_RESET_CTRL2, BIT(7) }, 128 + [SCU1_RESET_I2C_FILTER] = { true, SCU1_RESET_CTRL2, BIT(8) }, 129 + [SCU1_RESET_CALIPTRA] = { true, SCU1_RESET_CTRL2, BIT(9) }, 130 + [SCU1_RESET_XDMA] = { true, SCU1_RESET_CTRL2, BIT(10) }, 131 + [SCU1_RESET_FSI] = { true, SCU1_RESET_CTRL2, BIT(12) }, 132 + [SCU1_RESET_CAN] = { true, SCU1_RESET_CTRL2, BIT(13) }, 133 + [SCU1_RESET_MCTP] = { true, SCU1_RESET_CTRL2, BIT(14) }, 134 + [SCU1_RESET_I2C] = { true, SCU1_RESET_CTRL2, BIT(15) }, 135 + [SCU1_RESET_UART6] = { true, SCU1_RESET_CTRL2, BIT(16) }, 136 + [SCU1_RESET_UART7] = { true, SCU1_RESET_CTRL2, BIT(17) }, 137 + [SCU1_RESET_UART8] = { true, SCU1_RESET_CTRL2, BIT(18) }, 138 + [SCU1_RESET_UART9] = { true, SCU1_RESET_CTRL2, BIT(19) }, 139 + [SCU1_RESET_LTPI0] = { true, SCU1_RESET_CTRL2, BIT(20) }, 140 + [SCU1_RESET_VGAL] = { true, SCU1_RESET_CTRL2, BIT(21) }, 141 + [SCU1_RESET_LTPI1] = { true, SCU1_RESET_CTRL2, BIT(22) }, 142 + [SCU1_RESET_ACE] = { true, SCU1_RESET_CTRL2, BIT(23) }, 143 + [SCU1_RESET_E2M] = { true, SCU1_RESET_CTRL2, BIT(24) }, 144 + [SCU1_RESET_UHCI] = { true, SCU1_RESET_CTRL2, BIT(25) }, 145 + [SCU1_RESET_PORTC_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(26) }, 146 + [SCU1_RESET_PORTC_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(27) }, 147 + [SCU1_RESET_PORTD_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(28) }, 148 + [SCU1_RESET_PORTD_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(29) }, 149 + [SCU1_RESET_H2X] = { true, SCU1_RESET_CTRL2, 
BIT(30) }, 150 + [SCU1_RESET_I3CDMA] = { true, SCU1_RESET_CTRL2, BIT(31) }, 151 + [SCU1_RESET_PCIE2RST] = { false, SCU1_PCIE3_CTRL, BIT(0) }, 152 + }; 153 + 154 + static inline struct aspeed_reset *to_aspeed_reset(struct reset_controller_dev *rcdev) 155 + { 156 + return container_of(rcdev, struct aspeed_reset, rcdev); 157 + } 158 + 159 + static int aspeed_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) 160 + { 161 + struct aspeed_reset *rc = to_aspeed_reset(rcdev); 162 + void __iomem *reg_offset = rc->base + rc->info->signal[id].offset; 163 + 164 + if (rc->info->signal[id].dedicated_clr) { 165 + writel(rc->info->signal[id].bit, reg_offset); 166 + } else { 167 + guard(spinlock_irqsave)(&rc->lock); 168 + writel(readl(reg_offset) & ~rc->info->signal[id].bit, reg_offset); 169 + } 170 + 171 + return 0; 172 + } 173 + 174 + static int aspeed_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) 175 + { 176 + struct aspeed_reset *rc = to_aspeed_reset(rcdev); 177 + void __iomem *reg_offset = rc->base + rc->info->signal[id].offset; 178 + 179 + if (rc->info->signal[id].dedicated_clr) { 180 + writel(rc->info->signal[id].bit, reg_offset + 0x04); 181 + } else { 182 + guard(spinlock_irqsave)(&rc->lock); 183 + writel(readl(reg_offset) | rc->info->signal[id].bit, reg_offset); 184 + } 185 + 186 + return 0; 187 + } 188 + 189 + static int aspeed_reset_status(struct reset_controller_dev *rcdev, unsigned long id) 190 + { 191 + struct aspeed_reset *rc = to_aspeed_reset(rcdev); 192 + void __iomem *reg_offset = rc->base + rc->info->signal[id].offset; 193 + 194 + return (readl(reg_offset) & rc->info->signal[id].bit) ? 
1 : 0; 195 + } 196 + 197 + static const struct reset_control_ops aspeed_reset_ops = { 198 + .assert = aspeed_reset_assert, 199 + .deassert = aspeed_reset_deassert, 200 + .status = aspeed_reset_status, 201 + }; 202 + 203 + static int aspeed_reset_probe(struct auxiliary_device *adev, 204 + const struct auxiliary_device_id *id) 205 + { 206 + struct aspeed_reset *reset; 207 + struct device *dev = &adev->dev; 208 + 209 + reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL); 210 + if (!reset) 211 + return -ENOMEM; 212 + 213 + spin_lock_init(&reset->lock); 214 + 215 + reset->info = (struct aspeed_reset_info *)id->driver_data; 216 + reset->rcdev.owner = THIS_MODULE; 217 + reset->rcdev.nr_resets = reset->info->nr_resets; 218 + reset->rcdev.ops = &aspeed_reset_ops; 219 + reset->rcdev.of_node = dev->parent->of_node; 220 + reset->rcdev.dev = dev; 221 + reset->rcdev.of_reset_n_cells = 1; 222 + reset->base = (void __iomem *)adev->dev.platform_data; 223 + 224 + return devm_reset_controller_register(dev, &reset->rcdev); 225 + } 226 + 227 + static const struct aspeed_reset_info ast2700_reset0_info = { 228 + .nr_resets = ARRAY_SIZE(ast2700_reset0_signals), 229 + .signal = ast2700_reset0_signals, 230 + }; 231 + 232 + static const struct aspeed_reset_info ast2700_reset1_info = { 233 + .nr_resets = ARRAY_SIZE(ast2700_reset1_signals), 234 + .signal = ast2700_reset1_signals, 235 + }; 236 + 237 + static const struct auxiliary_device_id aspeed_reset_ids[] = { 238 + { .name = "clk_ast2700.reset0", .driver_data = (kernel_ulong_t)&ast2700_reset0_info }, 239 + { .name = "clk_ast2700.reset1", .driver_data = (kernel_ulong_t)&ast2700_reset1_info }, 240 + { } 241 + }; 242 + MODULE_DEVICE_TABLE(auxiliary, aspeed_reset_ids); 243 + 244 + static struct auxiliary_driver aspeed_reset_driver = { 245 + .probe = aspeed_reset_probe, 246 + .id_table = aspeed_reset_ids, 247 + }; 248 + 249 + module_auxiliary_driver(aspeed_reset_driver); 250 + 251 + MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>"); 252 + 
MODULE_DESCRIPTION("ASPEED SoC Reset Controller Driver"); 253 + MODULE_LICENSE("GPL");
+1
drivers/reset/reset-bcm6345.c
··· 119 119 120 120 static const struct of_device_id bcm6345_reset_of_match[] = { 121 121 { .compatible = "brcm,bcm6345-reset" }, 122 + { .compatible = "brcm,bcm63xx-ephy-ctrl" }, 122 123 { /* sentinel */ }, 123 124 }; 124 125
-1
drivers/reset/reset-intel-gw.c
··· 40 40 .reg_bits = 32, 41 41 .reg_stride = 4, 42 42 .val_bits = 32, 43 - .fast_io = true, 44 43 }; 45 44 46 45 /*
-1
drivers/reset/reset-qcom-pdc.c
··· 36 36 .reg_stride = 4, 37 37 .val_bits = 32, 38 38 .max_register = 0x20000, 39 - .fast_io = true, 40 39 }; 41 40 42 41 static const struct qcom_pdc_reset_map sdm845_pdc_resets[] = {
+39 -2
drivers/reset/reset-th1520.c
··· 14 14 /* register offset in VOSYS_REGMAP */ 15 15 #define TH1520_GPU_RST_CFG 0x0 16 16 #define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0) 17 + #define TH1520_DPU_RST_CFG 0x4 18 + #define TH1520_DSI0_RST_CFG 0x8 19 + #define TH1520_DSI1_RST_CFG 0xc 20 + #define TH1520_HDMI_RST_CFG 0x14 17 21 18 22 /* register values */ 19 23 #define TH1520_GPU_SW_GPU_RST BIT(0) 20 24 #define TH1520_GPU_SW_CLKGEN_RST BIT(1) 25 + #define TH1520_DPU_SW_DPU_HRST BIT(0) 26 + #define TH1520_DPU_SW_DPU_ARST BIT(1) 27 + #define TH1520_DPU_SW_DPU_CRST BIT(2) 28 + #define TH1520_DSI_SW_DSI_PRST BIT(0) 29 + #define TH1520_HDMI_SW_MAIN_RST BIT(0) 30 + #define TH1520_HDMI_SW_PRST BIT(1) 21 31 22 32 struct th1520_reset_priv { 23 33 struct reset_controller_dev rcdev; ··· 47 37 [TH1520_RESET_ID_GPU_CLKGEN] = { 48 38 .bit = TH1520_GPU_SW_CLKGEN_RST, 49 39 .reg = TH1520_GPU_RST_CFG, 50 - } 40 + }, 41 + [TH1520_RESET_ID_DPU_AHB] = { 42 + .bit = TH1520_DPU_SW_DPU_HRST, 43 + .reg = TH1520_DPU_RST_CFG, 44 + }, 45 + [TH1520_RESET_ID_DPU_AXI] = { 46 + .bit = TH1520_DPU_SW_DPU_ARST, 47 + .reg = TH1520_DPU_RST_CFG, 48 + }, 49 + [TH1520_RESET_ID_DPU_CORE] = { 50 + .bit = TH1520_DPU_SW_DPU_CRST, 51 + .reg = TH1520_DPU_RST_CFG, 52 + }, 53 + [TH1520_RESET_ID_DSI0_APB] = { 54 + .bit = TH1520_DSI_SW_DSI_PRST, 55 + .reg = TH1520_DSI0_RST_CFG, 56 + }, 57 + [TH1520_RESET_ID_DSI1_APB] = { 58 + .bit = TH1520_DSI_SW_DSI_PRST, 59 + .reg = TH1520_DSI1_RST_CFG, 60 + }, 61 + [TH1520_RESET_ID_HDMI] = { 62 + .bit = TH1520_HDMI_SW_MAIN_RST, 63 + .reg = TH1520_HDMI_RST_CFG, 64 + }, 65 + [TH1520_RESET_ID_HDMI_APB] = { 66 + .bit = TH1520_HDMI_SW_PRST, 67 + .reg = TH1520_HDMI_RST_CFG, 68 + }, 51 69 }; 52 70 53 71 static inline struct th1520_reset_priv * ··· 116 78 .reg_bits = 32, 117 79 .val_bits = 32, 118 80 .reg_stride = 4, 119 - .fast_io = true, 120 81 }; 121 82 122 83 static int th1520_reset_probe(struct platform_device *pdev)
-3
drivers/soc/apple/Kconfig
··· 8 8 tristate "Apple SoC mailboxes" 9 9 depends on PM 10 10 depends on ARCH_APPLE || (64BIT && COMPILE_TEST) 11 - default ARCH_APPLE 12 11 help 13 12 Apple SoCs have various co-processors required for certain 14 13 peripherals to work (NVMe, display controller, etc.). This ··· 20 21 tristate "Apple RTKit co-processor IPC protocol" 21 22 depends on APPLE_MAILBOX 22 23 depends on ARCH_APPLE || COMPILE_TEST 23 - default ARCH_APPLE 24 24 help 25 25 Apple SoCs such as the M1 come with various co-processors running 26 26 their proprietary RTKit operating system. This option enables support ··· 31 33 config APPLE_SART 32 34 tristate "Apple SART DMA address filter" 33 35 depends on ARCH_APPLE || COMPILE_TEST 34 - default ARCH_APPLE 35 36 help 36 37 Apple SART is a simple DMA address filter used on Apple SoCs such 37 38 as the M1. It is usually required for the NVMe coprocessor which does
+19
drivers/soc/apple/mailbox.c
··· 47 47 #define APPLE_ASC_MBOX_I2A_RECV0 0x830 48 48 #define APPLE_ASC_MBOX_I2A_RECV1 0x838 49 49 50 + #define APPLE_T8015_MBOX_A2I_CONTROL 0x108 51 + #define APPLE_T8015_MBOX_I2A_CONTROL 0x10c 52 + 50 53 #define APPLE_M3_MBOX_CONTROL_FULL BIT(16) 51 54 #define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17) 52 55 ··· 385 382 return 0; 386 383 } 387 384 385 + static const struct apple_mbox_hw apple_mbox_t8015_hw = { 386 + .control_full = APPLE_ASC_MBOX_CONTROL_FULL, 387 + .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY, 388 + 389 + .a2i_control = APPLE_T8015_MBOX_A2I_CONTROL, 390 + .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0, 391 + .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1, 392 + 393 + .i2a_control = APPLE_T8015_MBOX_I2A_CONTROL, 394 + .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0, 395 + .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1, 396 + 397 + .has_irq_controls = false, 398 + }; 399 + 388 400 static const struct apple_mbox_hw apple_mbox_asc_hw = { 389 401 .control_full = APPLE_ASC_MBOX_CONTROL_FULL, 390 402 .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY, ··· 436 418 437 419 static const struct of_device_id apple_mbox_of_match[] = { 438 420 { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw }, 421 + { .compatible = "apple,t8015-asc-mailbox", .data = &apple_mbox_t8015_hw }, 439 422 { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw }, 440 423 {} 441 424 };
+57 -3
drivers/soc/apple/sart.c
··· 25 25 26 26 #define APPLE_SART_MAX_ENTRIES 16 27 27 28 - /* This is probably a bitfield but the exact meaning of each bit is unknown. */ 29 - #define APPLE_SART_FLAGS_ALLOW 0xff 28 + /* SARTv0 registers */ 29 + #define APPLE_SART0_CONFIG(idx) (0x00 + 4 * (idx)) 30 + #define APPLE_SART0_CONFIG_FLAGS GENMASK(28, 24) 31 + #define APPLE_SART0_CONFIG_SIZE GENMASK(18, 0) 32 + #define APPLE_SART0_CONFIG_SIZE_SHIFT 12 33 + #define APPLE_SART0_CONFIG_SIZE_MAX GENMASK(18, 0) 34 + 35 + #define APPLE_SART0_PADDR(idx) (0x40 + 4 * (idx)) 36 + #define APPLE_SART0_PADDR_SHIFT 12 37 + 38 + #define APPLE_SART0_FLAGS_ALLOW 0xf 30 39 31 40 /* SARTv2 registers */ 32 41 #define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx)) ··· 47 38 #define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx)) 48 39 #define APPLE_SART2_PADDR_SHIFT 12 49 40 41 + #define APPLE_SART2_FLAGS_ALLOW 0xff 42 + 50 43 /* SARTv3 registers */ 51 44 #define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx)) 52 45 ··· 59 48 #define APPLE_SART3_SIZE_SHIFT 12 60 49 #define APPLE_SART3_SIZE_MAX GENMASK(29, 0) 61 50 51 + #define APPLE_SART3_FLAGS_ALLOW 0xff 52 + 62 53 struct apple_sart_ops { 63 54 void (*get_entry)(struct apple_sart *sart, int index, u8 *flags, 64 55 phys_addr_t *paddr, size_t *size); 65 56 void (*set_entry)(struct apple_sart *sart, int index, u8 flags, 66 57 phys_addr_t paddr_shifted, size_t size_shifted); 58 + /* This is probably a bitfield but the exact meaning of each bit is unknown. 
*/ 59 + unsigned int flags_allow; 67 60 unsigned int size_shift; 68 61 unsigned int paddr_shift; 69 62 size_t size_max; ··· 81 66 82 67 unsigned long protected_entries; 83 68 unsigned long used_entries; 69 + }; 70 + 71 + static void sart0_get_entry(struct apple_sart *sart, int index, u8 *flags, 72 + phys_addr_t *paddr, size_t *size) 73 + { 74 + u32 cfg = readl(sart->regs + APPLE_SART0_CONFIG(index)); 75 + phys_addr_t paddr_ = readl(sart->regs + APPLE_SART0_PADDR(index)); 76 + size_t size_ = FIELD_GET(APPLE_SART0_CONFIG_SIZE, cfg); 77 + 78 + *flags = FIELD_GET(APPLE_SART0_CONFIG_FLAGS, cfg); 79 + *size = size_ << APPLE_SART0_CONFIG_SIZE_SHIFT; 80 + *paddr = paddr_ << APPLE_SART0_PADDR_SHIFT; 81 + } 82 + 83 + static void sart0_set_entry(struct apple_sart *sart, int index, u8 flags, 84 + phys_addr_t paddr_shifted, size_t size_shifted) 85 + { 86 + u32 cfg; 87 + 88 + cfg = FIELD_PREP(APPLE_SART0_CONFIG_FLAGS, flags); 89 + cfg |= FIELD_PREP(APPLE_SART0_CONFIG_SIZE, size_shifted); 90 + 91 + writel(paddr_shifted, sart->regs + APPLE_SART0_PADDR(index)); 92 + writel(cfg, sart->regs + APPLE_SART0_CONFIG(index)); 93 + } 94 + 95 + static struct apple_sart_ops sart_ops_v0 = { 96 + .get_entry = sart0_get_entry, 97 + .set_entry = sart0_set_entry, 98 + .flags_allow = APPLE_SART0_FLAGS_ALLOW, 99 + .size_shift = APPLE_SART0_CONFIG_SIZE_SHIFT, 100 + .paddr_shift = APPLE_SART0_PADDR_SHIFT, 101 + .size_max = APPLE_SART0_CONFIG_SIZE_MAX, 84 102 }; 85 103 86 104 static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags, ··· 143 95 static struct apple_sart_ops sart_ops_v2 = { 144 96 .get_entry = sart2_get_entry, 145 97 .set_entry = sart2_set_entry, 98 + .flags_allow = APPLE_SART2_FLAGS_ALLOW, 146 99 .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT, 147 100 .paddr_shift = APPLE_SART2_PADDR_SHIFT, 148 101 .size_max = APPLE_SART2_CONFIG_SIZE_MAX, ··· 171 122 static struct apple_sart_ops sart_ops_v3 = { 172 123 .get_entry = sart3_get_entry, 173 124 .set_entry = sart3_set_entry, 125 
+ .flags_allow = APPLE_SART3_FLAGS_ALLOW, 174 126 .size_shift = APPLE_SART3_SIZE_SHIFT, 175 127 .paddr_shift = APPLE_SART3_PADDR_SHIFT, 176 128 .size_max = APPLE_SART3_SIZE_MAX, ··· 283 233 if (test_and_set_bit(i, &sart->used_entries)) 284 234 continue; 285 235 286 - ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr, 236 + ret = sart_set_entry(sart, i, sart->ops->flags_allow, paddr, 287 237 size); 288 238 if (ret) { 289 239 dev_dbg(sart->dev, ··· 363 313 { 364 314 .compatible = "apple,t8103-sart", 365 315 .data = &sart_ops_v2, 316 + }, 317 + { 318 + .compatible = "apple,t8015-sart", 319 + .data = &sart_ops_v0, 366 320 }, 367 321 {} 368 322 };
+3 -11
drivers/soc/aspeed/aspeed-lpc-ctrl.c
··· 10 10 #include <linux/mm.h> 11 11 #include <linux/module.h> 12 12 #include <linux/of_address.h> 13 + #include <linux/of_reserved_mem.h> 13 14 #include <linux/platform_device.h> 14 15 #include <linux/poll.h> 15 16 #include <linux/regmap.h> ··· 255 254 dev_set_drvdata(&pdev->dev, lpc_ctrl); 256 255 257 256 /* If memory-region is described in device tree then store */ 258 - node = of_parse_phandle(dev->of_node, "memory-region", 0); 259 - if (!node) { 260 - dev_dbg(dev, "Didn't find reserved memory\n"); 261 - } else { 262 - rc = of_address_to_resource(node, 0, &resm); 263 - of_node_put(node); 264 - if (rc) { 265 - dev_err(dev, "Couldn't address to resource for reserved memory\n"); 266 - return -ENXIO; 267 - } 268 - 257 + rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm); 258 + if (!rc) { 269 259 lpc_ctrl->mem_size = resource_size(&resm); 270 260 lpc_ctrl->mem_base = resm.start; 271 261
+3 -11
drivers/soc/aspeed/aspeed-p2a-ctrl.c
··· 19 19 #include <linux/module.h> 20 20 #include <linux/mutex.h> 21 21 #include <linux/of.h> 22 - #include <linux/of_address.h> 22 + #include <linux/of_reserved_mem.h> 23 23 #include <linux/platform_device.h> 24 24 #include <linux/regmap.h> 25 25 #include <linux/slab.h> ··· 334 334 struct aspeed_p2a_ctrl *misc_ctrl; 335 335 struct device *dev; 336 336 struct resource resm; 337 - struct device_node *node; 338 337 int rc = 0; 339 338 340 339 dev = &pdev->dev; ··· 345 346 mutex_init(&misc_ctrl->tracking); 346 347 347 348 /* optional. */ 348 - node = of_parse_phandle(dev->of_node, "memory-region", 0); 349 - if (node) { 350 - rc = of_address_to_resource(node, 0, &resm); 351 - of_node_put(node); 352 - if (rc) { 353 - dev_err(dev, "Couldn't address to resource for reserved memory\n"); 354 - return -ENODEV; 355 - } 356 - 349 + rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm); 350 + if (!rc) { 357 351 misc_ctrl->mem_size = resource_size(&resm); 358 352 misc_ctrl->mem_base = resm.start; 359 353 }
+4
drivers/soc/aspeed/aspeed-socinfo.c
··· 27 27 { "AST2620", 0x05010203 }, 28 28 { "AST2605", 0x05030103 }, 29 29 { "AST2625", 0x05030403 }, 30 + /* AST2700 */ 31 + { "AST2750", 0x06000003 }, 32 + { "AST2700", 0x06000103 }, 33 + { "AST2720", 0x06000203 }, 30 34 }; 31 35 32 36 static const char *siliconid_to_name(u32 siliconid)
+1 -1
drivers/soc/fsl/qbman/qman_test_stash.c
··· 103 103 { 104 104 int cpu; 105 105 106 - for_each_cpu(cpu, cpu_online_mask) { 106 + for_each_online_cpu(cpu) { 107 107 struct bstrap bstrap = { 108 108 .fn = fn, 109 109 .started = ATOMIC_INIT(0)
+70 -59
drivers/soc/fsl/qe/gpio.c
··· 12 12 #include <linux/spinlock.h> 13 13 #include <linux/err.h> 14 14 #include <linux/io.h> 15 - #include <linux/of.h> 16 - #include <linux/gpio/legacy-of-mm-gpiochip.h> 17 15 #include <linux/gpio/consumer.h> 18 16 #include <linux/gpio/driver.h> 19 17 #include <linux/slab.h> 20 18 #include <linux/export.h> 21 - #include <linux/property.h> 19 + #include <linux/platform_device.h> 22 20 23 21 #include <soc/fsl/qe/qe.h> 24 22 23 + #define PIN_MASK(gpio) (1UL << (QE_PIO_PINS - 1 - (gpio))) 24 + 25 25 struct qe_gpio_chip { 26 - struct of_mm_gpio_chip mm_gc; 26 + struct gpio_chip gc; 27 + void __iomem *regs; 27 28 spinlock_t lock; 28 29 29 30 /* shadowed data register to clear/set bits safely */ ··· 34 33 struct qe_pio_regs saved_regs; 35 34 }; 36 35 37 - static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) 36 + static void qe_gpio_save_regs(struct qe_gpio_chip *qe_gc) 38 37 { 39 - struct qe_gpio_chip *qe_gc = 40 - container_of(mm_gc, struct qe_gpio_chip, mm_gc); 41 - struct qe_pio_regs __iomem *regs = mm_gc->regs; 38 + struct qe_pio_regs __iomem *regs = qe_gc->regs; 42 39 43 40 qe_gc->cpdata = ioread32be(&regs->cpdata); 44 41 qe_gc->saved_regs.cpdata = qe_gc->cpdata; ··· 49 50 50 51 static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio) 51 52 { 52 - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 53 - struct qe_pio_regs __iomem *regs = mm_gc->regs; 54 - u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); 53 + struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc); 54 + struct qe_pio_regs __iomem *regs = qe_gc->regs; 55 + u32 pin_mask = PIN_MASK(gpio); 55 56 56 57 return !!(ioread32be(&regs->cpdata) & pin_mask); 57 58 } 58 59 59 60 static int qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) 60 61 { 61 - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 62 62 struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc); 63 - struct qe_pio_regs __iomem *regs = mm_gc->regs; 63 + struct qe_pio_regs __iomem *regs = qe_gc->regs; 64 64 unsigned long 
flags; 65 - u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio); 65 + u32 pin_mask = PIN_MASK(gpio); 66 66 67 67 spin_lock_irqsave(&qe_gc->lock, flags); 68 68 ··· 80 82 static int qe_gpio_set_multiple(struct gpio_chip *gc, 81 83 unsigned long *mask, unsigned long *bits) 82 84 { 83 - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 84 85 struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc); 85 - struct qe_pio_regs __iomem *regs = mm_gc->regs; 86 + struct qe_pio_regs __iomem *regs = qe_gc->regs; 86 87 unsigned long flags; 87 88 int i; 88 89 ··· 92 95 break; 93 96 if (__test_and_clear_bit(i, mask)) { 94 97 if (test_bit(i, bits)) 95 - qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i)); 98 + qe_gc->cpdata |= PIN_MASK(i); 96 99 else 97 - qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i)); 100 + qe_gc->cpdata &= ~PIN_MASK(i); 98 101 } 99 102 } 100 103 ··· 107 110 108 111 static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) 109 112 { 110 - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 111 113 struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc); 112 114 unsigned long flags; 113 115 114 116 spin_lock_irqsave(&qe_gc->lock, flags); 115 117 116 - __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0); 118 + __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0); 117 119 118 120 spin_unlock_irqrestore(&qe_gc->lock, flags); 119 121 ··· 121 125 122 126 static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) 123 127 { 124 - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); 125 128 struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc); 126 129 unsigned long flags; 127 130 ··· 128 133 129 134 spin_lock_irqsave(&qe_gc->lock, flags); 130 135 131 - __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0); 136 + __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0); 132 137 133 138 spin_unlock_irqrestore(&qe_gc->lock, flags); 134 139 ··· 234 239 void qe_pin_set_dedicated(struct qe_pin *qe_pin) 235 240 { 236 241 struct 
qe_gpio_chip *qe_gc = qe_pin->controller; 237 - struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; 242 + struct qe_pio_regs __iomem *regs = qe_gc->regs; 238 243 struct qe_pio_regs *sregs = &qe_gc->saved_regs; 239 244 int pin = qe_pin->num; 240 245 u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1)); ··· 263 268 264 269 iowrite32be(qe_gc->cpdata, &regs->cpdata); 265 270 qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1); 266 - 267 271 spin_unlock_irqrestore(&qe_gc->lock, flags); 268 272 } 269 273 EXPORT_SYMBOL(qe_pin_set_dedicated); ··· 277 283 void qe_pin_set_gpio(struct qe_pin *qe_pin) 278 284 { 279 285 struct qe_gpio_chip *qe_gc = qe_pin->controller; 280 - struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs; 286 + struct qe_pio_regs __iomem *regs = qe_gc->regs; 281 287 unsigned long flags; 282 288 283 289 spin_lock_irqsave(&qe_gc->lock, flags); ··· 289 295 } 290 296 EXPORT_SYMBOL(qe_pin_set_gpio); 291 297 292 - static int __init qe_add_gpiochips(void) 298 + static int qe_gpio_probe(struct platform_device *ofdev) 293 299 { 294 - struct device_node *np; 300 + struct device *dev = &ofdev->dev; 301 + struct device_node *np = dev->of_node; 302 + struct qe_gpio_chip *qe_gc; 303 + struct gpio_chip *gc; 295 304 296 - for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") { 297 - int ret; 298 - struct qe_gpio_chip *qe_gc; 299 - struct of_mm_gpio_chip *mm_gc; 300 - struct gpio_chip *gc; 305 + qe_gc = devm_kzalloc(dev, sizeof(*qe_gc), GFP_KERNEL); 306 + if (!qe_gc) 307 + return -ENOMEM; 301 308 302 - qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL); 303 - if (!qe_gc) { 304 - ret = -ENOMEM; 305 - goto err; 306 - } 309 + spin_lock_init(&qe_gc->lock); 307 310 308 - spin_lock_init(&qe_gc->lock); 311 + gc = &qe_gc->gc; 309 312 310 - mm_gc = &qe_gc->mm_gc; 311 - gc = &mm_gc->gc; 313 + gc->base = -1; 314 + gc->ngpio = QE_PIO_PINS; 315 + gc->direction_input = qe_gpio_dir_in; 316 + gc->direction_output = qe_gpio_dir_out; 317 + gc->get = qe_gpio_get; 318 + gc->set = 
qe_gpio_set; 319 + gc->set_multiple = qe_gpio_set_multiple; 320 + gc->parent = dev; 321 + gc->owner = THIS_MODULE; 312 322 313 - mm_gc->save_regs = qe_gpio_save_regs; 314 - gc->ngpio = QE_PIO_PINS; 315 - gc->direction_input = qe_gpio_dir_in; 316 - gc->direction_output = qe_gpio_dir_out; 317 - gc->get = qe_gpio_get; 318 - gc->set = qe_gpio_set; 319 - gc->set_multiple = qe_gpio_set_multiple; 323 + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); 324 + if (!gc->label) 325 + return -ENOMEM; 320 326 321 - ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc); 322 - if (ret) 323 - goto err; 324 - continue; 325 - err: 326 - pr_err("%pOF: registration failed with status %d\n", 327 - np, ret); 328 - kfree(qe_gc); 329 - /* try others anyway */ 330 - } 331 - return 0; 327 + qe_gc->regs = devm_of_iomap(dev, np, 0, NULL); 328 + if (IS_ERR(qe_gc->regs)) 329 + return PTR_ERR(qe_gc->regs); 330 + 331 + qe_gpio_save_regs(qe_gc); 332 + 333 + return devm_gpiochip_add_data(dev, gc, qe_gc); 332 334 } 333 - arch_initcall(qe_add_gpiochips); 335 + 336 + static const struct of_device_id qe_gpio_match[] = { 337 + { 338 + .compatible = "fsl,mpc8323-qe-pario-bank", 339 + }, 340 + {}, 341 + }; 342 + MODULE_DEVICE_TABLE(of, qe_gpio_match); 343 + 344 + static struct platform_driver qe_gpio_driver = { 345 + .probe = qe_gpio_probe, 346 + .driver = { 347 + .name = "qe-gpio", 348 + .of_match_table = qe_gpio_match, 349 + }, 350 + }; 351 + 352 + static int __init qe_gpio_init(void) 353 + { 354 + return platform_driver_register(&qe_gpio_driver); 355 + } 356 + arch_initcall(qe_gpio_init);
+1 -1
drivers/soc/hisilicon/kunpeng_hccs.c
··· 1464 1464 goto out; 1465 1465 if (!all_in_idle) { 1466 1466 ret = -EBUSY; 1467 - dev_err(hdev->dev, "please don't decrese lanes on high load with %s, ret = %d.\n", 1467 + dev_err(hdev->dev, "please don't decrease lanes on high load with %s, ret = %d.\n", 1468 1468 hccs_port_type_to_name(hdev, port_type), ret); 1469 1469 goto out; 1470 1470 }
+23
drivers/soc/mediatek/mtk-svs.c
··· 2165 2165 return dev; 2166 2166 } 2167 2167 2168 + static void svs_put_device(void *_dev) 2169 + { 2170 + struct device *dev = _dev; 2171 + 2172 + put_device(dev); 2173 + } 2174 + 2168 2175 static int svs_mt8192_platform_probe(struct svs_platform *svsp) 2169 2176 { 2170 2177 struct device *dev; 2171 2178 u32 idx; 2179 + int ret; 2172 2180 2173 2181 svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); 2174 2182 if (IS_ERR(svsp->rst)) ··· 2187 2179 if (IS_ERR(dev)) 2188 2180 return dev_err_probe(svsp->dev, PTR_ERR(dev), 2189 2181 "failed to get lvts device\n"); 2182 + put_device(dev); 2190 2183 2191 2184 for (idx = 0; idx < svsp->bank_max; idx++) { 2192 2185 struct svs_bank *svsb = &svsp->banks[idx]; ··· 2197 2188 case SVSB_SWID_CPU_LITTLE: 2198 2189 case SVSB_SWID_CPU_BIG: 2199 2190 svsb->opp_dev = get_cpu_device(bdata->cpu_id); 2191 + get_device(svsb->opp_dev); 2200 2192 break; 2201 2193 case SVSB_SWID_CCI: 2202 2194 svsb->opp_dev = svs_add_device_link(svsp, "cci"); ··· 2217 2207 return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), 2218 2208 "failed to get OPP device for bank %d\n", 2219 2209 idx); 2210 + 2211 + ret = devm_add_action_or_reset(svsp->dev, svs_put_device, 2212 + svsb->opp_dev); 2213 + if (ret) 2214 + return ret; 2220 2215 } 2221 2216 2222 2217 return 0; ··· 2231 2216 { 2232 2217 struct device *dev; 2233 2218 u32 idx; 2219 + int ret; 2234 2220 2235 2221 dev = svs_add_device_link(svsp, "thermal-sensor"); 2236 2222 if (IS_ERR(dev)) 2237 2223 return dev_err_probe(svsp->dev, PTR_ERR(dev), 2238 2224 "failed to get thermal device\n"); 2225 + put_device(dev); 2239 2226 2240 2227 for (idx = 0; idx < svsp->bank_max; idx++) { 2241 2228 struct svs_bank *svsb = &svsp->banks[idx]; ··· 2247 2230 case SVSB_SWID_CPU_LITTLE: 2248 2231 case SVSB_SWID_CPU_BIG: 2249 2232 svsb->opp_dev = get_cpu_device(bdata->cpu_id); 2233 + get_device(svsb->opp_dev); 2250 2234 break; 2251 2235 case SVSB_SWID_CCI: 2252 2236 svsb->opp_dev = svs_add_device_link(svsp, 
"cci"); ··· 2264 2246 return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), 2265 2247 "failed to get OPP device for bank %d\n", 2266 2248 idx); 2249 + 2250 + ret = devm_add_action_or_reset(svsp->dev, svs_put_device, 2251 + svsb->opp_dev); 2252 + if (ret) 2253 + return ret; 2267 2254 } 2268 2255 2269 2256 return 0;
+3
drivers/soc/qcom/icc-bwmon.c
··· 656 656 if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE) 657 657 target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0); 658 658 659 + if (IS_ERR(target_opp)) 660 + return IRQ_HANDLED; 661 + 659 662 bwmon->target_kbps = bw_kbps; 660 663 661 664 bw_kbps--;
-1
drivers/soc/qcom/llcc-qcom.c
··· 4409 4409 .reg_bits = 32, 4410 4410 .reg_stride = 4, 4411 4411 .val_bits = 32, 4412 - .fast_io = true, 4413 4412 }; 4414 4413 4415 4414 base = devm_platform_ioremap_resource(pdev, index);
+9 -11
drivers/soc/qcom/mdt_loader.c
··· 304 304 } 305 305 EXPORT_SYMBOL_GPL(qcom_mdt_pas_init); 306 306 307 - static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_name) 307 + static bool qcom_mdt_bins_are_split(const struct firmware *fw) 308 308 { 309 309 const struct elf32_phdr *phdrs; 310 310 const struct elf32_hdr *ehdr; ··· 333 333 } 334 334 335 335 static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, 336 - const char *fw_name, int pas_id, void *mem_region, 336 + const char *fw_name, void *mem_region, 337 337 phys_addr_t mem_phys, size_t mem_size, 338 - phys_addr_t *reloc_base, bool pas_init) 338 + phys_addr_t *reloc_base) 339 339 { 340 340 const struct elf32_phdr *phdrs; 341 341 const struct elf32_phdr *phdr; ··· 355 355 if (!mdt_header_valid(fw)) 356 356 return -EINVAL; 357 357 358 - is_split = qcom_mdt_bins_are_split(fw, fw_name); 358 + is_split = qcom_mdt_bins_are_split(fw); 359 359 ehdr = (struct elf32_hdr *)fw->data; 360 360 phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff); 361 361 ··· 460 460 if (ret) 461 461 return ret; 462 462 463 - return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, 464 - mem_size, reloc_base, true); 463 + return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys, 464 + mem_size, reloc_base); 465 465 } 466 466 EXPORT_SYMBOL_GPL(qcom_mdt_load); 467 467 ··· 470 470 * @dev: device handle to associate resources with 471 471 * @fw: firmware object for the mdt file 472 472 * @firmware: name of the firmware, for construction of segment file names 473 - * @pas_id: PAS identifier 474 473 * @mem_region: allocated memory region to load firmware into 475 474 * @mem_phys: physical address of allocated memory region 476 475 * @mem_size: size of the allocated memory region ··· 478 479 * Returns 0 on success, negative errno otherwise. 
479 480 */ 480 481 int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, 481 - const char *firmware, int pas_id, 482 - void *mem_region, phys_addr_t mem_phys, 482 + const char *firmware, void *mem_region, phys_addr_t mem_phys, 483 483 size_t mem_size, phys_addr_t *reloc_base) 484 484 { 485 - return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys, 486 - mem_size, reloc_base, false); 485 + return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys, 486 + mem_size, reloc_base); 487 487 } 488 488 EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init); 489 489
+487 -19
drivers/soc/qcom/qcom-geni-se.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - // Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 2 + /* 3 + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 4 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 5 + */ 3 6 4 7 /* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */ 5 8 #define __DISABLE_TRACE_MMIO__ 6 9 7 10 #include <linux/acpi.h> 11 + #include <linux/bitfield.h> 8 12 #include <linux/clk.h> 13 + #include <linux/firmware.h> 9 14 #include <linux/slab.h> 10 15 #include <linux/dma-mapping.h> 11 16 #include <linux/io.h> ··· 115 110 static const char * const icc_path_names[] = {"qup-core", "qup-config", 116 111 "qup-memory"}; 117 112 118 - #define QUP_HW_VER_REG 0x4 113 + static const char * const protocol_name[] = { "None", "SPI", "UART", "I2C", "I3C", "SPI SLAVE" }; 114 + 115 + /** 116 + * struct se_fw_hdr - Serial Engine firmware configuration header 117 + * 118 + * This structure defines the SE firmware header, which together with the 119 + * firmware payload is stored in individual ELF segments. 120 + * 121 + * @magic: Set to 'SEFW'. 122 + * @version: Structure version number. 123 + * @core_version: QUPV3 hardware version. 124 + * @serial_protocol: Encoded in GENI_FW_REVISION. 125 + * @fw_version: Firmware version, from GENI_FW_REVISION. 126 + * @cfg_version: Configuration version, from GENI_INIT_CFG_REVISION. 127 + * @fw_size_in_items: Number of 32-bit words in GENI_FW_RAM. 128 + * @fw_offset: Byte offset to GENI_FW_RAM array. 129 + * @cfg_size_in_items: Number of GENI_FW_CFG index/value pairs. 130 + * @cfg_idx_offset: Byte offset to GENI_FW_CFG index array. 131 + * @cfg_val_offset: Byte offset to GENI_FW_CFG values array. 
132 + */ 133 + struct se_fw_hdr { 134 + __le32 magic; 135 + __le32 version; 136 + __le32 core_version; 137 + __le16 serial_protocol; 138 + __le16 fw_version; 139 + __le16 cfg_version; 140 + __le16 fw_size_in_items; 141 + __le16 fw_offset; 142 + __le16 cfg_size_in_items; 143 + __le16 cfg_idx_offset; 144 + __le16 cfg_val_offset; 145 + }; 146 + 147 + /*Magic numbers*/ 148 + #define SE_MAGIC_NUM 0x57464553 149 + 150 + #define MAX_GENI_CFG_RAMn_CNT 455 151 + 152 + #define MI_PBT_NON_PAGED_SEGMENT 0x0 153 + #define MI_PBT_HASH_SEGMENT 0x2 154 + #define MI_PBT_NOTUSED_SEGMENT 0x3 155 + #define MI_PBT_SHARED_SEGMENT 0x4 156 + 157 + #define MI_PBT_FLAG_PAGE_MODE BIT(20) 158 + #define MI_PBT_FLAG_SEGMENT_TYPE GENMASK(26, 24) 159 + #define MI_PBT_FLAG_ACCESS_TYPE GENMASK(23, 21) 160 + 161 + #define MI_PBT_PAGE_MODE_VALUE(x) FIELD_GET(MI_PBT_FLAG_PAGE_MODE, x) 162 + 163 + #define MI_PBT_SEGMENT_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_SEGMENT_TYPE, x) 164 + 165 + #define MI_PBT_ACCESS_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_ACCESS_TYPE, x) 166 + 167 + #define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \ 168 + M_IO_DATA_DEASSERT_EN | \ 169 + M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \ 170 + M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \ 171 + M_TX_FIFO_WR_ERR_EN) 172 + 173 + /* Common QUPV3 registers */ 174 + #define QUPV3_HW_VER_REG 0x4 175 + #define QUPV3_SE_AHB_M_CFG 0x118 176 + #define QUPV3_COMMON_CFG 0x120 177 + #define QUPV3_COMMON_CGC_CTRL 0x21c 178 + 179 + /* QUPV3_COMMON_CFG fields */ 180 + #define FAST_SWITCH_TO_HIGH_DISABLE BIT(0) 181 + 182 + /* QUPV3_SE_AHB_M_CFG fields */ 183 + #define AHB_M_CLK_CGC_ON BIT(0) 184 + 185 + /* QUPV3_COMMON_CGC_CTRL fields */ 186 + #define COMMON_CSR_SLV_CLK_CGC_ON BIT(0) 119 187 120 188 /* Common SE registers */ 121 - #define GENI_INIT_CFG_REVISION 0x0 122 - #define GENI_S_INIT_CFG_REVISION 0x4 123 - #define GENI_OUTPUT_CTRL 0x24 124 - #define GENI_CGC_CTRL 0x28 125 - #define GENI_CLK_CTRL_RO 0x60 126 - #define GENI_FW_S_REVISION_RO 0x6c 189 + 
#define SE_GENI_INIT_CFG_REVISION 0x0 190 + #define SE_GENI_S_INIT_CFG_REVISION 0x4 191 + #define SE_GENI_CGC_CTRL 0x28 192 + #define SE_GENI_CLK_CTRL_RO 0x60 193 + #define SE_GENI_FW_S_REVISION_RO 0x6c 194 + #define SE_GENI_CFG_REG0 0x100 127 195 #define SE_GENI_BYTE_GRAN 0x254 128 196 #define SE_GENI_TX_PACKING_CFG0 0x260 129 197 #define SE_GENI_TX_PACKING_CFG1 0x264 130 198 #define SE_GENI_RX_PACKING_CFG0 0x284 131 199 #define SE_GENI_RX_PACKING_CFG1 0x288 132 - #define SE_GENI_M_GP_LENGTH 0x910 133 - #define SE_GENI_S_GP_LENGTH 0x914 200 + #define SE_GENI_S_IRQ_ENABLE 0x644 134 201 #define SE_DMA_TX_PTR_L 0xc30 135 202 #define SE_DMA_TX_PTR_H 0xc34 136 203 #define SE_DMA_TX_ATTR 0xc38 ··· 219 142 #define SE_DMA_RX_IRQ_EN 0xd48 220 143 #define SE_DMA_RX_IRQ_EN_SET 0xd4c 221 144 #define SE_DMA_RX_IRQ_EN_CLR 0xd50 222 - #define SE_DMA_RX_LEN_IN 0xd54 223 145 #define SE_DMA_RX_MAX_BURST 0xd5c 224 146 #define SE_DMA_RX_FLUSH 0xd60 225 147 #define SE_GSI_EVENT_EN 0xe18 226 148 #define SE_IRQ_EN 0xe1c 227 149 #define SE_DMA_GENERAL_CFG 0xe30 150 + #define SE_GENI_FW_REVISION 0x1000 151 + #define SE_GENI_S_FW_REVISION 0x1004 152 + #define SE_GENI_CFG_RAMN 0x1010 153 + #define SE_GENI_CLK_CTRL 0x2000 154 + #define SE_DMA_IF_EN 0x2004 155 + #define SE_FIFO_IF_DISABLE 0x2008 156 + 157 + /* GENI_FW_REVISION_RO fields */ 158 + #define FW_REV_VERSION_MSK GENMASK(7, 0) 228 159 229 160 /* GENI_OUTPUT_CTRL fields */ 230 161 #define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0) ··· 264 179 /* SE_DMA_GENERAL_CFG */ 265 180 #define DMA_RX_CLK_CGC_ON BIT(0) 266 181 #define DMA_TX_CLK_CGC_ON BIT(1) 267 - #define DMA_AHB_SLV_CFG_ON BIT(2) 182 + #define DMA_AHB_SLV_CLK_CGC_ON BIT(2) 268 183 #define AHB_SEC_SLV_CLK_CGC_ON BIT(3) 269 184 #define DUMMY_RX_NON_BUFFERABLE BIT(4) 270 185 #define RX_DMA_ZERO_PADDING_EN BIT(5) 271 186 #define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6) 272 187 #define RX_DMA_IRQ_DELAY_SHFT 6 188 + 189 + /* GENI_CLK_CTRL fields */ 190 + #define SER_CLK_SEL BIT(0) 191 + 192 
+ /* GENI_DMA_IF_EN fields */ 193 + #define DMA_IF_EN BIT(0) 194 + 195 + #define geni_setbits32(_addr, _v) writel(readl(_addr) | (_v), _addr) 196 + #define geni_clrbits32(_addr, _v) writel(readl(_addr) & ~(_v), _addr) 273 197 274 198 /** 275 199 * geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version ··· 290 196 { 291 197 struct geni_wrapper *wrapper = se->wrapper; 292 198 293 - return readl_relaxed(wrapper->base + QUP_HW_VER_REG); 199 + return readl_relaxed(wrapper->base + QUPV3_HW_VER_REG); 294 200 } 295 201 EXPORT_SYMBOL_GPL(geni_se_get_qup_hw_version); 296 202 ··· 314 220 { 315 221 u32 val; 316 222 317 - val = readl_relaxed(base + GENI_CGC_CTRL); 223 + val = readl_relaxed(base + SE_GENI_CGC_CTRL); 318 224 val |= DEFAULT_CGC_EN; 319 - writel_relaxed(val, base + GENI_CGC_CTRL); 225 + writel_relaxed(val, base + SE_GENI_CGC_CTRL); 320 226 321 227 val = readl_relaxed(base + SE_DMA_GENERAL_CFG); 322 - val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON; 228 + val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON; 323 229 val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON; 324 230 writel_relaxed(val, base + SE_DMA_GENERAL_CFG); 325 231 ··· 752 658 } 753 659 EXPORT_SYMBOL_GPL(geni_se_clk_freq_match); 754 660 755 - #define GENI_SE_DMA_DONE_EN BIT(0) 756 - #define GENI_SE_DMA_EOT_EN BIT(1) 757 - #define GENI_SE_DMA_AHB_ERR_EN BIT(2) 661 + #define GENI_SE_DMA_DONE_EN BIT(0) 662 + #define GENI_SE_DMA_EOT_EN BIT(1) 663 + #define GENI_SE_DMA_AHB_ERR_EN BIT(2) 664 + #define GENI_SE_DMA_RESET_DONE_EN BIT(3) 665 + #define GENI_SE_DMA_FLUSH_DONE BIT(4) 666 + 758 667 #define GENI_SE_DMA_EOT_BUF BIT(0) 759 668 760 669 /** ··· 987 890 return 0; 988 891 } 989 892 EXPORT_SYMBOL_GPL(geni_icc_disable); 893 + 894 + /** 895 + * geni_find_protocol_fw() - Locate and validate SE firmware for a protocol. 896 + * @dev: Pointer to the device structure. 897 + * @fw: Pointer to the firmware image. 898 + * @protocol: Expected serial engine protocol type. 
899 + * 900 + * Identifies the appropriate firmware image or configuration required for a 901 + * specific communication protocol instance running on a Qualcomm GENI 902 + * controller. 903 + * 904 + * Return: pointer to a valid 'struct se_fw_hdr' if found, or NULL otherwise. 905 + */ 906 + static struct se_fw_hdr *geni_find_protocol_fw(struct device *dev, const struct firmware *fw, 907 + enum geni_se_protocol_type protocol) 908 + { 909 + const struct elf32_hdr *ehdr; 910 + const struct elf32_phdr *phdrs; 911 + const struct elf32_phdr *phdr; 912 + struct se_fw_hdr *sefw; 913 + u32 fw_end, cfg_idx_end, cfg_val_end; 914 + u16 fw_size; 915 + int i; 916 + 917 + if (!fw || fw->size < sizeof(struct elf32_hdr)) 918 + return NULL; 919 + 920 + ehdr = (const struct elf32_hdr *)fw->data; 921 + phdrs = (const struct elf32_phdr *)(fw->data + ehdr->e_phoff); 922 + 923 + /* 924 + * The firmware is expected to have at least two program headers (segments). 925 + * One for metadata and the other for the actual protocol-specific firmware. 
926 + */ 927 + if (ehdr->e_phnum < 2) { 928 + dev_err(dev, "Invalid firmware: less than 2 program headers\n"); 929 + return NULL; 930 + } 931 + 932 + for (i = 0; i < ehdr->e_phnum; i++) { 933 + phdr = &phdrs[i]; 934 + 935 + if (fw->size < phdr->p_offset + phdr->p_filesz) { 936 + dev_err(dev, "Firmware size (%zu) < expected offset (%u) + size (%u)\n", 937 + fw->size, phdr->p_offset, phdr->p_filesz); 938 + return NULL; 939 + } 940 + 941 + if (phdr->p_type != PT_LOAD || !phdr->p_memsz) 942 + continue; 943 + 944 + if (MI_PBT_PAGE_MODE_VALUE(phdr->p_flags) != MI_PBT_NON_PAGED_SEGMENT || 945 + MI_PBT_SEGMENT_TYPE_VALUE(phdr->p_flags) == MI_PBT_HASH_SEGMENT || 946 + MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_NOTUSED_SEGMENT || 947 + MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_SHARED_SEGMENT) 948 + continue; 949 + 950 + if (phdr->p_filesz < sizeof(struct se_fw_hdr)) 951 + continue; 952 + 953 + sefw = (struct se_fw_hdr *)(fw->data + phdr->p_offset); 954 + fw_size = le16_to_cpu(sefw->fw_size_in_items); 955 + fw_end = le16_to_cpu(sefw->fw_offset) + fw_size * sizeof(u32); 956 + cfg_idx_end = le16_to_cpu(sefw->cfg_idx_offset) + 957 + le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u8); 958 + cfg_val_end = le16_to_cpu(sefw->cfg_val_offset) + 959 + le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u32); 960 + 961 + if (le32_to_cpu(sefw->magic) != SE_MAGIC_NUM || le32_to_cpu(sefw->version) != 1) 962 + continue; 963 + 964 + if (le32_to_cpu(sefw->serial_protocol) != protocol) 965 + continue; 966 + 967 + if (fw_size % 2 != 0) { 968 + fw_size++; 969 + sefw->fw_size_in_items = cpu_to_le16(fw_size); 970 + } 971 + 972 + if (fw_size >= MAX_GENI_CFG_RAMn_CNT) { 973 + dev_err(dev, 974 + "Firmware size (%u) exceeds max allowed RAMn count (%u)\n", 975 + fw_size, MAX_GENI_CFG_RAMn_CNT); 976 + continue; 977 + } 978 + 979 + if (fw_end > phdr->p_filesz || cfg_idx_end > phdr->p_filesz || 980 + cfg_val_end > phdr->p_filesz) { 981 + dev_err(dev, "Truncated or corrupt SE FW segment found at 
index %d\n", i); 982 + continue; 983 + } 984 + 985 + return sefw; 986 + } 987 + 988 + dev_err(dev, "Failed to get %s protocol firmware\n", protocol_name[protocol]); 989 + return NULL; 990 + } 991 + 992 + /** 993 + * geni_configure_xfer_mode() - Set the transfer mode. 994 + * @se: Pointer to the concerned serial engine. 995 + * @mode: SE data transfer mode. 996 + * 997 + * Set the transfer mode to either FIFO or DMA according to the mode specified 998 + * by the protocol driver. 999 + * 1000 + * Return: 0 if successful, otherwise return an error value. 1001 + */ 1002 + static int geni_configure_xfer_mode(struct geni_se *se, enum geni_se_xfer_mode mode) 1003 + { 1004 + /* Configure SE FIFO, DMA or GSI mode. */ 1005 + switch (mode) { 1006 + case GENI_GPI_DMA: 1007 + geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN); 1008 + writel(0x0, se->base + SE_IRQ_EN); 1009 + writel(DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN, 1010 + se->base + SE_GSI_EVENT_EN); 1011 + break; 1012 + 1013 + case GENI_SE_FIFO: 1014 + geni_clrbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN); 1015 + writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN, 1016 + se->base + SE_IRQ_EN); 1017 + writel(0x0, se->base + SE_GSI_EVENT_EN); 1018 + break; 1019 + 1020 + case GENI_SE_DMA: 1021 + geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN); 1022 + writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN, 1023 + se->base + SE_IRQ_EN); 1024 + writel(0x0, se->base + SE_GSI_EVENT_EN); 1025 + break; 1026 + 1027 + default: 1028 + dev_err(se->dev, "Invalid geni-se transfer mode: %d\n", mode); 1029 + return -EINVAL; 1030 + } 1031 + return 0; 1032 + } 1033 + 1034 + /** 1035 + * geni_enable_interrupts() - Enable interrupts. 1036 + * @se: Pointer to the concerned serial engine. 1037 + * 1038 + * Enable the required interrupts during the firmware load process. 
1039 + */ 1040 + static void geni_enable_interrupts(struct geni_se *se) 1041 + { 1042 + u32 val; 1043 + 1044 + /* Enable required interrupts. */ 1045 + writel(M_COMMON_GENI_M_IRQ_EN, se->base + SE_GENI_M_IRQ_EN); 1046 + 1047 + val = S_CMD_OVERRUN_EN | S_ILLEGAL_CMD_EN | S_CMD_CANCEL_EN | S_CMD_ABORT_EN | 1048 + S_GP_IRQ_0_EN | S_GP_IRQ_1_EN | S_GP_IRQ_2_EN | S_GP_IRQ_3_EN | 1049 + S_RX_FIFO_WR_ERR_EN | S_RX_FIFO_RD_ERR_EN; 1050 + writel(val, se->base + SE_GENI_S_IRQ_ENABLE); 1051 + 1052 + /* DMA mode configuration. */ 1053 + val = GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN | GENI_SE_DMA_DONE_EN; 1054 + writel(val, se->base + SE_DMA_TX_IRQ_EN_SET); 1055 + val = GENI_SE_DMA_FLUSH_DONE | GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN | 1056 + GENI_SE_DMA_DONE_EN; 1057 + writel(val, se->base + SE_DMA_RX_IRQ_EN_SET); 1058 + } 1059 + 1060 + /** 1061 + * geni_write_fw_revision() - Write the firmware revision. 1062 + * @se: Pointer to the concerned serial engine. 1063 + * @serial_protocol: serial protocol type. 1064 + * @fw_version: QUP firmware version. 1065 + * 1066 + * Write the firmware revision and protocol into the respective register. 1067 + */ 1068 + static void geni_write_fw_revision(struct geni_se *se, u16 serial_protocol, u16 fw_version) 1069 + { 1070 + u32 reg; 1071 + 1072 + reg = FIELD_PREP(FW_REV_PROTOCOL_MSK, serial_protocol); 1073 + reg |= FIELD_PREP(FW_REV_VERSION_MSK, fw_version); 1074 + 1075 + writel(reg, se->base + SE_GENI_FW_REVISION); 1076 + writel(reg, se->base + SE_GENI_S_FW_REVISION); 1077 + } 1078 + 1079 + /** 1080 + * geni_load_se_fw() - Load Serial Engine specific firmware. 1081 + * @se: Pointer to the concerned serial engine. 1082 + * @fw: Pointer to the firmware structure. 1083 + * @mode: SE data transfer mode. 1084 + * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C). 1085 + * 1086 + * Load the protocol firmware into the IRAM of the Serial Engine. 
1087 + * 1088 + * Return: 0 if successful, otherwise return an error value. 1089 + */ 1090 + static int geni_load_se_fw(struct geni_se *se, const struct firmware *fw, 1091 + enum geni_se_xfer_mode mode, enum geni_se_protocol_type protocol) 1092 + { 1093 + const u32 *fw_data, *cfg_val_arr; 1094 + const u8 *cfg_idx_arr; 1095 + u32 i, reg_value; 1096 + int ret; 1097 + struct se_fw_hdr *hdr; 1098 + 1099 + hdr = geni_find_protocol_fw(se->dev, fw, protocol); 1100 + if (!hdr) 1101 + return -EINVAL; 1102 + 1103 + fw_data = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->fw_offset)); 1104 + cfg_idx_arr = (const u8 *)hdr + le16_to_cpu(hdr->cfg_idx_offset); 1105 + cfg_val_arr = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->cfg_val_offset)); 1106 + 1107 + ret = geni_icc_set_bw(se); 1108 + if (ret) 1109 + return ret; 1110 + 1111 + ret = geni_icc_enable(se); 1112 + if (ret) 1113 + return ret; 1114 + 1115 + ret = geni_se_resources_on(se); 1116 + if (ret) 1117 + goto out_icc_disable; 1118 + 1119 + /* 1120 + * Disable high-priority interrupts until all currently executing 1121 + * low-priority interrupts have been fully handled. 1122 + */ 1123 + geni_setbits32(se->wrapper->base + QUPV3_COMMON_CFG, FAST_SWITCH_TO_HIGH_DISABLE); 1124 + 1125 + /* Set AHB_M_CLK_CGC_ON to indicate hardware controls se-wrapper cgc clock. */ 1126 + geni_setbits32(se->wrapper->base + QUPV3_SE_AHB_M_CFG, AHB_M_CLK_CGC_ON); 1127 + 1128 + /* Let hardware to control common cgc. */ 1129 + geni_setbits32(se->wrapper->base + QUPV3_COMMON_CGC_CTRL, COMMON_CSR_SLV_CLK_CGC_ON); 1130 + 1131 + /* 1132 + * Setting individual bits in GENI_OUTPUT_CTRL activates corresponding output lines, 1133 + * allowing the hardware to drive data as configured. 
1134 + */ 1135 + writel(0x0, se->base + GENI_OUTPUT_CTRL); 1136 + 1137 + /* Set SCLK and HCLK to program RAM */ 1138 + geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF); 1139 + writel(0x0, se->base + SE_GENI_CLK_CTRL); 1140 + geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF); 1141 + 1142 + /* Enable required clocks for DMA CSR, TX and RX. */ 1143 + reg_value = AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON | 1144 + DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON; 1145 + geni_setbits32(se->base + SE_DMA_GENERAL_CFG, reg_value); 1146 + 1147 + /* Let hardware control CGC by default. */ 1148 + writel(DEFAULT_CGC_EN, se->base + SE_GENI_CGC_CTRL); 1149 + 1150 + /* Set version of the configuration register part of firmware. */ 1151 + writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_INIT_CFG_REVISION); 1152 + writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_S_INIT_CFG_REVISION); 1153 + 1154 + /* Configure GENI primitive table. */ 1155 + for (i = 0; i < le16_to_cpu(hdr->cfg_size_in_items); i++) 1156 + writel(cfg_val_arr[i], 1157 + se->base + SE_GENI_CFG_REG0 + (cfg_idx_arr[i] * sizeof(u32))); 1158 + 1159 + /* Configure condition for assertion of RX_RFR_WATERMARK condition. */ 1160 + reg_value = geni_se_get_rx_fifo_depth(se); 1161 + writel(reg_value - 2, se->base + SE_GENI_RX_RFR_WATERMARK_REG); 1162 + 1163 + /* Let hardware control CGC */ 1164 + geni_setbits32(se->base + GENI_OUTPUT_CTRL, DEFAULT_IO_OUTPUT_CTRL_MSK); 1165 + 1166 + ret = geni_configure_xfer_mode(se, mode); 1167 + if (ret) 1168 + goto out_resources_off; 1169 + 1170 + geni_enable_interrupts(se); 1171 + 1172 + geni_write_fw_revision(se, le16_to_cpu(hdr->serial_protocol), le16_to_cpu(hdr->fw_version)); 1173 + 1174 + /* Program RAM address space. */ 1175 + memcpy_toio(se->base + SE_GENI_CFG_RAMN, fw_data, 1176 + le16_to_cpu(hdr->fw_size_in_items) * sizeof(u32)); 1177 + 1178 + /* Put default values on GENI's output pads. 
*/ 1179 + writel_relaxed(0x1, se->base + GENI_FORCE_DEFAULT_REG); 1180 + 1181 + /* Toggle SCLK/HCLK from high to low to finalize RAM programming and apply config. */ 1182 + geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF); 1183 + geni_setbits32(se->base + SE_GENI_CLK_CTRL, SER_CLK_SEL); 1184 + geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF); 1185 + 1186 + /* Serial engine DMA interface is enabled. */ 1187 + geni_setbits32(se->base + SE_DMA_IF_EN, DMA_IF_EN); 1188 + 1189 + /* Enable or disable FIFO interface of the serial engine. */ 1190 + if (mode == GENI_SE_FIFO) 1191 + geni_clrbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE); 1192 + else 1193 + geni_setbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE); 1194 + 1195 + out_resources_off: 1196 + geni_se_resources_off(se); 1197 + 1198 + out_icc_disable: 1199 + geni_icc_disable(se); 1200 + return ret; 1201 + } 1202 + 1203 + /** 1204 + * geni_load_se_firmware() - Load firmware for SE based on protocol 1205 + * @se: Pointer to the concerned serial engine. 1206 + * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C). 1207 + * 1208 + * Retrieves the firmware name from device properties and sets the transfer mode 1209 + * (FIFO or GSI DMA) based on device tree configuration. Enforces FIFO mode for 1210 + * UART protocol due to lack of GSI DMA support. Requests the firmware and loads 1211 + * it into the SE. 1212 + * 1213 + * Return: 0 on success, negative error code on failure. 
1214 + */ 1215 + int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol) 1216 + { 1217 + const char *fw_name; 1218 + const struct firmware *fw; 1219 + enum geni_se_xfer_mode mode = GENI_SE_FIFO; 1220 + int ret; 1221 + 1222 + if (protocol >= ARRAY_SIZE(protocol_name)) { 1223 + dev_err(se->dev, "Invalid geni-se protocol: %d", protocol); 1224 + return -EINVAL; 1225 + } 1226 + 1227 + ret = device_property_read_string(se->wrapper->dev, "firmware-name", &fw_name); 1228 + if (ret) { 1229 + dev_err(se->dev, "Failed to read firmware-name property: %d\n", ret); 1230 + return -EINVAL; 1231 + } 1232 + 1233 + if (of_property_read_bool(se->dev->of_node, "qcom,enable-gsi-dma")) 1234 + mode = GENI_GPI_DMA; 1235 + 1236 + /* GSI mode is not supported by the UART driver; therefore, setting FIFO mode */ 1237 + if (protocol == GENI_SE_UART) 1238 + mode = GENI_SE_FIFO; 1239 + 1240 + ret = request_firmware(&fw, fw_name, se->dev); 1241 + if (ret) { 1242 + if (ret == -ENOENT) 1243 + return -EPROBE_DEFER; 1244 + 1245 + dev_err(se->dev, "Failed to request firmware '%s' for protocol %d: ret: %d\n", 1246 + fw_name, protocol, ret); 1247 + return ret; 1248 + } 1249 + 1250 + ret = geni_load_se_fw(se, fw, mode, protocol); 1251 + release_firmware(fw); 1252 + 1253 + if (ret) { 1254 + dev_err(se->dev, "Failed to load SE firmware for protocol %d: ret: %d\n", 1255 + protocol, ret); 1256 + return ret; 1257 + } 1258 + 1259 + dev_dbg(se->dev, "Firmware load for %s protocol is successful for xfer mode: %d\n", 1260 + protocol_name[protocol], mode); 1261 + return 0; 1262 + } 1263 + EXPORT_SYMBOL_GPL(geni_load_se_firmware); 990 1264 991 1265 static int geni_se_probe(struct platform_device *pdev) 992 1266 {
+1
drivers/soc/qcom/qcom_pd_mapper.c
··· 584 584 { .compatible = "qcom,sm8450", .data = sm8350_domains, }, 585 585 { .compatible = "qcom,sm8550", .data = sm8550_domains, }, 586 586 { .compatible = "qcom,sm8650", .data = sm8550_domains, }, 587 + { .compatible = "qcom,sm8750", .data = sm8550_domains, }, 587 588 { .compatible = "qcom,x1e80100", .data = x1e80100_domains, }, 588 589 { .compatible = "qcom,x1p42100", .data = x1e80100_domains, }, 589 590 {},
-1
drivers/soc/qcom/ramp_controller.c
··· 229 229 .reg_stride = 4, 230 230 .val_bits = 32, 231 231 .max_register = 0x68, 232 - .fast_io = true, 233 232 }; 234 233 235 234 static const struct reg_sequence msm8976_cfg_dfs_sid[] = {
+1 -1
drivers/soc/qcom/rpm_master_stats.c
··· 78 78 if (count < 0) 79 79 return count; 80 80 81 - data = devm_kzalloc(dev, count * sizeof(*data), GFP_KERNEL); 81 + data = devm_kcalloc(dev, count, sizeof(*data), GFP_KERNEL); 82 82 if (!data) 83 83 return -ENOMEM; 84 84
+2 -5
drivers/soc/qcom/rpmh-rsc.c
··· 453 453 454 454 trace_rpmh_tx_done(drv, i, req); 455 455 456 - /* 457 - * If wake tcs was re-purposed for sending active 458 - * votes, clear AMC trigger & enable modes and 456 + /* Clear AMC trigger & enable modes and 459 457 * disable interrupt for this TCS 460 458 */ 461 - if (!drv->tcs[ACTIVE_TCS].num_tcs) 462 - __tcs_set_trigger(drv, i, false); 459 + __tcs_set_trigger(drv, i, false); 463 460 skip: 464 461 /* Reclaim the TCS */ 465 462 write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
+1 -1
drivers/soc/qcom/smem.c
··· 898 898 if (IS_ERR_OR_NULL(ptable)) 899 899 return SMEM_ITEM_COUNT; 900 900 901 - info = (struct smem_info *)&ptable->entry[ptable->num_entries]; 901 + info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)]; 902 902 if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) 903 903 return SMEM_ITEM_COUNT; 904 904
+13
drivers/soc/renesas/Kconfig
··· 39 39 bool 40 40 select ARCH_RCAR_GEN3 41 41 42 + config ARCH_RCAR_GEN5 43 + bool 44 + select ARCH_RCAR_GEN4 45 + 42 46 config ARCH_RMOBILE 43 47 bool 44 48 select PM ··· 352 348 help 353 349 This enables support for the Renesas R-Car V4M SoC. 354 350 351 + config ARCH_R8A78000 352 + bool "ARM64 Platform support for R8A78000 (R-Car X5H)" 353 + default y if ARCH_RENESAS 354 + default ARCH_RENESAS 355 + select ARCH_RCAR_GEN5 356 + help 357 + This enables support for the Renesas R-Car X5H SoC. 358 + 355 359 config ARCH_R9A07G043 356 360 bool "ARM64 Platform support for R9A07G043U (RZ/G2UL)" 357 361 default y if ARCH_RENESAS ··· 461 449 462 450 config SYSC_RZ 463 451 bool "System controller for RZ SoCs" if COMPILE_TEST 452 + select MFD_SYSCON 464 453 465 454 config SYSC_R9A08G045 466 455 bool "Renesas System controller support for R9A08G045 (RZ/G3S)" if COMPILE_TEST
+1
drivers/soc/renesas/r9a08g045-sysc.c
··· 20 20 21 21 const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = { 22 22 .soc_id_init_data = &rzg3s_sysc_soc_id_init_data, 23 + .max_register = 0xe20, 23 24 };
+1
drivers/soc/renesas/r9a09g047-sys.c
··· 64 64 65 65 const struct rz_sysc_init_data rzg3e_sys_init_data = { 66 66 .soc_id_init_data = &rzg3e_sys_soc_id_init_data, 67 + .max_register = 0x170c, 67 68 };
+1
drivers/soc/renesas/r9a09g057-sys.c
··· 64 64 65 65 const struct rz_sysc_init_data rzv2h_sys_init_data = { 66 66 .soc_id_init_data = &rzv2h_sys_soc_id_init_data, 67 + .max_register = 0x170c, 67 68 };
+12
drivers/soc/renesas/renesas-soc.c
··· 36 36 .name = "R-Car Gen4", 37 37 }; 38 38 39 + static const struct renesas_family fam_rcar_gen5 __initconst __maybe_unused = { 40 + .name = "R-Car Gen5", 41 + }; 42 + 39 43 static const struct renesas_family fam_rmobile __initconst __maybe_unused = { 40 44 .name = "R-Mobile", 41 45 .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */ ··· 270 266 .id = 0x5d, 271 267 }; 272 268 269 + static const struct renesas_soc soc_rcar_x5h __initconst __maybe_unused = { 270 + .family = &fam_rcar_gen5, 271 + .id = 0x60, 272 + }; 273 + 273 274 static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = { 274 275 .family = &fam_shmobile, 275 276 .id = 0x37, ··· 386 377 #endif 387 378 #ifdef CONFIG_ARCH_R8A779H0 388 379 { .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m }, 380 + #endif 381 + #ifdef CONFIG_ARCH_R8A78000 382 + { .compatible = "renesas,r8a78000", .data = &soc_rcar_x5h }, 389 383 #endif 390 384 #ifdef CONFIG_ARCH_R9A07G043 391 385 #ifdef CONFIG_RISCV
+29 -1
drivers/soc/renesas/rz-sysc.c
··· 5 5 * Copyright (C) 2024 Renesas Electronics Corp. 6 6 */ 7 7 8 + #include <linux/cleanup.h> 8 9 #include <linux/io.h> 10 + #include <linux/mfd/syscon.h> 9 11 #include <linux/of.h> 10 12 #include <linux/platform_device.h> 13 + #include <linux/regmap.h> 14 + #include <linux/slab.h> 11 15 #include <linux/sys_soc.h> 12 16 13 17 #include "rz-sysc.h" ··· 104 100 105 101 static int rz_sysc_probe(struct platform_device *pdev) 106 102 { 103 + const struct rz_sysc_init_data *data; 107 104 const struct of_device_id *match; 108 105 struct device *dev = &pdev->dev; 106 + struct regmap *regmap; 109 107 struct rz_sysc *sysc; 108 + int ret; 109 + 110 + struct regmap_config *regmap_cfg __free(kfree) = kzalloc(sizeof(*regmap_cfg), GFP_KERNEL); 111 + if (!regmap_cfg) 112 + return -ENOMEM; 110 113 111 114 match = of_match_node(rz_sysc_match, dev->of_node); 112 115 if (!match) 113 116 return -ENODEV; 117 + 118 + data = match->data; 114 119 115 120 sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL); 116 121 if (!sysc) ··· 130 117 return PTR_ERR(sysc->base); 131 118 132 119 sysc->dev = dev; 133 - return rz_sysc_soc_init(sysc, match); 120 + ret = rz_sysc_soc_init(sysc, match); 121 + if (ret) 122 + return ret; 123 + 124 + regmap_cfg->name = "rz_sysc_regs"; 125 + regmap_cfg->reg_bits = 32; 126 + regmap_cfg->reg_stride = 4; 127 + regmap_cfg->val_bits = 32; 128 + regmap_cfg->fast_io = true; 129 + regmap_cfg->max_register = data->max_register; 130 + 131 + regmap = devm_regmap_init_mmio(dev, sysc->base, regmap_cfg); 132 + if (IS_ERR(regmap)) 133 + return PTR_ERR(regmap); 134 + 135 + return of_syscon_register_regmap(dev->of_node, regmap); 134 136 } 135 137 136 138 static struct platform_driver rz_sysc_driver = {
+2
drivers/soc/renesas/rz-sysc.h
··· 34 34 /** 35 35 * struct rz_sysc_init_data - RZ SYSC initialization data 36 36 * @soc_id_init_data: RZ SYSC SoC ID initialization data 37 + * @max_register: Maximum SYSC register offset to be used by the regmap config 37 38 */ 38 39 struct rz_sysc_init_data { 39 40 const struct rz_sysc_soc_id_init_data *soc_id_init_data; 41 + u32 max_register; 40 42 }; 41 43 42 44 extern const struct rz_sysc_init_data rzg3e_sys_init_data;
+254 -22
drivers/soc/samsung/exynos-pmu.c
··· 7 7 8 8 #include <linux/array_size.h> 9 9 #include <linux/arm-smccc.h> 10 + #include <linux/bitmap.h> 10 11 #include <linux/cpuhotplug.h> 12 + #include <linux/cpu_pm.h> 11 13 #include <linux/of.h> 12 14 #include <linux/of_address.h> 13 15 #include <linux/mfd/core.h> ··· 17 15 #include <linux/of_platform.h> 18 16 #include <linux/platform_device.h> 19 17 #include <linux/delay.h> 18 + #include <linux/reboot.h> 20 19 #include <linux/regmap.h> 21 20 22 21 #include <linux/soc/samsung/exynos-regs-pmu.h> ··· 38 35 const struct exynos_pmu_data *pmu_data; 39 36 struct regmap *pmureg; 40 37 struct regmap *pmuintrgen; 38 + /* 39 + * Serialization lock for CPU hot plug and cpuidle ACPM hint 40 + * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot 41 + * flags. 42 + */ 43 + raw_spinlock_t cpupm_lock; 44 + unsigned long *in_cpuhp; 45 + bool sys_insuspend; 46 + bool sys_inreboot; 41 47 }; 42 48 43 49 void __iomem *pmu_base_addr; ··· 233 221 .reg_read = tensor_sec_reg_read, 234 222 .reg_write = tensor_sec_reg_write, 235 223 .reg_update_bits = tensor_sec_update_bits, 224 + .use_raw_spinlock = true, 225 + }; 226 + 227 + static const struct regmap_config regmap_pmu_intr = { 228 + .name = "pmu_intr_gen", 229 + .reg_bits = 32, 230 + .reg_stride = 4, 231 + .val_bits = 32, 232 + .use_raw_spinlock = true, 236 233 }; 237 234 238 235 static const struct exynos_pmu_data gs101_pmu_data = { ··· 351 330 EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle); 352 331 353 332 /* 354 - * CPU_INFORM register hint values which are used by 355 - * EL3 firmware (el3mon). 333 + * CPU_INFORM register "hint" values are required to be programmed in addition to 334 + * the standard PSCI calls to have functional CPU hotplug and CPU idle states. 335 + * This is required to workaround limitations in the el3mon/ACPM firmware. 
356 336 */ 357 337 #define CPU_INFORM_CLEAR 0 358 338 #define CPU_INFORM_C2 1 359 339 360 - static int gs101_cpuhp_pmu_online(unsigned int cpu) 340 + /* 341 + * __gs101_cpu_pmu_ prefix functions are common code shared by CPU PM notifiers 342 + * (CPUIdle) and CPU hotplug callbacks. Functions should be called with IRQs 343 + * disabled and cpupm_lock held. 344 + */ 345 + static int __gs101_cpu_pmu_online(unsigned int cpu) 361 346 { 362 347 unsigned int cpuhint = smp_processor_id(); 363 348 u32 reg, mask; ··· 385 358 return 0; 386 359 } 387 360 388 - static int gs101_cpuhp_pmu_offline(unsigned int cpu) 361 + /* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */ 362 + static int gs101_cpu_pmu_online(void) 389 363 { 390 - u32 reg, mask; 364 + int cpu; 365 + 366 + raw_spin_lock(&pmu_context->cpupm_lock); 367 + 368 + if (pmu_context->sys_inreboot) { 369 + raw_spin_unlock(&pmu_context->cpupm_lock); 370 + return NOTIFY_OK; 371 + } 372 + 373 + cpu = smp_processor_id(); 374 + __gs101_cpu_pmu_online(cpu); 375 + raw_spin_unlock(&pmu_context->cpupm_lock); 376 + 377 + return NOTIFY_OK; 378 + } 379 + 380 + /* Called from CPU hot plug callback with IRQs enabled */ 381 + static int gs101_cpuhp_pmu_online(unsigned int cpu) 382 + { 383 + unsigned long flags; 384 + 385 + raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags); 386 + 387 + __gs101_cpu_pmu_online(cpu); 388 + /* 389 + * Mark this CPU as having finished the hotplug. 390 + * This means this CPU can now enter C2 idle state. 
391 + */ 392 + clear_bit(cpu, pmu_context->in_cpuhp); 393 + raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags); 394 + 395 + return 0; 396 + } 397 + 398 + /* Common function shared by both CPU hot plug and CPUIdle */ 399 + static int __gs101_cpu_pmu_offline(unsigned int cpu) 400 + { 391 401 unsigned int cpuhint = smp_processor_id(); 402 + u32 reg, mask; 392 403 393 404 /* set cpu inform hint */ 394 405 regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint), ··· 444 379 regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg); 445 380 regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR, 446 381 reg & mask); 382 + 383 + return 0; 384 + } 385 + 386 + /* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */ 387 + static int gs101_cpu_pmu_offline(void) 388 + { 389 + int cpu; 390 + 391 + raw_spin_lock(&pmu_context->cpupm_lock); 392 + cpu = smp_processor_id(); 393 + 394 + if (test_bit(cpu, pmu_context->in_cpuhp)) { 395 + raw_spin_unlock(&pmu_context->cpupm_lock); 396 + return NOTIFY_BAD; 397 + } 398 + 399 + /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */ 400 + if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) { 401 + raw_spin_unlock(&pmu_context->cpupm_lock); 402 + return NOTIFY_OK; 403 + } 404 + 405 + __gs101_cpu_pmu_offline(cpu); 406 + raw_spin_unlock(&pmu_context->cpupm_lock); 407 + 408 + return NOTIFY_OK; 409 + } 410 + 411 + /* Called from CPU hot plug callback with IRQs enabled */ 412 + static int gs101_cpuhp_pmu_offline(unsigned int cpu) 413 + { 414 + unsigned long flags; 415 + 416 + raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags); 417 + /* 418 + * Mark this CPU as entering hotplug. So as not to confuse 419 + * ACPM the CPU entering hotplug should not enter C2 idle state. 
420 + */ 421 + set_bit(cpu, pmu_context->in_cpuhp); 422 + __gs101_cpu_pmu_offline(cpu); 423 + 424 + raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags); 425 + 426 + return 0; 427 + } 428 + 429 + static int gs101_cpu_pm_notify_callback(struct notifier_block *self, 430 + unsigned long action, void *v) 431 + { 432 + switch (action) { 433 + case CPU_PM_ENTER: 434 + return gs101_cpu_pmu_offline(); 435 + 436 + case CPU_PM_EXIT: 437 + return gs101_cpu_pmu_online(); 438 + } 439 + 440 + return NOTIFY_OK; 441 + } 442 + 443 + static struct notifier_block gs101_cpu_pm_notifier = { 444 + .notifier_call = gs101_cpu_pm_notify_callback, 445 + /* 446 + * We want to be called first, as the ACPM hint and handshake is what 447 + * puts the CPU into C2. 448 + */ 449 + .priority = INT_MAX 450 + }; 451 + 452 + static int exynos_cpupm_reboot_notifier(struct notifier_block *nb, 453 + unsigned long event, void *v) 454 + { 455 + unsigned long flags; 456 + 457 + switch (event) { 458 + case SYS_POWER_OFF: 459 + case SYS_RESTART: 460 + raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags); 461 + pmu_context->sys_inreboot = true; 462 + raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags); 463 + break; 464 + } 465 + 466 + return NOTIFY_OK; 467 + } 468 + 469 + static struct notifier_block exynos_cpupm_reboot_nb = { 470 + .priority = INT_MAX, 471 + .notifier_call = exynos_cpupm_reboot_notifier, 472 + }; 473 + 474 + static int setup_cpuhp_and_cpuidle(struct device *dev) 475 + { 476 + struct device_node *intr_gen_node; 477 + struct resource intrgen_res; 478 + void __iomem *virt_addr; 479 + int ret, cpu; 480 + 481 + intr_gen_node = of_parse_phandle(dev->of_node, 482 + "google,pmu-intr-gen-syscon", 0); 483 + if (!intr_gen_node) { 484 + /* 485 + * To maintain support for older DTs that didn't specify syscon 486 + * phandle just issue a warning rather than fail to probe. 
487 + */ 488 + dev_warn(dev, "pmu-intr-gen syscon unavailable\n"); 489 + return 0; 490 + } 491 + 492 + /* 493 + * To avoid lockdep issues (CPU PM notifiers use raw spinlocks) create 494 + * a mmio regmap for pmu-intr-gen that uses raw spinlocks instead of 495 + * syscon provided regmap. 496 + */ 497 + ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res); 498 + of_node_put(intr_gen_node); 499 + 500 + virt_addr = devm_ioremap(dev, intrgen_res.start, 501 + resource_size(&intrgen_res)); 502 + if (!virt_addr) 503 + return -ENOMEM; 504 + 505 + pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr, 506 + &regmap_pmu_intr); 507 + if (IS_ERR(pmu_context->pmuintrgen)) { 508 + dev_err(dev, "failed to initialize pmu-intr-gen regmap\n"); 509 + return PTR_ERR(pmu_context->pmuintrgen); 510 + } 511 + 512 + /* register custom mmio regmap with syscon */ 513 + ret = of_syscon_register_regmap(intr_gen_node, 514 + pmu_context->pmuintrgen); 515 + if (ret) 516 + return ret; 517 + 518 + pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(), 519 + GFP_KERNEL); 520 + if (!pmu_context->in_cpuhp) 521 + return -ENOMEM; 522 + 523 + raw_spin_lock_init(&pmu_context->cpupm_lock); 524 + pmu_context->sys_inreboot = false; 525 + pmu_context->sys_insuspend = false; 526 + 527 + /* set PMU to power on */ 528 + for_each_online_cpu(cpu) 529 + gs101_cpuhp_pmu_online(cpu); 530 + 531 + /* register CPU hotplug callbacks */ 532 + cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare", 533 + gs101_cpuhp_pmu_online, NULL); 534 + 535 + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online", 536 + NULL, gs101_cpuhp_pmu_offline); 537 + 538 + /* register CPU PM notifiers for cpuidle */ 539 + cpu_pm_register_notifier(&gs101_cpu_pm_notifier); 540 + register_reboot_notifier(&exynos_cpupm_reboot_nb); 447 541 return 0; 448 542 } 449 543 ··· 659 435 pmu_context->dev = dev; 660 436 661 437 if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) { 662 - 
pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node, 663 - "google,pmu-intr-gen-syscon"); 664 - if (IS_ERR(pmu_context->pmuintrgen)) { 665 - /* 666 - * To maintain support for older DTs that didn't specify syscon phandle 667 - * just issue a warning rather than fail to probe. 668 - */ 669 - dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n"); 670 - } else { 671 - cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, 672 - "soc/exynos-pmu:prepare", 673 - gs101_cpuhp_pmu_online, NULL); 674 - 675 - cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 676 - "soc/exynos-pmu:online", 677 - NULL, gs101_cpuhp_pmu_offline); 678 - } 438 + ret = setup_cpuhp_and_cpuidle(dev); 439 + if (ret) 440 + return ret; 679 441 } 680 442 681 443 if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init) ··· 681 471 return 0; 682 472 } 683 473 474 + static int exynos_cpupm_suspend_noirq(struct device *dev) 475 + { 476 + raw_spin_lock(&pmu_context->cpupm_lock); 477 + pmu_context->sys_insuspend = true; 478 + raw_spin_unlock(&pmu_context->cpupm_lock); 479 + return 0; 480 + } 481 + 482 + static int exynos_cpupm_resume_noirq(struct device *dev) 483 + { 484 + raw_spin_lock(&pmu_context->cpupm_lock); 485 + pmu_context->sys_insuspend = false; 486 + raw_spin_unlock(&pmu_context->cpupm_lock); 487 + return 0; 488 + } 489 + 490 + static const struct dev_pm_ops cpupm_pm_ops = { 491 + NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq, 492 + exynos_cpupm_resume_noirq) 493 + }; 494 + 684 495 static struct platform_driver exynos_pmu_driver = { 685 496 .driver = { 686 497 .name = "exynos-pmu", 687 498 .of_match_table = exynos_pmu_of_device_ids, 499 + .pm = pm_sleep_ptr(&cpupm_pm_ops), 688 500 }, 689 501 .probe = exynos_pmu_probe, 690 502 };
+14
drivers/soc/sunxi/sunxi_sram.c
··· 12 12 13 13 #include <linux/debugfs.h> 14 14 #include <linux/io.h> 15 + #include <linux/mfd/syscon.h> 15 16 #include <linux/module.h> 16 17 #include <linux/of.h> 17 18 #include <linux/of_address.h> ··· 311 310 .has_ths_offset = true, 312 311 }; 313 312 313 + static const struct sunxi_sramc_variant sun55i_a523_sramc_variant = { 314 + .num_emac_clocks = 2, 315 + }; 316 + 314 317 #define SUNXI_SRAM_THS_OFFSET_REG 0x0 315 318 #define SUNXI_SRAM_EMAC_CLOCK_REG 0x30 316 319 #define SUNXI_SYS_LDO_CTRL_REG 0x150 ··· 368 363 const struct sunxi_sramc_variant *variant; 369 364 struct device *dev = &pdev->dev; 370 365 struct regmap *regmap; 366 + int ret; 371 367 372 368 sram_dev = &pdev->dev; 373 369 ··· 386 380 regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config); 387 381 if (IS_ERR(regmap)) 388 382 return PTR_ERR(regmap); 383 + 384 + ret = of_syscon_register_regmap(dev->of_node, regmap); 385 + if (ret) 386 + return ret; 389 387 } 390 388 391 389 of_platform_populate(dev->of_node, NULL, NULL, dev); ··· 439 429 { 440 430 .compatible = "allwinner,sun50i-h616-system-control", 441 431 .data = &sun50i_h616_sramc_variant, 432 + }, 433 + { 434 + .compatible = "allwinner,sun55i-a523-system-control", 435 + .data = &sun55i_a523_sramc_variant, 442 436 }, 443 437 { }, 444 438 };
+122
drivers/soc/tegra/fuse/fuse-tegra30.c
··· 117 117 #endif 118 118 119 119 #ifdef CONFIG_ARCH_TEGRA_114_SOC 120 + static const struct nvmem_cell_info tegra114_fuse_cells[] = { 121 + { 122 + .name = "tsensor-cpu1", 123 + .offset = 0x084, 124 + .bytes = 4, 125 + .bit_offset = 0, 126 + .nbits = 32, 127 + }, { 128 + .name = "tsensor-cpu2", 129 + .offset = 0x088, 130 + .bytes = 4, 131 + .bit_offset = 0, 132 + .nbits = 32, 133 + }, { 134 + .name = "tsensor-common", 135 + .offset = 0x08c, 136 + .bytes = 4, 137 + .bit_offset = 0, 138 + .nbits = 32, 139 + }, { 140 + .name = "tsensor-cpu0", 141 + .offset = 0x098, 142 + .bytes = 4, 143 + .bit_offset = 0, 144 + .nbits = 32, 145 + }, { 146 + .name = "xusb-pad-calibration", 147 + .offset = 0x0f0, 148 + .bytes = 4, 149 + .bit_offset = 0, 150 + .nbits = 32, 151 + }, { 152 + .name = "tsensor-cpu3", 153 + .offset = 0x12c, 154 + .bytes = 4, 155 + .bit_offset = 0, 156 + .nbits = 32, 157 + }, { 158 + .name = "tsensor-gpu", 159 + .offset = 0x154, 160 + .bytes = 4, 161 + .bit_offset = 0, 162 + .nbits = 32, 163 + }, { 164 + .name = "tsensor-mem0", 165 + .offset = 0x158, 166 + .bytes = 4, 167 + .bit_offset = 0, 168 + .nbits = 32, 169 + }, { 170 + .name = "tsensor-mem1", 171 + .offset = 0x15c, 172 + .bytes = 4, 173 + .bit_offset = 0, 174 + .nbits = 32, 175 + }, { 176 + .name = "tsensor-pllx", 177 + .offset = 0x160, 178 + .bytes = 4, 179 + .bit_offset = 0, 180 + .nbits = 32, 181 + }, 182 + }; 183 + 184 + static const struct nvmem_cell_lookup tegra114_fuse_lookups[] = { 185 + { 186 + .nvmem_name = "fuse", 187 + .cell_name = "xusb-pad-calibration", 188 + .dev_id = "7009f000.padctl", 189 + .con_id = "calibration", 190 + }, { 191 + .nvmem_name = "fuse", 192 + .cell_name = "tsensor-common", 193 + .dev_id = "700e2000.thermal-sensor", 194 + .con_id = "common", 195 + }, { 196 + .nvmem_name = "fuse", 197 + .cell_name = "tsensor-cpu0", 198 + .dev_id = "700e2000.thermal-sensor", 199 + .con_id = "cpu0", 200 + }, { 201 + .nvmem_name = "fuse", 202 + .cell_name = "tsensor-cpu1", 203 + .dev_id = 
"700e2000.thermal-sensor", 204 + .con_id = "cpu1", 205 + }, { 206 + .nvmem_name = "fuse", 207 + .cell_name = "tsensor-cpu2", 208 + .dev_id = "700e2000.thermal-sensor", 209 + .con_id = "cpu2", 210 + }, { 211 + .nvmem_name = "fuse", 212 + .cell_name = "tsensor-cpu3", 213 + .dev_id = "700e2000.thermal-sensor", 214 + .con_id = "cpu3", 215 + }, { 216 + .nvmem_name = "fuse", 217 + .cell_name = "tsensor-mem0", 218 + .dev_id = "700e2000.thermal-sensor", 219 + .con_id = "mem0", 220 + }, { 221 + .nvmem_name = "fuse", 222 + .cell_name = "tsensor-mem1", 223 + .dev_id = "700e2000.thermal-sensor", 224 + .con_id = "mem1", 225 + }, { 226 + .nvmem_name = "fuse", 227 + .cell_name = "tsensor-gpu", 228 + .dev_id = "700e2000.thermal-sensor", 229 + .con_id = "gpu", 230 + }, { 231 + .nvmem_name = "fuse", 232 + .cell_name = "tsensor-pllx", 233 + .dev_id = "700e2000.thermal-sensor", 234 + .con_id = "pllx", 235 + }, 236 + }; 237 + 120 238 static const struct tegra_fuse_info tegra114_fuse_info = { 121 239 .read = tegra30_fuse_read, 122 240 .size = 0x2a0, ··· 245 127 .init = tegra30_fuse_init, 246 128 .speedo_init = tegra114_init_speedo_data, 247 129 .info = &tegra114_fuse_info, 130 + .lookups = tegra114_fuse_lookups, 131 + .num_lookups = ARRAY_SIZE(tegra114_fuse_lookups), 132 + .cells = tegra114_fuse_cells, 133 + .num_cells = ARRAY_SIZE(tegra114_fuse_cells), 248 134 .soc_attr_group = &tegra_soc_attr_group, 249 135 .clk_suspend_on = false, 250 136 };
+10
drivers/soc/ti/k3-socinfo.c
··· 66 66 "1.0", "1.1", "2.0", 67 67 }; 68 68 69 + static const char * const am62lx_rev_string_map[] = { 70 + "1.0", "1.1", 71 + }; 72 + 69 73 static int 70 74 k3_chipinfo_partno_to_names(unsigned int partno, 71 75 struct soc_device_attribute *soc_dev_attr) ··· 95 91 goto err_unknown_variant; 96 92 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s", 97 93 j721e_rev_string_map[variant]); 94 + break; 95 + case JTAG_ID_PARTNO_AM62LX: 96 + if (variant >= ARRAY_SIZE(am62lx_rev_string_map)) 97 + goto err_unknown_variant; 98 + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s", 99 + am62lx_rev_string_map[variant]); 98 100 break; 99 101 default: 100 102 variant++;
+1 -1
drivers/soc/ti/pruss.c
··· 449 449 pruss->mem_regions[i].pa = res.start; 450 450 pruss->mem_regions[i].size = resource_size(&res); 451 451 452 - dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n", 452 + dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %p\n", 453 453 mem_names[i], &pruss->mem_regions[i].pa, 454 454 pruss->mem_regions[i].size, pruss->mem_regions[i].va); 455 455 }
+1 -1
drivers/spi/Kconfig
··· 164 164 165 165 config SPI_ATMEL 166 166 tristate "Atmel SPI Controller" 167 - depends on ARCH_AT91 || COMPILE_TEST 167 + depends on ARCH_MICROCHIP || COMPILE_TEST 168 168 depends on OF 169 169 help 170 170 This selects a driver for the Atmel SPI Controller, present on
+1
drivers/spi/spi-apple.c
··· 511 511 } 512 512 513 513 static const struct of_device_id apple_spi_of_match[] = { 514 + { .compatible = "apple,t8103-spi", }, 514 515 { .compatible = "apple,spi", }, 515 516 {} 516 517 };
+6
drivers/spi/spi-geni-qcom.c
··· 671 671 goto out_pm; 672 672 } 673 673 spi_slv_setup(mas); 674 + } else if (proto == GENI_SE_INVALID_PROTO) { 675 + ret = geni_load_se_firmware(se, GENI_SE_SPI); 676 + if (ret) { 677 + dev_err(mas->dev, "spi master firmware load failed ret: %d\n", ret); 678 + goto out_pm; 679 + } 674 680 } else if (proto != GENI_SE_SPI) { 675 681 dev_err(mas->dev, "Invalid proto %d\n", proto); 676 682 goto out_pm;
+7 -2
drivers/tee/Kconfig
··· 3 3 menuconfig TEE 4 4 tristate "Trusted Execution Environment support" 5 5 depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD 6 - select CRYPTO 7 - select CRYPTO_SHA1 6 + select CRYPTO_LIB_SHA1 8 7 select DMA_SHARED_BUFFER 9 8 select GENERIC_ALLOCATOR 10 9 help ··· 12 13 13 14 if TEE 14 15 16 + config TEE_DMABUF_HEAPS 17 + bool 18 + depends on HAS_DMA && DMABUF_HEAPS 19 + default y 20 + 15 21 source "drivers/tee/optee/Kconfig" 16 22 source "drivers/tee/amdtee/Kconfig" 17 23 source "drivers/tee/tstee/Kconfig" 24 + source "drivers/tee/qcomtee/Kconfig" 18 25 19 26 endif
+2
drivers/tee/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_TEE) += tee.o 3 3 tee-objs += tee_core.o 4 + tee-objs += tee_heap.o 4 5 tee-objs += tee_shm.o 5 6 tee-objs += tee_shm_pool.o 6 7 obj-$(CONFIG_OPTEE) += optee/ 7 8 obj-$(CONFIG_AMDTEE) += amdtee/ 8 9 obj-$(CONFIG_ARM_TSTEE) += tstee/ 10 + obj-$(CONFIG_QCOMTEE) += qcomtee/
+5
drivers/tee/optee/Kconfig
··· 25 25 26 26 Additional documentation on kernel security risks are at 27 27 Documentation/tee/op-tee.rst. 28 + 29 + config OPTEE_STATIC_PROTMEM_POOL 30 + bool 31 + depends on HAS_IOMEM && TEE_DMABUF_HEAPS 32 + default y
+1
drivers/tee/optee/Makefile
··· 4 4 optee-objs += call.o 5 5 optee-objs += notif.o 6 6 optee-objs += rpc.o 7 + optee-objs += protmem.o 7 8 optee-objs += supp.o 8 9 optee-objs += device.o 9 10 optee-objs += smc_abi.o
+8 -1
drivers/tee/optee/core.c
··· 56 56 return 0; 57 57 } 58 58 59 + int optee_set_dma_mask(struct optee *optee, u_int pa_width) 60 + { 61 + u64 mask = DMA_BIT_MASK(min(64, pa_width)); 62 + 63 + return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask); 64 + } 65 + 59 66 static void optee_bus_scan(struct work_struct *work) 60 67 { 61 68 WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP)); ··· 79 72 else 80 73 s = "user"; 81 74 82 - return scnprintf(buf, PAGE_SIZE, "%s\n", s); 75 + return sysfs_emit(buf, "%s\n", s); 83 76 } 84 77 static DEVICE_ATTR_RO(rpmb_routing_model); 85 78
+144 -2
drivers/tee/optee/ffa_abi.c
··· 649 649 return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread); 650 650 } 651 651 652 + static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case) 653 + { 654 + struct optee_shm_arg_entry *entry; 655 + struct optee_msg_arg *msg_arg; 656 + struct tee_shm *shm; 657 + u_int offs; 658 + int rc; 659 + 660 + msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs); 661 + if (IS_ERR(msg_arg)) 662 + return PTR_ERR(msg_arg); 663 + 664 + msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM; 665 + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 666 + msg_arg->params[0].u.value.a = cookie; 667 + msg_arg->params[0].u.value.b = use_case; 668 + 669 + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); 670 + if (rc) 671 + goto out; 672 + if (msg_arg->ret != TEEC_SUCCESS) { 673 + rc = -EINVAL; 674 + goto out; 675 + } 676 + 677 + out: 678 + optee_free_msg_arg(optee->ctx, entry, offs); 679 + return rc; 680 + } 681 + 682 + static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem, 683 + u32 *mem_attrs, unsigned int ma_count, 684 + u32 use_case) 685 + { 686 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 687 + const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops; 688 + const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops; 689 + struct ffa_send_direct_data data; 690 + struct ffa_mem_region_attributes *mem_attr; 691 + struct ffa_mem_ops_args args = { 692 + .use_txbuf = true, 693 + .tag = use_case, 694 + }; 695 + struct page *page; 696 + struct scatterlist sgl; 697 + unsigned int n; 698 + int rc; 699 + 700 + mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL); 701 + for (n = 0; n < ma_count; n++) { 702 + mem_attr[n].receiver = mem_attrs[n] & U16_MAX; 703 + mem_attr[n].attrs = mem_attrs[n] >> 16; 704 + } 705 + args.attrs = mem_attr; 706 + args.nattrs = ma_count; 707 + 708 + page = phys_to_page(protmem->paddr); 709 + sg_init_table(&sgl, 1); 710 + sg_set_page(&sgl, page, protmem->size, 0); 711 
+ 712 + args.sg = &sgl; 713 + rc = mem_ops->memory_lend(&args); 714 + kfree(mem_attr); 715 + if (rc) 716 + return rc; 717 + 718 + rc = do_call_lend_protmem(optee, args.g_handle, use_case); 719 + if (rc) 720 + goto err_reclaim; 721 + 722 + rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle); 723 + if (rc) 724 + goto err_unreg; 725 + 726 + protmem->sec_world_id = args.g_handle; 727 + 728 + return 0; 729 + 730 + err_unreg: 731 + data = (struct ffa_send_direct_data){ 732 + .data0 = OPTEE_FFA_RELEASE_PROTMEM, 733 + .data1 = (u32)args.g_handle, 734 + .data2 = (u32)(args.g_handle >> 32), 735 + }; 736 + msg_ops->sync_send_receive(ffa_dev, &data); 737 + err_reclaim: 738 + mem_ops->memory_reclaim(args.g_handle, 0); 739 + return rc; 740 + } 741 + 742 + static int optee_ffa_reclaim_protmem(struct optee *optee, 743 + struct tee_shm *protmem) 744 + { 745 + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; 746 + const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops; 747 + const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops; 748 + u64 global_handle = protmem->sec_world_id; 749 + struct ffa_send_direct_data data = { 750 + .data0 = OPTEE_FFA_RELEASE_PROTMEM, 751 + .data1 = (u32)global_handle, 752 + .data2 = (u32)(global_handle >> 32) 753 + }; 754 + int rc; 755 + 756 + optee_shm_rem_ffa_handle(optee, global_handle); 757 + protmem->sec_world_id = 0; 758 + 759 + rc = msg_ops->sync_send_receive(ffa_dev, &data); 760 + if (rc) 761 + pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc); 762 + 763 + rc = mem_ops->memory_reclaim(global_handle, 0); 764 + if (rc) 765 + pr_err("mem_reclaim: 0x%llx %d", global_handle, rc); 766 + 767 + return rc; 768 + } 769 + 652 770 /* 653 771 * 6. 
Driver initialization 654 772 * ··· 937 819 .do_call_with_arg = optee_ffa_do_call_with_arg, 938 820 .to_msg_param = optee_ffa_to_msg_param, 939 821 .from_msg_param = optee_ffa_from_msg_param, 822 + .lend_protmem = optee_ffa_lend_protmem, 823 + .reclaim_protmem = optee_ffa_reclaim_protmem, 940 824 }; 941 825 942 826 static void optee_ffa_remove(struct ffa_device *ffa_dev) ··· 1011 891 return rc; 1012 892 } 1013 893 894 + static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps) 895 + { 896 + enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY; 897 + struct tee_protmem_pool *pool; 898 + int rc = 0; 899 + 900 + if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) { 901 + pool = optee_protmem_alloc_dyn_pool(optee, id); 902 + if (IS_ERR(pool)) 903 + return PTR_ERR(pool); 904 + 905 + rc = tee_device_register_dma_heap(optee->teedev, id, pool); 906 + if (rc) 907 + pool->ops->destroy_pool(pool); 908 + } 909 + 910 + return rc; 911 + } 912 + 1014 913 static int optee_ffa_probe(struct ffa_device *ffa_dev) 1015 914 { 1016 915 const struct ffa_notifier_ops *notif_ops; ··· 1080 941 optee); 1081 942 if (IS_ERR(teedev)) { 1082 943 rc = PTR_ERR(teedev); 1083 - goto err_free_pool; 944 + goto err_free_shm_pool; 1084 945 } 1085 946 optee->teedev = teedev; 1086 947 ··· 1127 988 rc); 1128 989 } 1129 990 991 + if (optee_ffa_protmem_pool_init(optee, sec_caps)) 992 + pr_info("Protected memory service not available\n"); 993 + 1130 994 rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); 1131 995 if (rc) 1132 996 goto err_unregister_devices; ··· 1160 1018 tee_device_unregister(optee->supp_teedev); 1161 1019 err_unreg_teedev: 1162 1020 tee_device_unregister(optee->teedev); 1163 - err_free_pool: 1021 + err_free_shm_pool: 1164 1022 tee_shm_pool_free(pool); 1165 1023 err_free_optee: 1166 1024 kfree(optee);
+23 -6
drivers/tee/optee/optee_ffa.h
··· 81 81 * as the second MSG arg struct for 82 82 * OPTEE_FFA_YIELDING_CALL_WITH_ARG. 83 83 * Bit[31:8]: Reserved (MBZ) 84 - * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below, 84 + * w5: Bitfield of OP-TEE capabilities OPTEE_FFA_SEC_CAP_* 85 85 * w6: The maximum secure world notification number 86 86 * w7: Not used (MBZ) 87 87 */ ··· 94 94 #define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1) 95 95 /* OP-TEE supports probing for RPMB device if needed */ 96 96 #define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2) 97 + /* OP-TEE supports Protected Memory for secure data path */ 98 + #define OPTEE_FFA_SEC_CAP_PROTMEM BIT(3) 97 99 98 100 #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2) 99 101 ··· 110 108 * 111 109 * Return register usage: 112 110 * w3: Error code, 0 on success 113 - * w4-w7: Note used (MBZ) 111 + * w4-w7: Not used (MBZ) 114 112 */ 115 113 #define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3) 116 114 ··· 121 119 * Call register usage: 122 120 * w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF 123 121 * w4: Notification value to request bottom half processing, should be 124 - * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE. 
122 + * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 125 123 * w5-w7: Not used (MBZ) 124 + * 125 + * Return register usage: 126 + * w3: Error code, 0 on success 127 + * w4-w7: Not used (MBZ) 128 + */ 129 + #define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5) 130 + 131 + #define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64 132 + 133 + /* 134 + * Release Protected memory 135 + * 136 + * Call register usage: 137 + * w3: Service ID, OPTEE_FFA_RECLAIM_PROTMEM 138 + * w4: Shared memory handle, lower bits 139 + * w5: Shared memory handle, higher bits 140 + * w6-w7: Not used (MBZ) 126 141 * 127 142 * Return register usage: 128 143 * w3: Error code, 0 on success 129 144 * w4-w7: Note used (MBZ) 130 145 */ 131 - #define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5) 132 - 133 - #define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64 146 + #define OPTEE_FFA_RELEASE_PROTMEM OPTEE_FFA_BLOCKING_CALL(8) 134 147 135 148 /* 136 149 * Call with struct optee_msg_arg as argument in the supplied shared memory
+72 -12
drivers/tee/optee/optee_msg.h
··· 133 133 }; 134 134 135 135 /** 136 - * struct optee_msg_param_fmem - ffa memory reference parameter 136 + * struct optee_msg_param_fmem - FF-A memory reference parameter 137 137 * @offs_lower: Lower bits of offset into shared memory reference 138 138 * @offs_upper: Upper bits of offset into shared memory reference 139 139 * @internal_offs: Internal offset into the first page of shared memory 140 140 * reference 141 141 * @size: Size of the buffer 142 - * @global_id: Global identifier of Shared memory 142 + * @global_id: Global identifier of the shared memory 143 143 */ 144 144 struct optee_msg_param_fmem { 145 145 u32 offs_low; ··· 165 165 * @attr: attributes 166 166 * @tmem: parameter by temporary memory reference 167 167 * @rmem: parameter by registered memory reference 168 - * @fmem: parameter by ffa registered memory reference 168 + * @fmem: parameter by FF-A registered memory reference 169 169 * @value: parameter by opaque value 170 170 * @octets: parameter by octet string 171 171 * ··· 297 297 #define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 298 298 299 299 /* 300 + * Values used in OPTEE_MSG_CMD_LEND_PROTMEM below 301 + * OPTEE_MSG_PROTMEM_RESERVED Reserved 302 + * OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY Secure Video Playback 303 + * OPTEE_MSG_PROTMEM_TRUSTED_UI Trused UI 304 + * OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD Secure Video Recording 305 + */ 306 + #define OPTEE_MSG_PROTMEM_RESERVED 0 307 + #define OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY 1 308 + #define OPTEE_MSG_PROTMEM_TRUSTED_UI 2 309 + #define OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD 3 310 + 311 + /* 300 312 * Do a secure call with struct optee_msg_arg as argument 301 313 * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd 302 314 * ··· 349 337 * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is 350 338 * normal world unable to process asynchronous notifications. Typically 351 339 * used when the driver is shut down. 
340 + * 341 + * OPTEE_MSG_CMD_LEND_PROTMEM lends protected memory. The passed normal 342 + * physical memory is protected from normal world access. The memory 343 + * should be unmapped prior to this call since it becomes inaccessible 344 + * during the request. 345 + * Parameters are passed as: 346 + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 347 + * [in] param[0].u.value.a OPTEE_MSG_PROTMEM_* defined above 348 + * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 349 + * [in] param[1].u.tmem.buf_ptr physical address 350 + * [in] param[1].u.tmem.size size 351 + * [in] param[1].u.tmem.shm_ref holds protected memory reference 352 + * 353 + * OPTEE_MSG_CMD_RECLAIM_PROTMEM reclaims a previously lent protected 354 + * memory reference. The physical memory is accessible by the normal world 355 + * after this function has return and can be mapped again. The information 356 + * is passed as: 357 + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 358 + * [in] param[0].u.value.a holds protected memory cookie 359 + * 360 + * OPTEE_MSG_CMD_GET_PROTMEM_CONFIG get configuration for a specific 361 + * protected memory use case. 
Parameters are passed as: 362 + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 363 + * [in] param[0].value.a OPTEE_MSG_PROTMEM_* 364 + * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_{R,F}MEM_OUTPUT 365 + * [in] param[1].u.{r,f}mem Buffer or NULL 366 + * [in] param[1].u.{r,f}mem.size Provided size of buffer or 0 for query 367 + * output for the protected use case: 368 + * [out] param[0].value.a Minimal size of protected memory 369 + * [out] param[0].value.b Required alignment of size and start of 370 + * protected memory 371 + * [out] param[0].value.c PA width, max 64 372 + * [out] param[1].{r,f}mem.size Size of output data 373 + * [out] param[1].{r,f}mem If non-NULL, contains an array of 374 + * uint32_t memory attributes that must be 375 + * included when lending memory for this 376 + * use case 377 + * 378 + * OPTEE_MSG_CMD_ASSIGN_PROTMEM assigns use-case to protected memory 379 + * previously lent using the FFA_LEND framework ABI. Parameters are passed 380 + * as: 381 + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 382 + * [in] param[0].u.value.a holds protected memory cookie 383 + * [in] param[0].u.value.b OPTEE_MSG_PROTMEM_* defined above 352 384 */ 353 - #define OPTEE_MSG_CMD_OPEN_SESSION 0 354 - #define OPTEE_MSG_CMD_INVOKE_COMMAND 1 355 - #define OPTEE_MSG_CMD_CLOSE_SESSION 2 356 - #define OPTEE_MSG_CMD_CANCEL 3 357 - #define OPTEE_MSG_CMD_REGISTER_SHM 4 358 - #define OPTEE_MSG_CMD_UNREGISTER_SHM 5 359 - #define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6 360 - #define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7 361 - #define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 385 + #define OPTEE_MSG_CMD_OPEN_SESSION 0 386 + #define OPTEE_MSG_CMD_INVOKE_COMMAND 1 387 + #define OPTEE_MSG_CMD_CLOSE_SESSION 2 388 + #define OPTEE_MSG_CMD_CANCEL 3 389 + #define OPTEE_MSG_CMD_REGISTER_SHM 4 390 + #define OPTEE_MSG_CMD_UNREGISTER_SHM 5 391 + #define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6 392 + #define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7 393 + #define OPTEE_MSG_CMD_LEND_PROTMEM 8 394 + #define 
OPTEE_MSG_CMD_RECLAIM_PROTMEM 9 395 + #define OPTEE_MSG_CMD_GET_PROTMEM_CONFIG 10 396 + #define OPTEE_MSG_CMD_ASSIGN_PROTMEM 11 397 + #define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 362 398 363 399 #endif /* _OPTEE_MSG_H */
+14 -1
drivers/tee/optee/optee_private.h
··· 176 176 * @do_call_with_arg: enters OP-TEE in secure world 177 177 * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters 178 178 * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param 179 + * @lend_protmem: lends physically contiguous memory as restricted 180 + * memory, inaccessible by the kernel 181 + * @reclaim_protmem: reclaims restricted memory previously lent with 182 + * @lend_protmem() and makes it accessible by the 183 + * kernel again 179 184 * 180 185 * These OPs are only supposed to be used internally in the OP-TEE driver 181 - * as a way of abstracting the different methogs of entering OP-TEE in 186 + * as a way of abstracting the different methods of entering OP-TEE in 182 187 * secure world. 183 188 */ 184 189 struct optee_ops { ··· 196 191 int (*from_msg_param)(struct optee *optee, struct tee_param *params, 197 192 size_t num_params, 198 193 const struct optee_msg_param *msg_params); 194 + int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem, 195 + u32 *mem_attr, unsigned int ma_count, 196 + u32 use_case); 197 + int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem); 199 198 }; 200 199 201 200 /** ··· 283 274 284 275 extern struct blocking_notifier_head optee_rpmb_intf_added; 285 276 277 + int optee_set_dma_mask(struct optee *optee, u_int pa_width); 278 + 286 279 int optee_notif_init(struct optee *optee, u_int max_key); 287 280 void optee_notif_uninit(struct optee *optee); 288 281 int optee_notif_wait(struct optee *optee, u_int key, u32 timeout); ··· 296 285 void optee_supp_init(struct optee_supp *supp); 297 286 void optee_supp_uninit(struct optee_supp *supp); 298 287 void optee_supp_release(struct optee_supp *supp); 288 + struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee, 289 + enum tee_dma_heap_id id); 299 290 300 291 int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, 301 292 struct tee_param *param);
+36 -1
drivers/tee/optee/optee_smc.h
··· 264 264 #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0) 265 265 /* Secure world can communicate via previously unregistered shared memory */ 266 266 #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1) 267 - 268 267 /* 269 268 * Secure world supports commands "register/unregister shared memory", 270 269 * secure world accepts command buffers located in any parts of non-secure RAM ··· 279 280 #define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6) 280 281 /* Secure world supports probing for RPMB device if needed */ 281 282 #define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7) 283 + /* Secure world supports protected memory */ 284 + #define OPTEE_SMC_SEC_CAP_PROTMEM BIT(8) 285 + /* Secure world supports dynamic protected memory */ 286 + #define OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM BIT(9) 282 287 283 288 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 284 289 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ ··· 454 451 455 452 /* See OPTEE_SMC_CALL_WITH_REGD_ARG above */ 456 453 #define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19 454 + /* 455 + * Get protected memory config 456 + * 457 + * Returns the protected memory config. 
458 + * 459 + * Call register usage: 460 + * a0 SMC Function ID, OPTEE_SMC_GET_PROTMEM_CONFIG 461 + * a2-6 Not used, must be zero 462 + * a7 Hypervisor Client ID register 463 + * 464 + * Have config return register usage: 465 + * a0 OPTEE_SMC_RETURN_OK 466 + * a1 Physical address of start of protected memory 467 + * a2 Size of protected memory 468 + * a3 PA width, max 64 469 + * a4-7 Preserved 470 + * 471 + * Not available register usage: 472 + * a0 OPTEE_SMC_RETURN_ENOTAVAIL 473 + * a1-3 Not used 474 + * a4-7 Preserved 475 + */ 476 + #define OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG 20 477 + #define OPTEE_SMC_GET_PROTMEM_CONFIG \ 478 + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG) 479 + 480 + struct optee_smc_get_protmem_config_result { 481 + unsigned long status; 482 + unsigned long start; 483 + unsigned long size; 484 + unsigned long pa_width; 485 + }; 457 486 458 487 /* 459 488 * Resume from RPC (for example after processing a foreign interrupt)
+335
drivers/tee/optee/protmem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2025, Linaro Limited 4 + */ 5 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 + 7 + #include <linux/errno.h> 8 + #include <linux/genalloc.h> 9 + #include <linux/slab.h> 10 + #include <linux/string.h> 11 + #include <linux/tee_core.h> 12 + #include <linux/types.h> 13 + #include "optee_private.h" 14 + 15 + struct optee_protmem_dyn_pool { 16 + struct tee_protmem_pool pool; 17 + struct gen_pool *gen_pool; 18 + struct optee *optee; 19 + size_t page_count; 20 + u32 *mem_attrs; 21 + u_int mem_attr_count; 22 + refcount_t refcount; 23 + u32 use_case; 24 + struct tee_shm *protmem; 25 + /* Protects when initializing and tearing down this struct */ 26 + struct mutex mutex; 27 + }; 28 + 29 + static struct optee_protmem_dyn_pool * 30 + to_protmem_dyn_pool(struct tee_protmem_pool *pool) 31 + { 32 + return container_of(pool, struct optee_protmem_dyn_pool, pool); 33 + } 34 + 35 + static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp) 36 + { 37 + int rc; 38 + 39 + rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count); 40 + if (IS_ERR(rp->protmem)) { 41 + rc = PTR_ERR(rp->protmem); 42 + goto err_null_protmem; 43 + } 44 + 45 + /* 46 + * TODO unmap the memory range since the physical memory will 47 + * become inaccesible after the lend_protmem() call. 48 + * 49 + * If the platform supports a hypervisor at EL2, it will unmap the 50 + * intermediate physical memory for us and stop cache pre-fetch of 51 + * the memory. 
52 + */ 53 + rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem, 54 + rp->mem_attrs, 55 + rp->mem_attr_count, rp->use_case); 56 + if (rc) 57 + goto err_put_shm; 58 + rp->protmem->flags |= TEE_SHM_DYNAMIC; 59 + 60 + rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1); 61 + if (!rp->gen_pool) { 62 + rc = -ENOMEM; 63 + goto err_reclaim; 64 + } 65 + 66 + rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr, 67 + rp->protmem->size, -1); 68 + if (rc) 69 + goto err_free_pool; 70 + 71 + refcount_set(&rp->refcount, 1); 72 + return 0; 73 + 74 + err_free_pool: 75 + gen_pool_destroy(rp->gen_pool); 76 + rp->gen_pool = NULL; 77 + err_reclaim: 78 + rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem); 79 + err_put_shm: 80 + tee_shm_put(rp->protmem); 81 + err_null_protmem: 82 + rp->protmem = NULL; 83 + return rc; 84 + } 85 + 86 + static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp) 87 + { 88 + int rc = 0; 89 + 90 + if (!refcount_inc_not_zero(&rp->refcount)) { 91 + mutex_lock(&rp->mutex); 92 + if (rp->gen_pool) { 93 + /* 94 + * Another thread has already initialized the pool 95 + * before us, or the pool was just about to be torn 96 + * down. Either way we only need to increase the 97 + * refcount and we're done. 
98 + */ 99 + refcount_inc(&rp->refcount); 100 + } else { 101 + rc = init_dyn_protmem(rp); 102 + } 103 + mutex_unlock(&rp->mutex); 104 + } 105 + 106 + return rc; 107 + } 108 + 109 + static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp) 110 + { 111 + gen_pool_destroy(rp->gen_pool); 112 + rp->gen_pool = NULL; 113 + 114 + rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem); 115 + rp->protmem->flags &= ~TEE_SHM_DYNAMIC; 116 + 117 + WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount"); 118 + tee_shm_put(rp->protmem); 119 + rp->protmem = NULL; 120 + } 121 + 122 + static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp) 123 + { 124 + if (refcount_dec_and_test(&rp->refcount)) { 125 + mutex_lock(&rp->mutex); 126 + if (rp->gen_pool) 127 + release_dyn_protmem(rp); 128 + mutex_unlock(&rp->mutex); 129 + } 130 + } 131 + 132 + static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool, 133 + struct sg_table *sgt, size_t size, 134 + size_t *offs) 135 + { 136 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 137 + size_t sz = ALIGN(size, PAGE_SIZE); 138 + phys_addr_t pa; 139 + int rc; 140 + 141 + rc = get_dyn_protmem(rp); 142 + if (rc) 143 + return rc; 144 + 145 + pa = gen_pool_alloc(rp->gen_pool, sz); 146 + if (!pa) { 147 + rc = -ENOMEM; 148 + goto err_put; 149 + } 150 + 151 + rc = sg_alloc_table(sgt, 1, GFP_KERNEL); 152 + if (rc) 153 + goto err_free; 154 + 155 + sg_set_page(sgt->sgl, phys_to_page(pa), size, 0); 156 + *offs = pa - rp->protmem->paddr; 157 + 158 + return 0; 159 + err_free: 160 + gen_pool_free(rp->gen_pool, pa, size); 161 + err_put: 162 + put_dyn_protmem(rp); 163 + 164 + return rc; 165 + } 166 + 167 + static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool, 168 + struct sg_table *sgt) 169 + { 170 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 171 + struct scatterlist *sg; 172 + int i; 173 + 174 + for_each_sgtable_sg(sgt, sg, i) 175 + gen_pool_free(rp->gen_pool, sg_phys(sg), 
sg->length); 176 + sg_free_table(sgt); 177 + put_dyn_protmem(rp); 178 + } 179 + 180 + static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool, 181 + struct sg_table *sgt, size_t offs, 182 + struct tee_shm *shm, 183 + struct tee_shm **parent_shm) 184 + { 185 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 186 + 187 + *parent_shm = rp->protmem; 188 + 189 + return 0; 190 + } 191 + 192 + static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool) 193 + { 194 + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); 195 + 196 + mutex_destroy(&rp->mutex); 197 + kfree(rp); 198 + } 199 + 200 + static struct tee_protmem_pool_ops protmem_pool_ops_dyn = { 201 + .alloc = protmem_pool_op_dyn_alloc, 202 + .free = protmem_pool_op_dyn_free, 203 + .update_shm = protmem_pool_op_dyn_update_shm, 204 + .destroy_pool = pool_op_dyn_destroy_pool, 205 + }; 206 + 207 + static int get_protmem_config(struct optee *optee, u32 use_case, 208 + size_t *min_size, u_int *pa_width, 209 + u32 *mem_attrs, u_int *ma_count) 210 + { 211 + struct tee_param params[2] = { 212 + [0] = { 213 + .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT, 214 + .u.value.a = use_case, 215 + }, 216 + [1] = { 217 + .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT, 218 + }, 219 + }; 220 + struct optee_shm_arg_entry *entry; 221 + struct tee_shm *shm_param = NULL; 222 + struct optee_msg_arg *msg_arg; 223 + struct tee_shm *shm; 224 + u_int offs; 225 + int rc; 226 + 227 + if (mem_attrs && *ma_count) { 228 + params[1].u.memref.size = *ma_count * sizeof(*mem_attrs); 229 + shm_param = tee_shm_alloc_priv_buf(optee->ctx, 230 + params[1].u.memref.size); 231 + if (IS_ERR(shm_param)) 232 + return PTR_ERR(shm_param); 233 + params[1].u.memref.shm = shm_param; 234 + } 235 + 236 + msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry, 237 + &shm, &offs); 238 + if (IS_ERR(msg_arg)) { 239 + rc = PTR_ERR(msg_arg); 240 + goto out_free_shm; 241 + } 242 + msg_arg->cmd = 
OPTEE_MSG_CMD_GET_PROTMEM_CONFIG; 243 + 244 + rc = optee->ops->to_msg_param(optee, msg_arg->params, 245 + ARRAY_SIZE(params), params); 246 + if (rc) 247 + goto out_free_msg; 248 + 249 + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); 250 + if (rc) 251 + goto out_free_msg; 252 + if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) { 253 + rc = -EINVAL; 254 + goto out_free_msg; 255 + } 256 + 257 + rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params), 258 + msg_arg->params); 259 + if (rc) 260 + goto out_free_msg; 261 + 262 + if (!msg_arg->ret && mem_attrs && 263 + *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) { 264 + rc = -EINVAL; 265 + goto out_free_msg; 266 + } 267 + 268 + *min_size = params[0].u.value.a; 269 + *pa_width = params[0].u.value.c; 270 + *ma_count = params[1].u.memref.size / sizeof(*mem_attrs); 271 + 272 + if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) { 273 + rc = -ENOSPC; 274 + goto out_free_msg; 275 + } 276 + 277 + if (mem_attrs) 278 + memcpy(mem_attrs, tee_shm_get_va(shm_param, 0), 279 + params[1].u.memref.size); 280 + 281 + out_free_msg: 282 + optee_free_msg_arg(optee->ctx, entry, offs); 283 + out_free_shm: 284 + if (shm_param) 285 + tee_shm_free(shm_param); 286 + return rc; 287 + } 288 + 289 + struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee, 290 + enum tee_dma_heap_id id) 291 + { 292 + struct optee_protmem_dyn_pool *rp; 293 + size_t min_size; 294 + u_int pa_width; 295 + int rc; 296 + 297 + rp = kzalloc(sizeof(*rp), GFP_KERNEL); 298 + if (!rp) 299 + return ERR_PTR(-ENOMEM); 300 + rp->use_case = id; 301 + 302 + rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL, 303 + &rp->mem_attr_count); 304 + if (rc) { 305 + if (rc != -ENOSPC) 306 + goto err; 307 + rp->mem_attrs = kcalloc(rp->mem_attr_count, 308 + sizeof(*rp->mem_attrs), GFP_KERNEL); 309 + if (!rp->mem_attrs) { 310 + rc = -ENOMEM; 311 + goto err; 312 + } 313 + rc = get_protmem_config(optee, id, &min_size, 
&pa_width, 314 + rp->mem_attrs, &rp->mem_attr_count); 315 + if (rc) 316 + goto err_kfree_eps; 317 + } 318 + 319 + rc = optee_set_dma_mask(optee, pa_width); 320 + if (rc) 321 + goto err_kfree_eps; 322 + 323 + rp->pool.ops = &protmem_pool_ops_dyn; 324 + rp->optee = optee; 325 + rp->page_count = min_size / PAGE_SIZE; 326 + mutex_init(&rp->mutex); 327 + 328 + return &rp->pool; 329 + 330 + err_kfree_eps: 331 + kfree(rp->mem_attrs); 332 + err: 333 + kfree(rp); 334 + return ERR_PTR(rc); 335 + }
+139 -2
drivers/tee/optee/smc_abi.c
··· 965 965 return rc; 966 966 } 967 967 968 + static int optee_smc_lend_protmem(struct optee *optee, struct tee_shm *protmem, 969 + u32 *mem_attrs, unsigned int ma_count, 970 + u32 use_case) 971 + { 972 + struct optee_shm_arg_entry *entry; 973 + struct optee_msg_arg *msg_arg; 974 + struct tee_shm *shm; 975 + u_int offs; 976 + int rc; 977 + 978 + msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs); 979 + if (IS_ERR(msg_arg)) 980 + return PTR_ERR(msg_arg); 981 + 982 + msg_arg->cmd = OPTEE_MSG_CMD_LEND_PROTMEM; 983 + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; 984 + msg_arg->params[0].u.value.a = use_case; 985 + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; 986 + msg_arg->params[1].u.tmem.buf_ptr = protmem->paddr; 987 + msg_arg->params[1].u.tmem.size = protmem->size; 988 + msg_arg->params[1].u.tmem.shm_ref = (u_long)protmem; 989 + 990 + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); 991 + if (rc) 992 + goto out; 993 + if (msg_arg->ret != TEEC_SUCCESS) { 994 + rc = -EINVAL; 995 + goto out; 996 + } 997 + protmem->sec_world_id = (u_long)protmem; 998 + 999 + out: 1000 + optee_free_msg_arg(optee->ctx, entry, offs); 1001 + return rc; 1002 + } 1003 + 1004 + static int optee_smc_reclaim_protmem(struct optee *optee, 1005 + struct tee_shm *protmem) 1006 + { 1007 + struct optee_shm_arg_entry *entry; 1008 + struct optee_msg_arg *msg_arg; 1009 + struct tee_shm *shm; 1010 + u_int offs; 1011 + int rc; 1012 + 1013 + msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs); 1014 + if (IS_ERR(msg_arg)) 1015 + return PTR_ERR(msg_arg); 1016 + 1017 + msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_PROTMEM; 1018 + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; 1019 + msg_arg->params[0].u.rmem.shm_ref = (u_long)protmem; 1020 + 1021 + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); 1022 + if (rc) 1023 + goto out; 1024 + if (msg_arg->ret != TEEC_SUCCESS) 1025 + rc = -EINVAL; 1026 + 1027 + out: 1028 + 
optee_free_msg_arg(optee->ctx, entry, offs); 1029 + return rc; 1030 + } 1031 + 968 1032 /* 969 1033 * 5. Asynchronous notification 970 1034 */ ··· 1280 1216 .do_call_with_arg = optee_smc_do_call_with_arg, 1281 1217 .to_msg_param = optee_to_msg_param, 1282 1218 .from_msg_param = optee_from_msg_param, 1219 + .lend_protmem = optee_smc_lend_protmem, 1220 + .reclaim_protmem = optee_smc_reclaim_protmem, 1283 1221 }; 1284 1222 1285 1223 static int enable_async_notif(optee_invoke_fn *invoke_fn) ··· 1649 1583 } 1650 1584 #endif 1651 1585 1586 + static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee) 1587 + { 1588 + #if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL) 1589 + union { 1590 + struct arm_smccc_res smccc; 1591 + struct optee_smc_get_protmem_config_result result; 1592 + } res; 1593 + struct tee_protmem_pool *pool; 1594 + void *p; 1595 + int rc; 1596 + 1597 + optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0, 1598 + 0, 0, 0, &res.smccc); 1599 + if (res.result.status != OPTEE_SMC_RETURN_OK) 1600 + return ERR_PTR(-EINVAL); 1601 + 1602 + rc = optee_set_dma_mask(optee, res.result.pa_width); 1603 + if (rc) 1604 + return ERR_PTR(rc); 1605 + 1606 + /* 1607 + * Map the memory as uncached to make sure the kernel can work with 1608 + * __pfn_to_page() and friends since that's needed when passing the 1609 + * protected DMA-buf to a device. The memory should otherwise not 1610 + * be touched by the kernel since it's likely to cause an external 1611 + * abort due to the protection status. 
1612 + */ 1613 + p = devm_memremap(&optee->teedev->dev, res.result.start, 1614 + res.result.size, MEMREMAP_WC); 1615 + if (IS_ERR(p)) 1616 + return p; 1617 + 1618 + pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size); 1619 + if (IS_ERR(pool)) 1620 + devm_memunmap(&optee->teedev->dev, p); 1621 + 1622 + return pool; 1623 + #else 1624 + return ERR_PTR(-EINVAL); 1625 + #endif 1626 + } 1627 + 1628 + static int optee_protmem_pool_init(struct optee *optee) 1629 + { 1630 + bool protm = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM; 1631 + bool dyn_protm = optee->smc.sec_caps & 1632 + OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM; 1633 + enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY; 1634 + struct tee_protmem_pool *pool = ERR_PTR(-EINVAL); 1635 + int rc = -EINVAL; 1636 + 1637 + if (!protm && !dyn_protm) 1638 + return 0; 1639 + 1640 + if (protm) 1641 + pool = static_protmem_pool_init(optee); 1642 + if (dyn_protm && IS_ERR(pool)) 1643 + pool = optee_protmem_alloc_dyn_pool(optee, heap_id); 1644 + if (IS_ERR(pool)) 1645 + return PTR_ERR(pool); 1646 + 1647 + rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool); 1648 + if (rc) 1649 + pool->ops->destroy_pool(pool); 1650 + 1651 + return rc; 1652 + } 1653 + 1652 1654 static int optee_probe(struct platform_device *pdev) 1653 1655 { 1654 1656 optee_invoke_fn *invoke_fn; ··· 1812 1678 optee = kzalloc(sizeof(*optee), GFP_KERNEL); 1813 1679 if (!optee) { 1814 1680 rc = -ENOMEM; 1815 - goto err_free_pool; 1681 + goto err_free_shm_pool; 1816 1682 } 1817 1683 1818 1684 optee->ops = &optee_ops; ··· 1885 1751 pr_info("Asynchronous notifications enabled\n"); 1886 1752 } 1887 1753 1754 + if (optee_protmem_pool_init(optee)) 1755 + pr_info("Protected memory service not available\n"); 1756 + 1888 1757 /* 1889 1758 * Ensure that there are no pre-existing shm objects before enabling 1890 1759 * the shm cache so that there's no chance of receiving an invalid ··· 1939 1802 tee_device_unregister(optee->teedev); 1940 
1803 err_free_optee: 1941 1804 kfree(optee); 1942 - err_free_pool: 1805 + err_free_shm_pool: 1943 1806 tee_shm_pool_free(pool); 1944 1807 if (memremaped_shm) 1945 1808 memunmap(memremaped_shm);
+12
drivers/tee/qcomtee/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # Qualcomm Trusted Execution Environment Configuration 3 + config QCOMTEE 4 + tristate "Qualcomm TEE Support" 5 + depends on !CPU_BIG_ENDIAN 6 + select QCOM_SCM 7 + select QCOM_TZMEM_MODE_SHMBRIDGE 8 + help 9 + This option enables the Qualcomm Trusted Execution Environment (QTEE) 10 + driver. It provides an API to access services offered by QTEE and 11 + its loaded Trusted Applications (TAs). Additionally, it facilitates 12 + the export of userspace services provided by supplicants to QTEE.
+9
drivers/tee/qcomtee/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + obj-$(CONFIG_QCOMTEE) += qcomtee.o 3 + qcomtee-objs += async.o 4 + qcomtee-objs += call.o 5 + qcomtee-objs += core.o 6 + qcomtee-objs += mem_obj.o 7 + qcomtee-objs += primordial_obj.o 8 + qcomtee-objs += shm.o 9 + qcomtee-objs += user_obj.o
+182
drivers/tee/qcomtee/async.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include "qcomtee.h" 9 + 10 + #define QCOMTEE_ASYNC_VERSION_1_0 0x00010000U /* Maj: 0x0001, Min: 0x0000. */ 11 + #define QCOMTEE_ASYNC_VERSION_1_1 0x00010001U /* Maj: 0x0001, Min: 0x0001. */ 12 + #define QCOMTEE_ASYNC_VERSION_1_2 0x00010002U /* Maj: 0x0001, Min: 0x0002. */ 13 + #define QCOMTEE_ASYNC_VERSION_CURRENT QCOMTEE_ASYNC_VERSION_1_2 14 + 15 + #define QCOMTEE_ASYNC_VERSION_MAJOR(n) upper_16_bits(n) 16 + #define QCOMTEE_ASYNC_VERSION_MINOR(n) lower_16_bits(n) 17 + 18 + #define QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR \ 19 + QCOMTEE_ASYNC_VERSION_MAJOR(QCOMTEE_ASYNC_VERSION_CURRENT) 20 + #define QCOMTEE_ASYNC_VERSION_CURRENT_MINOR \ 21 + QCOMTEE_ASYNC_VERSION_MINOR(QCOMTEE_ASYNC_VERSION_CURRENT) 22 + 23 + /** 24 + * struct qcomtee_async_msg_hdr - Asynchronous message header format. 25 + * @version: current async protocol version of the remote endpoint. 26 + * @op: async operation. 27 + * 28 + * @version specifies the endpoint's (QTEE or driver) supported async protocol. 29 + * For example, if QTEE sets @version to %QCOMTEE_ASYNC_VERSION_1_1, QTEE 30 + * handles operations supported in %QCOMTEE_ASYNC_VERSION_1_1 or 31 + * %QCOMTEE_ASYNC_VERSION_1_0. @op determines the message format. 32 + */ 33 + struct qcomtee_async_msg_hdr { 34 + u32 version; 35 + u32 op; 36 + }; 37 + 38 + /* Size of an empty async message. */ 39 + #define QCOMTEE_ASYNC_MSG_ZERO sizeof(struct qcomtee_async_msg_hdr) 40 + 41 + /** 42 + * struct qcomtee_async_release_msg - Release asynchronous message. 43 + * @hdr: message header as &struct qcomtee_async_msg_hdr. 44 + * @counts: number of objects in @object_ids. 45 + * @object_ids: array of object IDs that should be released. 46 + * 47 + * Available in Maj = 0x0001, Min >= 0x0000. 
48 + */ 49 + struct qcomtee_async_release_msg { 50 + struct qcomtee_async_msg_hdr hdr; 51 + u32 counts; 52 + u32 object_ids[] __counted_by(counts); 53 + }; 54 + 55 + /** 56 + * qcomtee_get_async_buffer() - Get the start of the asynchronous message. 57 + * @oic: context used for the current invocation. 58 + * @async_buffer: return buffer to extract from or fill in async messages. 59 + * 60 + * If @oic is used for direct object invocation, the whole outbound buffer 61 + * is available for the async message. If @oic is used for a callback request, 62 + * the tail of the outbound buffer (after the callback request message) is 63 + * available for the async message. 64 + * 65 + * The start of the async buffer is aligned, see qcomtee_msg_offset_align(). 66 + */ 67 + static void qcomtee_get_async_buffer(struct qcomtee_object_invoke_ctx *oic, 68 + struct qcomtee_buffer *async_buffer) 69 + { 70 + struct qcomtee_msg_callback *msg; 71 + unsigned int offset; 72 + int i; 73 + 74 + if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY)) { 75 + /* The outbound buffer is empty. Using the whole buffer. */ 76 + offset = 0; 77 + } else { 78 + msg = (struct qcomtee_msg_callback *)oic->out_msg.addr; 79 + 80 + /* Start offset in a message for buffer arguments. */ 81 + offset = qcomtee_msg_buffer_args(struct qcomtee_msg_callback, 82 + qcomtee_msg_args(msg)); 83 + 84 + /* Add size of IB arguments. */ 85 + qcomtee_msg_for_each_input_buffer(i, msg) 86 + offset += qcomtee_msg_offset_align(msg->args[i].b.size); 87 + 88 + /* Add size of OB arguments. */ 89 + qcomtee_msg_for_each_output_buffer(i, msg) 90 + offset += qcomtee_msg_offset_align(msg->args[i].b.size); 91 + } 92 + 93 + async_buffer->addr = oic->out_msg.addr + offset; 94 + async_buffer->size = oic->out_msg.size - offset; 95 + } 96 + 97 + /** 98 + * async_release() - Process QTEE async release requests. 99 + * @oic: context used for the current invocation. 100 + * @msg: async message for object release. 
101 + * @size: size of the async buffer available. 102 + * 103 + * Return: Size of the outbound buffer used when processing @msg. 104 + */ 105 + static size_t async_release(struct qcomtee_object_invoke_ctx *oic, 106 + struct qcomtee_async_msg_hdr *async_msg, 107 + size_t size) 108 + { 109 + struct qcomtee_async_release_msg *msg; 110 + struct qcomtee_object *object; 111 + int i; 112 + 113 + msg = (struct qcomtee_async_release_msg *)async_msg; 114 + 115 + for (i = 0; i < msg->counts; i++) { 116 + object = qcomtee_idx_erase(oic, msg->object_ids[i]); 117 + qcomtee_object_put(object); 118 + } 119 + 120 + return struct_size(msg, object_ids, msg->counts); 121 + } 122 + 123 + /** 124 + * qcomtee_fetch_async_reqs() - Fetch and process asynchronous messages. 125 + * @oic: context used for the current invocation. 126 + * 127 + * Calls handlers to process the requested operations in the async message. 128 + * Currently, only supports async release requests. 129 + */ 130 + void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic) 131 + { 132 + struct qcomtee_async_msg_hdr *async_msg; 133 + struct qcomtee_buffer async_buffer; 134 + size_t consumed, used = 0; 135 + u16 major_ver; 136 + 137 + qcomtee_get_async_buffer(oic, &async_buffer); 138 + 139 + while (async_buffer.size - used > QCOMTEE_ASYNC_MSG_ZERO) { 140 + async_msg = (struct qcomtee_async_msg_hdr *)(async_buffer.addr + 141 + used); 142 + /* 143 + * QTEE assumes that the unused space of the async buffer is 144 + * zeroed; so if version is zero, the buffer is unused. 145 + */ 146 + if (async_msg->version == 0) 147 + goto out; 148 + 149 + major_ver = QCOMTEE_ASYNC_VERSION_MAJOR(async_msg->version); 150 + /* Major version mismatch is a compatibility break. 
*/ 151 + if (major_ver != QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR) { 152 + pr_err("Async message version mismatch (%u != %u)\n", 153 + major_ver, QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR); 154 + 155 + goto out; 156 + } 157 + 158 + switch (async_msg->op) { 159 + case QCOMTEE_MSG_OBJECT_OP_RELEASE: 160 + consumed = async_release(oic, async_msg, 161 + async_buffer.size - used); 162 + break; 163 + default: 164 + pr_err("Unsupported async message %u\n", async_msg->op); 165 + goto out; 166 + } 167 + 168 + /* Supported operation but unable to parse the message. */ 169 + if (!consumed) { 170 + pr_err("Unable to parse async message for op %u\n", 171 + async_msg->op); 172 + goto out; 173 + } 174 + 175 + /* Next async message. */ 176 + used += qcomtee_msg_offset_align(consumed); 177 + } 178 + 179 + out: 180 + /* Reset the async buffer so async requests do not loop to QTEE. */ 181 + memzero_explicit(async_buffer.addr, async_buffer.size); 182 + }
+820
drivers/tee/qcomtee/call.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/slab.h> 9 + #include <linux/tee.h> 10 + #include <linux/platform_device.h> 11 + #include <linux/xarray.h> 12 + 13 + #include "qcomtee.h" 14 + 15 + static int find_qtee_object(struct qcomtee_object **object, unsigned long id, 16 + struct qcomtee_context_data *ctxdata) 17 + { 18 + int err = 0; 19 + 20 + guard(rcu)(); 21 + /* Object release is RCU protected. */ 22 + *object = idr_find(&ctxdata->qtee_objects_idr, id); 23 + if (!qcomtee_object_get(*object)) 24 + err = -EINVAL; 25 + 26 + return err; 27 + } 28 + 29 + static void del_qtee_object(unsigned long id, 30 + struct qcomtee_context_data *ctxdata) 31 + { 32 + struct qcomtee_object *object; 33 + 34 + scoped_guard(mutex, &ctxdata->qtee_lock) 35 + object = idr_remove(&ctxdata->qtee_objects_idr, id); 36 + 37 + qcomtee_object_put(object); 38 + } 39 + 40 + /** 41 + * qcomtee_context_add_qtee_object() - Add a QTEE object to the context. 42 + * @param: TEE parameter representing @object. 43 + * @object: QTEE object. 44 + * @ctx: context to add the object. 45 + * 46 + * It assumes @object is %QCOMTEE_OBJECT_TYPE_TEE and the caller has already 47 + * issued qcomtee_object_get() for @object. 48 + * 49 + * Return: On success, returns 0; on failure, returns < 0. 50 + */ 51 + int qcomtee_context_add_qtee_object(struct tee_param *param, 52 + struct qcomtee_object *object, 53 + struct tee_context *ctx) 54 + { 55 + int ret; 56 + struct qcomtee_context_data *ctxdata = ctx->data; 57 + 58 + scoped_guard(mutex, &ctxdata->qtee_lock) 59 + ret = idr_alloc(&ctxdata->qtee_objects_idr, object, 0, 0, 60 + GFP_KERNEL); 61 + if (ret < 0) 62 + return ret; 63 + 64 + param->u.objref.id = ret; 65 + /* QTEE Object: QCOMTEE_OBJREF_FLAG_TEE set. 
*/ 66 + param->u.objref.flags = QCOMTEE_OBJREF_FLAG_TEE; 67 + 68 + return 0; 69 + } 70 + 71 + /* Retrieve the QTEE object added with qcomtee_context_add_qtee_object(). */ 72 + int qcomtee_context_find_qtee_object(struct qcomtee_object **object, 73 + struct tee_param *param, 74 + struct tee_context *ctx) 75 + { 76 + struct qcomtee_context_data *ctxdata = ctx->data; 77 + 78 + return find_qtee_object(object, param->u.objref.id, ctxdata); 79 + } 80 + 81 + /** 82 + * qcomtee_context_del_qtee_object() - Delete a QTEE object from the context. 83 + * @param: TEE parameter representing @object. 84 + * @ctx: context for deleting the object. 85 + * 86 + * The @param has been initialized by qcomtee_context_add_qtee_object(). 87 + */ 88 + void qcomtee_context_del_qtee_object(struct tee_param *param, 89 + struct tee_context *ctx) 90 + { 91 + struct qcomtee_context_data *ctxdata = ctx->data; 92 + /* 'qtee_objects_idr' stores QTEE objects only. */ 93 + if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE) 94 + del_qtee_object(param->u.objref.id, ctxdata); 95 + } 96 + 97 + /** 98 + * qcomtee_objref_to_arg() - Convert OBJREF parameter to QTEE argument. 99 + * @arg: QTEE argument. 100 + * @param: TEE parameter. 101 + * @ctx: context in which the conversion should happen. 102 + * 103 + * It assumes @param is an OBJREF. 104 + * It does not set @arg.type; the caller should initialize it to a correct 105 + * &enum qcomtee_arg_type value. It gets the object's refcount in @arg; 106 + * the caller should manage to put it afterward. 107 + * 108 + * Return: On success, returns 0; on failure, returns < 0. 
109 + */ 110 + int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param, 111 + struct tee_context *ctx) 112 + { 113 + int err = -EINVAL; 114 + 115 + arg->o = NULL_QCOMTEE_OBJECT; 116 + /* param is a NULL object: */ 117 + if (param->u.objref.id == TEE_OBJREF_NULL) 118 + return 0; 119 + 120 + /* param is a callback object: */ 121 + if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_USER) 122 + err = qcomtee_user_param_to_object(&arg->o, param, ctx); 123 + /* param is a QTEE object: */ 124 + else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE) 125 + err = qcomtee_context_find_qtee_object(&arg->o, param, ctx); 126 + /* param is a memory object: */ 127 + else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_MEM) 128 + err = qcomtee_memobj_param_to_object(&arg->o, param, ctx); 129 + 130 + /* 131 + * For callback objects, call qcomtee_object_get() to keep a temporary 132 + * copy for the driver, as these objects are released asynchronously 133 + * and may disappear even before returning from QTEE. 134 + * 135 + * - For direct object invocations, the matching put is called in 136 + * qcomtee_object_invoke() when parsing the QTEE response. 137 + * - For callback responses, put is called in qcomtee_user_object_notify() 138 + * after QTEE has received its copies. 139 + */ 140 + 141 + if (!err && (typeof_qcomtee_object(arg->o) == QCOMTEE_OBJECT_TYPE_CB)) 142 + qcomtee_object_get(arg->o); 143 + 144 + return err; 145 + } 146 + 147 + /** 148 + * qcomtee_objref_from_arg() - Convert QTEE argument to OBJREF param. 149 + * @param: TEE parameter. 150 + * @arg: QTEE argument. 151 + * @ctx: context in which the conversion should happen. 152 + * 153 + * It assumes @arg is of %QCOMTEE_ARG_TYPE_IO or %QCOMTEE_ARG_TYPE_OO. 154 + * It does not set @param.attr; the caller should initialize it to a 155 + * correct type. 156 + * 157 + * Return: On success, returns 0; on failure, returns < 0. 
158 + */ 159 + int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg, 160 + struct tee_context *ctx) 161 + { 162 + struct qcomtee_object *object = arg->o; 163 + 164 + switch (typeof_qcomtee_object(object)) { 165 + case QCOMTEE_OBJECT_TYPE_NULL: 166 + param->u.objref.id = TEE_OBJREF_NULL; 167 + 168 + return 0; 169 + case QCOMTEE_OBJECT_TYPE_CB: 170 + /* object is a callback object: */ 171 + if (is_qcomtee_user_object(object)) 172 + return qcomtee_user_param_from_object(param, object, 173 + ctx); 174 + /* object is a memory object: */ 175 + else if (is_qcomtee_memobj_object(object)) 176 + return qcomtee_memobj_param_from_object(param, object, 177 + ctx); 178 + 179 + break; 180 + case QCOMTEE_OBJECT_TYPE_TEE: 181 + return qcomtee_context_add_qtee_object(param, object, ctx); 182 + 183 + case QCOMTEE_OBJECT_TYPE_ROOT: 184 + default: 185 + break; 186 + } 187 + 188 + return -EINVAL; 189 + } 190 + 191 + /** 192 + * qcomtee_params_to_args() - Convert TEE parameters to QTEE arguments. 193 + * @u: QTEE arguments. 194 + * @params: TEE parameters. 195 + * @num_params: number of elements in the parameter array. 196 + * @ctx: context in which the conversion should happen. 197 + * 198 + * It assumes @u has at least @num_params + 1 entries and has been initialized 199 + * with %QCOMTEE_ARG_TYPE_INV as &struct qcomtee_arg.type. 200 + * 201 + * Return: On success, returns 0; on failure, returns < 0. 
202 + */ 203 + static int qcomtee_params_to_args(struct qcomtee_arg *u, 204 + struct tee_param *params, int num_params, 205 + struct tee_context *ctx) 206 + { 207 + int i; 208 + 209 + for (i = 0; i < num_params; i++) { 210 + switch (params[i].attr) { 211 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT: 212 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: 213 + u[i].flags = QCOMTEE_ARG_FLAGS_UADDR; 214 + u[i].b.uaddr = params[i].u.ubuf.uaddr; 215 + u[i].b.size = params[i].u.ubuf.size; 216 + 217 + if (params[i].attr == 218 + TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT) 219 + u[i].type = QCOMTEE_ARG_TYPE_IB; 220 + else /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */ 221 + u[i].type = QCOMTEE_ARG_TYPE_OB; 222 + 223 + break; 224 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT: 225 + u[i].type = QCOMTEE_ARG_TYPE_IO; 226 + if (qcomtee_objref_to_arg(&u[i], &params[i], ctx)) 227 + goto out_failed; 228 + 229 + break; 230 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: 231 + u[i].type = QCOMTEE_ARG_TYPE_OO; 232 + u[i].o = NULL_QCOMTEE_OBJECT; 233 + break; 234 + default: 235 + goto out_failed; 236 + } 237 + } 238 + 239 + return 0; 240 + 241 + out_failed: 242 + /* Undo qcomtee_objref_to_arg(). */ 243 + for (i--; i >= 0; i--) { 244 + if (u[i].type != QCOMTEE_ARG_TYPE_IO) 245 + continue; 246 + 247 + qcomtee_user_object_set_notify(u[i].o, false); 248 + /* See docs for qcomtee_objref_to_arg() for double put. */ 249 + if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB) 250 + qcomtee_object_put(u[i].o); 251 + 252 + qcomtee_object_put(u[i].o); 253 + } 254 + 255 + return -EINVAL; 256 + } 257 + 258 + /** 259 + * qcomtee_params_from_args() - Convert QTEE arguments to TEE parameters. 260 + * @params: TEE parameters. 261 + * @u: QTEE arguments. 262 + * @num_params: number of elements in the parameter array. 263 + * @ctx: context in which the conversion should happen. 264 + * 265 + * @u should have already been initialized by qcomtee_params_to_args(). 
266 + * This also represents the end of a QTEE invocation that started with 267 + * qcomtee_params_to_args() by releasing %QCOMTEE_ARG_TYPE_IO objects. 268 + * 269 + * Return: On success, returns 0; on failure, returns < 0. 270 + */ 271 + static int qcomtee_params_from_args(struct tee_param *params, 272 + struct qcomtee_arg *u, int num_params, 273 + struct tee_context *ctx) 274 + { 275 + int i, np; 276 + 277 + qcomtee_arg_for_each(np, u) { 278 + switch (u[np].type) { 279 + case QCOMTEE_ARG_TYPE_OB: 280 + /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */ 281 + params[np].u.ubuf.size = u[np].b.size; 282 + 283 + break; 284 + case QCOMTEE_ARG_TYPE_IO: 285 + /* IEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT */ 286 + qcomtee_object_put(u[np].o); 287 + 288 + break; 289 + case QCOMTEE_ARG_TYPE_OO: 290 + /* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT */ 291 + if (qcomtee_objref_from_arg(&params[np], &u[np], ctx)) 292 + goto out_failed; 293 + 294 + break; 295 + case QCOMTEE_ARG_TYPE_IB: 296 + default: 297 + break; 298 + } 299 + } 300 + 301 + return 0; 302 + 303 + out_failed: 304 + /* Undo qcomtee_objref_from_arg(). */ 305 + for (i = 0; i < np; i++) { 306 + if (params[i].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) 307 + qcomtee_context_del_qtee_object(&params[i], ctx); 308 + } 309 + 310 + /* Release any IO and OO objects not processed. */ 311 + for (; u[i].type && i < num_params; i++) { 312 + if (u[i].type == QCOMTEE_ARG_TYPE_OO || 313 + u[i].type == QCOMTEE_ARG_TYPE_IO) 314 + qcomtee_object_put(u[i].o); 315 + } 316 + 317 + return -EINVAL; 318 + } 319 + 320 + /* TEE Device Ops. */ 321 + 322 + static int qcomtee_params_check(struct tee_param *params, int num_params) 323 + { 324 + int io = 0, oo = 0, ib = 0, ob = 0; 325 + int i; 326 + 327 + /* QTEE can accept 64 arguments. */ 328 + if (num_params > QCOMTEE_ARGS_MAX) 329 + return -EINVAL; 330 + 331 + /* Supported parameter types. 
 */
	for (i = 0; i < num_params; i++) {
		switch (params[i].attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
			ib++;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
			ob++;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
			io++;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
			oo++;
			break;
		default:
			return -EINVAL;
		}
	}

	/* QTEE can accept 16 arguments of each supported type. */
	if (io > QCOMTEE_ARGS_PER_TYPE || oo > QCOMTEE_ARGS_PER_TYPE ||
	    ib > QCOMTEE_ARGS_PER_TYPE || ob > QCOMTEE_ARGS_PER_TYPE)
		return -EINVAL;

	return 0;
}

/* Check if an operation on ROOT_QCOMTEE_OBJECT from userspace is permitted. */
static int qcomtee_root_object_check(u32 op, struct tee_param *params,
				     int num_params)
{
	/* Some privileged operations recognized by QTEE. */
	if (op == QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE ||
	    op == QCOMTEE_ROOT_OP_ADCI_ACCEPT ||
	    op == QCOMTEE_ROOT_OP_ADCI_SHUTDOWN)
		return -EINVAL;

	/*
	 * QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS is to register with QTEE
	 * by passing a credential object as input OBJREF. TEE_OBJREF_NULL as a
	 * credential object represents a privileged client for QTEE and
	 * is used by the kernel only; reject it when it comes from userspace.
	 */
	if (op == QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS && num_params == 2) {
		if (params[0].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT &&
		    params[1].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) {
			if (params[0].u.objref.id == TEE_OBJREF_NULL)
				return -EINVAL;
		}
	}

	return 0;
}

/**
 * qcomtee_object_invoke() - Invoke a QTEE object.
 * @ctx: TEE context.
 * @arg: ioctl arguments.
 * @params: parameters for the object.
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
static int qcomtee_object_invoke(struct tee_context *ctx,
				 struct tee_ioctl_object_invoke_arg *arg,
				 struct tee_param *params)
{
	struct qcomtee_object_invoke_ctx *oic __free(kfree) = NULL;
	struct qcomtee_context_data *ctxdata = ctx->data;
	struct qcomtee_arg *u __free(kfree) = NULL;
	struct qcomtee_object *object;
	int i, ret, result;

	if (qcomtee_params_check(params, arg->num_params))
		return -EINVAL;

	/* First, handle reserved operations: */
	if (arg->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) {
		del_qtee_object(arg->id, ctxdata);

		return 0;
	}

	/* Otherwise, invoke a QTEE object: */
	oic = qcomtee_object_invoke_ctx_alloc(ctx);
	if (!oic)
		return -ENOMEM;

	/* +1 for ending QCOMTEE_ARG_TYPE_INV. */
	u = kcalloc(arg->num_params + 1, sizeof(*u), GFP_KERNEL);
	if (!u)
		return -ENOMEM;

	/* Get an object to invoke. */
	if (arg->id == TEE_OBJREF_NULL) {
		/* Use ROOT if TEE_OBJREF_NULL is invoked. */
		if (qcomtee_root_object_check(arg->op, params, arg->num_params))
			return -EINVAL;

		object = ROOT_QCOMTEE_OBJECT;
	} else if (find_qtee_object(&object, arg->id, ctxdata)) {
		return -EINVAL;
	}

	ret = qcomtee_params_to_args(u, params, arg->num_params, ctx);
	if (ret)
		goto out;

	ret = qcomtee_object_do_invoke(oic, object, arg->op, u, &result);
	if (ret) {
		/* Transport failure: drop the IO copies taken above. */
		qcomtee_arg_for_each_input_object(i, u) {
			qcomtee_user_object_set_notify(u[i].o, false);
			qcomtee_object_put(u[i].o);
		}

		goto out;
	}

	/* Parse QTEE response and put driver's object copies: */

	if (!result) {
		/* Assume service is UNAVAIL if unable to process the result.
 */
		if (qcomtee_params_from_args(params, u, arg->num_params, ctx))
			result = QCOMTEE_MSG_ERROR_UNAVAIL;
	} else {
		/*
		 * qcomtee_params_to_args() gets a copy of IO for the driver to
		 * make sure they do not get released while in the middle of
		 * invocation. On success (!result), qcomtee_params_from_args()
		 * puts them; Otherwise, put them here.
		 */
		qcomtee_arg_for_each_input_object(i, u)
			qcomtee_object_put(u[i].o);
	}

	arg->ret = result;
out:
	/* No-op for ROOT; drops the reference taken by find_qtee_object(). */
	qcomtee_object_put(object);

	return ret;
}

/**
 * qcomtee_supp_recv() - Wait for a request for the supplicant.
 * @ctx: TEE context.
 * @op: requested operation on the object.
 * @num_params: number of elements in the parameter array.
 * @params: parameters for @op.
 *
 * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT.
 * On input, it provides a user buffer. This buffer is used for parameters of
 * type %TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT in qcomtee_cb_params_from_args().
 * On output, the object ID and request ID are stored in the meta parameter.
 *
 * @num_params is updated to the number of parameters that actually exist
 * in @params on return.
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
static int qcomtee_supp_recv(struct tee_context *ctx, u32 *op, u32 *num_params,
			     struct tee_param *params)
{
	struct qcomtee_user_object_request_data data;
	void __user *uaddr;
	size_t ubuf_size;
	int i, ret;

	if (!*num_params)
		return -EINVAL;

	/* First parameter should be an INOUT + meta parameter. */
	if (params->attr !=
	    (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | TEE_IOCTL_PARAM_ATTR_META))
		return -EINVAL;

	/* Other parameters are none.
 */
	for (i = 1; i < *num_params; i++)
		if (params[i].attr)
			return -EINVAL;

	/* The user buffer address must be 8-byte aligned. */
	if (!IS_ALIGNED(params->u.value.a, 8))
		return -EINVAL;

	/* User buffer and size from meta parameter. */
	uaddr = u64_to_user_ptr(params->u.value.a);
	ubuf_size = params->u.value.b;
	/* Process TEE parameters. +/-1 to ignore the meta parameter. */
	ret = qcomtee_user_object_select(ctx, params + 1, *num_params - 1,
					 uaddr, ubuf_size, &data);
	if (ret)
		return ret;

	params->u.value.a = data.object_id;
	params->u.value.b = data.id;
	params->u.value.c = 0;
	*op = data.op;
	/* +1 to account for the meta parameter itself. */
	*num_params = data.np + 1;

	return 0;
}

/**
 * qcomtee_supp_send() - Submit a response for a request.
 * @ctx: TEE context.
 * @errno: return value for the request.
 * @num_params: number of elements in the parameter array.
 * @params: returned parameters.
 *
 * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT.
 * It specifies the request ID this response belongs to.
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
static int qcomtee_supp_send(struct tee_context *ctx, u32 errno, u32 num_params,
			     struct tee_param *params)
{
	int req_id;

	if (!num_params)
		return -EINVAL;

	/* First parameter should be an OUTPUT + meta parameter. */
	if (params->attr != (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT |
			     TEE_IOCTL_PARAM_ATTR_META))
		return -EINVAL;

	req_id = params->u.value.a;
	/* Process TEE parameters. +/-1 to ignore the meta parameter.
 */
	return qcomtee_user_object_submit(ctx, params + 1, num_params - 1,
					  req_id, errno);
}

static int qcomtee_open(struct tee_context *ctx)
{
	struct qcomtee_context_data *ctxdata __free(kfree) = NULL;

	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
	if (!ctxdata)
		return -ENOMEM;

	/*
	 * In the QTEE driver, the same context is used to refcount resources
	 * shared by QTEE. For example, teedev_ctx_get() is called for any
	 * instance of callback objects (see qcomtee_user_param_to_object()).
	 *
	 * Maintain a copy of teedev for QTEE as it serves as a direct user of
	 * this context. The teedev will be released in the context's release().
	 *
	 * tee_device_unregister() will remain blocked until all contexts
	 * are released. This includes contexts owned by the user, which are
	 * closed by teedev_close_context(), as well as those owned by QTEE
	 * closed by teedev_ctx_put() in object's release().
	 */
	if (!tee_device_get(ctx->teedev))
		return -EINVAL;

	idr_init(&ctxdata->qtee_objects_idr);
	mutex_init(&ctxdata->qtee_lock);
	idr_init(&ctxdata->reqs_idr);
	INIT_LIST_HEAD(&ctxdata->reqs_list);
	mutex_init(&ctxdata->reqs_lock);
	init_completion(&ctxdata->req_c);

	/* Transfer ownership of ctxdata to the context (defeats __free). */
	ctx->data = no_free_ptr(ctxdata);

	return 0;
}

/* Gets called when the user closes the device */
static void qcomtee_close_context(struct tee_context *ctx)
{
	struct qcomtee_context_data *ctxdata = ctx->data;
	struct qcomtee_object *object;
	int id;

	/* Process QUEUED or PROCESSING requests. */
	qcomtee_requests_destroy(ctxdata);
	/* Release QTEE objects. */
	idr_for_each_entry(&ctxdata->qtee_objects_idr, object, id)
		qcomtee_object_put(object);
}

/* Gets called when the final reference to the context goes away.
*/ 614 + static void qcomtee_release(struct tee_context *ctx) 615 + { 616 + struct qcomtee_context_data *ctxdata = ctx->data; 617 + 618 + idr_destroy(&ctxdata->qtee_objects_idr); 619 + idr_destroy(&ctxdata->reqs_idr); 620 + kfree(ctxdata); 621 + 622 + /* There is nothing shared in this context with QTEE. */ 623 + tee_device_put(ctx->teedev); 624 + } 625 + 626 + static void qcomtee_get_version(struct tee_device *teedev, 627 + struct tee_ioctl_version_data *vers) 628 + { 629 + struct tee_ioctl_version_data v = { 630 + .impl_id = TEE_IMPL_ID_QTEE, 631 + .gen_caps = TEE_GEN_CAP_OBJREF, 632 + }; 633 + 634 + *vers = v; 635 + } 636 + 637 + /** 638 + * qcomtee_get_qtee_feature_list() - Query QTEE features versions. 639 + * @ctx: TEE context. 640 + * @id: ID of the feature to query. 641 + * @version: version of the feature. 642 + * 643 + * Used to query the verion of features supported by QTEE. 644 + */ 645 + static void qcomtee_get_qtee_feature_list(struct tee_context *ctx, u32 id, 646 + u32 *version) 647 + { 648 + struct qcomtee_object_invoke_ctx *oic __free(kfree); 649 + struct qcomtee_object *client_env, *service; 650 + struct qcomtee_arg u[3] = { 0 }; 651 + int result; 652 + 653 + oic = qcomtee_object_invoke_ctx_alloc(ctx); 654 + if (!oic) 655 + return; 656 + 657 + client_env = qcomtee_object_get_client_env(oic); 658 + if (client_env == NULL_QCOMTEE_OBJECT) 659 + return; 660 + 661 + /* Get ''FeatureVersions Service'' object. */ 662 + service = qcomtee_object_get_service(oic, client_env, 663 + QCOMTEE_FEATURE_VER_UID); 664 + if (service == NULL_QCOMTEE_OBJECT) 665 + goto out_failed; 666 + 667 + /* IB: Feature to query. */ 668 + u[0].b.addr = &id; 669 + u[0].b.size = sizeof(id); 670 + u[0].type = QCOMTEE_ARG_TYPE_IB; 671 + 672 + /* OB: Version returned. 
 */
	u[1].b.addr = version;
	u[1].b.size = sizeof(*version);
	u[1].type = QCOMTEE_ARG_TYPE_OB;

	/* Best effort: the return value is ignored; *version is left as-is
	 * on failure. */
	qcomtee_object_do_invoke(oic, service, QCOMTEE_FEATURE_VER_OP_GET, u,
				 &result);

out_failed:
	qcomtee_object_put(service);
	qcomtee_object_put(client_env);
}

static const struct tee_driver_ops qcomtee_ops = {
	.get_version = qcomtee_get_version,
	.open = qcomtee_open,
	.close_context = qcomtee_close_context,
	.release = qcomtee_release,
	.object_invoke_func = qcomtee_object_invoke,
	.supp_recv = qcomtee_supp_recv,
	.supp_send = qcomtee_supp_send,
};

static const struct tee_desc qcomtee_desc = {
	.name = "qcomtee",
	.ops = &qcomtee_ops,
	.owner = THIS_MODULE,
};

static int qcomtee_probe(struct platform_device *pdev)
{
	struct workqueue_struct *async_wq;
	struct tee_device *teedev;
	struct tee_shm_pool *pool;
	struct tee_context *ctx;
	struct qcomtee *qcomtee;
	int err;

	qcomtee = kzalloc(sizeof(*qcomtee), GFP_KERNEL);
	if (!qcomtee)
		return -ENOMEM;

	pool = qcomtee_shm_pool_alloc();
	if (IS_ERR(pool)) {
		err = PTR_ERR(pool);

		goto err_free_qcomtee;
	}

	teedev = tee_device_alloc(&qcomtee_desc, NULL, pool, qcomtee);
	if (IS_ERR(teedev)) {
		err = PTR_ERR(teedev);

		goto err_pool_destroy;
	}

	qcomtee->teedev = teedev;
	qcomtee->pool = pool;
	err = tee_device_register(qcomtee->teedev);
	if (err)
		goto err_unreg_teedev;

	platform_set_drvdata(pdev, qcomtee);
	/* Start async wq. */
	async_wq = alloc_ordered_workqueue("qcomtee_wq", 0);
	if (!async_wq) {
		err = -ENOMEM;

		goto err_unreg_teedev;
	}

	qcomtee->wq = async_wq;
	/* Driver context used for async operations of teedev.
 */
	ctx = teedev_open(qcomtee->teedev);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);

		goto err_dest_wq;
	}

	qcomtee->ctx = ctx;
	/* Init Object table. */
	qcomtee->xa_last_id = 0;
	xa_init_flags(&qcomtee->xa_local_objects, XA_FLAGS_ALLOC);
	/* Get QTEE version. */
	qcomtee_get_qtee_feature_list(qcomtee->ctx,
				      QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID,
				      &qcomtee->qtee_version);

	pr_info("QTEE version %u.%u.%u\n",
		QTEE_VERSION_GET_MAJOR(qcomtee->qtee_version),
		QTEE_VERSION_GET_MINOR(qcomtee->qtee_version),
		QTEE_VERSION_GET_PATCH(qcomtee->qtee_version));

	return 0;

err_dest_wq:
	destroy_workqueue(qcomtee->wq);
err_unreg_teedev:
	tee_device_unregister(qcomtee->teedev);
err_pool_destroy:
	tee_shm_pool_free(pool);
err_free_qcomtee:
	kfree(qcomtee);

	return err;
}

/**
 * qcomtee_remove() - Device Removal Routine.
 * @pdev: platform device information struct.
 *
 * It is called by the platform subsystem to alert the driver that it should
 * release the device.
 *
 * QTEE does not provide an API to inform it about a callback object going away.
 * However, when releasing QTEE objects, any callback object sent to QTEE
 * previously would be released by QTEE as part of the object release.
 */
static void qcomtee_remove(struct platform_device *pdev)
{
	struct qcomtee *qcomtee = platform_get_drvdata(pdev);

	teedev_close_context(qcomtee->ctx);
	/* Wait for RELEASE operations to be processed for QTEE objects.
 */
	tee_device_unregister(qcomtee->teedev);
	destroy_workqueue(qcomtee->wq);
	tee_shm_pool_free(qcomtee->pool);
	kfree(qcomtee);
}

static const struct platform_device_id qcomtee_ids[] = { { "qcomtee", 0 }, {} };
MODULE_DEVICE_TABLE(platform, qcomtee_ids);

static struct platform_driver qcomtee_platform_driver = {
	.probe = qcomtee_probe,
	.remove = qcomtee_remove,
	.driver = {
		.name = "qcomtee",
	},
	.id_table = qcomtee_ids,
};

module_platform_driver(qcomtee_platform_driver);

MODULE_AUTHOR("Qualcomm");
MODULE_DESCRIPTION("QTEE driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
+915
drivers/tee/qcomtee/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/xarray.h>

#include "qcomtee.h"

/* QTEE root object. */
struct qcomtee_object qcomtee_object_root = {
	.name = "root",
	.object_type = QCOMTEE_OBJECT_TYPE_ROOT,
	.info.qtee_id = QCOMTEE_MSG_OBJECT_ROOT,
};

/*
 * Next argument of type @type after index @i; stops at the
 * QCOMTEE_ARG_TYPE_INV sentinel, so it may return the sentinel's index.
 */
int qcomtee_next_arg_type(struct qcomtee_arg *u, int i,
			  enum qcomtee_arg_type type)
{
	while (u[i].type != QCOMTEE_ARG_TYPE_INV && u[i].type != type)
		i++;
	return i;
}

/*
 * QTEE expects IDs with QCOMTEE_MSG_OBJECT_NS_BIT set for objects of
 * QCOMTEE_OBJECT_TYPE_CB type. The first ID with QCOMTEE_MSG_OBJECT_NS_BIT
 * set is reserved for the primordial object.
 */
#define QCOMTEE_OBJECT_PRIMORDIAL (QCOMTEE_MSG_OBJECT_NS_BIT)
#define QCOMTEE_OBJECT_ID_START (QCOMTEE_OBJECT_PRIMORDIAL + 1)
#define QCOMTEE_OBJECT_ID_END (U32_MAX)

/* Set an object's type and (optionally) its QTEE id in one shot. */
#define QCOMTEE_OBJECT_SET(p, type, ...) \
	__QCOMTEE_OBJECT_SET(p, type, ##__VA_ARGS__, 0UL)
#define __QCOMTEE_OBJECT_SET(p, type, optr, ...) \
	do { \
		(p)->object_type = (type); \
		(p)->info.qtee_id = (unsigned long)(optr); \
	} while (0)

static struct qcomtee_object *
qcomtee_qtee_object_alloc(struct qcomtee_object_invoke_ctx *oic,
			  unsigned int object_id)
{
	struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
	struct qcomtee_object *object;

	object = kzalloc(sizeof(*object), GFP_KERNEL);
	if (!object)
		return NULL_QCOMTEE_OBJECT;

	/* If failed, "no-name".
 */
	object->name = kasprintf(GFP_KERNEL, "qcomtee-%u", object_id);
	QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_TEE, object_id);
	kref_init(&object->refcount);
	/* A QTEE object requires a context for async operations. */
	object->info.qcomtee_async_ctx = qcomtee->ctx;
	teedev_ctx_get(object->info.qcomtee_async_ctx);

	return object;
}

static void qcomtee_qtee_object_free(struct qcomtee_object *object)
{
	/* See qcomtee_qtee_object_alloc(). */
	teedev_ctx_put(object->info.qcomtee_async_ctx);

	kfree(object->name);
	kfree(object);
}

/* Workqueue handler: sends the RELEASE op for a QTEE object to QTEE. */
static void qcomtee_do_release_qtee_object(struct work_struct *work)
{
	struct qcomtee_object *object;
	struct qcomtee *qcomtee;
	int ret, result;

	/* RELEASE does not require any argument. */
	struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } };

	object = container_of(work, struct qcomtee_object, work);
	qcomtee = tee_get_drvdata(object->info.qcomtee_async_ctx->teedev);
	/* Get the TEE context used for asynchronous operations. */
	qcomtee->oic.ctx = object->info.qcomtee_async_ctx;

	ret = qcomtee_object_do_invoke_internal(&qcomtee->oic, object,
						QCOMTEE_MSG_OBJECT_OP_RELEASE,
						args, &result);

	/* Is it safe to retry the release?
 */
	if (ret && ret != -ENODEV) {
		/* Transient failure: requeue and try the RELEASE again. */
		queue_work(qcomtee->wq, &object->work);
	} else {
		if (ret || result)
			pr_err("%s release failed, ret = %d (%x)\n",
			       qcomtee_object_name(object), ret, result);
		qcomtee_qtee_object_free(object);
	}
}

/* Defer the RELEASE to the ordered workqueue; may run in atomic context. */
static void qcomtee_release_qtee_object(struct qcomtee_object *object)
{
	struct qcomtee *qcomtee =
		tee_get_drvdata(object->info.qcomtee_async_ctx->teedev);

	INIT_WORK(&object->work, qcomtee_do_release_qtee_object);
	queue_work(qcomtee->wq, &object->work);
}

/* kref release callback for qcomtee_object_put(). */
static void qcomtee_object_release(struct kref *refcount)
{
	struct qcomtee_object *object;
	const char *name;

	object = container_of(refcount, struct qcomtee_object, refcount);

	/*
	 * qcomtee_object_get() is called in a RCU read lock. synchronize_rcu()
	 * to avoid releasing the object while it is being accessed in
	 * qcomtee_object_get().
	 */
	synchronize_rcu();

	switch (typeof_qcomtee_object(object)) {
	case QCOMTEE_OBJECT_TYPE_TEE:
		qcomtee_release_qtee_object(object);

		break;
	case QCOMTEE_OBJECT_TYPE_CB:
		/* Save the name: release() may free the object itself. */
		name = object->name;

		if (object->ops->release)
			object->ops->release(object);

		kfree_const(name);

		break;
	case QCOMTEE_OBJECT_TYPE_ROOT:
	case QCOMTEE_OBJECT_TYPE_NULL:
	default:
		break;
	}
}

/**
 * qcomtee_object_get() - Increase the object's reference count.
 * @object: object to increase the reference count.
 *
 * Context: The caller should hold RCU read lock.
 */
int qcomtee_object_get(struct qcomtee_object *object)
{
	/* Static singletons (primordial, NULL, ROOT) are not refcounted. */
	if (object != &qcomtee_primordial_object &&
	    object != NULL_QCOMTEE_OBJECT &&
	    object != ROOT_QCOMTEE_OBJECT)
		return kref_get_unless_zero(&object->refcount);

	return 0;
}

/**
 * qcomtee_object_put() - Decrease the object's reference count.
 * @object: object to decrease the reference count.
 */
void qcomtee_object_put(struct qcomtee_object *object)
{
	/* Static singletons (primordial, NULL, ROOT) are not refcounted. */
	if (object != &qcomtee_primordial_object &&
	    object != NULL_QCOMTEE_OBJECT &&
	    object != ROOT_QCOMTEE_OBJECT)
		kref_put(&object->refcount, qcomtee_object_release);
}

static int qcomtee_idx_alloc(struct qcomtee_object_invoke_ctx *oic, u32 *idx,
			     struct qcomtee_object *object)
{
	struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);

	/* Every ID allocated here has QCOMTEE_MSG_OBJECT_NS_BIT set. */
	return xa_alloc_cyclic(&qcomtee->xa_local_objects, idx, object,
			       XA_LIMIT(QCOMTEE_OBJECT_ID_START,
					QCOMTEE_OBJECT_ID_END),
			       &qcomtee->xa_last_id, GFP_KERNEL);
}

struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic,
					 u32 idx)
{
	struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);

	/* IDs outside the allocation range were never in the table. */
	if (idx < QCOMTEE_OBJECT_ID_START || idx > QCOMTEE_OBJECT_ID_END)
		return NULL_QCOMTEE_OBJECT;

	return xa_erase(&qcomtee->xa_local_objects, idx);
}

/**
 * qcomtee_object_id_get() - Get an ID for an object to send to QTEE.
 * @oic: context to use for the invocation.
 * @object: object to assign an ID.
 * @object_id: object ID.
 *
 * Called on the path to QTEE to construct the message; see
 * qcomtee_prepare_msg() and qcomtee_update_msg().
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
static int qcomtee_object_id_get(struct qcomtee_object_invoke_ctx *oic,
				 struct qcomtee_object *object,
				 unsigned int *object_id)
{
	u32 idx;

	switch (typeof_qcomtee_object(object)) {
	case QCOMTEE_OBJECT_TYPE_CB:
		/* Callback objects get a fresh NS-bit ID from the xarray. */
		if (qcomtee_idx_alloc(oic, &idx, object) < 0)
			return -ENOSPC;

		*object_id = idx;

		break;
	case QCOMTEE_OBJECT_TYPE_ROOT:
	case QCOMTEE_OBJECT_TYPE_TEE:
		/* QTEE-owned objects reuse the ID QTEE assigned to them. */
		*object_id = object->info.qtee_id;

		break;
	case QCOMTEE_OBJECT_TYPE_NULL:
		*object_id = QCOMTEE_MSG_OBJECT_NULL;

		break;
	}

	return 0;
}

/* Release object ID assigned in qcomtee_object_id_get. */
static void qcomtee_object_id_put(struct qcomtee_object_invoke_ctx *oic,
				  unsigned int object_id)
{
	/* Only CB IDs are in the table; qcomtee_idx_erase() ignores others. */
	qcomtee_idx_erase(oic, object_id);
}

/**
 * qcomtee_local_object_get() - Get the object referenced by the ID.
 * @oic: context to use for the invocation.
 * @object_id: object ID.
 *
 * It is called on the path from QTEE.
 * It is called on behalf of QTEE to obtain an instance of an object
 * for a given ID. It increases the object's reference count on success.
 *
 * Return: On error, returns %NULL_QCOMTEE_OBJECT.
 * On success, returns the object.
 */
static struct qcomtee_object *
qcomtee_local_object_get(struct qcomtee_object_invoke_ctx *oic,
			 unsigned int object_id)
{
	struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev);
	struct qcomtee_object *object;

	if (object_id == QCOMTEE_OBJECT_PRIMORDIAL)
		return &qcomtee_primordial_object;

	guard(rcu)();
	object = xa_load(&qcomtee->xa_local_objects, object_id);
	/* It already checks for %NULL_QCOMTEE_OBJECT.
*/ 276 + qcomtee_object_get(object); 277 + 278 + return object; 279 + } 280 + 281 + /** 282 + * qcomtee_object_user_init() - Initialize an object for the user. 283 + * @object: object to initialize. 284 + * @ot: type of object as &enum qcomtee_object_type. 285 + * @ops: instance of callbacks. 286 + * @fmt: name assigned to the object. 287 + * 288 + * Return: On success, returns 0; on failure, returns < 0. 289 + */ 290 + int qcomtee_object_user_init(struct qcomtee_object *object, 291 + enum qcomtee_object_type ot, 292 + struct qcomtee_object_operations *ops, 293 + const char *fmt, ...) 294 + { 295 + va_list ap; 296 + int ret; 297 + 298 + kref_init(&object->refcount); 299 + QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_NULL); 300 + 301 + va_start(ap, fmt); 302 + switch (ot) { 303 + case QCOMTEE_OBJECT_TYPE_NULL: 304 + ret = 0; 305 + 306 + break; 307 + case QCOMTEE_OBJECT_TYPE_CB: 308 + object->ops = ops; 309 + if (!object->ops->dispatch) 310 + return -EINVAL; 311 + 312 + /* If failed, "no-name". */ 313 + object->name = kvasprintf_const(GFP_KERNEL, fmt, ap); 314 + QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_CB); 315 + 316 + ret = 0; 317 + break; 318 + case QCOMTEE_OBJECT_TYPE_ROOT: 319 + case QCOMTEE_OBJECT_TYPE_TEE: 320 + default: 321 + ret = -EINVAL; 322 + } 323 + va_end(ap); 324 + 325 + return ret; 326 + } 327 + 328 + /** 329 + * qcomtee_object_type() - Returns the type of object represented by an ID. 330 + * @object_id: object ID for the object. 331 + * 332 + * Similar to typeof_qcomtee_object(), but instead of receiving an object as 333 + * an argument, it receives an object ID. It is used internally on the return 334 + * path from QTEE. 335 + * 336 + * Return: Returns the type of object referenced by @object_id. 
 */
static enum qcomtee_object_type qcomtee_object_type(unsigned int object_id)
{
	if (object_id == QCOMTEE_MSG_OBJECT_NULL)
		return QCOMTEE_OBJECT_TYPE_NULL;

	/* The NS bit marks IDs the driver allocated for callback objects. */
	if (object_id & QCOMTEE_MSG_OBJECT_NS_BIT)
		return QCOMTEE_OBJECT_TYPE_CB;

	return QCOMTEE_OBJECT_TYPE_TEE;
}

/**
 * qcomtee_object_qtee_init() - Initialize an object for QTEE.
 * @oic: context to use for the invocation.
 * @object: object returned.
 * @object_id: object ID received from QTEE.
 *
 * Return: On failure, returns < 0 and sets @object to %NULL_QCOMTEE_OBJECT.
 * On success, returns 0
 */
static int qcomtee_object_qtee_init(struct qcomtee_object_invoke_ctx *oic,
				    struct qcomtee_object **object,
				    unsigned int object_id)
{
	int ret = 0;

	switch (qcomtee_object_type(object_id)) {
	case QCOMTEE_OBJECT_TYPE_NULL:
		*object = NULL_QCOMTEE_OBJECT;

		break;
	case QCOMTEE_OBJECT_TYPE_CB:
		*object = qcomtee_local_object_get(oic, object_id);
		if (*object == NULL_QCOMTEE_OBJECT)
			ret = -EINVAL;

		break;

	default: /* QCOMTEE_OBJECT_TYPE_TEE */
		*object = qcomtee_qtee_object_alloc(oic, object_id);
		if (*object == NULL_QCOMTEE_OBJECT)
			ret = -ENOMEM;

		break;
	}

	return ret;
}

/*
 * ''Marshaling API''
 * qcomtee_prepare_msg - Prepare the inbound buffer for sending to QTEE
 * qcomtee_update_args - Parse the QTEE response in the inbound buffer
 * qcomtee_prepare_args - Parse the QTEE request from the outbound buffer
 * qcomtee_update_msg - Update the outbound buffer with the response for QTEE
 */

static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic,
			       struct qcomtee_object *object, u32 op,
			       struct qcomtee_arg *u)
{
	struct qcomtee_msg_object_invoke *msg;
	unsigned int object_id;
	int i, ib, ob, io, oo;
	size_t offset;

	/* Use the input message buffer in 'oic'. */
	msg = oic->in_msg.addr;

	/* Start offset in a message for buffer arguments. */
	offset = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
					 qcomtee_args_len(u));

	/* Get the ID of the object being invoked. */
	if (qcomtee_object_id_get(oic, object, &object_id))
		return -ENOSPC;

	ib = 0;
	qcomtee_arg_for_each_input_buffer(i, u) {
		void *msgptr; /* Address of buffer payload: */
		/* Overflow already checked in qcomtee_msg_buffers_alloc(). */
		msg->args[ib].b.offset = offset;
		msg->args[ib].b.size = u[i].b.size;

		msgptr = qcomtee_msg_offset_to_ptr(msg, offset);
		/* Userspace client or kernel client!? */
		if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR))
			memcpy(msgptr, u[i].b.addr, u[i].b.size);
		else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size))
			/*
			 * NOTE(review): this return does not call
			 * qcomtee_object_id_put(object_id); presumably the
			 * invoked object is never TYPE_CB here so no ID was
			 * allocated — confirm against callers.
			 */
			return -EINVAL;

		offset += qcomtee_msg_offset_align(u[i].b.size);
		ib++;
	}

	ob = ib;
	qcomtee_arg_for_each_output_buffer(i, u) {
		/* Overflow already checked in qcomtee_msg_buffers_alloc(). */
		msg->args[ob].b.offset = offset;
		msg->args[ob].b.size = u[i].b.size;

		offset += qcomtee_msg_offset_align(u[i].b.size);
		ob++;
	}

	io = ob;
	qcomtee_arg_for_each_input_object(i, u) {
		if (qcomtee_object_id_get(oic, u[i].o, &msg->args[io].o)) {
			/* Unwind IDs allocated so far, newest first. */
			qcomtee_object_id_put(oic, object_id);
			for (io--; io >= ob; io--)
				qcomtee_object_id_put(oic, msg->args[io].o);

			return -ENOSPC;
		}

		io++;
	}

	oo = io;
	qcomtee_arg_for_each_output_object(i, u)
		oo++;

	/* Set object, operation, and argument counts.
 */
	qcomtee_msg_init(msg, object_id, op, ib, ob, io, oo);

	return 0;
}

/**
 * qcomtee_update_args() - Parse the QTEE response in the inbound buffer.
 * @u: array of arguments for the invocation.
 * @oic: context to use for the invocation.
 *
 * @u must be the same as the one used in qcomtee_prepare_msg() when
 * initializing the inbound buffer.
 *
 * On failure, it continues processing the QTEE message. The caller should
 * do the necessary cleanup, including calling qcomtee_object_put()
 * on the output objects.
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
static int qcomtee_update_args(struct qcomtee_arg *u,
			       struct qcomtee_object_invoke_ctx *oic)
{
	struct qcomtee_msg_object_invoke *msg;
	int i, ib, ob, io, oo;
	int ret = 0;

	/* Use the input message buffer in 'oic'. */
	msg = oic->in_msg.addr;

	/* Count IB entries only to find where OB entries start. */
	ib = 0;
	qcomtee_arg_for_each_input_buffer(i, u)
		ib++;

	ob = ib;
	qcomtee_arg_for_each_output_buffer(i, u) {
		void *msgptr; /* Address of buffer payload: */
		/* QTEE can override the size to a smaller value. */
		u[i].b.size = msg->args[ob].b.size;

		msgptr = qcomtee_msg_offset_to_ptr(msg, msg->args[ob].b.offset);
		/* Userspace client or kernel client!?
*/ 502 + if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR)) 503 + memcpy(u[i].b.addr, msgptr, u[i].b.size); 504 + else if (copy_to_user(u[i].b.uaddr, msgptr, u[i].b.size)) 505 + ret = -EINVAL; 506 + 507 + ob++; 508 + } 509 + 510 + io = ob; 511 + qcomtee_arg_for_each_input_object(i, u) 512 + io++; 513 + 514 + oo = io; 515 + qcomtee_arg_for_each_output_object(i, u) { 516 + if (qcomtee_object_qtee_init(oic, &u[i].o, msg->args[oo].o)) 517 + ret = -EINVAL; 518 + 519 + oo++; 520 + } 521 + 522 + return ret; 523 + } 524 + 525 + /** 526 + * qcomtee_prepare_args() - Parse the QTEE request from the outbound buffer. 527 + * @oic: context to use for the invocation. 528 + * 529 + * It initializes &qcomtee_object_invoke_ctx->u based on the QTEE request in 530 + * the outbound buffer. It sets %QCOMTEE_ARG_TYPE_INV at the end of the array. 531 + * 532 + * On failure, it continues processing the QTEE message. The caller should 533 + * do the necessary cleanup, including calling qcomtee_object_put() 534 + * on the input objects. 535 + * 536 + * Return: On success, returns 0; on failure, returns < 0. 537 + */ 538 + static int qcomtee_prepare_args(struct qcomtee_object_invoke_ctx *oic) 539 + { 540 + struct qcomtee_msg_callback *msg; 541 + int i, ret = 0; 542 + 543 + /* Use the output message buffer in 'oic'. 
*/ 544 + msg = oic->out_msg.addr; 545 + 546 + qcomtee_msg_for_each_input_buffer(i, msg) { 547 + oic->u[i].b.addr = 548 + qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset); 549 + oic->u[i].b.size = msg->args[i].b.size; 550 + oic->u[i].type = QCOMTEE_ARG_TYPE_IB; 551 + } 552 + 553 + qcomtee_msg_for_each_output_buffer(i, msg) { 554 + oic->u[i].b.addr = 555 + qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset); 556 + oic->u[i].b.size = msg->args[i].b.size; 557 + oic->u[i].type = QCOMTEE_ARG_TYPE_OB; 558 + } 559 + 560 + qcomtee_msg_for_each_input_object(i, msg) { 561 + if (qcomtee_object_qtee_init(oic, &oic->u[i].o, msg->args[i].o)) 562 + ret = -EINVAL; 563 + 564 + oic->u[i].type = QCOMTEE_ARG_TYPE_IO; 565 + } 566 + 567 + qcomtee_msg_for_each_output_object(i, msg) 568 + oic->u[i].type = QCOMTEE_ARG_TYPE_OO; 569 + 570 + /* End of Arguments. */ 571 + oic->u[i].type = QCOMTEE_ARG_TYPE_INV; 572 + 573 + return ret; 574 + } 575 + 576 + static int qcomtee_update_msg(struct qcomtee_object_invoke_ctx *oic) 577 + { 578 + struct qcomtee_msg_callback *msg; 579 + int i, ib, ob, io, oo; 580 + 581 + /* Use the output message buffer in 'oic'. */ 582 + msg = oic->out_msg.addr; 583 + 584 + ib = 0; 585 + qcomtee_arg_for_each_input_buffer(i, oic->u) 586 + ib++; 587 + 588 + ob = ib; 589 + qcomtee_arg_for_each_output_buffer(i, oic->u) { 590 + /* Only reduce size; never increase it. */ 591 + if (msg->args[ob].b.size < oic->u[i].b.size) 592 + return -EINVAL; 593 + 594 + msg->args[ob].b.size = oic->u[i].b.size; 595 + ob++; 596 + } 597 + 598 + io = ob; 599 + qcomtee_arg_for_each_input_object(i, oic->u) 600 + io++; 601 + 602 + oo = io; 603 + qcomtee_arg_for_each_output_object(i, oic->u) { 604 + if (qcomtee_object_id_get(oic, oic->u[i].o, &msg->args[oo].o)) { 605 + for (oo--; oo >= io; oo--) 606 + qcomtee_object_id_put(oic, msg->args[oo].o); 607 + 608 + return -ENOSPC; 609 + } 610 + 611 + oo++; 612 + } 613 + 614 + return 0; 615 + } 616 + 617 + /* Invoke a callback object. 
*/ 618 + static void qcomtee_cb_object_invoke(struct qcomtee_object_invoke_ctx *oic, 619 + struct qcomtee_msg_callback *msg) 620 + { 621 + int i, errno; 622 + u32 op; 623 + 624 + /* Get the object being invoked. */ 625 + unsigned int object_id = msg->cxt; 626 + struct qcomtee_object *object; 627 + 628 + /* QTEE cannot invoke a NULL object or objects it hosts. */ 629 + if (qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_NULL || 630 + qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_TEE) { 631 + errno = -EINVAL; 632 + goto out; 633 + } 634 + 635 + object = qcomtee_local_object_get(oic, object_id); 636 + if (object == NULL_QCOMTEE_OBJECT) { 637 + errno = -EINVAL; 638 + goto out; 639 + } 640 + 641 + oic->object = object; 642 + 643 + /* Filter bits used by transport. */ 644 + op = msg->op & QCOMTEE_MSG_OBJECT_OP_MASK; 645 + 646 + switch (op) { 647 + case QCOMTEE_MSG_OBJECT_OP_RELEASE: 648 + qcomtee_object_id_put(oic, object_id); 649 + qcomtee_object_put(object); 650 + errno = 0; 651 + 652 + break; 653 + case QCOMTEE_MSG_OBJECT_OP_RETAIN: 654 + qcomtee_object_get(object); 655 + errno = 0; 656 + 657 + break; 658 + default: 659 + errno = qcomtee_prepare_args(oic); 660 + if (errno) { 661 + /* Release any object that arrived as input. */ 662 + qcomtee_arg_for_each_input_buffer(i, oic->u) 663 + qcomtee_object_put(oic->u[i].o); 664 + 665 + break; 666 + } 667 + 668 + errno = object->ops->dispatch(oic, object, op, oic->u); 669 + if (!errno) { 670 + /* On success, notify at the appropriate time. 
*/ 671 + oic->flags |= QCOMTEE_OIC_FLAG_NOTIFY; 672 + } 673 + } 674 + 675 + out: 676 + 677 + oic->errno = errno; 678 + } 679 + 680 + static int 681 + qcomtee_object_invoke_ctx_invoke(struct qcomtee_object_invoke_ctx *oic, 682 + int *result, u64 *res_type) 683 + { 684 + phys_addr_t out_msg_paddr; 685 + phys_addr_t in_msg_paddr; 686 + int ret; 687 + u64 res; 688 + 689 + tee_shm_get_pa(oic->out_shm, 0, &out_msg_paddr); 690 + tee_shm_get_pa(oic->in_shm, 0, &in_msg_paddr); 691 + if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY)) 692 + ret = qcom_scm_qtee_invoke_smc(in_msg_paddr, oic->in_msg.size, 693 + out_msg_paddr, oic->out_msg.size, 694 + &res, res_type); 695 + else 696 + ret = qcom_scm_qtee_callback_response(out_msg_paddr, 697 + oic->out_msg.size, 698 + &res, res_type); 699 + 700 + if (ret) 701 + pr_err("QTEE returned with %d.\n", ret); 702 + else 703 + *result = (int)res; 704 + 705 + return ret; 706 + } 707 + 708 + /** 709 + * qcomtee_qtee_objects_put() - Put the callback objects in the argument array. 710 + * @u: array of arguments. 711 + * 712 + * When qcomtee_object_do_invoke_internal() is successfully invoked, 713 + * QTEE takes ownership of the callback objects. If the invocation fails, 714 + * qcomtee_object_do_invoke_internal() calls qcomtee_qtee_objects_put() 715 + * to mimic the release of callback objects by QTEE. 716 + */ 717 + static void qcomtee_qtee_objects_put(struct qcomtee_arg *u) 718 + { 719 + int i; 720 + 721 + qcomtee_arg_for_each_input_object(i, u) { 722 + if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB) 723 + qcomtee_object_put(u[i].o); 724 + } 725 + } 726 + 727 + /** 728 + * qcomtee_object_do_invoke_internal() - Submit an invocation for an object. 729 + * @oic: context to use for the current invocation. 730 + * @object: object being invoked. 731 + * @op: requested operation on the object. 732 + * @u: array of arguments for the current invocation. 733 + * @result: result returned from QTEE. 
 *
 * The caller is responsible for keeping track of the refcount for each
 * object, including @object. On return, the caller loses ownership of all
 * input objects of type %QCOMTEE_OBJECT_TYPE_CB.
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic,
				      struct qcomtee_object *object, u32 op,
				      struct qcomtee_arg *u, int *result)
{
	struct qcomtee_msg_callback *cb_msg;
	struct qcomtee_object *qto;
	int i, ret, errno;
	u64 res_type;

	/* Allocate inbound and outbound buffers. */
	ret = qcomtee_msg_buffers_alloc(oic, u);
	if (ret) {
		qcomtee_qtee_objects_put(u);

		return ret;
	}

	ret = qcomtee_prepare_msg(oic, object, op, u);
	if (ret) {
		qcomtee_qtee_objects_put(u);

		goto out;
	}

	/* Callback requests from QTEE arrive in the outbound buffer. */
	cb_msg = oic->out_msg.addr;

	while (1) {
		/*
		 * If a callback was dispatched in the previous iteration,
		 * its result goes back to QTEE in this one.
		 */
		if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) {
			errno = oic->errno;
			if (!errno)
				errno = qcomtee_update_msg(oic);
			qcomtee_msg_set_result(cb_msg, errno);
		}

		/* Invoke the remote object. */
		ret = qcomtee_object_invoke_ctx_invoke(oic, result, &res_type);
		/* Return from callback object's result submission: */
		if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) {
			qto = oic->object;
			if (qto) {
				if (oic->flags & QCOMTEE_OIC_FLAG_NOTIFY) {
					if (qto->ops->notify)
						qto->ops->notify(oic, qto,
								 errno || ret);
				}

				/* Get is in qcomtee_cb_object_invoke(). */
				qcomtee_object_put(qto);
			}

			oic->object = NULL_QCOMTEE_OBJECT;
			oic->flags &= ~(QCOMTEE_OIC_FLAG_BUSY |
					QCOMTEE_OIC_FLAG_NOTIFY);
		}

		if (ret) {
			/*
			 * Unable to finish the invocation.
			 * If QCOMTEE_OIC_FLAG_SHARED is not set, put
			 * QCOMTEE_OBJECT_TYPE_CB input objects.
			 */
			if (!(oic->flags & QCOMTEE_OIC_FLAG_SHARED))
				qcomtee_qtee_objects_put(u);
			else
				ret = -ENODEV;

			goto out;

		} else {
			/*
			 * QTEE obtained ownership of QCOMTEE_OBJECT_TYPE_CB
			 * input objects in 'u'. On further failure, QTEE is
			 * responsible for releasing them.
			 */
			oic->flags |= QCOMTEE_OIC_FLAG_SHARED;
		}

		/* Is it a callback request? */
		if (res_type != QCOMTEE_RESULT_INBOUND_REQ_NEEDED) {
			/*
			 * Parse results. If failed, assume the service
			 * was unavailable (i.e. QCOMTEE_MSG_ERROR_UNAVAIL)
			 * and put output objects to initiate cleanup.
			 */
			if (!*result && qcomtee_update_args(u, oic)) {
				*result = QCOMTEE_MSG_ERROR_UNAVAIL;
				qcomtee_arg_for_each_output_object(i, u)
					qcomtee_object_put(u[i].o);
			}

			break;

		} else {
			oic->flags |= QCOMTEE_OIC_FLAG_BUSY;
			qcomtee_fetch_async_reqs(oic);
			qcomtee_cb_object_invoke(oic, cb_msg);
		}
	}

	qcomtee_fetch_async_reqs(oic);
out:
	qcomtee_msg_buffers_free(oic);

	return ret;
}

/*
 * qcomtee_object_do_invoke() - Validate and submit a user-originated
 * invocation. Only QTEE-hosted objects and non-transport operations are
 * accepted; everything else is rejected before reaching QTEE.
 */
int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic,
			     struct qcomtee_object *object, u32 op,
			     struct qcomtee_arg *u, int *result)
{
	/* User can not set bits used by transport. */
	if (op & ~QCOMTEE_MSG_OBJECT_OP_MASK)
		return -EINVAL;

	/* User can only invoke QTEE hosted objects. */
	if (typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_TEE &&
	    typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_ROOT)
		return -EINVAL;

	/* User cannot directly issue these operations to QTEE. */
	if (op == QCOMTEE_MSG_OBJECT_OP_RELEASE ||
	    op == QCOMTEE_MSG_OBJECT_OP_RETAIN)
		return -EINVAL;

	return qcomtee_object_do_invoke_internal(oic, object, op, u, result);
}

/**
 * qcomtee_object_get_client_env() - Get a privileged client env. object.
 * @oic: context to use for the current invocation.
 *
 * The caller should call qcomtee_object_put() on the returned object
 * to release it.
 *
 * Return: On error, returns %NULL_QCOMTEE_OBJECT.
 *         On success, returns the object.
 */
struct qcomtee_object *
qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic)
{
	/* Two used slots + a zeroed terminator (QCOMTEE_ARG_TYPE_INV). */
	struct qcomtee_arg u[3] = { 0 };
	int ret, result;

	u[0].o = NULL_QCOMTEE_OBJECT;
	u[0].type = QCOMTEE_ARG_TYPE_IO;
	u[1].type = QCOMTEE_ARG_TYPE_OO;
	ret = qcomtee_object_do_invoke(oic, ROOT_QCOMTEE_OBJECT,
				       QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS, u,
				       &result);
	if (ret || result)
		return NULL_QCOMTEE_OBJECT;

	return u[1].o;
}

/*
 * qcomtee_object_get_service() - Open a QTEE service by UID through a
 * client env. object; returns NULL_QCOMTEE_OBJECT on any failure.
 */
struct qcomtee_object *
qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic,
			   struct qcomtee_object *client_env, u32 uid)
{
	/* Two used slots + a zeroed terminator (QCOMTEE_ARG_TYPE_INV). */
	struct qcomtee_arg u[3] = { 0 };
	int ret, result;

	u[0].b.addr = &uid;
	u[0].b.size = sizeof(uid);
	u[0].type = QCOMTEE_ARG_TYPE_IB;
	u[1].type = QCOMTEE_ARG_TYPE_OO;
	ret = qcomtee_object_do_invoke(oic, client_env, QCOMTEE_CLIENT_ENV_OPEN,
				       u, &result);

	if (ret || result)
		return NULL_QCOMTEE_OBJECT;

	return u[1].o;
}
+169
drivers/tee/qcomtee/mem_obj.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/mm.h>

#include "qcomtee.h"

/**
 * DOC: Memory and Mapping Objects
 *
 * QTEE uses memory objects for memory sharing with Linux.
 * A memory object can be a standard dma_buf or a contiguous memory range,
 * e.g., tee_shm. A memory object should support one operation: map. When
 * invoked by QTEE, a mapping object is generated. A mapping object supports
 * one operation: unmap.
 *
 * (1) To map a memory object, QTEE invokes the primordial object with
 *     %QCOMTEE_OBJECT_OP_MAP_REGION operation; see
 *     qcomtee_primordial_obj_dispatch().
 * (2) To unmap a memory object, QTEE releases the mapping object which
 *     calls qcomtee_mem_object_release().
 *
 * The map operation is implemented in the primordial object as a privileged
 * operation instead of qcomtee_mem_object_dispatch(). Otherwise, on
 * platforms without shm_bridge, a user can trick QTEE into writing to the
 * kernel memory by passing a user object as a memory object and returning a
 * random physical address as the result of the mapping request.
 */

struct qcomtee_mem_object {
	struct qcomtee_object object;
	struct tee_shm *shm;
	/* QTEE requires these fields to be page aligned. */
	phys_addr_t paddr; /* Physical address of range. */
	size_t size; /* Size of the range. */
};

#define to_qcomtee_mem_object(o) \
	container_of((o), struct qcomtee_mem_object, object)

static struct qcomtee_object_operations qcomtee_mem_object_ops;

/* Is it a memory object using tee_shm? */
int is_qcomtee_memobj_object(struct qcomtee_object *object)
{
	return object != NULL_QCOMTEE_OBJECT &&
	       typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB &&
	       object->ops == &qcomtee_mem_object_ops;
}

/*
 * Memory objects accept no direct operations from QTEE; mapping is done
 * through the privileged primordial object (see DOC above).
 */
static int qcomtee_mem_object_dispatch(struct qcomtee_object_invoke_ctx *oic,
				       struct qcomtee_object *object, u32 op,
				       struct qcomtee_arg *args)
{
	return -EINVAL;
}

static void qcomtee_mem_object_release(struct qcomtee_object *object)
{
	struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object);

	/* Matching get is in qcomtee_memobj_param_to_object(). */
	tee_shm_put(mem_object->shm);
	kfree(mem_object);
}

static struct qcomtee_object_operations qcomtee_mem_object_ops = {
	.release = qcomtee_mem_object_release,
	.dispatch = qcomtee_mem_object_dispatch,
};

/**
 * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object.
 * @object: object returned.
 * @param: TEE parameter.
 * @ctx: context in which the conversion should happen.
 *
 * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_MEM flags.
 *
 * Return: On success return 0 or <0 on failure.
 */
int qcomtee_memobj_param_to_object(struct qcomtee_object **object,
				   struct tee_param *param,
				   struct tee_context *ctx)
{
	/* __free(kfree) releases mem_object on any early return. */
	struct qcomtee_mem_object *mem_object __free(kfree) = NULL;
	struct tee_shm *shm;
	int err;

	mem_object = kzalloc(sizeof(*mem_object), GFP_KERNEL);
	if (!mem_object)
		return -ENOMEM;

	shm = tee_shm_get_from_id(ctx, param->u.objref.id);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* mem-object wrapping the memref. */
	err = qcomtee_object_user_init(&mem_object->object,
				       QCOMTEE_OBJECT_TYPE_CB,
				       &qcomtee_mem_object_ops, "tee-shm-%d",
				       shm->id);
	if (err) {
		tee_shm_put(shm);

		return err;
	}

	mem_object->paddr = shm->paddr;
	mem_object->size = shm->size;
	mem_object->shm = shm;

	/* no_free_ptr() transfers ownership out of the __free scope. */
	*object = &no_free_ptr(mem_object)->object;

	return 0;
}

/* Reverse what qcomtee_memobj_param_to_object() does. */
int qcomtee_memobj_param_from_object(struct tee_param *param,
				     struct qcomtee_object *object,
				     struct tee_context *ctx)
{
	struct qcomtee_mem_object *mem_object;

	mem_object = to_qcomtee_mem_object(object);
	/* Ensure the memobj is used in the same context it originated from. */
	if (mem_object->shm->ctx != ctx)
		return -EINVAL;

	param->u.objref.id = mem_object->shm->id;
	param->u.objref.flags = QCOMTEE_OBJREF_FLAG_MEM;

	/* Passing shm->id to userspace; drop the reference. */
	qcomtee_object_put(object);

	return 0;
}

/**
 * qcomtee_mem_object_map() - Map a memory object.
 * @object: memory object.
 * @map_object: created mapping object.
 * @mem_paddr: physical address of the memory.
 * @mem_size: size of the memory.
 * @perms: QTEE access permissions.
 *
 * Return: On success return 0 or <0 on failure.
 */
int qcomtee_mem_object_map(struct qcomtee_object *object,
			   struct qcomtee_object **map_object, u64 *mem_paddr,
			   u64 *mem_size, u32 *perms)
{
	struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object);

	/* Reuses the memory object as a mapping object by re-sharing it. */
	qcomtee_object_get(&mem_object->object);

	*map_object = &mem_object->object;
	*mem_paddr = mem_object->paddr;
	*mem_size = mem_object->size;
	*perms = QCOM_SCM_PERM_RW;

	return 0;
}
+113
drivers/tee/qcomtee/primordial_obj.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/delay.h>
#include "qcomtee.h"

/**
 * DOC: Primordial Object
 *
 * After boot, the kernel provides a static object of type
 * %QCOMTEE_OBJECT_TYPE_CB called the primordial object. This object is used
 * for native kernel services or privileged operations.
 *
 * We support:
 *  - %QCOMTEE_OBJECT_OP_MAP_REGION to map a memory object and return mapping
 *    object and mapping information (see qcomtee_mem_object_map()).
 *  - %QCOMTEE_OBJECT_OP_YIELD to yield by the thread running in QTEE.
 *  - %QCOMTEE_OBJECT_OP_SLEEP to wait for a period of time.
 */

#define QCOMTEE_OBJECT_OP_MAP_REGION 0
#define QCOMTEE_OBJECT_OP_YIELD 1
#define QCOMTEE_OBJECT_OP_SLEEP 2

/* Mapping information format as expected by QTEE. */
struct qcomtee_mapping_info {
	u64 paddr;
	u64 len;
	u32 perms;
} __packed;

static int
qcomtee_primordial_obj_dispatch(struct qcomtee_object_invoke_ctx *oic,
				struct qcomtee_object *primordial_object_unused,
				u32 op, struct qcomtee_arg *args)
{
	struct qcomtee_mapping_info *map_info;
	struct qcomtee_object *mem_object;
	struct qcomtee_object *map_object;
	int err = 0;

	switch (op) {
	case QCOMTEE_OBJECT_OP_YIELD:
		cond_resched();
		/* No output object. */
		oic->data = NULL;

		break;
	case QCOMTEE_OBJECT_OP_SLEEP:
		/* Check message format matched QCOMTEE_OBJECT_OP_SLEEP op. */
		if (qcomtee_args_len(args) != 1 ||
		    args[0].type != QCOMTEE_ARG_TYPE_IB ||
		    args[0].b.size < sizeof(u32))
			return -EINVAL;

		/* Input buffer carries the sleep duration in milliseconds. */
		msleep(*(u32 *)(args[0].b.addr));
		/* No output object. */
		oic->data = NULL;

		break;
	case QCOMTEE_OBJECT_OP_MAP_REGION:
		/* Expected: OB (mapping info), IO (mem object), OO (mapping). */
		if (qcomtee_args_len(args) != 3 ||
		    args[0].type != QCOMTEE_ARG_TYPE_OB ||
		    args[1].type != QCOMTEE_ARG_TYPE_IO ||
		    args[2].type != QCOMTEE_ARG_TYPE_OO ||
		    args[0].b.size < sizeof(struct qcomtee_mapping_info))
			return -EINVAL;

		map_info = args[0].b.addr;
		mem_object = args[1].o;

		qcomtee_mem_object_map(mem_object, &map_object,
				       &map_info->paddr, &map_info->len,
				       &map_info->perms);

		args[2].o = map_object;
		/* One output object; pass it for cleanup to notify. */
		oic->data = map_object;

		qcomtee_object_put(mem_object);

		break;
	default:
		err = -EINVAL;
	}

	return err;
}

/* Called after submitting the callback response. */
static void qcomtee_primordial_obj_notify(struct qcomtee_object_invoke_ctx *oic,
					  struct qcomtee_object *unused,
					  int err)
{
	struct qcomtee_object *object = oic->data;

	/* If err, QTEE did not obtain mapping object. Drop it. */
	if (object && err)
		qcomtee_object_put(object);
}

static struct qcomtee_object_operations qcomtee_primordial_obj_ops = {
	.dispatch = qcomtee_primordial_obj_dispatch,
	.notify = qcomtee_primordial_obj_notify,
};

struct qcomtee_object qcomtee_primordial_object = {
	.name = "primordial",
	.object_type = QCOMTEE_OBJECT_TYPE_CB,
	.ops = &qcomtee_primordial_obj_ops
};
+185
drivers/tee/qcomtee/qcomtee.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#ifndef QCOMTEE_H
#define QCOMTEE_H

#include <linux/kobject.h>
#include <linux/tee_core.h>

#include "qcomtee_msg.h"
#include "qcomtee_object.h"

/* Flags relating to object reference. */
#define QCOMTEE_OBJREF_FLAG_TEE BIT(0)
#define QCOMTEE_OBJREF_FLAG_USER BIT(1)
#define QCOMTEE_OBJREF_FLAG_MEM BIT(2)

/**
 * struct qcomtee - Main service struct.
 * @teedev: client device.
 * @pool: shared memory pool.
 * @ctx: driver private context.
 * @oic: context to use for the current driver invocation.
 * @wq: workqueue for QTEE async operations.
 * @xa_local_objects: array of objects exported to QTEE.
 * @xa_last_id: next ID to allocate.
 * @qtee_version: QTEE version.
 */
struct qcomtee {
	struct tee_device *teedev;
	struct tee_shm_pool *pool;
	struct tee_context *ctx;
	struct qcomtee_object_invoke_ctx oic;
	struct workqueue_struct *wq;
	struct xarray xa_local_objects;
	u32 xa_last_id;
	u32 qtee_version;
};

void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic);
struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic,
					 u32 idx);

struct tee_shm_pool *qcomtee_shm_pool_alloc(void);
void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic);
int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
			      struct qcomtee_arg *u);

/**
 * qcomtee_object_do_invoke_internal() - Submit an invocation for an object.
 * @oic: context to use for the current invocation.
 * @object: object being invoked.
 * @op: requested operation on the object.
 * @u: array of arguments for the current invocation.
 * @result: result returned from QTEE.
 *
 * The caller is responsible for keeping track of the refcount for each
 * object, including @object. On return, the caller loses ownership of all
 * input objects of type %QCOMTEE_OBJECT_TYPE_CB.
 *
 * Return: On success, returns 0; on failure, returns < 0.
 */
int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic,
				      struct qcomtee_object *object, u32 op,
				      struct qcomtee_arg *u, int *result);

/**
 * struct qcomtee_context_data - Clients' or supplicants' context.
 * @qtee_objects_idr: QTEE objects in this context.
 * @qtee_lock: mutex for @qtee_objects_idr.
 * @reqs_idr: requests in this context that hold ID.
 * @reqs_list: FIFO for requests in PROCESSING or QUEUED state.
 * @reqs_lock: mutex for @reqs_idr, @reqs_list and request states.
 * @req_c: completion used when the supplicant is waiting for requests.
 * @released: state of this context.
 */
struct qcomtee_context_data {
	struct idr qtee_objects_idr;
	/* Synchronize access to @qtee_objects_idr. */
	struct mutex qtee_lock;

	struct idr reqs_idr;
	struct list_head reqs_list;
	/* Synchronize access to @reqs_idr, @reqs_list and updating requests states. */
	struct mutex reqs_lock;

	struct completion req_c;

	bool released;
};

int qcomtee_context_add_qtee_object(struct tee_param *param,
				    struct qcomtee_object *object,
				    struct tee_context *ctx);
int qcomtee_context_find_qtee_object(struct qcomtee_object **object,
				     struct tee_param *param,
				     struct tee_context *ctx);
void qcomtee_context_del_qtee_object(struct tee_param *param,
				     struct tee_context *ctx);

int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param,
			  struct tee_context *ctx);
int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg,
			    struct tee_context *ctx);

/* OBJECTS: */

/* (1) User Object API. */

int is_qcomtee_user_object(struct qcomtee_object *object);
void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify);
void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata);
int qcomtee_user_param_to_object(struct qcomtee_object **object,
				 struct tee_param *param,
				 struct tee_context *ctx);
int qcomtee_user_param_from_object(struct tee_param *param,
				   struct qcomtee_object *object,
				   struct tee_context *ctx);

/**
 * struct qcomtee_user_object_request_data - Data for user object request.
 * @id: ID assigned to the request.
 * @object_id: Object ID being invoked by QTEE.
 * @op: Requested operation on object.
 * @np: Number of parameters in the request.
 */
struct qcomtee_user_object_request_data {
	int id;
	u64 object_id;
	u32 op;
	int np;
};

int qcomtee_user_object_select(struct tee_context *ctx,
			       struct tee_param *params, int num_params,
			       void __user *uaddr, size_t size,
			       struct qcomtee_user_object_request_data *data);
int qcomtee_user_object_submit(struct tee_context *ctx,
			       struct tee_param *params, int num_params,
			       int req_id, int errno);

/* (2) Primordial Object. */
extern struct qcomtee_object qcomtee_primordial_object;

/* (3) Memory Object API. */

/* Is it a memory object using tee_shm? */
int is_qcomtee_memobj_object(struct qcomtee_object *object);

/**
 * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object.
 * @object: object returned.
 * @param: TEE parameter.
 * @ctx: context in which the conversion should happen.
 *
 * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_MEM flags.
 *
 * Return: On success return 0 or <0 on failure.
 */
int qcomtee_memobj_param_to_object(struct qcomtee_object **object,
				   struct tee_param *param,
				   struct tee_context *ctx);

/* Reverse what qcomtee_memobj_param_to_object() does. */
int qcomtee_memobj_param_from_object(struct tee_param *param,
				     struct qcomtee_object *object,
				     struct tee_context *ctx);

/**
 * qcomtee_mem_object_map() - Map a memory object.
 * @object: memory object.
 * @map_object: created mapping object.
 * @mem_paddr: physical address of the memory.
 * @mem_size: size of the memory.
 * @perms: QTEE access permissions.
 *
 * Return: On success return 0 or <0 on failure.
 */
int qcomtee_mem_object_map(struct qcomtee_object *object,
			   struct qcomtee_object **map_object, u64 *mem_paddr,
			   u64 *mem_size, u32 *perms);

#endif /* QCOMTEE_H */
+304
drivers/tee/qcomtee/qcomtee_msg.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#ifndef QCOMTEE_MSG_H
#define QCOMTEE_MSG_H

#include <linux/bitfield.h>

/**
 * DOC: ''Qualcomm TEE'' (QTEE) Transport Message
 *
 * There are two buffers shared with QTEE: inbound and outbound buffers.
 * The inbound buffer is used for direct object invocation, and the outbound
 * buffer is used to make a request from QTEE to the kernel; i.e., a callback
 * request.
 *
 * The unused tail of the outbound buffer is also used for sending and
 * receiving asynchronous messages. An asynchronous message is independent of
 * the current object invocation (i.e., contents of the inbound buffer) or
 * callback request (i.e., the head of the outbound buffer); see
 * qcomtee_get_async_buffer(). It is used by endpoints (QTEE or kernel) as an
 * optimization to reduce the number of context switches between the secure and
 * non-secure worlds.
 *
 * For instance, QTEE never sends an explicit callback request to release an
 * object in the kernel. Instead, it sends asynchronous release messages in the
 * outbound buffer when QTEE returns from the previous direct object invocation,
 * or appends asynchronous release messages after the current callback request.
 *
 * QTEE supports two types of arguments in a message: buffer and object
 * arguments. Depending on the direction of data flow, they could be input
 * buffer (IB) to QTEE, output buffer (OB) from QTEE, input object (IO) to QTEE,
 * or output object (OO) from QTEE. Object arguments hold object IDs. Buffer
 * arguments hold (offset, size) pairs into the inbound or outbound buffers.
 *
 * QTEE holds an object table for objects it hosts and exposes to the kernel.
 * An object ID is an index to the object table in QTEE.
 *
 * For the direct object invocation message format in the inbound buffer, see
 * &struct qcomtee_msg_object_invoke. For the callback request message format
 * in the outbound buffer, see &struct qcomtee_msg_callback. For the message
 * format for asynchronous messages in the outbound buffer, see
 * &struct qcomtee_async_msg_hdr.
 */

/**
 * define QCOMTEE_MSG_OBJECT_NS_BIT - Non-secure bit
 *
 * Object ID is a globally unique 32-bit number. IDs referencing objects
 * in the kernel should have %QCOMTEE_MSG_OBJECT_NS_BIT set.
 */
#define QCOMTEE_MSG_OBJECT_NS_BIT BIT(31)

/* Static object IDs recognized by QTEE. */
#define QCOMTEE_MSG_OBJECT_NULL (0U)
#define QCOMTEE_MSG_OBJECT_ROOT (1U)

/* Definitions from QTEE as part of the transport protocol. */

/* qcomtee_msg_arg is an argument as recognized by QTEE. */
union qcomtee_msg_arg {
	struct {
		u32 offset;
		u32 size;
	} b;
	u32 o;
};

/* BI and BO payloads in QTEE messages should be at 64-bit boundaries. */
#define qcomtee_msg_offset_align(o) ALIGN((o), sizeof(u64))

/* Operations for objects are 32-bit. Transport uses the upper 16 bits. */
#define QCOMTEE_MSG_OBJECT_OP_MASK GENMASK(15, 0)

/* Reserved Operation IDs sent to QTEE: */
/* QCOMTEE_MSG_OBJECT_OP_RELEASE - Reduces the refcount and releases the object.
 * QCOMTEE_MSG_OBJECT_OP_RETAIN - Increases the refcount.
 *
 * These operation IDs are valid for all objects.
 */

#define QCOMTEE_MSG_OBJECT_OP_RELEASE (QCOMTEE_MSG_OBJECT_OP_MASK - 0)
#define QCOMTEE_MSG_OBJECT_OP_RETAIN (QCOMTEE_MSG_OBJECT_OP_MASK - 1)

/* Subset of operations supported by QTEE root object. */

#define QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS 5
#define QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE 4
#define QCOMTEE_ROOT_OP_ADCI_ACCEPT 8
#define QCOMTEE_ROOT_OP_ADCI_SHUTDOWN 9

/* Subset of operations supported by client_env object. */

#define QCOMTEE_CLIENT_ENV_OPEN 0

/* List of available QTEE service UIDs and subset of operations. */

#define QCOMTEE_FEATURE_VER_UID 2033
#define QCOMTEE_FEATURE_VER_OP_GET 0
/* Get QTEE version number. */
#define QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID 10
#define QTEE_VERSION_GET_MAJOR(x) (((x) >> 22) & 0xffU)
#define QTEE_VERSION_GET_MINOR(x) (((x) >> 12) & 0xffU)
#define QTEE_VERSION_GET_PATCH(x) ((x) >> 0 & 0xfffU)

/* Response types as returned from qcomtee_object_invoke_ctx_invoke(). */

/* The message contains a callback request. */
#define QCOMTEE_RESULT_INBOUND_REQ_NEEDED 3

/**
 * struct qcomtee_msg_object_invoke - Direct object invocation message.
 * @cxt: object ID hosted in QTEE.
 * @op: operation for the object.
 * @counts: number of different types of arguments in @args.
 * @args: array of arguments.
 *
 * @counts consists of 4 * 4-bit fields. Bits 0 - 3 represent the number of
 * input buffers, bits 4 - 7 represent the number of output buffers,
 * bits 8 - 11 represent the number of input objects, and bits 12 - 15
 * represent the number of output objects. The remaining bits should be zero.
 *
 *                    15            12 11             8 7              4 3              0
 *                   +----------------+----------------+----------------+----------------+
 *                   |   #OO objects  |   #IO objects  |   #OB buffers  |   #IB buffers  |
 *                   +----------------+----------------+----------------+----------------+
 *
 * The maximum number of arguments of each type is defined by
 * %QCOMTEE_ARGS_PER_TYPE.
 */
struct qcomtee_msg_object_invoke {
	u32 cxt;
	u32 op;
	u32 counts;
	union qcomtee_msg_arg args[];
};

/* Bit masks for the four 4-bit nibbles holding the counts. */
#define QCOMTEE_MASK_IB GENMASK(3, 0)
#define QCOMTEE_MASK_OB GENMASK(7, 4)
#define QCOMTEE_MASK_IO GENMASK(11, 8)
#define QCOMTEE_MASK_OO GENMASK(15, 12)

/**
 * struct qcomtee_msg_callback - Callback request message.
 * @result: result of operation @op on the object referenced by @cxt.
 * @cxt: object ID hosted in the kernel.
 * @op: operation for the object.
 * @counts: number of different types of arguments in @args.
 * @args: array of arguments.
 *
 * For details of @counts, see &qcomtee_msg_object_invoke.counts.
 */
struct qcomtee_msg_callback {
	u32 result;
	u32 cxt;
	u32 op;
	u32 counts;
	union qcomtee_msg_arg args[];
};

/* Offset in the message for the beginning of the buffer argument's contents. */
#define qcomtee_msg_buffer_args(t, n) \
	qcomtee_msg_offset_align(struct_size_t(t, args, n))
/* Pointer to the beginning of a buffer argument's content at an offset. */
#define qcomtee_msg_offset_to_ptr(m, off) ((void *)&((char *)(m))[(off)])

/* Some helpers to manage msg.counts. */

static inline unsigned int qcomtee_msg_num_ib(u32 counts)
{
	return FIELD_GET(QCOMTEE_MASK_IB, counts);
}

static inline unsigned int qcomtee_msg_num_ob(u32 counts)
{
	return FIELD_GET(QCOMTEE_MASK_OB, counts);
}

static inline unsigned int qcomtee_msg_num_io(u32 counts)
{
	return FIELD_GET(QCOMTEE_MASK_IO, counts);
}

static inline unsigned int qcomtee_msg_num_oo(u32 counts)
{
	return FIELD_GET(QCOMTEE_MASK_OO, counts);
}

/* Arguments are laid out in the fixed order IB, OB, IO, OO. */
static inline unsigned int qcomtee_msg_idx_ib(u32 counts)
{
	return 0;
}

static inline unsigned int qcomtee_msg_idx_ob(u32 counts)
{
	return qcomtee_msg_num_ib(counts);
}

static inline unsigned int qcomtee_msg_idx_io(u32 counts)
{
	return qcomtee_msg_idx_ob(counts) + qcomtee_msg_num_ob(counts);
}

static inline unsigned int qcomtee_msg_idx_oo(u32 counts)
{
	return qcomtee_msg_idx_io(counts) + qcomtee_msg_num_io(counts);
}

#define qcomtee_msg_for_each(i, first, num) \
	for ((i) = (first); (i) < (first) + (num); (i)++)

#define qcomtee_msg_for_each_input_buffer(i, m)              \
	qcomtee_msg_for_each(i, qcomtee_msg_idx_ib((m)->counts), \
			     qcomtee_msg_num_ib((m)->counts))

#define qcomtee_msg_for_each_output_buffer(i, m)             \
	qcomtee_msg_for_each(i, qcomtee_msg_idx_ob((m)->counts), \
			     qcomtee_msg_num_ob((m)->counts))

#define qcomtee_msg_for_each_input_object(i, m)              \
	qcomtee_msg_for_each(i, qcomtee_msg_idx_io((m)->counts), \
			     qcomtee_msg_num_io((m)->counts))

#define qcomtee_msg_for_each_output_object(i, m)             \
	qcomtee_msg_for_each(i, qcomtee_msg_idx_oo((m)->counts), \
			     qcomtee_msg_num_oo((m)->counts))

/* Sum of arguments in a message.
*/ 232 + #define qcomtee_msg_args(m) \ 233 + (qcomtee_msg_idx_oo((m)->counts) + qcomtee_msg_num_oo((m)->counts)) 234 + 235 + static inline void qcomtee_msg_init(struct qcomtee_msg_object_invoke *msg, 236 + u32 cxt, u32 op, int in_buffer, 237 + int out_buffer, int in_object, 238 + int out_object) 239 + { 240 + u32 counts = 0; 241 + 242 + counts |= (in_buffer & 0xfU); 243 + counts |= ((out_buffer - in_buffer) & 0xfU) << 4; 244 + counts |= ((in_object - out_buffer) & 0xfU) << 8; 245 + counts |= ((out_object - in_object) & 0xfU) << 12; 246 + 247 + msg->cxt = cxt; 248 + msg->op = op; 249 + msg->counts = counts; 250 + } 251 + 252 + /* Generic error codes. */ 253 + #define QCOMTEE_MSG_OK 0 /* non-specific success code. */ 254 + #define QCOMTEE_MSG_ERROR 1 /* non-specific error. */ 255 + #define QCOMTEE_MSG_ERROR_INVALID 2 /* unsupported/unrecognized request. */ 256 + #define QCOMTEE_MSG_ERROR_SIZE_IN 3 /* supplied buffer/string too large. */ 257 + #define QCOMTEE_MSG_ERROR_SIZE_OUT 4 /* supplied output buffer too small. */ 258 + #define QCOMTEE_MSG_ERROR_USERBASE 10 /* start of user-defined error range. */ 259 + 260 + /* Transport layer error codes. */ 261 + #define QCOMTEE_MSG_ERROR_DEFUNCT -90 /* object no longer exists. */ 262 + #define QCOMTEE_MSG_ERROR_ABORT -91 /* calling thread must exit. */ 263 + #define QCOMTEE_MSG_ERROR_BADOBJ -92 /* invalid object context. */ 264 + #define QCOMTEE_MSG_ERROR_NOSLOTS -93 /* caller's object table full. */ 265 + #define QCOMTEE_MSG_ERROR_MAXARGS -94 /* too many args. */ 266 + #define QCOMTEE_MSG_ERROR_MAXDATA -95 /* buffers too large. */ 267 + #define QCOMTEE_MSG_ERROR_UNAVAIL -96 /* the request could not be processed. */ 268 + #define QCOMTEE_MSG_ERROR_KMEM -97 /* kernel out of memory. */ 269 + #define QCOMTEE_MSG_ERROR_REMOTE -98 /* local method sent to remote object. */ 270 + #define QCOMTEE_MSG_ERROR_BUSY -99 /* Object is busy. */ 271 + #define QCOMTEE_MSG_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. 
*/ 272 + 273 + static inline void qcomtee_msg_set_result(struct qcomtee_msg_callback *cb_msg, 274 + int err) 275 + { 276 + if (!err) { 277 + cb_msg->result = QCOMTEE_MSG_OK; 278 + } else if (err < 0) { 279 + /* If err < 0, then it is a transport error. */ 280 + switch (err) { 281 + case -ENOMEM: 282 + cb_msg->result = QCOMTEE_MSG_ERROR_KMEM; 283 + break; 284 + case -ENODEV: 285 + cb_msg->result = QCOMTEE_MSG_ERROR_DEFUNCT; 286 + break; 287 + case -ENOSPC: 288 + case -EBUSY: 289 + cb_msg->result = QCOMTEE_MSG_ERROR_BUSY; 290 + break; 291 + case -EBADF: 292 + case -EINVAL: 293 + cb_msg->result = QCOMTEE_MSG_ERROR_UNAVAIL; 294 + break; 295 + default: 296 + cb_msg->result = QCOMTEE_MSG_ERROR; 297 + } 298 + } else { 299 + /* If err > 0, then it is user defined error, pass it as is. */ 300 + cb_msg->result = err; 301 + } 302 + } 303 + 304 + #endif /* QCOMTEE_MSG_H */
+316
drivers/tee/qcomtee/qcomtee_object.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 4 + */ 5 + 6 + #ifndef QCOMTEE_OBJECT_H 7 + #define QCOMTEE_OBJECT_H 8 + 9 + #include <linux/completion.h> 10 + #include <linux/kref.h> 11 + #include <linux/slab.h> 12 + #include <linux/workqueue.h> 13 + 14 + struct qcomtee_object; 15 + 16 + /** 17 + * DOC: Overview 18 + * 19 + * qcomtee_object provides object refcounting, ID allocation for objects hosted 20 + * in the kernel, and necessary message marshaling for Qualcomm TEE (QTEE). 21 + * 22 + * To invoke an object in QTEE, the user calls qcomtee_object_do_invoke() 23 + * while passing an instance of &struct qcomtee_object and the requested 24 + * operation + arguments. 25 + * 26 + * After boot, QTEE provides a static object %ROOT_QCOMTEE_OBJECT (type of 27 + * %QCOMTEE_OBJECT_TYPE_ROOT). The root object is invoked to pass the user's 28 + * credentials and obtain other instances of &struct qcomtee_object (type of 29 + * %QCOMTEE_OBJECT_TYPE_TEE) that represent services and TAs in QTEE; 30 + * see &enum qcomtee_object_type. 31 + * 32 + * The objects received from QTEE are refcounted. So the owner of these objects 33 + * can issue qcomtee_object_get() to increase the refcount and pass objects 34 + * to other clients, or issue qcomtee_object_put() to decrease the refcount 35 + * and release the resources in QTEE. 36 + * 37 + * The kernel can host services accessible to QTEE. A driver should embed 38 + * an instance of &struct qcomtee_object in the struct it wants to export to 39 + * QTEE (this is called a callback object). It issues qcomtee_object_user_init() 40 + * to set the dispatch() operation for the callback object and set its type 41 + * to %QCOMTEE_OBJECT_TYPE_CB. 42 + * 43 + * core.c holds an object table for callback objects. An object ID is assigned 44 + * to each callback object, which is an index to the object table. 
QTEE uses 45 + * these IDs to reference or invoke callback objects. 46 + * 47 + * If QTEE invokes a callback object in the kernel, the dispatch() operation is 48 + * called in the context of the thread that originally called 49 + * qcomtee_object_do_invoke(). 50 + */ 51 + 52 + /** 53 + * enum qcomtee_object_type - Object types. 54 + * @QCOMTEE_OBJECT_TYPE_TEE: object hosted on QTEE. 55 + * @QCOMTEE_OBJECT_TYPE_CB: object hosted on kernel. 56 + * @QCOMTEE_OBJECT_TYPE_ROOT: 'primordial' object. 57 + * @QCOMTEE_OBJECT_TYPE_NULL: NULL object. 58 + * 59 + * The primordial object is used for bootstrapping the IPC connection between 60 + * the kernel and QTEE. It is invoked by the kernel when it wants to get a 61 + * 'client env'. 62 + */ 63 + enum qcomtee_object_type { 64 + QCOMTEE_OBJECT_TYPE_TEE, 65 + QCOMTEE_OBJECT_TYPE_CB, 66 + QCOMTEE_OBJECT_TYPE_ROOT, 67 + QCOMTEE_OBJECT_TYPE_NULL, 68 + }; 69 + 70 + /** 71 + * enum qcomtee_arg_type - Type of QTEE argument. 72 + * @QCOMTEE_ARG_TYPE_INV: invalid type. 73 + * @QCOMTEE_ARG_TYPE_OB: output buffer (OB). 74 + * @QCOMTEE_ARG_TYPE_OO: output object (OO). 75 + * @QCOMTEE_ARG_TYPE_IB: input buffer (IB). 76 + * @QCOMTEE_ARG_TYPE_IO: input object (IO). 77 + * 78 + * Use the invalid type to specify the end of the argument array. 79 + */ 80 + enum qcomtee_arg_type { 81 + QCOMTEE_ARG_TYPE_INV = 0, 82 + QCOMTEE_ARG_TYPE_OB, 83 + QCOMTEE_ARG_TYPE_OO, 84 + QCOMTEE_ARG_TYPE_IB, 85 + QCOMTEE_ARG_TYPE_IO, 86 + QCOMTEE_ARG_TYPE_NR, 87 + }; 88 + 89 + /** 90 + * define QCOMTEE_ARGS_PER_TYPE - Maximum arguments of a specific type. 91 + * 92 + * The QTEE transport protocol limits the maximum number of arguments of 93 + * a specific type (i.e., IB, OB, IO, and OO). 94 + */ 95 + #define QCOMTEE_ARGS_PER_TYPE 16 96 + 97 + /* Maximum arguments that can fit in a QTEE message, ignoring the type. 
*/ 98 + #define QCOMTEE_ARGS_MAX (QCOMTEE_ARGS_PER_TYPE * (QCOMTEE_ARG_TYPE_NR - 1)) 99 + 100 + struct qcomtee_buffer { 101 + union { 102 + void *addr; 103 + void __user *uaddr; 104 + }; 105 + size_t size; 106 + }; 107 + 108 + /** 109 + * struct qcomtee_arg - Argument for QTEE object invocation. 110 + * @type: type of argument as &enum qcomtee_arg_type. 111 + * @flags: extra flags. 112 + * @b: address and size if the type of argument is a buffer. 113 + * @o: object instance if the type of argument is an object. 114 + * 115 + * &qcomtee_arg.flags only accepts %QCOMTEE_ARG_FLAGS_UADDR for now, which 116 + * states that &qcomtee_arg.b contains a userspace address in uaddr. 117 + */ 118 + struct qcomtee_arg { 119 + enum qcomtee_arg_type type; 120 + /* 'b.uaddr' holds a __user address. */ 121 + #define QCOMTEE_ARG_FLAGS_UADDR BIT(0) 122 + unsigned int flags; 123 + union { 124 + struct qcomtee_buffer b; 125 + struct qcomtee_object *o; 126 + }; 127 + }; 128 + 129 + static inline int qcomtee_args_len(struct qcomtee_arg *args) 130 + { 131 + int i = 0; 132 + 133 + while (args[i].type != QCOMTEE_ARG_TYPE_INV) 134 + i++; 135 + return i; 136 + } 137 + 138 + /* Context is busy (callback is in progress). */ 139 + #define QCOMTEE_OIC_FLAG_BUSY BIT(1) 140 + /* Context needs to notify the current object. */ 141 + #define QCOMTEE_OIC_FLAG_NOTIFY BIT(2) 142 + /* Context has shared state with QTEE. */ 143 + #define QCOMTEE_OIC_FLAG_SHARED BIT(3) 144 + 145 + /** 146 + * struct qcomtee_object_invoke_ctx - QTEE context for object invocation. 147 + * @ctx: TEE context for this invocation. 148 + * @flags: flags for the invocation context. 149 + * @errno: error code for the invocation. 150 + * @object: current object invoked in this callback context. 151 + * @u: array of arguments for the current invocation (+1 for ending arg). 152 + * @in_msg: inbound buffer shared with QTEE. 153 + * @out_msg: outbound buffer shared with QTEE. 154 + * @in_shm: TEE shm allocated for inbound buffer. 
155 + * @out_shm: TEE shm allocated for outbound buffer. 156 + * @data: extra data attached to this context. 157 + */ 158 + struct qcomtee_object_invoke_ctx { 159 + struct tee_context *ctx; 160 + unsigned long flags; 161 + int errno; 162 + 163 + struct qcomtee_object *object; 164 + struct qcomtee_arg u[QCOMTEE_ARGS_MAX + 1]; 165 + 166 + struct qcomtee_buffer in_msg; 167 + struct qcomtee_buffer out_msg; 168 + struct tee_shm *in_shm; 169 + struct tee_shm *out_shm; 170 + 171 + void *data; 172 + }; 173 + 174 + static inline struct qcomtee_object_invoke_ctx * 175 + qcomtee_object_invoke_ctx_alloc(struct tee_context *ctx) 176 + { 177 + struct qcomtee_object_invoke_ctx *oic; 178 + 179 + oic = kzalloc(sizeof(*oic), GFP_KERNEL); 180 + if (oic) 181 + oic->ctx = ctx; 182 + return oic; 183 + } 184 + 185 + /** 186 + * qcomtee_object_do_invoke() - Submit an invocation for an object. 187 + * @oic: context to use for the current invocation. 188 + * @object: object being invoked. 189 + * @op: requested operation on the object. 190 + * @u: array of arguments for the current invocation. 191 + * @result: result returned from QTEE. 192 + * 193 + * The caller is responsible for keeping track of the refcount for each object, 194 + * including @object. On return, the caller loses ownership of all input 195 + * objects of type %QCOMTEE_OBJECT_TYPE_CB. 196 + * 197 + * @object can be of %QCOMTEE_OBJECT_TYPE_ROOT or %QCOMTEE_OBJECT_TYPE_TEE. 198 + * 199 + * Return: On success, returns 0; on failure, returns < 0. 200 + */ 201 + int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic, 202 + struct qcomtee_object *object, u32 op, 203 + struct qcomtee_arg *u, int *result); 204 + 205 + /** 206 + * struct qcomtee_object_operations - Callback object operations. 207 + * @release: release the object if QTEE is not using it. 208 + * @dispatch: dispatch the operation requested by QTEE. 209 + * @notify: report the status of any pending response submitted by @dispatch. 
210 + */ 211 + struct qcomtee_object_operations { 212 + void (*release)(struct qcomtee_object *object); 213 + int (*dispatch)(struct qcomtee_object_invoke_ctx *oic, 214 + struct qcomtee_object *object, u32 op, 215 + struct qcomtee_arg *args); 216 + void (*notify)(struct qcomtee_object_invoke_ctx *oic, 217 + struct qcomtee_object *object, int err); 218 + }; 219 + 220 + /** 221 + * struct qcomtee_object - QTEE or kernel object. 222 + * @name: object name. 223 + * @refcount: reference counter. 224 + * @object_type: object type as &enum qcomtee_object_type. 225 + * @info: extra information for the object. 226 + * @ops: callback operations for objects of type %QCOMTEE_OBJECT_TYPE_CB. 227 + * @work: work for async operations on the object. 228 + * 229 + * @work is used for releasing objects of %QCOMTEE_OBJECT_TYPE_TEE type. 230 + */ 231 + struct qcomtee_object { 232 + const char *name; 233 + struct kref refcount; 234 + 235 + enum qcomtee_object_type object_type; 236 + struct object_info { 237 + unsigned long qtee_id; 238 + /* TEE context for QTEE object async requests. */ 239 + struct tee_context *qcomtee_async_ctx; 240 + } info; 241 + 242 + struct qcomtee_object_operations *ops; 243 + struct work_struct work; 244 + }; 245 + 246 + /* Static instances of qcomtee_object objects. 
*/ 247 + #define NULL_QCOMTEE_OBJECT ((struct qcomtee_object *)(0)) 248 + extern struct qcomtee_object qcomtee_object_root; 249 + #define ROOT_QCOMTEE_OBJECT (&qcomtee_object_root) 250 + 251 + static inline enum qcomtee_object_type 252 + typeof_qcomtee_object(struct qcomtee_object *object) 253 + { 254 + if (object == NULL_QCOMTEE_OBJECT) 255 + return QCOMTEE_OBJECT_TYPE_NULL; 256 + return object->object_type; 257 + } 258 + 259 + static inline const char *qcomtee_object_name(struct qcomtee_object *object) 260 + { 261 + if (object == NULL_QCOMTEE_OBJECT) 262 + return "null"; 263 + 264 + if (!object->name) 265 + return "no-name"; 266 + return object->name; 267 + } 268 + 269 + /** 270 + * qcomtee_object_user_init() - Initialize an object for the user. 271 + * @object: object to initialize. 272 + * @ot: type of object as &enum qcomtee_object_type. 273 + * @ops: instance of callbacks. 274 + * @fmt: name assigned to the object. 275 + * 276 + * Return: On success, returns 0; on failure, returns < 0. 277 + */ 278 + int qcomtee_object_user_init(struct qcomtee_object *object, 279 + enum qcomtee_object_type ot, 280 + struct qcomtee_object_operations *ops, 281 + const char *fmt, ...) __printf(4, 5); 282 + 283 + /* Object release is RCU protected. */ 284 + int qcomtee_object_get(struct qcomtee_object *object); 285 + void qcomtee_object_put(struct qcomtee_object *object); 286 + 287 + #define qcomtee_arg_for_each(i, args) \ 288 + for (i = 0; args[i].type != QCOMTEE_ARG_TYPE_INV; i++) 289 + 290 + /* Next argument of type @type after index @i. */ 291 + int qcomtee_next_arg_type(struct qcomtee_arg *u, int i, 292 + enum qcomtee_arg_type type); 293 + 294 + /* Iterate over argument of given type. 
*/ 295 + #define qcomtee_arg_for_each_type(i, args, at) \ 296 + for (i = qcomtee_next_arg_type(args, 0, at); \ 297 + args[i].type != QCOMTEE_ARG_TYPE_INV; \ 298 + i = qcomtee_next_arg_type(args, i + 1, at)) 299 + 300 + #define qcomtee_arg_for_each_input_buffer(i, args) \ 301 + qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IB) 302 + #define qcomtee_arg_for_each_output_buffer(i, args) \ 303 + qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OB) 304 + #define qcomtee_arg_for_each_input_object(i, args) \ 305 + qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IO) 306 + #define qcomtee_arg_for_each_output_object(i, args) \ 307 + qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OO) 308 + 309 + struct qcomtee_object * 310 + qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic); 311 + 312 + struct qcomtee_object * 313 + qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic, 314 + struct qcomtee_object *client_env, u32 uid); 315 + 316 + #endif /* QCOMTEE_OBJECT_H */
+150
drivers/tee/qcomtee/shm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/firmware/qcom/qcom_tzmem.h> 9 + #include <linux/mm.h> 10 + 11 + #include "qcomtee.h" 12 + 13 + /** 14 + * define MAX_OUTBOUND_BUFFER_SIZE - Maximum size of outbound buffers. 15 + * 16 + * The size of outbound buffer depends on QTEE callback requests. 17 + */ 18 + #define MAX_OUTBOUND_BUFFER_SIZE SZ_4K 19 + 20 + /** 21 + * define MAX_INBOUND_BUFFER_SIZE - Maximum size of the inbound buffer. 22 + * 23 + * The size of the inbound buffer depends on the user's requests, 24 + * specifically the number of IB and OB arguments. If an invocation 25 + * requires a size larger than %MAX_INBOUND_BUFFER_SIZE, the user should 26 + * consider using another form of shared memory with QTEE. 27 + */ 28 + #define MAX_INBOUND_BUFFER_SIZE SZ_4M 29 + 30 + /** 31 + * qcomtee_msg_buffers_alloc() - Allocate inbound and outbound buffers. 32 + * @oic: context to use for the current invocation. 33 + * @u: array of arguments for the current invocation. 34 + * 35 + * It calculates the size of inbound and outbound buffers based on the 36 + * arguments in @u. It allocates the buffers from the teedev pool. 37 + * 38 + * Return: On success, returns 0. On error, returns < 0. 39 + */ 40 + int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic, 41 + struct qcomtee_arg *u) 42 + { 43 + struct tee_context *ctx = oic->ctx; 44 + struct tee_shm *shm; 45 + size_t size; 46 + int i; 47 + 48 + /* Start offset in a message for buffer arguments. */ 49 + size = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke, 50 + qcomtee_args_len(u)); 51 + if (size > MAX_INBOUND_BUFFER_SIZE) 52 + return -EINVAL; 53 + 54 + /* Add size of IB arguments. 
*/ 55 + qcomtee_arg_for_each_input_buffer(i, u) { 56 + size = size_add(size, qcomtee_msg_offset_align(u[i].b.size)); 57 + if (size > MAX_INBOUND_BUFFER_SIZE) 58 + return -EINVAL; 59 + } 60 + 61 + /* Add size of OB arguments. */ 62 + qcomtee_arg_for_each_output_buffer(i, u) { 63 + size = size_add(size, qcomtee_msg_offset_align(u[i].b.size)); 64 + if (size > MAX_INBOUND_BUFFER_SIZE) 65 + return -EINVAL; 66 + } 67 + 68 + shm = tee_shm_alloc_priv_buf(ctx, size); 69 + if (IS_ERR(shm)) 70 + return PTR_ERR(shm); 71 + 72 + /* Allocate inbound buffer. */ 73 + oic->in_shm = shm; 74 + shm = tee_shm_alloc_priv_buf(ctx, MAX_OUTBOUND_BUFFER_SIZE); 75 + if (IS_ERR(shm)) { 76 + tee_shm_free(oic->in_shm); 77 + 78 + return PTR_ERR(shm); 79 + } 80 + /* Allocate outbound buffer. */ 81 + oic->out_shm = shm; 82 + 83 + oic->in_msg.addr = tee_shm_get_va(oic->in_shm, 0); 84 + oic->in_msg.size = tee_shm_get_size(oic->in_shm); 85 + oic->out_msg.addr = tee_shm_get_va(oic->out_shm, 0); 86 + oic->out_msg.size = tee_shm_get_size(oic->out_shm); 87 + /* QTEE assume unused buffers are zeroed. */ 88 + memzero_explicit(oic->in_msg.addr, oic->in_msg.size); 89 + memzero_explicit(oic->out_msg.addr, oic->out_msg.size); 90 + 91 + return 0; 92 + } 93 + 94 + void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic) 95 + { 96 + tee_shm_free(oic->in_shm); 97 + tee_shm_free(oic->out_shm); 98 + } 99 + 100 + /* Dynamic shared memory pool based on tee_dyn_shm_alloc_helper(). 
*/ 101 + 102 + static int qcomtee_shm_register(struct tee_context *ctx, struct tee_shm *shm, 103 + struct page **pages, size_t num_pages, 104 + unsigned long start) 105 + { 106 + return qcom_tzmem_shm_bridge_create(shm->paddr, shm->size, 107 + &shm->sec_world_id); 108 + } 109 + 110 + static int qcomtee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm) 111 + { 112 + qcom_tzmem_shm_bridge_delete(shm->sec_world_id); 113 + 114 + return 0; 115 + } 116 + 117 + static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, 118 + size_t size, size_t align) 119 + { 120 + return tee_dyn_shm_alloc_helper(shm, size, align, qcomtee_shm_register); 121 + } 122 + 123 + static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) 124 + { 125 + tee_dyn_shm_free_helper(shm, qcomtee_shm_unregister); 126 + } 127 + 128 + static void pool_op_destroy_pool(struct tee_shm_pool *pool) 129 + { 130 + kfree(pool); 131 + } 132 + 133 + static const struct tee_shm_pool_ops pool_ops = { 134 + .alloc = pool_op_alloc, 135 + .free = pool_op_free, 136 + .destroy_pool = pool_op_destroy_pool, 137 + }; 138 + 139 + struct tee_shm_pool *qcomtee_shm_pool_alloc(void) 140 + { 141 + struct tee_shm_pool *pool; 142 + 143 + pool = kzalloc(sizeof(*pool), GFP_KERNEL); 144 + if (!pool) 145 + return ERR_PTR(-ENOMEM); 146 + 147 + pool->ops = &pool_ops; 148 + 149 + return pool; 150 + }
+692
drivers/tee/qcomtee/user_obj.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/slab.h> 9 + #include <linux/uaccess.h> 10 + 11 + #include "qcomtee.h" 12 + 13 + /** 14 + * DOC: User Objects aka Supplicants 15 + * 16 + * Any userspace process with access to the TEE device file can behave as a 17 + * supplicant by creating a user object. Any TEE parameter of type OBJREF with 18 + * %QCOMTEE_OBJREF_FLAG_USER flag set is considered a user object. 19 + * 20 + * A supplicant uses qcomtee_user_object_select() (i.e. TEE_IOC_SUPPL_RECV) to 21 + * receive a QTEE user object request and qcomtee_user_object_submit() 22 + * (i.e. TEE_IOC_SUPPL_SEND) to submit a response. QTEE expects to receive the 23 + * response, including OB and OO in a specific order in the message; parameters 24 + * submitted with qcomtee_user_object_submit() should maintain this order. 25 + */ 26 + 27 + /** 28 + * struct qcomtee_user_object - User object. 29 + * @object: &struct qcomtee_object representing the user object. 30 + * @ctx: context for which the user object is defined. 31 + * @object_id: object ID in @ctx. 32 + * @notify: notify on release. 33 + * 34 + * Any object managed in userspace is represented by this struct. 35 + * If @notify is set, a notification message is sent back to userspace 36 + * upon release. 37 + */ 38 + struct qcomtee_user_object { 39 + struct qcomtee_object object; 40 + struct tee_context *ctx; 41 + u64 object_id; 42 + bool notify; 43 + }; 44 + 45 + #define to_qcomtee_user_object(o) \ 46 + container_of((o), struct qcomtee_user_object, object) 47 + 48 + static struct qcomtee_object_operations qcomtee_user_object_ops; 49 + 50 + /* Is it a user object? 
*/ 51 + int is_qcomtee_user_object(struct qcomtee_object *object) 52 + { 53 + return object != NULL_QCOMTEE_OBJECT && 54 + typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB && 55 + object->ops == &qcomtee_user_object_ops; 56 + } 57 + 58 + /* Set the user object's 'notify on release' flag. */ 59 + void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify) 60 + { 61 + if (is_qcomtee_user_object(object)) 62 + to_qcomtee_user_object(object)->notify = notify; 63 + } 64 + 65 + /* Supplicant Requests: */ 66 + 67 + /** 68 + * enum qcomtee_req_state - Current state of request. 69 + * @QCOMTEE_REQ_QUEUED: Request is waiting for supplicant. 70 + * @QCOMTEE_REQ_PROCESSING: Request has been picked by the supplicant. 71 + * @QCOMTEE_REQ_PROCESSED: Response has been submitted for the request. 72 + */ 73 + enum qcomtee_req_state { 74 + QCOMTEE_REQ_QUEUED = 1, 75 + QCOMTEE_REQ_PROCESSING, 76 + QCOMTEE_REQ_PROCESSED, 77 + }; 78 + 79 + /* User requests sent to supplicants. */ 80 + struct qcomtee_ureq { 81 + enum qcomtee_req_state state; 82 + 83 + /* User Request: */ 84 + int req_id; 85 + u64 object_id; 86 + u32 op; 87 + struct qcomtee_arg *args; 88 + int errno; 89 + 90 + struct list_head node; 91 + struct completion c; /* Completion for whoever wait. */ 92 + }; 93 + 94 + /* 95 + * Placeholder for a PROCESSING request in qcomtee_context.reqs_idr. 96 + * 97 + * If the thread that calls qcomtee_object_invoke() dies and the supplicant 98 + * is processing the request, replace the entry in qcomtee_context.reqs_idr 99 + * with empty_ureq. This ensures that (1) the req_id remains busy and is not 100 + * reused, and (2) the supplicant fails to submit the response and performs 101 + * the necessary rollback. 102 + */ 103 + static struct qcomtee_ureq empty_ureq = { .state = QCOMTEE_REQ_PROCESSING }; 104 + 105 + /* Enqueue a user request for a context and assign a request ID. 
*/ 106 + static int ureq_enqueue(struct qcomtee_context_data *ctxdata, 107 + struct qcomtee_ureq *ureq) 108 + { 109 + int ret; 110 + 111 + guard(mutex)(&ctxdata->reqs_lock); 112 + /* Supplicant is dying. */ 113 + if (ctxdata->released) 114 + return -ENODEV; 115 + 116 + /* Allocate an ID and queue the request. */ 117 + ret = idr_alloc(&ctxdata->reqs_idr, ureq, 0, 0, GFP_KERNEL); 118 + if (ret < 0) 119 + return ret; 120 + 121 + ureq->req_id = ret; 122 + ureq->state = QCOMTEE_REQ_QUEUED; 123 + list_add_tail(&ureq->node, &ctxdata->reqs_list); 124 + 125 + return 0; 126 + } 127 + 128 + /** 129 + * ureq_dequeue() - Dequeue a user request from a context. 130 + * @ctxdata: context data for a context to dequeue the request. 131 + * @req_id: ID of the request to be dequeued. 132 + * 133 + * It dequeues a user request and releases its request ID. 134 + * 135 + * Context: The caller should hold &qcomtee_context_data->reqs_lock. 136 + * Return: Returns the user request associated with this ID; otherwise, NULL. 137 + */ 138 + static struct qcomtee_ureq *ureq_dequeue(struct qcomtee_context_data *ctxdata, 139 + int req_id) 140 + { 141 + struct qcomtee_ureq *ureq; 142 + 143 + ureq = idr_remove(&ctxdata->reqs_idr, req_id); 144 + if (ureq == &empty_ureq || !ureq) 145 + return NULL; 146 + 147 + list_del(&ureq->node); 148 + 149 + return ureq; 150 + } 151 + 152 + /** 153 + * ureq_select() - Select the next request in a context. 154 + * @ctxdata: context data for a context to pop a request. 155 + * @ubuf_size: size of the available buffer for UBUF parameters. 156 + * @num_params: number of entries for the TEE parameter array. 157 + * 158 + * It checks if @num_params is large enough to fit the next request arguments. 159 + * It checks if @ubuf_size is large enough to fit IB buffer arguments. 160 + * 161 + * Context: The caller should hold &qcomtee_context_data->reqs_lock. 162 + * Return: On success, returns a request; 163 + * on failure, returns NULL and ERR_PTR. 
164 + */ 165 + static struct qcomtee_ureq *ureq_select(struct qcomtee_context_data *ctxdata, 166 + size_t ubuf_size, int num_params) 167 + { 168 + struct qcomtee_ureq *req, *ureq = NULL; 169 + struct qcomtee_arg *u; 170 + int i; 171 + 172 + /* Find the a queued request. */ 173 + list_for_each_entry(req, &ctxdata->reqs_list, node) { 174 + if (req->state == QCOMTEE_REQ_QUEUED) { 175 + ureq = req; 176 + break; 177 + } 178 + } 179 + 180 + if (!ureq) 181 + return NULL; 182 + 183 + u = ureq->args; 184 + /* (1) Is there enough TEE parameters? */ 185 + if (num_params < qcomtee_args_len(u)) 186 + return ERR_PTR(-EINVAL); 187 + /* (2) Is there enough space to pass input buffers? */ 188 + qcomtee_arg_for_each_input_buffer(i, u) { 189 + ubuf_size = size_sub(ubuf_size, u[i].b.size); 190 + if (ubuf_size == SIZE_MAX) 191 + return ERR_PTR(-EINVAL); 192 + 193 + ubuf_size = round_down(ubuf_size, 8); 194 + } 195 + 196 + return ureq; 197 + } 198 + 199 + /* Gets called when the user closes the device. */ 200 + void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata) 201 + { 202 + struct qcomtee_ureq *req, *ureq; 203 + 204 + guard(mutex)(&ctxdata->reqs_lock); 205 + /* So ureq_enqueue() refuses new requests from QTEE. */ 206 + ctxdata->released = true; 207 + /* ureqs in reqs_list are in QUEUED or PROCESSING (!= empty_ureq) state. */ 208 + list_for_each_entry_safe(ureq, req, &ctxdata->reqs_list, node) { 209 + ureq_dequeue(ctxdata, ureq->req_id); 210 + 211 + if (ureq->op != QCOMTEE_MSG_OBJECT_OP_RELEASE) { 212 + ureq->state = QCOMTEE_REQ_PROCESSED; 213 + ureq->errno = -ENODEV; 214 + 215 + complete(&ureq->c); 216 + } else { 217 + kfree(ureq); 218 + } 219 + } 220 + } 221 + 222 + /* User Object API. */ 223 + 224 + /* User object dispatcher. 
*/ 225 + static int qcomtee_user_object_dispatch(struct qcomtee_object_invoke_ctx *oic, 226 + struct qcomtee_object *object, u32 op, 227 + struct qcomtee_arg *args) 228 + { 229 + struct qcomtee_user_object *uo = to_qcomtee_user_object(object); 230 + struct qcomtee_context_data *ctxdata = uo->ctx->data; 231 + struct qcomtee_ureq *ureq __free(kfree) = NULL; 232 + int errno; 233 + 234 + ureq = kzalloc(sizeof(*ureq), GFP_KERNEL); 235 + if (!ureq) 236 + return -ENOMEM; 237 + 238 + init_completion(&ureq->c); 239 + ureq->object_id = uo->object_id; 240 + ureq->op = op; 241 + ureq->args = args; 242 + 243 + /* Queue the request. */ 244 + if (ureq_enqueue(ctxdata, ureq)) 245 + return -ENODEV; 246 + /* Wakeup supplicant to process it. */ 247 + complete(&ctxdata->req_c); 248 + 249 + /* 250 + * Wait for the supplicant to process the request. Wait as KILLABLE 251 + * in case the supplicant and invoke thread are both running from the 252 + * same process, the supplicant crashes, or the shutdown sequence 253 + * starts with supplicant dies first; otherwise, it stuck indefinitely. 254 + * 255 + * If the supplicant processes long-running requests, also use 256 + * TASK_FREEZABLE to allow the device to safely suspend if needed. 257 + */ 258 + if (!wait_for_completion_state(&ureq->c, 259 + TASK_KILLABLE | TASK_FREEZABLE)) { 260 + errno = ureq->errno; 261 + if (!errno) 262 + oic->data = no_free_ptr(ureq); 263 + } else { 264 + enum qcomtee_req_state prev_state; 265 + 266 + errno = -ENODEV; 267 + 268 + scoped_guard(mutex, &ctxdata->reqs_lock) { 269 + prev_state = ureq->state; 270 + /* Replace with empty_ureq to keep req_id reserved. */ 271 + if (prev_state == QCOMTEE_REQ_PROCESSING) { 272 + list_del(&ureq->node); 273 + idr_replace(&ctxdata->reqs_idr, 274 + &empty_ureq, ureq->req_id); 275 + 276 + /* Remove as supplicant has never seen this request. 
*/ 277 + } else if (prev_state == QCOMTEE_REQ_QUEUED) { 278 + ureq_dequeue(ctxdata, ureq->req_id); 279 + } 280 + } 281 + 282 + /* Supplicant did some work, do not discard it. */ 283 + if (prev_state == QCOMTEE_REQ_PROCESSED) { 284 + errno = ureq->errno; 285 + if (!errno) 286 + oic->data = no_free_ptr(ureq); 287 + } 288 + } 289 + 290 + return errno; 291 + } 292 + 293 + /* Gets called after submitting the dispatcher response. */ 294 + static void qcomtee_user_object_notify(struct qcomtee_object_invoke_ctx *oic, 295 + struct qcomtee_object *unused_object, 296 + int err) 297 + { 298 + struct qcomtee_ureq *ureq = oic->data; 299 + struct qcomtee_arg *u = ureq->args; 300 + int i; 301 + 302 + /* 303 + * If err, there was a transport issue, and QTEE did not receive the 304 + * response for the dispatcher. Release the callback object created for 305 + * QTEE, in addition to the copies of objects kept for the drivers. 306 + */ 307 + qcomtee_arg_for_each_output_object(i, u) { 308 + if (err && 309 + (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)) 310 + qcomtee_object_put(u[i].o); 311 + qcomtee_object_put(u[i].o); 312 + } 313 + 314 + kfree(ureq); 315 + } 316 + 317 + static void qcomtee_user_object_release(struct qcomtee_object *object) 318 + { 319 + struct qcomtee_user_object *uo = to_qcomtee_user_object(object); 320 + struct qcomtee_context_data *ctxdata = uo->ctx->data; 321 + struct qcomtee_ureq *ureq; 322 + 323 + /* RELEASE does not require any argument. */ 324 + static struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } }; 325 + 326 + if (!uo->notify) 327 + goto out_no_notify; 328 + 329 + ureq = kzalloc(sizeof(*ureq), GFP_KERNEL); 330 + if (!ureq) 331 + goto out_no_notify; 332 + 333 + /* QUEUE a release request: */ 334 + ureq->object_id = uo->object_id; 335 + ureq->op = QCOMTEE_MSG_OBJECT_OP_RELEASE; 336 + ureq->args = args; 337 + if (ureq_enqueue(ctxdata, ureq)) { 338 + kfree(ureq); 339 + /* Ignore the notification if it cannot be queued. 
*/ 340 + goto out_no_notify; 341 + } 342 + 343 + complete(&ctxdata->req_c); 344 + 345 + out_no_notify: 346 + teedev_ctx_put(uo->ctx); 347 + kfree(uo); 348 + } 349 + 350 + static struct qcomtee_object_operations qcomtee_user_object_ops = { 351 + .release = qcomtee_user_object_release, 352 + .notify = qcomtee_user_object_notify, 353 + .dispatch = qcomtee_user_object_dispatch, 354 + }; 355 + 356 + /** 357 + * qcomtee_user_param_to_object() - OBJREF parameter to &struct qcomtee_object. 358 + * @object: object returned. 359 + * @param: TEE parameter. 360 + * @ctx: context in which the conversion should happen. 361 + * 362 + * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_USER flags. 363 + * 364 + * Return: On success, returns 0; on failure, returns < 0. 365 + */ 366 + int qcomtee_user_param_to_object(struct qcomtee_object **object, 367 + struct tee_param *param, 368 + struct tee_context *ctx) 369 + { 370 + struct qcomtee_user_object *user_object __free(kfree) = NULL; 371 + int err; 372 + 373 + user_object = kzalloc(sizeof(*user_object), GFP_KERNEL); 374 + if (!user_object) 375 + return -ENOMEM; 376 + 377 + user_object->ctx = ctx; 378 + user_object->object_id = param->u.objref.id; 379 + /* By default, always notify userspace upon release. */ 380 + user_object->notify = true; 381 + err = qcomtee_object_user_init(&user_object->object, 382 + QCOMTEE_OBJECT_TYPE_CB, 383 + &qcomtee_user_object_ops, "uo-%llu", 384 + param->u.objref.id); 385 + if (err) 386 + return err; 387 + /* Matching teedev_ctx_put() is in qcomtee_user_object_release(). */ 388 + teedev_ctx_get(ctx); 389 + 390 + *object = &no_free_ptr(user_object)->object; 391 + 392 + return 0; 393 + } 394 + 395 + /* Reverse what qcomtee_user_param_to_object() does. 
*/ 396 + int qcomtee_user_param_from_object(struct tee_param *param, 397 + struct qcomtee_object *object, 398 + struct tee_context *ctx) 399 + { 400 + struct qcomtee_user_object *uo; 401 + 402 + uo = to_qcomtee_user_object(object); 403 + /* Ensure the object is in the same context as the caller. */ 404 + if (uo->ctx != ctx) 405 + return -EINVAL; 406 + 407 + param->u.objref.id = uo->object_id; 408 + param->u.objref.flags = QCOMTEE_OBJREF_FLAG_USER; 409 + 410 + /* User objects are valid in userspace; do not keep a copy. */ 411 + qcomtee_object_put(object); 412 + 413 + return 0; 414 + } 415 + 416 + /** 417 + * qcomtee_cb_params_from_args() - Convert QTEE arguments to TEE parameters. 418 + * @params: TEE parameters. 419 + * @u: QTEE arguments. 420 + * @num_params: number of elements in the parameter array. 421 + * @ubuf_addr: user buffer for arguments of type %QCOMTEE_ARG_TYPE_IB. 422 + * @ubuf_size: size of the user buffer. 423 + * @ctx: context in which the conversion should happen. 424 + * 425 + * It expects @params to have enough entries for @u. Entries in @params are of 426 + * %TEE_IOCTL_PARAM_ATTR_TYPE_NONE. 427 + * 428 + * Return: On success, returns the number of input parameters; 429 + * on failure, returns < 0. 430 + */ 431 + static int qcomtee_cb_params_from_args(struct tee_param *params, 432 + struct qcomtee_arg *u, int num_params, 433 + void __user *ubuf_addr, size_t ubuf_size, 434 + struct tee_context *ctx) 435 + { 436 + int i, np; 437 + void __user *uaddr; 438 + 439 + qcomtee_arg_for_each(i, u) { 440 + switch (u[i].type) { 441 + case QCOMTEE_ARG_TYPE_IB: 442 + params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT; 443 + 444 + /* Underflow already checked in ureq_select(). 
*/ 445 + ubuf_size = round_down(ubuf_size - u[i].b.size, 8); 446 + uaddr = (void __user *)(ubuf_addr + ubuf_size); 447 + 448 + params[i].u.ubuf.uaddr = uaddr; 449 + params[i].u.ubuf.size = u[i].b.size; 450 + if (copy_to_user(params[i].u.ubuf.uaddr, u[i].b.addr, 451 + u[i].b.size)) 452 + goto out_failed; 453 + 454 + break; 455 + case QCOMTEE_ARG_TYPE_OB: 456 + params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT; 457 + /* Let the user knows the maximum size QTEE expects. */ 458 + params[i].u.ubuf.size = u[i].b.size; 459 + 460 + break; 461 + case QCOMTEE_ARG_TYPE_IO: 462 + params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT; 463 + if (qcomtee_objref_from_arg(&params[i], &u[i], ctx)) 464 + goto out_failed; 465 + 466 + break; 467 + case QCOMTEE_ARG_TYPE_OO: 468 + params[i].attr = 469 + TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT; 470 + 471 + break; 472 + default: /* Never get here! */ 473 + goto out_failed; 474 + } 475 + } 476 + 477 + return i; 478 + 479 + out_failed: 480 + /* Undo qcomtee_objref_from_arg(). */ 481 + for (np = i; np >= 0; np--) { 482 + if (params[np].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT) 483 + qcomtee_context_del_qtee_object(&params[np], ctx); 484 + } 485 + 486 + /* Release any IO objects not processed. */ 487 + for (; u[i].type; i++) { 488 + if (u[i].type == QCOMTEE_ARG_TYPE_IO) 489 + qcomtee_object_put(u[i].o); 490 + } 491 + 492 + return -EINVAL; 493 + } 494 + 495 + /** 496 + * qcomtee_cb_params_to_args() - Convert TEE parameters to QTEE arguments. 497 + * @u: QTEE arguments. 498 + * @params: TEE parameters. 499 + * @num_params: number of elements in the parameter array. 500 + * @ctx: context in which the conversion should happen. 501 + * 502 + * Return: On success, returns 0; on failure, returns < 0. 
503 + */ 504 + static int qcomtee_cb_params_to_args(struct qcomtee_arg *u, 505 + struct tee_param *params, int num_params, 506 + struct tee_context *ctx) 507 + { 508 + int i; 509 + 510 + qcomtee_arg_for_each(i, u) { 511 + switch (u[i].type) { 512 + case QCOMTEE_ARG_TYPE_IB: 513 + if (params[i].attr != 514 + TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT) 515 + goto out_failed; 516 + 517 + break; 518 + case QCOMTEE_ARG_TYPE_OB: 519 + if (params[i].attr != 520 + TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT) 521 + goto out_failed; 522 + 523 + /* Client can not send more data than requested. */ 524 + if (params[i].u.ubuf.size > u[i].b.size) 525 + goto out_failed; 526 + 527 + if (copy_from_user(u[i].b.addr, params[i].u.ubuf.uaddr, 528 + params[i].u.ubuf.size)) 529 + goto out_failed; 530 + 531 + u[i].b.size = params[i].u.ubuf.size; 532 + 533 + break; 534 + case QCOMTEE_ARG_TYPE_IO: 535 + if (params[i].attr != 536 + TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT) 537 + goto out_failed; 538 + 539 + break; 540 + case QCOMTEE_ARG_TYPE_OO: 541 + if (params[i].attr != 542 + TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) 543 + goto out_failed; 544 + 545 + if (qcomtee_objref_to_arg(&u[i], &params[i], ctx)) 546 + goto out_failed; 547 + 548 + break; 549 + default: /* Never get here! */ 550 + goto out_failed; 551 + } 552 + } 553 + 554 + return 0; 555 + 556 + out_failed: 557 + /* Undo qcomtee_objref_to_arg(). */ 558 + for (i--; i >= 0; i--) { 559 + if (u[i].type != QCOMTEE_ARG_TYPE_OO) 560 + continue; 561 + 562 + qcomtee_user_object_set_notify(u[i].o, false); 563 + if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB) 564 + qcomtee_object_put(u[i].o); 565 + 566 + qcomtee_object_put(u[i].o); 567 + } 568 + 569 + return -EINVAL; 570 + } 571 + 572 + /** 573 + * qcomtee_user_object_select() - Select a request for a user object. 574 + * @ctx: context to look for a user object. 575 + * @params: parameters for @op. 576 + * @num_params: number of elements in the parameter array. 
577 + * @uaddr: user buffer for output UBUF parameters. 578 + * @size: size of user buffer @uaddr. 579 + * @data: information for the selected request. 580 + * 581 + * @params is filled along with @data for the selected request. 582 + * 583 + * Return: On success, returns 0; on failure, returns < 0. 584 + */ 585 + int qcomtee_user_object_select(struct tee_context *ctx, 586 + struct tee_param *params, int num_params, 587 + void __user *uaddr, size_t size, 588 + struct qcomtee_user_object_request_data *data) 589 + { 590 + struct qcomtee_context_data *ctxdata = ctx->data; 591 + struct qcomtee_ureq *ureq; 592 + int ret; 593 + 594 + /* 595 + * Hold the reqs_lock not only for ureq_select() and updating the ureq 596 + * state to PROCESSING but for the entire duration of ureq access. 597 + * This prevents qcomtee_user_object_dispatch() from freeing 598 + * ureq while it is still in use, if client dies. 599 + */ 600 + 601 + while (1) { 602 + scoped_guard(mutex, &ctxdata->reqs_lock) { 603 + ureq = ureq_select(ctxdata, size, num_params); 604 + if (!ureq) 605 + goto wait_for_request; 606 + 607 + if (IS_ERR(ureq)) 608 + return PTR_ERR(ureq); 609 + 610 + /* Processing the request 'QUEUED -> PROCESSING'. */ 611 + ureq->state = QCOMTEE_REQ_PROCESSING; 612 + /* ''Prepare user request:'' */ 613 + data->id = ureq->req_id; 614 + data->object_id = ureq->object_id; 615 + data->op = ureq->op; 616 + ret = qcomtee_cb_params_from_args(params, ureq->args, 617 + num_params, uaddr, 618 + size, ctx); 619 + if (ret >= 0) 620 + goto done_request; 621 + 622 + /* Something is wrong with the request: */ 623 + ureq_dequeue(ctxdata, data->id); 624 + /* Send error to QTEE. */ 625 + ureq->state = QCOMTEE_REQ_PROCESSED; 626 + ureq->errno = ret; 627 + 628 + complete(&ureq->c); 629 + } 630 + 631 + continue; 632 + wait_for_request: 633 + /* Wait for a new QUEUED request. 
*/ 634 + if (wait_for_completion_interruptible(&ctxdata->req_c)) 635 + return -ERESTARTSYS; 636 + } 637 + 638 + done_request: 639 + /* No one is waiting for the response. */ 640 + if (data->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) { 641 + scoped_guard(mutex, &ctxdata->reqs_lock) 642 + ureq_dequeue(ctxdata, data->id); 643 + kfree(ureq); 644 + } 645 + 646 + data->np = ret; 647 + 648 + return 0; 649 + } 650 + 651 + /** 652 + * qcomtee_user_object_submit() - Submit a response for a user object. 653 + * @ctx: context to look for a user object. 654 + * @params: returned parameters. 655 + * @num_params: number of elements in the parameter array. 656 + * @req_id: request ID for the response. 657 + * @errno: result of user object invocation. 658 + * 659 + * Return: On success, returns 0; on failure, returns < 0. 660 + */ 661 + int qcomtee_user_object_submit(struct tee_context *ctx, 662 + struct tee_param *params, int num_params, 663 + int req_id, int errno) 664 + { 665 + struct qcomtee_context_data *ctxdata = ctx->data; 666 + struct qcomtee_ureq *ureq; 667 + 668 + /* See comments for reqs_lock in qcomtee_user_object_select(). */ 669 + guard(mutex)(&ctxdata->reqs_lock); 670 + 671 + ureq = ureq_dequeue(ctxdata, req_id); 672 + if (!ureq) 673 + return -EINVAL; 674 + 675 + ureq->state = QCOMTEE_REQ_PROCESSED; 676 + 677 + if (!errno) 678 + ureq->errno = qcomtee_cb_params_to_args(ureq->args, params, 679 + num_params, ctx); 680 + else 681 + ureq->errno = errno; 682 + /* Return errno if qcomtee_cb_params_to_args() failed; otherwise 0. */ 683 + if (!errno && ureq->errno) 684 + errno = ureq->errno; 685 + else 686 + errno = 0; 687 + 688 + /* Send result to QTEE. */ 689 + complete(&ureq->c); 690 + 691 + return errno; 692 + }
+254 -88
drivers/tee/tee_core.c
··· 14 14 #include <linux/slab.h> 15 15 #include <linux/tee_core.h> 16 16 #include <linux/uaccess.h> 17 - #include <crypto/hash.h> 18 17 #include <crypto/sha1.h> 19 18 #include "tee_private.h" 20 19 ··· 79 80 80 81 kref_get(&ctx->refcount); 81 82 } 83 + EXPORT_SYMBOL_GPL(teedev_ctx_get); 82 84 83 85 static void teedev_ctx_release(struct kref *ref) 84 86 { ··· 97 97 98 98 kref_put(&ctx->refcount, teedev_ctx_release); 99 99 } 100 + EXPORT_SYMBOL_GPL(teedev_ctx_put); 100 101 101 102 void teedev_close_context(struct tee_context *ctx) 102 103 { 103 104 struct tee_device *teedev = ctx->teedev; 105 + 106 + if (teedev->desc->ops->close_context) 107 + teedev->desc->ops->close_context(ctx); 104 108 105 109 teedev_ctx_put(ctx); 106 110 tee_device_put(teedev); ··· 146 142 * This implements section (for SHA-1): 147 143 * 4.3. Algorithm for Creating a Name-Based UUID 148 144 */ 149 - static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name, 150 - size_t size) 145 + static void uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name, 146 + size_t size) 151 147 { 152 148 unsigned char hash[SHA1_DIGEST_SIZE]; 153 - struct crypto_shash *shash = NULL; 154 - struct shash_desc *desc = NULL; 155 - int rc; 149 + struct sha1_ctx ctx; 156 150 157 - shash = crypto_alloc_shash("sha1", 0, 0); 158 - if (IS_ERR(shash)) { 159 - rc = PTR_ERR(shash); 160 - pr_err("shash(sha1) allocation failed\n"); 161 - return rc; 162 - } 163 - 164 - desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash), 165 - GFP_KERNEL); 166 - if (!desc) { 167 - rc = -ENOMEM; 168 - goto out_free_shash; 169 - } 170 - 171 - desc->tfm = shash; 172 - 173 - rc = crypto_shash_init(desc); 174 - if (rc < 0) 175 - goto out_free_desc; 176 - 177 - rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns)); 178 - if (rc < 0) 179 - goto out_free_desc; 180 - 181 - rc = crypto_shash_update(desc, (const u8 *)name, size); 182 - if (rc < 0) 183 - goto out_free_desc; 184 - 185 - rc = crypto_shash_final(desc, hash); 186 - if (rc 
< 0) 187 - goto out_free_desc; 151 + sha1_init(&ctx); 152 + sha1_update(&ctx, (const u8 *)ns, sizeof(*ns)); 153 + sha1_update(&ctx, (const u8 *)name, size); 154 + sha1_final(&ctx, hash); 188 155 189 156 memcpy(uuid->b, hash, UUID_SIZE); 190 157 191 158 /* Tag for version 5 */ 192 159 uuid->b[6] = (hash[6] & 0x0F) | 0x50; 193 160 uuid->b[8] = (hash[8] & 0x3F) | 0x80; 194 - 195 - out_free_desc: 196 - kfree(desc); 197 - 198 - out_free_shash: 199 - crypto_free_shash(shash); 200 - return rc; 201 161 } 202 162 203 163 int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, ··· 171 203 kgid_t grp = INVALID_GID; 172 204 char *name = NULL; 173 205 int name_len; 174 - int rc; 206 + int rc = 0; 175 207 176 208 if (connection_method == TEE_IOCTL_LOGIN_PUBLIC || 177 209 connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) { ··· 228 260 goto out_free_name; 229 261 } 230 262 231 - rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len); 263 + uuid_v5(uuid, &tee_client_uuid_ns, name, name_len); 232 264 out_free_name: 233 265 kfree(name); 234 266 ··· 322 354 return ret; 323 355 } 324 356 357 + static int 358 + tee_ioctl_shm_register_fd(struct tee_context *ctx, 359 + struct tee_ioctl_shm_register_fd_data __user *udata) 360 + { 361 + struct tee_ioctl_shm_register_fd_data data; 362 + struct tee_shm *shm; 363 + long ret; 364 + 365 + if (copy_from_user(&data, udata, sizeof(data))) 366 + return -EFAULT; 367 + 368 + /* Currently no input flags are supported */ 369 + if (data.flags) 370 + return -EINVAL; 371 + 372 + shm = tee_shm_register_fd(ctx, data.fd); 373 + if (IS_ERR(shm)) 374 + return -EINVAL; 375 + 376 + data.id = shm->id; 377 + data.flags = shm->flags; 378 + data.size = shm->size; 379 + 380 + if (copy_to_user(udata, &data, sizeof(data))) 381 + ret = -EFAULT; 382 + else 383 + ret = tee_shm_get_fd(shm); 384 + 385 + /* 386 + * When user space closes the file descriptor the shared memory 387 + * should be freed or if tee_shm_get_fd() failed then it will 388 + * be freed 
immediately. 389 + */ 390 + tee_shm_put(shm); 391 + return ret; 392 + } 393 + 394 + static int param_from_user_memref(struct tee_context *ctx, 395 + struct tee_param_memref *memref, 396 + struct tee_ioctl_param *ip) 397 + { 398 + struct tee_shm *shm; 399 + size_t offs = 0; 400 + 401 + /* 402 + * If a NULL pointer is passed to a TA in the TEE, 403 + * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL 404 + * indicating a NULL memory reference. 405 + */ 406 + if (ip->c != TEE_MEMREF_NULL) { 407 + /* 408 + * If we fail to get a pointer to a shared 409 + * memory object (and increase the ref count) 410 + * from an identifier we return an error. All 411 + * pointers that has been added in params have 412 + * an increased ref count. It's the callers 413 + * responibility to do tee_shm_put() on all 414 + * resolved pointers. 415 + */ 416 + shm = tee_shm_get_from_id(ctx, ip->c); 417 + if (IS_ERR(shm)) 418 + return PTR_ERR(shm); 419 + 420 + /* 421 + * Ensure offset + size does not overflow 422 + * offset and does not overflow the size of 423 + * the referred shared memory object. 424 + */ 425 + if ((ip->a + ip->b) < ip->a || 426 + (ip->a + ip->b) > shm->size) { 427 + tee_shm_put(shm); 428 + return -EINVAL; 429 + } 430 + 431 + if (shm->flags & TEE_SHM_DMA_BUF) { 432 + struct tee_shm_dmabuf_ref *ref; 433 + 434 + ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); 435 + if (ref->parent_shm) { 436 + /* 437 + * The shm already has one reference to 438 + * ref->parent_shm so we are clear of 0. 439 + * We're getting another reference since 440 + * this shm will be used in the parameter 441 + * list instead of the shm we got with 442 + * tee_shm_get_from_id() above. 
443 + */ 444 + refcount_inc(&ref->parent_shm->refcount); 445 + tee_shm_put(shm); 446 + shm = ref->parent_shm; 447 + offs = ref->offset; 448 + } 449 + } 450 + } else if (ctx->cap_memref_null) { 451 + /* Pass NULL pointer to OP-TEE */ 452 + shm = NULL; 453 + } else { 454 + return -EINVAL; 455 + } 456 + 457 + memref->shm_offs = ip->a + offs; 458 + memref->size = ip->b; 459 + memref->shm = shm; 460 + 461 + return 0; 462 + } 463 + 325 464 static int params_from_user(struct tee_context *ctx, struct tee_param *params, 326 465 size_t num_params, 327 466 struct tee_ioctl_param __user *uparams) ··· 436 361 size_t n; 437 362 438 363 for (n = 0; n < num_params; n++) { 439 - struct tee_shm *shm; 440 364 struct tee_ioctl_param ip; 365 + int rc; 441 366 442 367 if (copy_from_user(&ip, uparams + n, sizeof(ip))) 443 368 return -EFAULT; ··· 450 375 switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { 451 376 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: 452 377 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: 378 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: 453 379 break; 454 380 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: 455 381 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: ··· 458 382 params[n].u.value.b = ip.b; 459 383 params[n].u.value.c = ip.c; 460 384 break; 385 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT: 386 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: 387 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT: 388 + params[n].u.ubuf.uaddr = u64_to_user_ptr(ip.a); 389 + params[n].u.ubuf.size = ip.b; 390 + 391 + if (!access_ok(params[n].u.ubuf.uaddr, 392 + params[n].u.ubuf.size)) 393 + return -EFAULT; 394 + 395 + break; 396 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT: 397 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT: 398 + params[n].u.objref.id = ip.a; 399 + params[n].u.objref.flags = ip.b; 400 + break; 461 401 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: 462 402 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: 463 403 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: 464 - /* 465 - * If a NULL pointer 
is passed to a TA in the TEE, 466 - * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL 467 - * indicating a NULL memory reference. 468 - */ 469 - if (ip.c != TEE_MEMREF_NULL) { 470 - /* 471 - * If we fail to get a pointer to a shared 472 - * memory object (and increase the ref count) 473 - * from an identifier we return an error. All 474 - * pointers that has been added in params have 475 - * an increased ref count. It's the callers 476 - * responibility to do tee_shm_put() on all 477 - * resolved pointers. 478 - */ 479 - shm = tee_shm_get_from_id(ctx, ip.c); 480 - if (IS_ERR(shm)) 481 - return PTR_ERR(shm); 482 - 483 - /* 484 - * Ensure offset + size does not overflow 485 - * offset and does not overflow the size of 486 - * the referred shared memory object. 487 - */ 488 - if ((ip.a + ip.b) < ip.a || 489 - (ip.a + ip.b) > shm->size) { 490 - tee_shm_put(shm); 491 - return -EINVAL; 492 - } 493 - } else if (ctx->cap_memref_null) { 494 - /* Pass NULL pointer to OP-TEE */ 495 - shm = NULL; 496 - } else { 497 - return -EINVAL; 498 - } 499 - 500 - params[n].u.memref.shm_offs = ip.a; 501 - params[n].u.memref.size = ip.b; 502 - params[n].u.memref.shm = shm; 404 + rc = param_from_user_memref(ctx, &params[n].u.memref, 405 + &ip); 406 + if (rc) 407 + return rc; 503 408 break; 504 409 default: 505 410 /* Unknown attribute */ ··· 505 448 if (put_user(p->u.value.a, &up->a) || 506 449 put_user(p->u.value.b, &up->b) || 507 450 put_user(p->u.value.c, &up->c)) 451 + return -EFAULT; 452 + break; 453 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: 454 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT: 455 + if (put_user((u64)p->u.ubuf.size, &up->b)) 456 + return -EFAULT; 457 + break; 458 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: 459 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT: 460 + if (put_user(p->u.objref.id, &up->a) || 461 + put_user(p->u.objref.flags, &up->b)) 508 462 return -EFAULT; 509 463 break; 510 464 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: ··· 670 602 return rc; 671 
603 } 672 604 605 + static int tee_ioctl_object_invoke(struct tee_context *ctx, 606 + struct tee_ioctl_buf_data __user *ubuf) 607 + { 608 + int rc; 609 + size_t n; 610 + struct tee_ioctl_buf_data buf; 611 + struct tee_ioctl_object_invoke_arg __user *uarg; 612 + struct tee_ioctl_object_invoke_arg arg; 613 + struct tee_ioctl_param __user *uparams = NULL; 614 + struct tee_param *params = NULL; 615 + 616 + if (!ctx->teedev->desc->ops->object_invoke_func) 617 + return -EINVAL; 618 + 619 + if (copy_from_user(&buf, ubuf, sizeof(buf))) 620 + return -EFAULT; 621 + 622 + if (buf.buf_len > TEE_MAX_ARG_SIZE || 623 + buf.buf_len < sizeof(struct tee_ioctl_object_invoke_arg)) 624 + return -EINVAL; 625 + 626 + uarg = u64_to_user_ptr(buf.buf_ptr); 627 + if (copy_from_user(&arg, uarg, sizeof(arg))) 628 + return -EFAULT; 629 + 630 + if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len) 631 + return -EINVAL; 632 + 633 + if (arg.num_params) { 634 + params = kcalloc(arg.num_params, sizeof(struct tee_param), 635 + GFP_KERNEL); 636 + if (!params) 637 + return -ENOMEM; 638 + uparams = uarg->params; 639 + rc = params_from_user(ctx, params, arg.num_params, uparams); 640 + if (rc) 641 + goto out; 642 + } 643 + 644 + rc = ctx->teedev->desc->ops->object_invoke_func(ctx, &arg, params); 645 + if (rc) 646 + goto out; 647 + 648 + if (put_user(arg.ret, &uarg->ret)) { 649 + rc = -EFAULT; 650 + goto out; 651 + } 652 + rc = params_to_user(uparams, arg.num_params, params); 653 + out: 654 + if (params) { 655 + /* Decrease ref count for all valid shared memory pointers */ 656 + for (n = 0; n < arg.num_params; n++) 657 + if (tee_param_is_memref(params + n) && 658 + params[n].u.memref.shm) 659 + tee_shm_put(params[n].u.memref.shm); 660 + kfree(params); 661 + } 662 + return rc; 663 + } 664 + 673 665 static int tee_ioctl_cancel(struct tee_context *ctx, 674 666 struct tee_ioctl_cancel_arg __user *uarg) 675 667 { ··· 777 649 ip.a = p->u.value.a; 778 650 ip.b = p->u.value.b; 779 651 ip.c = 
p->u.value.c; 652 + break; 653 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT: 654 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: 655 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT: 656 + ip.a = (__force unsigned long)p->u.ubuf.uaddr; 657 + ip.b = p->u.ubuf.size; 658 + ip.c = 0; 659 + break; 660 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT: 661 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT: 662 + ip.a = p->u.objref.id; 663 + ip.b = p->u.objref.flags; 664 + ip.c = 0; 780 665 break; 781 666 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: 782 667 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: ··· 893 752 p->u.value.b = ip.b; 894 753 p->u.value.c = ip.c; 895 754 break; 755 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: 756 + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT: 757 + p->u.ubuf.uaddr = u64_to_user_ptr(ip.a); 758 + p->u.ubuf.size = ip.b; 759 + 760 + if (!access_ok(params[n].u.ubuf.uaddr, 761 + params[n].u.ubuf.size)) 762 + return -EFAULT; 763 + 764 + break; 765 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: 766 + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT: 767 + p->u.objref.id = ip.a; 768 + p->u.objref.flags = ip.b; 769 + break; 896 770 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: 897 771 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: 898 772 /* ··· 984 828 return tee_ioctl_shm_alloc(ctx, uarg); 985 829 case TEE_IOC_SHM_REGISTER: 986 830 return tee_ioctl_shm_register(ctx, uarg); 831 + case TEE_IOC_SHM_REGISTER_FD: 832 + return tee_ioctl_shm_register_fd(ctx, uarg); 987 833 case TEE_IOC_OPEN_SESSION: 988 834 return tee_ioctl_open_session(ctx, uarg); 989 835 case TEE_IOC_INVOKE: 990 836 return tee_ioctl_invoke(ctx, uarg); 837 + case TEE_IOC_OBJECT_INVOKE: 838 + return tee_ioctl_object_invoke(ctx, uarg); 991 839 case TEE_IOC_CANCEL: 992 840 return tee_ioctl_cancel(ctx, uarg); 993 841 case TEE_IOC_CLOSE_SESSION: ··· 1049 889 1050 890 if (!teedesc || !teedesc->name || !teedesc->ops || 1051 891 !teedesc->ops->get_version || !teedesc->ops->open || 1052 - !teedesc->ops->release || 
!pool) 892 + !teedesc->ops->release) 1053 893 return ERR_PTR(-EINVAL); 1054 894 1055 895 teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); ··· 1137 977 struct tee_ioctl_version_data vers; 1138 978 1139 979 teedev->desc->ops->get_version(teedev, &vers); 1140 - return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id); 980 + return sysfs_emit(buf, "%d\n", vers.impl_id); 1141 981 } 1142 982 static DEVICE_ATTR_RO(implementation_id); 1143 983 ··· 1198 1038 } 1199 1039 mutex_unlock(&teedev->mutex); 1200 1040 } 1041 + EXPORT_SYMBOL_GPL(tee_device_put); 1201 1042 1202 1043 bool tee_device_get(struct tee_device *teedev) 1203 1044 { ··· 1211 1050 mutex_unlock(&teedev->mutex); 1212 1051 return true; 1213 1052 } 1053 + EXPORT_SYMBOL_GPL(tee_device_get); 1214 1054 1215 1055 /** 1216 1056 * tee_device_unregister() - Removes a TEE device ··· 1225 1063 { 1226 1064 if (!teedev) 1227 1065 return; 1066 + 1067 + tee_device_put_all_dma_heaps(teedev); 1228 1068 1229 1069 if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) 1230 1070 cdev_device_del(&teedev->cdev, &teedev->dev); ··· 1451 1287 MODULE_DESCRIPTION("TEE Driver"); 1452 1288 MODULE_VERSION("1.0"); 1453 1289 MODULE_LICENSE("GPL v2"); 1290 + MODULE_IMPORT_NS("DMA_BUF"); 1291 + MODULE_IMPORT_NS("DMA_BUF_HEAP");
+500
drivers/tee/tee_heap.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (c) 2025, Linaro Limited 4 + */ 5 + 6 + #include <linux/dma-buf.h> 7 + #include <linux/dma-heap.h> 8 + #include <linux/genalloc.h> 9 + #include <linux/module.h> 10 + #include <linux/scatterlist.h> 11 + #include <linux/slab.h> 12 + #include <linux/tee_core.h> 13 + #include <linux/xarray.h> 14 + 15 + #include "tee_private.h" 16 + 17 + struct tee_dma_heap { 18 + struct dma_heap *heap; 19 + enum tee_dma_heap_id id; 20 + struct kref kref; 21 + struct tee_protmem_pool *pool; 22 + struct tee_device *teedev; 23 + bool shutting_down; 24 + /* Protects pool, teedev, and shutting_down above */ 25 + struct mutex mu; 26 + }; 27 + 28 + struct tee_heap_buffer { 29 + struct tee_dma_heap *heap; 30 + size_t size; 31 + size_t offs; 32 + struct sg_table table; 33 + }; 34 + 35 + struct tee_heap_attachment { 36 + struct sg_table table; 37 + struct device *dev; 38 + }; 39 + 40 + struct tee_protmem_static_pool { 41 + struct tee_protmem_pool pool; 42 + struct gen_pool *gen_pool; 43 + phys_addr_t pa_base; 44 + }; 45 + 46 + #if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS) 47 + static DEFINE_XARRAY_ALLOC(tee_dma_heap); 48 + 49 + static void tee_heap_release(struct kref *kref) 50 + { 51 + struct tee_dma_heap *h = container_of(kref, struct tee_dma_heap, kref); 52 + 53 + h->pool->ops->destroy_pool(h->pool); 54 + tee_device_put(h->teedev); 55 + h->pool = NULL; 56 + h->teedev = NULL; 57 + } 58 + 59 + static void put_tee_heap(struct tee_dma_heap *h) 60 + { 61 + kref_put(&h->kref, tee_heap_release); 62 + } 63 + 64 + static void get_tee_heap(struct tee_dma_heap *h) 65 + { 66 + kref_get(&h->kref); 67 + } 68 + 69 + static int copy_sg_table(struct sg_table *dst, struct sg_table *src) 70 + { 71 + struct scatterlist *dst_sg; 72 + struct scatterlist *src_sg; 73 + int ret; 74 + int i; 75 + 76 + ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL); 77 + if (ret) 78 + return ret; 79 + 80 + dst_sg = dst->sgl; 81 + for_each_sgtable_sg(src, src_sg, 
i) { 82 + sg_set_page(dst_sg, sg_page(src_sg), src_sg->length, 83 + src_sg->offset); 84 + dst_sg = sg_next(dst_sg); 85 + } 86 + 87 + return 0; 88 + } 89 + 90 + static int tee_heap_attach(struct dma_buf *dmabuf, 91 + struct dma_buf_attachment *attachment) 92 + { 93 + struct tee_heap_buffer *buf = dmabuf->priv; 94 + struct tee_heap_attachment *a; 95 + int ret; 96 + 97 + a = kzalloc(sizeof(*a), GFP_KERNEL); 98 + if (!a) 99 + return -ENOMEM; 100 + 101 + ret = copy_sg_table(&a->table, &buf->table); 102 + if (ret) { 103 + kfree(a); 104 + return ret; 105 + } 106 + 107 + a->dev = attachment->dev; 108 + attachment->priv = a; 109 + 110 + return 0; 111 + } 112 + 113 + static void tee_heap_detach(struct dma_buf *dmabuf, 114 + struct dma_buf_attachment *attachment) 115 + { 116 + struct tee_heap_attachment *a = attachment->priv; 117 + 118 + sg_free_table(&a->table); 119 + kfree(a); 120 + } 121 + 122 + static struct sg_table * 123 + tee_heap_map_dma_buf(struct dma_buf_attachment *attachment, 124 + enum dma_data_direction direction) 125 + { 126 + struct tee_heap_attachment *a = attachment->priv; 127 + int ret; 128 + 129 + ret = dma_map_sgtable(attachment->dev, &a->table, direction, 130 + DMA_ATTR_SKIP_CPU_SYNC); 131 + if (ret) 132 + return ERR_PTR(ret); 133 + 134 + return &a->table; 135 + } 136 + 137 + static void tee_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, 138 + struct sg_table *table, 139 + enum dma_data_direction direction) 140 + { 141 + struct tee_heap_attachment *a = attachment->priv; 142 + 143 + WARN_ON(&a->table != table); 144 + 145 + dma_unmap_sgtable(attachment->dev, table, direction, 146 + DMA_ATTR_SKIP_CPU_SYNC); 147 + } 148 + 149 + static void tee_heap_buf_free(struct dma_buf *dmabuf) 150 + { 151 + struct tee_heap_buffer *buf = dmabuf->priv; 152 + 153 + buf->heap->pool->ops->free(buf->heap->pool, &buf->table); 154 + mutex_lock(&buf->heap->mu); 155 + put_tee_heap(buf->heap); 156 + mutex_unlock(&buf->heap->mu); 157 + kfree(buf); 158 + } 159 + 160 + 
static const struct dma_buf_ops tee_heap_buf_ops = { 161 + .attach = tee_heap_attach, 162 + .detach = tee_heap_detach, 163 + .map_dma_buf = tee_heap_map_dma_buf, 164 + .unmap_dma_buf = tee_heap_unmap_dma_buf, 165 + .release = tee_heap_buf_free, 166 + }; 167 + 168 + static struct dma_buf *tee_dma_heap_alloc(struct dma_heap *heap, 169 + unsigned long len, u32 fd_flags, 170 + u64 heap_flags) 171 + { 172 + struct tee_dma_heap *h = dma_heap_get_drvdata(heap); 173 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 174 + struct tee_device *teedev = NULL; 175 + struct tee_heap_buffer *buf; 176 + struct tee_protmem_pool *pool; 177 + struct dma_buf *dmabuf; 178 + int rc; 179 + 180 + mutex_lock(&h->mu); 181 + if (h->teedev) { 182 + teedev = h->teedev; 183 + pool = h->pool; 184 + get_tee_heap(h); 185 + } 186 + mutex_unlock(&h->mu); 187 + 188 + if (!teedev) 189 + return ERR_PTR(-EINVAL); 190 + 191 + buf = kzalloc(sizeof(*buf), GFP_KERNEL); 192 + if (!buf) { 193 + dmabuf = ERR_PTR(-ENOMEM); 194 + goto err; 195 + } 196 + buf->size = len; 197 + buf->heap = h; 198 + 199 + rc = pool->ops->alloc(pool, &buf->table, len, &buf->offs); 200 + if (rc) { 201 + dmabuf = ERR_PTR(rc); 202 + goto err_kfree; 203 + } 204 + 205 + exp_info.ops = &tee_heap_buf_ops; 206 + exp_info.size = len; 207 + exp_info.priv = buf; 208 + exp_info.flags = fd_flags; 209 + dmabuf = dma_buf_export(&exp_info); 210 + if (IS_ERR(dmabuf)) 211 + goto err_protmem_free; 212 + 213 + return dmabuf; 214 + 215 + err_protmem_free: 216 + pool->ops->free(pool, &buf->table); 217 + err_kfree: 218 + kfree(buf); 219 + err: 220 + mutex_lock(&h->mu); 221 + put_tee_heap(h); 222 + mutex_unlock(&h->mu); 223 + return dmabuf; 224 + } 225 + 226 + static const struct dma_heap_ops tee_dma_heap_ops = { 227 + .allocate = tee_dma_heap_alloc, 228 + }; 229 + 230 + static const char *heap_id_2_name(enum tee_dma_heap_id id) 231 + { 232 + switch (id) { 233 + case TEE_DMA_HEAP_SECURE_VIDEO_PLAY: 234 + return "protected,secure-video"; 235 + case 
TEE_DMA_HEAP_TRUSTED_UI: 236 + return "protected,trusted-ui"; 237 + case TEE_DMA_HEAP_SECURE_VIDEO_RECORD: 238 + return "protected,secure-video-record"; 239 + default: 240 + return NULL; 241 + } 242 + } 243 + 244 + static int alloc_dma_heap(struct tee_device *teedev, enum tee_dma_heap_id id, 245 + struct tee_protmem_pool *pool) 246 + { 247 + struct dma_heap_export_info exp_info = { 248 + .ops = &tee_dma_heap_ops, 249 + .name = heap_id_2_name(id), 250 + }; 251 + struct tee_dma_heap *h; 252 + int rc; 253 + 254 + if (!exp_info.name) 255 + return -EINVAL; 256 + 257 + if (xa_reserve(&tee_dma_heap, id, GFP_KERNEL)) { 258 + if (!xa_load(&tee_dma_heap, id)) 259 + return -EEXIST; 260 + return -ENOMEM; 261 + } 262 + 263 + h = kzalloc(sizeof(*h), GFP_KERNEL); 264 + if (!h) 265 + return -ENOMEM; 266 + h->id = id; 267 + kref_init(&h->kref); 268 + h->teedev = teedev; 269 + h->pool = pool; 270 + mutex_init(&h->mu); 271 + 272 + exp_info.priv = h; 273 + h->heap = dma_heap_add(&exp_info); 274 + if (IS_ERR(h->heap)) { 275 + rc = PTR_ERR(h->heap); 276 + kfree(h); 277 + 278 + return rc; 279 + } 280 + 281 + /* "can't fail" due to the call to xa_reserve() above */ 282 + return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL))); 283 + } 284 + 285 + int tee_device_register_dma_heap(struct tee_device *teedev, 286 + enum tee_dma_heap_id id, 287 + struct tee_protmem_pool *pool) 288 + { 289 + struct tee_dma_heap *h; 290 + int rc; 291 + 292 + if (!tee_device_get(teedev)) 293 + return -EINVAL; 294 + 295 + h = xa_load(&tee_dma_heap, id); 296 + if (h) { 297 + mutex_lock(&h->mu); 298 + if (h->teedev) { 299 + rc = -EBUSY; 300 + } else { 301 + kref_init(&h->kref); 302 + h->shutting_down = false; 303 + h->teedev = teedev; 304 + h->pool = pool; 305 + rc = 0; 306 + } 307 + mutex_unlock(&h->mu); 308 + } else { 309 + rc = alloc_dma_heap(teedev, id, pool); 310 + } 311 + 312 + if (rc) { 313 + tee_device_put(teedev); 314 + dev_err(&teedev->dev, "can't register DMA heap id %d (%s)\n", 315 + id, 
heap_id_2_name(id)); 316 + } 317 + 318 + return rc; 319 + } 320 + EXPORT_SYMBOL_GPL(tee_device_register_dma_heap); 321 + 322 + void tee_device_put_all_dma_heaps(struct tee_device *teedev) 323 + { 324 + struct tee_dma_heap *h; 325 + u_long i; 326 + 327 + xa_for_each(&tee_dma_heap, i, h) { 328 + if (h) { 329 + mutex_lock(&h->mu); 330 + if (h->teedev == teedev && !h->shutting_down) { 331 + h->shutting_down = true; 332 + put_tee_heap(h); 333 + } 334 + mutex_unlock(&h->mu); 335 + } 336 + } 337 + } 338 + EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps); 339 + 340 + int tee_heap_update_from_dma_buf(struct tee_device *teedev, 341 + struct dma_buf *dmabuf, size_t *offset, 342 + struct tee_shm *shm, 343 + struct tee_shm **parent_shm) 344 + { 345 + struct tee_heap_buffer *buf; 346 + int rc; 347 + 348 + /* The DMA-buf must be from our heap */ 349 + if (dmabuf->ops != &tee_heap_buf_ops) 350 + return -EINVAL; 351 + 352 + buf = dmabuf->priv; 353 + /* The buffer must be from the same teedev */ 354 + if (buf->heap->teedev != teedev) 355 + return -EINVAL; 356 + 357 + shm->size = buf->size; 358 + 359 + rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table, 360 + buf->offs, shm, parent_shm); 361 + if (!rc && *parent_shm) 362 + *offset = buf->offs; 363 + 364 + return rc; 365 + } 366 + #else 367 + int tee_device_register_dma_heap(struct tee_device *teedev __always_unused, 368 + enum tee_dma_heap_id id __always_unused, 369 + struct tee_protmem_pool *pool __always_unused) 370 + { 371 + return -EINVAL; 372 + } 373 + EXPORT_SYMBOL_GPL(tee_device_register_dma_heap); 374 + 375 + void 376 + tee_device_put_all_dma_heaps(struct tee_device *teedev __always_unused) 377 + { 378 + } 379 + EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps); 380 + 381 + int tee_heap_update_from_dma_buf(struct tee_device *teedev __always_unused, 382 + struct dma_buf *dmabuf __always_unused, 383 + size_t *offset __always_unused, 384 + struct tee_shm *shm __always_unused, 385 + struct tee_shm **parent_shm 
__always_unused) 386 + { 387 + return -EINVAL; 388 + } 389 + #endif 390 + 391 + static struct tee_protmem_static_pool * 392 + to_protmem_static_pool(struct tee_protmem_pool *pool) 393 + { 394 + return container_of(pool, struct tee_protmem_static_pool, pool); 395 + } 396 + 397 + static int protmem_pool_op_static_alloc(struct tee_protmem_pool *pool, 398 + struct sg_table *sgt, size_t size, 399 + size_t *offs) 400 + { 401 + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); 402 + phys_addr_t pa; 403 + int ret; 404 + 405 + pa = gen_pool_alloc(stp->gen_pool, size); 406 + if (!pa) 407 + return -ENOMEM; 408 + 409 + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 410 + if (ret) { 411 + gen_pool_free(stp->gen_pool, pa, size); 412 + return ret; 413 + } 414 + 415 + sg_set_page(sgt->sgl, phys_to_page(pa), size, 0); 416 + *offs = pa - stp->pa_base; 417 + 418 + return 0; 419 + } 420 + 421 + static void protmem_pool_op_static_free(struct tee_protmem_pool *pool, 422 + struct sg_table *sgt) 423 + { 424 + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); 425 + struct scatterlist *sg; 426 + int i; 427 + 428 + for_each_sgtable_sg(sgt, sg, i) 429 + gen_pool_free(stp->gen_pool, sg_phys(sg), sg->length); 430 + sg_free_table(sgt); 431 + } 432 + 433 + static int protmem_pool_op_static_update_shm(struct tee_protmem_pool *pool, 434 + struct sg_table *sgt, size_t offs, 435 + struct tee_shm *shm, 436 + struct tee_shm **parent_shm) 437 + { 438 + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); 439 + 440 + shm->paddr = stp->pa_base + offs; 441 + *parent_shm = NULL; 442 + 443 + return 0; 444 + } 445 + 446 + static void protmem_pool_op_static_destroy_pool(struct tee_protmem_pool *pool) 447 + { 448 + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); 449 + 450 + gen_pool_destroy(stp->gen_pool); 451 + kfree(stp); 452 + } 453 + 454 + static struct tee_protmem_pool_ops protmem_pool_ops_static = { 455 + .alloc = 
protmem_pool_op_static_alloc, 456 + .free = protmem_pool_op_static_free, 457 + .update_shm = protmem_pool_op_static_update_shm, 458 + .destroy_pool = protmem_pool_op_static_destroy_pool, 459 + }; 460 + 461 + struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr, 462 + size_t size) 463 + { 464 + const size_t page_mask = PAGE_SIZE - 1; 465 + struct tee_protmem_static_pool *stp; 466 + int rc; 467 + 468 + /* Check it's page aligned */ 469 + if ((paddr | size) & page_mask) 470 + return ERR_PTR(-EINVAL); 471 + 472 + if (!pfn_valid(PHYS_PFN(paddr))) 473 + return ERR_PTR(-EINVAL); 474 + 475 + stp = kzalloc(sizeof(*stp), GFP_KERNEL); 476 + if (!stp) 477 + return ERR_PTR(-ENOMEM); 478 + 479 + stp->gen_pool = gen_pool_create(PAGE_SHIFT, -1); 480 + if (!stp->gen_pool) { 481 + rc = -ENOMEM; 482 + goto err_free; 483 + } 484 + 485 + rc = gen_pool_add(stp->gen_pool, paddr, size, -1); 486 + if (rc) 487 + goto err_free_pool; 488 + 489 + stp->pool.ops = &protmem_pool_ops_static; 490 + stp->pa_base = paddr; 491 + return &stp->pool; 492 + 493 + err_free_pool: 494 + gen_pool_destroy(stp->gen_pool); 495 + err_free: 496 + kfree(stp); 497 + 498 + return ERR_PTR(rc); 499 + } 500 + EXPORT_SYMBOL_GPL(tee_protmem_static_pool_alloc);
+14 -6
drivers/tee/tee_private.h
··· 8 8 #include <linux/cdev.h> 9 9 #include <linux/completion.h> 10 10 #include <linux/device.h> 11 + #include <linux/dma-buf.h> 11 12 #include <linux/kref.h> 12 13 #include <linux/mutex.h> 13 14 #include <linux/types.h> 14 15 16 + /* extra references appended to shm object for registered shared memory */ 17 + struct tee_shm_dmabuf_ref { 18 + struct tee_shm shm; 19 + size_t offset; 20 + struct dma_buf *dmabuf; 21 + struct tee_shm *parent_shm; 22 + }; 23 + 15 24 int tee_shm_get_fd(struct tee_shm *shm); 16 - 17 - bool tee_device_get(struct tee_device *teedev); 18 - void tee_device_put(struct tee_device *teedev); 19 - 20 - void teedev_ctx_get(struct tee_context *ctx); 21 - void teedev_ctx_put(struct tee_context *ctx); 22 25 23 26 struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size); 24 27 struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, 25 28 unsigned long addr, size_t length); 29 + 30 + int tee_heap_update_from_dma_buf(struct tee_device *teedev, 31 + struct dma_buf *dmabuf, size_t *offset, 32 + struct tee_shm *shm, 33 + struct tee_shm **parent_shm); 26 34 27 35 #endif /*TEE_PRIVATE_H*/
+161 -4
drivers/tee/tee_shm.c
··· 4 4 */ 5 5 #include <linux/anon_inodes.h> 6 6 #include <linux/device.h> 7 + #include <linux/dma-buf.h> 8 + #include <linux/dma-mapping.h> 9 + #include <linux/highmem.h> 7 10 #include <linux/idr.h> 8 11 #include <linux/io.h> 9 12 #include <linux/mm.h> ··· 15 12 #include <linux/tee_core.h> 16 13 #include <linux/uaccess.h> 17 14 #include <linux/uio.h> 18 - #include <linux/highmem.h> 19 15 #include "tee_private.h" 16 + 17 + struct tee_shm_dma_mem { 18 + struct tee_shm shm; 19 + dma_addr_t dma_addr; 20 + struct page *page; 21 + }; 20 22 21 23 static void shm_put_kernel_pages(struct page **pages, size_t page_count) 22 24 { ··· 53 45 54 46 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) 55 47 { 56 - if (shm->flags & TEE_SHM_POOL) { 48 + void *p = shm; 49 + 50 + if (shm->flags & TEE_SHM_DMA_MEM) { 51 + #if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS) 52 + struct tee_shm_dma_mem *dma_mem; 53 + 54 + dma_mem = container_of(shm, struct tee_shm_dma_mem, shm); 55 + p = dma_mem; 56 + dma_free_pages(&teedev->dev, shm->size, dma_mem->page, 57 + dma_mem->dma_addr, DMA_BIDIRECTIONAL); 58 + #endif 59 + } else if (shm->flags & TEE_SHM_DMA_BUF) { 60 + struct tee_shm_dmabuf_ref *ref; 61 + 62 + ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); 63 + p = ref; 64 + dma_buf_put(ref->dmabuf); 65 + } else if (shm->flags & TEE_SHM_POOL) { 57 66 teedev->pool->ops->free(teedev->pool, shm); 58 67 } else if (shm->flags & TEE_SHM_DYNAMIC) { 59 68 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); ··· 84 59 85 60 teedev_ctx_put(shm->ctx); 86 61 87 - kfree(shm); 62 + kfree(p); 88 63 89 64 tee_device_put(teedev); 90 65 } ··· 194 169 * tee_client_invoke_func(). The memory allocated is later freed with a 195 170 * call to tee_shm_free(). 
196 171 * 197 - * @returns a pointer to 'struct tee_shm' 172 + * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure 198 173 */ 199 174 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size) 200 175 { ··· 203 178 return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); 204 179 } 205 180 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf); 181 + 182 + struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd) 183 + { 184 + struct tee_shm_dmabuf_ref *ref; 185 + int rc; 186 + 187 + if (!tee_device_get(ctx->teedev)) 188 + return ERR_PTR(-EINVAL); 189 + 190 + teedev_ctx_get(ctx); 191 + 192 + ref = kzalloc(sizeof(*ref), GFP_KERNEL); 193 + if (!ref) { 194 + rc = -ENOMEM; 195 + goto err_put_tee; 196 + } 197 + 198 + refcount_set(&ref->shm.refcount, 1); 199 + ref->shm.ctx = ctx; 200 + ref->shm.id = -1; 201 + ref->shm.flags = TEE_SHM_DMA_BUF; 202 + 203 + ref->dmabuf = dma_buf_get(fd); 204 + if (IS_ERR(ref->dmabuf)) { 205 + rc = PTR_ERR(ref->dmabuf); 206 + goto err_kfree_ref; 207 + } 208 + 209 + rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf, 210 + &ref->offset, &ref->shm, 211 + &ref->parent_shm); 212 + if (rc) 213 + goto err_put_dmabuf; 214 + 215 + mutex_lock(&ref->shm.ctx->teedev->mutex); 216 + ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm, 217 + 1, 0, GFP_KERNEL); 218 + mutex_unlock(&ref->shm.ctx->teedev->mutex); 219 + if (ref->shm.id < 0) { 220 + rc = ref->shm.id; 221 + goto err_put_dmabuf; 222 + } 223 + 224 + return &ref->shm; 225 + 226 + err_put_dmabuf: 227 + dma_buf_put(ref->dmabuf); 228 + err_kfree_ref: 229 + kfree(ref); 230 + err_put_tee: 231 + teedev_ctx_put(ctx); 232 + tee_device_put(ctx->teedev); 233 + 234 + return ERR_PTR(rc); 235 + } 236 + EXPORT_SYMBOL_GPL(tee_shm_register_fd); 206 237 207 238 /** 208 239 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared ··· 283 202 return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); 284 203 } 285 204 
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf); 205 + 206 + #if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS) 207 + /** 208 + * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object 209 + * @ctx: Context that allocates the shared memory 210 + * @page_count: Number of pages 211 + * 212 + * The allocated memory is expected to be lent (made inaccessible to the 213 + * kernel) to the TEE while it's used and returned (accessible to the 214 + * kernel again) before it's freed. 215 + * 216 + * This function should normally only be used internally in the TEE 217 + * drivers. 218 + * 219 + * @returns a pointer to 'struct tee_shm' 220 + */ 221 + struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx, 222 + size_t page_count) 223 + { 224 + struct tee_device *teedev = ctx->teedev; 225 + struct tee_shm_dma_mem *dma_mem; 226 + dma_addr_t dma_addr; 227 + struct page *page; 228 + 229 + if (!tee_device_get(teedev)) 230 + return ERR_PTR(-EINVAL); 231 + 232 + page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE, 233 + &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL); 234 + if (!page) 235 + goto err_put_teedev; 236 + 237 + dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL); 238 + if (!dma_mem) 239 + goto err_free_pages; 240 + 241 + refcount_set(&dma_mem->shm.refcount, 1); 242 + dma_mem->shm.ctx = ctx; 243 + dma_mem->shm.paddr = page_to_phys(page); 244 + dma_mem->dma_addr = dma_addr; 245 + dma_mem->page = page; 246 + dma_mem->shm.size = page_count * PAGE_SIZE; 247 + dma_mem->shm.flags = TEE_SHM_DMA_MEM; 248 + 249 + teedev_ctx_get(ctx); 250 + 251 + return &dma_mem->shm; 252 + 253 + err_free_pages: 254 + dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr, 255 + DMA_BIDIRECTIONAL); 256 + err_put_teedev: 257 + tee_device_put(teedev); 258 + 259 + return ERR_PTR(-ENOMEM); 260 + } 261 + EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem); 262 + #else 263 + struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx, 264 + size_t page_count) 265 + { 266 + return ERR_PTR(-EINVAL); 
267 + } 268 + EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem); 269 + #endif 286 270 287 271 int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align, 288 272 int (*shm_register)(struct tee_context *ctx, ··· 467 321 if (unlikely(len <= 0)) { 468 322 ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM); 469 323 goto err_free_shm_pages; 324 + } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) { 325 + /* 326 + * If we only got a few pages, update to release the 327 + * correct amount below. 328 + */ 329 + shm->num_pages = len / PAGE_SIZE; 330 + ret = ERR_PTR(-ENOMEM); 331 + goto err_put_shm_pages; 470 332 } 471 333 472 334 /* ··· 597 443 598 444 /* Refuse sharing shared memory provided by application */ 599 445 if (shm->flags & TEE_SHM_USER_MAPPED) 446 + return -EINVAL; 447 + /* Refuse sharing registered DMA_bufs with the application */ 448 + if (shm->flags & TEE_SHM_DMA_BUF) 600 449 return -EINVAL; 601 450 602 451 /* check for overflowing the buffer's size */
+1 -1
drivers/tty/serial/Kconfig
··· 128 128 config SERIAL_ATMEL 129 129 bool "AT91 on-chip serial port support" 130 130 depends on COMMON_CLK 131 - depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST 131 + depends on ARCH_MICROCHIP || COMPILE_TEST 132 132 select SERIAL_CORE 133 133 select SERIAL_MCTRL_GPIO if GPIOLIB 134 134 select MFD_AT91_USART
+7 -1
drivers/tty/serial/qcom_geni_serial.c
··· 1200 1200 int ret; 1201 1201 1202 1202 proto = geni_se_read_proto(&port->se); 1203 - if (proto != GENI_SE_UART) { 1203 + if (proto == GENI_SE_INVALID_PROTO) { 1204 + ret = geni_load_se_firmware(&port->se, GENI_SE_UART); 1205 + if (ret) { 1206 + dev_err(uport->dev, "UART firmware load failed ret: %d\n", ret); 1207 + return ret; 1208 + } 1209 + } else if (proto != GENI_SE_UART) { 1204 1210 dev_err(uport->dev, "Invalid FW loaded, proto: %d\n", proto); 1205 1211 return -ENXIO; 1206 1212 }
+4
include/dt-bindings/clock/raspberrypi,rp1-clocks.h
··· 58 58 #define RP1_PLL_VIDEO_PRI_PH 43 59 59 #define RP1_PLL_AUDIO_TERN 44 60 60 61 + /* MIPI clocks managed by the DSI driver */ 62 + #define RP1_CLK_MIPI0_DSI_BYTECLOCK 45 63 + #define RP1_CLK_MIPI1_DSI_BYTECLOCK 46 64 + 61 65 #endif
+74
include/dt-bindings/memory/tegra210-mc.h
··· 75 75 #define TEGRA210_MC_RESET_ETR 28 76 76 #define TEGRA210_MC_RESET_TSECB 29 77 77 78 + #define TEGRA210_MC_PTCR 0 79 + #define TEGRA210_MC_DISPLAY0A 1 80 + #define TEGRA210_MC_DISPLAY0AB 2 81 + #define TEGRA210_MC_DISPLAY0B 3 82 + #define TEGRA210_MC_DISPLAY0BB 4 83 + #define TEGRA210_MC_DISPLAY0C 5 84 + #define TEGRA210_MC_DISPLAY0CB 6 85 + #define TEGRA210_MC_AFIR 14 86 + #define TEGRA210_MC_AVPCARM7R 15 87 + #define TEGRA210_MC_DISPLAYHC 16 88 + #define TEGRA210_MC_DISPLAYHCB 17 89 + #define TEGRA210_MC_HDAR 21 90 + #define TEGRA210_MC_HOST1XDMAR 22 91 + #define TEGRA210_MC_HOST1XR 23 92 + #define TEGRA210_MC_NVENCSRD 28 93 + #define TEGRA210_MC_PPCSAHBDMAR 29 94 + #define TEGRA210_MC_PPCSAHBSLVR 30 95 + #define TEGRA210_MC_SATAR 31 96 + #define TEGRA210_MC_MPCORER 39 97 + #define TEGRA210_MC_NVENCSWR 43 98 + #define TEGRA210_MC_AFIW 49 99 + #define TEGRA210_MC_AVPCARM7W 50 100 + #define TEGRA210_MC_HDAW 53 101 + #define TEGRA210_MC_HOST1XW 54 102 + #define TEGRA210_MC_MPCOREW 57 103 + #define TEGRA210_MC_PPCSAHBDMAW 59 104 + #define TEGRA210_MC_PPCSAHBSLVW 60 105 + #define TEGRA210_MC_SATAW 61 106 + #define TEGRA210_MC_ISPRA 68 107 + #define TEGRA210_MC_ISPWA 70 108 + #define TEGRA210_MC_ISPWB 71 109 + #define TEGRA210_MC_XUSB_HOSTR 74 110 + #define TEGRA210_MC_XUSB_HOSTW 75 111 + #define TEGRA210_MC_XUSB_DEVR 76 112 + #define TEGRA210_MC_XUSB_DEVW 77 113 + #define TEGRA210_MC_ISPRAB 78 114 + #define TEGRA210_MC_ISPWAB 80 115 + #define TEGRA210_MC_ISPWBB 81 116 + #define TEGRA210_MC_TSECSRD 84 117 + #define TEGRA210_MC_TSECSWR 85 118 + #define TEGRA210_MC_A9AVPSCR 86 119 + #define TEGRA210_MC_A9AVPSCW 87 120 + #define TEGRA210_MC_GPUSRD 88 121 + #define TEGRA210_MC_GPUSWR 89 122 + #define TEGRA210_MC_DISPLAYT 90 123 + #define TEGRA210_MC_SDMMCRA 96 124 + #define TEGRA210_MC_SDMMCRAA 97 125 + #define TEGRA210_MC_SDMMCR 98 126 + #define TEGRA210_MC_SDMMCRAB 99 127 + #define TEGRA210_MC_SDMMCWA 100 128 + #define TEGRA210_MC_SDMMCWAA 101 129 + #define 
TEGRA210_MC_SDMMCW 102 130 + #define TEGRA210_MC_SDMMCWAB 103 131 + #define TEGRA210_MC_VICSRD 108 132 + #define TEGRA210_MC_VICSWR 109 133 + #define TEGRA210_MC_VIW 114 134 + #define TEGRA210_MC_DISPLAYD 115 135 + #define TEGRA210_MC_NVDECSRD 120 136 + #define TEGRA210_MC_NVDECSWR 121 137 + #define TEGRA210_MC_APER 122 138 + #define TEGRA210_MC_APEW 123 139 + #define TEGRA210_MC_NVJPGRD 126 140 + #define TEGRA210_MC_NVJPGWR 127 141 + #define TEGRA210_MC_SESRD 128 142 + #define TEGRA210_MC_SESWR 129 143 + #define TEGRA210_MC_AXIAPR 130 144 + #define TEGRA210_MC_AXIAPW 131 145 + #define TEGRA210_MC_ETRR 132 146 + #define TEGRA210_MC_ETRW 133 147 + #define TEGRA210_MC_TSECSRDB 134 148 + #define TEGRA210_MC_TSECSWRB 135 149 + #define TEGRA210_MC_GPUSRD2 136 150 + #define TEGRA210_MC_GPUSWR2 137 151 + 78 152 #endif
+7
include/dt-bindings/reset/thead,th1520-reset.h
··· 12 12 #define TH1520_RESET_ID_NPU 2 13 13 #define TH1520_RESET_ID_WDT0 3 14 14 #define TH1520_RESET_ID_WDT1 4 15 + #define TH1520_RESET_ID_DPU_AHB 5 16 + #define TH1520_RESET_ID_DPU_AXI 6 17 + #define TH1520_RESET_ID_DPU_CORE 7 18 + #define TH1520_RESET_ID_DSI0_APB 8 19 + #define TH1520_RESET_ID_DSI1_APB 9 20 + #define TH1520_RESET_ID_HDMI 10 21 + #define TH1520_RESET_ID_HDMI_APB 11 15 22 16 23 #endif /* _DT_BINDINGS_TH1520_RESET_H */
+6
include/linux/firmware/qcom/qcom_scm.h
··· 175 175 176 176 #endif /* CONFIG_QCOM_QSEECOM */ 177 177 178 + int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size, 179 + phys_addr_t outbuf, size_t outbuf_size, 180 + u64 *result, u64 *response_type); 181 + int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size, 182 + u64 *result, u64 *response_type); 183 + 178 184 #endif
+15
include/linux/firmware/qcom/qcom_tzmem.h
··· 53 53 54 54 phys_addr_t qcom_tzmem_to_phys(void *ptr); 55 55 56 + #if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE) 57 + int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle); 58 + void qcom_tzmem_shm_bridge_delete(u64 handle); 59 + #else 60 + static inline int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, 61 + size_t size, u64 *handle) 62 + { 63 + return 0; 64 + } 65 + 66 + static inline void qcom_tzmem_shm_bridge_delete(u64 handle) 67 + { 68 + } 69 + #endif 70 + 56 71 #endif /* __QCOM_TZMEM */
+1 -1
include/linux/scmi_protocol.h
··· 153 153 * for a given device 154 154 * @fast_switch_rate_limit: gets the minimum time (us) required between 155 155 * successive fast_switching requests 156 - * @power_scale_mw_get: indicates if the power values provided are in milliWatts 156 + * @power_scale_get: indicates if the power values provided are in milliWatts 157 157 * or in some other (abstract) scale 158 158 */ 159 159 struct scmi_perf_proto_ops {
+4
include/linux/soc/qcom/geni-se.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 3 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 4 + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 4 5 */ 5 6 6 7 #ifndef _LINUX_QCOM_GENI_SE ··· 37 36 GENI_SE_I2C, 38 37 GENI_SE_I3C, 39 38 GENI_SE_SPI_SLAVE, 39 + GENI_SE_INVALID_PROTO = 255, 40 40 }; 41 41 42 42 struct geni_wrapper; ··· 533 531 int geni_icc_enable(struct geni_se *se); 534 532 535 533 int geni_icc_disable(struct geni_se *se); 534 + 535 + int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol); 536 536 #endif 537 537 #endif
+3 -4
include/linux/soc/qcom/mdt_loader.h
··· 24 24 phys_addr_t *reloc_base); 25 25 26 26 int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, 27 - const char *fw_name, int pas_id, void *mem_region, 27 + const char *fw_name, void *mem_region, 28 28 phys_addr_t mem_phys, size_t mem_size, 29 29 phys_addr_t *reloc_base); 30 30 void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, ··· 54 54 55 55 static inline int qcom_mdt_load_no_init(struct device *dev, 56 56 const struct firmware *fw, 57 - const char *fw_name, int pas_id, 58 - void *mem_region, phys_addr_t mem_phys, 59 - size_t mem_size, 57 + const char *fw_name, void *mem_region, 58 + phys_addr_t mem_phys, size_t mem_size, 60 59 phys_addr_t *reloc_base) 61 60 { 62 61 return -ENODEV;
+111 -2
include/linux/tee_core.h
··· 8 8 9 9 #include <linux/cdev.h> 10 10 #include <linux/device.h> 11 + #include <linux/dma-buf.h> 11 12 #include <linux/idr.h> 12 13 #include <linux/kref.h> 13 14 #include <linux/list.h> 15 + #include <linux/scatterlist.h> 14 16 #include <linux/tee.h> 15 17 #include <linux/tee_drv.h> 16 18 #include <linux/types.h> ··· 28 26 #define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ 29 27 #define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ 30 28 #define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ 29 + #define TEE_SHM_DMA_BUF BIT(4) /* Memory with dma-buf handle */ 30 + #define TEE_SHM_DMA_MEM BIT(5) /* Memory allocated with */ 31 + /* dma_alloc_pages() */ 31 32 32 33 #define TEE_DEVICE_FLAG_REGISTERED 0x1 33 34 #define TEE_MAX_DEV_NAME_LEN 32 35 + 36 + enum tee_dma_heap_id { 37 + TEE_DMA_HEAP_SECURE_VIDEO_PLAY = 1, 38 + TEE_DMA_HEAP_TRUSTED_UI, 39 + TEE_DMA_HEAP_SECURE_VIDEO_RECORD, 40 + }; 34 41 35 42 /** 36 43 * struct tee_device - TEE Device representation ··· 76 65 /** 77 66 * struct tee_driver_ops - driver operations vtable 78 67 * @get_version: returns version of driver 79 - * @open: called when the device file is opened 80 - * @release: release this open file 68 + * @open: called for a context when the device file is opened 69 + * @close_context: called when the device file is closed 70 + * @release: called to release the context 81 71 * @open_session: open a new session 82 72 * @close_session: close a session 83 73 * @system_session: declare session as a system session 84 74 * @invoke_func: invoke a trusted function 75 + * @object_invoke_func: invoke a TEE object 85 76 * @cancel_req: request cancel of an ongoing invoke or open 86 77 * @supp_recv: called for supplicant to get a command 87 78 * @supp_send: called for supplicant to send a response 88 79 * @shm_register: register shared memory buffer in TEE 89 80 * @shm_unregister: unregister shared memory buffer in TEE 81 + * 82 + * The context given to @open might last longer 
than the device file if it is 83 + * tied to other resources in the TEE driver. @close_context is called when the 84 + * client closes the device file, even if there are existing references to the 85 + * context. The TEE driver can use @close_context to start cleaning up. 90 86 */ 91 87 struct tee_driver_ops { 92 88 void (*get_version)(struct tee_device *teedev, 93 89 struct tee_ioctl_version_data *vers); 94 90 int (*open)(struct tee_context *ctx); 91 + void (*close_context)(struct tee_context *ctx); 95 92 void (*release)(struct tee_context *ctx); 96 93 int (*open_session)(struct tee_context *ctx, 97 94 struct tee_ioctl_open_session_arg *arg, ··· 109 90 int (*invoke_func)(struct tee_context *ctx, 110 91 struct tee_ioctl_invoke_arg *arg, 111 92 struct tee_param *param); 93 + int (*object_invoke_func)(struct tee_context *ctx, 94 + struct tee_ioctl_object_invoke_arg *arg, 95 + struct tee_param *param); 112 96 int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); 113 97 int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, 114 98 struct tee_param *param); ··· 136 114 const struct tee_driver_ops *ops; 137 115 struct module *owner; 138 116 u32 flags; 117 + }; 118 + 119 + /** 120 + * struct tee_protmem_pool - protected memory pool 121 + * @ops: operations 122 + * 123 + * This is an abstract interface where this struct is expected to be 124 + * embedded in another struct specific to the implementation. 
125 + */ 126 + struct tee_protmem_pool { 127 + const struct tee_protmem_pool_ops *ops; 128 + }; 129 + 130 + /** 131 + * struct tee_protmem_pool_ops - protected memory pool operations 132 + * @alloc: called when allocating protected memory 133 + * @free: called when freeing protected memory 134 + * @update_shm: called when registering a dma-buf to update the @shm 135 + * with physical address of the buffer or to return the 136 + * @parent_shm of the memory pool 137 + * @destroy_pool: called when destroying the pool 138 + */ 139 + struct tee_protmem_pool_ops { 140 + int (*alloc)(struct tee_protmem_pool *pool, struct sg_table *sgt, 141 + size_t size, size_t *offs); 142 + void (*free)(struct tee_protmem_pool *pool, struct sg_table *sgt); 143 + int (*update_shm)(struct tee_protmem_pool *pool, struct sg_table *sgt, 144 + size_t offs, struct tee_shm *shm, 145 + struct tee_shm **parent_shm); 146 + void (*destroy_pool)(struct tee_protmem_pool *pool); 139 147 }; 140 148 141 149 /** ··· 205 153 * @teedev is NULL. 206 154 */ 207 155 void tee_device_unregister(struct tee_device *teedev); 156 + 157 + int tee_device_register_dma_heap(struct tee_device *teedev, 158 + enum tee_dma_heap_id id, 159 + struct tee_protmem_pool *pool); 160 + void tee_device_put_all_dma_heaps(struct tee_device *teedev); 161 + 162 + /** 163 + * tee_device_get() - Increment the user count for a tee_device 164 + * @teedev: Pointer to the tee_device 165 + * 166 + * If tee_device_unregister() has been called and the final user of @teedev 167 + * has already released the device, this function will fail to prevent new users 168 + * from accessing the device during the unregistration process. 
169 + * 170 + * Returns: true if @teedev remains valid, otherwise false 171 + */ 172 + bool tee_device_get(struct tee_device *teedev); 173 + 174 + /** 175 + * tee_device_put() - Decrease the user count for a tee_device 176 + * @teedev: pointer to the tee_device 177 + */ 178 + void tee_device_put(struct tee_device *teedev); 208 179 209 180 /** 210 181 * tee_device_set_dev_groups() - Set device attribute groups ··· 305 230 } 306 231 307 232 /** 233 + * tee_protmem_static_pool_alloc() - Create a protected memory manager 234 + * @paddr: Physical address of start of pool 235 + * @size: Size in bytes of the pool 236 + * 237 + * @returns pointer to a 'struct tee_protmem_pool' or an ERR_PTR on failure. 238 + */ 239 + struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr, 240 + size_t size); 241 + 242 + /** 308 243 * tee_get_drvdata() - Return driver_data pointer 309 244 * @returns the driver_data pointer supplied to tee_register(). 310 245 */ ··· 328 243 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure 329 244 */ 330 245 struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); 246 + 247 + struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx, 248 + size_t page_count); 331 249 332 250 int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align, 333 251 int (*shm_register)(struct tee_context *ctx, ··· 402 314 * @ctx: The struct tee_context to close 403 315 */ 404 316 void teedev_close_context(struct tee_context *ctx); 317 + 318 + /** 319 + * teedev_ctx_get() - Increment the reference count of a context 320 + * @ctx: Pointer to the context 321 + * 322 + * This function increases the refcount of the context, which is tied to 323 + * resources shared by the same tee_device. During the unregistration process, 324 + * the context may remain valid even after tee_device_unregister() has returned. 
325 + * 326 + * Users should ensure that the context's refcount is properly decreased before 327 + * calling tee_device_put(), typically within the context's release() function. 328 + * Alternatively, users can call tee_device_get() and teedev_ctx_get() together 329 + * and release them simultaneously (see shm_alloc_helper()). 330 + */ 331 + void teedev_ctx_get(struct tee_context *ctx); 332 + 333 + /** 334 + * teedev_ctx_put() - Decrease reference count on a context 335 + * @ctx: pointer to the context 336 + */ 337 + void teedev_ctx_put(struct tee_context *ctx); 405 338 406 339 #endif /*__TEE_CORE_H*/
+22
include/linux/tee_drv.h
··· 82 82 struct tee_shm *shm; 83 83 }; 84 84 85 + struct tee_param_ubuf { 86 + void __user *uaddr; 87 + size_t size; 88 + }; 89 + 90 + struct tee_param_objref { 91 + u64 id; 92 + u64 flags; 93 + }; 94 + 85 95 struct tee_param_value { 86 96 u64 a; 87 97 u64 b; ··· 102 92 u64 attr; 103 93 union { 104 94 struct tee_param_memref memref; 95 + struct tee_param_objref objref; 96 + struct tee_param_ubuf ubuf; 105 97 struct tee_param_value value; 106 98 } u; 107 99 }; ··· 127 115 */ 128 116 struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, 129 117 void *addr, size_t length); 118 + 119 + /** 120 + * tee_shm_register_fd() - Register shared memory from file descriptor 121 + * 122 + * @ctx: Context that allocates the shared memory 123 + * @fd: Shared memory file descriptor reference 124 + * 125 + * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure 126 + */ 127 + struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd); 130 128 131 129 /** 132 130 * tee_shm_free() - Free shared memory
+79 -8
include/uapi/linux/tee.h
··· 42 42 #define TEE_IOC_MAGIC 0xa4 43 43 #define TEE_IOC_BASE 0 44 44 45 - #define TEE_MAX_ARG_SIZE 1024 45 + #define TEE_MAX_ARG_SIZE 4096 46 46 47 47 #define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */ 48 48 #define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */ 49 49 #define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */ 50 50 #define TEE_GEN_CAP_MEMREF_NULL (1 << 3)/* NULL MemRef support */ 51 + #define TEE_GEN_CAP_OBJREF (1 << 4)/* Supports generic object reference */ 51 52 52 - #define TEE_MEMREF_NULL (__u64)(-1) /* NULL MemRef Buffer */ 53 + #define TEE_MEMREF_NULL ((__u64)(-1)) /* NULL MemRef Buffer */ 54 + #define TEE_OBJREF_NULL ((__u64)(-1)) /* NULL ObjRef Object */ 53 55 54 56 /* 55 57 * TEE Implementation ID ··· 59 57 #define TEE_IMPL_ID_OPTEE 1 60 58 #define TEE_IMPL_ID_AMDTEE 2 61 59 #define TEE_IMPL_ID_TSTEE 3 60 + #define TEE_IMPL_ID_QTEE 4 62 61 63 62 /* 64 63 * OP-TEE specific capabilities ··· 155 152 #define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */ 156 153 157 154 /* 155 + * These defines userspace buffer parameters. 156 + */ 157 + #define TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT 8 158 + #define TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT 9 159 + #define TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT 10 /* input and output */ 160 + 161 + /* 162 + * These defines object reference parameters. 
163 + */ 164 + #define TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT 11 165 + #define TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT 12 166 + #define TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT 13 167 + 168 + /* 158 169 * Mask for the type part of the attribute, leaves room for more types 159 170 */ 160 171 #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff ··· 203 186 /** 204 187 * struct tee_ioctl_param - parameter 205 188 * @attr: attributes 206 - * @a: if a memref, offset into the shared memory object, else a value parameter 207 - * @b: if a memref, size of the buffer, else a value parameter 189 + * @a: if a memref, offset into the shared memory object, 190 + * else if a ubuf, address of the user buffer, 191 + * else if an objref, object identifier, else a value parameter 192 + * @b: if a memref or ubuf, size of the buffer, 193 + * else if objref, flags for the object, else a value parameter 208 194 * @c: if a memref, shared memory identifier, else a value parameter 209 195 * 210 - * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in 211 - * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and 212 - * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE 213 - * indicates that none of the members are used. 196 + * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref, ubuf, or value is 197 + * used in the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value, 198 + * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref, TEE_PARAM_ATTR_TYPE_UBUF_* 199 + * indicates ubuf, and TEE_PARAM_ATTR_TYPE_OBJREF_* indicates objref. 200 + * TEE_PARAM_ATTR_TYPE_NONE indicates that none of the members are used. 214 201 * 215 202 * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an 216 203 * identifier representing the shared memory object. 
A memref can reference ··· 400 379 }; 401 380 402 381 /** 382 + * struct tee_ioctl_shm_register_fd_data - Shared memory registering argument 383 + * @fd: [in] File descriptor identifying dmabuf reference 384 + * @size: [out] Size of referenced memory 385 + * @flags: [in] Flags to/from allocation. 386 + * @id: [out] Identifier of the shared memory 387 + * 388 + * The flags field should currently be zero as input. Updated by the call 389 + * with actual flags as defined by TEE_IOCTL_SHM_* above. 390 + * This structure is used as argument for TEE_IOC_SHM_REGISTER_FD below. 391 + */ 392 + struct tee_ioctl_shm_register_fd_data { 393 + __s64 fd; 394 + __u64 size; 395 + __u32 flags; 396 + __s32 id; 397 + }; 398 + 399 + /** 400 + * TEE_IOC_SHM_REGISTER_FD - register a shared memory from a file descriptor 401 + * 402 + * Returns a file descriptor on success or < 0 on failure 403 + * 404 + * The returned file descriptor refers to the shared memory object in the 405 + * kernel. The supplied file descriptor can be closed if it's not needed 406 + * for other purposes. The shared memory is freed when the descriptor is 407 + * closed. 408 + */ 409 + #define TEE_IOC_SHM_REGISTER_FD _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 8, \ 410 + struct tee_ioctl_shm_register_fd_data) 411 + 412 + /** 403 413 * TEE_IOC_SHM_REGISTER - Register shared memory argument 404 414 * 405 415 * Registers shared memory between the user space process and secure OS. 
··· 452 400 * tee_ioctl_shm_alloc_data 453 401 * munmap(): unmaps previously shared memory 454 402 */ 403 + 404 + /** 405 + * struct tee_ioctl_object_invoke_arg - Invokes an object in a Trusted Application 406 + * @id: [in] Object id 407 + * @op: [in] Object operation, specific to the object 408 + * @ret: [out] return value 409 + * @num_params: [in] number of parameters following this struct 410 + */ 411 + struct tee_ioctl_object_invoke_arg { 412 + __u64 id; 413 + __u32 op; 414 + __u32 ret; 415 + __u32 num_params; 416 + /* num_params tells the actual number of element in params */ 417 + struct tee_ioctl_param params[]; 418 + }; 419 + 420 + #define TEE_IOC_OBJECT_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 10, \ 421 + struct tee_ioctl_buf_data) 455 422 456 423 #endif /*__TEE_H*/
+1
sound/soc/apple/mca.c
··· 1191 1191 } 1192 1192 1193 1193 static const struct of_device_id apple_mca_of_match[] = { 1194 + { .compatible = "apple,t8103-mca", }, 1194 1195 { .compatible = "apple,mca", }, 1195 1196 {} 1196 1197 };