Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'spi-5.7' into spi-next

+3730 -1278
+36
Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-or-later) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/fsi/ibm,fsi2spi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: IBM FSI-attached SPI controllers 8 + 9 + maintainers: 10 + - Eddie James <eajames@linux.ibm.com> 11 + 12 + description: | 13 + This binding describes an FSI CFAM engine called the FSI2SPI. Therefore this 14 + node will always be a child of an FSI CFAM node; see fsi.txt for details on 15 + FSI slave and CFAM nodes. This FSI2SPI engine provides access to a number of 16 + SPI controllers. 17 + 18 + properties: 19 + compatible: 20 + enum: 21 + - ibm,fsi2spi 22 + 23 + reg: 24 + items: 25 + - description: FSI slave address 26 + 27 + required: 28 + - compatible 29 + - reg 30 + 31 + examples: 32 + - | 33 + fsi2spi@1c00 { 34 + compatible = "ibm,fsi2spi"; 35 + reg = <0x1c00 0x400>; 36 + };
+4
Documentation/devicetree/bindings/mfd/qcom-rpm.txt
··· 61 61 "qcom,rpm-pm8901-regulators" 62 62 "qcom,rpm-pm8921-regulators" 63 63 "qcom,rpm-pm8018-regulators" 64 + "qcom,rpm-smb208-regulators" 64 65 65 66 - vdd_l0_l1_lvs-supply: 66 67 - vdd_l2_l11_l12-supply: ··· 171 170 pm8018: 172 171 s1, s2, s3, s4, s5, , l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, 173 172 l12, l14, lvs1 173 + 174 + smb208: 175 + s1a, s1b, s2a, s2b 174 176 175 177 The content of each sub-node is defined by the standard binding for regulators - 176 178 see regulator.txt - with additional custom properties described below:
+6 -9
Documentation/devicetree/bindings/mtd/mtk-quadspi.txt Documentation/devicetree/bindings/spi/spi-mtk-nor.txt
··· 1 - * Serial NOR flash controller for MediaTek SoCs 1 + * Serial NOR flash controller for MediaTek ARM SoCs 2 2 3 3 Required properties: 4 4 - compatible: For mt8173, compatible should be "mediatek,mt8173-nor", ··· 13 13 "mediatek,mt7629-nor", "mediatek,mt8173-nor" 14 14 "mediatek,mt8173-nor" 15 15 - reg: physical base address and length of the controller's register 16 + - interrupts: Interrupt number used by the controller. 16 17 - clocks: the phandle of the clocks needed by the nor controller 17 18 - clock-names: the names of the clocks 18 19 the clocks should be named "spi" and "sf". "spi" is used for spi bus, ··· 23 22 - #address-cells: should be <1> 24 23 - #size-cells: should be <0> 25 24 26 - The SPI flash must be a child of the nor_flash node and must have a 27 - compatible property. Also see jedec,spi-nor.txt. 28 - 29 - Required properties: 30 - - compatible: May include a device-specific string consisting of the manufacturer 31 - and name of the chip. Must also include "jedec,spi-nor" for any 32 - SPI NOR flash that can be identified by the JEDEC READ ID opcode (0x9F). 33 - - reg : Chip-Select number 25 + There should be only one spi slave device following generic spi bindings. 26 + It's not recommended to use this controller for devices other than SPI NOR 27 + flash due to limited transfer capability of this controller. 34 28 35 29 Example: 36 30 37 31 nor_flash: spi@1100d000 { 38 32 compatible = "mediatek,mt8173-nor"; 39 33 reg = <0 0x1100d000 0 0xe0>; 34 + interrupts = <&spi_flash_irq>; 40 35 clocks = <&pericfg CLK_PERI_SPI>, 41 36 <&topckgen CLK_TOP_SPINFI_IFR_SEL>; 42 37 clock-names = "spi", "sf";
+78
Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/regulator/mps,mp5416.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Monolithic Power System MP5416 PMIC 8 + 9 + maintainers: 10 + - Saravanan Sekar <sravanhome@gmail.com> 11 + 12 + properties: 13 + $nodename: 14 + pattern: "^pmic@[0-9a-f]{1,2}$" 15 + compatible: 16 + enum: 17 + - mps,mp5416 18 + 19 + reg: 20 + maxItems: 1 21 + 22 + regulators: 23 + type: object 24 + description: | 25 + list of regulators provided by this controller, must be named 26 + after their hardware counterparts BUCK[1-4] and LDO[1-4] 27 + 28 + patternProperties: 29 + "^buck[1-4]$": 30 + allOf: 31 + - $ref: "regulator.yaml#" 32 + type: object 33 + 34 + "^ldo[1-4]$": 35 + allOf: 36 + - $ref: "regulator.yaml#" 37 + type: object 38 + 39 + additionalProperties: false 40 + additionalProperties: false 41 + 42 + required: 43 + - compatible 44 + - reg 45 + - regulators 46 + 47 + additionalProperties: false 48 + 49 + examples: 50 + - | 51 + i2c { 52 + #address-cells = <1>; 53 + #size-cells = <0>; 54 + 55 + pmic@69 { 56 + compatible = "mps,mp5416"; 57 + reg = <0x69>; 58 + 59 + regulators { 60 + 61 + buck1 { 62 + regulator-name = "buck1"; 63 + regulator-min-microvolt = <600000>; 64 + regulator-max-microvolt = <2187500>; 65 + regulator-min-microamp = <3800000>; 66 + regulator-max-microamp = <6800000>; 67 + regulator-boot-on; 68 + }; 69 + 70 + ldo2 { 71 + regulator-name = "ldo2"; 72 + regulator-min-microvolt = <800000>; 73 + regulator-max-microvolt = <3975000>; 74 + }; 75 + }; 76 + }; 77 + }; 78 + ...
+22
Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
··· 22 22 enum: 23 23 - amlogic,meson-gx-spicc # SPICC controller on Amlogic GX and compatible SoCs 24 24 - amlogic,meson-axg-spicc # SPICC controller on Amlogic AXG and compatible SoCs 25 + - amlogic,meson-g12a-spicc # SPICC controller on Amlogic G12A and compatible SoCs 25 26 26 27 interrupts: 27 28 maxItems: 1 ··· 40 39 description: input clock for the baud rate generator 41 40 items: 42 41 - const: core 42 + 43 + if: 44 + properties: 45 + compatible: 46 + contains: 47 + enum: 48 + - amlogic,meson-g12a-spicc 49 + 50 + then: 51 + properties: 52 + clocks: 53 + contains: 54 + items: 55 + - description: controller register bus clock 56 + - description: baud rate generator and delay control clock 57 + 58 + clock-names: 59 + minItems: 2 60 + items: 61 + - const: core 62 + - const: pclk 43 63 44 64 required: 45 65 - compatible
+4 -1
Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
··· 10 10 - "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35 11 11 - "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51 12 12 - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc 13 - - "fsl,imx8mq-ecspi" for SPI compatible with the one integrated on i.MX8M 13 + - "fsl,imx8mq-ecspi" for SPI compatible with the one integrated on i.MX8MQ 14 + - "fsl,imx8mm-ecspi" for SPI compatible with the one integrated on i.MX8MM 15 + - "fsl,imx8mn-ecspi" for SPI compatible with the one integrated on i.MX8MN 16 + - "fsl,imx8mp-ecspi" for SPI compatible with the one integrated on i.MX8MP 14 17 - reg : Offset and length of the register set for the device 15 18 - interrupts : Should contain CSPI/eCSPI interrupt 16 19 - clocks : Clock specifiers for both ipg and per clocks.
+41
Documentation/devicetree/bindings/spi/qca,ar934x-spi.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/spi/qca,ar934x-spi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Qualcomm Atheros AR934x/QCA95xx SoC SPI controller 8 + 9 + maintainers: 10 + - Chuanhong Guo <gch981213@gmail.com> 11 + 12 + allOf: 13 + - $ref: spi-controller.yaml# 14 + 15 + properties: 16 + compatible: 17 + const: qca,ar934x-spi 18 + 19 + reg: 20 + maxItems: 1 21 + 22 + clocks: 23 + maxItems: 1 24 + 25 + required: 26 + - compatible 27 + - reg 28 + - clocks 29 + - '#address-cells' 30 + - '#size-cells' 31 + 32 + examples: 33 + - | 34 + #include <dt-bindings/clock/ath79-clk.h> 35 + spi: spi@1f000000 { 36 + compatible = "qca,ar934x-spi"; 37 + reg = <0x1f000000 0x1c>; 38 + clocks = <&pll ATH79_CLK_AHB>; 39 + #address-cells = <1>; 40 + #size-cells = <0>; 41 + };
+8 -2
Documentation/devicetree/bindings/spi/spi-controller.yaml
··· 52 52 description: 53 53 The SPI controller acts as a slave, instead of a master. 54 54 55 + oneOf: 56 + - required: 57 + - "#address-cells" 58 + - required: 59 + - spi-slave 60 + 55 61 patternProperties: 56 62 "^slave$": 57 63 type: object ··· 120 114 - enum: [ 1, 2, 4, 8 ] 121 115 - default: 1 122 116 description: 123 - Bus width to the SPI bus used for MISO. 117 + Bus width to the SPI bus used for read transfers. 124 118 125 119 spi-rx-delay-us: 126 120 description: ··· 132 126 - enum: [ 1, 2, 4, 8 ] 133 127 - default: 1 134 128 description: 135 - Bus width to the SPI bus used for MOSI. 129 + Bus width to the SPI bus used for write transfers. 136 130 137 131 spi-tx-delay-us: 138 132 description:
+12 -7
Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
··· 1 1 ARM Freescale DSPI controller 2 2 3 3 Required properties: 4 - - compatible : "fsl,vf610-dspi", "fsl,ls1021a-v1.0-dspi", 5 - "fsl,ls2085a-dspi" 6 - or 7 - "fsl,ls2080a-dspi" followed by "fsl,ls2085a-dspi" 8 - "fsl,ls1012a-dspi" followed by "fsl,ls1021a-v1.0-dspi" 9 - "fsl,ls1088a-dspi" followed by "fsl,ls1021a-v1.0-dspi" 4 + - compatible : must be one of: 5 + "fsl,vf610-dspi", 6 + "fsl,ls1021a-v1.0-dspi", 7 + "fsl,ls1012a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"), 8 + "fsl,ls1028a-dspi", 9 + "fsl,ls1043a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"), 10 + "fsl,ls1046a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"), 11 + "fsl,ls1088a-dspi" (optionally followed by "fsl,ls1021a-v1.0-dspi"), 12 + "fsl,ls2080a-dspi" (optionally followed by "fsl,ls2085a-dspi"), 13 + "fsl,ls2085a-dspi", 14 + "fsl,lx2160a-dspi", 10 15 - reg : Offset and length of the register set for the device 11 16 - interrupts : Should contain SPI controller interrupt 12 17 - clocks: from common clock binding: handle to dspi clock. ··· 19 14 - pinctrl-0: pin control group to be used for this controller. 20 15 - pinctrl-names: must contain a "default" entry. 21 16 - spi-num-chipselects : the number of the chipselect signals. 22 - - bus-num : the slave chip chipselect signal number. 23 17 24 18 Optional property: 25 19 - big-endian: If present the dspi device's registers are implemented 26 20 in big endian mode. 21 + - bus-num : the slave chip chipselect signal number. 27 22 28 23 Optional SPI slave node properties: 29 24 - fsl,spi-cs-sck-delay: a delay in nanoseconds between activating chip
+89
Documentation/devicetree/bindings/spi/spi-mux.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/spi/spi-mux.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Generic SPI Multiplexer 8 + 9 + description: | 10 + This binding describes a SPI bus multiplexer to route the SPI chip select 11 + signals. This can be used when you need more devices than the SPI controller 12 + has chip selects available. An example setup is shown in ASCII art; the actual 13 + setting of the multiplexer to a channel needs to be done by a specific SPI mux 14 + driver. 15 + 16 + MOSI /--------------------------------+--------+--------+--------\ 17 + MISO |/------------------------------+|-------+|-------+|-------\| 18 + SCL ||/----------------------------+||------+||------+||------\|| 19 + ||| ||| ||| ||| ||| 20 + +------------+ ||| ||| ||| ||| 21 + | SoC ||| | +-+++-+ +-+++-+ +-+++-+ +-+++-+ 22 + | ||| | | dev | | dev | | dev | | dev | 23 + | +--+++-+ | CS-X +------+\ +--+--+ +--+--+ +--+--+ +--+--+ 24 + | | SPI +-|-------+ Mux |\\ CS-0 | | | | 25 + | +------+ | +--+---+\\\-------/ CS-1 | | | 26 + | | | \\\----------------/ CS-2 | | 27 + | +------+ | | \\-------------------------/ CS-3 | 28 + | | ? 
+-|----------/ \----------------------------------/ 29 + | +------+ | 30 + +------------+ 31 + 32 + allOf: 33 + - $ref: "/schemas/spi/spi-controller.yaml#" 34 + 35 + maintainers: 36 + - Chris Packham <chris.packham@alliedtelesis.co.nz> 37 + 38 + properties: 39 + compatible: 40 + const: spi-mux 41 + 42 + mux-controls: 43 + maxItems: 1 44 + 45 + required: 46 + - compatible 47 + - reg 48 + - spi-max-frequency 49 + - mux-controls 50 + 51 + examples: 52 + - | 53 + #include <dt-bindings/gpio/gpio.h> 54 + mux: mux-controller { 55 + compatible = "gpio-mux"; 56 + #mux-control-cells = <0>; 57 + 58 + mux-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>; 59 + }; 60 + 61 + spi { 62 + #address-cells = <1>; 63 + #size-cells = <0>; 64 + spi@0 { 65 + compatible = "spi-mux"; 66 + reg = <0>; 67 + #address-cells = <1>; 68 + #size-cells = <0>; 69 + spi-max-frequency = <100000000>; 70 + 71 + mux-controls = <&mux>; 72 + 73 + spi-flash@0 { 74 + compatible = "jedec,spi-nor"; 75 + reg = <0>; 76 + #address-cells = <1>; 77 + #size-cells = <0>; 78 + spi-max-frequency = <40000000>; 79 + }; 80 + 81 + spi-device@1 { 82 + compatible = "lineartechnology,ltc2488"; 83 + reg = <1>; 84 + #address-cells = <1>; 85 + #size-cells = <0>; 86 + spi-max-frequency = <10000000>; 87 + }; 88 + }; 89 + };
+3
Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
··· 2 2 3 3 Required properties: 4 4 - compatible : Should be "nxp,lx2160a-fspi" 5 + "nxp,imx8qxp-fspi" 6 + "nxp,imx8mm-fspi" 7 + 5 8 - reg : First contains the register location and length, 6 9 Second contains the memory mapping address and length 7 10 - reg-names : Should contain the resource reg names:
-58
Documentation/devicetree/bindings/spi/spi-rockchip.txt
··· 1 - * Rockchip SPI Controller 2 - 3 - The Rockchip SPI controller is used to interface with various devices such as flash 4 - and display controllers using the SPI communication interface. 5 - 6 - Required Properties: 7 - 8 - - compatible: should be one of the following. 9 - "rockchip,rv1108-spi" for rv1108 SoCs. 10 - "rockchip,px30-spi", "rockchip,rk3066-spi" for px30 SoCs. 11 - "rockchip,rk3036-spi" for rk3036 SoCS. 12 - "rockchip,rk3066-spi" for rk3066 SoCs. 13 - "rockchip,rk3188-spi" for rk3188 SoCs. 14 - "rockchip,rk3228-spi" for rk3228 SoCS. 15 - "rockchip,rk3288-spi" for rk3288 SoCs. 16 - "rockchip,rk3368-spi" for rk3368 SoCs. 17 - "rockchip,rk3399-spi" for rk3399 SoCs. 18 - - reg: physical base address of the controller and length of memory mapped 19 - region. 20 - - interrupts: The interrupt number to the cpu. The interrupt specifier format 21 - depends on the interrupt controller. 22 - - clocks: Must contain an entry for each entry in clock-names. 23 - - clock-names: Shall be "spiclk" for the transfer-clock, and "apb_pclk" for 24 - the peripheral clock. 25 - - #address-cells: should be 1. 26 - - #size-cells: should be 0. 27 - 28 - Optional Properties: 29 - 30 - - dmas: DMA specifiers for tx and rx dma. See the DMA client binding, 31 - Documentation/devicetree/bindings/dma/dma.txt 32 - - dma-names: DMA request names should include "tx" and "rx" if present. 33 - - rx-sample-delay-ns: nanoseconds to delay after the SCLK edge before sampling 34 - Rx data (may need to be fine tuned for high capacitance lines). 35 - No delay (0) by default. 36 - - pinctrl-names: Names for the pin configuration(s); may be "default" or 37 - "sleep", where the "sleep" configuration may describe the state 38 - the pins should be in during system suspend. See also 39 - pinctrl/pinctrl-bindings.txt. 
40 - 41 - 42 - Example: 43 - 44 - spi0: spi@ff110000 { 45 - compatible = "rockchip,rk3066-spi"; 46 - reg = <0xff110000 0x1000>; 47 - dmas = <&pdma1 11>, <&pdma1 12>; 48 - dma-names = "tx", "rx"; 49 - rx-sample-delay-ns = <10>; 50 - #address-cells = <1>; 51 - #size-cells = <0>; 52 - interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; 53 - clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>; 54 - clock-names = "spiclk", "apb_pclk"; 55 - pinctrl-0 = <&spi1_pins>; 56 - pinctrl-1 = <&spi1_sleep>; 57 - pinctrl-names = "default", "sleep"; 58 - };
+107
Documentation/devicetree/bindings/spi/spi-rockchip.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/spi/spi-rockchip.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Rockchip SPI Controller 8 + 9 + description: 10 + The Rockchip SPI controller is used to interface with various devices such 11 + as flash and display controllers using the SPI communication interface. 12 + 13 + allOf: 14 + - $ref: "spi-controller.yaml#" 15 + 16 + maintainers: 17 + - Heiko Stuebner <heiko@sntech.de> 18 + 19 + # Everything else is described in the common file 20 + properties: 21 + compatible: 22 + oneOf: 23 + - const: rockchip,rk3036-spi 24 + - const: rockchip,rk3066-spi 25 + - const: rockchip,rk3228-spi 26 + - const: rockchip,rv1108-spi 27 + - items: 28 + - enum: 29 + - rockchip,px30-spi 30 + - rockchip,rk3188-spi 31 + - rockchip,rk3288-spi 32 + - rockchip,rk3308-spi 33 + - rockchip,rk3328-spi 34 + - rockchip,rk3368-spi 35 + - rockchip,rk3399-spi 36 + - const: rockchip,rk3066-spi 37 + 38 + reg: 39 + maxItems: 1 40 + 41 + interrupts: 42 + maxItems: 1 43 + 44 + clocks: 45 + items: 46 + - description: transfer-clock 47 + - description: peripheral clock 48 + 49 + clock-names: 50 + items: 51 + - const: spiclk 52 + - const: apb_pclk 53 + 54 + dmas: 55 + items: 56 + - description: TX DMA Channel 57 + - description: RX DMA Channel 58 + 59 + dma-names: 60 + items: 61 + - const: tx 62 + - const: rx 63 + 64 + rx-sample-delay-ns: 65 + default: 0 66 + description: 67 + Nano seconds to delay after the SCLK edge before sampling Rx data 68 + (may need to be fine tuned for high capacitance lines). 69 + If not specified 0 will be used. 70 + 71 + pinctrl-names: 72 + minItems: 1 73 + items: 74 + - const: default 75 + - const: sleep 76 + description: 77 + Names for the pin configuration(s); may be "default" or "sleep", 78 + where the "sleep" configuration may describe the state 79 + the pins should be in during system suspend. 
80 + 81 + required: 82 + - compatible 83 + - reg 84 + - interrupts 85 + - clocks 86 + - clock-names 87 + 88 + examples: 89 + - | 90 + #include <dt-bindings/clock/rk3188-cru-common.h> 91 + #include <dt-bindings/interrupt-controller/arm-gic.h> 92 + #include <dt-bindings/interrupt-controller/irq.h> 93 + spi0: spi@ff110000 { 94 + compatible = "rockchip,rk3066-spi"; 95 + reg = <0xff110000 0x1000>; 96 + interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; 97 + clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>; 98 + clock-names = "spiclk", "apb_pclk"; 99 + dmas = <&pdma1 11>, <&pdma1 12>; 100 + dma-names = "tx", "rx"; 101 + pinctrl-0 = <&spi1_pins>; 102 + pinctrl-1 = <&spi1_sleep>; 103 + pinctrl-names = "default", "sleep"; 104 + rx-sample-delay-ns = <10>; 105 + #address-cells = <1>; 106 + #size-cells = <0>; 107 + };
+10 -1
MAINTAINERS
··· 2276 2276 S: Maintained 2277 2277 F: Documentation/devicetree/bindings/i2c/i2c-rk3x.txt 2278 2278 F: Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml 2279 + F: Documentation/devicetree/bindings/spi/spi-rockchip.yaml 2279 2280 F: arch/arm/boot/dts/rk3* 2280 2281 F: arch/arm/boot/dts/rv1108* 2281 2282 F: arch/arm/mach-rockchip/ ··· 6859 6858 F: drivers/i2c/busses/i2c-fsi.c 6860 6859 F: Documentation/devicetree/bindings/i2c/i2c-fsi.txt 6861 6860 6861 + FSI-ATTACHED SPI DRIVER 6862 + M: Eddie James <eajames@linux.ibm.com> 6863 + L: linux-spi@vger.kernel.org 6864 + S: Maintained 6865 + F: drivers/spi/spi-fsi.c 6866 + F: Documentation/devicetree/bindings/fsi/ibm,fsi2spi.yaml 6867 + 6862 6868 FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE 6863 6869 M: Jan Kara <jack@suse.cz> 6864 6870 R: Amir Goldstein <amir73il@gmail.com> ··· 11264 11256 MONOLITHIC POWER SYSTEM PMIC DRIVER 11265 11257 M: Saravanan Sekar <sravanhome@gmail.com> 11266 11258 S: Maintained 11267 - F: Documentation/devicetree/bindings/regulator/mpq7920.yaml 11259 + F: Documentation/devicetree/bindings/regulator/mps,mp*.yaml 11260 + F: drivers/regulator/mp5416.c 11268 11261 F: drivers/regulator/mpq7920.c 11269 11262 F: drivers/regulator/mpq7920.h 11270 11263
-8
drivers/mtd/spi-nor/Kconfig
··· 52 52 help 53 53 This enables support for HiSilicon FMC SPI-NOR flash controller. 54 54 55 - config SPI_MTK_QUADSPI 56 - tristate "MediaTek Quad SPI controller" 57 - depends on HAS_IOMEM 58 - help 59 - This enables support for the Quad SPI controller in master mode. 60 - This controller does not support generic SPI. It only supports 61 - SPI NOR. 62 - 63 55 config SPI_NXP_SPIFI 64 56 tristate "NXP SPI Flash Interface (SPIFI)" 65 57 depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
-1
drivers/mtd/spi-nor/Makefile
··· 3 3 obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o 4 4 obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o 5 5 obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o 6 - obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o 7 6 obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o 8 7 obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o 9 8 obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
-565
drivers/mtd/spi-nor/mtk-quadspi.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Copyright (c) 2015 MediaTek Inc. 4 - * Author: Bayi Cheng <bayi.cheng@mediatek.com> 5 - */ 6 - 7 - #include <linux/clk.h> 8 - #include <linux/delay.h> 9 - #include <linux/device.h> 10 - #include <linux/init.h> 11 - #include <linux/io.h> 12 - #include <linux/iopoll.h> 13 - #include <linux/ioport.h> 14 - #include <linux/math64.h> 15 - #include <linux/module.h> 16 - #include <linux/mutex.h> 17 - #include <linux/of.h> 18 - #include <linux/of_device.h> 19 - #include <linux/platform_device.h> 20 - #include <linux/slab.h> 21 - #include <linux/mtd/mtd.h> 22 - #include <linux/mtd/partitions.h> 23 - #include <linux/mtd/spi-nor.h> 24 - 25 - #define MTK_NOR_CMD_REG 0x00 26 - #define MTK_NOR_CNT_REG 0x04 27 - #define MTK_NOR_RDSR_REG 0x08 28 - #define MTK_NOR_RDATA_REG 0x0c 29 - #define MTK_NOR_RADR0_REG 0x10 30 - #define MTK_NOR_RADR1_REG 0x14 31 - #define MTK_NOR_RADR2_REG 0x18 32 - #define MTK_NOR_WDATA_REG 0x1c 33 - #define MTK_NOR_PRGDATA0_REG 0x20 34 - #define MTK_NOR_PRGDATA1_REG 0x24 35 - #define MTK_NOR_PRGDATA2_REG 0x28 36 - #define MTK_NOR_PRGDATA3_REG 0x2c 37 - #define MTK_NOR_PRGDATA4_REG 0x30 38 - #define MTK_NOR_PRGDATA5_REG 0x34 39 - #define MTK_NOR_SHREG0_REG 0x38 40 - #define MTK_NOR_SHREG1_REG 0x3c 41 - #define MTK_NOR_SHREG2_REG 0x40 42 - #define MTK_NOR_SHREG3_REG 0x44 43 - #define MTK_NOR_SHREG4_REG 0x48 44 - #define MTK_NOR_SHREG5_REG 0x4c 45 - #define MTK_NOR_SHREG6_REG 0x50 46 - #define MTK_NOR_SHREG7_REG 0x54 47 - #define MTK_NOR_SHREG8_REG 0x58 48 - #define MTK_NOR_SHREG9_REG 0x5c 49 - #define MTK_NOR_CFG1_REG 0x60 50 - #define MTK_NOR_CFG2_REG 0x64 51 - #define MTK_NOR_CFG3_REG 0x68 52 - #define MTK_NOR_STATUS0_REG 0x70 53 - #define MTK_NOR_STATUS1_REG 0x74 54 - #define MTK_NOR_STATUS2_REG 0x78 55 - #define MTK_NOR_STATUS3_REG 0x7c 56 - #define MTK_NOR_FLHCFG_REG 0x84 57 - #define MTK_NOR_TIME_REG 0x94 58 - #define MTK_NOR_PP_DATA_REG 0x98 59 - #define MTK_NOR_PREBUF_STUS_REG 0x9c 60 - 
#define MTK_NOR_DELSEL0_REG 0xa0 61 - #define MTK_NOR_DELSEL1_REG 0xa4 62 - #define MTK_NOR_INTRSTUS_REG 0xa8 63 - #define MTK_NOR_INTREN_REG 0xac 64 - #define MTK_NOR_CHKSUM_CTL_REG 0xb8 65 - #define MTK_NOR_CHKSUM_REG 0xbc 66 - #define MTK_NOR_CMD2_REG 0xc0 67 - #define MTK_NOR_WRPROT_REG 0xc4 68 - #define MTK_NOR_RADR3_REG 0xc8 69 - #define MTK_NOR_DUAL_REG 0xcc 70 - #define MTK_NOR_DELSEL2_REG 0xd0 71 - #define MTK_NOR_DELSEL3_REG 0xd4 72 - #define MTK_NOR_DELSEL4_REG 0xd8 73 - 74 - /* commands for mtk nor controller */ 75 - #define MTK_NOR_READ_CMD 0x0 76 - #define MTK_NOR_RDSR_CMD 0x2 77 - #define MTK_NOR_PRG_CMD 0x4 78 - #define MTK_NOR_WR_CMD 0x10 79 - #define MTK_NOR_PIO_WR_CMD 0x90 80 - #define MTK_NOR_WRSR_CMD 0x20 81 - #define MTK_NOR_PIO_READ_CMD 0x81 82 - #define MTK_NOR_WR_BUF_ENABLE 0x1 83 - #define MTK_NOR_WR_BUF_DISABLE 0x0 84 - #define MTK_NOR_ENABLE_SF_CMD 0x30 85 - #define MTK_NOR_DUAD_ADDR_EN 0x8 86 - #define MTK_NOR_QUAD_READ_EN 0x4 87 - #define MTK_NOR_DUAL_ADDR_EN 0x2 88 - #define MTK_NOR_DUAL_READ_EN 0x1 89 - #define MTK_NOR_DUAL_DISABLE 0x0 90 - #define MTK_NOR_FAST_READ 0x1 91 - 92 - #define SFLASH_WRBUF_SIZE 128 93 - 94 - /* Can shift up to 48 bits (6 bytes) of TX/RX */ 95 - #define MTK_NOR_MAX_RX_TX_SHIFT 6 96 - /* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */ 97 - #define MTK_NOR_MAX_SHIFT 7 98 - /* nor controller 4-byte address mode enable bit */ 99 - #define MTK_NOR_4B_ADDR_EN BIT(4) 100 - 101 - /* Helpers for accessing the program data / shift data registers */ 102 - #define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n)) 103 - #define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n)) 104 - 105 - struct mtk_nor { 106 - struct spi_nor nor; 107 - struct device *dev; 108 - void __iomem *base; /* nor flash base address */ 109 - struct clk *spi_clk; 110 - struct clk *nor_clk; 111 - }; 112 - 113 - static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor) 114 - { 115 - struct spi_nor *nor = &mtk_nor->nor; 116 - 117 - 
switch (nor->read_proto) { 118 - case SNOR_PROTO_1_1_1: 119 - writeb(nor->read_opcode, mtk_nor->base + 120 - MTK_NOR_PRGDATA3_REG); 121 - writeb(MTK_NOR_FAST_READ, mtk_nor->base + 122 - MTK_NOR_CFG1_REG); 123 - break; 124 - case SNOR_PROTO_1_1_2: 125 - writeb(nor->read_opcode, mtk_nor->base + 126 - MTK_NOR_PRGDATA3_REG); 127 - writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base + 128 - MTK_NOR_DUAL_REG); 129 - break; 130 - case SNOR_PROTO_1_1_4: 131 - writeb(nor->read_opcode, mtk_nor->base + 132 - MTK_NOR_PRGDATA4_REG); 133 - writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base + 134 - MTK_NOR_DUAL_REG); 135 - break; 136 - default: 137 - writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base + 138 - MTK_NOR_DUAL_REG); 139 - break; 140 - } 141 - } 142 - 143 - static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval) 144 - { 145 - int reg; 146 - u8 val = cmdval & 0x1f; 147 - 148 - writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG); 149 - return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg, 150 - !(reg & val), 100, 10000); 151 - } 152 - 153 - static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op, 154 - const u8 *tx, size_t txlen, u8 *rx, size_t rxlen) 155 - { 156 - size_t len = 1 + txlen + rxlen; 157 - int i, ret, idx; 158 - 159 - if (len > MTK_NOR_MAX_SHIFT) 160 - return -EINVAL; 161 - 162 - writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG); 163 - 164 - /* start at PRGDATA5, go down to PRGDATA0 */ 165 - idx = MTK_NOR_MAX_RX_TX_SHIFT - 1; 166 - 167 - /* opcode */ 168 - writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx)); 169 - idx--; 170 - 171 - /* program TX data */ 172 - for (i = 0; i < txlen; i++, idx--) 173 - writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx)); 174 - 175 - /* clear out rest of TX registers */ 176 - while (idx >= 0) { 177 - writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx)); 178 - idx--; 179 - } 180 - 181 - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD); 182 - if (ret) 183 - return ret; 184 - 185 - /* restart at first RX byte */ 186 - idx = rxlen - 1; 187 - 
188 - /* read out RX data */ 189 - for (i = 0; i < rxlen; i++, idx--) 190 - rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx)); 191 - 192 - return 0; 193 - } 194 - 195 - /* Do a WRSR (Write Status Register) command */ 196 - static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, const u8 sr) 197 - { 198 - writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG); 199 - writeb(8, mtk_nor->base + MTK_NOR_CNT_REG); 200 - return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD); 201 - } 202 - 203 - static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor) 204 - { 205 - u8 reg; 206 - 207 - /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer 208 - * 0: pre-fetch buffer use for read 209 - * 1: pre-fetch buffer use for page program 210 - */ 211 - writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG); 212 - return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg, 213 - 0x01 == (reg & 0x01), 100, 10000); 214 - } 215 - 216 - static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor) 217 - { 218 - u8 reg; 219 - 220 - writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG); 221 - return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg, 222 - MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100, 223 - 10000); 224 - } 225 - 226 - static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor) 227 - { 228 - u8 val; 229 - struct spi_nor *nor = &mtk_nor->nor; 230 - 231 - val = readb(mtk_nor->base + MTK_NOR_DUAL_REG); 232 - 233 - switch (nor->addr_width) { 234 - case 3: 235 - val &= ~MTK_NOR_4B_ADDR_EN; 236 - break; 237 - case 4: 238 - val |= MTK_NOR_4B_ADDR_EN; 239 - break; 240 - default: 241 - dev_warn(mtk_nor->dev, "Unexpected address width %u.\n", 242 - nor->addr_width); 243 - break; 244 - } 245 - 246 - writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG); 247 - } 248 - 249 - static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr) 250 - { 251 - int i; 252 - 253 - mtk_nor_set_addr_width(mtk_nor); 254 - 255 - for (i = 0; i < 3; i++) { 256 - writeb(addr & 0xff, 
mtk_nor->base + MTK_NOR_RADR0_REG + i * 4); 257 - addr >>= 8; 258 - } 259 - /* Last register is non-contiguous */ 260 - writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG); 261 - } 262 - 263 - static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length, 264 - u_char *buffer) 265 - { 266 - int i, ret; 267 - int addr = (int)from; 268 - u8 *buf = (u8 *)buffer; 269 - struct mtk_nor *mtk_nor = nor->priv; 270 - 271 - /* set mode for fast read mode ,dual mode or quad mode */ 272 - mtk_nor_set_read_mode(mtk_nor); 273 - mtk_nor_set_addr(mtk_nor, addr); 274 - 275 - for (i = 0; i < length; i++) { 276 - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD); 277 - if (ret < 0) 278 - return ret; 279 - buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG); 280 - } 281 - return length; 282 - } 283 - 284 - static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor, 285 - int addr, int length, u8 *data) 286 - { 287 - int i, ret; 288 - 289 - mtk_nor_set_addr(mtk_nor, addr); 290 - 291 - for (i = 0; i < length; i++) { 292 - writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG); 293 - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD); 294 - if (ret < 0) 295 - return ret; 296 - } 297 - return 0; 298 - } 299 - 300 - static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr, 301 - const u8 *buf) 302 - { 303 - int i, bufidx, data; 304 - 305 - mtk_nor_set_addr(mtk_nor, addr); 306 - 307 - bufidx = 0; 308 - for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) { 309 - data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 | 310 - buf[bufidx + 1]<<8 | buf[bufidx]; 311 - bufidx += 4; 312 - writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG); 313 - } 314 - return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD); 315 - } 316 - 317 - static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len, 318 - const u_char *buf) 319 - { 320 - int ret; 321 - struct mtk_nor *mtk_nor = nor->priv; 322 - size_t i; 323 - 324 - ret = mtk_nor_write_buffer_enable(mtk_nor); 325 - if (ret < 0) { 326 - 
dev_warn(mtk_nor->dev, "write buffer enable failed!\n"); 327 - return ret; 328 - } 329 - 330 - for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) { 331 - ret = mtk_nor_write_buffer(mtk_nor, to, buf); 332 - if (ret < 0) { 333 - dev_err(mtk_nor->dev, "write buffer failed!\n"); 334 - return ret; 335 - } 336 - to += SFLASH_WRBUF_SIZE; 337 - buf += SFLASH_WRBUF_SIZE; 338 - } 339 - ret = mtk_nor_write_buffer_disable(mtk_nor); 340 - if (ret < 0) { 341 - dev_warn(mtk_nor->dev, "write buffer disable failed!\n"); 342 - return ret; 343 - } 344 - 345 - if (i < len) { 346 - ret = mtk_nor_write_single_byte(mtk_nor, to, 347 - (int)(len - i), (u8 *)buf); 348 - if (ret < 0) { 349 - dev_err(mtk_nor->dev, "write single byte failed!\n"); 350 - return ret; 351 - } 352 - } 353 - 354 - return len; 355 - } 356 - 357 - static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len) 358 - { 359 - int ret; 360 - struct mtk_nor *mtk_nor = nor->priv; 361 - 362 - switch (opcode) { 363 - case SPINOR_OP_RDSR: 364 - ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD); 365 - if (ret < 0) 366 - return ret; 367 - if (len == 1) 368 - *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG); 369 - else 370 - dev_err(mtk_nor->dev, "len should be 1 for read status!\n"); 371 - break; 372 - default: 373 - ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len); 374 - break; 375 - } 376 - return ret; 377 - } 378 - 379 - static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, 380 - size_t len) 381 - { 382 - int ret; 383 - struct mtk_nor *mtk_nor = nor->priv; 384 - 385 - switch (opcode) { 386 - case SPINOR_OP_WRSR: 387 - /* We only handle 1 byte */ 388 - ret = mtk_nor_wr_sr(mtk_nor, *buf); 389 - break; 390 - default: 391 - ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0); 392 - if (ret) 393 - dev_warn(mtk_nor->dev, "write reg failure!\n"); 394 - break; 395 - } 396 - return ret; 397 - } 398 - 399 - static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor) 
400 - { 401 - clk_disable_unprepare(mtk_nor->spi_clk); 402 - clk_disable_unprepare(mtk_nor->nor_clk); 403 - } 404 - 405 - static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor) 406 - { 407 - int ret; 408 - 409 - ret = clk_prepare_enable(mtk_nor->spi_clk); 410 - if (ret) 411 - return ret; 412 - 413 - ret = clk_prepare_enable(mtk_nor->nor_clk); 414 - if (ret) { 415 - clk_disable_unprepare(mtk_nor->spi_clk); 416 - return ret; 417 - } 418 - 419 - return 0; 420 - } 421 - 422 - static const struct spi_nor_controller_ops mtk_controller_ops = { 423 - .read_reg = mtk_nor_read_reg, 424 - .write_reg = mtk_nor_write_reg, 425 - .read = mtk_nor_read, 426 - .write = mtk_nor_write, 427 - }; 428 - 429 - static int mtk_nor_init(struct mtk_nor *mtk_nor, 430 - struct device_node *flash_node) 431 - { 432 - const struct spi_nor_hwcaps hwcaps = { 433 - .mask = SNOR_HWCAPS_READ | 434 - SNOR_HWCAPS_READ_FAST | 435 - SNOR_HWCAPS_READ_1_1_2 | 436 - SNOR_HWCAPS_PP, 437 - }; 438 - int ret; 439 - struct spi_nor *nor; 440 - 441 - /* initialize controller to accept commands */ 442 - writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG); 443 - 444 - nor = &mtk_nor->nor; 445 - nor->dev = mtk_nor->dev; 446 - nor->priv = mtk_nor; 447 - spi_nor_set_flash_node(nor, flash_node); 448 - nor->controller_ops = &mtk_controller_ops; 449 - 450 - nor->mtd.name = "mtk_nor"; 451 - /* initialized with NULL */ 452 - ret = spi_nor_scan(nor, NULL, &hwcaps); 453 - if (ret) 454 - return ret; 455 - 456 - return mtd_device_register(&nor->mtd, NULL, 0); 457 - } 458 - 459 - static int mtk_nor_drv_probe(struct platform_device *pdev) 460 - { 461 - struct device_node *flash_np; 462 - struct resource *res; 463 - int ret; 464 - struct mtk_nor *mtk_nor; 465 - 466 - if (!pdev->dev.of_node) { 467 - dev_err(&pdev->dev, "No DT found\n"); 468 - return -EINVAL; 469 - } 470 - 471 - mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL); 472 - if (!mtk_nor) 473 - return -ENOMEM; 474 - platform_set_drvdata(pdev, 
mtk_nor); 475 - 476 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 477 - mtk_nor->base = devm_ioremap_resource(&pdev->dev, res); 478 - if (IS_ERR(mtk_nor->base)) 479 - return PTR_ERR(mtk_nor->base); 480 - 481 - mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi"); 482 - if (IS_ERR(mtk_nor->spi_clk)) 483 - return PTR_ERR(mtk_nor->spi_clk); 484 - 485 - mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf"); 486 - if (IS_ERR(mtk_nor->nor_clk)) 487 - return PTR_ERR(mtk_nor->nor_clk); 488 - 489 - mtk_nor->dev = &pdev->dev; 490 - 491 - ret = mtk_nor_enable_clk(mtk_nor); 492 - if (ret) 493 - return ret; 494 - 495 - /* only support one attached flash */ 496 - flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); 497 - if (!flash_np) { 498 - dev_err(&pdev->dev, "no SPI flash device to configure\n"); 499 - ret = -ENODEV; 500 - goto nor_free; 501 - } 502 - ret = mtk_nor_init(mtk_nor, flash_np); 503 - 504 - nor_free: 505 - if (ret) 506 - mtk_nor_disable_clk(mtk_nor); 507 - 508 - return ret; 509 - } 510 - 511 - static int mtk_nor_drv_remove(struct platform_device *pdev) 512 - { 513 - struct mtk_nor *mtk_nor = platform_get_drvdata(pdev); 514 - 515 - mtk_nor_disable_clk(mtk_nor); 516 - 517 - return 0; 518 - } 519 - 520 - #ifdef CONFIG_PM_SLEEP 521 - static int mtk_nor_suspend(struct device *dev) 522 - { 523 - struct mtk_nor *mtk_nor = dev_get_drvdata(dev); 524 - 525 - mtk_nor_disable_clk(mtk_nor); 526 - 527 - return 0; 528 - } 529 - 530 - static int mtk_nor_resume(struct device *dev) 531 - { 532 - struct mtk_nor *mtk_nor = dev_get_drvdata(dev); 533 - 534 - return mtk_nor_enable_clk(mtk_nor); 535 - } 536 - 537 - static const struct dev_pm_ops mtk_nor_dev_pm_ops = { 538 - .suspend = mtk_nor_suspend, 539 - .resume = mtk_nor_resume, 540 - }; 541 - 542 - #define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops) 543 - #else 544 - #define MTK_NOR_DEV_PM_OPS NULL 545 - #endif 546 - 547 - static const struct of_device_id mtk_nor_of_ids[] = { 548 - { .compatible = 
"mediatek,mt8173-nor"}, 549 - { /* sentinel */ } 550 - }; 551 - MODULE_DEVICE_TABLE(of, mtk_nor_of_ids); 552 - 553 - static struct platform_driver mtk_nor_driver = { 554 - .probe = mtk_nor_drv_probe, 555 - .remove = mtk_nor_drv_remove, 556 - .driver = { 557 - .name = "mtk-nor", 558 - .pm = MTK_NOR_DEV_PM_OPS, 559 - .of_match_table = mtk_nor_of_ids, 560 - }, 561 - }; 562 - 563 - module_platform_driver(mtk_nor_driver); 564 - MODULE_LICENSE("GPL v2"); 565 - MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
+11
drivers/regulator/Kconfig
··· 107 107 
108 108 config REGULATOR_ANATOP 
109 109 tristate "Freescale i.MX on-chip ANATOP LDO regulators" 
110 + depends on ARCH_MXC || COMPILE_TEST 
110 111 depends on MFD_SYSCON 
111 112 help 
112 113 Say y here to support Freescale i.MX on-chip ANATOP LDOs 
··· 613 612 basic operations (get/set voltage, get/set operating mode) 
614 613 through the regulator interface. In addition it enables 
615 614 suspend-to-ram/standby transition. 
615 
616 + 
617 + config REGULATOR_MP5416 
618 + tristate "Monolithic MP5416 PMIC" 
619 + depends on I2C && OF 
620 + select REGMAP_I2C 
621 + help 
622 + Say y here to support the MP5416 PMIC. This will enable support for 
623 + the software controllable 4 buck and 4 LDO regulators. 
624 + Say M here if you want to include support for the regulator as a 
625 + module. 
616 625 
617 626 config REGULATOR_MP8859 
618 627 tristate "MPS MP8859 regulator driver"
+1
drivers/regulator/Makefile
··· 78 78 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o 79 79 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o 80 80 obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o 81 + obj-$(CONFIG_REGULATOR_MP5416) += mp5416.o 81 82 obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o 82 83 obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o 83 84 obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
+7 -3
drivers/regulator/anatop-regulator.c
··· 305 305 /* register regulator */ 306 306 rdev = devm_regulator_register(dev, rdesc, &config); 307 307 if (IS_ERR(rdev)) { 308 - dev_err(dev, "failed to register %s\n", 309 - rdesc->name); 310 - return PTR_ERR(rdev); 308 + ret = PTR_ERR(rdev); 309 + if (ret == -EPROBE_DEFER) 310 + dev_dbg(dev, "failed to register %s, deferring...\n", 311 + rdesc->name); 312 + else 313 + dev_err(dev, "failed to register %s\n", rdesc->name); 314 + return ret; 311 315 } 312 316 313 317 platform_set_drvdata(pdev, rdev);
+2 -4
drivers/regulator/axp20x-regulator.c
··· 381 381 mask = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK | 382 382 AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK; 383 383 enable = (ramp > 0) ? 384 - AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN : 385 - !AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN; 384 + AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN : 0; 386 385 break; 387 386 } 388 387 ··· 392 393 mask = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK | 393 394 AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK; 394 395 enable = (ramp > 0) ? 395 - AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN : 396 - !AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN; 396 + AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN : 0; 397 397 break; 398 398 } 399 399
+1 -1
drivers/regulator/da9062-regulator.c
··· 73 73 int irq_ldo_lim; 74 74 unsigned n_regulators; 75 75 /* Array size to be defined during init. Keep at end. */ 76 - struct da9062_regulator regulator[0]; 76 + struct da9062_regulator regulator[]; 77 77 }; 78 78 79 79 /* Regulator operations */
+37 -29
drivers/regulator/da9063-regulator.c
··· 66 66 }; 67 67 68 68 struct da9063_regulators_pdata { 69 - unsigned n_regulators; 69 + unsigned int n_regulators; 70 70 struct da9063_regulator_data *regulator_data; 71 71 }; 72 72 ··· 131 131 /* Defines asignment of regulators info table to chip model */ 132 132 struct da9063_dev_model { 133 133 const struct da9063_regulator_info *regulator_info; 134 - unsigned n_regulators; 134 + unsigned int n_regulators; 135 135 enum da9063_type type; 136 136 }; 137 137 ··· 150 150 151 151 /* Encapsulates all information for the regulators driver */ 152 152 struct da9063_regulators { 153 - unsigned n_regulators; 153 + unsigned int n_regulators; 154 154 /* Array size to be defined during init. Keep at end. */ 155 - struct da9063_regulator regulator[0]; 155 + struct da9063_regulator regulator[]; 156 156 }; 157 157 158 158 /* BUCK modes for DA9063 */ ··· 165 165 166 166 /* Regulator operations */ 167 167 168 - /* Current limits array (in uA) for BCORE1, BCORE2, BPRO. 169 - Entry indexes corresponds to register values. */ 168 + /* 169 + * Current limits array (in uA) for BCORE1, BCORE2, BPRO. 170 + * Entry indexes corresponds to register values. 171 + */ 170 172 static const unsigned int da9063_buck_a_limits[] = { 171 173 500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000, 172 174 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000 173 175 }; 174 176 175 - /* Current limits array (in uA) for BMEM, BIO, BPERI. 176 - Entry indexes corresponds to register values. */ 177 + /* 178 + * Current limits array (in uA) for BMEM, BIO, BPERI. 179 + * Entry indexes corresponds to register values. 180 + */ 177 181 static const unsigned int da9063_buck_b_limits[] = { 178 182 1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000, 179 183 2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000 180 184 }; 181 185 182 - /* Current limits array (in uA) for merged BCORE1 and BCORE2. 183 - Entry indexes corresponds to register values. 
*/ 186 + /* 187 + * Current limits array (in uA) for merged BCORE1 and BCORE2. 188 + * Entry indexes corresponds to register values. 189 + */ 184 190 static const unsigned int da9063_bcores_merged_limits[] = { 185 191 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000, 186 192 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000 187 193 }; 188 194 189 - /* Current limits array (in uA) for merged BMEM and BIO. 190 - Entry indexes corresponds to register values. */ 195 + /* 196 + * Current limits array (in uA) for merged BMEM and BIO. 197 + * Entry indexes corresponds to register values. 198 + */ 191 199 static const unsigned int da9063_bmem_bio_merged_limits[] = { 192 200 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 193 201 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000 194 202 }; 195 203 196 - static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode) 204 + static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) 197 205 { 198 206 struct da9063_regulator *regl = rdev_get_drvdata(rdev); 199 - unsigned val; 207 + unsigned int val; 200 208 201 209 switch (mode) { 202 210 case REGULATOR_MODE_FAST: ··· 229 221 * There are 3 modes to map to: FAST, NORMAL, and STANDBY. 230 222 */ 231 223 232 - static unsigned da9063_buck_get_mode(struct regulator_dev *rdev) 224 + static unsigned int da9063_buck_get_mode(struct regulator_dev *rdev) 233 225 { 234 226 struct da9063_regulator *regl = rdev_get_drvdata(rdev); 235 227 struct regmap_field *field; ··· 279 271 * There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state. 
280 272 */ 281 273 282 - static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode) 274 + static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) 283 275 { 284 276 struct da9063_regulator *regl = rdev_get_drvdata(rdev); 285 - unsigned val; 277 + unsigned int val; 286 278 287 279 switch (mode) { 288 280 case REGULATOR_MODE_NORMAL: ··· 298 290 return regmap_field_write(regl->sleep, val); 299 291 } 300 292 301 - static unsigned da9063_ldo_get_mode(struct regulator_dev *rdev) 293 + static unsigned int da9063_ldo_get_mode(struct regulator_dev *rdev) 302 294 { 303 295 struct da9063_regulator *regl = rdev_get_drvdata(rdev); 304 296 struct regmap_field *field; ··· 391 383 return regmap_field_write(regl->suspend, 0); 392 384 } 393 385 394 - static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) 386 + static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, 387 + unsigned int mode) 395 388 { 396 389 struct da9063_regulator *regl = rdev_get_drvdata(rdev); 397 390 int val; ··· 414 405 return regmap_field_write(regl->mode, val); 415 406 } 416 407 417 - static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, unsigned mode) 408 + static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, 409 + unsigned int mode) 418 410 { 419 411 struct da9063_regulator *regl = rdev_get_drvdata(rdev); 420 - unsigned val; 412 + unsigned int val; 421 413 422 414 switch (mode) { 423 415 case REGULATOR_MODE_NORMAL: ··· 603 593 struct da9063_regulators *regulators = data; 604 594 struct da9063 *hw = regulators->regulator[0].hw; 605 595 struct da9063_regulator *regl; 606 - int bits, i , ret; 596 + int bits, i, ret; 607 597 608 598 ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits); 609 599 if (ret < 0) ··· 615 605 continue; 616 606 617 607 if (BIT(regl->info->oc_event.lsb) & bits) { 618 - regulator_lock(regl->rdev); 608 + regulator_lock(regl->rdev); 619 609 regulator_notifier_call_chain(regl->rdev, 620 
610 REGULATOR_EVENT_OVER_CURRENT, NULL); 621 - regulator_unlock(regl->rdev); 611 + regulator_unlock(regl->rdev); 622 612 } 623 613 } 624 614 ··· 843 833 844 834 if (regl->info->suspend_sleep.reg) { 845 835 regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, 846 - da9063->regmap, regl->info->suspend_sleep); 836 + da9063->regmap, regl->info->suspend_sleep); 847 837 if (IS_ERR(regl->suspend_sleep)) 848 838 return PTR_ERR(regl->suspend_sleep); 849 839 } ··· 877 867 NULL, da9063_ldo_lim_event, 878 868 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 879 869 "LDO_LIM", regulators); 880 - if (ret) { 870 + if (ret) 881 871 dev_err(&pdev->dev, "Failed to request LDO_LIM IRQ.\n"); 882 - return ret; 883 - } 884 872 885 - return 0; 873 + return ret; 886 874 } 887 875 888 876 static struct platform_driver da9063_regulator_driver = {
+245
drivers/regulator/mp5416.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // 3 + // mp5416.c - regulator driver for mps mp5416 4 + // 5 + // Copyright 2020 Monolithic Power Systems, Inc 6 + // 7 + // Author: Saravanan Sekar <sravanhome@gmail.com> 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/module.h> 11 + #include <linux/init.h> 12 + #include <linux/err.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/regmap.h> 15 + #include <linux/regulator/driver.h> 16 + #include <linux/i2c.h> 17 + 18 + #define MP5416_REG_CTL0 0x00 19 + #define MP5416_REG_CTL1 0x01 20 + #define MP5416_REG_CTL2 0x02 21 + #define MP5416_REG_ILIM 0x03 22 + #define MP5416_REG_BUCK1 0x04 23 + #define MP5416_REG_BUCK2 0x05 24 + #define MP5416_REG_BUCK3 0x06 25 + #define MP5416_REG_BUCK4 0x07 26 + #define MP5416_REG_LDO1 0x08 27 + #define MP5416_REG_LDO2 0x09 28 + #define MP5416_REG_LDO3 0x0a 29 + #define MP5416_REG_LDO4 0x0b 30 + 31 + #define MP5416_REGULATOR_EN BIT(7) 32 + #define MP5416_MASK_VSET 0x7f 33 + #define MP5416_MASK_BUCK1_ILIM 0xc0 34 + #define MP5416_MASK_BUCK2_ILIM 0x0c 35 + #define MP5416_MASK_BUCK3_ILIM 0x30 36 + #define MP5416_MASK_BUCK4_ILIM 0x03 37 + #define MP5416_MASK_DVS_SLEWRATE 0xc0 38 + 39 + /* values in uV */ 40 + #define MP5416_VOLT1_MIN 600000 41 + #define MP5416_VOLT1_MAX 2187500 42 + #define MP5416_VOLT1_STEP 12500 43 + #define MP5416_VOLT2_MIN 800000 44 + #define MP5416_VOLT2_MAX 3975000 45 + #define MP5416_VOLT2_STEP 25000 46 + 47 + #define MP5416_VOLT1_RANGE \ 48 + ((MP5416_VOLT1_MAX - MP5416_VOLT1_MIN)/MP5416_VOLT1_STEP + 1) 49 + #define MP5416_VOLT2_RANGE \ 50 + ((MP5416_VOLT2_MAX - MP5416_VOLT2_MIN)/MP5416_VOLT2_STEP + 1) 51 + 52 + #define MP5416BUCK(_name, _id, _ilim, _dreg, _dval, _vsel) \ 53 + [MP5416_BUCK ## _id] = { \ 54 + .id = MP5416_BUCK ## _id, \ 55 + .name = _name, \ 56 + .of_match = _name, \ 57 + .regulators_node = "regulators", \ 58 + .ops = &mp5416_buck_ops, \ 59 + .min_uV = MP5416_VOLT ##_vsel## _MIN, \ 60 + .uV_step = MP5416_VOLT ##_vsel## _STEP, \ 61 
+ .n_voltages = MP5416_VOLT ##_vsel## _RANGE, \ 62 + .curr_table = _ilim, \ 63 + .n_current_limits = ARRAY_SIZE(_ilim), \ 64 + .csel_reg = MP5416_REG_ILIM, \ 65 + .csel_mask = MP5416_MASK_BUCK ## _id ##_ILIM, \ 66 + .vsel_reg = MP5416_REG_BUCK ## _id, \ 67 + .vsel_mask = MP5416_MASK_VSET, \ 68 + .enable_reg = MP5416_REG_BUCK ## _id, \ 69 + .enable_mask = MP5416_REGULATOR_EN, \ 70 + .active_discharge_on = _dval, \ 71 + .active_discharge_reg = _dreg, \ 72 + .active_discharge_mask = _dval, \ 73 + .owner = THIS_MODULE, \ 74 + } 75 + 76 + #define MP5416LDO(_name, _id, _dval) \ 77 + [MP5416_LDO ## _id] = { \ 78 + .id = MP5416_LDO ## _id, \ 79 + .name = _name, \ 80 + .of_match = _name, \ 81 + .regulators_node = "regulators", \ 82 + .ops = &mp5416_ldo_ops, \ 83 + .min_uV = MP5416_VOLT2_MIN, \ 84 + .uV_step = MP5416_VOLT2_STEP, \ 85 + .n_voltages = MP5416_VOLT2_RANGE, \ 86 + .vsel_reg = MP5416_REG_LDO ##_id, \ 87 + .vsel_mask = MP5416_MASK_VSET, \ 88 + .enable_reg = MP5416_REG_LDO ##_id, \ 89 + .enable_mask = MP5416_REGULATOR_EN, \ 90 + .active_discharge_on = _dval, \ 91 + .active_discharge_reg = MP5416_REG_CTL2, \ 92 + .active_discharge_mask = _dval, \ 93 + .owner = THIS_MODULE, \ 94 + } 95 + 96 + enum mp5416_regulators { 97 + MP5416_BUCK1, 98 + MP5416_BUCK2, 99 + MP5416_BUCK3, 100 + MP5416_BUCK4, 101 + MP5416_LDO1, 102 + MP5416_LDO2, 103 + MP5416_LDO3, 104 + MP5416_LDO4, 105 + MP5416_MAX_REGULATORS, 106 + }; 107 + 108 + static const struct regmap_config mp5416_regmap_config = { 109 + .reg_bits = 8, 110 + .val_bits = 8, 111 + .max_register = 0x0d, 112 + }; 113 + 114 + /* Current limits array (in uA) 115 + * ILIM1 & ILIM3 116 + */ 117 + static const unsigned int mp5416_I_limits1[] = { 118 + 3800000, 4600000, 5600000, 6800000 119 + }; 120 + 121 + /* ILIM2 & ILIM4 */ 122 + static const unsigned int mp5416_I_limits2[] = { 123 + 2200000, 3200000, 4200000, 5200000 124 + }; 125 + 126 + static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay); 127 + 128 + 
static const struct regulator_ops mp5416_ldo_ops = { 129 + .enable = regulator_enable_regmap, 130 + .disable = regulator_disable_regmap, 131 + .is_enabled = regulator_is_enabled_regmap, 132 + .list_voltage = regulator_list_voltage_linear, 133 + .map_voltage = regulator_map_voltage_linear, 134 + .get_voltage_sel = regulator_get_voltage_sel_regmap, 135 + .set_voltage_sel = regulator_set_voltage_sel_regmap, 136 + .set_active_discharge = regulator_set_active_discharge_regmap, 137 + }; 138 + 139 + static const struct regulator_ops mp5416_buck_ops = { 140 + .enable = regulator_enable_regmap, 141 + .disable = regulator_disable_regmap, 142 + .is_enabled = regulator_is_enabled_regmap, 143 + .list_voltage = regulator_list_voltage_linear, 144 + .map_voltage = regulator_map_voltage_linear, 145 + .get_voltage_sel = regulator_get_voltage_sel_regmap, 146 + .set_voltage_sel = regulator_set_voltage_sel_regmap, 147 + .set_active_discharge = regulator_set_active_discharge_regmap, 148 + .get_current_limit = regulator_get_current_limit_regmap, 149 + .set_current_limit = regulator_set_current_limit_regmap, 150 + .set_ramp_delay = mp5416_set_ramp_delay, 151 + }; 152 + 153 + static struct regulator_desc mp5416_regulators_desc[MP5416_MAX_REGULATORS] = { 154 + MP5416BUCK("buck1", 1, mp5416_I_limits1, MP5416_REG_CTL1, BIT(0), 1), 155 + MP5416BUCK("buck2", 2, mp5416_I_limits2, MP5416_REG_CTL1, BIT(1), 2), 156 + MP5416BUCK("buck3", 3, mp5416_I_limits1, MP5416_REG_CTL1, BIT(2), 1), 157 + MP5416BUCK("buck4", 4, mp5416_I_limits2, MP5416_REG_CTL2, BIT(5), 2), 158 + MP5416LDO("ldo1", 1, BIT(4)), 159 + MP5416LDO("ldo2", 2, BIT(3)), 160 + MP5416LDO("ldo3", 3, BIT(2)), 161 + MP5416LDO("ldo4", 4, BIT(1)), 162 + }; 163 + 164 + /* 165 + * DVS ramp rate BUCK1 to BUCK4 166 + * 00: 32mV/us 167 + * 01: 16mV/us 168 + * 10: 8mV/us 169 + * 11: 4mV/us 170 + */ 171 + static int mp5416_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) 172 + { 173 + unsigned int ramp_val; 174 + 175 + if (ramp_delay > 32000 
|| ramp_delay < 0) 176 + return -EINVAL; 177 + 178 + if (ramp_delay <= 4000) 179 + ramp_val = 3; 180 + else if (ramp_delay <= 8000) 181 + ramp_val = 2; 182 + else if (ramp_delay <= 16000) 183 + ramp_val = 1; 184 + else 185 + ramp_val = 0; 186 + 187 + return regmap_update_bits(rdev->regmap, MP5416_REG_CTL2, 188 + MP5416_MASK_DVS_SLEWRATE, ramp_val << 6); 189 + } 190 + 191 + static int mp5416_i2c_probe(struct i2c_client *client) 192 + { 193 + struct device *dev = &client->dev; 194 + struct regulator_config config = { NULL, }; 195 + struct regulator_dev *rdev; 196 + struct regmap *regmap; 197 + int i; 198 + 199 + regmap = devm_regmap_init_i2c(client, &mp5416_regmap_config); 200 + if (IS_ERR(regmap)) { 201 + dev_err(dev, "Failed to allocate regmap!\n"); 202 + return PTR_ERR(regmap); 203 + } 204 + 205 + config.dev = dev; 206 + config.regmap = regmap; 207 + 208 + for (i = 0; i < MP5416_MAX_REGULATORS; i++) { 209 + rdev = devm_regulator_register(dev, 210 + &mp5416_regulators_desc[i], 211 + &config); 212 + if (IS_ERR(rdev)) { 213 + dev_err(dev, "Failed to register regulator!\n"); 214 + return PTR_ERR(rdev); 215 + } 216 + } 217 + 218 + return 0; 219 + } 220 + 221 + static const struct of_device_id mp5416_of_match[] = { 222 + { .compatible = "mps,mp5416" }, 223 + {}, 224 + }; 225 + MODULE_DEVICE_TABLE(of, mp5416_of_match); 226 + 227 + static const struct i2c_device_id mp5416_id[] = { 228 + { "mp5416", }, 229 + { }, 230 + }; 231 + MODULE_DEVICE_TABLE(i2c, mp5416_id); 232 + 233 + static struct i2c_driver mp5416_regulator_driver = { 234 + .driver = { 235 + .name = "mp5416", 236 + .of_match_table = of_match_ptr(mp5416_of_match), 237 + }, 238 + .probe_new = mp5416_i2c_probe, 239 + .id_table = mp5416_id, 240 + }; 241 + module_i2c_driver(mp5416_regulator_driver); 242 + 243 + MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>"); 244 + MODULE_DESCRIPTION("MP5416 PMIC regulator driver"); 245 + MODULE_LICENSE("GPL");
+1
drivers/regulator/mp8859.c
··· 95 95 .id = 0, 96 96 .type = REGULATOR_VOLTAGE, 97 97 .name = "mp8859_dcdc", 98 + .supply_name = "vin", 98 99 .of_match = of_match_ptr("mp8859_dcdc"), 99 100 .n_voltages = VOL_MAX_IDX + 1, 100 101 .linear_ranges = mp8859_dcdc_ranges,
+5 -1
drivers/regulator/pwm-regulator.c
··· 354 354 drvdata->pwm = devm_pwm_get(&pdev->dev, NULL); 355 355 if (IS_ERR(drvdata->pwm)) { 356 356 ret = PTR_ERR(drvdata->pwm); 357 - dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret); 357 + if (ret == -EPROBE_DEFER) 358 + dev_dbg(&pdev->dev, 359 + "Failed to get PWM, deferring probe\n"); 360 + else 361 + dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret); 358 362 return ret; 359 363 } 360 364
+9
drivers/regulator/qcom_rpm-regulator.c
··· 925 925 { } 926 926 }; 927 927 928 + static const struct rpm_regulator_data rpm_smb208_regulators[] = { 929 + { "s1a", QCOM_RPM_SMB208_S1a, &smb208_smps, "vin_s1a" }, 930 + { "s1b", QCOM_RPM_SMB208_S1b, &smb208_smps, "vin_s1b" }, 931 + { "s2a", QCOM_RPM_SMB208_S2a, &smb208_smps, "vin_s2a" }, 932 + { "s2b", QCOM_RPM_SMB208_S2b, &smb208_smps, "vin_s2b" }, 933 + { } 934 + }; 935 + 928 936 static const struct of_device_id rpm_of_match[] = { 929 937 { .compatible = "qcom,rpm-pm8018-regulators", 930 938 .data = &rpm_pm8018_regulators }, 931 939 { .compatible = "qcom,rpm-pm8058-regulators", .data = &rpm_pm8058_regulators }, 932 940 { .compatible = "qcom,rpm-pm8901-regulators", .data = &rpm_pm8901_regulators }, 933 941 { .compatible = "qcom,rpm-pm8921-regulators", .data = &rpm_pm8921_regulators }, 942 + { .compatible = "qcom,rpm-smb208-regulators", .data = &rpm_smb208_regulators }, 934 943 { } 935 944 }; 936 945 MODULE_DEVICE_TABLE(of, rpm_of_match);
+36 -1
drivers/spi/Kconfig
··· 62 62 help 63 63 This is the driver for the Altera SPI Controller. 64 64 65 + config SPI_AR934X 66 + tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver" 67 + depends on ATH79 || COMPILE_TEST 68 + help 69 + This enables support for the SPI controller present on the 70 + Qualcomm Atheros AR934X/QCA95XX SoCs. 71 + 65 72 config SPI_ATH79 66 73 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" 67 74 depends on ATH79 || COMPILE_TEST ··· 271 264 has only been tested with m25p80 type chips. The hardware has no 272 265 support for other types of SPI peripherals. 273 266 267 + config SPI_FSI 268 + tristate "FSI SPI driver" 269 + depends on FSI 270 + help 271 + This enables support for the driver for FSI bus attached SPI 272 + controllers. 273 + 274 274 config SPI_FSL_LPSPI 275 275 tristate "Freescale i.MX LPSPI controller" 276 276 depends on ARCH_MXC || COMPILE_TEST ··· 299 285 tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets" 300 286 depends on (ARM64 && ACPI) || COMPILE_TEST 301 287 depends on HAS_IOMEM 302 - select CONFIG_MTD_SPI_NOR 303 288 help 304 289 This enables support for HiSilicon v3xx SPI-NOR flash controller 305 290 found in hi16xx chipsets. ··· 428 415 429 416 config SPI_MESON_SPICC 430 417 tristate "Amlogic Meson SPICC controller" 418 + depends on COMMON_CLK 431 419 depends on ARCH_MESON || COMPILE_TEST 432 420 help 433 421 This enables master mode support for the SPICC (SPI communication ··· 456 442 depends on RALINK || COMPILE_TEST 457 443 help 458 444 This selects a driver for the MediaTek MT7621 SPI Controller. 445 + 446 + config SPI_MTK_NOR 447 + tristate "MediaTek SPI NOR controller" 448 + depends on ARCH_MEDIATEK || COMPILE_TEST 449 + help 450 + This enables support for SPI NOR controller found on MediaTek 451 + ARM SoCs. This is a controller specifically for SPI-NOR flash. 
452 + It can perform generic SPI transfers up to 6 bytes via generic 453 + SPI interface as well as several SPI-NOR specific instructions 454 + via SPI MEM interface. 459 455 460 456 config SPI_NPCM_FIU 461 457 tristate "Nuvoton NPCM FLASH Interface Unit" ··· 913 889 # 914 890 # Add new SPI master controllers in alphabetical order above this line 915 891 # 892 + 893 + comment "SPI Multiplexer support" 894 + 895 + config SPI_MUX 896 + tristate "SPI multiplexer support" 897 + select MULTIPLEXER 898 + help 899 + This adds support for SPI multiplexers. Each SPI mux will be 900 + accessible as a SPI controller, the devices behind the mux will appear 901 + to be chip selects on this controller. It is still necessary to 902 + select one or more specific mux-controller drivers. 916 903 917 904 # 918 905 # There are lots of SPI device types, with sensors and memory
+4
drivers/spi/Makefile
··· 9 9 # config declarations into driver model code 10 10 obj-$(CONFIG_SPI_MASTER) += spi.o 11 11 obj-$(CONFIG_SPI_MEM) += spi-mem.o 12 + obj-$(CONFIG_SPI_MUX) += spi-mux.o 12 13 obj-$(CONFIG_SPI_SPIDEV) += spidev.o 13 14 obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o 14 15 15 16 # SPI master controller drivers (bus) 16 17 obj-$(CONFIG_SPI_ALTERA) += spi-altera.o 18 + obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o 17 19 obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o 18 20 obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o 19 21 obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o ··· 42 40 obj-$(CONFIG_SPI_EFM32) += spi-efm32.o 43 41 obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o 44 42 obj-$(CONFIG_SPI_FALCON) += spi-falcon.o 43 + obj-$(CONFIG_SPI_FSI) += spi-fsi.o 45 44 obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o 46 45 obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o 47 46 obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o ··· 65 62 obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o 66 63 obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o 67 64 obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o 65 + obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o 68 66 obj-$(CONFIG_SPI_MXIC) += spi-mxic.o 69 67 obj-$(CONFIG_SPI_MXS) += spi-mxs.o 70 68 obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
+97 -22
drivers/spi/atmel-quadspi.c
··· 173 173 { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, 174 174 }; 175 175 176 + #ifdef VERBOSE_DEBUG 177 + static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) 178 + { 179 + switch (offset) { 180 + case QSPI_CR: 181 + return "CR"; 182 + case QSPI_MR: 183 + return "MR"; 184 + case QSPI_RD: 185 + return "MR"; 186 + case QSPI_TD: 187 + return "TD"; 188 + case QSPI_SR: 189 + return "SR"; 190 + case QSPI_IER: 191 + return "IER"; 192 + case QSPI_IDR: 193 + return "IDR"; 194 + case QSPI_IMR: 195 + return "IMR"; 196 + case QSPI_SCR: 197 + return "SCR"; 198 + case QSPI_IAR: 199 + return "IAR"; 200 + case QSPI_ICR: 201 + return "ICR/WICR"; 202 + case QSPI_IFR: 203 + return "IFR"; 204 + case QSPI_RICR: 205 + return "RICR"; 206 + case QSPI_SMR: 207 + return "SMR"; 208 + case QSPI_SKR: 209 + return "SKR"; 210 + case QSPI_WPMR: 211 + return "WPMR"; 212 + case QSPI_WPSR: 213 + return "WPSR"; 214 + case QSPI_VERSION: 215 + return "VERSION"; 216 + default: 217 + snprintf(tmp, sz, "0x%02x", offset); 218 + break; 219 + } 220 + 221 + return tmp; 222 + } 223 + #endif /* VERBOSE_DEBUG */ 224 + 225 + static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset) 226 + { 227 + u32 value = readl_relaxed(aq->regs + offset); 228 + 229 + #ifdef VERBOSE_DEBUG 230 + char tmp[8]; 231 + 232 + dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value, 233 + atmel_qspi_reg_name(offset, tmp, sizeof(tmp))); 234 + #endif /* VERBOSE_DEBUG */ 235 + 236 + return value; 237 + } 238 + 239 + static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset) 240 + { 241 + #ifdef VERBOSE_DEBUG 242 + char tmp[8]; 243 + 244 + dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value, 245 + atmel_qspi_reg_name(offset, tmp, sizeof(tmp))); 246 + #endif /* VERBOSE_DEBUG */ 247 + 248 + writel_relaxed(value, aq->regs + offset); 249 + } 250 + 176 251 static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op, 177 252 const struct atmel_qspi_mode *mode) 178 253 { ··· 368 293 * Serial 
Memory Mode (SMM). 369 294 */ 370 295 if (aq->mr != QSPI_MR_SMM) { 371 - writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); 296 + atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); 372 297 aq->mr = QSPI_MR_SMM; 373 298 } 374 299 375 300 /* Clear pending interrupts */ 376 - (void)readl_relaxed(aq->regs + QSPI_SR); 301 + (void)atmel_qspi_read(aq, QSPI_SR); 377 302 378 303 if (aq->caps->has_ricr) { 379 304 if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN) 380 305 ifr |= QSPI_IFR_APBTFRTYP_READ; 381 306 382 307 /* Set QSPI Instruction Frame registers */ 383 - writel_relaxed(iar, aq->regs + QSPI_IAR); 308 + atmel_qspi_write(iar, aq, QSPI_IAR); 384 309 if (op->data.dir == SPI_MEM_DATA_IN) 385 - writel_relaxed(icr, aq->regs + QSPI_RICR); 310 + atmel_qspi_write(icr, aq, QSPI_RICR); 386 311 else 387 - writel_relaxed(icr, aq->regs + QSPI_WICR); 388 - writel_relaxed(ifr, aq->regs + QSPI_IFR); 312 + atmel_qspi_write(icr, aq, QSPI_WICR); 313 + atmel_qspi_write(ifr, aq, QSPI_IFR); 389 314 } else { 390 315 if (op->data.dir == SPI_MEM_DATA_OUT) 391 316 ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; 392 317 393 318 /* Set QSPI Instruction Frame registers */ 394 - writel_relaxed(iar, aq->regs + QSPI_IAR); 395 - writel_relaxed(icr, aq->regs + QSPI_ICR); 396 - writel_relaxed(ifr, aq->regs + QSPI_IFR); 319 + atmel_qspi_write(iar, aq, QSPI_IAR); 320 + atmel_qspi_write(icr, aq, QSPI_ICR); 321 + atmel_qspi_write(ifr, aq, QSPI_IFR); 397 322 } 398 323 399 324 return 0; ··· 420 345 /* Skip to the final steps if there is no data */ 421 346 if (op->data.nbytes) { 422 347 /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ 423 - (void)readl_relaxed(aq->regs + QSPI_IFR); 348 + (void)atmel_qspi_read(aq, QSPI_IFR); 424 349 425 350 /* Send/Receive data */ 426 351 if (op->data.dir == SPI_MEM_DATA_IN) ··· 431 356 op->data.nbytes); 432 357 433 358 /* Release the chip-select */ 434 - writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR); 359 + atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); 435 360 } 
436 361 437 362 /* Poll INSTRuction End status */ 438 - sr = readl_relaxed(aq->regs + QSPI_SR); 363 + sr = atmel_qspi_read(aq, QSPI_SR); 439 364 if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) 440 365 return err; 441 366 442 367 /* Wait for INSTRuction End interrupt */ 443 368 reinit_completion(&aq->cmd_completion); 444 369 aq->pending = sr & QSPI_SR_CMD_COMPLETED; 445 - writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER); 370 + atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER); 446 371 if (!wait_for_completion_timeout(&aq->cmd_completion, 447 372 msecs_to_jiffies(1000))) 448 373 err = -ETIMEDOUT; 449 - writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR); 374 + atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR); 450 375 451 376 return err; 452 377 } ··· 485 410 scbr--; 486 411 487 412 aq->scr = QSPI_SCR_SCBR(scbr); 488 - writel_relaxed(aq->scr, aq->regs + QSPI_SCR); 413 + atmel_qspi_write(aq->scr, aq, QSPI_SCR); 489 414 490 415 return 0; 491 416 } ··· 493 418 static void atmel_qspi_init(struct atmel_qspi *aq) 494 419 { 495 420 /* Reset the QSPI controller */ 496 - writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); 421 + atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); 497 422 498 423 /* Set the QSPI controller by default in Serial Memory Mode */ 499 - writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); 424 + atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); 500 425 aq->mr = QSPI_MR_SMM; 501 426 502 427 /* Enable the QSPI controller */ 503 - writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); 428 + atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); 504 429 } 505 430 506 431 static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) ··· 508 433 struct atmel_qspi *aq = dev_id; 509 434 u32 status, mask, pending; 510 435 511 - status = readl_relaxed(aq->regs + QSPI_SR); 512 - mask = readl_relaxed(aq->regs + QSPI_IMR); 436 + status = atmel_qspi_read(aq, QSPI_SR); 437 + mask = atmel_qspi_read(aq, QSPI_IMR); 513 438 pending = status & mask; 514 439 515 
440 if (!pending) ··· 644 569 struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); 645 570 646 571 spi_unregister_controller(ctrl); 647 - writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR); 572 + atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR); 648 573 clk_disable_unprepare(aq->qspick); 649 574 clk_disable_unprepare(aq->pclk); 650 575 return 0; ··· 671 596 672 597 atmel_qspi_init(aq); 673 598 674 - writel_relaxed(aq->scr, aq->regs + QSPI_SCR); 599 + atmel_qspi_write(aq->scr, aq, QSPI_SCR); 675 600 676 601 return 0; 677 602 }
+235
drivers/spi/spi-ar934x.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // SPI controller driver for Qualcomm Atheros AR934x/QCA95xx SoCs 4 + // 5 + // Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com> 6 + // 7 + // Based on spi-mt7621.c: 8 + // Copyright (C) 2011 Sergiy <piratfm@gmail.com> 9 + // Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org> 10 + // Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name> 11 + 12 + #include <linux/clk.h> 13 + #include <linux/io.h> 14 + #include <linux/iopoll.h> 15 + #include <linux/kernel.h> 16 + #include <linux/module.h> 17 + #include <linux/of_device.h> 18 + #include <linux/spi/spi.h> 19 + 20 + #define DRIVER_NAME "spi-ar934x" 21 + 22 + #define AR934X_SPI_REG_FS 0x00 23 + #define AR934X_SPI_ENABLE BIT(0) 24 + 25 + #define AR934X_SPI_REG_IOC 0x08 26 + #define AR934X_SPI_IOC_INITVAL 0x70000 27 + 28 + #define AR934X_SPI_REG_CTRL 0x04 29 + #define AR934X_SPI_CLK_MASK GENMASK(5, 0) 30 + 31 + #define AR934X_SPI_DATAOUT 0x10 32 + 33 + #define AR934X_SPI_REG_SHIFT_CTRL 0x14 34 + #define AR934X_SPI_SHIFT_EN BIT(31) 35 + #define AR934X_SPI_SHIFT_CS(n) BIT(28 + (n)) 36 + #define AR934X_SPI_SHIFT_TERM 26 37 + #define AR934X_SPI_SHIFT_VAL(cs, term, count) \ 38 + (AR934X_SPI_SHIFT_EN | AR934X_SPI_SHIFT_CS(cs) | \ 39 + (term) << AR934X_SPI_SHIFT_TERM | (count)) 40 + 41 + #define AR934X_SPI_DATAIN 0x18 42 + 43 + struct ar934x_spi { 44 + struct spi_controller *ctlr; 45 + void __iomem *base; 46 + struct clk *clk; 47 + unsigned int clk_freq; 48 + }; 49 + 50 + static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq) 51 + { 52 + int div = DIV_ROUND_UP(sp->clk_freq, freq * 2) - 1; 53 + 54 + if (div < 0) 55 + return 0; 56 + else if (div > AR934X_SPI_CLK_MASK) 57 + return -EINVAL; 58 + else 59 + return div; 60 + } 61 + 62 + static int ar934x_spi_setup(struct spi_device *spi) 63 + { 64 + struct ar934x_spi *sp = spi_controller_get_devdata(spi->master); 65 + 66 + if ((spi->max_speed_hz == 0) || 67 + (spi->max_speed_hz > (sp->clk_freq / 
2))) { 68 + spi->max_speed_hz = sp->clk_freq / 2; 69 + } else if (spi->max_speed_hz < (sp->clk_freq / 128)) { 70 + dev_err(&spi->dev, "spi clock is too low\n"); 71 + return -EINVAL; 72 + } 73 + 74 + return 0; 75 + } 76 + 77 + static int ar934x_spi_transfer_one_message(struct spi_controller *master, 78 + struct spi_message *m) 79 + { 80 + struct ar934x_spi *sp = spi_controller_get_devdata(master); 81 + struct spi_transfer *t = NULL; 82 + struct spi_device *spi = m->spi; 83 + unsigned long trx_done, trx_cur; 84 + int stat = 0; 85 + u8 term = 0; 86 + int div, i; 87 + u32 reg; 88 + const u8 *tx_buf; 89 + u8 *buf; 90 + 91 + m->actual_length = 0; 92 + list_for_each_entry(t, &m->transfers, transfer_list) { 93 + if (t->speed_hz) 94 + div = ar934x_spi_clk_div(sp, t->speed_hz); 95 + else 96 + div = ar934x_spi_clk_div(sp, spi->max_speed_hz); 97 + if (div < 0) { 98 + stat = -EIO; 99 + goto msg_done; 100 + } 101 + 102 + reg = ioread32(sp->base + AR934X_SPI_REG_CTRL); 103 + reg &= ~AR934X_SPI_CLK_MASK; 104 + reg |= div; 105 + iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL); 106 + iowrite32(0, sp->base + AR934X_SPI_DATAOUT); 107 + 108 + for (trx_done = 0; trx_done < t->len; trx_done += 4) { 109 + trx_cur = t->len - trx_done; 110 + if (trx_cur > 4) 111 + trx_cur = 4; 112 + else if (list_is_last(&t->transfer_list, &m->transfers)) 113 + term = 1; 114 + 115 + if (t->tx_buf) { 116 + tx_buf = t->tx_buf + trx_done; 117 + reg = tx_buf[0]; 118 + for (i = 1; i < trx_cur; i++) 119 + reg = reg << 8 | tx_buf[i]; 120 + iowrite32(reg, sp->base + AR934X_SPI_DATAOUT); 121 + } 122 + 123 + reg = AR934X_SPI_SHIFT_VAL(spi->chip_select, term, 124 + trx_cur * 8); 125 + iowrite32(reg, sp->base + AR934X_SPI_REG_SHIFT_CTRL); 126 + stat = readl_poll_timeout( 127 + sp->base + AR934X_SPI_REG_SHIFT_CTRL, reg, 128 + !(reg & AR934X_SPI_SHIFT_EN), 0, 5); 129 + if (stat < 0) 130 + goto msg_done; 131 + 132 + if (t->rx_buf) { 133 + reg = ioread32(sp->base + AR934X_SPI_DATAIN); 134 + buf = t->rx_buf + trx_done; 135 + 
for (i = 0; i < trx_cur; i++) { 136 + buf[trx_cur - i - 1] = reg & 0xff; 137 + reg >>= 8; 138 + } 139 + } 140 + } 141 + m->actual_length += t->len; 142 + } 143 + 144 + msg_done: 145 + m->status = stat; 146 + spi_finalize_current_message(master); 147 + 148 + return 0; 149 + } 150 + 151 + static const struct of_device_id ar934x_spi_match[] = { 152 + { .compatible = "qca,ar934x-spi" }, 153 + {}, 154 + }; 155 + MODULE_DEVICE_TABLE(of, ar934x_spi_match); 156 + 157 + static int ar934x_spi_probe(struct platform_device *pdev) 158 + { 159 + struct spi_controller *ctlr; 160 + struct ar934x_spi *sp; 161 + void __iomem *base; 162 + struct clk *clk; 163 + int ret; 164 + 165 + base = devm_platform_ioremap_resource(pdev, 0); 166 + if (IS_ERR(base)) 167 + return PTR_ERR(base); 168 + 169 + clk = devm_clk_get(&pdev->dev, NULL); 170 + if (IS_ERR(clk)) { 171 + dev_err(&pdev->dev, "failed to get clock\n"); 172 + return PTR_ERR(clk); 173 + } 174 + 175 + ret = clk_prepare_enable(clk); 176 + if (ret) 177 + return ret; 178 + 179 + ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp)); 180 + if (!ctlr) { 181 + dev_info(&pdev->dev, "failed to allocate spi controller\n"); 182 + return -ENOMEM; 183 + } 184 + 185 + /* disable flash mapping and expose spi controller registers */ 186 + iowrite32(AR934X_SPI_ENABLE, base + AR934X_SPI_REG_FS); 187 + /* restore pins to default state: CSn=1 DO=CLK=0 */ 188 + iowrite32(AR934X_SPI_IOC_INITVAL, base + AR934X_SPI_REG_IOC); 189 + 190 + ctlr->mode_bits = SPI_LSB_FIRST; 191 + ctlr->setup = ar934x_spi_setup; 192 + ctlr->transfer_one_message = ar934x_spi_transfer_one_message; 193 + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); 194 + ctlr->dev.of_node = pdev->dev.of_node; 195 + ctlr->num_chipselect = 3; 196 + 197 + dev_set_drvdata(&pdev->dev, ctlr); 198 + 199 + sp = spi_controller_get_devdata(ctlr); 200 + sp->base = base; 201 + sp->clk = clk; 202 + sp->clk_freq = clk_get_rate(clk); 203 + sp->ctlr = ctlr; 204 + 205 + return devm_spi_register_controller(&pdev->dev, 
ctlr); 206 + } 207 + 208 + static int ar934x_spi_remove(struct platform_device *pdev) 209 + { 210 + struct spi_controller *ctlr; 211 + struct ar934x_spi *sp; 212 + 213 + ctlr = dev_get_drvdata(&pdev->dev); 214 + sp = spi_controller_get_devdata(ctlr); 215 + 216 + clk_disable_unprepare(sp->clk); 217 + 218 + return 0; 219 + } 220 + 221 + static struct platform_driver ar934x_spi_driver = { 222 + .driver = { 223 + .name = DRIVER_NAME, 224 + .of_match_table = ar934x_spi_match, 225 + }, 226 + .probe = ar934x_spi_probe, 227 + .remove = ar934x_spi_remove, 228 + }; 229 + 230 + module_platform_driver(ar934x_spi_driver); 231 + 232 + MODULE_DESCRIPTION("SPI controller driver for Qualcomm Atheros AR934x/QCA95xx"); 233 + MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>"); 234 + MODULE_LICENSE("GPL v2"); 235 + MODULE_ALIAS("platform:" DRIVER_NAME);
+3 -41
drivers/spi/spi-efm32.c
··· 6 6 #include <linux/io.h> 7 7 #include <linux/spi/spi.h> 8 8 #include <linux/spi/spi_bitbang.h> 9 - #include <linux/gpio.h> 10 9 #include <linux/interrupt.h> 11 10 #include <linux/platform_device.h> 12 11 #include <linux/clk.h> 13 12 #include <linux/err.h> 14 13 #include <linux/module.h> 15 - #include <linux/of_gpio.h> 16 14 #include <linux/platform_data/efm32-spi.h> 15 + #include <linux/of.h> 17 16 18 17 #define DRIVER_NAME "efm32-spi" 19 18 ··· 81 82 const u8 *tx_buf; 82 83 u8 *rx_buf; 83 84 unsigned tx_len, rx_len; 84 - 85 - /* chip selects */ 86 - unsigned csgpio[]; 87 85 }; 88 86 89 87 #define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev)) ··· 96 100 static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset) 97 101 { 98 102 return readl_relaxed(ddata->base + offset); 99 - } 100 - 101 - static void efm32_spi_chipselect(struct spi_device *spi, int is_on) 102 - { 103 - struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master); 104 - int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE); 105 - 106 - gpio_set_value(ddata->csgpio[spi->chip_select], value); 107 103 } 108 104 109 105 static int efm32_spi_setup_transfer(struct spi_device *spi, ··· 308 320 int ret; 309 321 struct spi_master *master; 310 322 struct device_node *np = pdev->dev.of_node; 311 - int num_cs, i; 312 323 313 324 if (!np) 314 325 return -EINVAL; 315 326 316 - num_cs = of_gpio_named_count(np, "cs-gpios"); 317 - if (num_cs < 0) 318 - return num_cs; 319 - 320 - master = spi_alloc_master(&pdev->dev, 321 - sizeof(*ddata) + num_cs * sizeof(unsigned)); 327 + master = spi_alloc_master(&pdev->dev, sizeof(*ddata)); 322 328 if (!master) { 323 329 dev_dbg(&pdev->dev, 324 330 "failed to allocate spi master controller\n"); ··· 322 340 323 341 master->dev.of_node = pdev->dev.of_node; 324 342 325 - master->num_chipselect = num_cs; 326 343 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 327 344 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 345 
+ master->use_gpio_descriptors = true; 328 346 329 347 ddata = spi_master_get_devdata(master); 330 348 331 349 ddata->bitbang.master = master; 332 - ddata->bitbang.chipselect = efm32_spi_chipselect; 333 350 ddata->bitbang.setup_transfer = efm32_spi_setup_transfer; 334 351 ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs; 335 352 ··· 340 359 ret = PTR_ERR(ddata->clk); 341 360 dev_err(&pdev->dev, "failed to get clock: %d\n", ret); 342 361 goto err; 343 - } 344 - 345 - for (i = 0; i < num_cs; ++i) { 346 - ret = of_get_named_gpio(np, "cs-gpios", i); 347 - if (ret < 0) { 348 - dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n", 349 - i, ret); 350 - goto err; 351 - } 352 - ddata->csgpio[i] = ret; 353 - dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]); 354 - ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i], 355 - GPIOF_OUT_INIT_LOW, DRIVER_NAME); 356 - if (ret < 0) { 357 - dev_err(&pdev->dev, 358 - "failed to configure csgpio#%u (%d)\n", 359 - i, ret); 360 - goto err; 361 - } 362 362 } 363 363 364 364 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+558
drivers/spi/spi-fsi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + // Copyright (C) IBM Corporation 2020 3 + 4 + #include <linux/bitfield.h> 5 + #include <linux/bits.h> 6 + #include <linux/fsi.h> 7 + #include <linux/jiffies.h> 8 + #include <linux/kernel.h> 9 + #include <linux/module.h> 10 + #include <linux/of.h> 11 + #include <linux/spi/spi.h> 12 + 13 + #define FSI_ENGID_SPI 0x23 14 + #define FSI_MBOX_ROOT_CTRL_8 0x2860 15 + 16 + #define FSI2SPI_DATA0 0x00 17 + #define FSI2SPI_DATA1 0x04 18 + #define FSI2SPI_CMD 0x08 19 + #define FSI2SPI_CMD_WRITE BIT(31) 20 + #define FSI2SPI_RESET 0x18 21 + #define FSI2SPI_STATUS 0x1c 22 + #define FSI2SPI_STATUS_ANY_ERROR BIT(31) 23 + #define FSI2SPI_IRQ 0x20 24 + 25 + #define SPI_FSI_BASE 0x70000 26 + #define SPI_FSI_INIT_TIMEOUT_MS 1000 27 + #define SPI_FSI_MAX_TRANSFER_SIZE 2048 28 + 29 + #define SPI_FSI_ERROR 0x0 30 + #define SPI_FSI_COUNTER_CFG 0x1 31 + #define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32) 32 + #define SPI_FSI_CFG1 0x2 33 + #define SPI_FSI_CLOCK_CFG 0x3 34 + #define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32) 35 + #define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33)) 36 + #define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38)) 37 + #define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39)) 38 + #define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42)) 39 + #define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44) 40 + #define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51) 41 + #define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52) 42 + #define SPI_FSI_MMAP 0x4 43 + #define SPI_FSI_DATA_TX 0x5 44 + #define SPI_FSI_DATA_RX 0x6 45 + #define SPI_FSI_SEQUENCE 0x7 46 + #define SPI_FSI_SEQUENCE_STOP 0x00 47 + #define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf)) 48 + #define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf)) 49 + #define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf)) 50 + #define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0 51 + #define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf)) 52 + 
#define SPI_FSI_STATUS 0x8 53 + #define SPI_FSI_STATUS_ERROR \ 54 + (GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12)) 55 + #define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48) 56 + #define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48) 57 + #define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57) 58 + #define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58) 59 + #define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59) 60 + #define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61) 61 + #define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62) 62 + #define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63) 63 + #define SPI_FSI_STATUS_ANY_ERROR \ 64 + (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \ 65 + SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \ 66 + SPI_FSI_STATUS_RDR_OVERRUN) 67 + #define SPI_FSI_PORT_CTRL 0x9 68 + 69 + struct fsi_spi { 70 + struct device *dev; /* SPI controller device */ 71 + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */ 72 + u32 base; 73 + }; 74 + 75 + struct fsi_spi_sequence { 76 + int bit; 77 + u64 data; 78 + }; 79 + 80 + static int fsi_spi_check_status(struct fsi_spi *ctx) 81 + { 82 + int rc; 83 + u32 sts; 84 + __be32 sts_be; 85 + 86 + rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be, 87 + sizeof(sts_be)); 88 + if (rc) 89 + return rc; 90 + 91 + sts = be32_to_cpu(sts_be); 92 + if (sts & FSI2SPI_STATUS_ANY_ERROR) { 93 + dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts); 94 + return -EIO; 95 + } 96 + 97 + return 0; 98 + } 99 + 100 + static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value) 101 + { 102 + int rc; 103 + __be32 cmd_be; 104 + __be32 data_be; 105 + u32 cmd = offset + ctx->base; 106 + 107 + *value = 0ULL; 108 + 109 + if (cmd & FSI2SPI_CMD_WRITE) 110 + return -EINVAL; 111 + 112 + cmd_be = cpu_to_be32(cmd); 113 + rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be)); 114 + if (rc) 115 + return rc; 116 + 117 + rc = fsi_spi_check_status(ctx); 118 + if (rc) 119 + return rc; 120 + 121 + rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, 
&data_be, 122 + sizeof(data_be)); 123 + if (rc) 124 + return rc; 125 + 126 + *value |= (u64)be32_to_cpu(data_be) << 32; 127 + 128 + rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be, 129 + sizeof(data_be)); 130 + if (rc) 131 + return rc; 132 + 133 + *value |= (u64)be32_to_cpu(data_be); 134 + dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value); 135 + 136 + return 0; 137 + } 138 + 139 + static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value) 140 + { 141 + int rc; 142 + __be32 cmd_be; 143 + __be32 data_be; 144 + u32 cmd = offset + ctx->base; 145 + 146 + if (cmd & FSI2SPI_CMD_WRITE) 147 + return -EINVAL; 148 + 149 + dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value); 150 + 151 + data_be = cpu_to_be32(upper_32_bits(value)); 152 + rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be, 153 + sizeof(data_be)); 154 + if (rc) 155 + return rc; 156 + 157 + data_be = cpu_to_be32(lower_32_bits(value)); 158 + rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be, 159 + sizeof(data_be)); 160 + if (rc) 161 + return rc; 162 + 163 + cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE); 164 + rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be)); 165 + if (rc) 166 + return rc; 167 + 168 + return fsi_spi_check_status(ctx); 169 + } 170 + 171 + static int fsi_spi_data_in(u64 in, u8 *rx, int len) 172 + { 173 + int i; 174 + int num_bytes = min(len, 8); 175 + 176 + for (i = 0; i < num_bytes; ++i) 177 + rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i))); 178 + 179 + return num_bytes; 180 + } 181 + 182 + static int fsi_spi_data_out(u64 *out, const u8 *tx, int len) 183 + { 184 + int i; 185 + int num_bytes = min(len, 8); 186 + u8 *out_bytes = (u8 *)out; 187 + 188 + /* Unused bytes of the tx data should be 0. 
*/ 189 + *out = 0ULL; 190 + 191 + for (i = 0; i < num_bytes; ++i) 192 + out_bytes[8 - (i + 1)] = tx[i]; 193 + 194 + return num_bytes; 195 + } 196 + 197 + static int fsi_spi_reset(struct fsi_spi *ctx) 198 + { 199 + int rc; 200 + 201 + dev_dbg(ctx->dev, "Resetting SPI controller.\n"); 202 + 203 + rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, 204 + SPI_FSI_CLOCK_CFG_RESET1); 205 + if (rc) 206 + return rc; 207 + 208 + return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, 209 + SPI_FSI_CLOCK_CFG_RESET2); 210 + } 211 + 212 + static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val) 213 + { 214 + /* 215 + * Add the next byte of instruction to the 8-byte sequence register. 216 + * Then decrement the counter so that the next instruction will go in 217 + * the right place. Return the number of "slots" left in the sequence 218 + * register. 219 + */ 220 + seq->data |= (u64)val << seq->bit; 221 + seq->bit -= 8; 222 + 223 + return ((64 - seq->bit) / 8) - 2; 224 + } 225 + 226 + static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq) 227 + { 228 + seq->bit = 56; 229 + seq->data = 0ULL; 230 + } 231 + 232 + static int fsi_spi_sequence_transfer(struct fsi_spi *ctx, 233 + struct fsi_spi_sequence *seq, 234 + struct spi_transfer *transfer) 235 + { 236 + int loops; 237 + int idx; 238 + int rc; 239 + u8 len = min(transfer->len, 8U); 240 + u8 rem = transfer->len % len; 241 + 242 + loops = transfer->len / len; 243 + 244 + if (transfer->tx_buf) { 245 + idx = fsi_spi_sequence_add(seq, 246 + SPI_FSI_SEQUENCE_SHIFT_OUT(len)); 247 + if (rem) 248 + rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem); 249 + } else if (transfer->rx_buf) { 250 + idx = fsi_spi_sequence_add(seq, 251 + SPI_FSI_SEQUENCE_SHIFT_IN(len)); 252 + if (rem) 253 + rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem); 254 + } else { 255 + return -EINVAL; 256 + } 257 + 258 + if (loops > 1) { 259 + fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx)); 260 + 261 + if (rem) 262 + fsi_spi_sequence_add(seq, rem); 263 + 264 + rc = 
fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG, 265 + SPI_FSI_COUNTER_CFG_LOOPS(loops - 1)); 266 + if (rc) 267 + return rc; 268 + } 269 + 270 + return 0; 271 + } 272 + 273 + static int fsi_spi_transfer_data(struct fsi_spi *ctx, 274 + struct spi_transfer *transfer) 275 + { 276 + int rc = 0; 277 + u64 status = 0ULL; 278 + 279 + if (transfer->tx_buf) { 280 + int nb; 281 + int sent = 0; 282 + u64 out = 0ULL; 283 + const u8 *tx = transfer->tx_buf; 284 + 285 + while (transfer->len > sent) { 286 + nb = fsi_spi_data_out(&out, &tx[sent], 287 + (int)transfer->len - sent); 288 + 289 + rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out); 290 + if (rc) 291 + return rc; 292 + 293 + do { 294 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, 295 + &status); 296 + if (rc) 297 + return rc; 298 + 299 + if (status & SPI_FSI_STATUS_ANY_ERROR) { 300 + rc = fsi_spi_reset(ctx); 301 + if (rc) 302 + return rc; 303 + 304 + return -EREMOTEIO; 305 + } 306 + } while (status & SPI_FSI_STATUS_TDR_FULL); 307 + 308 + sent += nb; 309 + } 310 + } else if (transfer->rx_buf) { 311 + int recv = 0; 312 + u64 in = 0ULL; 313 + u8 *rx = transfer->rx_buf; 314 + 315 + while (transfer->len > recv) { 316 + do { 317 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, 318 + &status); 319 + if (rc) 320 + return rc; 321 + 322 + if (status & SPI_FSI_STATUS_ANY_ERROR) { 323 + rc = fsi_spi_reset(ctx); 324 + if (rc) 325 + return rc; 326 + 327 + return -EREMOTEIO; 328 + } 329 + } while (!(status & SPI_FSI_STATUS_RDR_FULL)); 330 + 331 + rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in); 332 + if (rc) 333 + return rc; 334 + 335 + recv += fsi_spi_data_in(in, &rx[recv], 336 + (int)transfer->len - recv); 337 + } 338 + } 339 + 340 + return 0; 341 + } 342 + 343 + static int fsi_spi_transfer_init(struct fsi_spi *ctx) 344 + { 345 + int rc; 346 + bool reset = false; 347 + unsigned long end; 348 + u64 seq_state; 349 + u64 clock_cfg = 0ULL; 350 + u64 status = 0ULL; 351 + u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE | 352 + 
SPI_FSI_CLOCK_CFG_SCK_NO_DEL | 353 + FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4); 354 + 355 + end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS); 356 + do { 357 + if (time_after(jiffies, end)) 358 + return -ETIMEDOUT; 359 + 360 + rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status); 361 + if (rc) 362 + return rc; 363 + 364 + seq_state = status & SPI_FSI_STATUS_SEQ_STATE; 365 + 366 + if (status & (SPI_FSI_STATUS_ANY_ERROR | 367 + SPI_FSI_STATUS_TDR_FULL | 368 + SPI_FSI_STATUS_RDR_FULL)) { 369 + if (reset) 370 + return -EIO; 371 + 372 + rc = fsi_spi_reset(ctx); 373 + if (rc) 374 + return rc; 375 + 376 + reset = true; 377 + continue; 378 + } 379 + } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE)); 380 + 381 + rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg); 382 + if (rc) 383 + return rc; 384 + 385 + if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE | 386 + SPI_FSI_CLOCK_CFG_ECC_DISABLE | 387 + SPI_FSI_CLOCK_CFG_MODE | 388 + SPI_FSI_CLOCK_CFG_SCK_RECV_DEL | 389 + SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg) 390 + rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG, 391 + wanted_clock_cfg); 392 + 393 + return rc; 394 + } 395 + 396 + static int fsi_spi_transfer_one_message(struct spi_controller *ctlr, 397 + struct spi_message *mesg) 398 + { 399 + int rc = 0; 400 + u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1); 401 + struct spi_transfer *transfer; 402 + struct fsi_spi *ctx = spi_controller_get_devdata(ctlr); 403 + 404 + list_for_each_entry(transfer, &mesg->transfers, transfer_list) { 405 + struct fsi_spi_sequence seq; 406 + struct spi_transfer *next = NULL; 407 + 408 + /* Sequencer must do shift out (tx) first. 
*/ 409 + if (!transfer->tx_buf || 410 + transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) { 411 + rc = -EINVAL; 412 + goto error; 413 + } 414 + 415 + dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len); 416 + 417 + rc = fsi_spi_transfer_init(ctx); 418 + if (rc < 0) 419 + goto error; 420 + 421 + fsi_spi_sequence_init(&seq); 422 + fsi_spi_sequence_add(&seq, seq_slave); 423 + 424 + rc = fsi_spi_sequence_transfer(ctx, &seq, transfer); 425 + if (rc) 426 + goto error; 427 + 428 + if (!list_is_last(&transfer->transfer_list, 429 + &mesg->transfers)) { 430 + next = list_next_entry(transfer, transfer_list); 431 + 432 + /* Sequencer can only do shift in (rx) after tx. */ 433 + if (next->rx_buf) { 434 + if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) { 435 + rc = -EINVAL; 436 + goto error; 437 + } 438 + 439 + dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n", 440 + next->len); 441 + 442 + rc = fsi_spi_sequence_transfer(ctx, &seq, 443 + next); 444 + if (rc) 445 + goto error; 446 + } else { 447 + next = NULL; 448 + } 449 + } 450 + 451 + fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0)); 452 + 453 + rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data); 454 + if (rc) 455 + goto error; 456 + 457 + rc = fsi_spi_transfer_data(ctx, transfer); 458 + if (rc) 459 + goto error; 460 + 461 + if (next) { 462 + rc = fsi_spi_transfer_data(ctx, next); 463 + if (rc) 464 + goto error; 465 + 466 + transfer = next; 467 + } 468 + } 469 + 470 + error: 471 + mesg->status = rc; 472 + spi_finalize_current_message(ctlr); 473 + 474 + return rc; 475 + } 476 + 477 + static size_t fsi_spi_max_transfer_size(struct spi_device *spi) 478 + { 479 + return SPI_FSI_MAX_TRANSFER_SIZE; 480 + } 481 + 482 + static int fsi_spi_probe(struct device *dev) 483 + { 484 + int rc; 485 + u32 root_ctrl_8; 486 + struct device_node *np; 487 + int num_controllers_registered = 0; 488 + struct fsi_device *fsi = to_fsi_dev(dev); 489 + 490 + /* 491 + * Check the SPI mux before attempting to probe. 
If the mux isn't set 492 + * then the SPI controllers can't access their slave devices. 493 + */ 494 + rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8, 495 + sizeof(root_ctrl_8)); 496 + if (rc) 497 + return rc; 498 + 499 + if (!root_ctrl_8) { 500 + dev_dbg(dev, "SPI mux not set, aborting probe.\n"); 501 + return -ENODEV; 502 + } 503 + 504 + for_each_available_child_of_node(dev->of_node, np) { 505 + u32 base; 506 + struct fsi_spi *ctx; 507 + struct spi_controller *ctlr; 508 + 509 + if (of_property_read_u32(np, "reg", &base)) 510 + continue; 511 + 512 + ctlr = spi_alloc_master(dev, sizeof(*ctx)); 513 + if (!ctlr) 514 + break; 515 + 516 + ctlr->dev.of_node = np; 517 + ctlr->num_chipselect = of_get_available_child_count(np) ?: 1; 518 + ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; 519 + ctlr->max_transfer_size = fsi_spi_max_transfer_size; 520 + ctlr->transfer_one_message = fsi_spi_transfer_one_message; 521 + 522 + ctx = spi_controller_get_devdata(ctlr); 523 + ctx->dev = &ctlr->dev; 524 + ctx->fsi = fsi; 525 + ctx->base = base + SPI_FSI_BASE; 526 + 527 + rc = devm_spi_register_controller(dev, ctlr); 528 + if (rc) 529 + spi_controller_put(ctlr); 530 + else 531 + num_controllers_registered++; 532 + } 533 + 534 + if (!num_controllers_registered) 535 + return -ENODEV; 536 + 537 + return 0; 538 + } 539 + 540 + static const struct fsi_device_id fsi_spi_ids[] = { 541 + { FSI_ENGID_SPI, FSI_VERSION_ANY }, 542 + { } 543 + }; 544 + MODULE_DEVICE_TABLE(fsi, fsi_spi_ids); 545 + 546 + static struct fsi_driver fsi_spi_driver = { 547 + .id_table = fsi_spi_ids, 548 + .drv = { 549 + .name = "spi-fsi", 550 + .bus = &fsi_bus_type, 551 + .probe = fsi_spi_probe, 552 + }, 553 + }; 554 + module_fsi_driver(fsi_spi_driver); 555 + 556 + MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>"); 557 + MODULE_DESCRIPTION("FSI attached SPI controller"); 558 + MODULE_LICENSE("GPL");
+480 -270
drivers/spi/spi-fsl-dspi.c
··· 20 20 21 21 #define DRIVER_NAME "fsl-dspi" 22 22 23 - #ifdef CONFIG_M5441x 24 - #define DSPI_FIFO_SIZE 16 25 - #else 26 - #define DSPI_FIFO_SIZE 4 27 - #endif 28 - #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) 29 - 30 23 #define SPI_MCR 0x00 31 24 #define SPI_MCR_MASTER BIT(31) 32 - #define SPI_MCR_PCSIS (0x3F << 16) 25 + #define SPI_MCR_PCSIS(x) ((x) << 16) 33 26 #define SPI_MCR_CLR_TXF BIT(11) 34 27 #define SPI_MCR_CLR_RXF BIT(10) 35 28 #define SPI_MCR_XSPI BIT(3) ··· 72 79 #define SPI_RSER 0x30 73 80 #define SPI_RSER_TCFQE BIT(31) 74 81 #define SPI_RSER_EOQFE BIT(28) 82 + #define SPI_RSER_CMDTCFE BIT(23) 75 83 76 84 #define SPI_PUSHR 0x34 77 85 #define SPI_PUSHR_CMD_CONT BIT(15) ··· 103 109 #define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1) 104 110 #define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4) 105 111 106 - /* Register offsets for regmap_pushr */ 107 - #define PUSHR_CMD 0x0 108 - #define PUSHR_TX 0x2 109 - 110 112 #define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000) 111 113 112 114 struct chip_data { 113 115 u32 ctar_val; 114 - u16 void_write_data; 115 116 }; 116 117 117 118 enum dspi_trans_mode { 118 119 DSPI_EOQ_MODE = 0, 119 - DSPI_TCFQ_MODE, 120 + DSPI_XSPI_MODE, 120 121 DSPI_DMA_MODE, 121 122 }; 122 123 123 124 struct fsl_dspi_devtype_data { 124 125 enum dspi_trans_mode trans_mode; 125 126 u8 max_clock_factor; 126 - bool ptp_sts_supported; 127 - bool xspi_mode; 127 + int fifo_size; 128 128 }; 129 129 130 - static const struct fsl_dspi_devtype_data vf610_data = { 131 - .trans_mode = DSPI_DMA_MODE, 132 - .max_clock_factor = 2, 130 + enum { 131 + LS1021A, 132 + LS1012A, 133 + LS1028A, 134 + LS1043A, 135 + LS1046A, 136 + LS2080A, 137 + LS2085A, 138 + LX2160A, 139 + MCF5441X, 140 + VF610, 133 141 }; 134 142 135 - static const struct fsl_dspi_devtype_data ls1021a_v1_data = { 136 - .trans_mode = DSPI_TCFQ_MODE, 137 - .max_clock_factor = 8, 138 - .ptp_sts_supported = true, 139 - .xspi_mode = true, 140 - }; 141 - 142 - static const 
struct fsl_dspi_devtype_data ls2085a_data = { 143 - .trans_mode = DSPI_TCFQ_MODE, 144 - .max_clock_factor = 8, 145 - .ptp_sts_supported = true, 146 - }; 147 - 148 - static const struct fsl_dspi_devtype_data coldfire_data = { 149 - .trans_mode = DSPI_EOQ_MODE, 150 - .max_clock_factor = 8, 143 + static const struct fsl_dspi_devtype_data devtype_data[] = { 144 + [VF610] = { 145 + .trans_mode = DSPI_DMA_MODE, 146 + .max_clock_factor = 2, 147 + .fifo_size = 4, 148 + }, 149 + [LS1021A] = { 150 + /* Has A-011218 DMA erratum */ 151 + .trans_mode = DSPI_XSPI_MODE, 152 + .max_clock_factor = 8, 153 + .fifo_size = 4, 154 + }, 155 + [LS1012A] = { 156 + /* Has A-011218 DMA erratum */ 157 + .trans_mode = DSPI_XSPI_MODE, 158 + .max_clock_factor = 8, 159 + .fifo_size = 16, 160 + }, 161 + [LS1028A] = { 162 + .trans_mode = DSPI_XSPI_MODE, 163 + .max_clock_factor = 8, 164 + .fifo_size = 4, 165 + }, 166 + [LS1043A] = { 167 + /* Has A-011218 DMA erratum */ 168 + .trans_mode = DSPI_XSPI_MODE, 169 + .max_clock_factor = 8, 170 + .fifo_size = 16, 171 + }, 172 + [LS1046A] = { 173 + /* Has A-011218 DMA erratum */ 174 + .trans_mode = DSPI_XSPI_MODE, 175 + .max_clock_factor = 8, 176 + .fifo_size = 16, 177 + }, 178 + [LS2080A] = { 179 + .trans_mode = DSPI_DMA_MODE, 180 + .max_clock_factor = 8, 181 + .fifo_size = 4, 182 + }, 183 + [LS2085A] = { 184 + .trans_mode = DSPI_DMA_MODE, 185 + .max_clock_factor = 8, 186 + .fifo_size = 4, 187 + }, 188 + [LX2160A] = { 189 + .trans_mode = DSPI_DMA_MODE, 190 + .max_clock_factor = 8, 191 + .fifo_size = 4, 192 + }, 193 + [MCF5441X] = { 194 + .trans_mode = DSPI_EOQ_MODE, 195 + .max_clock_factor = 8, 196 + .fifo_size = 16, 197 + }, 151 198 }; 152 199 153 200 struct fsl_dspi_dma { 154 - /* Length of transfer in words of DSPI_FIFO_SIZE */ 155 - u32 curr_xfer_len; 156 - 157 201 u32 *tx_dma_buf; 158 202 struct dma_chan *chan_tx; 159 203 dma_addr_t tx_dma_phys; ··· 221 189 size_t len; 222 190 const void *tx; 223 191 void *rx; 224 - void *rx_end; 225 - u16 
void_write_data; 226 192 u16 tx_cmd; 227 - u8 bits_per_word; 228 - u8 bytes_per_word; 229 193 const struct fsl_dspi_devtype_data *devtype_data; 230 194 231 - wait_queue_head_t waitq; 232 - u32 waitflags; 195 + struct completion xfer_done; 233 196 234 197 struct fsl_dspi_dma *dma; 198 + 199 + int oper_word_size; 200 + int oper_bits_per_word; 201 + 202 + int words_in_flight; 203 + 204 + /* 205 + * Offsets for CMD and TXDATA within SPI_PUSHR when accessed 206 + * individually (in XSPI mode) 207 + */ 208 + int pushr_cmd; 209 + int pushr_tx; 210 + 211 + void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata); 212 + void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata); 235 213 }; 236 214 215 + static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 216 + { 217 + memcpy(txdata, dspi->tx, dspi->oper_word_size); 218 + dspi->tx += dspi->oper_word_size; 219 + } 220 + 221 + static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 222 + { 223 + memcpy(dspi->rx, &rxdata, dspi->oper_word_size); 224 + dspi->rx += dspi->oper_word_size; 225 + } 226 + 227 + static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 228 + { 229 + *txdata = cpu_to_be32(*(u32 *)dspi->tx); 230 + dspi->tx += sizeof(u32); 231 + } 232 + 233 + static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 234 + { 235 + *(u32 *)dspi->rx = be32_to_cpu(rxdata); 236 + dspi->rx += sizeof(u32); 237 + } 238 + 239 + static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 240 + { 241 + *txdata = cpu_to_be16(*(u16 *)dspi->tx); 242 + dspi->tx += sizeof(u16); 243 + } 244 + 245 + static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 246 + { 247 + *(u16 *)dspi->rx = be16_to_cpu(rxdata); 248 + dspi->rx += sizeof(u16); 249 + } 250 + 251 + static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata) 252 + { 253 + u16 hi = *(u16 *)dspi->tx; 254 + u16 lo = *(u16 *)(dspi->tx + 2); 255 + 256 + *txdata = (u32)hi << 16 | lo; 257 + dspi->tx 
+= sizeof(u32); 258 + } 259 + 260 + static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata) 261 + { 262 + u16 hi = rxdata & 0xffff; 263 + u16 lo = rxdata >> 16; 264 + 265 + *(u16 *)dspi->rx = lo; 266 + *(u16 *)(dspi->rx + 2) = hi; 267 + dspi->rx += sizeof(u32); 268 + } 269 + 270 + /* 271 + * Pop one word from the TX buffer for pushing into the 272 + * PUSHR register (TX FIFO) 273 + */ 237 274 static u32 dspi_pop_tx(struct fsl_dspi *dspi) 238 275 { 239 276 u32 txdata = 0; 240 277 241 - if (dspi->tx) { 242 - if (dspi->bytes_per_word == 1) 243 - txdata = *(u8 *)dspi->tx; 244 - else if (dspi->bytes_per_word == 2) 245 - txdata = *(u16 *)dspi->tx; 246 - else /* dspi->bytes_per_word == 4 */ 247 - txdata = *(u32 *)dspi->tx; 248 - dspi->tx += dspi->bytes_per_word; 249 - } 250 - dspi->len -= dspi->bytes_per_word; 278 + if (dspi->tx) 279 + dspi->host_to_dev(dspi, &txdata); 280 + dspi->len -= dspi->oper_word_size; 251 281 return txdata; 252 282 } 253 283 284 + /* Prepare one TX FIFO entry (txdata plus cmd) */ 254 285 static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) 255 286 { 256 287 u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); ··· 326 231 return cmd << 16 | data; 327 232 } 328 233 234 + /* Push one word to the RX buffer from the POPR register (RX FIFO) */ 329 235 static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata) 330 236 { 331 237 if (!dspi->rx) 332 238 return; 333 - 334 - /* Mask off undefined bits */ 335 - rxdata &= (1 << dspi->bits_per_word) - 1; 336 - 337 - if (dspi->bytes_per_word == 1) 338 - *(u8 *)dspi->rx = rxdata; 339 - else if (dspi->bytes_per_word == 2) 340 - *(u16 *)dspi->rx = rxdata; 341 - else /* dspi->bytes_per_word == 4 */ 342 - *(u32 *)dspi->rx = rxdata; 343 - dspi->rx += dspi->bytes_per_word; 239 + dspi->dev_to_host(dspi, rxdata); 344 240 } 345 241 346 242 static void dspi_tx_dma_callback(void *arg) ··· 349 263 int i; 350 264 351 265 if (dspi->rx) { 352 - for (i = 0; i < dma->curr_xfer_len; i++) 266 + for (i = 0; i < 
dspi->words_in_flight; i++) 353 267 dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]); 354 268 } 355 269 ··· 363 277 int time_left; 364 278 int i; 365 279 366 - for (i = 0; i < dma->curr_xfer_len; i++) 280 + for (i = 0; i < dspi->words_in_flight; i++) 367 281 dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi); 368 282 369 283 dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx, 370 284 dma->tx_dma_phys, 371 - dma->curr_xfer_len * 285 + dspi->words_in_flight * 372 286 DMA_SLAVE_BUSWIDTH_4_BYTES, 373 287 DMA_MEM_TO_DEV, 374 288 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ··· 386 300 387 301 dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx, 388 302 dma->rx_dma_phys, 389 - dma->curr_xfer_len * 303 + dspi->words_in_flight * 390 304 DMA_SLAVE_BUSWIDTH_4_BYTES, 391 305 DMA_DEV_TO_MEM, 392 306 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ··· 434 348 return 0; 435 349 } 436 350 351 + static void dspi_setup_accel(struct fsl_dspi *dspi); 352 + 437 353 static int dspi_dma_xfer(struct fsl_dspi *dspi) 438 354 { 439 355 struct spi_message *message = dspi->cur_msg; 440 356 struct device *dev = &dspi->pdev->dev; 441 - struct fsl_dspi_dma *dma = dspi->dma; 442 - int curr_remaining_bytes; 443 - int bytes_per_buffer; 444 357 int ret = 0; 445 358 446 - curr_remaining_bytes = dspi->len; 447 - bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE; 448 - while (curr_remaining_bytes) { 449 - /* Check if current transfer fits the DMA buffer */ 450 - dma->curr_xfer_len = curr_remaining_bytes 451 - / dspi->bytes_per_word; 452 - if (dma->curr_xfer_len > bytes_per_buffer) 453 - dma->curr_xfer_len = bytes_per_buffer; 359 + /* 360 + * dspi->len gets decremented by dspi_pop_tx_pushr in 361 + * dspi_next_xfer_dma_submit 362 + */ 363 + while (dspi->len) { 364 + /* Figure out operational bits-per-word for this chunk */ 365 + dspi_setup_accel(dspi); 366 + 367 + dspi->words_in_flight = dspi->len / dspi->oper_word_size; 368 + if (dspi->words_in_flight > dspi->devtype_data->fifo_size) 369 + dspi->words_in_flight = 
dspi->devtype_data->fifo_size; 370 + 371 + message->actual_length += dspi->words_in_flight * 372 + dspi->oper_word_size; 454 373 455 374 ret = dspi_next_xfer_dma_submit(dspi); 456 375 if (ret) { 457 376 dev_err(dev, "DMA transfer failed\n"); 458 - goto exit; 459 - 460 - } else { 461 - const int len = 462 - dma->curr_xfer_len * dspi->bytes_per_word; 463 - curr_remaining_bytes -= len; 464 - message->actual_length += len; 465 - if (curr_remaining_bytes < 0) 466 - curr_remaining_bytes = 0; 377 + break; 467 378 } 468 379 } 469 380 470 - exit: 471 381 return ret; 472 382 } 473 383 474 384 static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) 475 385 { 386 + int dma_bufsize = dspi->devtype_data->fifo_size * 2; 476 387 struct device *dev = &dspi->pdev->dev; 477 388 struct dma_slave_config cfg; 478 389 struct fsl_dspi_dma *dma; ··· 493 410 goto err_tx_channel; 494 411 } 495 412 496 - dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, 497 - &dma->tx_dma_phys, GFP_KERNEL); 413 + dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev, 414 + dma_bufsize, &dma->tx_dma_phys, 415 + GFP_KERNEL); 498 416 if (!dma->tx_dma_buf) { 499 417 ret = -ENOMEM; 500 418 goto err_tx_dma_buf; 501 419 } 502 420 503 - dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE, 504 - &dma->rx_dma_phys, GFP_KERNEL); 421 + dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev, 422 + dma_bufsize, &dma->rx_dma_phys, 423 + GFP_KERNEL); 505 424 if (!dma->rx_dma_buf) { 506 425 ret = -ENOMEM; 507 426 goto err_rx_dma_buf; ··· 539 454 return 0; 540 455 541 456 err_slave_config: 542 - dma_free_coherent(dev, DSPI_DMA_BUFSIZE, 543 - dma->rx_dma_buf, dma->rx_dma_phys); 457 + dma_free_coherent(dma->chan_rx->device->dev, 458 + dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys); 544 459 err_rx_dma_buf: 545 - dma_free_coherent(dev, DSPI_DMA_BUFSIZE, 546 - dma->tx_dma_buf, dma->tx_dma_phys); 460 + dma_free_coherent(dma->chan_tx->device->dev, 461 + dma_bufsize, dma->tx_dma_buf, 
dma->tx_dma_phys); 547 462 err_tx_dma_buf: 548 463 dma_release_channel(dma->chan_tx); 549 464 err_tx_channel: ··· 557 472 558 473 static void dspi_release_dma(struct fsl_dspi *dspi) 559 474 { 475 + int dma_bufsize = dspi->devtype_data->fifo_size * 2; 560 476 struct fsl_dspi_dma *dma = dspi->dma; 561 - struct device *dev = &dspi->pdev->dev; 562 477 563 478 if (!dma) 564 479 return; 565 480 566 481 if (dma->chan_tx) { 567 - dma_unmap_single(dev, dma->tx_dma_phys, 568 - DSPI_DMA_BUFSIZE, DMA_TO_DEVICE); 482 + dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys, 483 + dma_bufsize, DMA_TO_DEVICE); 569 484 dma_release_channel(dma->chan_tx); 570 485 } 571 486 572 487 if (dma->chan_rx) { 573 - dma_unmap_single(dev, dma->rx_dma_phys, 574 - DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE); 488 + dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys, 489 + dma_bufsize, DMA_FROM_DEVICE); 575 490 dma_release_channel(dma->chan_rx); 576 491 } 577 492 } ··· 647 562 } 648 563 } 649 564 650 - static void fifo_write(struct fsl_dspi *dspi) 565 + static void dspi_pushr_write(struct fsl_dspi *dspi) 651 566 { 652 567 regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi)); 653 568 } 654 569 655 - static void cmd_fifo_write(struct fsl_dspi *dspi) 570 + static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd) 656 571 { 657 - u16 cmd = dspi->tx_cmd; 658 - 659 - if (dspi->len > 0) 572 + /* 573 + * The only time when the PCS doesn't need continuation after this word 574 + * is when it's last. We need to look ahead, because we actually call 575 + * dspi_pop_tx (the function that decrements dspi->len) _after_ 576 + * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One 577 + * word is enough. If there's more to transmit than that, 578 + * dspi_xspi_write will know to split the FIFO writes in 2, and 579 + * generate a new PUSHR command with the final word that will have PCS 580 + * deasserted (not continued) here. 
581 + */ 582 + if (dspi->len > dspi->oper_word_size) 660 583 cmd |= SPI_PUSHR_CMD_CONT; 661 - regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd); 584 + regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd); 662 585 } 663 586 664 - static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata) 587 + static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata) 665 588 { 666 - regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata); 589 + regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata); 667 590 } 668 591 669 - static void dspi_tcfq_write(struct fsl_dspi *dspi) 592 + static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words) 670 593 { 671 - /* Clear transfer count */ 672 - dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT; 594 + int num_bytes = num_words * dspi->oper_word_size; 595 + u16 tx_cmd = dspi->tx_cmd; 673 596 674 - if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) { 675 - /* Write the CMD FIFO entry first, and then the two 676 - * corresponding TX FIFO entries. 677 - */ 597 + /* 598 + * If the PCS needs to de-assert (i.e. we're at the end of the buffer 599 + * and cs_change does not want the PCS to stay on), then we need a new 600 + * PUSHR command, since this one (for the body of the buffer) 601 + * necessarily has the CONT bit set. 602 + * So send one word less during this go, to force a split and a command 603 + * with a single word next time, when CONT will be unset. 604 + */ 605 + if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len) 606 + tx_cmd |= SPI_PUSHR_CMD_EOQ; 607 + 608 + /* Update CTARE */ 609 + regmap_write(dspi->regmap, SPI_CTARE(0), 610 + SPI_FRAME_EBITS(dspi->oper_bits_per_word) | 611 + SPI_CTARE_DTCP(num_words)); 612 + 613 + /* 614 + * Write the CMD FIFO entry first, and then the two 615 + * corresponding TX FIFO entries (or one...). 
616 + */ 617 + dspi_pushr_cmd_write(dspi, tx_cmd); 618 + 619 + /* Fill TX FIFO with as many transfers as possible */ 620 + while (num_words--) { 678 621 u32 data = dspi_pop_tx(dspi); 679 622 680 - cmd_fifo_write(dspi); 681 - tx_fifo_write(dspi, data & 0xFFFF); 682 - tx_fifo_write(dspi, data >> 16); 683 - } else { 684 - /* Write one entry to both TX FIFO and CMD FIFO 685 - * simultaneously. 686 - */ 687 - fifo_write(dspi); 623 + dspi_pushr_txdata_write(dspi, data & 0xFFFF); 624 + if (dspi->oper_bits_per_word > 16) 625 + dspi_pushr_txdata_write(dspi, data >> 16); 688 626 } 689 627 } 690 628 691 - static u32 fifo_read(struct fsl_dspi *dspi) 629 + static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words) 630 + { 631 + u16 xfer_cmd = dspi->tx_cmd; 632 + 633 + /* Fill TX FIFO with as many transfers as possible */ 634 + while (num_words--) { 635 + dspi->tx_cmd = xfer_cmd; 636 + /* Request EOQF for last transfer in FIFO */ 637 + if (num_words == 0) 638 + dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; 639 + /* Write combined TX FIFO and CMD FIFO entry */ 640 + dspi_pushr_write(dspi); 641 + } 642 + } 643 + 644 + static u32 dspi_popr_read(struct fsl_dspi *dspi) 692 645 { 693 646 u32 rxdata = 0; 694 647 ··· 734 611 return rxdata; 735 612 } 736 613 737 - static void dspi_tcfq_read(struct fsl_dspi *dspi) 614 + static void dspi_fifo_read(struct fsl_dspi *dspi) 738 615 { 739 - dspi_push_rx(dspi, fifo_read(dspi)); 740 - } 741 - 742 - static void dspi_eoq_write(struct fsl_dspi *dspi) 743 - { 744 - int fifo_size = DSPI_FIFO_SIZE; 745 - u16 xfer_cmd = dspi->tx_cmd; 746 - 747 - /* Fill TX FIFO with as many transfers as possible */ 748 - while (dspi->len && fifo_size--) { 749 - dspi->tx_cmd = xfer_cmd; 750 - /* Request EOQF for last transfer in FIFO */ 751 - if (dspi->len == dspi->bytes_per_word || fifo_size == 0) 752 - dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; 753 - /* Clear transfer count for first transfer in FIFO */ 754 - if (fifo_size == (DSPI_FIFO_SIZE - 1)) 755 - dspi->tx_cmd |= 
SPI_PUSHR_CMD_CTCNT; 756 - /* Write combined TX FIFO and CMD FIFO entry */ 757 - fifo_write(dspi); 758 - } 759 - } 760 - 761 - static void dspi_eoq_read(struct fsl_dspi *dspi) 762 - { 763 - int fifo_size = DSPI_FIFO_SIZE; 616 + int num_fifo_entries = dspi->words_in_flight; 764 617 765 618 /* Read one FIFO entry and push to rx buffer */ 766 - while ((dspi->rx < dspi->rx_end) && fifo_size--) 767 - dspi_push_rx(dspi, fifo_read(dspi)); 619 + while (num_fifo_entries--) 620 + dspi_push_rx(dspi, dspi_popr_read(dspi)); 621 + } 622 + 623 + static void dspi_setup_accel(struct fsl_dspi *dspi) 624 + { 625 + struct spi_transfer *xfer = dspi->cur_transfer; 626 + bool odd = !!(dspi->len & 1); 627 + 628 + /* No accel for frames not multiple of 8 bits at the moment */ 629 + if (xfer->bits_per_word % 8) 630 + goto no_accel; 631 + 632 + if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) { 633 + dspi->oper_bits_per_word = 16; 634 + } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) { 635 + dspi->oper_bits_per_word = 8; 636 + } else { 637 + /* Start off with maximum supported by hardware */ 638 + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 639 + dspi->oper_bits_per_word = 32; 640 + else 641 + dspi->oper_bits_per_word = 16; 642 + 643 + /* 644 + * And go down only if the buffer can't be sent with 645 + * words this big 646 + */ 647 + do { 648 + if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8)) 649 + break; 650 + 651 + dspi->oper_bits_per_word /= 2; 652 + } while (dspi->oper_bits_per_word > 8); 653 + } 654 + 655 + if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) { 656 + dspi->dev_to_host = dspi_8on32_dev_to_host; 657 + dspi->host_to_dev = dspi_8on32_host_to_dev; 658 + } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) { 659 + dspi->dev_to_host = dspi_8on16_dev_to_host; 660 + dspi->host_to_dev = dspi_8on16_host_to_dev; 661 + } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) { 662 + 
dspi->dev_to_host = dspi_16on32_dev_to_host; 663 + dspi->host_to_dev = dspi_16on32_host_to_dev; 664 + } else { 665 + no_accel: 666 + dspi->dev_to_host = dspi_native_dev_to_host; 667 + dspi->host_to_dev = dspi_native_host_to_dev; 668 + dspi->oper_bits_per_word = xfer->bits_per_word; 669 + } 670 + 671 + dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8); 672 + 673 + /* 674 + * Update CTAR here (code is common for EOQ, XSPI and DMA modes). 675 + * We will update CTARE in the portion specific to XSPI, when we 676 + * also know the preload value (DTCP). 677 + */ 678 + regmap_write(dspi->regmap, SPI_CTAR(0), 679 + dspi->cur_chip->ctar_val | 680 + SPI_FRAME_BITS(dspi->oper_bits_per_word)); 681 + } 682 + 683 + static void dspi_fifo_write(struct fsl_dspi *dspi) 684 + { 685 + int num_fifo_entries = dspi->devtype_data->fifo_size; 686 + struct spi_transfer *xfer = dspi->cur_transfer; 687 + struct spi_message *msg = dspi->cur_msg; 688 + int num_words, num_bytes; 689 + 690 + dspi_setup_accel(dspi); 691 + 692 + /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */ 693 + if (dspi->oper_word_size == 4) 694 + num_fifo_entries /= 2; 695 + 696 + /* 697 + * Integer division intentionally trims off odd (or non-multiple of 4) 698 + * numbers of bytes at the end of the buffer, which will be sent next 699 + * time using a smaller oper_word_size. 700 + */ 701 + num_words = dspi->len / dspi->oper_word_size; 702 + if (num_words > num_fifo_entries) 703 + num_words = num_fifo_entries; 704 + 705 + /* Update total number of bytes that were transferred */ 706 + num_bytes = num_words * dspi->oper_word_size; 707 + msg->actual_length += num_bytes; 708 + dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8); 709 + 710 + /* 711 + * Update shared variable for use in the next interrupt (both in 712 + * dspi_fifo_read and in dspi_fifo_write). 
713 + */ 714 + dspi->words_in_flight = num_words; 715 + 716 + spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq); 717 + 718 + if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE) 719 + dspi_eoq_fifo_write(dspi, num_words); 720 + else 721 + dspi_xspi_fifo_write(dspi, num_words); 722 + /* 723 + * Everything after this point is in a potential race with the next 724 + * interrupt, so we must never use dspi->words_in_flight again since it 725 + * might already be modified by the next dspi_fifo_write. 726 + */ 727 + 728 + spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, 729 + dspi->progress, !dspi->irq); 768 730 } 769 731 770 732 static int dspi_rxtx(struct fsl_dspi *dspi) 771 733 { 772 - struct spi_message *msg = dspi->cur_msg; 773 - enum dspi_trans_mode trans_mode; 774 - u16 spi_tcnt; 775 - u32 spi_tcr; 776 - 777 - spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer, 778 - dspi->progress, !dspi->irq); 779 - 780 - /* Get transfer counter (in number of SPI transfers). It was 781 - * reset to 0 when transfer(s) were started. 782 - */ 783 - regmap_read(dspi->regmap, SPI_TCR, &spi_tcr); 784 - spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr); 785 - /* Update total number of bytes that were transferred */ 786 - msg->actual_length += spi_tcnt * dspi->bytes_per_word; 787 - dspi->progress += spi_tcnt; 788 - 789 - trans_mode = dspi->devtype_data->trans_mode; 790 - if (trans_mode == DSPI_EOQ_MODE) 791 - dspi_eoq_read(dspi); 792 - else if (trans_mode == DSPI_TCFQ_MODE) 793 - dspi_tcfq_read(dspi); 734 + dspi_fifo_read(dspi); 794 735 795 736 if (!dspi->len) 796 737 /* Success! 
*/ 797 738 return 0; 798 739 799 - spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, 800 - dspi->progress, !dspi->irq); 801 - 802 - if (trans_mode == DSPI_EOQ_MODE) 803 - dspi_eoq_write(dspi); 804 - else if (trans_mode == DSPI_TCFQ_MODE) 805 - dspi_tcfq_write(dspi); 740 + dspi_fifo_write(dspi); 806 741 807 742 return -EINPROGRESS; 808 743 } ··· 874 693 regmap_read(dspi->regmap, SPI_SR, &spi_sr); 875 694 regmap_write(dspi->regmap, SPI_SR, spi_sr); 876 695 877 - if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) 696 + if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)) 878 697 break; 879 698 } while (--tries); 880 699 ··· 892 711 regmap_read(dspi->regmap, SPI_SR, &spi_sr); 893 712 regmap_write(dspi->regmap, SPI_SR, spi_sr); 894 713 895 - if (!(spi_sr & SPI_SR_EOQF)) 714 + if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))) 896 715 return IRQ_NONE; 897 716 898 - if (dspi_rxtx(dspi) == 0) { 899 - dspi->waitflags = 1; 900 - wake_up_interruptible(&dspi->waitq); 901 - } 717 + if (dspi_rxtx(dspi) == 0) 718 + complete(&dspi->xfer_done); 902 719 903 720 return IRQ_HANDLED; 904 721 } ··· 906 727 { 907 728 struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); 908 729 struct spi_device *spi = message->spi; 909 - enum dspi_trans_mode trans_mode; 910 730 struct spi_transfer *transfer; 911 731 int status = 0; 912 732 ··· 935 757 dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; 936 758 } 937 759 938 - dspi->void_write_data = dspi->cur_chip->void_write_data; 939 - 940 760 dspi->tx = transfer->tx_buf; 941 761 dspi->rx = transfer->rx_buf; 942 - dspi->rx_end = dspi->rx + transfer->len; 943 762 dspi->len = transfer->len; 944 763 dspi->progress = 0; 945 - /* Validated transfer specific frame size (defaults applied) */ 946 - dspi->bits_per_word = transfer->bits_per_word; 947 - if (transfer->bits_per_word <= 8) 948 - dspi->bytes_per_word = 1; 949 - else if (transfer->bits_per_word <= 16) 950 - dspi->bytes_per_word = 2; 951 - else 952 - dspi->bytes_per_word = 4; 953 764 954 765 regmap_update_bits(dspi->regmap, 
SPI_MCR, 955 766 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, 956 767 SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); 957 - regmap_write(dspi->regmap, SPI_CTAR(0), 958 - dspi->cur_chip->ctar_val | 959 - SPI_FRAME_BITS(transfer->bits_per_word)); 960 - if (dspi->devtype_data->xspi_mode) 961 - regmap_write(dspi->regmap, SPI_CTARE(0), 962 - SPI_FRAME_EBITS(transfer->bits_per_word) | 963 - SPI_CTARE_DTCP(1)); 964 768 965 769 spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, 966 770 dspi->progress, !dspi->irq); 967 771 968 - trans_mode = dspi->devtype_data->trans_mode; 969 - switch (trans_mode) { 970 - case DSPI_EOQ_MODE: 971 - regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); 972 - dspi_eoq_write(dspi); 973 - break; 974 - case DSPI_TCFQ_MODE: 975 - regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE); 976 - dspi_tcfq_write(dspi); 977 - break; 978 - case DSPI_DMA_MODE: 979 - regmap_write(dspi->regmap, SPI_RSER, 980 - SPI_RSER_TFFFE | SPI_RSER_TFFFD | 981 - SPI_RSER_RFDFE | SPI_RSER_RFDFD); 772 + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { 982 773 status = dspi_dma_xfer(dspi); 983 - break; 984 - default: 985 - dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", 986 - trans_mode); 987 - status = -EINVAL; 988 - goto out; 989 - } 774 + } else { 775 + dspi_fifo_write(dspi); 990 776 991 - if (!dspi->irq) { 992 - do { 993 - status = dspi_poll(dspi); 994 - } while (status == -EINPROGRESS); 995 - } else if (trans_mode != DSPI_DMA_MODE) { 996 - status = wait_event_interruptible(dspi->waitq, 997 - dspi->waitflags); 998 - dspi->waitflags = 0; 777 + if (dspi->irq) { 778 + wait_for_completion(&dspi->xfer_done); 779 + reinit_completion(&dspi->xfer_done); 780 + } else { 781 + do { 782 + status = dspi_poll(dspi); 783 + } while (status == -EINPROGRESS); 784 + } 999 785 } 1000 786 if (status) 1001 - dev_err(&dspi->pdev->dev, 1002 - "Waiting for transfer to complete failed!\n"); 787 + break; 1003 788 1004 789 spi_transfer_delay_exec(transfer); 1005 790 } 1006 791 1007 - out: 1008 792 
message->status = status; 1009 793 spi_finalize_current_message(ctlr); 1010 794 ··· 1003 863 cs_sck_delay = pdata->cs_sck_delay; 1004 864 sck_cs_delay = pdata->sck_cs_delay; 1005 865 } 1006 - 1007 - chip->void_write_data = 0; 1008 866 1009 867 clkrate = clk_get_rate(dspi->clk); 1010 868 hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate); ··· 1047 909 } 1048 910 1049 911 static const struct of_device_id fsl_dspi_dt_ids[] = { 1050 - { .compatible = "fsl,vf610-dspi", .data = &vf610_data, }, 1051 - { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, }, 1052 - { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, }, 912 + { 913 + .compatible = "fsl,vf610-dspi", 914 + .data = &devtype_data[VF610], 915 + }, { 916 + .compatible = "fsl,ls1021a-v1.0-dspi", 917 + .data = &devtype_data[LS1021A], 918 + }, { 919 + .compatible = "fsl,ls1012a-dspi", 920 + .data = &devtype_data[LS1012A], 921 + }, { 922 + .compatible = "fsl,ls1028a-dspi", 923 + .data = &devtype_data[LS1028A], 924 + }, { 925 + .compatible = "fsl,ls1043a-dspi", 926 + .data = &devtype_data[LS1043A], 927 + }, { 928 + .compatible = "fsl,ls1046a-dspi", 929 + .data = &devtype_data[LS1046A], 930 + }, { 931 + .compatible = "fsl,ls2080a-dspi", 932 + .data = &devtype_data[LS2080A], 933 + }, { 934 + .compatible = "fsl,ls2085a-dspi", 935 + .data = &devtype_data[LS2085A], 936 + }, { 937 + .compatible = "fsl,lx2160a-dspi", 938 + .data = &devtype_data[LX2160A], 939 + }, 1053 940 { /* sentinel */ } 1054 941 }; 1055 942 MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); ··· 1160 997 }, 1161 998 }; 1162 999 1163 - static void dspi_init(struct fsl_dspi *dspi) 1000 + static int dspi_init(struct fsl_dspi *dspi) 1164 1001 { 1165 - unsigned int mcr = SPI_MCR_PCSIS; 1002 + unsigned int mcr; 1166 1003 1167 - if (dspi->devtype_data->xspi_mode) 1004 + /* Set idle states for all chip select signals to high */ 1005 + mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->num_chipselect - 1, 0)); 1006 + 1007 + if (dspi->devtype_data->trans_mode == 
DSPI_XSPI_MODE) 1168 1008 mcr |= SPI_MCR_XSPI; 1169 1009 if (!spi_controller_is_slave(dspi->ctlr)) 1170 1010 mcr |= SPI_MCR_MASTER; 1171 1011 1172 1012 regmap_write(dspi->regmap, SPI_MCR, mcr); 1173 1013 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); 1174 - if (dspi->devtype_data->xspi_mode) 1175 - regmap_write(dspi->regmap, SPI_CTARE(0), 1176 - SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1)); 1014 + 1015 + switch (dspi->devtype_data->trans_mode) { 1016 + case DSPI_EOQ_MODE: 1017 + regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); 1018 + break; 1019 + case DSPI_XSPI_MODE: 1020 + regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE); 1021 + break; 1022 + case DSPI_DMA_MODE: 1023 + regmap_write(dspi->regmap, SPI_RSER, 1024 + SPI_RSER_TFFFE | SPI_RSER_TFFFD | 1025 + SPI_RSER_RFDFE | SPI_RSER_RFDFD); 1026 + break; 1027 + default: 1028 + dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n", 1029 + dspi->devtype_data->trans_mode); 1030 + return -EINVAL; 1031 + } 1032 + 1033 + return 0; 1177 1034 } 1178 1035 1179 1036 static int dspi_slave_abort(struct spi_master *master) ··· 1204 1021 * Terminate all pending DMA transactions for the SPI working 1205 1022 * in SLAVE mode. 1206 1023 */ 1207 - dmaengine_terminate_sync(dspi->dma->chan_rx); 1208 - dmaengine_terminate_sync(dspi->dma->chan_tx); 1024 + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { 1025 + dmaengine_terminate_sync(dspi->dma->chan_rx); 1026 + dmaengine_terminate_sync(dspi->dma->chan_tx); 1027 + } 1209 1028 1210 1029 /* Clear the internal DSPI RX and TX FIFO buffers */ 1211 1030 regmap_update_bits(dspi->regmap, SPI_MCR, ··· 1217 1032 return 0; 1218 1033 } 1219 1034 1035 + /* 1036 + * EOQ mode will inevitably deassert its PCS signal on last word in a queue 1037 + * (hardware limitation), so we need to inform the spi_device that larger 1038 + * buffers than the FIFO size are going to have the chip select randomly 1039 + * toggling, so it has a chance to adapt its message sizes. 
1040 + */ 1041 + static size_t dspi_max_message_size(struct spi_device *spi) 1042 + { 1043 + struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller); 1044 + 1045 + if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE) 1046 + return dspi->devtype_data->fifo_size; 1047 + 1048 + return SIZE_MAX; 1049 + } 1050 + 1220 1051 static int dspi_probe(struct platform_device *pdev) 1221 1052 { 1222 1053 struct device_node *np = pdev->dev.of_node; 1223 1054 const struct regmap_config *regmap_config; 1224 1055 struct fsl_dspi_platform_data *pdata; 1225 1056 struct spi_controller *ctlr; 1226 - int ret, cs_num, bus_num; 1057 + int ret, cs_num, bus_num = -1; 1227 1058 struct fsl_dspi *dspi; 1228 1059 struct resource *res; 1229 1060 void __iomem *base; 1061 + bool big_endian; 1230 1062 1231 1063 ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); 1232 1064 if (!ctlr) ··· 1255 1053 1256 1054 ctlr->setup = dspi_setup; 1257 1055 ctlr->transfer_one_message = dspi_transfer_one_message; 1056 + ctlr->max_message_size = dspi_max_message_size; 1258 1057 ctlr->dev.of_node = pdev->dev.of_node; 1259 1058 1260 1059 ctlr->cleanup = dspi_cleanup; ··· 1267 1064 ctlr->num_chipselect = pdata->cs_num; 1268 1065 ctlr->bus_num = pdata->bus_num; 1269 1066 1270 - dspi->devtype_data = &coldfire_data; 1067 + /* Only Coldfire uses platform data */ 1068 + dspi->devtype_data = &devtype_data[MCF5441X]; 1069 + big_endian = true; 1271 1070 } else { 1272 1071 1273 1072 ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); ··· 1279 1074 } 1280 1075 ctlr->num_chipselect = cs_num; 1281 1076 1282 - ret = of_property_read_u32(np, "bus-num", &bus_num); 1283 - if (ret < 0) { 1284 - dev_err(&pdev->dev, "can't get bus-num\n"); 1285 - goto out_ctlr_put; 1286 - } 1077 + of_property_read_u32(np, "bus-num", &bus_num); 1287 1078 ctlr->bus_num = bus_num; 1288 1079 1289 1080 if (of_property_read_bool(np, "spi-slave")) ··· 1291 1090 ret = -EFAULT; 1292 1091 goto out_ctlr_put; 1293 1092 } 1093 + 1094 + 
big_endian = of_device_is_big_endian(np); 1095 + } 1096 + if (big_endian) { 1097 + dspi->pushr_cmd = 0; 1098 + dspi->pushr_tx = 2; 1099 + } else { 1100 + dspi->pushr_cmd = 2; 1101 + dspi->pushr_tx = 0; 1294 1102 } 1295 1103 1296 - if (dspi->devtype_data->xspi_mode) 1104 + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 1297 1105 ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1298 1106 else 1299 1107 ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); ··· 1314 1104 goto out_ctlr_put; 1315 1105 } 1316 1106 1317 - if (dspi->devtype_data->xspi_mode) 1107 + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) 1318 1108 regmap_config = &dspi_xspi_regmap_config[0]; 1319 1109 else 1320 1110 regmap_config = &dspi_regmap_config; ··· 1326 1116 goto out_ctlr_put; 1327 1117 } 1328 1118 1329 - if (dspi->devtype_data->xspi_mode) { 1119 + if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) { 1330 1120 dspi->regmap_pushr = devm_regmap_init_mmio( 1331 1121 &pdev->dev, base + SPI_PUSHR, 1332 1122 &dspi_xspi_regmap_config[1]); ··· 1349 1139 if (ret) 1350 1140 goto out_ctlr_put; 1351 1141 1352 - dspi_init(dspi); 1353 - 1354 - if (dspi->devtype_data->trans_mode == DSPI_TCFQ_MODE) 1355 - goto poll_mode; 1142 + ret = dspi_init(dspi); 1143 + if (ret) 1144 + goto out_clk_put; 1356 1145 1357 1146 dspi->irq = platform_get_irq(pdev, 0); 1358 1147 if (dspi->irq <= 0) { ··· 1368 1159 goto out_clk_put; 1369 1160 } 1370 1161 1371 - init_waitqueue_head(&dspi->waitq); 1162 + init_completion(&dspi->xfer_done); 1372 1163 1373 1164 poll_mode: 1374 1165 ··· 1383 1174 ctlr->max_speed_hz = 1384 1175 clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor; 1385 1176 1386 - ctlr->ptp_sts_supported = dspi->devtype_data->ptp_sts_supported; 1177 + if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE) 1178 + ctlr->ptp_sts_supported = true; 1387 1179 1388 1180 platform_set_drvdata(pdev, ctlr); 1389 1181
+3 -6
drivers/spi/spi-fsl-lpspi.c
··· 86 86 #define TCR_RXMSK BIT(19) 87 87 #define TCR_TXMSK BIT(18) 88 88 89 - static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128}; 90 - 91 89 struct lpspi_config { 92 90 u8 bpw; 93 91 u8 chip_select; ··· 123 125 struct completion dma_rx_completion; 124 126 struct completion dma_tx_completion; 125 127 126 - int chipselect[0]; 128 + int chipselect[]; 127 129 }; 128 130 129 131 static const struct of_device_id fsl_lpspi_dt_ids[] = { ··· 329 331 } 330 332 331 333 for (prescale = 0; prescale < 8; prescale++) { 332 - scldiv = perclk_rate / 333 - (clkdivs[prescale] * config.speed_hz) - 2; 334 + scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2; 334 335 if (scldiv < 256) { 335 336 fsl_lpspi->config.prescale = prescale; 336 337 break; 337 338 } 338 339 } 339 340 340 - if (prescale == 8 && scldiv >= 256) 341 + if (scldiv >= 256) 341 342 return -EINVAL; 342 343 343 344 writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
+11 -15
drivers/spi/spi-geni-qcom.c
··· 6 6 #include <linux/io.h> 7 7 #include <linux/log2.h> 8 8 #include <linux/module.h> 9 - #include <linux/of.h> 10 9 #include <linux/platform_device.h> 11 10 #include <linux/pm_runtime.h> 12 11 #include <linux/qcom-geni-se.h> ··· 535 536 struct spi_geni_master *mas; 536 537 void __iomem *base; 537 538 struct clk *clk; 539 + struct device *dev = &pdev->dev; 538 540 539 541 irq = platform_get_irq(pdev, 0); 540 542 if (irq < 0) ··· 545 545 if (IS_ERR(base)) 546 546 return PTR_ERR(base); 547 547 548 - clk = devm_clk_get(&pdev->dev, "se"); 549 - if (IS_ERR(clk)) { 550 - dev_err(&pdev->dev, "Err getting SE Core clk %ld\n", 551 - PTR_ERR(clk)); 548 + clk = devm_clk_get(dev, "se"); 549 + if (IS_ERR(clk)) 552 550 return PTR_ERR(clk); 553 - } 554 551 555 - spi = spi_alloc_master(&pdev->dev, sizeof(*mas)); 552 + spi = spi_alloc_master(dev, sizeof(*mas)); 556 553 if (!spi) 557 554 return -ENOMEM; 558 555 559 556 platform_set_drvdata(pdev, spi); 560 557 mas = spi_master_get_devdata(spi); 561 558 mas->irq = irq; 562 - mas->dev = &pdev->dev; 563 - mas->se.dev = &pdev->dev; 564 - mas->se.wrapper = dev_get_drvdata(pdev->dev.parent); 559 + mas->dev = dev; 560 + mas->se.dev = dev; 561 + mas->se.wrapper = dev_get_drvdata(dev->parent); 565 562 mas->se.base = base; 566 563 mas->se.clk = clk; 567 564 568 565 spi->bus_num = -1; 569 - spi->dev.of_node = pdev->dev.of_node; 566 + spi->dev.of_node = dev->of_node; 570 567 spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH; 571 568 spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 572 569 spi->num_chipselect = 4; ··· 576 579 577 580 init_completion(&mas->xfer_done); 578 581 spin_lock_init(&mas->lock); 579 - pm_runtime_enable(&pdev->dev); 582 + pm_runtime_enable(dev); 580 583 581 584 ret = spi_geni_init(mas); 582 585 if (ret) 583 586 goto spi_geni_probe_runtime_disable; 584 587 585 - ret = request_irq(mas->irq, geni_spi_isr, 586 - IRQF_TRIGGER_HIGH, "spi_geni", spi); 588 + ret = request_irq(mas->irq, geni_spi_isr, 0, 
dev_name(dev), spi); 587 589 if (ret) 588 590 goto spi_geni_probe_runtime_disable; 589 591 ··· 594 598 spi_geni_probe_free_irq: 595 599 free_irq(mas->irq, spi); 596 600 spi_geni_probe_runtime_disable: 597 - pm_runtime_disable(&pdev->dev); 601 + pm_runtime_disable(dev); 598 602 spi_master_put(spi); 599 603 return ret; 600 604 }
+98 -1
drivers/spi/spi-hisi-sfc-v3xx.c
··· 7 7 8 8 #include <linux/acpi.h> 9 9 #include <linux/bitops.h> 10 + #include <linux/dmi.h> 10 11 #include <linux/iopoll.h> 11 12 #include <linux/module.h> 12 13 #include <linux/platform_device.h> ··· 18 17 #define HISI_SFC_V3XX_VERSION (0x1f8) 19 18 20 19 #define HISI_SFC_V3XX_CMD_CFG (0x300) 20 + #define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17) 21 + #define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17) 22 + #define HISI_SFC_V3XX_CMD_CFG_FULL_DIO (3 << 17) 23 + #define HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT (5 << 17) 24 + #define HISI_SFC_V3XX_CMD_CFG_QUAD_IO (6 << 17) 25 + #define HISI_SFC_V3XX_CMD_CFG_FULL_QIO (7 << 17) 21 26 #define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9 22 27 #define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8) 23 28 #define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7) ··· 168 161 if (op->addr.nbytes) 169 162 config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; 170 163 164 + switch (op->data.buswidth) { 165 + case 0 ... 1: 166 + break; 167 + case 2: 168 + if (op->addr.buswidth <= 1) { 169 + config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT; 170 + } else if (op->addr.buswidth == 2) { 171 + if (op->cmd.buswidth <= 1) { 172 + config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IO; 173 + } else if (op->cmd.buswidth == 2) { 174 + config |= HISI_SFC_V3XX_CMD_CFG_FULL_DIO; 175 + } else { 176 + return -EIO; 177 + } 178 + } else { 179 + return -EIO; 180 + } 181 + break; 182 + case 4: 183 + if (op->addr.buswidth <= 1) { 184 + config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT; 185 + } else if (op->addr.buswidth == 4) { 186 + if (op->cmd.buswidth <= 1) { 187 + config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IO; 188 + } else if (op->cmd.buswidth == 4) { 189 + config |= HISI_SFC_V3XX_CMD_CFG_FULL_QIO; 190 + } else { 191 + return -EIO; 192 + } 193 + } else { 194 + return -EIO; 195 + } 196 + break; 197 + default: 198 + return -EOPNOTSUPP; 199 + } 200 + 171 201 if (op->data.dir != SPI_MEM_NO_DATA) { 172 202 config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF; 173 203 config |= 
HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK; ··· 251 207 .exec_op = hisi_sfc_v3xx_exec_op, 252 208 }; 253 209 210 + static int hisi_sfc_v3xx_buswidth_override_bits; 211 + 212 + /* 213 + * ACPI FW does not allow us to currently set the device buswidth, so quirk it 214 + * depending on the board. 215 + */ 216 + static int __init hisi_sfc_v3xx_dmi_quirk(const struct dmi_system_id *d) 217 + { 218 + hisi_sfc_v3xx_buswidth_override_bits = SPI_RX_QUAD | SPI_TX_QUAD; 219 + 220 + return 0; 221 + } 222 + 223 + static const struct dmi_system_id hisi_sfc_v3xx_dmi_quirk_table[] = { 224 + { 225 + .callback = hisi_sfc_v3xx_dmi_quirk, 226 + .matches = { 227 + DMI_MATCH(DMI_SYS_VENDOR, "Huawei"), 228 + DMI_MATCH(DMI_PRODUCT_NAME, "D06"), 229 + }, 230 + }, 231 + { 232 + .callback = hisi_sfc_v3xx_dmi_quirk, 233 + .matches = { 234 + DMI_MATCH(DMI_SYS_VENDOR, "Huawei"), 235 + DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 2280 V2"), 236 + }, 237 + }, 238 + { 239 + .callback = hisi_sfc_v3xx_dmi_quirk, 240 + .matches = { 241 + DMI_MATCH(DMI_SYS_VENDOR, "Huawei"), 242 + DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 200 (Model 2280)"), 243 + }, 244 + }, 245 + {} 246 + }; 247 + 254 248 static int hisi_sfc_v3xx_probe(struct platform_device *pdev) 255 249 { 256 250 struct device *dev = &pdev->dev; ··· 303 221 304 222 ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | 305 223 SPI_TX_DUAL | SPI_TX_QUAD; 224 + 225 + ctlr->buswidth_override_bits = hisi_sfc_v3xx_buswidth_override_bits; 306 226 307 227 host = spi_controller_get_devdata(ctlr); 308 228 host->dev = dev; ··· 361 277 .probe = hisi_sfc_v3xx_probe, 362 278 }; 363 279 364 - module_platform_driver(hisi_sfc_v3xx_spi_driver); 280 + static int __init hisi_sfc_v3xx_spi_init(void) 281 + { 282 + dmi_check_system(hisi_sfc_v3xx_dmi_quirk_table); 283 + 284 + return platform_driver_register(&hisi_sfc_v3xx_spi_driver); 285 + } 286 + 287 + static void __exit hisi_sfc_v3xx_spi_exit(void) 288 + { 289 + platform_driver_unregister(&hisi_sfc_v3xx_spi_driver); 290 + } 291 + 292 + 
module_init(hisi_sfc_v3xx_spi_init); 293 + module_exit(hisi_sfc_v3xx_spi_exit); 365 294 366 295 MODULE_LICENSE("GPL"); 367 296 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+4 -3
drivers/spi/spi-mem.c
··· 418 418 struct spi_controller *ctlr = mem->spi->controller; 419 419 size_t len; 420 420 421 - len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; 422 - 423 421 if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size) 424 422 return ctlr->mem_ops->adjust_op_size(mem, op); 425 423 426 424 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { 425 + len = sizeof(op->cmd.opcode) + op->addr.nbytes + 426 + op->dummy.nbytes; 427 + 427 428 if (len > spi_max_transfer_size(mem->spi)) 428 429 return -EINVAL; 429 430 ··· 488 487 * This function is creating a direct mapping descriptor which can then be used 489 488 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write(). 490 489 * If the SPI controller driver does not support direct mapping, this function 491 - * fallback to an implementation using spi_mem_exec_op(), so that the caller 490 + * falls back to an implementation using spi_mem_exec_op(), so that the caller 492 491 * doesn't have to bother implementing a fallback on his own. 493 492 * 494 493 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+366 -124
drivers/spi/spi-meson-spicc.c
··· 9 9 10 10 #include <linux/bitfield.h> 11 11 #include <linux/clk.h> 12 + #include <linux/clk-provider.h> 12 13 #include <linux/device.h> 13 14 #include <linux/io.h> 14 15 #include <linux/kernel.h> 15 16 #include <linux/module.h> 16 17 #include <linux/of.h> 18 + #include <linux/of_device.h> 17 19 #include <linux/platform_device.h> 18 20 #include <linux/spi/spi.h> 19 21 #include <linux/types.h> ··· 35 33 * to have a CS go down over the full transfer 36 34 */ 37 35 38 - #define SPICC_MAX_FREQ 30000000 39 36 #define SPICC_MAX_BURST 128 40 37 41 38 /* Register Map */ ··· 106 105 #define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */ 107 106 #define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */ 108 107 #define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */ 109 - #define SPICC_DLYCTL_W1_MASK GENMASK(21, 16) /* Delay Control Write-Only */ 108 + #define SPICC_MO_DELAY_MASK GENMASK(17, 16) /* Master Output Delay */ 109 + #define SPICC_MO_NO_DELAY 0 110 + #define SPICC_MO_DELAY_1_CYCLE 1 111 + #define SPICC_MO_DELAY_2_CYCLE 2 112 + #define SPICC_MO_DELAY_3_CYCLE 3 113 + #define SPICC_MI_DELAY_MASK GENMASK(19, 18) /* Master Input Delay */ 114 + #define SPICC_MI_NO_DELAY 0 115 + #define SPICC_MI_DELAY_1_CYCLE 1 116 + #define SPICC_MI_DELAY_2_CYCLE 2 117 + #define SPICC_MI_DELAY_3_CYCLE 3 118 + #define SPICC_MI_CAP_DELAY_MASK GENMASK(21, 20) /* Master Capture Delay */ 119 + #define SPICC_CAP_AHEAD_2_CYCLE 0 120 + #define SPICC_CAP_AHEAD_1_CYCLE 1 121 + #define SPICC_CAP_NO_DELAY 2 122 + #define SPICC_CAP_DELAY_1_CYCLE 3 110 123 #define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */ 111 124 #define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */ 112 125 ··· 128 113 129 114 #define SPICC_DWADDR 0x24 /* Write Address of DMA */ 130 115 116 + #define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */ 117 + #define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0) 118 + #define SPICC_ENH_DATARATE_MASK GENMASK(23, 
16) 119 + #define SPICC_ENH_DATARATE_EN BIT(24) 120 + #define SPICC_ENH_MOSI_OEN BIT(25) 121 + #define SPICC_ENH_CLK_OEN BIT(26) 122 + #define SPICC_ENH_CS_OEN BIT(27) 123 + #define SPICC_ENH_CLK_CS_DELAY_EN BIT(28) 124 + #define SPICC_ENH_MAIN_CLK_AO BIT(29) 125 + 131 126 #define writel_bits_relaxed(mask, val, addr) \ 132 127 writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr) 133 128 134 - #define SPICC_BURST_MAX 16 135 - #define SPICC_FIFO_HALF 10 129 + struct meson_spicc_data { 130 + unsigned int max_speed_hz; 131 + unsigned int min_speed_hz; 132 + unsigned int fifo_size; 133 + bool has_oen; 134 + bool has_enhance_clk_div; 135 + bool has_pclk; 136 + }; 136 137 137 138 struct meson_spicc_device { 138 139 struct spi_master *master; 139 140 struct platform_device *pdev; 140 141 void __iomem *base; 141 142 struct clk *core; 143 + struct clk *pclk; 144 + struct clk *clk; 142 145 struct spi_message *message; 143 146 struct spi_transfer *xfer; 147 + const struct meson_spicc_data *data; 144 148 u8 *tx_buf; 145 149 u8 *rx_buf; 146 150 unsigned int bytes_per_word; 147 151 unsigned long tx_remain; 148 - unsigned long txb_remain; 149 152 unsigned long rx_remain; 150 - unsigned long rxb_remain; 151 153 unsigned long xfer_remain; 152 - bool is_burst_end; 153 - bool is_last_burst; 154 154 }; 155 + 156 + static void meson_spicc_oen_enable(struct meson_spicc_device *spicc) 157 + { 158 + u32 conf; 159 + 160 + if (!spicc->data->has_oen) 161 + return; 162 + 163 + conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) | 164 + SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN; 165 + 166 + writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0); 167 + } 155 168 156 169 static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc) 157 170 { ··· 189 146 190 147 static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc) 191 148 { 192 - return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF_EN, 149 + return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF, 193 150 
readl_relaxed(spicc->base + SPICC_STATREG)); 194 151 } 195 152 ··· 244 201 spicc->base + SPICC_TXDATA); 245 202 } 246 203 247 - static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc, 248 - u32 irq_ctrl) 204 + static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc) 249 205 { 250 - if (spicc->rx_remain > SPICC_FIFO_HALF) 251 - irq_ctrl |= SPICC_RH_EN; 252 - else 253 - irq_ctrl |= SPICC_RR_EN; 254 206 255 - return irq_ctrl; 256 - } 257 - 258 - static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc, 259 - unsigned int burst_len) 260 - { 207 + unsigned int burst_len = min_t(unsigned int, 208 + spicc->xfer_remain / 209 + spicc->bytes_per_word, 210 + spicc->data->fifo_size); 261 211 /* Setup Xfer variables */ 262 212 spicc->tx_remain = burst_len; 263 213 spicc->rx_remain = burst_len; 264 214 spicc->xfer_remain -= burst_len * spicc->bytes_per_word; 265 - spicc->is_burst_end = false; 266 - if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain) 267 - spicc->is_last_burst = true; 268 - else 269 - spicc->is_last_burst = false; 270 215 271 216 /* Setup burst length */ 272 217 writel_bits_relaxed(SPICC_BURSTLENGTH_MASK, 273 218 FIELD_PREP(SPICC_BURSTLENGTH_MASK, 274 - burst_len), 219 + burst_len - 1), 275 220 spicc->base + SPICC_CONREG); 276 221 277 222 /* Fill TX FIFO */ ··· 269 238 static irqreturn_t meson_spicc_irq(int irq, void *data) 270 239 { 271 240 struct meson_spicc_device *spicc = (void *) data; 272 - u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG); 273 - u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl; 274 241 275 - ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN); 242 + writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG); 276 243 277 244 /* Empty RX FIFO */ 278 245 meson_spicc_rx(spicc); 279 246 280 - /* Enable TC interrupt since we transferred everything */ 281 - if (!spicc->tx_remain && !spicc->rx_remain) { 282 - spicc->is_burst_end = true; 247 + if (!spicc->xfer_remain) { 248 + 
/* Disable all IRQs */ 249 + writel(0, spicc->base + SPICC_INTREG); 283 250 284 - /* Enable TC interrupt */ 285 - ctrl |= SPICC_TC_EN; 251 + spi_finalize_current_transfer(spicc->master); 286 252 287 - /* Reload IRQ status */ 288 - stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl; 253 + return IRQ_HANDLED; 289 254 } 290 255 291 - /* Check transfer complete */ 292 - if ((stat & SPICC_TC) && spicc->is_burst_end) { 293 - unsigned int burst_len; 256 + /* Setup burst */ 257 + meson_spicc_setup_burst(spicc); 294 258 295 - /* Clear TC bit */ 296 - writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG); 297 - 298 - /* Disable TC interrupt */ 299 - ctrl &= ~SPICC_TC_EN; 300 - 301 - if (spicc->is_last_burst) { 302 - /* Disable all IRQs */ 303 - writel(0, spicc->base + SPICC_INTREG); 304 - 305 - spi_finalize_current_transfer(spicc->master); 306 - 307 - return IRQ_HANDLED; 308 - } 309 - 310 - burst_len = min_t(unsigned int, 311 - spicc->xfer_remain / spicc->bytes_per_word, 312 - SPICC_BURST_MAX); 313 - 314 - /* Setup burst */ 315 - meson_spicc_setup_burst(spicc, burst_len); 316 - 317 - /* Restart burst */ 318 - writel_bits_relaxed(SPICC_XCH, SPICC_XCH, 319 - spicc->base + SPICC_CONREG); 320 - } 321 - 322 - /* Setup RX interrupt trigger */ 323 - ctrl = meson_spicc_setup_rx_irq(spicc, ctrl); 324 - 325 - /* Reconfigure interrupts */ 326 - writel(ctrl, spicc->base + SPICC_INTREG); 259 + /* Start burst */ 260 + writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); 327 261 328 262 return IRQ_HANDLED; 329 263 } 330 264 331 - static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf, 332 - u32 speed) 265 + static void meson_spicc_auto_io_delay(struct meson_spicc_device *spicc) 333 266 { 334 - unsigned long parent, value; 335 - unsigned int i, div; 267 + u32 div, hz; 268 + u32 mi_delay, cap_delay; 269 + u32 conf; 336 270 337 - parent = clk_get_rate(spicc->core); 338 - 339 - /* Find closest inferior/equal possible speed */ 340 - for (i = 0 ; i 
< 7 ; ++i) { 341 - /* 2^(data_rate+2) */ 342 - value = parent >> (i + 2); 343 - 344 - if (value <= speed) 345 - break; 271 + if (spicc->data->has_enhance_clk_div) { 272 + div = FIELD_GET(SPICC_ENH_DATARATE_MASK, 273 + readl_relaxed(spicc->base + SPICC_ENH_CTL0)); 274 + div++; 275 + div <<= 1; 276 + } else { 277 + div = FIELD_GET(SPICC_DATARATE_MASK, 278 + readl_relaxed(spicc->base + SPICC_CONREG)); 279 + div += 2; 280 + div = 1 << div; 346 281 } 347 282 348 - /* If provided speed it lower than max divider, use max divider */ 349 - if (i > 7) { 350 - div = 7; 351 - dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n", 352 - speed); 353 - } else 354 - div = i; 283 + mi_delay = SPICC_MI_NO_DELAY; 284 + cap_delay = SPICC_CAP_AHEAD_2_CYCLE; 285 + hz = clk_get_rate(spicc->clk); 355 286 356 - dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n", 357 - parent, speed, value, div); 287 + if (hz >= 100000000) 288 + cap_delay = SPICC_CAP_DELAY_1_CYCLE; 289 + else if (hz >= 80000000) 290 + cap_delay = SPICC_CAP_NO_DELAY; 291 + else if (hz >= 40000000) 292 + cap_delay = SPICC_CAP_AHEAD_1_CYCLE; 293 + else if (div >= 16) 294 + mi_delay = SPICC_MI_DELAY_3_CYCLE; 295 + else if (div >= 8) 296 + mi_delay = SPICC_MI_DELAY_2_CYCLE; 297 + else if (div >= 6) 298 + mi_delay = SPICC_MI_DELAY_1_CYCLE; 358 299 359 - conf &= ~SPICC_DATARATE_MASK; 360 - conf |= FIELD_PREP(SPICC_DATARATE_MASK, div); 361 - 362 - return conf; 300 + conf = readl_relaxed(spicc->base + SPICC_TESTREG); 301 + conf &= ~(SPICC_MO_DELAY_MASK | SPICC_MI_DELAY_MASK 302 + | SPICC_MI_CAP_DELAY_MASK); 303 + conf |= FIELD_PREP(SPICC_MI_DELAY_MASK, mi_delay); 304 + conf |= FIELD_PREP(SPICC_MI_CAP_DELAY_MASK, cap_delay); 305 + writel_relaxed(conf, spicc->base + SPICC_TESTREG); 363 306 } 364 307 365 308 static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc, ··· 344 339 /* Read original configuration */ 345 340 conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG); 346 341 347 - /* 
Select closest divider */ 348 - conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz); 349 - 350 342 /* Setup word width */ 351 343 conf &= ~SPICC_BITLENGTH_MASK; 352 344 conf |= FIELD_PREP(SPICC_BITLENGTH_MASK, ··· 352 350 /* Ignore if unchanged */ 353 351 if (conf != conf_orig) 354 352 writel_relaxed(conf, spicc->base + SPICC_CONREG); 353 + 354 + clk_set_rate(spicc->clk, xfer->speed_hz); 355 + 356 + meson_spicc_auto_io_delay(spicc); 357 + 358 + writel_relaxed(0, spicc->base + SPICC_DMAREG); 359 + } 360 + 361 + static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc) 362 + { 363 + u32 data; 364 + 365 + if (spicc->data->has_oen) 366 + writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 367 + SPICC_ENH_MAIN_CLK_AO, 368 + spicc->base + SPICC_ENH_CTL0); 369 + 370 + writel_bits_relaxed(SPICC_FIFORST_W1_MASK, SPICC_FIFORST_W1_MASK, 371 + spicc->base + SPICC_TESTREG); 372 + 373 + while (meson_spicc_rxready(spicc)) 374 + data = readl_relaxed(spicc->base + SPICC_RXDATA); 375 + 376 + if (spicc->data->has_oen) 377 + writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0, 378 + spicc->base + SPICC_ENH_CTL0); 355 379 } 356 380 357 381 static int meson_spicc_transfer_one(struct spi_master *master, ··· 385 357 struct spi_transfer *xfer) 386 358 { 387 359 struct meson_spicc_device *spicc = spi_master_get_devdata(master); 388 - unsigned int burst_len; 389 - u32 irq = 0; 390 360 391 361 /* Store current transfer */ 392 362 spicc->xfer = xfer; ··· 398 372 spicc->bytes_per_word = 399 373 DIV_ROUND_UP(spicc->xfer->bits_per_word, 8); 400 374 375 + if (xfer->len % spicc->bytes_per_word) 376 + return -EINVAL; 377 + 401 378 /* Setup transfer parameters */ 402 379 meson_spicc_setup_xfer(spicc, xfer); 403 380 404 - burst_len = min_t(unsigned int, 405 - spicc->xfer_remain / spicc->bytes_per_word, 406 - SPICC_BURST_MAX); 381 + meson_spicc_reset_fifo(spicc); 407 382 408 - meson_spicc_setup_burst(spicc, burst_len); 409 - 410 - irq = meson_spicc_setup_rx_irq(spicc, irq); 383 + /* Setup burst */ 
384 + meson_spicc_setup_burst(spicc); 411 385 412 386 /* Start burst */ 413 387 writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); 414 388 415 389 /* Enable interrupts */ 416 - writel_relaxed(irq, spicc->base + SPICC_INTREG); 390 + writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); 417 391 418 392 return 1; 419 393 } ··· 470 444 /* Setup no wait cycles by default */ 471 445 writel_relaxed(0, spicc->base + SPICC_PERIODREG); 472 446 473 - writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG); 447 + writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG); 474 448 475 449 return 0; 476 450 } ··· 481 455 482 456 /* Disable all IRQs */ 483 457 writel(0, spicc->base + SPICC_INTREG); 484 - 485 - /* Disable controller */ 486 - writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG); 487 458 488 459 device_reset_optional(&spicc->pdev->dev); 489 460 ··· 500 477 spi->controller_state = NULL; 501 478 } 502 479 480 + /* 481 + * The Clock Mux 482 + * x-----------------x x------------x x------\ 483 + * |---| pow2 fixed div |---| pow2 div |----| | 484 + * | x-----------------x x------------x | | 485 + * src ---| | mux |-- out 486 + * | x-----------------x x------------x | | 487 + * |---| enh fixed div |---| enh div |0---| | 488 + * x-----------------x x------------x x------/ 489 + * 490 + * Clk path for GX series: 491 + * src -> pow2 fixed div -> pow2 div -> out 492 + * 493 + * Clk path for AXG series: 494 + * src -> pow2 fixed div -> pow2 div -> mux -> out 495 + * src -> enh fixed div -> enh div -> mux -> out 496 + * 497 + * Clk path for G12A series: 498 + * pclk -> pow2 fixed div -> pow2 div -> mux -> out 499 + * pclk -> enh fixed div -> enh div -> mux -> out 500 + */ 501 + 502 + static int meson_spicc_clk_init(struct meson_spicc_device *spicc) 503 + { 504 + struct device *dev = &spicc->pdev->dev; 505 + struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div; 506 + struct clk_divider *pow2_div, *enh_div; 507 + struct 
clk_mux *mux; 508 + struct clk_init_data init; 509 + struct clk *clk; 510 + struct clk_parent_data parent_data[2]; 511 + char name[64]; 512 + 513 + memset(&init, 0, sizeof(init)); 514 + memset(&parent_data, 0, sizeof(parent_data)); 515 + 516 + init.parent_data = parent_data; 517 + 518 + /* algorithm for pow2 div: rate = freq / 4 / (2 ^ N) */ 519 + 520 + pow2_fixed_div = devm_kzalloc(dev, sizeof(*pow2_fixed_div), GFP_KERNEL); 521 + if (!pow2_fixed_div) 522 + return -ENOMEM; 523 + 524 + snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev)); 525 + init.name = name; 526 + init.ops = &clk_fixed_factor_ops; 527 + init.flags = 0; 528 + if (spicc->data->has_pclk) 529 + parent_data[0].hw = __clk_get_hw(spicc->pclk); 530 + else 531 + parent_data[0].hw = __clk_get_hw(spicc->core); 532 + init.num_parents = 1; 533 + 534 + pow2_fixed_div->mult = 1, 535 + pow2_fixed_div->div = 4, 536 + pow2_fixed_div->hw.init = &init; 537 + 538 + clk = devm_clk_register(dev, &pow2_fixed_div->hw); 539 + if (WARN_ON(IS_ERR(clk))) 540 + return PTR_ERR(clk); 541 + 542 + pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL); 543 + if (!pow2_div) 544 + return -ENOMEM; 545 + 546 + snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev)); 547 + init.name = name; 548 + init.ops = &clk_divider_ops; 549 + init.flags = CLK_SET_RATE_PARENT; 550 + parent_data[0].hw = &pow2_fixed_div->hw; 551 + init.num_parents = 1; 552 + 553 + pow2_div->shift = 16, 554 + pow2_div->width = 3, 555 + pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO, 556 + pow2_div->reg = spicc->base + SPICC_CONREG; 557 + pow2_div->hw.init = &init; 558 + 559 + clk = devm_clk_register(dev, &pow2_div->hw); 560 + if (WARN_ON(IS_ERR(clk))) 561 + return PTR_ERR(clk); 562 + 563 + if (!spicc->data->has_enhance_clk_div) { 564 + spicc->clk = clk; 565 + return 0; 566 + } 567 + 568 + /* algorithm for enh div: rate = freq / 2 / (N + 1) */ 569 + 570 + enh_fixed_div = devm_kzalloc(dev, sizeof(*enh_fixed_div), GFP_KERNEL); 571 + if (!enh_fixed_div) 
572 + return -ENOMEM; 573 + 574 + snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev)); 575 + init.name = name; 576 + init.ops = &clk_fixed_factor_ops; 577 + init.flags = 0; 578 + if (spicc->data->has_pclk) 579 + parent_data[0].hw = __clk_get_hw(spicc->pclk); 580 + else 581 + parent_data[0].hw = __clk_get_hw(spicc->core); 582 + init.num_parents = 1; 583 + 584 + enh_fixed_div->mult = 1, 585 + enh_fixed_div->div = 2, 586 + enh_fixed_div->hw.init = &init; 587 + 588 + clk = devm_clk_register(dev, &enh_fixed_div->hw); 589 + if (WARN_ON(IS_ERR(clk))) 590 + return PTR_ERR(clk); 591 + 592 + enh_div = devm_kzalloc(dev, sizeof(*enh_div), GFP_KERNEL); 593 + if (!enh_div) 594 + return -ENOMEM; 595 + 596 + snprintf(name, sizeof(name), "%s#enh_div", dev_name(dev)); 597 + init.name = name; 598 + init.ops = &clk_divider_ops; 599 + init.flags = CLK_SET_RATE_PARENT; 600 + parent_data[0].hw = &enh_fixed_div->hw; 601 + init.num_parents = 1; 602 + 603 + enh_div->shift = 16, 604 + enh_div->width = 8, 605 + enh_div->reg = spicc->base + SPICC_ENH_CTL0; 606 + enh_div->hw.init = &init; 607 + 608 + clk = devm_clk_register(dev, &enh_div->hw); 609 + if (WARN_ON(IS_ERR(clk))) 610 + return PTR_ERR(clk); 611 + 612 + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); 613 + if (!mux) 614 + return -ENOMEM; 615 + 616 + snprintf(name, sizeof(name), "%s#sel", dev_name(dev)); 617 + init.name = name; 618 + init.ops = &clk_mux_ops; 619 + parent_data[0].hw = &pow2_div->hw; 620 + parent_data[1].hw = &enh_div->hw; 621 + init.num_parents = 2; 622 + init.flags = CLK_SET_RATE_PARENT; 623 + 624 + mux->mask = 0x1, 625 + mux->shift = 24, 626 + mux->reg = spicc->base + SPICC_ENH_CTL0; 627 + mux->hw.init = &init; 628 + 629 + spicc->clk = devm_clk_register(dev, &mux->hw); 630 + if (WARN_ON(IS_ERR(spicc->clk))) 631 + return PTR_ERR(spicc->clk); 632 + 633 + return 0; 634 + } 635 + 503 636 static int meson_spicc_probe(struct platform_device *pdev) 504 637 { 505 638 struct spi_master *master; 506 639 struct 
meson_spicc_device *spicc; 507 - int ret, irq, rate; 640 + int ret, irq; 508 641 509 642 master = spi_alloc_master(&pdev->dev, sizeof(*spicc)); 510 643 if (!master) { ··· 669 490 } 670 491 spicc = spi_master_get_devdata(master); 671 492 spicc->master = master; 493 + 494 + spicc->data = of_device_get_match_data(&pdev->dev); 495 + if (!spicc->data) { 496 + dev_err(&pdev->dev, "failed to get match data\n"); 497 + ret = -EINVAL; 498 + goto out_master; 499 + } 672 500 673 501 spicc->pdev = pdev; 674 502 platform_set_drvdata(pdev, spicc); ··· 686 500 ret = PTR_ERR(spicc->base); 687 501 goto out_master; 688 502 } 503 + 504 + /* Set master mode and enable controller */ 505 + writel_relaxed(SPICC_ENABLE | SPICC_MODE_MASTER, 506 + spicc->base + SPICC_CONREG); 689 507 690 508 /* Disable all IRQs */ 691 509 writel_relaxed(0, spicc->base + SPICC_INTREG); ··· 709 519 goto out_master; 710 520 } 711 521 522 + if (spicc->data->has_pclk) { 523 + spicc->pclk = devm_clk_get(&pdev->dev, "pclk"); 524 + if (IS_ERR(spicc->pclk)) { 525 + dev_err(&pdev->dev, "pclk clock request failed\n"); 526 + ret = PTR_ERR(spicc->pclk); 527 + goto out_master; 528 + } 529 + } 530 + 712 531 ret = clk_prepare_enable(spicc->core); 713 532 if (ret) { 714 533 dev_err(&pdev->dev, "core clock enable failed\n"); 715 534 goto out_master; 716 535 } 717 - rate = clk_get_rate(spicc->core); 536 + 537 + ret = clk_prepare_enable(spicc->pclk); 538 + if (ret) { 539 + dev_err(&pdev->dev, "pclk clock enable failed\n"); 540 + goto out_master; 541 + } 718 542 719 543 device_reset_optional(&pdev->dev); 720 544 ··· 740 536 SPI_BPW_MASK(16) | 741 537 SPI_BPW_MASK(8); 742 538 master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX); 743 - master->min_speed_hz = rate >> 9; 539 + master->min_speed_hz = spicc->data->min_speed_hz; 540 + master->max_speed_hz = spicc->data->max_speed_hz; 744 541 master->setup = meson_spicc_setup; 745 542 master->cleanup = meson_spicc_cleanup; 746 543 master->prepare_message = 
meson_spicc_prepare_message; ··· 749 544 master->transfer_one = meson_spicc_transfer_one; 750 545 master->use_gpio_descriptors = true; 751 546 752 - /* Setup max rate according to the Meson GX datasheet */ 753 - if ((rate >> 2) > SPICC_MAX_FREQ) 754 - master->max_speed_hz = SPICC_MAX_FREQ; 755 - else 756 - master->max_speed_hz = rate >> 2; 547 + meson_spicc_oen_enable(spicc); 548 + 549 + ret = meson_spicc_clk_init(spicc); 550 + if (ret) { 551 + dev_err(&pdev->dev, "clock registration failed\n"); 552 + goto out_master; 553 + } 757 554 758 555 ret = devm_spi_register_master(&pdev->dev, master); 759 556 if (ret) { ··· 767 560 768 561 out_clk: 769 562 clk_disable_unprepare(spicc->core); 563 + clk_disable_unprepare(spicc->pclk); 770 564 771 565 out_master: 772 566 spi_master_put(master); ··· 783 575 writel(0, spicc->base + SPICC_CONREG); 784 576 785 577 clk_disable_unprepare(spicc->core); 578 + clk_disable_unprepare(spicc->pclk); 786 579 787 580 return 0; 788 581 } 789 582 583 + static const struct meson_spicc_data meson_spicc_gx_data = { 584 + .max_speed_hz = 30000000, 585 + .min_speed_hz = 325000, 586 + .fifo_size = 16, 587 + }; 588 + 589 + static const struct meson_spicc_data meson_spicc_axg_data = { 590 + .max_speed_hz = 80000000, 591 + .min_speed_hz = 325000, 592 + .fifo_size = 16, 593 + .has_oen = true, 594 + .has_enhance_clk_div = true, 595 + }; 596 + 597 + static const struct meson_spicc_data meson_spicc_g12a_data = { 598 + .max_speed_hz = 166666666, 599 + .min_speed_hz = 50000, 600 + .fifo_size = 15, 601 + .has_oen = true, 602 + .has_enhance_clk_div = true, 603 + .has_pclk = true, 604 + }; 605 + 790 606 static const struct of_device_id meson_spicc_of_match[] = { 791 - { .compatible = "amlogic,meson-gx-spicc", }, 792 - { .compatible = "amlogic,meson-axg-spicc", }, 607 + { 608 + .compatible = "amlogic,meson-gx-spicc", 609 + .data = &meson_spicc_gx_data, 610 + }, 611 + { 612 + .compatible = "amlogic,meson-axg-spicc", 613 + .data = &meson_spicc_axg_data, 614 + }, 
615 + { 616 + .compatible = "amlogic,meson-g12a-spicc", 617 + .data = &meson_spicc_g12a_data, 618 + }, 793 619 { /* sentinel */ } 794 620 }; 795 621 MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
+689
drivers/spi/spi-mtk-nor.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // Mediatek SPI NOR controller driver 4 + // 5 + // Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com> 6 + 7 + #include <linux/bits.h> 8 + #include <linux/clk.h> 9 + #include <linux/completion.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/interrupt.h> 12 + #include <linux/io.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/kernel.h> 15 + #include <linux/module.h> 16 + #include <linux/of_device.h> 17 + #include <linux/spi/spi.h> 18 + #include <linux/spi/spi-mem.h> 19 + #include <linux/string.h> 20 + 21 + #define DRIVER_NAME "mtk-spi-nor" 22 + 23 + #define MTK_NOR_REG_CMD 0x00 24 + #define MTK_NOR_CMD_WRITE BIT(4) 25 + #define MTK_NOR_CMD_PROGRAM BIT(2) 26 + #define MTK_NOR_CMD_READ BIT(0) 27 + #define MTK_NOR_CMD_MASK GENMASK(5, 0) 28 + 29 + #define MTK_NOR_REG_PRG_CNT 0x04 30 + #define MTK_NOR_REG_RDATA 0x0c 31 + 32 + #define MTK_NOR_REG_RADR0 0x10 33 + #define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n)) 34 + #define MTK_NOR_REG_RADR3 0xc8 35 + 36 + #define MTK_NOR_REG_WDATA 0x1c 37 + 38 + #define MTK_NOR_REG_PRGDATA0 0x20 39 + #define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n)) 40 + #define MTK_NOR_REG_PRGDATA_MAX 5 41 + 42 + #define MTK_NOR_REG_SHIFT0 0x38 43 + #define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n)) 44 + #define MTK_NOR_REG_SHIFT_MAX 9 45 + 46 + #define MTK_NOR_REG_CFG1 0x60 47 + #define MTK_NOR_FAST_READ BIT(0) 48 + 49 + #define MTK_NOR_REG_CFG2 0x64 50 + #define MTK_NOR_WR_CUSTOM_OP_EN BIT(4) 51 + #define MTK_NOR_WR_BUF_EN BIT(0) 52 + 53 + #define MTK_NOR_REG_PP_DATA 0x98 54 + 55 + #define MTK_NOR_REG_IRQ_STAT 0xa8 56 + #define MTK_NOR_REG_IRQ_EN 0xac 57 + #define MTK_NOR_IRQ_DMA BIT(7) 58 + #define MTK_NOR_IRQ_MASK GENMASK(7, 0) 59 + 60 + #define MTK_NOR_REG_CFG3 0xb4 61 + #define MTK_NOR_DISABLE_WREN BIT(7) 62 + #define MTK_NOR_DISABLE_SR_POLL BIT(5) 63 + 64 + #define MTK_NOR_REG_WP 0xc4 65 + #define MTK_NOR_ENABLE_SF_CMD 0x30 66 + 67 + 
#define MTK_NOR_REG_BUSCFG 0xcc 68 + #define MTK_NOR_4B_ADDR BIT(4) 69 + #define MTK_NOR_QUAD_ADDR BIT(3) 70 + #define MTK_NOR_QUAD_READ BIT(2) 71 + #define MTK_NOR_DUAL_ADDR BIT(1) 72 + #define MTK_NOR_DUAL_READ BIT(0) 73 + #define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0) 74 + 75 + #define MTK_NOR_REG_DMA_CTL 0x718 76 + #define MTK_NOR_DMA_START BIT(0) 77 + 78 + #define MTK_NOR_REG_DMA_FADR 0x71c 79 + #define MTK_NOR_REG_DMA_DADR 0x720 80 + #define MTK_NOR_REG_DMA_END_DADR 0x724 81 + 82 + #define MTK_NOR_PRG_MAX_SIZE 6 83 + // Reading DMA src/dst addresses have to be 16-byte aligned 84 + #define MTK_NOR_DMA_ALIGN 16 85 + #define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1) 86 + // and we allocate a bounce buffer if destination address isn't aligned. 87 + #define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE 88 + 89 + // Buffered page program can do one 128-byte transfer 90 + #define MTK_NOR_PP_SIZE 128 91 + 92 + #define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / sp->spi_freq) 93 + 94 + struct mtk_nor { 95 + struct spi_controller *ctlr; 96 + struct device *dev; 97 + void __iomem *base; 98 + u8 *buffer; 99 + struct clk *spi_clk; 100 + struct clk *ctlr_clk; 101 + unsigned int spi_freq; 102 + bool wbuf_en; 103 + bool has_irq; 104 + struct completion op_done; 105 + }; 106 + 107 + static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr) 108 + { 109 + u32 val = readl(sp->base + reg); 110 + 111 + val &= ~clr; 112 + val |= set; 113 + writel(val, sp->base + reg); 114 + } 115 + 116 + static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk) 117 + { 118 + ulong delay = CLK_TO_US(sp, clk); 119 + u32 reg; 120 + int ret; 121 + 122 + writel(cmd, sp->base + MTK_NOR_REG_CMD); 123 + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd), 124 + delay / 3, (delay + 1) * 200); 125 + if (ret < 0) 126 + dev_err(sp->dev, "command %u timeout.\n", cmd); 127 + return ret; 128 + } 129 + 130 + static void mtk_nor_set_addr(struct mtk_nor *sp, const struct 
spi_mem_op *op) 131 + { 132 + u32 addr = op->addr.val; 133 + int i; 134 + 135 + for (i = 0; i < 3; i++) { 136 + writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i)); 137 + addr >>= 8; 138 + } 139 + if (op->addr.nbytes == 4) { 140 + writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3); 141 + mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0); 142 + } else { 143 + mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR); 144 + } 145 + } 146 + 147 + static bool mtk_nor_match_read(const struct spi_mem_op *op) 148 + { 149 + int dummy = 0; 150 + 151 + if (op->dummy.buswidth) 152 + dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth; 153 + 154 + if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) { 155 + if (op->addr.buswidth == 1) 156 + return dummy == 8; 157 + else if (op->addr.buswidth == 2) 158 + return dummy == 4; 159 + else if (op->addr.buswidth == 4) 160 + return dummy == 6; 161 + } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) { 162 + if (op->cmd.opcode == 0x03) 163 + return dummy == 0; 164 + else if (op->cmd.opcode == 0x0b) 165 + return dummy == 8; 166 + } 167 + return false; 168 + } 169 + 170 + static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 171 + { 172 + size_t len; 173 + 174 + if (!op->data.nbytes) 175 + return 0; 176 + 177 + if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { 178 + if ((op->data.dir == SPI_MEM_DATA_IN) && 179 + mtk_nor_match_read(op)) { 180 + if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) || 181 + (op->data.nbytes < MTK_NOR_DMA_ALIGN)) 182 + op->data.nbytes = 1; 183 + else if (!((ulong)(op->data.buf.in) & 184 + MTK_NOR_DMA_ALIGN_MASK)) 185 + op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK; 186 + else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE) 187 + op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE; 188 + return 0; 189 + } else if (op->data.dir == SPI_MEM_DATA_OUT) { 190 + if (op->data.nbytes >= MTK_NOR_PP_SIZE) 191 + op->data.nbytes = MTK_NOR_PP_SIZE; 192 + else 193 + op->data.nbytes = 1; 
194 + return 0; 195 + } 196 + } 197 + 198 + len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes - 199 + op->dummy.nbytes; 200 + if (op->data.nbytes > len) 201 + op->data.nbytes = len; 202 + 203 + return 0; 204 + } 205 + 206 + static bool mtk_nor_supports_op(struct spi_mem *mem, 207 + const struct spi_mem_op *op) 208 + { 209 + size_t len; 210 + 211 + if (op->cmd.buswidth != 1) 212 + return false; 213 + 214 + if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { 215 + if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) 216 + return true; 217 + else if (op->data.dir == SPI_MEM_DATA_OUT) 218 + return (op->addr.buswidth == 1) && 219 + (op->dummy.buswidth == 0) && 220 + (op->data.buswidth == 1); 221 + } 222 + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; 223 + if ((len > MTK_NOR_PRG_MAX_SIZE) || 224 + ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE))) 225 + return false; 226 + return true; 227 + } 228 + 229 + static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op) 230 + { 231 + u32 reg = 0; 232 + 233 + if (op->addr.nbytes == 4) 234 + reg |= MTK_NOR_4B_ADDR; 235 + 236 + if (op->data.buswidth == 4) { 237 + reg |= MTK_NOR_QUAD_READ; 238 + writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4)); 239 + if (op->addr.buswidth == 4) 240 + reg |= MTK_NOR_QUAD_ADDR; 241 + } else if (op->data.buswidth == 2) { 242 + reg |= MTK_NOR_DUAL_READ; 243 + writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3)); 244 + if (op->addr.buswidth == 2) 245 + reg |= MTK_NOR_DUAL_ADDR; 246 + } else { 247 + if (op->cmd.opcode == 0x0b) 248 + mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0); 249 + else 250 + mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ); 251 + } 252 + mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK); 253 + } 254 + 255 + static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length, 256 + u8 *buffer) 257 + { 258 + int ret = 0; 259 + ulong delay; 260 + u32 
reg; 261 + dma_addr_t dma_addr; 262 + 263 + dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE); 264 + if (dma_mapping_error(sp->dev, dma_addr)) { 265 + dev_err(sp->dev, "failed to map dma buffer.\n"); 266 + return -EINVAL; 267 + } 268 + 269 + writel(from, sp->base + MTK_NOR_REG_DMA_FADR); 270 + writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR); 271 + writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR); 272 + 273 + if (sp->has_irq) { 274 + reinit_completion(&sp->op_done); 275 + mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0); 276 + } 277 + 278 + mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0); 279 + 280 + delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE); 281 + 282 + if (sp->has_irq) { 283 + if (!wait_for_completion_timeout(&sp->op_done, 284 + (delay + 1) * 100)) 285 + ret = -ETIMEDOUT; 286 + } else { 287 + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg, 288 + !(reg & MTK_NOR_DMA_START), delay / 3, 289 + (delay + 1) * 100); 290 + } 291 + 292 + dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE); 293 + if (ret < 0) 294 + dev_err(sp->dev, "dma read timeout.\n"); 295 + 296 + return ret; 297 + } 298 + 299 + static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from, 300 + unsigned int length, u8 *buffer) 301 + { 302 + unsigned int rdlen; 303 + int ret; 304 + 305 + if (length & MTK_NOR_DMA_ALIGN_MASK) 306 + rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK; 307 + else 308 + rdlen = length; 309 + 310 + ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer); 311 + if (ret) 312 + return ret; 313 + 314 + memcpy(buffer, sp->buffer, length); 315 + return 0; 316 + } 317 + 318 + static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op) 319 + { 320 + u8 *buf = op->data.buf.in; 321 + int ret; 322 + 323 + ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE); 324 + if (!ret) 325 + buf[0] = readb(sp->base + MTK_NOR_REG_RDATA); 326 + return ret; 327 + } 328 + 329 + static 
int mtk_nor_write_buffer_enable(struct mtk_nor *sp) 330 + { 331 + int ret; 332 + u32 val; 333 + 334 + if (sp->wbuf_en) 335 + return 0; 336 + 337 + val = readl(sp->base + MTK_NOR_REG_CFG2); 338 + writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2); 339 + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val, 340 + val & MTK_NOR_WR_BUF_EN, 0, 10000); 341 + if (!ret) 342 + sp->wbuf_en = true; 343 + return ret; 344 + } 345 + 346 + static int mtk_nor_write_buffer_disable(struct mtk_nor *sp) 347 + { 348 + int ret; 349 + u32 val; 350 + 351 + if (!sp->wbuf_en) 352 + return 0; 353 + val = readl(sp->base + MTK_NOR_REG_CFG2); 354 + writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2); 355 + ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val, 356 + !(val & MTK_NOR_WR_BUF_EN), 0, 10000); 357 + if (!ret) 358 + sp->wbuf_en = false; 359 + return ret; 360 + } 361 + 362 + static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op) 363 + { 364 + const u8 *buf = op->data.buf.out; 365 + u32 val; 366 + int ret, i; 367 + 368 + ret = mtk_nor_write_buffer_enable(sp); 369 + if (ret < 0) 370 + return ret; 371 + 372 + for (i = 0; i < op->data.nbytes; i += 4) { 373 + val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 | 374 + buf[i]; 375 + writel(val, sp->base + MTK_NOR_REG_PP_DATA); 376 + } 377 + return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 378 + (op->data.nbytes + 5) * BITS_PER_BYTE); 379 + } 380 + 381 + static int mtk_nor_pp_unbuffered(struct mtk_nor *sp, 382 + const struct spi_mem_op *op) 383 + { 384 + const u8 *buf = op->data.buf.out; 385 + int ret; 386 + 387 + ret = mtk_nor_write_buffer_disable(sp); 388 + if (ret < 0) 389 + return ret; 390 + writeb(buf[0], sp->base + MTK_NOR_REG_WDATA); 391 + return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE); 392 + } 393 + 394 + int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 395 + { 396 + struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master); 397 
+ int ret; 398 + 399 + if ((op->data.nbytes == 0) || 400 + ((op->addr.nbytes != 3) && (op->addr.nbytes != 4))) 401 + return -ENOTSUPP; 402 + 403 + if (op->data.dir == SPI_MEM_DATA_OUT) { 404 + mtk_nor_set_addr(sp, op); 405 + writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0); 406 + if (op->data.nbytes == MTK_NOR_PP_SIZE) 407 + return mtk_nor_pp_buffered(sp, op); 408 + return mtk_nor_pp_unbuffered(sp, op); 409 + } 410 + 411 + if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) { 412 + ret = mtk_nor_write_buffer_disable(sp); 413 + if (ret < 0) 414 + return ret; 415 + mtk_nor_setup_bus(sp, op); 416 + if (op->data.nbytes == 1) { 417 + mtk_nor_set_addr(sp, op); 418 + return mtk_nor_read_pio(sp, op); 419 + } else if (((ulong)(op->data.buf.in) & 420 + MTK_NOR_DMA_ALIGN_MASK)) { 421 + return mtk_nor_read_bounce(sp, op->addr.val, 422 + op->data.nbytes, 423 + op->data.buf.in); 424 + } else { 425 + return mtk_nor_read_dma(sp, op->addr.val, 426 + op->data.nbytes, 427 + op->data.buf.in); 428 + } 429 + } 430 + 431 + return -ENOTSUPP; 432 + } 433 + 434 + static int mtk_nor_setup(struct spi_device *spi) 435 + { 436 + struct mtk_nor *sp = spi_controller_get_devdata(spi->master); 437 + 438 + if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) { 439 + dev_err(&spi->dev, "spi clock should be %u Hz.\n", 440 + sp->spi_freq); 441 + return -EINVAL; 442 + } 443 + spi->max_speed_hz = sp->spi_freq; 444 + 445 + return 0; 446 + } 447 + 448 + static int mtk_nor_transfer_one_message(struct spi_controller *master, 449 + struct spi_message *m) 450 + { 451 + struct mtk_nor *sp = spi_controller_get_devdata(master); 452 + struct spi_transfer *t = NULL; 453 + unsigned long trx_len = 0; 454 + int stat = 0; 455 + int reg_offset = MTK_NOR_REG_PRGDATA_MAX; 456 + void __iomem *reg; 457 + const u8 *txbuf; 458 + u8 *rxbuf; 459 + int i; 460 + 461 + list_for_each_entry(t, &m->transfers, transfer_list) { 462 + txbuf = t->tx_buf; 463 + for (i = 0; i < t->len; i++, reg_offset--) { 464 + 
reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 465 + if (txbuf) 466 + writeb(txbuf[i], reg); 467 + else 468 + writeb(0, reg); 469 + } 470 + trx_len += t->len; 471 + } 472 + 473 + writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT); 474 + 475 + stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM, 476 + trx_len * BITS_PER_BYTE); 477 + if (stat < 0) 478 + goto msg_done; 479 + 480 + reg_offset = trx_len - 1; 481 + list_for_each_entry(t, &m->transfers, transfer_list) { 482 + rxbuf = t->rx_buf; 483 + for (i = 0; i < t->len; i++, reg_offset--) { 484 + reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset); 485 + if (rxbuf) 486 + rxbuf[i] = readb(reg); 487 + } 488 + } 489 + 490 + m->actual_length = trx_len; 491 + msg_done: 492 + m->status = stat; 493 + spi_finalize_current_message(master); 494 + 495 + return 0; 496 + } 497 + 498 + static void mtk_nor_disable_clk(struct mtk_nor *sp) 499 + { 500 + clk_disable_unprepare(sp->spi_clk); 501 + clk_disable_unprepare(sp->ctlr_clk); 502 + } 503 + 504 + static int mtk_nor_enable_clk(struct mtk_nor *sp) 505 + { 506 + int ret; 507 + 508 + ret = clk_prepare_enable(sp->spi_clk); 509 + if (ret) 510 + return ret; 511 + 512 + ret = clk_prepare_enable(sp->ctlr_clk); 513 + if (ret) { 514 + clk_disable_unprepare(sp->spi_clk); 515 + return ret; 516 + } 517 + 518 + return 0; 519 + } 520 + 521 + static int mtk_nor_init(struct mtk_nor *sp) 522 + { 523 + int ret; 524 + 525 + ret = mtk_nor_enable_clk(sp); 526 + if (ret) 527 + return ret; 528 + 529 + sp->spi_freq = clk_get_rate(sp->spi_clk); 530 + 531 + writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP); 532 + mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0); 533 + mtk_nor_rmw(sp, MTK_NOR_REG_CFG3, 534 + MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0); 535 + 536 + return ret; 537 + } 538 + 539 + static irqreturn_t mtk_nor_irq_handler(int irq, void *data) 540 + { 541 + struct mtk_nor *sp = data; 542 + u32 irq_status, irq_enabled; 543 + 544 + irq_status = readl(sp->base + 
MTK_NOR_REG_IRQ_STAT); 545 + irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN); 546 + // write status back to clear interrupt 547 + writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT); 548 + 549 + if (!(irq_status & irq_enabled)) 550 + return IRQ_NONE; 551 + 552 + if (irq_status & MTK_NOR_IRQ_DMA) { 553 + complete(&sp->op_done); 554 + writel(0, sp->base + MTK_NOR_REG_IRQ_EN); 555 + } 556 + 557 + return IRQ_HANDLED; 558 + } 559 + 560 + static size_t mtk_max_msg_size(struct spi_device *spi) 561 + { 562 + return MTK_NOR_PRG_MAX_SIZE; 563 + } 564 + 565 + static const struct spi_controller_mem_ops mtk_nor_mem_ops = { 566 + .adjust_op_size = mtk_nor_adjust_op_size, 567 + .supports_op = mtk_nor_supports_op, 568 + .exec_op = mtk_nor_exec_op 569 + }; 570 + 571 + static const struct of_device_id mtk_nor_match[] = { 572 + { .compatible = "mediatek,mt8173-nor" }, 573 + { /* sentinel */ } 574 + }; 575 + MODULE_DEVICE_TABLE(of, mtk_nor_match); 576 + 577 + static int mtk_nor_probe(struct platform_device *pdev) 578 + { 579 + struct spi_controller *ctlr; 580 + struct mtk_nor *sp; 581 + void __iomem *base; 582 + u8 *buffer; 583 + struct clk *spi_clk, *ctlr_clk; 584 + int ret, irq; 585 + 586 + base = devm_platform_ioremap_resource(pdev, 0); 587 + if (IS_ERR(base)) 588 + return PTR_ERR(base); 589 + 590 + spi_clk = devm_clk_get(&pdev->dev, "spi"); 591 + if (IS_ERR(spi_clk)) 592 + return PTR_ERR(spi_clk); 593 + 594 + ctlr_clk = devm_clk_get(&pdev->dev, "sf"); 595 + if (IS_ERR(ctlr_clk)) 596 + return PTR_ERR(ctlr_clk); 597 + 598 + buffer = devm_kmalloc(&pdev->dev, 599 + MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN, 600 + GFP_KERNEL); 601 + if (!buffer) 602 + return -ENOMEM; 603 + 604 + if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK) 605 + buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) & 606 + ~MTK_NOR_DMA_ALIGN_MASK); 607 + 608 + ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp)); 609 + if (!ctlr) { 610 + dev_err(&pdev->dev, "failed to allocate spi controller\n"); 611 + return -ENOMEM; 
612 + } 613 + 614 + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); 615 + ctlr->dev.of_node = pdev->dev.of_node; 616 + ctlr->max_message_size = mtk_max_msg_size; 617 + ctlr->mem_ops = &mtk_nor_mem_ops; 618 + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; 619 + ctlr->num_chipselect = 1; 620 + ctlr->setup = mtk_nor_setup; 621 + ctlr->transfer_one_message = mtk_nor_transfer_one_message; 622 + 623 + dev_set_drvdata(&pdev->dev, ctlr); 624 + 625 + sp = spi_controller_get_devdata(ctlr); 626 + sp->base = base; 627 + sp->buffer = buffer; 628 + sp->has_irq = false; 629 + sp->wbuf_en = false; 630 + sp->ctlr = ctlr; 631 + sp->dev = &pdev->dev; 632 + sp->spi_clk = spi_clk; 633 + sp->ctlr_clk = ctlr_clk; 634 + 635 + irq = platform_get_irq_optional(pdev, 0); 636 + if (irq < 0) { 637 + dev_warn(sp->dev, "IRQ not available."); 638 + } else { 639 + writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT); 640 + writel(0, base + MTK_NOR_REG_IRQ_EN); 641 + ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0, 642 + pdev->name, sp); 643 + if (ret < 0) { 644 + dev_warn(sp->dev, "failed to request IRQ."); 645 + } else { 646 + init_completion(&sp->op_done); 647 + sp->has_irq = true; 648 + } 649 + } 650 + 651 + ret = mtk_nor_init(sp); 652 + if (ret < 0) { 653 + kfree(ctlr); 654 + return ret; 655 + } 656 + 657 + dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq); 658 + 659 + return devm_spi_register_controller(&pdev->dev, ctlr); 660 + } 661 + 662 + static int mtk_nor_remove(struct platform_device *pdev) 663 + { 664 + struct spi_controller *ctlr; 665 + struct mtk_nor *sp; 666 + 667 + ctlr = dev_get_drvdata(&pdev->dev); 668 + sp = spi_controller_get_devdata(ctlr); 669 + 670 + mtk_nor_disable_clk(sp); 671 + 672 + return 0; 673 + } 674 + 675 + static struct platform_driver mtk_nor_driver = { 676 + .driver = { 677 + .name = DRIVER_NAME, 678 + .of_match_table = mtk_nor_match, 679 + }, 680 + .probe = mtk_nor_probe, 681 + .remove = mtk_nor_remove, 682 + }; 683 + 
684 + module_platform_driver(mtk_nor_driver); 685 + 686 + MODULE_DESCRIPTION("Mediatek SPI NOR controller driver"); 687 + MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>"); 688 + MODULE_LICENSE("GPL v2"); 689 + MODULE_ALIAS("platform:" DRIVER_NAME);
+187
drivers/spi/spi-mux.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // General Purpose SPI multiplexer 4 + 5 + #include <linux/err.h> 6 + #include <linux/kernel.h> 7 + #include <linux/module.h> 8 + #include <linux/mux/consumer.h> 9 + #include <linux/slab.h> 10 + #include <linux/spi/spi.h> 11 + 12 + #define SPI_MUX_NO_CS ((unsigned int)-1) 13 + 14 + /** 15 + * DOC: Driver description 16 + * 17 + * This driver supports a MUX on an SPI bus. This can be useful when you need 18 + * more chip selects than the hardware peripherals support, or than are 19 + * available in a particular board setup. 20 + * 21 + * The driver will create an additional SPI controller. Devices added under the 22 + * mux will be handled as 'chip selects' on this controller. 23 + */ 24 + 25 + /** 26 + * struct spi_mux_priv - the basic spi_mux structure 27 + * @spi: pointer to the device struct attached to the parent 28 + * spi controller 29 + * @current_cs: The current chip select set in the mux 30 + * @child_msg_complete: The mux replaces the complete callback in the child's 31 + * message to its own callback; this field is used by the 32 + * driver to store the child's callback during a transfer 33 + * @child_msg_context: Used to store the child's context to the callback 34 + * @child_msg_dev: Used to store the spi_device pointer to the child 35 + * @mux: mux_control structure used to provide chip selects for 36 + * downstream spi devices 37 + */ 38 + struct spi_mux_priv { 39 + struct spi_device *spi; 40 + unsigned int current_cs; 41 + 42 + void (*child_msg_complete)(void *context); 43 + void *child_msg_context; 44 + struct spi_device *child_msg_dev; 45 + struct mux_control *mux; 46 + }; 47 + 48 + /* should not get called when the parent controller is doing a transfer */ 49 + static int spi_mux_select(struct spi_device *spi) 50 + { 51 + struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller); 52 + int ret; 53 + 54 + if (priv->current_cs == spi->chip_select) 55 + return 0; 56 + 57 + 
dev_dbg(&priv->spi->dev, "setting up the mux for cs %d\n", 58 + spi->chip_select); 59 + 60 + /* copy the child device's settings except for the cs */ 61 + priv->spi->max_speed_hz = spi->max_speed_hz; 62 + priv->spi->mode = spi->mode; 63 + priv->spi->bits_per_word = spi->bits_per_word; 64 + 65 + ret = mux_control_select(priv->mux, spi->chip_select); 66 + if (ret) 67 + return ret; 68 + 69 + priv->current_cs = spi->chip_select; 70 + 71 + return 0; 72 + } 73 + 74 + static int spi_mux_setup(struct spi_device *spi) 75 + { 76 + struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller); 77 + 78 + /* 79 + * can be called multiple times, won't do a valid setup now but we will 80 + * change the settings when we do a transfer (necessary because we 81 + * can't predict from which device it will be anyway) 82 + */ 83 + return spi_setup(priv->spi); 84 + } 85 + 86 + static void spi_mux_complete_cb(void *context) 87 + { 88 + struct spi_mux_priv *priv = (struct spi_mux_priv *)context; 89 + struct spi_controller *ctlr = spi_get_drvdata(priv->spi); 90 + struct spi_message *m = ctlr->cur_msg; 91 + 92 + m->complete = priv->child_msg_complete; 93 + m->context = priv->child_msg_context; 94 + m->spi = priv->child_msg_dev; 95 + spi_finalize_current_message(ctlr); 96 + mux_control_deselect(priv->mux); 97 + } 98 + 99 + static int spi_mux_transfer_one_message(struct spi_controller *ctlr, 100 + struct spi_message *m) 101 + { 102 + struct spi_mux_priv *priv = spi_controller_get_devdata(ctlr); 103 + struct spi_device *spi = m->spi; 104 + int ret; 105 + 106 + ret = spi_mux_select(spi); 107 + if (ret) 108 + return ret; 109 + 110 + /* 111 + * Replace the complete callback, context and spi_device with our own 112 + * pointers. 
Save originals 113 + */ 114 + priv->child_msg_complete = m->complete; 115 + priv->child_msg_context = m->context; 116 + priv->child_msg_dev = m->spi; 117 + 118 + m->complete = spi_mux_complete_cb; 119 + m->context = priv; 120 + m->spi = priv->spi; 121 + 122 + /* do the transfer */ 123 + return spi_async(priv->spi, m); 124 + } 125 + 126 + static int spi_mux_probe(struct spi_device *spi) 127 + { 128 + struct spi_controller *ctlr; 129 + struct spi_mux_priv *priv; 130 + int ret; 131 + 132 + ctlr = spi_alloc_master(&spi->dev, sizeof(*priv)); 133 + if (!ctlr) 134 + return -ENOMEM; 135 + 136 + spi_set_drvdata(spi, ctlr); 137 + priv = spi_controller_get_devdata(ctlr); 138 + priv->spi = spi; 139 + 140 + priv->mux = devm_mux_control_get(&spi->dev, NULL); 141 + if (IS_ERR(priv->mux)) { 142 + ret = PTR_ERR(priv->mux); 143 + if (ret != -EPROBE_DEFER) 144 + dev_err(&spi->dev, "failed to get control-mux\n"); 145 + goto err_put_ctlr; 146 + } 147 + 148 + priv->current_cs = SPI_MUX_NO_CS; 149 + 150 + /* supported modes are the same as our parent's */ 151 + ctlr->mode_bits = spi->controller->mode_bits; 152 + ctlr->flags = spi->controller->flags; 153 + ctlr->transfer_one_message = spi_mux_transfer_one_message; 154 + ctlr->setup = spi_mux_setup; 155 + ctlr->num_chipselect = mux_control_states(priv->mux); 156 + ctlr->bus_num = -1; 157 + ctlr->dev.of_node = spi->dev.of_node; 158 + 159 + ret = devm_spi_register_controller(&spi->dev, ctlr); 160 + if (ret) 161 + goto err_put_ctlr; 162 + 163 + return 0; 164 + 165 + err_put_ctlr: 166 + spi_controller_put(ctlr); 167 + 168 + return ret; 169 + } 170 + 171 + static const struct of_device_id spi_mux_of_match[] = { 172 + { .compatible = "spi-mux" }, 173 + { } 174 + }; 175 + 176 + static struct spi_driver spi_mux_driver = { 177 + .probe = spi_mux_probe, 178 + .driver = { 179 + .name = "spi-mux", 180 + .of_match_table = spi_mux_of_match, 181 + }, 182 + }; 183 + 184 + module_spi_driver(spi_mux_driver); 185 + 186 + MODULE_DESCRIPTION("SPI 
multiplexer"); 187 + MODULE_LICENSE("GPL");
+1 -2
drivers/spi/spi-mxs.c
··· 22 22 #include <linux/ioport.h> 23 23 #include <linux/of.h> 24 24 #include <linux/of_device.h> 25 - #include <linux/of_gpio.h> 26 25 #include <linux/platform_device.h> 27 26 #include <linux/delay.h> 28 27 #include <linux/interrupt.h> ··· 31 32 #include <linux/clk.h> 32 33 #include <linux/err.h> 33 34 #include <linux/completion.h> 34 - #include <linux/gpio.h> 35 + #include <linux/pinctrl/consumer.h> 35 36 #include <linux/regulator/consumer.h> 36 37 #include <linux/pm_runtime.h> 37 38 #include <linux/module.h>
+55 -8
drivers/spi/spi-nxp-fspi.c
··· 307 307 308 308 #define POLL_TOUT 5000 309 309 #define NXP_FSPI_MAX_CHIPSELECT 4 310 + #define NXP_FSPI_MIN_IOMAP SZ_4M 310 311 311 312 struct nxp_fspi_devtype_data { 312 313 unsigned int rxfifo; ··· 325 324 .little_endian = true, /* little-endian */ 326 325 }; 327 326 327 + static const struct nxp_fspi_devtype_data imx8mm_data = { 328 + .rxfifo = SZ_512, /* (64 * 64 bits) */ 329 + .txfifo = SZ_1K, /* (128 * 64 bits) */ 330 + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ 331 + .quirks = 0, 332 + .little_endian = true, /* little-endian */ 333 + }; 334 + 335 + static const struct nxp_fspi_devtype_data imx8qxp_data = { 336 + .rxfifo = SZ_512, /* (64 * 64 bits) */ 337 + .txfifo = SZ_1K, /* (128 * 64 bits) */ 338 + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ 339 + .quirks = 0, 340 + .little_endian = true, /* little-endian */ 341 + }; 342 + 328 343 struct nxp_fspi { 329 344 void __iomem *iobase; 330 345 void __iomem *ahb_addr; 331 346 u32 memmap_phy; 332 347 u32 memmap_phy_size; 348 + u32 memmap_start; 349 + u32 memmap_len; 333 350 struct clk *clk, *clk_en; 334 351 struct device *dev; 335 352 struct completion c; ··· 660 641 f->selected = spi->chip_select; 661 642 } 662 643 663 - static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) 644 + static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) 664 645 { 646 + u32 start = op->addr.val; 665 647 u32 len = op->data.nbytes; 666 648 649 + /* if necessary, ioremap before AHB read */ 650 + if ((!f->ahb_addr) || start < f->memmap_start || 651 + start + len > f->memmap_start + f->memmap_len) { 652 + if (f->ahb_addr) 653 + iounmap(f->ahb_addr); 654 + 655 + f->memmap_start = start; 656 + f->memmap_len = len > NXP_FSPI_MIN_IOMAP ? 
657 + len : NXP_FSPI_MIN_IOMAP; 658 + 659 + f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start, 660 + f->memmap_len); 661 + 662 + if (!f->ahb_addr) { 663 + dev_err(f->dev, "failed to alloc memory\n"); 664 + return -ENOMEM; 665 + } 666 + } 667 + 667 668 /* Read out the data directly from the AHB buffer. */ 668 - memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len); 669 + memcpy_fromio(op->data.buf.in, 670 + f->ahb_addr + start - f->memmap_start, len); 671 + 672 + return 0; 669 673 } 670 674 671 675 static void nxp_fspi_fill_txfifo(struct nxp_fspi *f, ··· 848 806 */ 849 807 if (op->data.nbytes > (f->devtype_data->rxfifo - 4) && 850 808 op->data.dir == SPI_MEM_DATA_IN) { 851 - nxp_fspi_read_ahb(f, op); 809 + err = nxp_fspi_read_ahb(f, op); 852 810 } else { 853 811 if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) 854 812 nxp_fspi_fill_txfifo(f, op); ··· 913 871 fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR); 914 872 915 873 /* enable module */ 916 - fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF), 917 - base + FSPI_MCR0); 874 + fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | 875 + FSPI_MCR0_IP_TIMEOUT(0xFF) | (u32) FSPI_MCR0_OCTCOMB_EN, 876 + base + FSPI_MCR0); 918 877 919 878 /* 920 879 * Disable same device enable bit and configure all slave devices ··· 1019 976 1020 977 /* find the resources - controller memory mapped space */ 1021 978 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); 1022 - f->ahb_addr = devm_ioremap_resource(dev, res); 1023 - if (IS_ERR(f->ahb_addr)) { 1024 - ret = PTR_ERR(f->ahb_addr); 979 + if (!res) { 980 + ret = -ENODEV; 1025 981 goto err_put_ctrl; 1026 982 } 1027 983 ··· 1099 1057 1100 1058 mutex_destroy(&f->lock); 1101 1059 1060 + if (f->ahb_addr) 1061 + iounmap(f->ahb_addr); 1062 + 1102 1063 return 0; 1103 1064 } 1104 1065 ··· 1121 1076 1122 1077 static const struct of_device_id nxp_fspi_dt_ids[] = { 1123 1078 { .compatible = "nxp,lx2160a-fspi", .data = (void 
*)&lx2160a_data, }, 1079 + { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, }, 1080 + { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, }, 1124 1081 { /* sentinel */ } 1125 1082 }; 1126 1083 MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
+17 -16
drivers/spi/spi-pxa2xx.c
··· 192 192 return drv_data->ssp_type == QUARK_X1000_SSP; 193 193 } 194 194 195 + static bool is_mmp2_ssp(const struct driver_data *drv_data) 196 + { 197 + return drv_data->ssp_type == MMP2_SSP; 198 + } 199 + 195 200 static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data) 196 201 { 197 202 switch (drv_data->ssp_type) { ··· 491 486 492 487 static void pxa2xx_spi_off(struct driver_data *drv_data) 493 488 { 494 - /* On MMP, disabling SSE seems to corrupt the rx fifo */ 495 - if (drv_data->ssp_type == MMP2_SSP) 489 + /* On MMP, disabling SSE seems to corrupt the Rx FIFO */ 490 + if (is_mmp2_ssp(drv_data)) 496 491 return; 497 492 498 493 pxa2xx_spi_write(drv_data, SSCR0, ··· 1098 1093 || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) 1099 1094 != (cr1 & change_mask)) { 1100 1095 /* stop the SSP, and update the other bits */ 1101 - if (drv_data->ssp_type != MMP2_SSP) 1096 + if (!is_mmp2_ssp(drv_data)) 1102 1097 pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); 1103 1098 if (!pxa25x_ssp_comp(drv_data)) 1104 1099 pxa2xx_spi_write(drv_data, SSTO, chip->timeout); ··· 1112 1107 pxa2xx_spi_write(drv_data, SSTO, chip->timeout); 1113 1108 } 1114 1109 1115 - if (drv_data->ssp_type == MMP2_SSP) { 1110 + if (is_mmp2_ssp(drv_data)) { 1116 1111 u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR) 1117 1112 & SSSR_TFL_MASK) >> 8; 1118 1113 ··· 1576 1571 else if (pcidev_id) 1577 1572 type = (enum pxa_ssp_type)pcidev_id->driver_data; 1578 1573 else 1579 - return NULL; 1574 + return ERR_PTR(-EINVAL); 1580 1575 1581 1576 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1582 1577 if (!pdata) 1583 - return NULL; 1578 + return ERR_PTR(-ENOMEM); 1584 1579 1585 1580 ssp = &pdata->ssp; 1586 1581 1587 1582 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1588 1583 ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res); 1589 1584 if (IS_ERR(ssp->mmio_base)) 1590 - return NULL; 1585 + return ERR_CAST(ssp->mmio_base); 1591 1586 1592 1587 ssp->phys_base = 
res->start; 1593 1588 ··· 1601 1596 1602 1597 ssp->clk = devm_clk_get(&pdev->dev, NULL); 1603 1598 if (IS_ERR(ssp->clk)) 1604 - return NULL; 1599 + return ERR_CAST(ssp->clk); 1605 1600 1606 1601 ssp->irq = platform_get_irq(pdev, 0); 1607 1602 if (ssp->irq < 0) 1608 - return NULL; 1603 + return ERR_PTR(ssp->irq); 1609 1604 1610 1605 ssp->type = type; 1611 1606 ssp->dev = &pdev->dev; ··· 1662 1657 platform_info = dev_get_platdata(dev); 1663 1658 if (!platform_info) { 1664 1659 platform_info = pxa2xx_spi_init_pdata(pdev); 1665 - if (!platform_info) { 1660 + if (IS_ERR(platform_info)) { 1666 1661 dev_err(&pdev->dev, "missing platform data\n"); 1667 - return -ENODEV; 1662 + return PTR_ERR(platform_info); 1668 1663 } 1669 1664 } 1670 1665 ··· 1912 1907 static int pxa2xx_spi_remove(struct platform_device *pdev) 1913 1908 { 1914 1909 struct driver_data *drv_data = platform_get_drvdata(pdev); 1915 - struct ssp_device *ssp; 1916 - 1917 - if (!drv_data) 1918 - return 0; 1919 - ssp = drv_data->ssp; 1910 + struct ssp_device *ssp = drv_data->ssp; 1920 1911 1921 1912 pm_runtime_get_sync(&pdev->dev); 1922 1913
+4 -1
drivers/spi/spi-rockchip.c
··· 843 843 }; 844 844 845 845 static const struct of_device_id rockchip_spi_dt_match[] = { 846 - { .compatible = "rockchip,rv1108-spi", }, 846 + { .compatible = "rockchip,px30-spi", }, 847 847 { .compatible = "rockchip,rk3036-spi", }, 848 848 { .compatible = "rockchip,rk3066-spi", }, 849 849 { .compatible = "rockchip,rk3188-spi", }, 850 850 { .compatible = "rockchip,rk3228-spi", }, 851 851 { .compatible = "rockchip,rk3288-spi", }, 852 + { .compatible = "rockchip,rk3308-spi", }, 853 + { .compatible = "rockchip,rk3328-spi", }, 852 854 { .compatible = "rockchip,rk3368-spi", }, 853 855 { .compatible = "rockchip,rk3399-spi", }, 856 + { .compatible = "rockchip,rv1108-spi", }, 854 857 { }, 855 858 }; 856 859 MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
+36 -8
drivers/spi/spi-rspi.c
··· 24 24 #include <linux/sh_dma.h> 25 25 #include <linux/spi/spi.h> 26 26 #include <linux/spi/rspi.h> 27 + #include <linux/spinlock.h> 27 28 28 29 #define RSPI_SPCR 0x00 /* Control Register */ 29 30 #define RSPI_SSLP 0x01 /* Slave Select Polarity Register */ ··· 80 79 #define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */ 81 80 82 81 /* SSLP - Slave Select Polarity Register */ 83 - #define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */ 84 - #define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */ 82 + #define SSLP_SSLP(i) BIT(i) /* SSLi Signal Polarity Setting */ 85 83 86 84 /* SPPCR - Pin Control Register */ 87 85 #define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */ ··· 181 181 void __iomem *addr; 182 182 u32 max_speed_hz; 183 183 struct spi_controller *ctlr; 184 + struct platform_device *pdev; 184 185 wait_queue_head_t wait; 186 + spinlock_t lock; /* Protects RMW-access to RSPI_SSLP */ 185 187 struct clk *clk; 186 188 u16 spcmd; 187 189 u8 spsr; ··· 241 239 int (*set_config_register)(struct rspi_data *rspi, int access_size); 242 240 int (*transfer_one)(struct spi_controller *ctlr, 243 241 struct spi_device *spi, struct spi_transfer *xfer); 244 - u16 mode_bits; 242 + u16 extra_mode_bits; 245 243 u16 flags; 246 244 u16 fifo_size; 247 245 u8 num_hw_ss; ··· 921 919 return 0; 922 920 } 923 921 922 + static int rspi_setup(struct spi_device *spi) 923 + { 924 + struct rspi_data *rspi = spi_controller_get_devdata(spi->controller); 925 + u8 sslp; 926 + 927 + if (spi->cs_gpiod) 928 + return 0; 929 + 930 + pm_runtime_get_sync(&rspi->pdev->dev); 931 + spin_lock_irq(&rspi->lock); 932 + 933 + sslp = rspi_read8(rspi, RSPI_SSLP); 934 + if (spi->mode & SPI_CS_HIGH) 935 + sslp |= SSLP_SSLP(spi->chip_select); 936 + else 937 + sslp &= ~SSLP_SSLP(spi->chip_select); 938 + rspi_write8(rspi, sslp, RSPI_SSLP); 939 + 940 + spin_unlock_irq(&rspi->lock); 941 + pm_runtime_put(&rspi->pdev->dev); 942 + return 0; 943 + } 944 + 924 945 static int rspi_prepare_message(struct 
spi_controller *ctlr, 925 946 struct spi_message *msg) 926 947 { ··· 958 933 rspi->spcmd |= SPCMD_CPOL; 959 934 if (spi->mode & SPI_CPHA) 960 935 rspi->spcmd |= SPCMD_CPHA; 936 + if (spi->mode & SPI_LSB_FIRST) 937 + rspi->spcmd |= SPCMD_LSBF; 961 938 962 939 /* Configure slave signal to assert */ 963 940 rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs ··· 1149 1122 static const struct spi_ops rspi_ops = { 1150 1123 .set_config_register = rspi_set_config_register, 1151 1124 .transfer_one = rspi_transfer_one, 1152 - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 1153 1125 .flags = SPI_CONTROLLER_MUST_TX, 1154 1126 .fifo_size = 8, 1155 1127 .num_hw_ss = 2, ··· 1157 1131 static const struct spi_ops rspi_rz_ops = { 1158 1132 .set_config_register = rspi_rz_set_config_register, 1159 1133 .transfer_one = rspi_rz_transfer_one, 1160 - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 1161 1134 .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, 1162 1135 .fifo_size = 8, /* 8 for TX, 32 for RX */ 1163 1136 .num_hw_ss = 1, ··· 1165 1140 static const struct spi_ops qspi_ops = { 1166 1141 .set_config_register = qspi_set_config_register, 1167 1142 .transfer_one = qspi_transfer_one, 1168 - .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | 1169 - SPI_TX_DUAL | SPI_TX_QUAD | 1143 + .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | 1170 1144 SPI_RX_DUAL | SPI_RX_QUAD, 1171 1145 .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, 1172 1146 .fifo_size = 32, ··· 1273 1249 goto error1; 1274 1250 } 1275 1251 1252 + rspi->pdev = pdev; 1276 1253 pm_runtime_enable(&pdev->dev); 1277 1254 1278 1255 init_waitqueue_head(&rspi->wait); 1256 + spin_lock_init(&rspi->lock); 1279 1257 1280 1258 ctlr->bus_num = pdev->id; 1259 + ctlr->setup = rspi_setup; 1281 1260 ctlr->auto_runtime_pm = true; 1282 1261 ctlr->transfer_one = ops->transfer_one; 1283 1262 ctlr->prepare_message = rspi_prepare_message; 1284 1263 ctlr->unprepare_message = rspi_unprepare_message; 1285 - ctlr->mode_bits = 
ops->mode_bits; 1264 + ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | 1265 + SPI_LOOP | ops->extra_mode_bits; 1286 1266 ctlr->flags = ops->flags; 1287 1267 ctlr->dev.of_node = pdev->dev.of_node; 1288 1268 ctlr->use_gpio_descriptors = true;
+1 -1
drivers/spi/spi-s3c24xx.c
··· 227 227 struct spi_fiq_code { 228 228 u32 length; 229 229 u32 ack_offset; 230 - u8 data[0]; 230 + u8 data[]; 231 231 }; 232 232 233 233 extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
+19 -12
drivers/spi/spi-stm32-qspi.c
··· 565 565 qspi->io_base = devm_ioremap_resource(dev, res); 566 566 if (IS_ERR(qspi->io_base)) { 567 567 ret = PTR_ERR(qspi->io_base); 568 - goto err; 568 + goto err_master_put; 569 569 } 570 570 571 571 qspi->phys_base = res->start; ··· 574 574 qspi->mm_base = devm_ioremap_resource(dev, res); 575 575 if (IS_ERR(qspi->mm_base)) { 576 576 ret = PTR_ERR(qspi->mm_base); 577 - goto err; 577 + goto err_master_put; 578 578 } 579 579 580 580 qspi->mm_size = resource_size(res); 581 581 if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) { 582 582 ret = -EINVAL; 583 - goto err; 583 + goto err_master_put; 584 584 } 585 585 586 586 irq = platform_get_irq(pdev, 0); 587 - if (irq < 0) 588 - return irq; 587 + if (irq < 0) { 588 + ret = irq; 589 + goto err_master_put; 590 + } 589 591 590 592 ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, 591 593 dev_name(dev), qspi); 592 594 if (ret) { 593 595 dev_err(dev, "failed to request irq\n"); 594 - goto err; 596 + goto err_master_put; 595 597 } 596 598 597 599 init_completion(&qspi->data_completion); ··· 601 599 qspi->clk = devm_clk_get(dev, NULL); 602 600 if (IS_ERR(qspi->clk)) { 603 601 ret = PTR_ERR(qspi->clk); 604 - goto err; 602 + goto err_master_put; 605 603 } 606 604 607 605 qspi->clk_rate = clk_get_rate(qspi->clk); 608 606 if (!qspi->clk_rate) { 609 607 ret = -EINVAL; 610 - goto err; 608 + goto err_master_put; 611 609 } 612 610 613 611 ret = clk_prepare_enable(qspi->clk); 614 612 if (ret) { 615 613 dev_err(dev, "can not enable the clock\n"); 616 - goto err; 614 + goto err_master_put; 617 615 } 618 616 619 617 rstc = devm_reset_control_get_exclusive(dev, NULL); 620 - if (!IS_ERR(rstc)) { 618 + if (IS_ERR(rstc)) { 619 + ret = PTR_ERR(rstc); 620 + if (ret == -EPROBE_DEFER) 621 + goto err_qspi_release; 622 + } else { 621 623 reset_control_assert(rstc); 622 624 udelay(2); 623 625 reset_control_deassert(rstc); ··· 631 625 platform_set_drvdata(pdev, qspi); 632 626 ret = stm32_qspi_dma_setup(qspi); 633 627 if (ret) 634 - goto err; 628 + 
goto err_qspi_release; 635 629 636 630 mutex_init(&qspi->lock); 637 631 ··· 647 641 if (!ret) 648 642 return 0; 649 643 650 - err: 644 + err_qspi_release: 651 645 stm32_qspi_release(qspi); 646 + err_master_put: 652 647 spi_master_put(qspi->ctrl); 653 648 654 649 return ret;
+45 -17
drivers/spi/spi-stm32.c
··· 175 175 #define SPI_DMA_MIN_BYTES 16 176 176 177 177 /** 178 - * stm32_spi_reg - stm32 SPI register & bitfield desc 178 + * struct stm32_spi_reg - stm32 SPI register & bitfield desc 179 179 * @reg: register offset 180 180 * @mask: bitfield mask 181 181 * @shift: left shift ··· 187 187 }; 188 188 189 189 /** 190 - * stm32_spi_regspec - stm32 registers definition, compatible dependent data 191 - * en: enable register and SPI enable bit 192 - * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit 193 - * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit 194 - * cpol: clock polarity register and polarity bit 195 - * cpha: clock phase register and phase bit 196 - * lsb_first: LSB transmitted first register and bit 197 - * br: baud rate register and bitfields 198 - * rx: SPI RX data register 199 - * tx: SPI TX data register 190 + * struct stm32_spi_regspec - stm32 registers definition, compatible dependent data 191 + * @en: enable register and SPI enable bit 192 + * @dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit 193 + * @dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit 194 + * @cpol: clock polarity register and polarity bit 195 + * @cpha: clock phase register and phase bit 196 + * @lsb_first: LSB transmitted first register and bit 197 + * @br: baud rate register and bitfields 198 + * @rx: SPI RX data register 199 + * @tx: SPI TX data register 200 200 */ 201 201 struct stm32_spi_regspec { 202 202 const struct stm32_spi_reg en; ··· 213 213 struct stm32_spi; 214 214 215 215 /** 216 - * stm32_spi_cfg - stm32 compatible configuration data 216 + * struct stm32_spi_cfg - stm32 compatible configuration data 217 217 * @regs: registers descriptions 218 218 * @get_fifo_size: routine to get fifo size 219 219 * @get_bpw_mask: routine to get bits per word mask ··· 223 223 * @set_mode: routine to configure registers to desired mode 224 224 * @set_data_idleness: optional routine to configure registers to desired idle 225 225 
* time between frames (if driver has this functionality) 226 - * set_number_of_data: optional routine to configure registers to desired 226 + * @set_number_of_data: optional routine to configure registers to desired 227 227 * number of data (if driver has this functionality) 228 228 * @can_dma: routine to determine if the transfer is eligible for DMA use 229 229 * @transfer_one_dma_start: routine to start transfer a single spi_transfer 230 230 * using DMA 231 - * @dma_rx cb: routine to call after DMA RX channel operation is complete 232 - * @dma_tx cb: routine to call after DMA TX channel operation is complete 231 + * @dma_rx_cb: routine to call after DMA RX channel operation is complete 232 + * @dma_tx_cb: routine to call after DMA TX channel operation is complete 233 233 * @transfer_one_irq: routine to configure interrupts for driver 234 234 * @irq_handler_event: Interrupt handler for SPI controller events 235 235 * @irq_handler_thread: thread of interrupt handler for SPI controller ··· 587 587 /** 588 588 * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register 589 589 * @spi: pointer to the spi controller data structure 590 + * @flush: boolean indicating that FIFO should be flushed 590 591 * 591 592 * Write in rx_buf depends on remaining bytes to avoid to write beyond 592 593 * rx_buf end. ··· 757 756 758 757 /** 759 758 * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use 759 + * @master: controller master interface 760 + * @spi_dev: pointer to the spi device 761 + * @transfer: pointer to spi transfer 760 762 * 761 763 * If driver has fifo and the current transfer size is greater than fifo size, 762 764 * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes. 
··· 978 974 979 975 /** 980 976 * stm32_spi_prepare_msg - set up the controller to transfer a single message 977 + * @master: controller master interface 978 + * @msg: pointer to spi message 981 979 */ 982 980 static int stm32_spi_prepare_msg(struct spi_master *master, 983 981 struct spi_message *msg) ··· 1032 1026 1033 1027 /** 1034 1028 * stm32f4_spi_dma_tx_cb - dma callback 1029 + * @data: pointer to the spi controller data structure 1035 1030 * 1036 1031 * DMA callback is called when the transfer is complete for DMA TX channel. 1037 1032 */ ··· 1048 1041 1049 1042 /** 1050 1043 * stm32f4_spi_dma_rx_cb - dma callback 1044 + * @data: pointer to the spi controller data structure 1051 1045 * 1052 1046 * DMA callback is called when the transfer is complete for DMA RX channel. 1053 1047 */ ··· 1062 1054 1063 1055 /** 1064 1056 * stm32h7_spi_dma_cb - dma callback 1057 + * @data: pointer to the spi controller data structure 1065 1058 * 1066 1059 * DMA callback is called when the transfer is complete or when an error 1067 1060 * occurs. If the transfer is complete, EOT flag is raised. ··· 1088 1079 /** 1089 1080 * stm32_spi_dma_config - configure dma slave channel depending on current 1090 1081 * transfer bits_per_word. 1082 + * @spi: pointer to the spi controller data structure 1083 + * @dma_conf: pointer to the dma_slave_config structure 1084 + * @dir: direction of the dma transfer 1091 1085 */ 1092 1086 static void stm32_spi_dma_config(struct stm32_spi *spi, 1093 1087 struct dma_slave_config *dma_conf, ··· 1138 1126 /** 1139 1127 * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using 1140 1128 * interrupts 1129 + * @spi: pointer to the spi controller data structure 1141 1130 * 1142 1131 * It must returns 0 if the transfer is finished or 1 if the transfer is still 1143 1132 * in progress. 
··· 1179 1166 /** 1180 1167 * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using 1181 1168 * interrupts 1169 + * @spi: pointer to the spi controller data structure 1182 1170 * 1183 1171 * It must returns 0 if the transfer is finished or 1 if the transfer is still 1184 1172 * in progress. ··· 1221 1207 /** 1222 1208 * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start 1223 1209 * transfer using DMA 1210 + * @spi: pointer to the spi controller data structure 1224 1211 */ 1225 1212 static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) 1226 1213 { ··· 1242 1227 /** 1243 1228 * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start 1244 1229 * transfer using DMA 1230 + * @spi: pointer to the spi controller data structure 1245 1231 */ 1246 1232 static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) 1247 1233 { ··· 1259 1243 1260 1244 /** 1261 1245 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA 1246 + * @spi: pointer to the spi controller data structure 1247 + * @xfer: pointer to the spi_transfer structure 1262 1248 * 1263 1249 * It must returns 0 if the transfer is finished or 1 if the transfer is still 1264 1250 * in progress. 
··· 1423 1405 /** 1424 1406 * stm32_spi_communication_type - return transfer communication type 1425 1407 * @spi_dev: pointer to the spi device 1426 - * transfer: pointer to spi transfer 1408 + * @transfer: pointer to spi transfer 1427 1409 */ 1428 1410 static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev, 1429 1411 struct spi_transfer *transfer) ··· 1540 1522 /** 1541 1523 * stm32h7_spi_number_of_data - configure number of data at current transfer 1542 1524 * @spi: pointer to the spi controller data structure 1543 - * @len: transfer length 1525 + * @nb_words: transfer length (in words) 1544 1526 */ 1545 1527 static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) 1546 1528 { ··· 1564 1546 * stm32_spi_transfer_one_setup - common setup to transfer a single 1565 1547 * spi_transfer either using DMA or 1566 1548 * interrupts. 1549 + * @spi: pointer to the spi controller data structure 1550 + * @spi_dev: pointer to the spi device 1551 + * @transfer: pointer to spi transfer 1567 1552 */ 1568 1553 static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, 1569 1554 struct spi_device *spi_dev, ··· 1646 1625 1647 1626 /** 1648 1627 * stm32_spi_transfer_one - transfer a single spi_transfer 1628 + * @master: controller master interface 1629 + * @spi_dev: pointer to the spi device 1630 + * @transfer: pointer to spi transfer 1649 1631 * 1650 1632 * It must return 0 if the transfer is finished or 1 if the transfer is still 1651 1633 * in progress. 
··· 1682 1658 1683 1659 /** 1684 1660 * stm32_spi_unprepare_msg - relax the hardware 1661 + * @master: controller master interface 1662 + * @msg: pointer to the spi message 1685 1663 */ 1686 1664 static int stm32_spi_unprepare_msg(struct spi_master *master, 1687 1665 struct spi_message *msg) ··· 1697 1671 1698 1672 /** 1699 1673 * stm32f4_spi_config - Configure SPI controller as SPI master 1674 + * @spi: pointer to the spi controller data structure 1700 1675 */ 1701 1676 static int stm32f4_spi_config(struct stm32_spi *spi) 1702 1677 { ··· 1728 1701 1729 1702 /** 1730 1703 * stm32h7_spi_config - Configure SPI controller as SPI master 1704 + * @spi: pointer to the spi controller data structure 1731 1705 */ 1732 1706 static int stm32h7_spi_config(struct stm32_spi *spi) 1733 1707 {
+13 -21
drivers/spi/spi.c
··· 510 510 spi->dev.bus = &spi_bus_type; 511 511 spi->dev.release = spidev_release; 512 512 spi->cs_gpio = -ENOENT; 513 + spi->mode = ctlr->buswidth_override_bits; 513 514 514 515 spin_lock_init(&spi->statistics.lock); 515 516 ··· 1515 1514 if (!xfer->ptp_sts) 1516 1515 return; 1517 1516 1518 - if (xfer->timestamped_pre) 1517 + if (xfer->timestamped) 1519 1518 return; 1520 1519 1521 - if (progress < xfer->ptp_sts_word_pre) 1520 + if (progress > xfer->ptp_sts_word_pre) 1522 1521 return; 1523 1522 1524 1523 /* Capture the resolution of the timestamp */ 1525 1524 xfer->ptp_sts_word_pre = progress; 1526 - 1527 - xfer->timestamped_pre = true; 1528 1525 1529 1526 if (irqs_off) { 1530 1527 local_irq_save(ctlr->irq_flags); ··· 1552 1553 if (!xfer->ptp_sts) 1553 1554 return; 1554 1555 1555 - if (xfer->timestamped_post) 1556 + if (xfer->timestamped) 1556 1557 return; 1557 1558 1558 1559 if (progress < xfer->ptp_sts_word_post) ··· 1568 1569 /* Capture the resolution of the timestamp */ 1569 1570 xfer->ptp_sts_word_post = progress; 1570 1571 1571 - xfer->timestamped_post = true; 1572 + xfer->timestamped = true; 1572 1573 } 1573 1574 EXPORT_SYMBOL_GPL(spi_take_timestamp_post); 1574 1575 ··· 1673 1674 } 1674 1675 } 1675 1676 1676 - if (unlikely(ctlr->ptp_sts_supported)) { 1677 - list_for_each_entry(xfer, &mesg->transfers, transfer_list) { 1678 - WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre); 1679 - WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post); 1680 - } 1681 - } 1677 + if (unlikely(ctlr->ptp_sts_supported)) 1678 + list_for_each_entry(xfer, &mesg->transfers, transfer_list) 1679 + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); 1682 1680 1683 1681 spi_unmap_msg(ctlr, mesg); 1684 1682 ··· 1951 1955 spi->mode |= SPI_CS_HIGH; 1952 1956 1953 1957 /* Device speed */ 1954 - rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1955 - if (rc) { 1956 - dev_err(&ctlr->dev, 1957 - "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc); 1958 - return rc; 
1959 - } 1960 - spi->max_speed_hz = value; 1958 + if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 1959 + spi->max_speed_hz = value; 1961 1960 1962 1961 return 0; 1963 1962 } ··· 2172 2181 return AE_NO_MEMORY; 2173 2182 } 2174 2183 2184 + 2175 2185 ACPI_COMPANION_SET(&spi->dev, adev); 2176 2186 spi->max_speed_hz = lookup.max_speed_hz; 2177 - spi->mode = lookup.mode; 2187 + spi->mode |= lookup.mode; 2178 2188 spi->irq = lookup.irq; 2179 2189 spi->bits_per_word = lookup.bits_per_word; 2180 2190 spi->chip_select = lookup.chip_select; ··· 4026 4034 struct device *dev; 4027 4035 4028 4036 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); 4029 - return dev ? to_spi_device(dev) : NULL; 4037 + return to_spi_device(dev); 4030 4038 } 4031 4039 4032 4040 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
+12 -11
drivers/spi/spidev.c
··· 275 275 #ifdef VERBOSE 276 276 dev_dbg(&spidev->spi->dev, 277 277 " xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n", 278 - u_tmp->len, 279 - u_tmp->rx_buf ? "rx " : "", 280 - u_tmp->tx_buf ? "tx " : "", 281 - u_tmp->cs_change ? "cs " : "", 282 - u_tmp->bits_per_word ? : spidev->spi->bits_per_word, 283 - u_tmp->delay_usecs, 284 - u_tmp->word_delay_usecs, 285 - u_tmp->speed_hz ? : spidev->spi->max_speed_hz); 278 + k_tmp->len, 279 + k_tmp->rx_buf ? "rx " : "", 280 + k_tmp->tx_buf ? "tx " : "", 281 + k_tmp->cs_change ? "cs " : "", 282 + k_tmp->bits_per_word ? : spidev->spi->bits_per_word, 283 + k_tmp->delay.value, 284 + k_tmp->word_delay.value, 285 + k_tmp->speed_hz ? : spidev->spi->max_speed_hz); 286 286 #endif 287 287 spi_message_add_tail(k_tmp, &msg); 288 288 } ··· 454 454 455 455 spi->max_speed_hz = tmp; 456 456 retval = spi_setup(spi); 457 - if (retval >= 0) 457 + if (retval == 0) { 458 458 spidev->speed_hz = tmp; 459 - else 460 - dev_dbg(&spi->dev, "%d Hz (max)\n", tmp); 459 + dev_dbg(&spi->dev, "%d Hz (max)\n", 460 + spidev->speed_hz); 461 + } 461 462 spi->max_speed_hz = save; 462 463 } 463 464 break;
+7 -2
include/linux/spi/spi.h
··· 135 135 * @modalias: Name of the driver to use with this device, or an alias 136 136 * for that name. This appears in the sysfs "modalias" attribute 137 137 * for driver coldplugging, and in uevents used for hotplugging 138 + * @driver_override: If the name of a driver is written to this attribute, then 139 + * the device will bind to the named driver and only the named driver. 138 140 * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when 139 141 * not using a GPIO line) use cs_gpiod in new drivers by opting in on 140 142 * the spi_master. ··· 445 443 * @spi_transfer->ptp_sts_word_post were transmitted. 446 444 * If the driver does not set this, the SPI core takes the snapshot as 447 445 * close to the driver hand-over as possible. 446 + * @irq_flags: Interrupt enable state during PTP system timestamping 448 447 * 449 448 * Each SPI controller can communicate with one or more @spi_device 450 449 * children. These make a small bus, sharing MOSI, MISO and SCK signals ··· 483 480 484 481 /* spi_device.mode flags understood by this controller driver */ 485 482 u32 mode_bits; 483 + 484 + /* spi_device.mode flags override flags for this controller */ 485 + u32 buswidth_override_bits; 486 486 487 487 /* bitmask of supported bits_per_word for transfers */ 488 488 u32 bits_per_word_mask; ··· 936 930 937 931 struct ptp_system_timestamp *ptp_sts; 938 932 939 - bool timestamped_pre; 940 - bool timestamped_post; 933 + bool timestamped; 941 934 942 935 struct list_head transfer_list; 943 936 };
+1 -1
tools/spi/Makefile
··· 51 51 52 52 clean: 53 53 rm -f $(ALL_PROGRAMS) 54 - rm -f $(OUTPUT)include/linux/spi/spidev.h 54 + rm -rf $(OUTPUT)include/ 55 55 find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete 56 56 57 57 install: $(ALL_PROGRAMS)
+9 -5
tools/spi/spidev_test.c
··· 13 13 #include <stdio.h> 14 14 #include <stdlib.h> 15 15 #include <string.h> 16 + #include <errno.h> 16 17 #include <getopt.h> 17 18 #include <fcntl.h> 18 19 #include <time.h> ··· 27 26 28 27 static void pabort(const char *s) 29 28 { 30 - perror(s); 29 + if (errno != 0) 30 + perror(s); 31 + else 32 + printf("%s\n", s); 33 + 31 34 abort(); 32 35 } 33 36 ··· 288 283 break; 289 284 default: 290 285 print_usage(argv[0]); 291 - break; 292 286 } 293 287 } 294 288 if (mode & SPI_LOOP) { ··· 409 405 410 406 parse_opts(argc, argv); 411 407 408 + if (input_tx && input_file) 409 + pabort("only one of -p and --input may be selected"); 410 + 412 411 fd = open(device, O_RDWR); 413 412 if (fd < 0) 414 413 pabort("can't open device"); ··· 452 445 printf("spi mode: 0x%x\n", mode); 453 446 printf("bits per word: %d\n", bits); 454 447 printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000); 455 - 456 - if (input_tx && input_file) 457 - pabort("only one of -p and --input may be selected"); 458 448 459 449 if (input_tx) 460 450 transfer_escaped_string(fd, input_tx);