Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'spi-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi updates from Mark Brown:
"This is quite a quiet release for SPI, there's been a bit of cleanup
to the core from Uwe but nothing functionality wise.

We have added several new drivers, Cadence XSPI, Ingenic JZ47xx,
Qualcomm SC7280 and SC7180 and Xilinx Versal OSPI"

* tag 'spi-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (41 commits)
spi: Convert NXP flexspi to json schema
spi: spi-geni-qcom: Add support for GPI dma
spi: fsi: Fix contention in the FSI2SPI engine
spi: spi-rpc-if: Check return value of rpcif_sw_init()
spi: tegra210-quad: Put device into suspend on driver removal
spi: tegra20-slink: Put device into suspend on driver removal
spi: bcm-qspi: Fix missing clk_disable_unprepare() on error in bcm_qspi_probe()
spi: at91-usart: replacing legacy gpio interface for gpiod
spi: replace snprintf in show functions with sysfs_emit
spi: cadence: Add of_node_put() before return
spi: orion: Add of_node_put() before goto
spi: cadence-quadspi: fix dma_unmap_single() call
spi: tegra20: fix build with CONFIG_PM_SLEEP=n
spi: bcm-qspi: add support for 3-wire mode for half duplex transfer
spi: bcm-qspi: Add mspi spcr3 32/64-bits xfer mode
spi: Make several public functions private to spi.c
spi: Reorder functions to simplify the next commit
spi: Remove unused function spi_busnum_to_master()
spi: Move comment about chipselect check to the right place
spi: fsi: Print status on error
...

+2325 -453
+12
Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
··· 11 11 12 12 allOf: 13 13 - $ref: spi-controller.yaml# 14 + - if: 15 + properties: 16 + compatible: 17 + contains: 18 + const: xlnx,versal-ospi-1.0 19 + then: 20 + required: 21 + - power-domains 14 22 15 23 properties: 16 24 compatible: ··· 28 20 - ti,k2g-qspi 29 21 - ti,am654-ospi 30 22 - intel,lgm-qspi 23 + - xlnx,versal-ospi-1.0 31 24 - const: cdns,qspi-nor 32 25 - const: cdns,qspi-nor 33 26 ··· 73 64 Flag to indicate that QSPI return clock is used to latch the read 74 65 data rather than the QSPI clock. Make sure that QSPI return clock 75 66 is populated on the board before using this property. 67 + 68 + power-domains: 69 + maxItems: 1 76 70 77 71 resets: 78 72 maxItems: 2
+77
Documentation/devicetree/bindings/spi/cdns,xspi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 + # Copyright 2020-21 Cadence 3 + %YAML 1.2 4 + --- 5 + $id: "http://devicetree.org/schemas/spi/cdns,xspi.yaml#" 6 + $schema: "http://devicetree.org/meta-schemas/core.yaml#" 7 + 8 + title: Cadence XSPI Controller 9 + 10 + maintainers: 11 + - Parshuram Thombare <pthombar@cadence.com> 12 + 13 + description: | 14 + The XSPI controller allows SPI protocol communication in 15 + single, dual, quad or octal wire transmission modes for 16 + read/write access to slaves such as SPI-NOR flash. 17 + 18 + allOf: 19 + - $ref: "spi-controller.yaml#" 20 + 21 + properties: 22 + compatible: 23 + const: cdns,xspi-nor 24 + 25 + reg: 26 + items: 27 + - description: address and length of the controller register set 28 + - description: address and length of the Slave DMA data port 29 + - description: address and length of the auxiliary registers 30 + 31 + reg-names: 32 + items: 33 + - const: io 34 + - const: sdma 35 + - const: aux 36 + 37 + interrupts: 38 + maxItems: 1 39 + 40 + required: 41 + - compatible 42 + - reg 43 + - interrupts 44 + 45 + unevaluatedProperties: false 46 + 47 + examples: 48 + - | 49 + #include <dt-bindings/interrupt-controller/irq.h> 50 + bus { 51 + #address-cells = <2>; 52 + #size-cells = <2>; 53 + 54 + xspi: spi@a0010000 { 55 + #address-cells = <1>; 56 + #size-cells = <0>; 57 + compatible = "cdns,xspi-nor"; 58 + reg = <0x0 0xa0010000 0x0 0x1040>, 59 + <0x0 0xb0000000 0x0 0x1000>, 60 + <0x0 0xa0020000 0x0 0x100>; 61 + reg-names = "io", "sdma", "aux"; 62 + interrupts = <0 90 IRQ_TYPE_LEVEL_HIGH>; 63 + interrupt-parent = <&gic>; 64 + 65 + flash@0 { 66 + compatible = "jedec,spi-nor"; 67 + spi-max-frequency = <75000000>; 68 + reg = <0>; 69 + }; 70 + 71 + flash@1 { 72 + compatible = "jedec,spi-nor"; 73 + spi-max-frequency = <75000000>; 74 + reg = <1>; 75 + }; 76 + }; 77 + };
+72
Documentation/devicetree/bindings/spi/ingenic,spi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/spi/ingenic,spi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Ingenic SoCs SPI controller devicetree bindings 8 + 9 + maintainers: 10 + - Artur Rojek <contact@artur-rojek.eu> 11 + - Paul Cercueil <paul@crapouillou.net> 12 + 13 + allOf: 14 + - $ref: /schemas/spi/spi-controller.yaml# 15 + 16 + properties: 17 + compatible: 18 + oneOf: 19 + - enum: 20 + - ingenic,jz4750-spi 21 + - ingenic,jz4780-spi 22 + - items: 23 + - enum: 24 + - ingenic,jz4760-spi 25 + - ingenic,jz4770-spi 26 + - const: ingenic,jz4750-spi 27 + 28 + reg: 29 + maxItems: 1 30 + 31 + interrupts: 32 + maxItems: 1 33 + 34 + clocks: 35 + maxItems: 1 36 + 37 + dmas: 38 + maxItems: 2 39 + minItems: 2 40 + 41 + dma-names: 42 + items: 43 + - const: rx 44 + - const: tx 45 + 46 + required: 47 + - compatible 48 + - reg 49 + - interrupts 50 + - clocks 51 + - dmas 52 + - dma-names 53 + 54 + unevaluatedProperties: false 55 + 56 + examples: 57 + - | 58 + #include <dt-bindings/clock/jz4770-cgu.h> 59 + spi@10043000 { 60 + compatible = "ingenic,jz4770-spi", "ingenic,jz4750-spi"; 61 + reg = <0x10043000 0x1c>; 62 + #address-cells = <1>; 63 + #size-cells = <0>; 64 + 65 + interrupt-parent = <&intc>; 66 + interrupts = <8>; 67 + 68 + clocks = <&cgu JZ4770_CLK_SSI0>; 69 + 70 + dmas = <&dmac1 23 0xffffffff>, <&dmac1 22 0xffffffff>; 71 + dma-names = "rx", "tx"; 72 + };
+5 -1
Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
··· 21 21 properties: 22 22 compatible: 23 23 items: 24 - - const: qcom,sdm845-qspi 24 + - enum: 25 + - qcom,sc7180-qspi 26 + - qcom,sc7280-qspi 27 + - qcom,sdm845-qspi 28 + 25 29 - const: qcom,qspi-v1 26 30 27 31 reg:
-44
Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
··· 1 - * NXP Flex Serial Peripheral Interface (FSPI) 2 - 3 - Required properties: 4 - - compatible : Should be "nxp,lx2160a-fspi" 5 - "nxp,imx8qxp-fspi" 6 - "nxp,imx8mm-fspi" 7 - "nxp,imx8mp-fspi" 8 - "nxp,imx8dxl-fspi" 9 - 10 - - reg : First contains the register location and length, 11 - Second contains the memory mapping address and length 12 - - reg-names : Should contain the resource reg names: 13 - - fspi_base: configuration register address space 14 - - fspi_mmap: memory mapped address space 15 - - interrupts : Should contain the interrupt for the device 16 - 17 - Required SPI slave node properties: 18 - - reg : There are two buses (A and B) with two chip selects each. 19 - This encodes to which bus and CS the flash is connected: 20 - - <0>: Bus A, CS 0 21 - - <1>: Bus A, CS 1 22 - - <2>: Bus B, CS 0 23 - - <3>: Bus B, CS 1 24 - 25 - Example showing the usage of two SPI NOR slave devices on bus A: 26 - 27 - fspi0: spi@20c0000 { 28 - compatible = "nxp,lx2160a-fspi"; 29 - reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>; 30 - reg-names = "fspi_base", "fspi_mmap"; 31 - interrupts = <0 25 0x4>; /* Level high type */ 32 - clocks = <&clockgen 4 3>, <&clockgen 4 3>; 33 - clock-names = "fspi_en", "fspi"; 34 - 35 - mt35xu512aba0: flash@0 { 36 - reg = <0>; 37 - .... 38 - }; 39 - 40 - mt35xu512aba1: flash@1 { 41 - reg = <1>; 42 - .... 43 - }; 44 - };
+86
Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/spi/spi-nxp-fspi.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: NXP Flex Serial Peripheral Interface (FSPI) 8 + 9 + maintainers: 10 + - Kuldeep Singh <kuldeep.singh@nxp.com> 11 + 12 + allOf: 13 + - $ref: "spi-controller.yaml#" 14 + 15 + properties: 16 + compatible: 17 + enum: 18 + - nxp,imx8dxl-fspi 19 + - nxp,imx8mm-fspi 20 + - nxp,imx8mp-fspi 21 + - nxp,imx8qxp-fspi 22 + - nxp,lx2160a-fspi 23 + 24 + reg: 25 + items: 26 + - description: registers address space 27 + - description: memory mapped address space 28 + 29 + reg-names: 30 + items: 31 + - const: fspi_base 32 + - const: fspi_mmap 33 + 34 + interrupts: 35 + maxItems: 1 36 + 37 + clocks: 38 + items: 39 + - description: SPI bus clock 40 + - description: SPI serial clock 41 + 42 + clock-names: 43 + items: 44 + - const: fspi_en 45 + - const: fspi 46 + 47 + required: 48 + - compatible 49 + - reg 50 + - reg-names 51 + - interrupts 52 + - clocks 53 + - clock-names 54 + 55 + unevaluatedProperties: false 56 + 57 + examples: 58 + - | 59 + #include <dt-bindings/interrupt-controller/arm-gic.h> 60 + #include <dt-bindings/clock/fsl,qoriq-clockgen.h> 61 + 62 + soc { 63 + #address-cells = <2>; 64 + #size-cells = <2>; 65 + 66 + spi@20c0000 { 67 + compatible = "nxp,lx2160a-fspi"; 68 + reg = <0x0 0x20c0000 0x0 0x100000>, 69 + <0x0 0x20000000 0x0 0x10000000>; 70 + reg-names = "fspi_base", "fspi_mmap"; 71 + interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>; 72 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(4)>, 73 + <&clockgen QORIQ_CLK_PLATFORM_PLL QORIQ_CLK_PLL_DIV(4)>; 74 + clock-names = "fspi_en", "fspi"; 75 + #address-cells = <1>; 76 + #size-cells = <0>; 77 + 78 + flash@0 { 79 + compatible = "jedec,spi-nor"; 80 + spi-max-frequency = <50000000>; 81 + reg = <0>; 82 + spi-rx-bus-width = <8>; 83 + spi-tx-bus-width = <8>; 84 + }; 85 + }; 86 + };
-8
Documentation/spi/spi-summary.rst
··· 336 336 Non-static Configurations 337 337 ^^^^^^^^^^^^^^^^^^^^^^^^^ 338 338 339 - Developer boards often play by different rules than product boards, and one 340 - example is the potential need to hotplug SPI devices and/or controllers. 341 - 342 - For those cases you might need to use spi_busnum_to_master() to look 343 - up the spi bus master, and will likely need spi_new_device() to provide the 344 - board info based on the board that was hotplugged. Of course, you'd later 345 - call at least spi_unregister_device() when that board is removed. 346 - 347 339 When Linux includes support for MMC/SD/SDIO/DataFlash cards through SPI, those 348 340 configurations will also be dynamic. Fortunately, such devices all support 349 341 basic device identification probes, so they should hotplug normally.
+1 -1
MAINTAINERS
··· 13488 13488 R: Yogesh Gaur <yogeshgaur.83@gmail.com> 13489 13489 L: linux-spi@vger.kernel.org 13490 13490 S: Maintained 13491 - F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt 13491 + F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml 13492 13492 F: drivers/spi/spi-nxp-fspi.c 13493 13493 13494 13494 NXP FXAS21002C DRIVER
+6 -3
arch/mips/boot/dts/ingenic/ci20.dts
··· 113 113 * Use the 32.768 kHz oscillator as the parent of the RTC for a higher 114 114 * precision. 115 115 */ 116 - assigned-clocks = <&cgu JZ4780_CLK_OTGPHY>, <&cgu JZ4780_CLK_RTC>; 117 - assigned-clock-parents = <0>, <&cgu JZ4780_CLK_RTCLK>; 118 - assigned-clock-rates = <48000000>; 116 + assigned-clocks = <&cgu JZ4780_CLK_OTGPHY>, <&cgu JZ4780_CLK_RTC>, 117 + <&cgu JZ4780_CLK_SSIPLL>, <&cgu JZ4780_CLK_SSI>; 118 + assigned-clock-parents = <0>, <&cgu JZ4780_CLK_RTCLK>, 119 + <&cgu JZ4780_CLK_MPLL>, 120 + <&cgu JZ4780_CLK_SSIPLL>; 121 + assigned-clock-rates = <48000000>, <0>, <54000000>; 119 122 }; 120 123 121 124 &tcu {
+32 -12
arch/mips/boot/dts/ingenic/jz4780.dtsi
··· 255 255 }; 256 256 }; 257 257 258 - spi_gpio { 259 - compatible = "spi-gpio"; 258 + spi0: spi@10043000 { 259 + compatible = "ingenic,jz4780-spi"; 260 + reg = <0x10043000 0x1c>; 260 261 #address-cells = <1>; 261 262 #size-cells = <0>; 262 - num-chipselects = <2>; 263 263 264 - gpio-miso = <&gpe 14 0>; 265 - gpio-sck = <&gpe 15 0>; 266 - gpio-mosi = <&gpe 17 0>; 267 - cs-gpios = <&gpe 16 0>, <&gpe 18 0>; 264 + interrupt-parent = <&intc>; 265 + interrupts = <8>; 268 266 269 - spidev@0 { 270 - compatible = "spidev"; 271 - reg = <0>; 272 - spi-max-frequency = <1000000>; 273 - }; 267 + clocks = <&cgu JZ4780_CLK_SSI0>; 268 + clock-names = "spi"; 269 + 270 + dmas = <&dma JZ4780_DMA_SSI0_RX 0xffffffff>, 271 + <&dma JZ4780_DMA_SSI0_TX 0xffffffff>; 272 + dma-names = "rx", "tx"; 273 + 274 + status = "disabled"; 274 275 }; 275 276 276 277 uart0: serial@10030000 { ··· 335 334 336 335 clocks = <&ext>, <&cgu JZ4780_CLK_UART4>; 337 336 clock-names = "baud", "module"; 337 + 338 + status = "disabled"; 339 + }; 340 + 341 + spi1: spi@10044000 { 342 + compatible = "ingenic,jz4780-spi"; 343 + reg = <0x10044000 0x1c>; 344 + #address-cells = <1>; 345 + #size-cells = <0>; 346 + 347 + interrupt-parent = <&intc>; 348 + interrupts = <7>; 349 + 350 + clocks = <&cgu JZ4780_CLK_SSI1>; 351 + clock-names = "spi"; 352 + 353 + dmas = <&dma JZ4780_DMA_SSI1_RX 0xffffffff>, 354 + <&dma JZ4780_DMA_SSI1_TX 0xffffffff>; 355 + dma-names = "rx", "tx"; 338 356 339 357 status = "disabled"; 340 358 };
+17
drivers/firmware/xilinx/zynqmp.c
··· 648 648 EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset); 649 649 650 650 /** 651 + * zynqmp_pm_ospi_mux_select() - OSPI Mux selection 652 + * 653 + * @dev_id: Device Id of the OSPI device. 654 + * @select: OSPI Mux select value. 655 + * 656 + * This function select the OSPI Mux. 657 + * 658 + * Return: Returns status, either success or error+reason 659 + */ 660 + int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select) 661 + { 662 + return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT, 663 + select, 0, NULL); 664 + } 665 + EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select); 666 + 667 + /** 651 668 * zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs) 652 669 * @index: GGS register index 653 670 * @value: Register value to be written
+24 -2
drivers/spi/Kconfig
··· 228 228 device with a Cadence QSPI controller and want to access the 229 229 Flash as an MTD device. 230 230 231 + config SPI_CADENCE_XSPI 232 + tristate "Cadence XSPI controller" 233 + depends on (OF || COMPILE_TEST) && HAS_IOMEM 234 + depends on SPI_MEM 235 + help 236 + Enable support for the Cadence XSPI Flash controller. 237 + 238 + Cadence XSPI is a specialized controller for connecting an SPI 239 + Flash over upto 8bit wide bus. Enable this option if you have a 240 + device with a Cadence XSPI controller and want to access the 241 + Flash as an MTD device. 242 + 231 243 config SPI_CLPS711X 232 244 tristate "CLPS711X host SPI controller" 233 245 depends on ARCH_CLPS711X || COMPILE_TEST ··· 417 405 select SPI_BITBANG 418 406 help 419 407 This enables support for the Freescale i.MX SPI controllers. 408 + 409 + config SPI_INGENIC 410 + tristate "Ingenic JZ47xx SoCs SPI controller" 411 + depends on MACH_INGENIC || COMPILE_TEST 412 + help 413 + This enables support for the Ingenic JZ47xx SoCs SPI controller. 414 + 415 + To compile this driver as a module, choose M here: the module 416 + will be called spi-ingenic. 420 417 421 418 config SPI_JCORE 422 419 tristate "J-Core SPI Master" ··· 759 738 TX and RX data paths. 760 739 761 740 config SPI_S3C64XX 762 - tristate "Samsung S3C64XX series type SPI" 741 + tristate "Samsung S3C64XX/Exynos SoC series type SPI" 763 742 depends on (PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST) 764 743 help 765 - SPI driver for Samsung S3C64XX and newer SoCs. 744 + SPI driver for Samsung S3C64XX, S5Pv210 and Exynos SoCs. 745 + Choose Y/M here only if you build for such Samsung SoC. 766 746 767 747 config SPI_SC18IS602 768 748 tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
+2
drivers/spi/Makefile
··· 34 34 obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o 35 35 obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o 36 36 obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o 37 + obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o 37 38 obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 38 39 obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 39 40 obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o ··· 60 59 obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o 61 60 obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o 62 61 obj-$(CONFIG_SPI_IMX) += spi-imx.o 62 + obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o 63 63 obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o 64 64 obj-$(CONFIG_SPI_JCORE) += spi-jcore.o 65 65 obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+1 -1
drivers/spi/atmel-quadspi.c
··· 310 310 return mode; 311 311 ifr |= atmel_qspi_modes[mode].config; 312 312 313 - if (op->dummy.buswidth && op->dummy.nbytes) 313 + if (op->dummy.nbytes) 314 314 dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth; 315 315 316 316 /*
+43 -70
drivers/spi/spi-amd.c
··· 38 38 void __iomem *io_remap_addr; 39 39 unsigned long io_base_addr; 40 40 u32 rom_addr; 41 - u8 chip_select; 42 41 }; 43 42 44 - static inline u8 amd_spi_readreg8(struct spi_master *master, int idx) 43 + static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx) 45 44 { 46 - struct amd_spi *amd_spi = spi_master_get_devdata(master); 47 - 48 45 return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx); 49 46 } 50 47 51 - static inline void amd_spi_writereg8(struct spi_master *master, int idx, 52 - u8 val) 48 + static inline void amd_spi_writereg8(struct amd_spi *amd_spi, int idx, u8 val) 53 49 { 54 - struct amd_spi *amd_spi = spi_master_get_devdata(master); 55 - 56 50 iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx)); 57 51 } 58 52 59 - static inline void amd_spi_setclear_reg8(struct spi_master *master, int idx, 60 - u8 set, u8 clear) 53 + static void amd_spi_setclear_reg8(struct amd_spi *amd_spi, int idx, u8 set, u8 clear) 61 54 { 62 - u8 tmp = amd_spi_readreg8(master, idx); 55 + u8 tmp = amd_spi_readreg8(amd_spi, idx); 63 56 64 57 tmp = (tmp & ~clear) | set; 65 - amd_spi_writereg8(master, idx, tmp); 58 + amd_spi_writereg8(amd_spi, idx, tmp); 66 59 } 67 60 68 - static inline u32 amd_spi_readreg32(struct spi_master *master, int idx) 61 + static inline u32 amd_spi_readreg32(struct amd_spi *amd_spi, int idx) 69 62 { 70 - struct amd_spi *amd_spi = spi_master_get_devdata(master); 71 - 72 63 return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx); 73 64 } 74 65 75 - static inline void amd_spi_writereg32(struct spi_master *master, int idx, 76 - u32 val) 66 + static inline void amd_spi_writereg32(struct amd_spi *amd_spi, int idx, u32 val) 77 67 { 78 - struct amd_spi *amd_spi = spi_master_get_devdata(master); 79 - 80 68 iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx)); 81 69 } 82 70 83 - static inline void amd_spi_setclear_reg32(struct spi_master *master, int idx, 84 - u32 set, u32 clear) 71 + static inline void 
amd_spi_setclear_reg32(struct amd_spi *amd_spi, int idx, u32 set, u32 clear) 85 72 { 86 - u32 tmp = amd_spi_readreg32(master, idx); 73 + u32 tmp = amd_spi_readreg32(amd_spi, idx); 87 74 88 75 tmp = (tmp & ~clear) | set; 89 - amd_spi_writereg32(master, idx, tmp); 76 + amd_spi_writereg32(amd_spi, idx, tmp); 90 77 } 91 78 92 - static void amd_spi_select_chip(struct spi_master *master) 79 + static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs) 93 80 { 94 - struct amd_spi *amd_spi = spi_master_get_devdata(master); 95 - u8 chip_select = amd_spi->chip_select; 96 - 97 - amd_spi_setclear_reg8(master, AMD_SPI_ALT_CS_REG, chip_select, 98 - AMD_SPI_ALT_CS_MASK); 81 + amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK); 99 82 } 100 83 101 - static void amd_spi_clear_fifo_ptr(struct spi_master *master) 84 + static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi) 102 85 { 103 - amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, 104 - AMD_SPI_FIFO_CLEAR); 86 + amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR); 105 87 } 106 88 107 - static void amd_spi_set_opcode(struct spi_master *master, u8 cmd_opcode) 89 + static void amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode) 108 90 { 109 - amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, cmd_opcode, 110 - AMD_SPI_OPCODE_MASK); 91 + amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode, AMD_SPI_OPCODE_MASK); 111 92 } 112 93 113 - static inline void amd_spi_set_rx_count(struct spi_master *master, 114 - u8 rx_count) 94 + static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count) 115 95 { 116 - amd_spi_setclear_reg8(master, AMD_SPI_RX_COUNT_REG, rx_count, 0xff); 96 + amd_spi_setclear_reg8(amd_spi, AMD_SPI_RX_COUNT_REG, rx_count, 0xff); 117 97 } 118 98 119 - static inline void amd_spi_set_tx_count(struct spi_master *master, 120 - u8 tx_count) 99 + static inline void amd_spi_set_tx_count(struct amd_spi 
*amd_spi, u8 tx_count) 121 100 { 122 - amd_spi_setclear_reg8(master, AMD_SPI_TX_COUNT_REG, tx_count, 0xff); 101 + amd_spi_setclear_reg8(amd_spi, AMD_SPI_TX_COUNT_REG, tx_count, 0xff); 123 102 } 124 103 125 - static inline int amd_spi_busy_wait(struct amd_spi *amd_spi) 104 + static int amd_spi_busy_wait(struct amd_spi *amd_spi) 126 105 { 127 - bool spi_busy; 128 106 int timeout = 100000; 129 107 130 108 /* poll for SPI bus to become idle */ 131 - spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr + 132 - AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY; 133 - while (spi_busy) { 109 + while (amd_spi_readreg32(amd_spi, AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) { 134 110 usleep_range(10, 20); 135 111 if (timeout-- < 0) 136 112 return -ETIMEDOUT; 137 - 138 - spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr + 139 - AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY; 140 113 } 141 114 142 115 return 0; 143 116 } 144 117 145 - static void amd_spi_execute_opcode(struct spi_master *master) 118 + static int amd_spi_execute_opcode(struct amd_spi *amd_spi) 146 119 { 147 - struct amd_spi *amd_spi = spi_master_get_devdata(master); 120 + int ret; 121 + 122 + ret = amd_spi_busy_wait(amd_spi); 123 + if (ret) 124 + return ret; 148 125 149 126 /* Set ExecuteOpCode bit in the CTRL0 register */ 150 - amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, 151 - AMD_SPI_EXEC_CMD); 127 + amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, AMD_SPI_EXEC_CMD); 152 128 153 - amd_spi_busy_wait(amd_spi); 129 + return 0; 154 130 } 155 131 156 132 static int amd_spi_master_setup(struct spi_device *spi) 157 133 { 158 - struct spi_master *master = spi->master; 134 + struct amd_spi *amd_spi = spi_master_get_devdata(spi->master); 159 135 160 - amd_spi_clear_fifo_ptr(master); 136 + amd_spi_clear_fifo_ptr(amd_spi); 161 137 162 138 return 0; 163 139 } ··· 161 185 tx_len = xfer->len - 1; 162 186 cmd_opcode = *(u8 *)xfer->tx_buf; 163 187 buf++; 164 - 
amd_spi_set_opcode(master, cmd_opcode); 188 + amd_spi_set_opcode(amd_spi, cmd_opcode); 165 189 166 190 /* Write data into the FIFO. */ 167 191 for (i = 0; i < tx_len; i++) { 168 - iowrite8(buf[i], 169 - ((u8 __iomem *)amd_spi->io_remap_addr + 192 + iowrite8(buf[i], ((u8 __iomem *)amd_spi->io_remap_addr + 170 193 AMD_SPI_FIFO_BASE + i)); 171 194 } 172 195 173 - amd_spi_set_tx_count(master, tx_len); 174 - amd_spi_clear_fifo_ptr(master); 196 + amd_spi_set_tx_count(amd_spi, tx_len); 197 + amd_spi_clear_fifo_ptr(amd_spi); 175 198 /* Execute command */ 176 - amd_spi_execute_opcode(master); 199 + amd_spi_execute_opcode(amd_spi); 177 200 } 178 201 if (m_cmd & AMD_SPI_XFER_RX) { 179 202 /* ··· 181 206 */ 182 207 rx_len = xfer->len; 183 208 buf = (u8 *)xfer->rx_buf; 184 - amd_spi_set_rx_count(master, rx_len); 185 - amd_spi_clear_fifo_ptr(master); 209 + amd_spi_set_rx_count(amd_spi, rx_len); 210 + amd_spi_clear_fifo_ptr(amd_spi); 186 211 /* Execute command */ 187 - amd_spi_execute_opcode(master); 212 + amd_spi_execute_opcode(amd_spi); 213 + amd_spi_busy_wait(amd_spi); 188 214 /* Read data from FIFO to receive buffer */ 189 215 for (i = 0; i < rx_len; i++) 190 - buf[i] = amd_spi_readreg8(master, 191 - AMD_SPI_FIFO_BASE + 192 - tx_len + i); 216 + buf[i] = amd_spi_readreg8(amd_spi, AMD_SPI_FIFO_BASE + tx_len + i); 193 217 } 194 218 } 195 219 ··· 207 233 struct amd_spi *amd_spi = spi_master_get_devdata(master); 208 234 struct spi_device *spi = msg->spi; 209 235 210 - amd_spi->chip_select = spi->chip_select; 211 - amd_spi_select_chip(master); 236 + amd_spi_select_chip(amd_spi, spi->chip_select); 212 237 213 238 /* 214 239 * Extract spi_transfers from the spi message and
+5 -22
drivers/spi/spi-at91-usart.c
··· 14 14 #include <linux/kernel.h> 15 15 #include <linux/module.h> 16 16 #include <linux/of_platform.h> 17 - #include <linux/of_gpio.h> 17 + #include <linux/gpio/consumer.h> 18 18 #include <linux/pinctrl/consumer.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/pm_runtime.h> ··· 482 482 483 483 static int at91_usart_gpio_setup(struct platform_device *pdev) 484 484 { 485 - struct device_node *np = pdev->dev.parent->of_node; 486 - int i; 487 - int ret; 488 - int nb; 485 + struct gpio_descs *cs_gpios; 489 486 490 - if (!np) 491 - return -EINVAL; 487 + cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW); 492 488 493 - nb = of_gpio_named_count(np, "cs-gpios"); 494 - for (i = 0; i < nb; i++) { 495 - int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); 496 - 497 - if (cs_gpio < 0) 498 - return cs_gpio; 499 - 500 - if (gpio_is_valid(cs_gpio)) { 501 - ret = devm_gpio_request_one(&pdev->dev, cs_gpio, 502 - GPIOF_DIR_OUT, 503 - dev_name(&pdev->dev)); 504 - if (ret) 505 - return ret; 506 - } 507 - } 489 + if (IS_ERR(cs_gpios)) 490 + return PTR_ERR(cs_gpios); 508 491 509 492 return 0; 510 493 }
+168 -25
drivers/spi/spi-bcm-qspi.c
··· 83 83 /* MSPI register offsets */ 84 84 #define MSPI_SPCR0_LSB 0x000 85 85 #define MSPI_SPCR0_MSB 0x004 86 + #define MSPI_SPCR0_MSB_CPHA BIT(0) 87 + #define MSPI_SPCR0_MSB_CPOL BIT(1) 88 + #define MSPI_SPCR0_MSB_BITS_SHIFT 0x2 86 89 #define MSPI_SPCR1_LSB 0x008 87 90 #define MSPI_SPCR1_MSB 0x00c 88 91 #define MSPI_NEWQP 0x010 ··· 103 100 #define MSPI_MASTER_BIT BIT(7) 104 101 105 102 #define MSPI_NUM_CDRAM 16 103 + #define MSPI_CDRAM_OUTP BIT(8) 106 104 #define MSPI_CDRAM_CONT_BIT BIT(7) 107 105 #define MSPI_CDRAM_BITSE_BIT BIT(6) 106 + #define MSPI_CDRAM_DT_BIT BIT(5) 108 107 #define MSPI_CDRAM_PCS 0xf 109 108 110 109 #define MSPI_SPCR2_SPE BIT(6) ··· 119 114 ~(BIT(10) | BIT(11))) 120 115 #define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \ 121 116 BIT(11)) 117 + #define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2) 118 + #define MSPI_SPCR3_DAM_8BYTE 0 119 + #define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4)) 120 + #define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5)) 121 + #define MSPI_SPCR3_HALFDUPLEX BIT(6) 122 + #define MSPI_SPCR3_HDOUTTYPE BIT(7) 123 + #define MSPI_SPCR3_DATA_REG_SZ BIT(8) 124 + #define MSPI_SPCR3_CPHARX BIT(9) 122 125 123 126 #define MSPI_MSPI_STATUS_SPIF BIT(0) 124 127 ··· 165 152 /* events that make us deassert CS */ 166 153 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \ 167 154 TRANS_STATUS_BREAK_CS_CHANGE) 155 + 156 + /* 157 + * Used for writing and reading data in the right order 158 + * to TXRAM and RXRAM when used as 32-bit registers respectively 159 + */ 160 + #define swap4bytes(__val) \ 161 + ((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \ 162 + (((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000)) 168 163 169 164 struct bcm_qspi_parms { 170 165 u32 speed_hz; ··· 282 261 static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi) 283 262 { 284 263 if (bcm_qspi_has_fastbr(qspi)) 285 - return 1; 264 + return (bcm_qspi_has_sysclk_108(qspi) ? 
4 : 1); 286 265 else 287 266 return 8; 288 267 } ··· 416 395 if (addrlen == BSPI_ADDRLEN_4BYTES) 417 396 bpp = BSPI_BPP_ADDR_SELECT_MASK; 418 397 419 - bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth; 398 + if (op->dummy.nbytes) 399 + bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth; 420 400 421 401 switch (width) { 422 402 case SPI_NBITS_SINGLE: ··· 592 570 { 593 571 u32 spcr, spbr = 0; 594 572 595 - if (xp->speed_hz) 596 - spbr = qspi->base_clk / (2 * xp->speed_hz); 597 - 598 - spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX); 599 - bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr); 600 - 601 573 if (!qspi->mspi_maj_rev) 602 574 /* legacy controller */ 603 575 spcr = MSPI_MASTER_BIT; 604 576 else 605 577 spcr = 0; 606 578 607 - /* for 16 bit the data should be zero */ 608 - if (xp->bits_per_word != 16) 609 - spcr |= xp->bits_per_word << 2; 610 - spcr |= xp->mode & 3; 579 + /* 580 + * Bits per transfer. BITS determines the number of data bits 581 + * transferred if the command control bit (BITSE of a 582 + * CDRAM Register) is equal to 1. 
583 + * If CDRAM BITSE is equal to 0, 8 data bits are transferred 584 + * regardless 585 + */ 586 + if (xp->bits_per_word != 16 && xp->bits_per_word != 64) 587 + spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT; 611 588 589 + spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL); 612 590 bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr); 613 591 614 592 if (bcm_qspi_has_fastbr(qspi)) { ··· 617 595 /* enable fastbr */ 618 596 spcr |= MSPI_SPCR3_FASTBR; 619 597 598 + if (xp->mode & SPI_3WIRE) 599 + spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE; 600 + 620 601 if (bcm_qspi_has_sysclk_108(qspi)) { 621 602 /* SYSCLK_108 */ 622 603 spcr |= MSPI_SPCR3_SYSCLKSEL_108; 623 604 qspi->base_clk = MSPI_BASE_FREQ * 4; 624 - /* Change spbr as we changed sysclk */ 625 - bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4); 626 605 } 627 606 607 + if (xp->bits_per_word > 16) { 608 + /* data_reg_size 1 (64bit) */ 609 + spcr |= MSPI_SPCR3_DATA_REG_SZ; 610 + /* TxRx RAM data access mode 2 for 32B and set fastdt */ 611 + spcr |= MSPI_SPCR3_DAM_32BYTE | MSPI_SPCR3_FASTDT; 612 + /* 613 + * Set length of delay after transfer 614 + * DTL from 0(256) to 1 615 + */ 616 + bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1); 617 + } else { 618 + /* data_reg_size[8] = 0 */ 619 + spcr &= ~(MSPI_SPCR3_DATA_REG_SZ); 620 + 621 + /* 622 + * TxRx RAM access mode 8B 623 + * and disable fastdt 624 + */ 625 + spcr &= ~(MSPI_SPCR3_DAM_32BYTE); 626 + } 628 627 bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr); 629 628 } 629 + 630 + if (xp->speed_hz) 631 + spbr = qspi->base_clk / (2 * xp->speed_hz); 632 + 633 + spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX); 634 + bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr); 630 635 631 636 qspi->last_parms = *xp; 632 637 } ··· 675 626 { 676 627 struct bcm_qspi_parms *xp; 677 628 678 - if (spi->bits_per_word > 16) 629 + if (spi->bits_per_word > 64) 679 630 return -EINVAL; 680 631 681 632 xp = spi_get_ctldata(spi); ··· 714 665 /* count the last 
transferred bytes */ 715 666 if (qt->trans->bits_per_word <= 8) 716 667 qt->byte++; 717 - else 668 + else if (qt->trans->bits_per_word <= 16) 718 669 qt->byte += 2; 670 + else if (qt->trans->bits_per_word <= 32) 671 + qt->byte += 4; 672 + else if (qt->trans->bits_per_word <= 64) 673 + qt->byte += 8; 719 674 720 675 if (qt->byte >= qt->trans->len) { 721 676 /* we're at the end of the spi_transfer */ ··· 762 709 ((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8); 763 710 } 764 711 712 + static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot) 713 + { 714 + u32 reg_offset = MSPI_RXRAM; 715 + u32 offset = reg_offset + (slot << 3); 716 + u32 val; 717 + 718 + val = bcm_qspi_read(qspi, MSPI, offset); 719 + val = swap4bytes(val); 720 + 721 + return val; 722 + } 723 + 724 + static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot) 725 + { 726 + u32 reg_offset = MSPI_RXRAM; 727 + u32 lsb_offset = reg_offset + (slot << 3) + 0x4; 728 + u32 msb_offset = reg_offset + (slot << 3); 729 + u32 msb, lsb; 730 + 731 + msb = bcm_qspi_read(qspi, MSPI, msb_offset); 732 + msb = swap4bytes(msb); 733 + lsb = bcm_qspi_read(qspi, MSPI, lsb_offset); 734 + lsb = swap4bytes(lsb); 735 + 736 + return ((u64)msb << 32 | lsb); 737 + } 738 + 765 739 static void read_from_hw(struct bcm_qspi *qspi, int slots) 766 740 { 767 741 struct qspi_trans tp; ··· 812 732 buf[tp.byte] = read_rxram_slot_u8(qspi, slot); 813 733 dev_dbg(&qspi->pdev->dev, "RD %02x\n", 814 734 buf ? buf[tp.byte] : 0x0); 815 - } else { 735 + } else if (tp.trans->bits_per_word <= 16) { 816 736 u16 *buf = tp.trans->rx_buf; 817 737 818 738 if (buf) ··· 820 740 slot); 821 741 dev_dbg(&qspi->pdev->dev, "RD %04x\n", 822 742 buf ? buf[tp.byte / 2] : 0x0); 743 + } else if (tp.trans->bits_per_word <= 32) { 744 + u32 *buf = tp.trans->rx_buf; 745 + 746 + if (buf) 747 + buf[tp.byte / 4] = read_rxram_slot_u32(qspi, 748 + slot); 749 + dev_dbg(&qspi->pdev->dev, "RD %08x\n", 750 + buf ? 
buf[tp.byte / 4] : 0x0); 751 + 752 + } else if (tp.trans->bits_per_word <= 64) { 753 + u64 *buf = tp.trans->rx_buf; 754 + 755 + if (buf) 756 + buf[tp.byte / 8] = read_rxram_slot_u64(qspi, 757 + slot); 758 + dev_dbg(&qspi->pdev->dev, "RD %llx\n", 759 + buf ? buf[tp.byte / 8] : 0x0); 760 + 761 + 823 762 } 824 763 825 764 update_qspi_trans_byte_count(qspi, &tp, ··· 868 769 bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff)); 869 770 } 870 771 772 + static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot, 773 + u32 val) 774 + { 775 + u32 reg_offset = MSPI_TXRAM; 776 + u32 msb_offset = reg_offset + (slot << 3); 777 + 778 + bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val)); 779 + } 780 + 781 + static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot, 782 + u64 val) 783 + { 784 + u32 reg_offset = MSPI_TXRAM; 785 + u32 msb_offset = reg_offset + (slot << 3); 786 + u32 lsb_offset = reg_offset + (slot << 3) + 0x4; 787 + u32 msb = upper_32_bits(val); 788 + u32 lsb = lower_32_bits(val); 789 + 790 + bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb)); 791 + bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb)); 792 + } 793 + 871 794 static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot) 872 795 { 873 796 return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2)); ··· 913 792 914 793 /* Run until end of transfer or reached the max data */ 915 794 while (!tstatus && slot < MSPI_NUM_CDRAM) { 795 + mspi_cdram = MSPI_CDRAM_CONT_BIT; 916 796 if (tp.trans->bits_per_word <= 8) { 917 797 const u8 *buf = tp.trans->tx_buf; 918 798 u8 val = buf ? buf[tp.byte] : 0x00; 919 799 920 800 write_txram_slot_u8(qspi, slot, val); 921 801 dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); 922 - } else { 802 + } else if (tp.trans->bits_per_word <= 16) { 923 803 const u16 *buf = tp.trans->tx_buf; 924 804 u16 val = buf ? 
buf[tp.byte / 2] : 0x0000; 925 805 926 806 write_txram_slot_u16(qspi, slot, val); 927 807 dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); 808 + } else if (tp.trans->bits_per_word <= 32) { 809 + const u32 *buf = tp.trans->tx_buf; 810 + u32 val = buf ? buf[tp.byte/4] : 0x0; 811 + 812 + write_txram_slot_u32(qspi, slot, val); 813 + dev_dbg(&qspi->pdev->dev, "WR %08x\n", val); 814 + } else if (tp.trans->bits_per_word <= 64) { 815 + const u64 *buf = tp.trans->tx_buf; 816 + u64 val = (buf ? buf[tp.byte/8] : 0x0); 817 + 818 + /* use the length of delay from SPCR1_LSB */ 819 + if (bcm_qspi_has_fastbr(qspi)) 820 + mspi_cdram |= MSPI_CDRAM_DT_BIT; 821 + 822 + write_txram_slot_u64(qspi, slot, val); 823 + dev_dbg(&qspi->pdev->dev, "WR %llx\n", val); 928 824 } 929 - mspi_cdram = MSPI_CDRAM_CONT_BIT; 825 + 826 + mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 : 827 + MSPI_CDRAM_BITSE_BIT); 828 + 829 + /* set 3wrire halfduplex mode data from master to slave */ 830 + if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf) 831 + mspi_cdram |= MSPI_CDRAM_OUTP; 930 832 931 833 if (has_bspi(qspi)) 932 834 mspi_cdram &= ~1; 933 835 else 934 836 mspi_cdram |= (~(1 << spi->chip_select) & 935 837 MSPI_CDRAM_PCS); 936 - 937 - mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 
0 : 938 - MSPI_CDRAM_BITSE_BIT); 939 838 940 839 write_cdram_slot(qspi, slot, mspi_cdram); 941 840 ··· 1491 1350 qspi->master = master; 1492 1351 1493 1352 master->bus_num = -1; 1494 - master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD; 1353 + master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD | 1354 + SPI_3WIRE; 1495 1355 master->setup = bcm_qspi_setup; 1496 1356 master->transfer_one = bcm_qspi_transfer_one; 1497 1357 master->mem_ops = &bcm_qspi_mem_ops; ··· 1602 1460 &qspi->dev_ids[val]); 1603 1461 if (ret < 0) { 1604 1462 dev_err(&pdev->dev, "IRQ %s not found\n", name); 1605 - goto qspi_probe_err; 1463 + goto qspi_unprepare_err; 1606 1464 } 1607 1465 1608 1466 qspi->dev_ids[val].dev = qspi; ··· 1617 1475 if (!num_ints) { 1618 1476 dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n"); 1619 1477 ret = -EINVAL; 1620 - goto qspi_probe_err; 1478 + goto qspi_unprepare_err; 1621 1479 } 1622 1480 1623 1481 bcm_qspi_hw_init(qspi); ··· 1641 1499 1642 1500 qspi_reg_err: 1643 1501 bcm_qspi_hw_uninit(qspi); 1502 + qspi_unprepare_err: 1644 1503 clk_disable_unprepare(qspi->clk); 1645 1504 qspi_probe_err: 1646 1505 kfree(qspi->dev_ids);
+214
drivers/spi/spi-cadence-quadspi.c
··· 13 13 #include <linux/dmaengine.h> 14 14 #include <linux/err.h> 15 15 #include <linux/errno.h> 16 + #include <linux/firmware/xlnx-zynqmp.h> 16 17 #include <linux/interrupt.h> 17 18 #include <linux/io.h> 18 19 #include <linux/iopoll.h> ··· 36 35 /* Quirks */ 37 36 #define CQSPI_NEEDS_WR_DELAY BIT(0) 38 37 #define CQSPI_DISABLE_DAC_MODE BIT(1) 38 + #define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2) 39 39 40 40 /* Capabilities */ 41 41 #define CQSPI_SUPPORTS_OCTAL BIT(0) ··· 84 82 u32 wr_delay; 85 83 bool use_direct_mode; 86 84 struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; 85 + bool use_dma_read; 86 + u32 pd_dev_id; 87 87 }; 88 88 89 89 struct cqspi_driver_platdata { 90 90 u32 hwcaps_mask; 91 91 u8 quirks; 92 + int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, 93 + u_char *rxbuf, loff_t from_addr, size_t n_rx); 94 + u32 (*get_dma_status)(struct cqspi_st *cqspi); 92 95 }; 93 96 94 97 /* Operation timeout value */ ··· 224 217 #define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 225 218 #define CQSPI_REG_INDIRECTWRBYTES 0x7C 226 219 220 + #define CQSPI_REG_INDTRIG_ADDRRANGE 0x80 221 + 227 222 #define CQSPI_REG_CMDADDRESS 0x94 228 223 #define CQSPI_REG_CMDREADDATALOWER 0xA0 229 224 #define CQSPI_REG_CMDREADDATAUPPER 0xA4 ··· 239 230 #define CQSPI_REG_OP_EXT_READ_LSB 24 240 231 #define CQSPI_REG_OP_EXT_WRITE_LSB 16 241 232 #define CQSPI_REG_OP_EXT_STIG_LSB 0 233 + 234 + #define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000 235 + 236 + #define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800 237 + #define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804 238 + 239 + #define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C 240 + 241 + #define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814 242 + #define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818 243 + #define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C 244 + #define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1) 245 + 246 + #define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828 247 + 248 + #define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00 249 + #define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6 
242 250 243 251 /* Interrupt status bits */ 244 252 #define CQSPI_REG_IRQ_MODE_ERR BIT(0) ··· 276 250 CQSPI_REG_IRQ_UNDERFLOW) 277 251 278 252 #define CQSPI_IRQ_STATUS_MASK 0x1FFFF 253 + #define CQSPI_DMA_UNALIGN 0x3 254 + 255 + #define CQSPI_REG_VERSAL_DMA_VAL 0x602 279 256 280 257 static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr) 281 258 { ··· 304 275 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; 305 276 } 306 277 278 + static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi) 279 + { 280 + u32 dma_status; 281 + 282 + dma_status = readl(cqspi->iobase + 283 + CQSPI_REG_VERSAL_DMA_DST_I_STS); 284 + writel(dma_status, cqspi->iobase + 285 + CQSPI_REG_VERSAL_DMA_DST_I_STS); 286 + 287 + return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK; 288 + } 289 + 307 290 static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) 308 291 { 309 292 struct cqspi_st *cqspi = dev; 310 293 unsigned int irq_status; 294 + struct device *device = &cqspi->pdev->dev; 295 + const struct cqspi_driver_platdata *ddata; 296 + 297 + ddata = of_device_get_match_data(device); 311 298 312 299 /* Read interrupt status */ 313 300 irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); 314 301 315 302 /* Clear interrupt */ 316 303 writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); 304 + 305 + if (cqspi->use_dma_read && ddata && ddata->get_dma_status) { 306 + if (ddata->get_dma_status(cqspi)) { 307 + complete(&cqspi->transfer_complete); 308 + return IRQ_HANDLED; 309 + } 310 + } 317 311 318 312 irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; 319 313 ··· 833 781 return ret; 834 782 } 835 783 784 + static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata, 785 + u_char *rxbuf, loff_t from_addr, 786 + size_t n_rx) 787 + { 788 + struct cqspi_st *cqspi = f_pdata->cqspi; 789 + struct device *dev = &cqspi->pdev->dev; 790 + void __iomem *reg_base = cqspi->iobase; 791 + u32 reg, bytes_to_dma; 792 + loff_t addr = from_addr; 793 + void *buf = rxbuf; 794 
+ dma_addr_t dma_addr; 795 + u8 bytes_rem; 796 + int ret = 0; 797 + 798 + bytes_rem = n_rx % 4; 799 + bytes_to_dma = (n_rx - bytes_rem); 800 + 801 + if (!bytes_to_dma) 802 + goto nondmard; 803 + 804 + ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA); 805 + if (ret) 806 + return ret; 807 + 808 + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 809 + reg |= CQSPI_REG_CONFIG_DMA_MASK; 810 + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 811 + 812 + dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE); 813 + if (dma_mapping_error(dev, dma_addr)) { 814 + dev_err(dev, "dma mapping failed\n"); 815 + return -ENOMEM; 816 + } 817 + 818 + writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 819 + writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES); 820 + writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL, 821 + reg_base + CQSPI_REG_INDTRIG_ADDRRANGE); 822 + 823 + /* Clear all interrupts. */ 824 + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 825 + 826 + /* Enable DMA done interrupt */ 827 + writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK, 828 + reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN); 829 + 830 + /* Default DMA periph configuration */ 831 + writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA); 832 + 833 + /* Configure DMA Dst address */ 834 + writel(lower_32_bits(dma_addr), 835 + reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR); 836 + writel(upper_32_bits(dma_addr), 837 + reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB); 838 + 839 + /* Configure DMA Src address */ 840 + writel(cqspi->trigger_address, reg_base + 841 + CQSPI_REG_VERSAL_DMA_SRC_ADDR); 842 + 843 + /* Set DMA destination size */ 844 + writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE); 845 + 846 + /* Set DMA destination control */ 847 + writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL, 848 + reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL); 849 + 850 + writel(CQSPI_REG_INDIRECTRD_START_MASK, 851 + reg_base + CQSPI_REG_INDIRECTRD); 852 + 853 + 
reinit_completion(&cqspi->transfer_complete); 854 + 855 + if (!wait_for_completion_timeout(&cqspi->transfer_complete, 856 + msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) { 857 + ret = -ETIMEDOUT; 858 + goto failrd; 859 + } 860 + 861 + /* Disable DMA interrupt */ 862 + writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS); 863 + 864 + /* Clear indirect completion status */ 865 + writel(CQSPI_REG_INDIRECTRD_DONE_MASK, 866 + cqspi->iobase + CQSPI_REG_INDIRECTRD); 867 + dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE); 868 + 869 + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 870 + reg &= ~CQSPI_REG_CONFIG_DMA_MASK; 871 + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 872 + 873 + ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, 874 + PM_OSPI_MUX_SEL_LINEAR); 875 + if (ret) 876 + return ret; 877 + 878 + nondmard: 879 + if (bytes_rem) { 880 + addr += bytes_to_dma; 881 + buf += bytes_to_dma; 882 + ret = cqspi_indirect_read_execute(f_pdata, buf, addr, 883 + bytes_rem); 884 + if (ret) 885 + return ret; 886 + } 887 + 888 + return 0; 889 + 890 + failrd: 891 + /* Disable DMA interrupt */ 892 + writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS); 893 + 894 + /* Cancel the indirect read */ 895 + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, 896 + reg_base + CQSPI_REG_INDIRECTRD); 897 + 898 + dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE); 899 + 900 + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 901 + reg &= ~CQSPI_REG_CONFIG_DMA_MASK; 902 + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 903 + 904 + zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR); 905 + 906 + return ret; 907 + } 908 + 836 909 static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, 837 910 const struct spi_mem_op *op) 838 911 { ··· 1357 1180 const struct spi_mem_op *op) 1358 1181 { 1359 1182 struct cqspi_st *cqspi = f_pdata->cqspi; 1183 + struct device *dev = &cqspi->pdev->dev; 1184 + const struct cqspi_driver_platdata *ddata; 1360 1185 loff_t from = 
op->addr.val; 1361 1186 size_t len = op->data.nbytes; 1362 1187 u_char *buf = op->data.buf.in; 1188 + u64 dma_align = (u64)(uintptr_t)buf; 1363 1189 int ret; 1364 1190 1191 + ddata = of_device_get_match_data(dev); 1365 1192 ret = cqspi_set_protocol(f_pdata, op); 1366 1193 if (ret) 1367 1194 return ret; ··· 1376 1195 1377 1196 if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) 1378 1197 return cqspi_direct_read_execute(f_pdata, buf, from, len); 1198 + 1199 + if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma && 1200 + virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0)) 1201 + return ddata->indirect_read_dma(f_pdata, buf, from, len); 1379 1202 1380 1203 return cqspi_indirect_read_execute(f_pdata, buf, from, len); 1381 1204 } ··· 1484 1299 { 1485 1300 struct device *dev = &cqspi->pdev->dev; 1486 1301 struct device_node *np = dev->of_node; 1302 + u32 id[2]; 1487 1303 1488 1304 cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); 1489 1305 ··· 1508 1322 cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT; 1509 1323 1510 1324 cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); 1325 + 1326 + if (!of_property_read_u32_array(np, "power-domains", id, 1327 + ARRAY_SIZE(id))) 1328 + cqspi->pd_dev_id = id[1]; 1511 1329 1512 1330 return 0; 1513 1331 } ··· 1546 1356 if (!cqspi->use_direct_mode) { 1547 1357 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1548 1358 reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL; 1359 + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1360 + } 1361 + 1362 + /* Enable DMA interface */ 1363 + if (cqspi->use_dma_read) { 1364 + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1365 + reg |= CQSPI_REG_CONFIG_DMA_MASK; 1549 1366 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1550 1367 } 1551 1368 ··· 1745 1548 master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; 1746 1549 if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) 1747 1550 cqspi->use_direct_mode = true; 1551 + if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA) 
1552 + cqspi->use_dma_read = true; 1553 + 1554 + if (of_device_is_compatible(pdev->dev.of_node, 1555 + "xlnx,versal-ospi-1.0")) 1556 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1748 1557 } 1749 1558 1750 1559 ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, ··· 1859 1656 .quirks = CQSPI_DISABLE_DAC_MODE, 1860 1657 }; 1861 1658 1659 + static const struct cqspi_driver_platdata versal_ospi = { 1660 + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 1661 + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA, 1662 + .indirect_read_dma = cqspi_versal_indirect_read_dma, 1663 + .get_dma_status = cqspi_get_versal_dma_status, 1664 + }; 1665 + 1862 1666 static const struct of_device_id cqspi_dt_ids[] = { 1863 1667 { 1864 1668 .compatible = "cdns,qspi-nor", ··· 1882 1672 { 1883 1673 .compatible = "intel,lgm-qspi", 1884 1674 .data = &intel_lgm_qspi, 1675 + }, 1676 + { 1677 + .compatible = "xlnx,versal-ospi-1.0", 1678 + .data = (void *)&versal_ospi, 1885 1679 }, 1886 1680 { /* end of table */ } 1887 1681 };
+642
drivers/spi/spi-cadence-xspi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // Cadence XSPI flash controller driver 3 + // Copyright (C) 2020-21 Cadence 4 + 5 + #include <linux/completion.h> 6 + #include <linux/delay.h> 7 + #include <linux/err.h> 8 + #include <linux/errno.h> 9 + #include <linux/interrupt.h> 10 + #include <linux/io.h> 11 + #include <linux/iopoll.h> 12 + #include <linux/kernel.h> 13 + #include <linux/module.h> 14 + #include <linux/of_device.h> 15 + #include <linux/of.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/pm_runtime.h> 18 + #include <linux/spi/spi.h> 19 + #include <linux/spi/spi-mem.h> 20 + #include <linux/bitfield.h> 21 + #include <linux/limits.h> 22 + #include <linux/log2.h> 23 + 24 + #define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522 25 + #define CDNS_XSPI_MAX_BANKS 8 26 + #define CDNS_XSPI_NAME "cadence-xspi" 27 + 28 + /* 29 + * Note: below are additional auxiliary registers to 30 + * configure XSPI controller pin-strap settings 31 + */ 32 + 33 + /* PHY DQ timing register */ 34 + #define CDNS_XSPI_CCP_PHY_DQ_TIMING 0x0000 35 + 36 + /* PHY DQS timing register */ 37 + #define CDNS_XSPI_CCP_PHY_DQS_TIMING 0x0004 38 + 39 + /* PHY gate loopback control register */ 40 + #define CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL 0x0008 41 + 42 + /* PHY DLL slave control register */ 43 + #define CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL 0x0010 44 + 45 + /* DLL PHY control register */ 46 + #define CDNS_XSPI_DLL_PHY_CTRL 0x1034 47 + 48 + /* Command registers */ 49 + #define CDNS_XSPI_CMD_REG_0 0x0000 50 + #define CDNS_XSPI_CMD_REG_1 0x0004 51 + #define CDNS_XSPI_CMD_REG_2 0x0008 52 + #define CDNS_XSPI_CMD_REG_3 0x000C 53 + #define CDNS_XSPI_CMD_REG_4 0x0010 54 + #define CDNS_XSPI_CMD_REG_5 0x0014 55 + 56 + /* Command status registers */ 57 + #define CDNS_XSPI_CMD_STATUS_REG 0x0044 58 + 59 + /* Controller status register */ 60 + #define CDNS_XSPI_CTRL_STATUS_REG 0x0100 61 + #define CDNS_XSPI_INIT_COMPLETED BIT(16) 62 + #define CDNS_XSPI_INIT_LEGACY BIT(9) 63 + #define CDNS_XSPI_INIT_FAIL BIT(8) 
64 + #define CDNS_XSPI_CTRL_BUSY BIT(7) 65 + 66 + /* Controller interrupt status register */ 67 + #define CDNS_XSPI_INTR_STATUS_REG 0x0110 68 + #define CDNS_XSPI_STIG_DONE BIT(23) 69 + #define CDNS_XSPI_SDMA_ERROR BIT(22) 70 + #define CDNS_XSPI_SDMA_TRIGGER BIT(21) 71 + #define CDNS_XSPI_CMD_IGNRD_EN BIT(20) 72 + #define CDNS_XSPI_DDMA_TERR_EN BIT(18) 73 + #define CDNS_XSPI_CDMA_TREE_EN BIT(17) 74 + #define CDNS_XSPI_CTRL_IDLE_EN BIT(16) 75 + 76 + #define CDNS_XSPI_TRD_COMP_INTR_STATUS 0x0120 77 + #define CDNS_XSPI_TRD_ERR_INTR_STATUS 0x0130 78 + #define CDNS_XSPI_TRD_ERR_INTR_EN 0x0134 79 + 80 + /* Controller interrupt enable register */ 81 + #define CDNS_XSPI_INTR_ENABLE_REG 0x0114 82 + #define CDNS_XSPI_INTR_EN BIT(31) 83 + #define CDNS_XSPI_STIG_DONE_EN BIT(23) 84 + #define CDNS_XSPI_SDMA_ERROR_EN BIT(22) 85 + #define CDNS_XSPI_SDMA_TRIGGER_EN BIT(21) 86 + 87 + #define CDNS_XSPI_INTR_MASK (CDNS_XSPI_INTR_EN | \ 88 + CDNS_XSPI_STIG_DONE_EN | \ 89 + CDNS_XSPI_SDMA_ERROR_EN | \ 90 + CDNS_XSPI_SDMA_TRIGGER_EN) 91 + 92 + /* Controller config register */ 93 + #define CDNS_XSPI_CTRL_CONFIG_REG 0x0230 94 + #define CDNS_XSPI_CTRL_WORK_MODE GENMASK(6, 5) 95 + 96 + #define CDNS_XSPI_WORK_MODE_DIRECT 0 97 + #define CDNS_XSPI_WORK_MODE_STIG 1 98 + #define CDNS_XSPI_WORK_MODE_ACMD 3 99 + 100 + /* SDMA trigger transaction registers */ 101 + #define CDNS_XSPI_SDMA_SIZE_REG 0x0240 102 + #define CDNS_XSPI_SDMA_TRD_INFO_REG 0x0244 103 + #define CDNS_XSPI_SDMA_DIR BIT(8) 104 + 105 + /* Controller features register */ 106 + #define CDNS_XSPI_CTRL_FEATURES_REG 0x0F04 107 + #define CDNS_XSPI_NUM_BANKS GENMASK(25, 24) 108 + #define CDNS_XSPI_DMA_DATA_WIDTH BIT(21) 109 + #define CDNS_XSPI_NUM_THREADS GENMASK(3, 0) 110 + 111 + /* Controller version register */ 112 + #define CDNS_XSPI_CTRL_VERSION_REG 0x0F00 113 + #define CDNS_XSPI_MAGIC_NUM GENMASK(31, 16) 114 + #define CDNS_XSPI_CTRL_REV GENMASK(7, 0) 115 + 116 + /* STIG Profile 1.0 instruction fields (split into registers) */ 117 + 
#define CDNS_XSPI_CMD_INSTR_TYPE GENMASK(6, 0) 118 + #define CDNS_XSPI_CMD_P1_R1_ADDR0 GENMASK(31, 24) 119 + #define CDNS_XSPI_CMD_P1_R2_ADDR1 GENMASK(7, 0) 120 + #define CDNS_XSPI_CMD_P1_R2_ADDR2 GENMASK(15, 8) 121 + #define CDNS_XSPI_CMD_P1_R2_ADDR3 GENMASK(23, 16) 122 + #define CDNS_XSPI_CMD_P1_R2_ADDR4 GENMASK(31, 24) 123 + #define CDNS_XSPI_CMD_P1_R3_ADDR5 GENMASK(7, 0) 124 + #define CDNS_XSPI_CMD_P1_R3_CMD GENMASK(23, 16) 125 + #define CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES GENMASK(30, 28) 126 + #define CDNS_XSPI_CMD_P1_R4_ADDR_IOS GENMASK(1, 0) 127 + #define CDNS_XSPI_CMD_P1_R4_CMD_IOS GENMASK(9, 8) 128 + #define CDNS_XSPI_CMD_P1_R4_BANK GENMASK(14, 12) 129 + 130 + /* STIG data sequence instruction fields (split into registers) */ 131 + #define CDNS_XSPI_CMD_DSEQ_R2_DCNT_L GENMASK(31, 16) 132 + #define CDNS_XSPI_CMD_DSEQ_R3_DCNT_H GENMASK(15, 0) 133 + #define CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY GENMASK(25, 20) 134 + #define CDNS_XSPI_CMD_DSEQ_R4_BANK GENMASK(14, 12) 135 + #define CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS GENMASK(9, 8) 136 + #define CDNS_XSPI_CMD_DSEQ_R4_DIR BIT(4) 137 + 138 + /* STIG command status fields */ 139 + #define CDNS_XSPI_CMD_STATUS_COMPLETED BIT(15) 140 + #define CDNS_XSPI_CMD_STATUS_FAILED BIT(14) 141 + #define CDNS_XSPI_CMD_STATUS_DQS_ERROR BIT(3) 142 + #define CDNS_XSPI_CMD_STATUS_CRC_ERROR BIT(2) 143 + #define CDNS_XSPI_CMD_STATUS_BUS_ERROR BIT(1) 144 + #define CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR BIT(0) 145 + 146 + #define CDNS_XSPI_STIG_DONE_FLAG BIT(0) 147 + #define CDNS_XSPI_TRD_STATUS 0x0104 148 + 149 + /* Helper macros for filling command registers */ 150 + #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \ 151 + FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? 
\ 152 + CDNS_XSPI_STIG_INSTR_TYPE_1 : CDNS_XSPI_STIG_INSTR_TYPE_0) | \ 153 + FIELD_PREP(CDNS_XSPI_CMD_P1_R1_ADDR0, (op)->addr.val & 0xff)) 154 + 155 + #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op) ( \ 156 + FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR1, ((op)->addr.val >> 8) & 0xFF) | \ 157 + FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR2, ((op)->addr.val >> 16) & 0xFF) | \ 158 + FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \ 159 + FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF)) 160 + 161 + #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \ 162 + FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \ 163 + FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \ 164 + FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes)) 165 + 166 + #define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \ 167 + FIELD_PREP(CDNS_XSPI_CMD_P1_R4_ADDR_IOS, ilog2((op)->addr.buswidth)) | \ 168 + FIELD_PREP(CDNS_XSPI_CMD_P1_R4_CMD_IOS, ilog2((op)->cmd.buswidth)) | \ 169 + FIELD_PREP(CDNS_XSPI_CMD_P1_R4_BANK, chipsel)) 170 + 171 + #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op) \ 172 + FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ) 173 + 174 + #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \ 175 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF) 176 + 177 + #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \ 178 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \ 179 + ((op)->data.nbytes >> 16) & 0xffff) | \ 180 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8)) 181 + 182 + #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \ 183 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \ 184 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS, \ 185 + ilog2((op)->data.buswidth)) | \ 186 + FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, \ 187 + ((op)->data.dir == SPI_MEM_DATA_IN) ? 
\ 188 + CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE)) 189 + 190 + enum cdns_xspi_stig_instr_type { 191 + CDNS_XSPI_STIG_INSTR_TYPE_0, 192 + CDNS_XSPI_STIG_INSTR_TYPE_1, 193 + CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ = 127, 194 + }; 195 + 196 + enum cdns_xspi_sdma_dir { 197 + CDNS_XSPI_SDMA_DIR_READ, 198 + CDNS_XSPI_SDMA_DIR_WRITE, 199 + }; 200 + 201 + enum cdns_xspi_stig_cmd_dir { 202 + CDNS_XSPI_STIG_CMD_DIR_READ, 203 + CDNS_XSPI_STIG_CMD_DIR_WRITE, 204 + }; 205 + 206 + struct cdns_xspi_dev { 207 + struct platform_device *pdev; 208 + struct device *dev; 209 + 210 + void __iomem *iobase; 211 + void __iomem *auxbase; 212 + void __iomem *sdmabase; 213 + 214 + int irq; 215 + int cur_cs; 216 + unsigned int sdmasize; 217 + 218 + struct completion cmd_complete; 219 + struct completion auto_cmd_complete; 220 + struct completion sdma_complete; 221 + bool sdma_error; 222 + 223 + void *in_buffer; 224 + const void *out_buffer; 225 + 226 + u8 hw_num_banks; 227 + }; 228 + 229 + static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi) 230 + { 231 + u32 ctrl_stat; 232 + 233 + return readl_relaxed_poll_timeout(cdns_xspi->iobase + 234 + CDNS_XSPI_CTRL_STATUS_REG, 235 + ctrl_stat, 236 + ((ctrl_stat & 237 + CDNS_XSPI_CTRL_BUSY) == 0), 238 + 100, 1000); 239 + } 240 + 241 + static void cdns_xspi_trigger_command(struct cdns_xspi_dev *cdns_xspi, 242 + u32 cmd_regs[6]) 243 + { 244 + writel(cmd_regs[5], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_5); 245 + writel(cmd_regs[4], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_4); 246 + writel(cmd_regs[3], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_3); 247 + writel(cmd_regs[2], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_2); 248 + writel(cmd_regs[1], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_1); 249 + writel(cmd_regs[0], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_0); 250 + } 251 + 252 + static int cdns_xspi_check_command_status(struct cdns_xspi_dev *cdns_xspi) 253 + { 254 + int ret = 0; 255 + u32 cmd_status = readl(cdns_xspi->iobase + 
CDNS_XSPI_CMD_STATUS_REG); 256 + 257 + if (cmd_status & CDNS_XSPI_CMD_STATUS_COMPLETED) { 258 + if ((cmd_status & CDNS_XSPI_CMD_STATUS_FAILED) != 0) { 259 + if (cmd_status & CDNS_XSPI_CMD_STATUS_DQS_ERROR) { 260 + dev_err(cdns_xspi->dev, 261 + "Incorrect DQS pulses detected\n"); 262 + ret = -EPROTO; 263 + } 264 + if (cmd_status & CDNS_XSPI_CMD_STATUS_CRC_ERROR) { 265 + dev_err(cdns_xspi->dev, 266 + "CRC error received\n"); 267 + ret = -EPROTO; 268 + } 269 + if (cmd_status & CDNS_XSPI_CMD_STATUS_BUS_ERROR) { 270 + dev_err(cdns_xspi->dev, 271 + "Error resp on system DMA interface\n"); 272 + ret = -EPROTO; 273 + } 274 + if (cmd_status & CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR) { 275 + dev_err(cdns_xspi->dev, 276 + "Invalid command sequence detected\n"); 277 + ret = -EPROTO; 278 + } 279 + } 280 + } else { 281 + dev_err(cdns_xspi->dev, "Fatal err - command not completed\n"); 282 + ret = -EPROTO; 283 + } 284 + 285 + return ret; 286 + } 287 + 288 + static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi, 289 + bool enabled) 290 + { 291 + u32 intr_enable; 292 + 293 + intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG); 294 + if (enabled) 295 + intr_enable |= CDNS_XSPI_INTR_MASK; 296 + else 297 + intr_enable &= ~CDNS_XSPI_INTR_MASK; 298 + writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG); 299 + } 300 + 301 + static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi) 302 + { 303 + u32 ctrl_ver; 304 + u32 ctrl_features; 305 + u16 hw_magic_num; 306 + 307 + ctrl_ver = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_VERSION_REG); 308 + hw_magic_num = FIELD_GET(CDNS_XSPI_MAGIC_NUM, ctrl_ver); 309 + if (hw_magic_num != CDNS_XSPI_MAGIC_NUM_VALUE) { 310 + dev_err(cdns_xspi->dev, 311 + "Incorrect XSPI magic number: %x, expected: %x\n", 312 + hw_magic_num, CDNS_XSPI_MAGIC_NUM_VALUE); 313 + return -EIO; 314 + } 315 + 316 + ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG); 317 + cdns_xspi->hw_num_banks = 
FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features); 318 + cdns_xspi_set_interrupts(cdns_xspi, false); 319 + 320 + return 0; 321 + } 322 + 323 + static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi) 324 + { 325 + u32 sdma_size, sdma_trd_info; 326 + u8 sdma_dir; 327 + 328 + sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG); 329 + sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG); 330 + sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info); 331 + 332 + switch (sdma_dir) { 333 + case CDNS_XSPI_SDMA_DIR_READ: 334 + ioread8_rep(cdns_xspi->sdmabase, 335 + cdns_xspi->in_buffer, sdma_size); 336 + break; 337 + 338 + case CDNS_XSPI_SDMA_DIR_WRITE: 339 + iowrite8_rep(cdns_xspi->sdmabase, 340 + cdns_xspi->out_buffer, sdma_size); 341 + break; 342 + } 343 + } 344 + 345 + static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi, 346 + const struct spi_mem_op *op, 347 + bool data_phase) 348 + { 349 + u32 cmd_regs[6]; 350 + u32 cmd_status; 351 + int ret; 352 + 353 + ret = cdns_xspi_wait_for_controller_idle(cdns_xspi); 354 + if (ret < 0) 355 + return -EIO; 356 + 357 + writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG), 358 + cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG); 359 + 360 + cdns_xspi_set_interrupts(cdns_xspi, true); 361 + cdns_xspi->sdma_error = false; 362 + 363 + memset(cmd_regs, 0, sizeof(cmd_regs)); 364 + cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase); 365 + cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op); 366 + cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op); 367 + cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, 368 + cdns_xspi->cur_cs); 369 + 370 + cdns_xspi_trigger_command(cdns_xspi, cmd_regs); 371 + 372 + if (data_phase) { 373 + cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG; 374 + cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op); 375 + cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op); 376 + cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op); 377 + cmd_regs[4] = 
CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, 378 + cdns_xspi->cur_cs); 379 + 380 + cdns_xspi->in_buffer = op->data.buf.in; 381 + cdns_xspi->out_buffer = op->data.buf.out; 382 + 383 + cdns_xspi_trigger_command(cdns_xspi, cmd_regs); 384 + 385 + wait_for_completion(&cdns_xspi->sdma_complete); 386 + if (cdns_xspi->sdma_error) { 387 + cdns_xspi_set_interrupts(cdns_xspi, false); 388 + return -EIO; 389 + } 390 + cdns_xspi_sdma_handle(cdns_xspi); 391 + } 392 + 393 + wait_for_completion(&cdns_xspi->cmd_complete); 394 + cdns_xspi_set_interrupts(cdns_xspi, false); 395 + 396 + cmd_status = cdns_xspi_check_command_status(cdns_xspi); 397 + if (cmd_status) 398 + return -EPROTO; 399 + 400 + return 0; 401 + } 402 + 403 + static int cdns_xspi_mem_op(struct cdns_xspi_dev *cdns_xspi, 404 + struct spi_mem *mem, 405 + const struct spi_mem_op *op) 406 + { 407 + enum spi_mem_data_dir dir = op->data.dir; 408 + 409 + if (cdns_xspi->cur_cs != mem->spi->chip_select) 410 + cdns_xspi->cur_cs = mem->spi->chip_select; 411 + 412 + return cdns_xspi_send_stig_command(cdns_xspi, op, 413 + (dir != SPI_MEM_NO_DATA)); 414 + } 415 + 416 + static int cdns_xspi_mem_op_execute(struct spi_mem *mem, 417 + const struct spi_mem_op *op) 418 + { 419 + struct cdns_xspi_dev *cdns_xspi = 420 + spi_master_get_devdata(mem->spi->master); 421 + int ret = 0; 422 + 423 + ret = cdns_xspi_mem_op(cdns_xspi, mem, op); 424 + 425 + return ret; 426 + } 427 + 428 + static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op) 429 + { 430 + struct cdns_xspi_dev *cdns_xspi = 431 + spi_master_get_devdata(mem->spi->master); 432 + 433 + op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize); 434 + 435 + return 0; 436 + } 437 + 438 + static const struct spi_controller_mem_ops cadence_xspi_mem_ops = { 439 + .exec_op = cdns_xspi_mem_op_execute, 440 + .adjust_op_size = cdns_xspi_adjust_mem_op_size, 441 + }; 442 + 443 + static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev) 444 + { 445 + struct 
cdns_xspi_dev *cdns_xspi = dev; 446 + u32 irq_status; 447 + irqreturn_t result = IRQ_NONE; 448 + 449 + irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG); 450 + writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG); 451 + 452 + if (irq_status & 453 + (CDNS_XSPI_SDMA_ERROR | CDNS_XSPI_SDMA_TRIGGER | 454 + CDNS_XSPI_STIG_DONE)) { 455 + if (irq_status & CDNS_XSPI_SDMA_ERROR) { 456 + dev_err(cdns_xspi->dev, 457 + "Slave DMA transaction error\n"); 458 + cdns_xspi->sdma_error = true; 459 + complete(&cdns_xspi->sdma_complete); 460 + } 461 + 462 + if (irq_status & CDNS_XSPI_SDMA_TRIGGER) 463 + complete(&cdns_xspi->sdma_complete); 464 + 465 + if (irq_status & CDNS_XSPI_STIG_DONE) 466 + complete(&cdns_xspi->cmd_complete); 467 + 468 + result = IRQ_HANDLED; 469 + } 470 + 471 + irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS); 472 + if (irq_status) { 473 + writel(irq_status, 474 + cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS); 475 + 476 + complete(&cdns_xspi->auto_cmd_complete); 477 + 478 + result = IRQ_HANDLED; 479 + } 480 + 481 + return result; 482 + } 483 + 484 + static int cdns_xspi_of_get_plat_data(struct platform_device *pdev) 485 + { 486 + struct device_node *node_prop = pdev->dev.of_node; 487 + struct device_node *node_child; 488 + unsigned int cs; 489 + 490 + for_each_child_of_node(node_prop, node_child) { 491 + if (!of_device_is_available(node_child)) 492 + continue; 493 + 494 + if (of_property_read_u32(node_child, "reg", &cs)) { 495 + dev_err(&pdev->dev, "Couldn't get memory chip select\n"); 496 + of_node_put(node_child); 497 + return -ENXIO; 498 + } else if (cs >= CDNS_XSPI_MAX_BANKS) { 499 + dev_err(&pdev->dev, "reg (cs) parameter value too large\n"); 500 + of_node_put(node_child); 501 + return -ENXIO; 502 + } 503 + } 504 + 505 + return 0; 506 + } 507 + 508 + static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi) 509 + { 510 + struct device *dev = cdns_xspi->dev; 511 + 512 + dev_info(dev, "PHY 
configuration\n"); 513 + dev_info(dev, " * xspi_dll_phy_ctrl: %08x\n", 514 + readl(cdns_xspi->iobase + CDNS_XSPI_DLL_PHY_CTRL)); 515 + dev_info(dev, " * phy_dq_timing: %08x\n", 516 + readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQ_TIMING)); 517 + dev_info(dev, " * phy_dqs_timing: %08x\n", 518 + readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQS_TIMING)); 519 + dev_info(dev, " * phy_gate_loopback_ctrl: %08x\n", 520 + readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL)); 521 + dev_info(dev, " * phy_dll_slave_ctrl: %08x\n", 522 + readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL)); 523 + } 524 + 525 + static int cdns_xspi_probe(struct platform_device *pdev) 526 + { 527 + struct device *dev = &pdev->dev; 528 + struct spi_master *master = NULL; 529 + struct cdns_xspi_dev *cdns_xspi = NULL; 530 + struct resource *res; 531 + int ret; 532 + 533 + master = devm_spi_alloc_master(dev, sizeof(*cdns_xspi)); 534 + if (!master) 535 + return -ENOMEM; 536 + 537 + master->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD | 538 + SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL | 539 + SPI_MODE_0 | SPI_MODE_3; 540 + 541 + master->mem_ops = &cadence_xspi_mem_ops; 542 + master->dev.of_node = pdev->dev.of_node; 543 + master->bus_num = -1; 544 + 545 + platform_set_drvdata(pdev, master); 546 + 547 + cdns_xspi = spi_master_get_devdata(master); 548 + cdns_xspi->pdev = pdev; 549 + cdns_xspi->dev = &pdev->dev; 550 + cdns_xspi->cur_cs = 0; 551 + 552 + init_completion(&cdns_xspi->cmd_complete); 553 + init_completion(&cdns_xspi->auto_cmd_complete); 554 + init_completion(&cdns_xspi->sdma_complete); 555 + 556 + ret = cdns_xspi_of_get_plat_data(pdev); 557 + if (ret) 558 + return -ENODEV; 559 + 560 + cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io"); 561 + if (IS_ERR(cdns_xspi->iobase)) { 562 + dev_err(dev, "Failed to remap controller base address\n"); 563 + return PTR_ERR(cdns_xspi->iobase); 564 + } 565 + 566 + res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, "sdma"); 567 + cdns_xspi->sdmabase = devm_ioremap_resource(dev, res); 568 + if (IS_ERR(cdns_xspi->sdmabase)) { 569 + dev_err(dev, "Failed to remap SDMA address\n"); 570 + return PTR_ERR(cdns_xspi->sdmabase); 571 + } 572 + cdns_xspi->sdmasize = resource_size(res); 573 + 574 + cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux"); 575 + if (IS_ERR(cdns_xspi->auxbase)) { 576 + dev_err(dev, "Failed to remap AUX address\n"); 577 + return PTR_ERR(cdns_xspi->auxbase); 578 + } 579 + 580 + cdns_xspi->irq = platform_get_irq(pdev, 0); 581 + if (cdns_xspi->irq < 0) { 582 + dev_err(dev, "Failed to get IRQ\n"); 583 + return -ENXIO; 584 + } 585 + 586 + ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler, 587 + IRQF_SHARED, pdev->name, cdns_xspi); 588 + if (ret) { 589 + dev_err(dev, "Failed to request IRQ: %d\n", cdns_xspi->irq); 590 + return ret; 591 + } 592 + 593 + cdns_xspi_print_phy_config(cdns_xspi); 594 + 595 + ret = cdns_xspi_controller_init(cdns_xspi); 596 + if (ret) { 597 + dev_err(dev, "Failed to initialize controller\n"); 598 + return ret; 599 + } 600 + 601 + master->num_chipselect = 1 << cdns_xspi->hw_num_banks; 602 + 603 + ret = devm_spi_register_master(dev, master); 604 + if (ret) { 605 + dev_err(dev, "Failed to register SPI master\n"); 606 + return ret; 607 + } 608 + 609 + dev_info(dev, "Successfully registered SPI master\n"); 610 + 611 + return 0; 612 + } 613 + 614 + #ifdef CONFIG_OF 615 + static const struct of_device_id cdns_xspi_of_match[] = { 616 + { 617 + .compatible = "cdns,xspi-nor", 618 + }, 619 + { /* end of table */} 620 + }; 621 + MODULE_DEVICE_TABLE(of, cdns_xspi_of_match); 622 + #else 623 + #define cdns_xspi_of_match NULL 624 + #endif /* CONFIG_OF */ 625 + 626 + static struct platform_driver cdns_xspi_platform_driver = { 627 + .probe = cdns_xspi_probe, 628 + .remove = NULL, 629 + .driver = { 630 + .name = CDNS_XSPI_NAME, 631 + .of_match_table = cdns_xspi_of_match, 632 + }, 633 + }; 634 + 635 + 
module_platform_driver(cdns_xspi_platform_driver); 636 + 637 + MODULE_DESCRIPTION("Cadence XSPI Controller Driver"); 638 + MODULE_LICENSE("GPL v2"); 639 + MODULE_ALIAS("platform:" CDNS_XSPI_NAME); 640 + MODULE_AUTHOR("Konrad Kociolek <konrad@cadence.com>"); 641 + MODULE_AUTHOR("Jayshri Pawar <jpawar@cadence.com>"); 642 + MODULE_AUTHOR("Parshuram Thombare <pthombar@cadence.com>");
+79 -42
drivers/spi/spi-fsi.c
··· 67 67 SPI_FSI_STATUS_RDR_OVERRUN) 68 68 #define SPI_FSI_PORT_CTRL 0x9 69 69 70 + struct fsi2spi { 71 + struct fsi_device *fsi; /* FSI2SPI CFAM engine device */ 72 + struct mutex lock; /* lock access to the device */ 73 + }; 74 + 70 75 struct fsi_spi { 71 76 struct device *dev; /* SPI controller device */ 72 - struct fsi_device *fsi; /* FSI2SPI CFAM engine device */ 77 + struct fsi2spi *bridge; /* FSI2SPI device */ 73 78 u32 base; 74 79 }; 75 80 ··· 109 104 u32 sts; 110 105 __be32 sts_be; 111 106 112 - rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be, 107 + rc = fsi_device_read(ctx->bridge->fsi, FSI2SPI_STATUS, &sts_be, 113 108 sizeof(sts_be)); 114 109 if (rc) 115 110 return rc; ··· 125 120 126 121 static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value) 127 122 { 128 - int rc; 123 + int rc = 0; 129 124 __be32 cmd_be; 130 125 __be32 data_be; 131 126 u32 cmd = offset + ctx->base; 127 + struct fsi2spi *bridge = ctx->bridge; 132 128 133 129 *value = 0ULL; 134 130 135 131 if (cmd & FSI2SPI_CMD_WRITE) 136 132 return -EINVAL; 137 133 138 - cmd_be = cpu_to_be32(cmd); 139 - rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be)); 134 + rc = mutex_lock_interruptible(&bridge->lock); 140 135 if (rc) 141 136 return rc; 137 + 138 + cmd_be = cpu_to_be32(cmd); 139 + rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be, 140 + sizeof(cmd_be)); 141 + if (rc) 142 + goto unlock; 142 143 143 144 rc = fsi_spi_check_status(ctx); 144 145 if (rc) 145 - return rc; 146 + goto unlock; 146 147 147 - rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be, 148 + rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA0, &data_be, 148 149 sizeof(data_be)); 149 150 if (rc) 150 - return rc; 151 + goto unlock; 151 152 152 153 *value |= (u64)be32_to_cpu(data_be) << 32; 153 154 154 - rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be, 155 + rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA1, &data_be, 155 156 sizeof(data_be)); 156 157 if (rc) 157 - return rc; 158 + 
goto unlock; 158 159 159 160 *value |= (u64)be32_to_cpu(data_be); 160 161 dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value); 161 162 162 - return 0; 163 + unlock: 164 + mutex_unlock(&bridge->lock); 165 + return rc; 163 166 } 164 167 165 168 static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value) 166 169 { 167 - int rc; 170 + int rc = 0; 168 171 __be32 cmd_be; 169 172 __be32 data_be; 170 173 u32 cmd = offset + ctx->base; 174 + struct fsi2spi *bridge = ctx->bridge; 171 175 172 176 if (cmd & FSI2SPI_CMD_WRITE) 173 177 return -EINVAL; 174 178 179 + rc = mutex_lock_interruptible(&bridge->lock); 180 + if (rc) 181 + return rc; 182 + 175 183 dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value); 176 184 177 185 data_be = cpu_to_be32(upper_32_bits(value)); 178 - rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be, 186 + rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA0, &data_be, 179 187 sizeof(data_be)); 180 188 if (rc) 181 - return rc; 189 + goto unlock; 182 190 183 191 data_be = cpu_to_be32(lower_32_bits(value)); 184 - rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be, 192 + rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA1, &data_be, 185 193 sizeof(data_be)); 186 194 if (rc) 187 - return rc; 195 + goto unlock; 188 196 189 197 cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE); 190 - rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be)); 198 + rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be, 199 + sizeof(cmd_be)); 191 200 if (rc) 192 - return rc; 201 + goto unlock; 193 202 194 - return fsi_spi_check_status(ctx); 203 + rc = fsi_spi_check_status(ctx); 204 + 205 + unlock: 206 + mutex_unlock(&bridge->lock); 207 + return rc; 195 208 } 196 209 197 210 static int fsi_spi_data_in(u64 in, u8 *rx, int len) ··· 257 234 return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL); 258 235 } 259 236 237 + static int fsi_spi_status(struct fsi_spi *ctx, u64 *status, const char *dir) 238 + { 239 + int rc = fsi_spi_read_reg(ctx, 
SPI_FSI_STATUS, status); 240 + 241 + if (rc) 242 + return rc; 243 + 244 + if (*status & SPI_FSI_STATUS_ANY_ERROR) { 245 + dev_err(ctx->dev, "%s error: %016llx\n", dir, *status); 246 + 247 + rc = fsi_spi_reset(ctx); 248 + if (rc) 249 + return rc; 250 + 251 + return -EREMOTEIO; 252 + } 253 + 254 + return 0; 255 + } 256 + 260 257 static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val) 261 258 { 262 259 /* ··· 316 273 return rc; 317 274 318 275 do { 319 - rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, 320 - &status); 276 + rc = fsi_spi_status(ctx, &status, "TX"); 321 277 if (rc) 322 278 return rc; 323 - 324 - if (status & SPI_FSI_STATUS_ANY_ERROR) { 325 - rc = fsi_spi_reset(ctx); 326 - if (rc) 327 - return rc; 328 - 329 - return -EREMOTEIO; 330 - } 331 279 } while (status & SPI_FSI_STATUS_TDR_FULL); 332 280 333 281 sent += nb; ··· 330 296 331 297 while (transfer->len > recv) { 332 298 do { 333 - rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, 334 - &status); 299 + rc = fsi_spi_status(ctx, &status, "RX"); 335 300 if (rc) 336 301 return rc; 337 - 338 - if (status & SPI_FSI_STATUS_ANY_ERROR) { 339 - rc = fsi_spi_reset(ctx); 340 - if (rc) 341 - return rc; 342 - 343 - return -EREMOTEIO; 344 - } 345 302 } while (!(status & SPI_FSI_STATUS_RDR_FULL)); 346 303 347 304 rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in); ··· 373 348 if (status & (SPI_FSI_STATUS_ANY_ERROR | 374 349 SPI_FSI_STATUS_TDR_FULL | 375 350 SPI_FSI_STATUS_RDR_FULL)) { 376 - if (reset) 351 + if (reset) { 352 + dev_err(ctx->dev, 353 + "Initialization error: %08llx\n", 354 + status); 377 355 return -EIO; 356 + } 378 357 379 358 rc = fsi_spi_reset(ctx); 380 359 if (rc) ··· 417 388 struct spi_transfer *transfer; 418 389 struct fsi_spi *ctx = spi_controller_get_devdata(ctlr); 419 390 420 - rc = fsi_spi_check_mux(ctx->fsi, ctx->dev); 391 + rc = fsi_spi_check_mux(ctx->bridge->fsi, ctx->dev); 421 392 if (rc) 422 393 goto error; 423 394 ··· 507 478 int rc; 508 479 struct device_node *np; 509 480 int 
num_controllers_registered = 0; 481 + struct fsi2spi *bridge; 510 482 struct fsi_device *fsi = to_fsi_dev(dev); 511 483 512 484 rc = fsi_spi_check_mux(fsi, dev); 513 485 if (rc) 514 486 return -ENODEV; 487 + 488 + bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); 489 + if (!bridge) 490 + return -ENOMEM; 491 + 492 + bridge->fsi = fsi; 493 + mutex_init(&bridge->lock); 515 494 516 495 for_each_available_child_of_node(dev->of_node, np) { 517 496 u32 base; ··· 543 506 544 507 ctx = spi_controller_get_devdata(ctlr); 545 508 ctx->dev = &ctlr->dev; 546 - ctx->fsi = fsi; 509 + ctx->bridge = bridge; 547 510 ctx->base = base + SPI_FSI_BASE; 548 511 549 512 rc = devm_spi_register_controller(dev, ctlr);
+239 -15
drivers/spi/spi-geni-qcom.c
··· 2 2 // Copyright (c) 2017-2018, The Linux foundation. All rights reserved. 3 3 4 4 #include <linux/clk.h> 5 + #include <linux/dmaengine.h> 6 + #include <linux/dma-mapping.h> 7 + #include <linux/dma/qcom-gpi-dma.h> 5 8 #include <linux/interrupt.h> 6 9 #include <linux/io.h> 7 10 #include <linux/log2.h> ··· 66 63 #define TIMESTAMP_AFTER BIT(3) 67 64 #define POST_CMD_DELAY BIT(4) 68 65 66 + #define GSI_LOOPBACK_EN BIT(0) 67 + #define GSI_CS_TOGGLE BIT(3) 68 + #define GSI_CPHA BIT(4) 69 + #define GSI_CPOL BIT(5) 70 + 71 + #define MAX_TX_SG 3 72 + #define NUM_SPI_XFER 8 73 + #define SPI_XFER_TIMEOUT_MS 250 74 + 69 75 struct spi_geni_master { 70 76 struct geni_se se; 71 77 struct device *dev; ··· 96 84 int irq; 97 85 bool cs_flag; 98 86 bool abort_failed; 87 + struct dma_chan *tx; 88 + struct dma_chan *rx; 89 + int cur_xfer_mode; 99 90 }; 100 91 101 92 static int get_spi_clk_cfg(unsigned int speed_hz, ··· 345 330 return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz); 346 331 } 347 332 333 + static void 334 + spi_gsi_callback_result(void *cb, const struct dmaengine_result *result) 335 + { 336 + struct spi_master *spi = cb; 337 + 338 + if (result->result != DMA_TRANS_NOERROR) { 339 + dev_err(&spi->dev, "DMA txn failed: %d\n", result->result); 340 + return; 341 + } 342 + 343 + if (!result->residue) { 344 + dev_dbg(&spi->dev, "DMA txn completed\n"); 345 + spi_finalize_current_transfer(spi); 346 + } else { 347 + dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue); 348 + } 349 + } 350 + 351 + static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas, 352 + struct spi_device *spi_slv, struct spi_master *spi) 353 + { 354 + unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; 355 + struct dma_slave_config config = {}; 356 + struct gpi_spi_config peripheral = {}; 357 + struct dma_async_tx_descriptor *tx_desc, *rx_desc; 358 + int ret; 359 + 360 + config.peripheral_config = &peripheral; 361 + config.peripheral_size = 
sizeof(peripheral); 362 + peripheral.set_config = true; 363 + 364 + if (xfer->bits_per_word != mas->cur_bits_per_word || 365 + xfer->speed_hz != mas->cur_speed_hz) { 366 + mas->cur_bits_per_word = xfer->bits_per_word; 367 + mas->cur_speed_hz = xfer->speed_hz; 368 + } 369 + 370 + if (xfer->tx_buf && xfer->rx_buf) { 371 + peripheral.cmd = SPI_DUPLEX; 372 + } else if (xfer->tx_buf) { 373 + peripheral.cmd = SPI_TX; 374 + peripheral.rx_len = 0; 375 + } else if (xfer->rx_buf) { 376 + peripheral.cmd = SPI_RX; 377 + if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) { 378 + peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word); 379 + } else { 380 + int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1; 381 + 382 + peripheral.rx_len = (xfer->len / bytes_per_word); 383 + } 384 + } 385 + 386 + peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP); 387 + peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL); 388 + peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA); 389 + peripheral.cs = spi_slv->chip_select; 390 + peripheral.pack_en = true; 391 + peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN; 392 + 393 + ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, 394 + &peripheral.clk_src, &peripheral.clk_div); 395 + if (ret) { 396 + dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret); 397 + return ret; 398 + } 399 + 400 + if (!xfer->cs_change) { 401 + if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers)) 402 + peripheral.fragmentation = FRAGMENTATION; 403 + } 404 + 405 + if (peripheral.cmd & SPI_RX) { 406 + dmaengine_slave_config(mas->rx, &config); 407 + rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents, 408 + DMA_DEV_TO_MEM, flags); 409 + if (!rx_desc) { 410 + dev_err(mas->dev, "Err setting up rx desc\n"); 411 + return -EIO; 412 + } 413 + } 414 + 415 + /* 416 + * Prepare the TX always, even for RX or tx_buf being null, we would 417 + * need TX to be prepared per GSI spec 418 + */ 419 + 
dmaengine_slave_config(mas->tx, &config); 420 + tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents, 421 + DMA_MEM_TO_DEV, flags); 422 + if (!tx_desc) { 423 + dev_err(mas->dev, "Err setting up tx desc\n"); 424 + return -EIO; 425 + } 426 + 427 + tx_desc->callback_result = spi_gsi_callback_result; 428 + tx_desc->callback_param = spi; 429 + 430 + if (peripheral.cmd & SPI_RX) 431 + dmaengine_submit(rx_desc); 432 + dmaengine_submit(tx_desc); 433 + 434 + if (peripheral.cmd & SPI_RX) 435 + dma_async_issue_pending(mas->rx); 436 + 437 + dma_async_issue_pending(mas->tx); 438 + return 1; 439 + } 440 + 441 + static bool geni_can_dma(struct spi_controller *ctlr, 442 + struct spi_device *slv, struct spi_transfer *xfer) 443 + { 444 + struct spi_geni_master *mas = spi_master_get_devdata(slv->master); 445 + 446 + /* check if dma is supported */ 447 + return mas->cur_xfer_mode != GENI_SE_FIFO; 448 + } 449 + 348 450 static int spi_geni_prepare_message(struct spi_master *spi, 349 451 struct spi_message *spi_msg) 350 452 { 351 - int ret; 352 453 struct spi_geni_master *mas = spi_master_get_devdata(spi); 454 + int ret; 353 455 354 - if (spi_geni_is_abort_still_pending(mas)) 355 - return -EBUSY; 456 + switch (mas->cur_xfer_mode) { 457 + case GENI_SE_FIFO: 458 + if (spi_geni_is_abort_still_pending(mas)) 459 + return -EBUSY; 460 + ret = setup_fifo_params(spi_msg->spi, spi); 461 + if (ret) 462 + dev_err(mas->dev, "Couldn't select mode %d\n", ret); 463 + return ret; 356 464 357 - ret = setup_fifo_params(spi_msg->spi, spi); 358 - if (ret) 359 - dev_err(mas->dev, "Couldn't select mode %d\n", ret); 465 + case GENI_GPI_DMA: 466 + /* nothing to do for GPI DMA */ 467 + return 0; 468 + } 469 + 470 + dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode); 471 + return -EINVAL; 472 + } 473 + 474 + static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas) 475 + { 476 + int ret; 477 + 478 + mas->tx = dma_request_chan(mas->dev, "tx"); 479 + ret = 
dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n"); 480 + if (ret < 0) 481 + goto err_tx; 482 + 483 + mas->rx = dma_request_chan(mas->dev, "rx"); 484 + ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n"); 485 + if (ret < 0) 486 + goto err_rx; 487 + 488 + return 0; 489 + 490 + err_rx: 491 + dma_release_channel(mas->tx); 492 + mas->tx = NULL; 493 + err_tx: 494 + mas->rx = NULL; 360 495 return ret; 496 + } 497 + 498 + static void spi_geni_release_dma_chan(struct spi_geni_master *mas) 499 + { 500 + if (mas->rx) { 501 + dma_release_channel(mas->rx); 502 + mas->rx = NULL; 503 + } 504 + 505 + if (mas->tx) { 506 + dma_release_channel(mas->tx); 507 + mas->tx = NULL; 508 + } 361 509 } 362 510 363 511 static int spi_geni_init(struct spi_geni_master *mas) 364 512 { 365 513 struct geni_se *se = &mas->se; 366 514 unsigned int proto, major, minor, ver; 367 - u32 spi_tx_cfg; 515 + u32 spi_tx_cfg, fifo_disable; 516 + int ret = -ENXIO; 368 517 369 518 pm_runtime_get_sync(mas->dev); 370 519 371 520 proto = geni_se_read_proto(se); 372 521 if (proto != GENI_SE_SPI) { 373 522 dev_err(mas->dev, "Invalid proto %d\n", proto); 374 - pm_runtime_put(mas->dev); 375 - return -ENXIO; 523 + goto out_pm; 376 524 } 377 525 mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se); 378 526 ··· 558 380 else 559 381 mas->oversampling = 1; 560 382 561 - geni_se_select_mode(se, GENI_SE_FIFO); 383 + fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE; 384 + switch (fifo_disable) { 385 + case 1: 386 + ret = spi_geni_grab_gpi_chan(mas); 387 + if (!ret) { /* success case */ 388 + mas->cur_xfer_mode = GENI_GPI_DMA; 389 + geni_se_select_mode(se, GENI_GPI_DMA); 390 + dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n"); 391 + break; 392 + } 393 + /* 394 + * in case of failure to get dma channel, we can still do the 395 + * FIFO mode, so fallthrough 396 + */ 397 + dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n"); 398 + 
fallthrough; 399 + 400 + case 0: 401 + mas->cur_xfer_mode = GENI_SE_FIFO; 402 + geni_se_select_mode(se, GENI_SE_FIFO); 403 + ret = 0; 404 + break; 405 + } 562 406 563 407 /* We always control CS manually */ 564 408 spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG); 565 409 spi_tx_cfg &= ~CS_TOGGLE; 566 410 writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG); 567 411 412 + out_pm: 568 413 pm_runtime_put(mas->dev); 569 - return 0; 414 + return ret; 570 415 } 571 416 572 417 static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas) ··· 770 569 if (!xfer->len) 771 570 return 0; 772 571 773 - setup_fifo_xfer(xfer, mas, slv->mode, spi); 774 - return 1; 572 + if (mas->cur_xfer_mode == GENI_SE_FIFO) { 573 + setup_fifo_xfer(xfer, mas, slv->mode, spi); 574 + return 1; 575 + } 576 + return setup_gsi_xfer(xfer, mas, slv, spi); 775 577 } 776 578 777 579 static irqreturn_t geni_spi_isr(int irq, void *data) ··· 869 665 if (irq < 0) 870 666 return irq; 871 667 668 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 669 + if (ret) { 670 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 671 + if (ret) 672 + return dev_err_probe(dev, ret, "could not set DMA mask\n"); 673 + } 674 + 872 675 base = devm_platform_ioremap_resource(pdev, 0); 873 676 if (IS_ERR(base)) 874 677 return PTR_ERR(base); ··· 915 704 spi->max_speed_hz = 50000000; 916 705 spi->prepare_message = spi_geni_prepare_message; 917 706 spi->transfer_one = spi_geni_transfer_one; 707 + spi->can_dma = geni_can_dma; 708 + spi->dma_map_dev = dev->parent; 918 709 spi->auto_runtime_pm = true; 919 710 spi->handle_err = handle_fifo_timeout; 920 - spi->set_cs = spi_geni_set_cs; 921 711 spi->use_gpio_descriptors = true; 922 712 923 713 init_completion(&mas->cs_done); ··· 944 732 if (ret) 945 733 goto spi_geni_probe_runtime_disable; 946 734 735 + /* 736 + * check the mode supported and set_cs for fifo mode only 737 + * for dma (gsi) mode, the gsi will set cs based on params passed in 738 + * TRE 739 + */ 740 + if 
(mas->cur_xfer_mode == GENI_SE_FIFO) 741 + spi->set_cs = spi_geni_set_cs; 742 + 947 743 ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi); 948 744 if (ret) 949 - goto spi_geni_probe_runtime_disable; 745 + goto spi_geni_release_dma; 950 746 951 747 ret = spi_register_master(spi); 952 748 if (ret) ··· 963 743 return 0; 964 744 spi_geni_probe_free_irq: 965 745 free_irq(mas->irq, spi); 746 + spi_geni_release_dma: 747 + spi_geni_release_dma_chan(mas); 966 748 spi_geni_probe_runtime_disable: 967 749 pm_runtime_disable(dev); 968 750 return ret; ··· 977 755 978 756 /* Unregister _before_ disabling pm_runtime() so we stop transfers */ 979 757 spi_unregister_master(spi); 758 + 759 + spi_geni_release_dma_chan(mas); 980 760 981 761 free_irq(mas->irq, spi); 982 762 pm_runtime_disable(&pdev->dev);
+482
drivers/spi/spi-ingenic.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * SPI bus driver for the Ingenic JZ47xx SoCs 4 + * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu> 5 + * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net> 6 + */ 7 + 8 + #include <linux/clk.h> 9 + #include <linux/delay.h> 10 + #include <linux/dmaengine.h> 11 + #include <linux/dma-mapping.h> 12 + #include <linux/iopoll.h> 13 + #include <linux/module.h> 14 + #include <linux/of_device.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/regmap.h> 17 + #include <linux/spi/spi.h> 18 + 19 + #define REG_SSIDR 0x0 20 + #define REG_SSICR0 0x4 21 + #define REG_SSICR1 0x8 22 + #define REG_SSISR 0xc 23 + #define REG_SSIGR 0x18 24 + 25 + #define REG_SSICR0_TENDIAN_LSB BIT(19) 26 + #define REG_SSICR0_RENDIAN_LSB BIT(17) 27 + #define REG_SSICR0_SSIE BIT(15) 28 + #define REG_SSICR0_LOOP BIT(10) 29 + #define REG_SSICR0_EACLRUN BIT(7) 30 + #define REG_SSICR0_FSEL BIT(6) 31 + #define REG_SSICR0_TFLUSH BIT(2) 32 + #define REG_SSICR0_RFLUSH BIT(1) 33 + 34 + #define REG_SSICR1_FRMHL_MASK (BIT(31) | BIT(30)) 35 + #define REG_SSICR1_FRMHL BIT(30) 36 + #define REG_SSICR1_LFST BIT(25) 37 + #define REG_SSICR1_UNFIN BIT(23) 38 + #define REG_SSICR1_PHA BIT(1) 39 + #define REG_SSICR1_POL BIT(0) 40 + 41 + #define REG_SSISR_END BIT(7) 42 + #define REG_SSISR_BUSY BIT(6) 43 + #define REG_SSISR_TFF BIT(5) 44 + #define REG_SSISR_RFE BIT(4) 45 + #define REG_SSISR_RFHF BIT(2) 46 + #define REG_SSISR_UNDR BIT(1) 47 + #define REG_SSISR_OVER BIT(0) 48 + 49 + #define SPI_INGENIC_FIFO_SIZE 128u 50 + 51 + struct jz_soc_info { 52 + u32 bits_per_word_mask; 53 + struct reg_field flen_field; 54 + bool has_trendian; 55 + }; 56 + 57 + struct ingenic_spi { 58 + const struct jz_soc_info *soc_info; 59 + struct clk *clk; 60 + struct resource *mem_res; 61 + 62 + struct regmap *map; 63 + struct regmap_field *flen_field; 64 + }; 65 + 66 + static int spi_ingenic_wait(struct ingenic_spi *priv, 67 + unsigned long mask, 68 + bool 
condition) 69 + { 70 + unsigned int val; 71 + 72 + return regmap_read_poll_timeout(priv->map, REG_SSISR, val, 73 + !!(val & mask) == condition, 74 + 100, 10000); 75 + } 76 + 77 + static void spi_ingenic_set_cs(struct spi_device *spi, bool disable) 78 + { 79 + struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller); 80 + 81 + if (disable) { 82 + regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN); 83 + regmap_clear_bits(priv->map, REG_SSISR, 84 + REG_SSISR_UNDR | REG_SSISR_OVER); 85 + 86 + spi_ingenic_wait(priv, REG_SSISR_END, true); 87 + } else { 88 + regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN); 89 + } 90 + 91 + regmap_set_bits(priv->map, REG_SSICR0, 92 + REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH); 93 + } 94 + 95 + static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv, 96 + struct spi_device *spi, 97 + struct spi_transfer *xfer) 98 + { 99 + unsigned long clk_hz = clk_get_rate(priv->clk); 100 + u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz, 101 + bits_per_word = xfer->bits_per_word ?: spi->bits_per_word; 102 + 103 + cdiv = clk_hz / (speed_hz * 2); 104 + cdiv = clamp(cdiv, 1u, 0x100u) - 1; 105 + 106 + regmap_write(priv->map, REG_SSIGR, cdiv); 107 + 108 + regmap_field_write(priv->flen_field, bits_per_word - 2); 109 + } 110 + 111 + static void spi_ingenic_finalize_transfer(void *controller) 112 + { 113 + spi_finalize_current_transfer(controller); 114 + } 115 + 116 + static struct dma_async_tx_descriptor * 117 + spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan, 118 + struct sg_table *sg, enum dma_transfer_direction dir, 119 + unsigned int bits) 120 + { 121 + struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); 122 + struct dma_slave_config cfg = { 123 + .direction = dir, 124 + .src_addr = priv->mem_res->start + REG_SSIDR, 125 + .dst_addr = priv->mem_res->start + REG_SSIDR, 126 + }; 127 + struct dma_async_tx_descriptor *desc; 128 + dma_cookie_t cookie; 129 + int ret; 130 + 131 + if 
(bits > 16) { 132 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 133 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 134 + cfg.src_maxburst = cfg.dst_maxburst = 4; 135 + } else if (bits > 8) { 136 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 137 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 138 + cfg.src_maxburst = cfg.dst_maxburst = 2; 139 + } else { 140 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 141 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 142 + cfg.src_maxburst = cfg.dst_maxburst = 1; 143 + } 144 + 145 + ret = dmaengine_slave_config(chan, &cfg); 146 + if (ret) 147 + return ERR_PTR(ret); 148 + 149 + desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir, 150 + DMA_PREP_INTERRUPT); 151 + if (!desc) 152 + return ERR_PTR(-ENOMEM); 153 + 154 + if (dir == DMA_DEV_TO_MEM) { 155 + desc->callback = spi_ingenic_finalize_transfer; 156 + desc->callback_param = ctlr; 157 + } 158 + 159 + cookie = dmaengine_submit(desc); 160 + 161 + ret = dma_submit_error(cookie); 162 + if (ret) { 163 + dmaengine_desc_free(desc); 164 + return ERR_PTR(ret); 165 + } 166 + 167 + return desc; 168 + } 169 + 170 + static int spi_ingenic_dma_tx(struct spi_controller *ctlr, 171 + struct spi_transfer *xfer, unsigned int bits) 172 + { 173 + struct dma_async_tx_descriptor *rx_desc, *tx_desc; 174 + 175 + rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx, 176 + &xfer->rx_sg, DMA_DEV_TO_MEM, bits); 177 + if (IS_ERR(rx_desc)) 178 + return PTR_ERR(rx_desc); 179 + 180 + tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx, 181 + &xfer->tx_sg, DMA_MEM_TO_DEV, bits); 182 + if (IS_ERR(tx_desc)) { 183 + dmaengine_terminate_async(ctlr->dma_rx); 184 + dmaengine_desc_free(rx_desc); 185 + return PTR_ERR(tx_desc); 186 + } 187 + 188 + dma_async_issue_pending(ctlr->dma_rx); 189 + dma_async_issue_pending(ctlr->dma_tx); 190 + 191 + return 1; 192 + } 193 + 194 + #define SPI_INGENIC_TX(x) \ 195 + static int spi_ingenic_tx##x(struct ingenic_spi *priv, \ 196 + struct spi_transfer 
*xfer) \ 197 + { \ 198 + unsigned int count = xfer->len / (x / 8); \ 199 + unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE); \ 200 + const u##x *tx_buf = xfer->tx_buf; \ 201 + u##x *rx_buf = xfer->rx_buf; \ 202 + unsigned int i, val; \ 203 + int err; \ 204 + \ 205 + /* Fill up the TX fifo */ \ 206 + for (i = 0; i < prefill; i++) { \ 207 + val = tx_buf ? tx_buf[i] : 0; \ 208 + \ 209 + regmap_write(priv->map, REG_SSIDR, val); \ 210 + } \ 211 + \ 212 + for (i = 0; i < count; i++) { \ 213 + err = spi_ingenic_wait(priv, REG_SSISR_RFE, false); \ 214 + if (err) \ 215 + return err; \ 216 + \ 217 + regmap_read(priv->map, REG_SSIDR, &val); \ 218 + if (rx_buf) \ 219 + rx_buf[i] = val; \ 220 + \ 221 + if (i < count - prefill) { \ 222 + val = tx_buf ? tx_buf[i + prefill] : 0; \ 223 + \ 224 + regmap_write(priv->map, REG_SSIDR, val); \ 225 + } \ 226 + } \ 227 + \ 228 + return 0; \ 229 + } 230 + SPI_INGENIC_TX(8) 231 + SPI_INGENIC_TX(16) 232 + SPI_INGENIC_TX(32) 233 + #undef SPI_INGENIC_TX 234 + 235 + static int spi_ingenic_transfer_one(struct spi_controller *ctlr, 236 + struct spi_device *spi, 237 + struct spi_transfer *xfer) 238 + { 239 + struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); 240 + unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word; 241 + bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer); 242 + 243 + spi_ingenic_prepare_transfer(priv, spi, xfer); 244 + 245 + if (ctlr->cur_msg_mapped && can_dma) 246 + return spi_ingenic_dma_tx(ctlr, xfer, bits); 247 + 248 + if (bits > 16) 249 + return spi_ingenic_tx32(priv, xfer); 250 + 251 + if (bits > 8) 252 + return spi_ingenic_tx16(priv, xfer); 253 + 254 + return spi_ingenic_tx8(priv, xfer); 255 + } 256 + 257 + static int spi_ingenic_prepare_message(struct spi_controller *ctlr, 258 + struct spi_message *message) 259 + { 260 + struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); 261 + struct spi_device *spi = message->spi; 262 + unsigned int cs = REG_SSICR1_FRMHL << 
spi->chip_select; 263 + unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL; 264 + unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs; 265 + unsigned int ssicr0 = 0, ssicr1 = 0; 266 + 267 + if (priv->soc_info->has_trendian) { 268 + ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB; 269 + 270 + if (spi->mode & SPI_LSB_FIRST) 271 + ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB; 272 + } else { 273 + ssicr1_mask |= REG_SSICR1_LFST; 274 + 275 + if (spi->mode & SPI_LSB_FIRST) 276 + ssicr1 |= REG_SSICR1_LFST; 277 + } 278 + 279 + if (spi->mode & SPI_LOOP) 280 + ssicr0 |= REG_SSICR0_LOOP; 281 + if (spi->chip_select) 282 + ssicr0 |= REG_SSICR0_FSEL; 283 + 284 + if (spi->mode & SPI_CPHA) 285 + ssicr1 |= REG_SSICR1_PHA; 286 + if (spi->mode & SPI_CPOL) 287 + ssicr1 |= REG_SSICR1_POL; 288 + if (spi->mode & SPI_CS_HIGH) 289 + ssicr1 |= cs; 290 + 291 + regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0); 292 + regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1); 293 + 294 + return 0; 295 + } 296 + 297 + static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr) 298 + { 299 + struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); 300 + int ret; 301 + 302 + ret = clk_prepare_enable(priv->clk); 303 + if (ret) 304 + return ret; 305 + 306 + regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN); 307 + regmap_write(priv->map, REG_SSICR1, 0); 308 + regmap_write(priv->map, REG_SSISR, 0); 309 + regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE); 310 + 311 + return 0; 312 + } 313 + 314 + static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr) 315 + { 316 + struct ingenic_spi *priv = spi_controller_get_devdata(ctlr); 317 + 318 + regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE); 319 + 320 + clk_disable_unprepare(priv->clk); 321 + 322 + return 0; 323 + } 324 + 325 + static bool spi_ingenic_can_dma(struct spi_controller *ctlr, 326 + struct spi_device *spi, 327 + struct spi_transfer 
*xfer) 328 + { 329 + struct dma_slave_caps caps; 330 + int ret; 331 + 332 + ret = dma_get_slave_caps(ctlr->dma_tx, &caps); 333 + if (ret) { 334 + dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret); 335 + return false; 336 + } 337 + 338 + return !caps.max_sg_burst || 339 + xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE; 340 + } 341 + 342 + static int spi_ingenic_request_dma(struct spi_controller *ctlr, 343 + struct device *dev) 344 + { 345 + ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); 346 + if (!ctlr->dma_tx) 347 + return -ENODEV; 348 + 349 + ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); 350 + 351 + if (!ctlr->dma_rx) 352 + return -ENODEV; 353 + 354 + ctlr->can_dma = spi_ingenic_can_dma; 355 + 356 + return 0; 357 + } 358 + 359 + static void spi_ingenic_release_dma(void *data) 360 + { 361 + struct spi_controller *ctlr = data; 362 + 363 + if (ctlr->dma_tx) 364 + dma_release_channel(ctlr->dma_tx); 365 + if (ctlr->dma_rx) 366 + dma_release_channel(ctlr->dma_rx); 367 + } 368 + 369 + static const struct regmap_config spi_ingenic_regmap_config = { 370 + .reg_bits = 32, 371 + .val_bits = 32, 372 + .reg_stride = 4, 373 + .max_register = REG_SSIGR, 374 + }; 375 + 376 + static int spi_ingenic_probe(struct platform_device *pdev) 377 + { 378 + const struct jz_soc_info *pdata; 379 + struct device *dev = &pdev->dev; 380 + struct spi_controller *ctlr; 381 + struct ingenic_spi *priv; 382 + void __iomem *base; 383 + int ret; 384 + 385 + pdata = of_device_get_match_data(dev); 386 + if (!pdata) { 387 + dev_err(dev, "Missing platform data.\n"); 388 + return -EINVAL; 389 + } 390 + 391 + ctlr = devm_spi_alloc_master(dev, sizeof(*priv)); 392 + if (!ctlr) { 393 + dev_err(dev, "Unable to allocate SPI controller.\n"); 394 + return -ENOMEM; 395 + } 396 + 397 + priv = spi_controller_get_devdata(ctlr); 398 + priv->soc_info = pdata; 399 + 400 + priv->clk = devm_clk_get(dev, NULL); 401 + if (IS_ERR(priv->clk)) { 402 + return dev_err_probe(dev, PTR_ERR(priv->clk), 403 
+ "Unable to get clock.\n"); 404 + } 405 + 406 + base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res); 407 + if (IS_ERR(base)) 408 + return PTR_ERR(base); 409 + 410 + priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config); 411 + if (IS_ERR(priv->map)) 412 + return PTR_ERR(priv->map); 413 + 414 + priv->flen_field = devm_regmap_field_alloc(dev, priv->map, 415 + pdata->flen_field); 416 + if (IS_ERR(priv->flen_field)) 417 + return PTR_ERR(priv->flen_field); 418 + 419 + platform_set_drvdata(pdev, ctlr); 420 + 421 + ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware; 422 + ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware; 423 + ctlr->prepare_message = spi_ingenic_prepare_message; 424 + ctlr->set_cs = spi_ingenic_set_cs; 425 + ctlr->transfer_one = spi_ingenic_transfer_one; 426 + ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH; 427 + ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; 428 + ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE; 429 + ctlr->bits_per_word_mask = pdata->bits_per_word_mask; 430 + ctlr->min_speed_hz = 7200; 431 + ctlr->max_speed_hz = 54000000; 432 + ctlr->num_chipselect = 2; 433 + ctlr->dev.of_node = pdev->dev.of_node; 434 + 435 + if (spi_ingenic_request_dma(ctlr, dev)) 436 + dev_warn(dev, "DMA not available.\n"); 437 + 438 + ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr); 439 + if (ret) { 440 + dev_err(dev, "Unable to add action.\n"); 441 + return ret; 442 + } 443 + 444 + ret = devm_spi_register_controller(dev, ctlr); 445 + if (ret) 446 + dev_err(dev, "Unable to register SPI controller.\n"); 447 + 448 + return ret; 449 + } 450 + 451 + static const struct jz_soc_info jz4750_soc_info = { 452 + .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17), 453 + .flen_field = REG_FIELD(REG_SSICR1, 4, 7), 454 + .has_trendian = false, 455 + }; 456 + 457 + static const struct jz_soc_info jz4780_soc_info = { 458 + .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 
32), 459 + .flen_field = REG_FIELD(REG_SSICR1, 3, 7), 460 + .has_trendian = true, 461 + }; 462 + 463 + static const struct of_device_id spi_ingenic_of_match[] = { 464 + { .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info }, 465 + { .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info }, 466 + {} 467 + }; 468 + MODULE_DEVICE_TABLE(of, spi_ingenic_of_match); 469 + 470 + static struct platform_driver spi_ingenic_driver = { 471 + .driver = { 472 + .name = "spi-ingenic", 473 + .of_match_table = spi_ingenic_of_match, 474 + }, 475 + .probe = spi_ingenic_probe, 476 + }; 477 + 478 + module_platform_driver(spi_ingenic_driver); 479 + MODULE_DESCRIPTION("SPI bus driver for the Ingenic JZ47xx SoCs"); 480 + MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>"); 481 + MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); 482 + MODULE_LICENSE("GPL");
+1 -1
drivers/spi/spi-mtk-nor.c
··· 160 160 { 161 161 int dummy = 0; 162 162 163 - if (op->dummy.buswidth) 163 + if (op->dummy.nbytes) 164 164 dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth; 165 165 166 166 if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
+1
drivers/spi/spi-orion.c
··· 769 769 dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); 770 770 if (!dir_acc->vaddr) { 771 771 status = -ENOMEM; 772 + of_node_put(np); 772 773 goto out_rel_axi_clk; 773 774 } 774 775 dir_acc->size = PAGE_SIZE;
+3 -1
drivers/spi/spi-rpc-if.c
··· 139 139 return -ENOMEM; 140 140 141 141 rpc = spi_controller_get_devdata(ctlr); 142 - rpcif_sw_init(rpc, parent); 142 + error = rpcif_sw_init(rpc, parent); 143 + if (error) 144 + return error; 143 145 144 146 platform_set_drvdata(pdev, ctlr); 145 147
-1
drivers/spi/spi-rspi.c
··· 1427 1427 MODULE_DESCRIPTION("Renesas RSPI bus driver"); 1428 1428 MODULE_LICENSE("GPL v2"); 1429 1429 MODULE_AUTHOR("Yoshihiro Shimoda"); 1430 - MODULE_ALIAS("platform:rspi");
-1
drivers/spi/spi-sh-msiof.c
··· 1426 1426 MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver"); 1427 1427 MODULE_AUTHOR("Magnus Damm"); 1428 1428 MODULE_LICENSE("GPL v2"); 1429 - MODULE_ALIAS("platform:spi_sh_msiof");
+1 -1
drivers/spi/spi-stm32-qspi.c
··· 397 397 ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1); 398 398 } 399 399 400 - if (op->dummy.buswidth && op->dummy.nbytes) 400 + if (op->dummy.nbytes) 401 401 ccr |= FIELD_PREP(CCR_DCYC_MASK, 402 402 op->dummy.nbytes * 8 / op->dummy.buswidth); 403 403
+2 -2
drivers/spi/spi-tegra20-slink.c
··· 1124 1124 exit_pm_put: 1125 1125 pm_runtime_put(&pdev->dev); 1126 1126 exit_pm_disable: 1127 - pm_runtime_disable(&pdev->dev); 1127 + pm_runtime_force_suspend(&pdev->dev); 1128 1128 1129 1129 tegra_slink_deinit_dma_param(tspi, false); 1130 1130 exit_rx_dma_free: ··· 1143 1143 1144 1144 free_irq(tspi->irq, tspi); 1145 1145 1146 - pm_runtime_disable(&pdev->dev); 1146 + pm_runtime_force_suspend(&pdev->dev); 1147 1147 1148 1148 if (tspi->tx_dma_chan) 1149 1149 tegra_slink_deinit_dma_param(tspi, false);
+2 -2
drivers/spi/spi-tegra210-quad.c
··· 1318 1318 exit_free_irq: 1319 1319 free_irq(qspi_irq, tqspi); 1320 1320 exit_pm_disable: 1321 - pm_runtime_disable(&pdev->dev); 1321 + pm_runtime_force_suspend(&pdev->dev); 1322 1322 tegra_qspi_deinit_dma(tqspi); 1323 1323 return ret; 1324 1324 } ··· 1330 1330 1331 1331 spi_unregister_master(master); 1332 1332 free_irq(tqspi->irq, tqspi); 1333 - pm_runtime_disable(&pdev->dev); 1333 + pm_runtime_force_suspend(&pdev->dev); 1334 1334 tegra_qspi_deinit_dma(tqspi); 1335 1335 1336 1336 return 0;
+1 -1
drivers/spi/spi-tle62x0.c
··· 141 141 value = (st->gpio_state >> gpio_num) & 1; 142 142 mutex_unlock(&st->lock); 143 143 144 - return snprintf(buf, PAGE_SIZE, "%d", value); 144 + return sysfs_emit(buf, "%d", value); 145 145 } 146 146 147 147 static ssize_t tle62x0_gpio_store(struct device *dev,
+95 -142
drivers/spi/spi.c
··· 285 285 NULL, 286 286 }; 287 287 288 - void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 289 - struct spi_transfer *xfer, 290 - struct spi_controller *ctlr) 288 + static void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 289 + struct spi_transfer *xfer, 290 + struct spi_controller *ctlr) 291 291 { 292 292 unsigned long flags; 293 293 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; ··· 310 310 311 311 spin_unlock_irqrestore(&stats->lock, flags); 312 312 } 313 - EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 314 313 315 314 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 316 315 * and the sysfs version makes coldplug work too. ··· 535 536 * 536 537 * Return: a pointer to the new device, or NULL. 537 538 */ 538 - struct spi_device *spi_alloc_device(struct spi_controller *ctlr) 539 + static struct spi_device *spi_alloc_device(struct spi_controller *ctlr) 539 540 { 540 541 struct spi_device *spi; 541 542 ··· 560 561 device_initialize(&spi->dev); 561 562 return spi; 562 563 } 563 - EXPORT_SYMBOL_GPL(spi_alloc_device); 564 564 565 565 static void spi_dev_set_name(struct spi_device *spi) 566 566 { ··· 597 599 struct device *dev = ctlr->dev.parent; 598 600 int status; 599 601 602 + /* 603 + * We need to make sure there's no other device with this 604 + * chipselect **BEFORE** we call setup(), else we'll trash 605 + * its configuration. 
606 + */ 600 607 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 601 608 if (status) { 602 609 dev_err(dev, "chipselect %d already in use\n", ··· 654 651 * 655 652 * Return: 0 on success; negative errno on failure 656 653 */ 657 - int spi_add_device(struct spi_device *spi) 654 + static int spi_add_device(struct spi_device *spi) 658 655 { 659 656 struct spi_controller *ctlr = spi->controller; 660 657 struct device *dev = ctlr->dev.parent; ··· 670 667 /* Set the bus ID string */ 671 668 spi_dev_set_name(spi); 672 669 673 - /* We need to make sure there's no other device with this 674 - * chipselect **BEFORE** we call setup(), else we'll trash 675 - * its configuration. Lock against concurrent add() calls. 676 - */ 677 670 mutex_lock(&ctlr->add_lock); 678 671 status = __spi_add_device(spi); 679 672 mutex_unlock(&ctlr->add_lock); 680 673 return status; 681 674 } 682 - EXPORT_SYMBOL_GPL(spi_add_device); 683 675 684 676 static int spi_add_device_locked(struct spi_device *spi) 685 677 { ··· 845 847 } 846 848 847 849 return 0; 850 + } 851 + 852 + /*-------------------------------------------------------------------------*/ 853 + 854 + /* Core methods for SPI resource management */ 855 + 856 + /** 857 + * spi_res_alloc - allocate a spi resource that is life-cycle managed 858 + * during the processing of a spi_message while using 859 + * spi_transfer_one 860 + * @spi: the spi device for which we allocate memory 861 + * @release: the release code to execute for this resource 862 + * @size: size to alloc and return 863 + * @gfp: GFP allocation flags 864 + * 865 + * Return: the pointer to the allocated data 866 + * 867 + * This may get enhanced in the future to allocate from a memory pool 868 + * of the @spi_device or @spi_controller to avoid repeated allocations. 
869 + */ 870 + static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release, 871 + size_t size, gfp_t gfp) 872 + { 873 + struct spi_res *sres; 874 + 875 + sres = kzalloc(sizeof(*sres) + size, gfp); 876 + if (!sres) 877 + return NULL; 878 + 879 + INIT_LIST_HEAD(&sres->entry); 880 + sres->release = release; 881 + 882 + return sres->data; 883 + } 884 + 885 + /** 886 + * spi_res_free - free an spi resource 887 + * @res: pointer to the custom data of a resource 888 + * 889 + */ 890 + static void spi_res_free(void *res) 891 + { 892 + struct spi_res *sres = container_of(res, struct spi_res, data); 893 + 894 + if (!res) 895 + return; 896 + 897 + WARN_ON(!list_empty(&sres->entry)); 898 + kfree(sres); 899 + } 900 + 901 + /** 902 + * spi_res_add - add a spi_res to the spi_message 903 + * @message: the spi message 904 + * @res: the spi_resource 905 + */ 906 + static void spi_res_add(struct spi_message *message, void *res) 907 + { 908 + struct spi_res *sres = container_of(res, struct spi_res, data); 909 + 910 + WARN_ON(!list_empty(&sres->entry)); 911 + list_add_tail(&sres->entry, &message->resources); 912 + } 913 + 914 + /** 915 + * spi_res_release - release all spi resources for this message 916 + * @ctlr: the @spi_controller 917 + * @message: the @spi_message 918 + */ 919 + static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 920 + { 921 + struct spi_res *res, *tmp; 922 + 923 + list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { 924 + if (res->release) 925 + res->release(ctlr, message, res->data); 926 + 927 + list_del(&res->entry); 928 + 929 + kfree(res); 930 + } 848 931 } 849 932 850 933 /*-------------------------------------------------------------------------*/ ··· 3147 3068 } 3148 3069 EXPORT_SYMBOL_GPL(spi_controller_resume); 3149 3070 3150 - static int __spi_controller_match(struct device *dev, const void *data) 3151 - { 3152 - struct spi_controller *ctlr; 3153 - const u16 *bus_num = data; 3154 - 
3155 - ctlr = container_of(dev, struct spi_controller, dev); 3156 - return ctlr->bus_num == *bus_num; 3157 - } 3158 - 3159 - /** 3160 - * spi_busnum_to_master - look up master associated with bus_num 3161 - * @bus_num: the master's bus number 3162 - * Context: can sleep 3163 - * 3164 - * This call may be used with devices that are registered after 3165 - * arch init time. It returns a refcounted pointer to the relevant 3166 - * spi_controller (which the caller must release), or NULL if there is 3167 - * no such master registered. 3168 - * 3169 - * Return: the SPI master structure on success, else NULL. 3170 - */ 3171 - struct spi_controller *spi_busnum_to_master(u16 bus_num) 3172 - { 3173 - struct device *dev; 3174 - struct spi_controller *ctlr = NULL; 3175 - 3176 - dev = class_find_device(&spi_master_class, NULL, &bus_num, 3177 - __spi_controller_match); 3178 - if (dev) 3179 - ctlr = container_of(dev, struct spi_controller, dev); 3180 - /* reference got in class_find_device */ 3181 - return ctlr; 3182 - } 3183 - EXPORT_SYMBOL_GPL(spi_busnum_to_master); 3184 - 3185 - /*-------------------------------------------------------------------------*/ 3186 - 3187 - /* Core methods for SPI resource management */ 3188 - 3189 - /** 3190 - * spi_res_alloc - allocate a spi resource that is life-cycle managed 3191 - * during the processing of a spi_message while using 3192 - * spi_transfer_one 3193 - * @spi: the spi device for which we allocate memory 3194 - * @release: the release code to execute for this resource 3195 - * @size: size to alloc and return 3196 - * @gfp: GFP allocation flags 3197 - * 3198 - * Return: the pointer to the allocated data 3199 - * 3200 - * This may get enhanced in the future to allocate from a memory pool 3201 - * of the @spi_device or @spi_controller to avoid repeated allocations. 
3202 - */ 3203 - void *spi_res_alloc(struct spi_device *spi, 3204 - spi_res_release_t release, 3205 - size_t size, gfp_t gfp) 3206 - { 3207 - struct spi_res *sres; 3208 - 3209 - sres = kzalloc(sizeof(*sres) + size, gfp); 3210 - if (!sres) 3211 - return NULL; 3212 - 3213 - INIT_LIST_HEAD(&sres->entry); 3214 - sres->release = release; 3215 - 3216 - return sres->data; 3217 - } 3218 - EXPORT_SYMBOL_GPL(spi_res_alloc); 3219 - 3220 - /** 3221 - * spi_res_free - free an spi resource 3222 - * @res: pointer to the custom data of a resource 3223 - * 3224 - */ 3225 - void spi_res_free(void *res) 3226 - { 3227 - struct spi_res *sres = container_of(res, struct spi_res, data); 3228 - 3229 - if (!res) 3230 - return; 3231 - 3232 - WARN_ON(!list_empty(&sres->entry)); 3233 - kfree(sres); 3234 - } 3235 - EXPORT_SYMBOL_GPL(spi_res_free); 3236 - 3237 - /** 3238 - * spi_res_add - add a spi_res to the spi_message 3239 - * @message: the spi message 3240 - * @res: the spi_resource 3241 - */ 3242 - void spi_res_add(struct spi_message *message, void *res) 3243 - { 3244 - struct spi_res *sres = container_of(res, struct spi_res, data); 3245 - 3246 - WARN_ON(!list_empty(&sres->entry)); 3247 - list_add_tail(&sres->entry, &message->resources); 3248 - } 3249 - EXPORT_SYMBOL_GPL(spi_res_add); 3250 - 3251 - /** 3252 - * spi_res_release - release all spi resources for this message 3253 - * @ctlr: the @spi_controller 3254 - * @message: the @spi_message 3255 - */ 3256 - void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 3257 - { 3258 - struct spi_res *res, *tmp; 3259 - 3260 - list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { 3261 - if (res->release) 3262 - res->release(ctlr, message, res->data); 3263 - 3264 - list_del(&res->entry); 3265 - 3266 - kfree(res); 3267 - } 3268 - } 3269 - EXPORT_SYMBOL_GPL(spi_res_release); 3270 - 3271 3071 /*-------------------------------------------------------------------------*/ 3272 3072 3273 3073 /* Core methods for 
spi_message alterations */ ··· 3185 3227 * Returns: pointer to @spi_replaced_transfers, 3186 3228 * PTR_ERR(...) in case of errors. 3187 3229 */ 3188 - struct spi_replaced_transfers *spi_replace_transfers( 3230 + static struct spi_replaced_transfers *spi_replace_transfers( 3189 3231 struct spi_message *msg, 3190 3232 struct spi_transfer *xfer_first, 3191 3233 size_t remove, ··· 3277 3319 3278 3320 return rxfer; 3279 3321 } 3280 - EXPORT_SYMBOL_GPL(spi_replace_transfers); 3281 3322 3282 3323 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3283 3324 struct spi_message *msg, ··· 3826 3869 * 3827 3870 * Return: zero on success, else a negative error code. 3828 3871 */ 3829 - int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3872 + static int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3830 3873 { 3831 3874 struct spi_controller *ctlr = spi->controller; 3832 3875 int ret; ··· 3845 3888 return ret; 3846 3889 3847 3890 } 3848 - EXPORT_SYMBOL_GPL(spi_async_locked); 3849 3891 3850 3892 /*-------------------------------------------------------------------------*/ 3851 3893 ··· 4102 4146 4103 4147 /*-------------------------------------------------------------------------*/ 4104 4148 4105 - #if IS_ENABLED(CONFIG_OF) 4149 + #if IS_ENABLED(CONFIG_OF_DYNAMIC) 4106 4150 /* must call put_device() when done with returned spi_device device */ 4107 - struct spi_device *of_find_spi_device_by_node(struct device_node *node) 4151 + static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 4108 4152 { 4109 4153 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); 4110 4154 4111 4155 return dev ? 
to_spi_device(dev) : NULL; 4112 4156 } 4113 - EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); 4114 - #endif /* IS_ENABLED(CONFIG_OF) */ 4115 4157 4116 - #if IS_ENABLED(CONFIG_OF_DYNAMIC) 4117 4158 /* the spi controllers are not using spi_bus, so we find it with another way */ 4118 4159 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 4119 4160 {
+12
include/linux/firmware/xlnx-zynqmp.h
··· 123 123 IOCTL_READ_PGGS = 15, 124 124 /* Set healthy bit value */ 125 125 IOCTL_SET_BOOT_HEALTH_STATUS = 17, 126 + IOCTL_OSPI_MUX_SELECT = 21, 126 127 }; 127 128 128 129 enum pm_query_id { ··· 352 351 ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2, 353 352 }; 354 353 354 + enum ospi_mux_select_type { 355 + PM_OSPI_MUX_SEL_DMA = 0, 356 + PM_OSPI_MUX_SEL_LINEAR = 1, 357 + }; 358 + 355 359 /** 356 360 * struct zynqmp_pm_query_data - PM query data 357 361 * @qid: query ID ··· 393 387 int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data); 394 388 int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value); 395 389 int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type); 390 + int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select); 396 391 int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, 397 392 const enum zynqmp_pm_reset_action assert_flag); 398 393 int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status); ··· 511 504 } 512 505 513 506 static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type) 507 + { 508 + return -ENODEV; 509 + } 510 + 511 + static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select) 514 512 { 515 513 return -ENODEV; 516 514 }
-55
include/linux/spi/spi.h
··· 78 78 unsigned long transfers_split_maxsize; 79 79 }; 80 80 81 - void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 82 - struct spi_transfer *xfer, 83 - struct spi_controller *ctlr); 84 - 85 81 #define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \ 86 82 do { \ 87 83 unsigned long flags; \ ··· 759 763 struct spi_controller *ctlr); 760 764 extern void spi_unregister_controller(struct spi_controller *ctlr); 761 765 762 - extern struct spi_controller *spi_busnum_to_master(u16 busnum); 763 - 764 766 /* 765 767 * SPI resource management while processing a SPI message 766 768 */ ··· 781 787 spi_res_release_t release; 782 788 unsigned long long data[]; /* guarantee ull alignment */ 783 789 }; 784 - 785 - extern void *spi_res_alloc(struct spi_device *spi, 786 - spi_res_release_t release, 787 - size_t size, gfp_t gfp); 788 - extern void spi_res_add(struct spi_message *message, void *res); 789 - extern void spi_res_free(void *res); 790 - 791 - extern void spi_res_release(struct spi_controller *ctlr, 792 - struct spi_message *message); 793 790 794 791 /*---------------------------------------------------------------------------*/ 795 792 ··· 1099 1114 1100 1115 extern int spi_setup(struct spi_device *spi); 1101 1116 extern int spi_async(struct spi_device *spi, struct spi_message *message); 1102 - extern int spi_async_locked(struct spi_device *spi, 1103 - struct spi_message *message); 1104 1117 extern int spi_slave_abort(struct spi_device *spi); 1105 1118 1106 1119 static inline size_t ··· 1180 1197 size_t inserted; 1181 1198 struct spi_transfer inserted_transfers[]; 1182 1199 }; 1183 - 1184 - extern struct spi_replaced_transfers *spi_replace_transfers( 1185 - struct spi_message *msg, 1186 - struct spi_transfer *xfer_first, 1187 - size_t remove, 1188 - size_t insert, 1189 - spi_replaced_release_t release, 1190 - size_t extradatasize, 1191 - gfp_t gfp); 1192 1200 1193 1201 /*---------------------------------------------------------------------------*/ 
1194 1202 ··· 1452 1478 * use spi_new_device() to describe each device. You can also call 1453 1479 * spi_unregister_device() to start making that device vanish, but 1454 1480 * normally that would be handled by spi_unregister_controller(). 1455 - * 1456 - * You can also use spi_alloc_device() and spi_add_device() to use a two 1457 - * stage registration sequence for each spi_device. This gives the caller 1458 - * some more control over the spi_device structure before it is registered, 1459 - * but requires that caller to initialize fields that would otherwise 1460 - * be defined using the board info. 1461 1481 */ 1462 - extern struct spi_device * 1463 - spi_alloc_device(struct spi_controller *ctlr); 1464 - 1465 - extern int 1466 - spi_add_device(struct spi_device *spi); 1467 - 1468 1482 extern struct spi_device * 1469 1483 spi_new_device(struct spi_controller *, struct spi_board_info *); 1470 1484 ··· 1466 1504 { 1467 1505 return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers); 1468 1506 } 1469 - 1470 - /* OF support code */ 1471 - #if IS_ENABLED(CONFIG_OF) 1472 - 1473 - /* must call put_device() when done with returned spi_device device */ 1474 - extern struct spi_device * 1475 - of_find_spi_device_by_node(struct device_node *node); 1476 - 1477 - #else 1478 - 1479 - static inline struct spi_device * 1480 - of_find_spi_device_by_node(struct device_node *node) 1481 - { 1482 - return NULL; 1483 - } 1484 - 1485 - #endif /* IS_ENABLED(CONFIG_OF) */ 1486 1507 1487 1508 /* Compatibility layer */ 1488 1509 #define spi_master spi_controller