Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6

* 'next-spi' of git://git.secretlab.ca/git/linux-2.6: (53 commits)
spi/omap2_mcspi: Verify TX reg is empty after TX only xfer with DMA
spi/omap2_mcspi: disable channel after TX_ONLY transfer in PIO mode
spi/bfin_spi: namespace local structs
spi/bfin_spi: init early
spi/bfin_spi: check per-transfer bits_per_word
spi/bfin_spi: warn when CS is driven by hardware (CPHA=0)
spi/bfin_spi: cs should be always low when a new transfer begins
spi/bfin_spi: fix typo in comment
spi/bfin_spi: reject unsupported SPI modes
spi/bfin_spi: use dma_disable_irq_nosync() in irq handler
spi/bfin_spi: combine duplicate SPI_CTL read/write logic
spi/bfin_spi: reset ctl_reg bits when setup is run again on a device
spi/bfin_spi: push all size checks into the transfer function
spi/bfin_spi: use nosync when disabling the IRQ from the IRQ handler
spi/bfin_spi: sync hardware state before reprogramming everything
spi/bfin_spi: save/restore state when suspending/resuming
spi/bfin_spi: redo GPIO CS handling
Blackfin: SPI: expand SPI bitmasks
spi/bfin_spi: use the SPI namespaced bit names
spi/bfin_spi: drop extra memory we don't need
...

+3857 -1247
+23 -1
Documentation/powerpc/dts-bindings/fsl/spi.txt
··· 1 1 * SPI (Serial Peripheral Interface) 2 2 3 3 Required properties: 4 - - cell-index : SPI controller index. 4 + - cell-index : QE SPI subblock index. 5 + 0: QE subblock SPI1 6 + 1: QE subblock SPI2 5 7 - compatible : should be "fsl,spi". 6 8 - mode : the SPI operation mode, it can be "cpu" or "cpu-qe". 7 9 - reg : Offset and length of the register set for the device ··· 30 28 mode = "cpu"; 31 29 gpios = <&gpio 18 1 // device reg=<0> 32 30 &gpio 19 1>; // device reg=<1> 31 + }; 32 + 33 + 34 + * eSPI (Enhanced Serial Peripheral Interface) 35 + 36 + Required properties: 37 + - compatible : should be "fsl,mpc8536-espi". 38 + - reg : Offset and length of the register set for the device. 39 + - interrupts : should contain eSPI interrupt, the device has one interrupt. 40 + - fsl,espi-num-chipselects : the number of the chipselect signals. 41 + 42 + Example: 43 + spi@110000 { 44 + #address-cells = <1>; 45 + #size-cells = <0>; 46 + compatible = "fsl,mpc8536-espi"; 47 + reg = <0x110000 0x1000>; 48 + interrupts = <53 0x2>; 49 + interrupt-parent = <&mpic>; 50 + fsl,espi-num-chipselects = <4>; 33 51 };
+1 -6
arch/arm/mach-lpc32xx/phy3250.c
··· 172 172 } 173 173 174 174 static struct pl022_config_chip spi0_chip_info = { 175 - .lbm = LOOPBACK_DISABLED, 176 175 .com_mode = INTERRUPT_TRANSFER, 177 176 .iface = SSP_INTERFACE_MOTOROLA_SPI, 178 177 .hierarchy = SSP_MASTER, 179 178 .slave_tx_disable = 0, 180 - .endian_tx = SSP_TX_LSB, 181 - .endian_rx = SSP_RX_LSB, 182 - .data_size = SSP_DATA_BITS_8, 183 179 .rx_lev_trig = SSP_RX_4_OR_MORE_ELEM, 184 180 .tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC, 185 - .clk_phase = SSP_CLK_FIRST_EDGE, 186 - .clk_pol = SSP_CLK_POL_IDLE_LOW, 187 181 .ctrl_len = SSP_BITS_8, 188 182 .wait_state = SSP_MWIRE_WAIT_ZERO, 189 183 .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, ··· 233 239 .max_speed_hz = 5000000, 234 240 .bus_num = 0, 235 241 .chip_select = 0, 242 + .mode = SPI_MODE_0, 236 243 .platform_data = &eeprom, 237 244 .controller_data = &spi0_chip_info, 238 245 },
+2 -3
arch/arm/mach-u300/dummyspichip.c
··· 46 46 * struct, this is just used here to alter the behaviour of the chip 47 47 * in order to perform tests. 48 48 */ 49 - struct pl022_config_chip *chip_info = spi->controller_data; 50 49 int status; 51 50 u8 txbuf[14] = {0xDE, 0xAD, 0xBE, 0xEF, 0x2B, 0xAD, 52 51 0xCA, 0xFE, 0xBA, 0xBE, 0xB1, 0x05, ··· 71 72 * Force chip to 8 bit mode 72 73 * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC! 73 74 */ 74 - chip_info->data_size = SSP_DATA_BITS_8; 75 + spi->bits_per_word = 8; 75 76 /* You should NOT DO THIS EITHER */ 76 77 spi->master->setup(spi); 77 78 ··· 158 159 * Force chip to 16 bit mode 159 160 * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC! 160 161 */ 161 - chip_info->data_size = SSP_DATA_BITS_16; 162 + spi->bits_per_word = 16; 162 163 /* You should NOT DO THIS EITHER */ 163 164 spi->master->setup(spi); 164 165
+1 -9
arch/arm/mach-u300/spi.c
··· 30 30 } 31 31 32 32 struct pl022_config_chip dummy_chip_info = { 33 - /* Nominally this is LOOPBACK_DISABLED, but this is our dummy chip! */ 34 - .lbm = LOOPBACK_ENABLED, 35 33 /* 36 34 * available POLLING_TRANSFER and INTERRUPT_TRANSFER, 37 35 * DMA_TRANSFER does not work ··· 40 42 .hierarchy = SSP_MASTER, 41 43 /* 0 = drive TX even as slave, 1 = do not drive TX as slave */ 42 44 .slave_tx_disable = 0, 43 - /* LSB first */ 44 - .endian_tx = SSP_TX_LSB, 45 - .endian_rx = SSP_RX_LSB, 46 - .data_size = SSP_DATA_BITS_8, /* used to be 12 in some default */ 47 45 .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, 48 46 .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, 49 - .clk_phase = SSP_CLK_SECOND_EDGE, 50 - .clk_pol = SSP_CLK_POL_IDLE_LOW, 51 47 .ctrl_len = SSP_BITS_12, 52 48 .wait_state = SSP_MWIRE_WAIT_ZERO, 53 49 .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, ··· 67 75 .bus_num = 0, /* Only one bus on this chip */ 68 76 .chip_select = 0, 69 77 /* Means SPI_CS_HIGH, change if e.g low CS */ 70 - .mode = 0, 78 + .mode = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP, 71 79 }, 72 80 #endif 73 81 };
+1 -7
arch/arm/mach-ux500/board-mop500.c
··· 55 55 } 56 56 57 57 struct pl022_config_chip ab4500_chip_info = { 58 - .lbm = LOOPBACK_DISABLED, 59 58 .com_mode = INTERRUPT_TRANSFER, 60 59 .iface = SSP_INTERFACE_MOTOROLA_SPI, 61 60 /* we can act as master only */ 62 61 .hierarchy = SSP_MASTER, 63 62 .slave_tx_disable = 0, 64 - .endian_rx = SSP_RX_MSB, 65 - .endian_tx = SSP_TX_MSB, 66 - .data_size = SSP_DATA_BITS_24, 67 63 .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, 68 64 .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, 69 - .clk_phase = SSP_CLK_SECOND_EDGE, 70 - .clk_pol = SSP_CLK_POL_IDLE_HIGH, 71 65 .cs_control = ab4500_spi_cs_control, 72 66 }; 73 67 ··· 77 83 .max_speed_hz = 12000000, 78 84 .bus_num = 0, 79 85 .chip_select = 0, 80 - .mode = SPI_MODE_0, 86 + .mode = SPI_MODE_3, 81 87 .irq = IRQ_DB8500_AB8500, 82 88 }, 83 89 };
+3
arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
··· 32 32 * struct s3c64xx_spi_info - SPI Controller defining structure 33 33 * @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field. 34 34 * @src_clk_name: Platform name of the corresponding clock. 35 + * @clk_from_cmu: If the SPI clock/prescalar control block is present 36 + * by the platform's clock-management-unit and not in SPI controller. 35 37 * @num_cs: Number of CS this controller emulates. 36 38 * @cfg_gpio: Configure pins for this SPI controller. 37 39 * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6 ··· 43 41 struct s3c64xx_spi_info { 44 42 int src_clk_nr; 45 43 char *src_clk_name; 44 + bool clk_from_cmu; 46 45 47 46 int num_cs; 48 47
+8 -73
arch/blackfin/include/asm/bfin5xx_spi.h
··· 11 11 12 12 #define MIN_SPI_BAUD_VAL 2 13 13 14 - #define SPI_READ 0 15 - #define SPI_WRITE 1 16 - 17 - #define SPI_CTRL_OFF 0x0 18 - #define SPI_FLAG_OFF 0x4 19 - #define SPI_STAT_OFF 0x8 20 - #define SPI_TXBUFF_OFF 0xc 21 - #define SPI_RXBUFF_OFF 0x10 22 - #define SPI_BAUD_OFF 0x14 23 - #define SPI_SHAW_OFF 0x18 24 - 25 - 26 14 #define BIT_CTL_ENABLE 0x4000 27 15 #define BIT_CTL_OPENDRAIN 0x2000 28 16 #define BIT_CTL_MASTER 0x1000 29 - #define BIT_CTL_POLAR 0x0800 30 - #define BIT_CTL_PHASE 0x0400 31 - #define BIT_CTL_BITORDER 0x0200 17 + #define BIT_CTL_CPOL 0x0800 18 + #define BIT_CTL_CPHA 0x0400 19 + #define BIT_CTL_LSBF 0x0200 32 20 #define BIT_CTL_WORDSIZE 0x0100 33 - #define BIT_CTL_MISOENABLE 0x0020 21 + #define BIT_CTL_EMISO 0x0020 22 + #define BIT_CTL_PSSE 0x0010 23 + #define BIT_CTL_GM 0x0008 24 + #define BIT_CTL_SZ 0x0004 34 25 #define BIT_CTL_RXMOD 0x0000 35 26 #define BIT_CTL_TXMOD 0x0001 36 27 #define BIT_CTL_TIMOD_DMA_TX 0x0003 ··· 41 50 #define BIT_STU_SENDOVER 0x0001 42 51 #define BIT_STU_RECVFULL 0x0020 43 52 44 - #define CFG_SPI_ENABLE 1 45 - #define CFG_SPI_DISABLE 0 46 - 47 - #define CFG_SPI_OUTENABLE 1 48 - #define CFG_SPI_OUTDISABLE 0 49 - 50 - #define CFG_SPI_ACTLOW 1 51 - #define CFG_SPI_ACTHIGH 0 52 - 53 - #define CFG_SPI_PHASESTART 1 54 - #define CFG_SPI_PHASEMID 0 55 - 56 - #define CFG_SPI_MASTER 1 57 - #define CFG_SPI_SLAVE 0 58 - 59 - #define CFG_SPI_SENELAST 0 60 - #define CFG_SPI_SENDZERO 1 61 - 62 - #define CFG_SPI_RCVFLUSH 1 63 - #define CFG_SPI_RCVDISCARD 0 64 - 65 - #define CFG_SPI_LSBFIRST 1 66 - #define CFG_SPI_MSBFIRST 0 67 - 68 - #define CFG_SPI_WORDSIZE16 1 69 - #define CFG_SPI_WORDSIZE8 0 70 - 71 - #define CFG_SPI_MISOENABLE 1 72 - #define CFG_SPI_MISODISABLE 0 73 - 74 - #define CFG_SPI_READ 0x00 75 - #define CFG_SPI_WRITE 0x01 76 - #define CFG_SPI_DMAREAD 0x02 77 - #define CFG_SPI_DMAWRITE 0x03 78 - 79 - #define CFG_SPI_CSCLEARALL 0 80 - #define CFG_SPI_CHIPSEL1 1 81 - #define CFG_SPI_CHIPSEL2 2 82 - #define 
CFG_SPI_CHIPSEL3 3 83 - #define CFG_SPI_CHIPSEL4 4 84 - #define CFG_SPI_CHIPSEL5 5 85 - #define CFG_SPI_CHIPSEL6 6 86 - #define CFG_SPI_CHIPSEL7 7 87 - 88 - #define CFG_SPI_CS1VALUE 1 89 - #define CFG_SPI_CS2VALUE 2 90 - #define CFG_SPI_CS3VALUE 3 91 - #define CFG_SPI_CS4VALUE 4 92 - #define CFG_SPI_CS5VALUE 5 93 - #define CFG_SPI_CS6VALUE 6 94 - #define CFG_SPI_CS7VALUE 7 95 - 96 - #define CMD_SPI_SET_BAUDRATE 2 97 - #define CMD_SPI_GET_SYSTEMCLOCK 25 98 - #define CMD_SPI_SET_WRITECONTINUOUS 26 53 + #define MAX_CTRL_CS 8 /* cs in spi controller */ 99 54 100 55 /* device.platform_data for SSP controller devices */ 101 56 struct bfin5xx_spi_master { ··· 57 120 u16 ctl_reg; 58 121 u8 enable_dma; 59 122 u8 bits_per_word; 60 - u8 cs_change_per_word; 61 123 u16 cs_chg_udelay; /* Some devices require 16-bit delays */ 62 - u32 cs_gpio; 63 124 /* Value to send if no TX value is supplied, usually 0x0 or 0xFFFF */ 64 125 u16 idle_tx_val; 65 126 u8 pio_interrupt; /* Enable spi data irq */
+52
arch/powerpc/boot/dts/mpc8536ds.dts
··· 108 108 }; 109 109 }; 110 110 111 + spi@7000 { 112 + #address-cells = <1>; 113 + #size-cells = <0>; 114 + compatible = "fsl,mpc8536-espi"; 115 + reg = <0x7000 0x1000>; 116 + interrupts = <59 0x2>; 117 + interrupt-parent = <&mpic>; 118 + fsl,espi-num-chipselects = <4>; 119 + 120 + flash@0 { 121 + #address-cells = <1>; 122 + #size-cells = <1>; 123 + compatible = "spansion,s25sl12801"; 124 + reg = <0>; 125 + spi-max-frequency = <40000000>; 126 + partition@u-boot { 127 + label = "u-boot"; 128 + reg = <0x00000000 0x00100000>; 129 + read-only; 130 + }; 131 + partition@kernel { 132 + label = "kernel"; 133 + reg = <0x00100000 0x00500000>; 134 + read-only; 135 + }; 136 + partition@dtb { 137 + label = "dtb"; 138 + reg = <0x00600000 0x00100000>; 139 + read-only; 140 + }; 141 + partition@fs { 142 + label = "file system"; 143 + reg = <0x00700000 0x00900000>; 144 + }; 145 + }; 146 + flash@1 { 147 + compatible = "spansion,s25sl12801"; 148 + reg = <1>; 149 + spi-max-frequency = <40000000>; 150 + }; 151 + flash@2 { 152 + compatible = "spansion,s25sl12801"; 153 + reg = <2>; 154 + spi-max-frequency = <40000000>; 155 + }; 156 + flash@3 { 157 + compatible = "spansion,s25sl12801"; 158 + reg = <3>; 159 + spi-max-frequency = <40000000>; 160 + }; 161 + }; 162 + 111 163 dma@21300 { 112 164 #address-cells = <1>; 113 165 #size-cells = <1>;
+4 -7
arch/powerpc/boot/dts/p4080ds.dts
··· 236 236 }; 237 237 238 238 spi@110000 { 239 - cell-index = <0>; 240 239 #address-cells = <1>; 241 240 #size-cells = <0>; 242 - compatible = "fsl,espi"; 241 + compatible = "fsl,p4080-espi", "fsl,mpc8536-espi"; 243 242 reg = <0x110000 0x1000>; 244 243 interrupts = <53 0x2>; 245 244 interrupt-parent = <&mpic>; 246 - espi,num-ss-bits = <4>; 247 - mode = "cpu"; 245 + fsl,espi-num-chipselects = <4>; 248 246 249 - fsl_m25p80@0 { 247 + flash@0 { 250 248 #address-cells = <1>; 251 249 #size-cells = <1>; 252 - compatible = "fsl,espi-flash"; 250 + compatible = "spansion,s25sl12801"; 253 251 reg = <0>; 254 - linux,modalias = "fsl_m25p80"; 255 252 spi-max-frequency = <40000000>; /* input clock */ 256 253 partition@u-boot { 257 254 label = "u-boot";
+5
drivers/mfd/ab8500-spi.c
··· 83 83 struct ab8500 *ab8500; 84 84 int ret; 85 85 86 + spi->bits_per_word = 24; 87 + ret = spi_setup(spi); 88 + if (ret < 0) 89 + return ret; 90 + 86 91 ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL); 87 92 if (!ab8500) 88 93 return -ENOMEM;
+26 -4
drivers/spi/Kconfig
··· 182 182 This enables using the Freescale MPC5121 Programmable Serial 183 183 Controller in SPI master mode. 184 184 185 - config SPI_MPC8xxx 186 - tristate "Freescale MPC8xxx SPI controller" 185 + config SPI_FSL_LIB 186 + tristate 187 187 depends on FSL_SOC 188 + 189 + config SPI_FSL_SPI 190 + tristate "Freescale SPI controller" 191 + depends on FSL_SOC 192 + select SPI_FSL_LIB 188 193 help 189 - This enables using the Freescale MPC8xxx SPI controllers in master 190 - mode. 194 + This enables using the Freescale SPI controllers in master mode. 195 + MPC83xx platform uses the controller in cpu mode or CPM/QE mode. 196 + MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. 197 + 198 + config SPI_FSL_ESPI 199 + tristate "Freescale eSPI controller" 200 + depends on FSL_SOC 201 + select SPI_FSL_LIB 202 + help 203 + This enables using the Freescale eSPI controllers in master mode. 204 + From MPC8536, 85xx platform uses the controller, and all P10xx, 205 + P20xx, P30xx,P40xx, P50xx uses this controller. 191 206 192 207 config SPI_OMAP_UWIRE 193 208 tristate "OMAP1 MicroWire" ··· 312 297 depends on ARCH_STMP3XXX && SPI_MASTER 313 298 help 314 299 SPI driver for Freescale STMP37xx/378x SoC SSP interface 300 + 301 + config SPI_TOPCLIFF_PCH 302 + tristate "Topcliff PCH SPI Controller" 303 + depends on PCI 304 + help 305 + SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus 306 + used in some x86 embedded processors. 315 307 316 308 config SPI_TXX9 317 309 tristate "Toshiba TXx9 SPI controller"
+5 -4
drivers/spi/Makefile
··· 2 2 # Makefile for kernel SPI drivers. 3 3 # 4 4 5 - ifeq ($(CONFIG_SPI_DEBUG),y) 6 - EXTRA_CFLAGS += -DDEBUG 7 - endif 5 + ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG 8 6 9 7 # small core, mostly translating board-specific 10 8 # config declarations into driver model code ··· 32 34 obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o 33 35 obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 34 36 obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o 35 - obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o 37 + obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o 38 + obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o 39 + obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o 36 40 obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o 37 41 obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 38 42 obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o 39 43 obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o 44 + obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o 40 45 obj-$(CONFIG_SPI_TXX9) += spi_txx9.o 41 46 obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o 42 47 obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
+537 -218
drivers/spi/amba-pl022.c
··· 27 27 /* 28 28 * TODO: 29 29 * - add timeout on polled transfers 30 - * - add generic DMA framework support 31 30 */ 32 31 33 32 #include <linux/init.h> ··· 44 45 #include <linux/amba/pl022.h> 45 46 #include <linux/io.h> 46 47 #include <linux/slab.h> 48 + #include <linux/dmaengine.h> 49 + #include <linux/dma-mapping.h> 50 + #include <linux/scatterlist.h> 47 51 48 52 /* 49 53 * This macro is used to define some register default values. ··· 383 381 enum ssp_reading read; 384 382 enum ssp_writing write; 385 383 u32 exp_fifo_level; 384 + /* DMA settings */ 385 + #ifdef CONFIG_DMA_ENGINE 386 + struct dma_chan *dma_rx_channel; 387 + struct dma_chan *dma_tx_channel; 388 + struct sg_table sgt_rx; 389 + struct sg_table sgt_tx; 390 + char *dummypage; 391 + #endif 386 392 }; 387 393 388 394 /** ··· 416 406 u16 dmacr; 417 407 u16 cpsr; 418 408 u8 n_bytes; 419 - u8 enable_dma:1; 409 + bool enable_dma; 420 410 enum ssp_reading read; 421 411 enum ssp_writing write; 422 412 void (*cs_control) (u32 command); ··· 773 763 } 774 764 return STATE_DONE; 775 765 } 766 + 767 + /* 768 + * This DMA functionality is only compiled in if we have 769 + * access to the generic DMA devices/DMA engine. 
770 + */ 771 + #ifdef CONFIG_DMA_ENGINE 772 + static void unmap_free_dma_scatter(struct pl022 *pl022) 773 + { 774 + /* Unmap and free the SG tables */ 775 + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, 776 + pl022->sgt_tx.nents, DMA_TO_DEVICE); 777 + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, 778 + pl022->sgt_rx.nents, DMA_FROM_DEVICE); 779 + sg_free_table(&pl022->sgt_rx); 780 + sg_free_table(&pl022->sgt_tx); 781 + } 782 + 783 + static void dma_callback(void *data) 784 + { 785 + struct pl022 *pl022 = data; 786 + struct spi_message *msg = pl022->cur_msg; 787 + 788 + BUG_ON(!pl022->sgt_rx.sgl); 789 + 790 + #ifdef VERBOSE_DEBUG 791 + /* 792 + * Optionally dump out buffers to inspect contents, this is 793 + * good if you want to convince yourself that the loopback 794 + * read/write contents are the same, when adopting to a new 795 + * DMA engine. 796 + */ 797 + { 798 + struct scatterlist *sg; 799 + unsigned int i; 800 + 801 + dma_sync_sg_for_cpu(&pl022->adev->dev, 802 + pl022->sgt_rx.sgl, 803 + pl022->sgt_rx.nents, 804 + DMA_FROM_DEVICE); 805 + 806 + for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { 807 + dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); 808 + print_hex_dump(KERN_ERR, "SPI RX: ", 809 + DUMP_PREFIX_OFFSET, 810 + 16, 811 + 1, 812 + sg_virt(sg), 813 + sg_dma_len(sg), 814 + 1); 815 + } 816 + for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { 817 + dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); 818 + print_hex_dump(KERN_ERR, "SPI TX: ", 819 + DUMP_PREFIX_OFFSET, 820 + 16, 821 + 1, 822 + sg_virt(sg), 823 + sg_dma_len(sg), 824 + 1); 825 + } 826 + } 827 + #endif 828 + 829 + unmap_free_dma_scatter(pl022); 830 + 831 + /* Update total bytes transfered */ 832 + msg->actual_length += pl022->cur_transfer->len; 833 + if (pl022->cur_transfer->cs_change) 834 + pl022->cur_chip-> 835 + cs_control(SSP_CHIP_DESELECT); 836 + 837 + /* Move to next transfer */ 838 + msg->state = next_transfer(pl022); 839 + 
tasklet_schedule(&pl022->pump_transfers); 840 + } 841 + 842 + static void setup_dma_scatter(struct pl022 *pl022, 843 + void *buffer, 844 + unsigned int length, 845 + struct sg_table *sgtab) 846 + { 847 + struct scatterlist *sg; 848 + int bytesleft = length; 849 + void *bufp = buffer; 850 + int mapbytes; 851 + int i; 852 + 853 + if (buffer) { 854 + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { 855 + /* 856 + * If there are less bytes left than what fits 857 + * in the current page (plus page alignment offset) 858 + * we just feed in this, else we stuff in as much 859 + * as we can. 860 + */ 861 + if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) 862 + mapbytes = bytesleft; 863 + else 864 + mapbytes = PAGE_SIZE - offset_in_page(bufp); 865 + sg_set_page(sg, virt_to_page(bufp), 866 + mapbytes, offset_in_page(bufp)); 867 + bufp += mapbytes; 868 + bytesleft -= mapbytes; 869 + dev_dbg(&pl022->adev->dev, 870 + "set RX/TX target page @ %p, %d bytes, %d left\n", 871 + bufp, mapbytes, bytesleft); 872 + } 873 + } else { 874 + /* Map the dummy buffer on every page */ 875 + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { 876 + if (bytesleft < PAGE_SIZE) 877 + mapbytes = bytesleft; 878 + else 879 + mapbytes = PAGE_SIZE; 880 + sg_set_page(sg, virt_to_page(pl022->dummypage), 881 + mapbytes, 0); 882 + bytesleft -= mapbytes; 883 + dev_dbg(&pl022->adev->dev, 884 + "set RX/TX to dummy page %d bytes, %d left\n", 885 + mapbytes, bytesleft); 886 + 887 + } 888 + } 889 + BUG_ON(bytesleft); 890 + } 891 + 892 + /** 893 + * configure_dma - configures the channels for the next transfer 894 + * @pl022: SSP driver's private data structure 895 + */ 896 + static int configure_dma(struct pl022 *pl022) 897 + { 898 + struct dma_slave_config rx_conf = { 899 + .src_addr = SSP_DR(pl022->phybase), 900 + .direction = DMA_FROM_DEVICE, 901 + .src_maxburst = pl022->vendor->fifodepth >> 1, 902 + }; 903 + struct dma_slave_config tx_conf = { 904 + .dst_addr = SSP_DR(pl022->phybase), 905 + .direction = 
DMA_TO_DEVICE, 906 + .dst_maxburst = pl022->vendor->fifodepth >> 1, 907 + }; 908 + unsigned int pages; 909 + int ret; 910 + int sglen; 911 + struct dma_chan *rxchan = pl022->dma_rx_channel; 912 + struct dma_chan *txchan = pl022->dma_tx_channel; 913 + struct dma_async_tx_descriptor *rxdesc; 914 + struct dma_async_tx_descriptor *txdesc; 915 + dma_cookie_t cookie; 916 + 917 + /* Check that the channels are available */ 918 + if (!rxchan || !txchan) 919 + return -ENODEV; 920 + 921 + switch (pl022->read) { 922 + case READING_NULL: 923 + /* Use the same as for writing */ 924 + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 925 + break; 926 + case READING_U8: 927 + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 928 + break; 929 + case READING_U16: 930 + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 931 + break; 932 + case READING_U32: 933 + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 934 + break; 935 + } 936 + 937 + switch (pl022->write) { 938 + case WRITING_NULL: 939 + /* Use the same as for reading */ 940 + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 941 + break; 942 + case WRITING_U8: 943 + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 944 + break; 945 + case WRITING_U16: 946 + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 947 + break; 948 + case WRITING_U32: 949 + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;; 950 + break; 951 + } 952 + 953 + /* SPI pecularity: we need to read and write the same width */ 954 + if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 955 + rx_conf.src_addr_width = tx_conf.dst_addr_width; 956 + if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) 957 + tx_conf.dst_addr_width = rx_conf.src_addr_width; 958 + BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); 959 + 960 + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, 961 + (unsigned long) &rx_conf); 962 + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, 963 + (unsigned long) &tx_conf); 964 + 965 
+ /* Create sglists for the transfers */ 966 + pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; 967 + dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); 968 + 969 + ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); 970 + if (ret) 971 + goto err_alloc_rx_sg; 972 + 973 + ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); 974 + if (ret) 975 + goto err_alloc_tx_sg; 976 + 977 + /* Fill in the scatterlists for the RX+TX buffers */ 978 + setup_dma_scatter(pl022, pl022->rx, 979 + pl022->cur_transfer->len, &pl022->sgt_rx); 980 + setup_dma_scatter(pl022, pl022->tx, 981 + pl022->cur_transfer->len, &pl022->sgt_tx); 982 + 983 + /* Map DMA buffers */ 984 + sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, 985 + pl022->sgt_rx.nents, DMA_FROM_DEVICE); 986 + if (!sglen) 987 + goto err_rx_sgmap; 988 + 989 + sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, 990 + pl022->sgt_tx.nents, DMA_TO_DEVICE); 991 + if (!sglen) 992 + goto err_tx_sgmap; 993 + 994 + /* Send both scatterlists */ 995 + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 996 + pl022->sgt_rx.sgl, 997 + pl022->sgt_rx.nents, 998 + DMA_FROM_DEVICE, 999 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1000 + if (!rxdesc) 1001 + goto err_rxdesc; 1002 + 1003 + txdesc = txchan->device->device_prep_slave_sg(txchan, 1004 + pl022->sgt_tx.sgl, 1005 + pl022->sgt_tx.nents, 1006 + DMA_TO_DEVICE, 1007 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1008 + if (!txdesc) 1009 + goto err_txdesc; 1010 + 1011 + /* Put the callback on the RX transfer only, that should finish last */ 1012 + rxdesc->callback = dma_callback; 1013 + rxdesc->callback_param = pl022; 1014 + 1015 + /* Submit and fire RX and TX with TX last so we're ready to read! 
*/ 1016 + cookie = rxdesc->tx_submit(rxdesc); 1017 + if (dma_submit_error(cookie)) 1018 + goto err_submit_rx; 1019 + cookie = txdesc->tx_submit(txdesc); 1020 + if (dma_submit_error(cookie)) 1021 + goto err_submit_tx; 1022 + rxchan->device->device_issue_pending(rxchan); 1023 + txchan->device->device_issue_pending(txchan); 1024 + 1025 + return 0; 1026 + 1027 + err_submit_tx: 1028 + err_submit_rx: 1029 + err_txdesc: 1030 + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); 1031 + err_rxdesc: 1032 + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); 1033 + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, 1034 + pl022->sgt_tx.nents, DMA_TO_DEVICE); 1035 + err_tx_sgmap: 1036 + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, 1037 + pl022->sgt_tx.nents, DMA_FROM_DEVICE); 1038 + err_rx_sgmap: 1039 + sg_free_table(&pl022->sgt_tx); 1040 + err_alloc_tx_sg: 1041 + sg_free_table(&pl022->sgt_rx); 1042 + err_alloc_rx_sg: 1043 + return -ENOMEM; 1044 + } 1045 + 1046 + static int __init pl022_dma_probe(struct pl022 *pl022) 1047 + { 1048 + dma_cap_mask_t mask; 1049 + 1050 + /* Try to acquire a generic DMA engine slave channel */ 1051 + dma_cap_zero(mask); 1052 + dma_cap_set(DMA_SLAVE, mask); 1053 + /* 1054 + * We need both RX and TX channels to do DMA, else do none 1055 + * of them. 
1056 + */ 1057 + pl022->dma_rx_channel = dma_request_channel(mask, 1058 + pl022->master_info->dma_filter, 1059 + pl022->master_info->dma_rx_param); 1060 + if (!pl022->dma_rx_channel) { 1061 + dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); 1062 + goto err_no_rxchan; 1063 + } 1064 + 1065 + pl022->dma_tx_channel = dma_request_channel(mask, 1066 + pl022->master_info->dma_filter, 1067 + pl022->master_info->dma_tx_param); 1068 + if (!pl022->dma_tx_channel) { 1069 + dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); 1070 + goto err_no_txchan; 1071 + } 1072 + 1073 + pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1074 + if (!pl022->dummypage) { 1075 + dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); 1076 + goto err_no_dummypage; 1077 + } 1078 + 1079 + dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", 1080 + dma_chan_name(pl022->dma_rx_channel), 1081 + dma_chan_name(pl022->dma_tx_channel)); 1082 + 1083 + return 0; 1084 + 1085 + err_no_dummypage: 1086 + dma_release_channel(pl022->dma_tx_channel); 1087 + err_no_txchan: 1088 + dma_release_channel(pl022->dma_rx_channel); 1089 + pl022->dma_rx_channel = NULL; 1090 + err_no_rxchan: 1091 + return -ENODEV; 1092 + } 1093 + 1094 + static void terminate_dma(struct pl022 *pl022) 1095 + { 1096 + struct dma_chan *rxchan = pl022->dma_rx_channel; 1097 + struct dma_chan *txchan = pl022->dma_tx_channel; 1098 + 1099 + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); 1100 + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); 1101 + unmap_free_dma_scatter(pl022); 1102 + } 1103 + 1104 + static void pl022_dma_remove(struct pl022 *pl022) 1105 + { 1106 + if (pl022->busy) 1107 + terminate_dma(pl022); 1108 + if (pl022->dma_tx_channel) 1109 + dma_release_channel(pl022->dma_tx_channel); 1110 + if (pl022->dma_rx_channel) 1111 + dma_release_channel(pl022->dma_rx_channel); 1112 + kfree(pl022->dummypage); 1113 + } 1114 + 1115 + #else 1116 + static inline int configure_dma(struct pl022 *pl022) 1117 + { 1118 + 
return -ENODEV; 1119 + } 1120 + 1121 + static inline int pl022_dma_probe(struct pl022 *pl022) 1122 + { 1123 + return 0; 1124 + } 1125 + 1126 + static inline void pl022_dma_remove(struct pl022 *pl022) 1127 + { 1128 + } 1129 + #endif 1130 + 776 1131 /** 777 1132 * pl022_interrupt_handler - Interrupt handler for SSP controller 778 1133 * ··· 1169 794 if (unlikely(!irq_status)) 1170 795 return IRQ_NONE; 1171 796 1172 - /* This handles the error code interrupts */ 797 + /* 798 + * This handles the FIFO interrupts, the timeout 799 + * interrupts are flatly ignored, they cannot be 800 + * trusted. 801 + */ 1173 802 if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { 1174 803 /* 1175 804 * Overrun interrupt - bail out since our Data has been 1176 805 * corrupted 1177 806 */ 1178 - dev_err(&pl022->adev->dev, 1179 - "FIFO overrun\n"); 807 + dev_err(&pl022->adev->dev, "FIFO overrun\n"); 1180 808 if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) 1181 809 dev_err(&pl022->adev->dev, 1182 810 "RXFIFO is full\n"); ··· 1274 896 } 1275 897 1276 898 /** 1277 - * pump_transfers - Tasklet function which schedules next interrupt transfer 1278 - * when running in interrupt transfer mode. 899 + * pump_transfers - Tasklet function which schedules next transfer 900 + * when running in interrupt or DMA transfer mode. 1279 901 * @data: SSP driver private data structure 1280 902 * 1281 903 */ ··· 1332 954 } 1333 955 /* Flush the FIFOs and let's go! 
*/ 1334 956 flush(pl022); 957 + 958 + if (pl022->cur_chip->enable_dma) { 959 + if (configure_dma(pl022)) { 960 + dev_dbg(&pl022->adev->dev, 961 + "configuration of DMA failed, fall back to interrupt mode\n"); 962 + goto err_config_dma; 963 + } 964 + return; 965 + } 966 + 967 + err_config_dma: 1335 968 writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 1336 969 } 1337 970 1338 - /** 1339 - * NOT IMPLEMENTED 1340 - * configure_dma - It configures the DMA pipes for DMA transfers 1341 - * @data: SSP driver's private data structure 1342 - * 1343 - */ 1344 - static int configure_dma(void *data) 971 + static void do_interrupt_dma_transfer(struct pl022 *pl022) 1345 972 { 1346 - struct pl022 *pl022 = data; 1347 - dev_dbg(&pl022->adev->dev, "configure DMA\n"); 1348 - return -ENOTSUPP; 1349 - } 1350 - 1351 - /** 1352 - * do_dma_transfer - It handles transfers of the current message 1353 - * if it is DMA xfer. 1354 - * NOT FULLY IMPLEMENTED 1355 - * @data: SSP driver's private data structure 1356 - */ 1357 - static void do_dma_transfer(void *data) 1358 - { 1359 - struct pl022 *pl022 = data; 1360 - 1361 - if (configure_dma(data)) { 1362 - dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); 1363 - goto err_config_dma; 1364 - } 1365 - 1366 - /* TODO: Implememt DMA setup of pipes here */ 1367 - 1368 - /* Enable target chip, set up transfer */ 1369 - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); 1370 - if (set_up_next_transfer(pl022, pl022->cur_transfer)) { 1371 - /* Error path */ 1372 - pl022->cur_msg->state = STATE_ERROR; 1373 - pl022->cur_msg->status = -EIO; 1374 - giveback(pl022); 1375 - return; 1376 - } 1377 - /* Enable SSP */ 1378 - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1379 - SSP_CR1(pl022->virtbase)); 1380 - 1381 - /* TODO: Enable the DMA transfer here */ 1382 - return; 1383 - 1384 - err_config_dma: 1385 - pl022->cur_msg->state = STATE_ERROR; 1386 - pl022->cur_msg->status = -EIO; 1387 - giveback(pl022); 1388 - return; 1389 - } 1390 - 
1391 - static void do_interrupt_transfer(void *data) 1392 - { 1393 - struct pl022 *pl022 = data; 973 + u32 irqflags = ENABLE_ALL_INTERRUPTS; 1394 974 1395 975 /* Enable target chip */ 1396 976 pl022->cur_chip->cs_control(SSP_CHIP_SELECT); ··· 1359 1023 giveback(pl022); 1360 1024 return; 1361 1025 } 1026 + /* If we're using DMA, set up DMA here */ 1027 + if (pl022->cur_chip->enable_dma) { 1028 + /* Configure DMA transfer */ 1029 + if (configure_dma(pl022)) { 1030 + dev_dbg(&pl022->adev->dev, 1031 + "configuration of DMA failed, fall back to interrupt mode\n"); 1032 + goto err_config_dma; 1033 + } 1034 + /* Disable interrupts in DMA mode, IRQ from DMA controller */ 1035 + irqflags = DISABLE_ALL_INTERRUPTS; 1036 + } 1037 + err_config_dma: 1362 1038 /* Enable SSP, turn on interrupts */ 1363 1039 writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), 1364 1040 SSP_CR1(pl022->virtbase)); 1365 - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); 1041 + writew(irqflags, SSP_IMSC(pl022->virtbase)); 1366 1042 } 1367 1043 1368 - static void do_polling_transfer(void *data) 1044 + static void do_polling_transfer(struct pl022 *pl022) 1369 1045 { 1370 - struct pl022 *pl022 = data; 1371 1046 struct spi_message *message = NULL; 1372 1047 struct spi_transfer *transfer = NULL; 1373 1048 struct spi_transfer *previous = NULL; ··· 1448 1101 * 1449 1102 * This function checks if there is any spi message in the queue that 1450 1103 * needs processing and delegate control to appropriate function 1451 - * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() 1104 + * do_polling_transfer()/do_interrupt_dma_transfer() 1452 1105 * based on the kind of the transfer 1453 1106 * 1454 1107 */ ··· 1497 1150 1498 1151 if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) 1499 1152 do_polling_transfer(pl022); 1500 - else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) 1501 - do_interrupt_transfer(pl022); 1502 1153 else 1503 - do_dma_transfer(pl022); 1154 + 
do_interrupt_dma_transfer(pl022); 1504 1155 } 1505 1156 1506 1157 ··· 1593 1248 } 1594 1249 1595 1250 static int verify_controller_parameters(struct pl022 *pl022, 1596 - struct pl022_config_chip *chip_info) 1251 + struct pl022_config_chip const *chip_info) 1597 1252 { 1598 - if ((chip_info->lbm != LOOPBACK_ENABLED) 1599 - && (chip_info->lbm != LOOPBACK_DISABLED)) { 1600 - dev_err(chip_info->dev, 1601 - "loopback Mode is configured incorrectly\n"); 1602 - return -EINVAL; 1603 - } 1604 1253 if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) 1605 1254 || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { 1606 - dev_err(chip_info->dev, 1255 + dev_err(&pl022->adev->dev, 1607 1256 "interface is configured incorrectly\n"); 1608 1257 return -EINVAL; 1609 1258 } 1610 1259 if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && 1611 1260 (!pl022->vendor->unidir)) { 1612 - dev_err(chip_info->dev, 1261 + dev_err(&pl022->adev->dev, 1613 1262 "unidirectional mode not supported in this " 1614 1263 "hardware version\n"); 1615 1264 return -EINVAL; 1616 1265 } 1617 1266 if ((chip_info->hierarchy != SSP_MASTER) 1618 1267 && (chip_info->hierarchy != SSP_SLAVE)) { 1619 - dev_err(chip_info->dev, 1268 + dev_err(&pl022->adev->dev, 1620 1269 "hierarchy is configured incorrectly\n"); 1621 - return -EINVAL; 1622 - } 1623 - if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN) 1624 - || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) { 1625 - dev_err(chip_info->dev, 1626 - "cpsdvsr is configured incorrectly\n"); 1627 - return -EINVAL; 1628 - } 1629 - if ((chip_info->endian_rx != SSP_RX_MSB) 1630 - && (chip_info->endian_rx != SSP_RX_LSB)) { 1631 - dev_err(chip_info->dev, 1632 - "RX FIFO endianess is configured incorrectly\n"); 1633 - return -EINVAL; 1634 - } 1635 - if ((chip_info->endian_tx != SSP_TX_MSB) 1636 - && (chip_info->endian_tx != SSP_TX_LSB)) { 1637 - dev_err(chip_info->dev, 1638 - "TX FIFO endianess is configured incorrectly\n"); 1639 - return -EINVAL; 1640 - } 1641 - if 
((chip_info->data_size < SSP_DATA_BITS_4) 1642 - || (chip_info->data_size > SSP_DATA_BITS_32)) { 1643 - dev_err(chip_info->dev, 1644 - "DATA Size is configured incorrectly\n"); 1645 1270 return -EINVAL; 1646 1271 } 1647 1272 if ((chip_info->com_mode != INTERRUPT_TRANSFER) 1648 1273 && (chip_info->com_mode != DMA_TRANSFER) 1649 1274 && (chip_info->com_mode != POLLING_TRANSFER)) { 1650 - dev_err(chip_info->dev, 1275 + dev_err(&pl022->adev->dev, 1651 1276 "Communication mode is configured incorrectly\n"); 1652 1277 return -EINVAL; 1653 1278 } 1654 1279 if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) 1655 1280 || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { 1656 - dev_err(chip_info->dev, 1281 + dev_err(&pl022->adev->dev, 1657 1282 "RX FIFO Trigger Level is configured incorrectly\n"); 1658 1283 return -EINVAL; 1659 1284 } 1660 1285 if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) 1661 1286 || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { 1662 - dev_err(chip_info->dev, 1287 + dev_err(&pl022->adev->dev, 1663 1288 "TX FIFO Trigger Level is configured incorrectly\n"); 1664 1289 return -EINVAL; 1665 - } 1666 - if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { 1667 - if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE) 1668 - && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) { 1669 - dev_err(chip_info->dev, 1670 - "Clock Phase is configured incorrectly\n"); 1671 - return -EINVAL; 1672 - } 1673 - if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW) 1674 - && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) { 1675 - dev_err(chip_info->dev, 1676 - "Clock Polarity is configured incorrectly\n"); 1677 - return -EINVAL; 1678 - } 1679 1290 } 1680 1291 if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { 1681 1292 if ((chip_info->ctrl_len < SSP_BITS_4) 1682 1293 || (chip_info->ctrl_len > SSP_BITS_32)) { 1683 - dev_err(chip_info->dev, 1294 + dev_err(&pl022->adev->dev, 1684 1295 "CTRL LEN is configured incorrectly\n"); 1685 1296 return -EINVAL; 1686 
1297 } 1687 1298 if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) 1688 1299 && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { 1689 - dev_err(chip_info->dev, 1300 + dev_err(&pl022->adev->dev, 1690 1301 "Wait State is configured incorrectly\n"); 1691 1302 return -EINVAL; 1692 1303 } ··· 1651 1350 if ((chip_info->duplex != 1652 1351 SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1653 1352 && (chip_info->duplex != 1654 - SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) 1655 - dev_err(chip_info->dev, 1353 + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { 1354 + dev_err(&pl022->adev->dev, 1656 1355 "Microwire duplex mode is configured incorrectly\n"); 1657 1356 return -EINVAL; 1357 + } 1658 1358 } else { 1659 1359 if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) 1660 - dev_err(chip_info->dev, 1360 + dev_err(&pl022->adev->dev, 1661 1361 "Microwire half duplex mode requested," 1662 1362 " but this is only available in the" 1663 1363 " ST version of PL022\n"); 1664 1364 return -EINVAL; 1665 1365 } 1666 - } 1667 - if (chip_info->cs_control == NULL) { 1668 - dev_warn(chip_info->dev, 1669 - "Chip Select Function is NULL for this chip\n"); 1670 - chip_info->cs_control = null_cs_control; 1671 1366 } 1672 1367 return 0; 1673 1368 } ··· 1764 1467 return 0; 1765 1468 } 1766 1469 1767 - /** 1768 - * NOT IMPLEMENTED 1769 - * process_dma_info - Processes the DMA info provided by client drivers 1770 - * @chip_info: chip info provided by client device 1771 - * @chip: Runtime state maintained by the SSP controller for each spi device 1772 - * 1773 - * This function processes and stores DMA config provided by client driver 1774 - * into the runtime state maintained by the SSP controller driver 1470 + 1471 + /* 1472 + * A piece of default chip info unless the platform 1473 + * supplies it. 
1775 1474 */ 1776 - static int process_dma_info(struct pl022_config_chip *chip_info, 1777 - struct chip_data *chip) 1778 - { 1779 - dev_err(chip_info->dev, 1780 - "cannot process DMA info, DMA not implemented!\n"); 1781 - return -ENOTSUPP; 1782 - } 1475 + static const struct pl022_config_chip pl022_default_chip_info = { 1476 + .com_mode = POLLING_TRANSFER, 1477 + .iface = SSP_INTERFACE_MOTOROLA_SPI, 1478 + .hierarchy = SSP_SLAVE, 1479 + .slave_tx_disable = DO_NOT_DRIVE_TX, 1480 + .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, 1481 + .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, 1482 + .ctrl_len = SSP_BITS_8, 1483 + .wait_state = SSP_MWIRE_WAIT_ZERO, 1484 + .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, 1485 + .cs_control = null_cs_control, 1486 + }; 1487 + 1783 1488 1784 1489 /** 1785 1490 * pl022_setup - setup function registered to SPI master framework ··· 1795 1496 * controller hardware here, that is not done until the actual transfer 1796 1497 * commence. 1797 1498 */ 1798 - 1799 - /* FIXME: JUST GUESSING the spi->mode bits understood by this driver */ 1800 - #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ 1801 - | SPI_LSB_FIRST | SPI_LOOP) 1802 - 1803 1499 static int pl022_setup(struct spi_device *spi) 1804 1500 { 1805 - struct pl022_config_chip *chip_info; 1501 + struct pl022_config_chip const *chip_info; 1806 1502 struct chip_data *chip; 1503 + struct ssp_clock_params clk_freq; 1807 1504 int status = 0; 1808 1505 struct pl022 *pl022 = spi_master_get_devdata(spi->master); 1809 - 1810 - if (spi->mode & ~MODEBITS) { 1811 - dev_dbg(&spi->dev, "unsupported mode bits %x\n", 1812 - spi->mode & ~MODEBITS); 1813 - return -EINVAL; 1814 - } 1506 + unsigned int bits = spi->bits_per_word; 1507 + u32 tmp; 1815 1508 1816 1509 if (!spi->max_speed_hz) 1817 1510 return -EINVAL; ··· 1826 1535 chip_info = spi->controller_data; 1827 1536 1828 1537 if (chip_info == NULL) { 1538 + chip_info = &pl022_default_chip_info; 1829 1539 /* spi_board_info.controller_data not is supplied */ 1830 
1540 dev_dbg(&spi->dev, 1831 1541 "using default controller_data settings\n"); 1832 - 1833 - chip_info = 1834 - kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL); 1835 - 1836 - if (!chip_info) { 1837 - dev_err(&spi->dev, 1838 - "cannot allocate controller data\n"); 1839 - status = -ENOMEM; 1840 - goto err_first_setup; 1841 - } 1842 - 1843 - dev_dbg(&spi->dev, "allocated memory for controller data\n"); 1844 - 1845 - /* Pointer back to the SPI device */ 1846 - chip_info->dev = &spi->dev; 1847 - /* 1848 - * Set controller data default values: 1849 - * Polling is supported by default 1850 - */ 1851 - chip_info->lbm = LOOPBACK_DISABLED; 1852 - chip_info->com_mode = POLLING_TRANSFER; 1853 - chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI; 1854 - chip_info->hierarchy = SSP_SLAVE; 1855 - chip_info->slave_tx_disable = DO_NOT_DRIVE_TX; 1856 - chip_info->endian_tx = SSP_TX_LSB; 1857 - chip_info->endian_rx = SSP_RX_LSB; 1858 - chip_info->data_size = SSP_DATA_BITS_12; 1859 - chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; 1860 - chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; 1861 - chip_info->clk_phase = SSP_CLK_SECOND_EDGE; 1862 - chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; 1863 - chip_info->ctrl_len = SSP_BITS_8; 1864 - chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; 1865 - chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX; 1866 - chip_info->cs_control = null_cs_control; 1867 - } else { 1542 + } else 1868 1543 dev_dbg(&spi->dev, 1869 1544 "using user supplied controller_data settings\n"); 1870 - } 1871 1545 1872 1546 /* 1873 1547 * We can override with custom divisors, else we use the board ··· 1842 1586 && (0 == chip_info->clk_freq.scr)) { 1843 1587 status = calculate_effective_freq(pl022, 1844 1588 spi->max_speed_hz, 1845 - &chip_info->clk_freq); 1589 + &clk_freq); 1846 1590 if (status < 0) 1847 1591 goto err_config_params; 1848 1592 } else { 1849 - if ((chip_info->clk_freq.cpsdvsr % 2) != 0) 1850 - chip_info->clk_freq.cpsdvsr = 1851 - chip_info->clk_freq.cpsdvsr - 
1; 1593 + memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); 1594 + if ((clk_freq.cpsdvsr % 2) != 0) 1595 + clk_freq.cpsdvsr = 1596 + clk_freq.cpsdvsr - 1; 1852 1597 } 1598 + if ((clk_freq.cpsdvsr < CPSDVR_MIN) 1599 + || (clk_freq.cpsdvsr > CPSDVR_MAX)) { 1600 + dev_err(&spi->dev, 1601 + "cpsdvsr is configured incorrectly\n"); 1602 + goto err_config_params; 1603 + } 1604 + 1605 + 1853 1606 status = verify_controller_parameters(pl022, chip_info); 1854 1607 if (status) { 1855 1608 dev_err(&spi->dev, "controller data is incorrect"); 1856 1609 goto err_config_params; 1857 1610 } 1611 + 1858 1612 /* Now set controller state based on controller data */ 1859 1613 chip->xfer_type = chip_info->com_mode; 1860 - chip->cs_control = chip_info->cs_control; 1614 + if (!chip_info->cs_control) { 1615 + chip->cs_control = null_cs_control; 1616 + dev_warn(&spi->dev, 1617 + "chip select function is NULL for this chip\n"); 1618 + } else 1619 + chip->cs_control = chip_info->cs_control; 1861 1620 1862 - if (chip_info->data_size <= 8) { 1863 - dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); 1621 + if (bits <= 3) { 1622 + /* PL022 doesn't support less than 4-bits */ 1623 + status = -ENOTSUPP; 1624 + goto err_config_params; 1625 + } else if (bits <= 8) { 1626 + dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); 1864 1627 chip->n_bytes = 1; 1865 1628 chip->read = READING_U8; 1866 1629 chip->write = WRITING_U8; 1867 - } else if (chip_info->data_size <= 16) { 1630 + } else if (bits <= 16) { 1868 1631 dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); 1869 1632 chip->n_bytes = 2; 1870 1633 chip->read = READING_U16; ··· 1900 1625 dev_err(&spi->dev, 1901 1626 "a standard pl022 can only handle " 1902 1627 "1 <= n <= 16 bit words\n"); 1628 + status = -ENOTSUPP; 1903 1629 goto err_config_params; 1904 1630 } 1905 1631 } ··· 1912 1636 chip->cpsr = 0; 1913 1637 if ((chip_info->com_mode == DMA_TRANSFER) 1914 1638 && ((pl022->master_info)->enable_dma)) { 1915 - chip->enable_dma = 1; 1639 + 
chip->enable_dma = true; 1916 1640 dev_dbg(&spi->dev, "DMA mode set in controller state\n"); 1917 - status = process_dma_info(chip_info, chip); 1918 1641 if (status < 0) 1919 1642 goto err_config_params; 1920 1643 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, ··· 1921 1646 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, 1922 1647 SSP_DMACR_MASK_TXDMAE, 1); 1923 1648 } else { 1924 - chip->enable_dma = 0; 1649 + chip->enable_dma = false; 1925 1650 dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); 1926 1651 SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, 1927 1652 SSP_DMACR_MASK_RXDMAE, 0); ··· 1929 1654 SSP_DMACR_MASK_TXDMAE, 1); 1930 1655 } 1931 1656 1932 - chip->cpsr = chip_info->clk_freq.cpsdvsr; 1657 + chip->cpsr = clk_freq.cpsdvsr; 1933 1658 1934 1659 /* Special setup for the ST micro extended control registers */ 1935 1660 if (pl022->vendor->extended_cr) { 1661 + u32 etx; 1662 + 1936 1663 if (pl022->vendor->pl023) { 1937 1664 /* These bits are only in the PL023 */ 1938 1665 SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, ··· 1950 1673 SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, 1951 1674 SSP_CR1_MASK_MWAIT_ST, 6); 1952 1675 } 1953 - SSP_WRITE_BITS(chip->cr0, chip_info->data_size, 1676 + SSP_WRITE_BITS(chip->cr0, bits - 1, 1954 1677 SSP_CR0_MASK_DSS_ST, 0); 1955 - SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, 1956 - SSP_CR1_MASK_RENDN_ST, 4); 1957 - SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, 1958 - SSP_CR1_MASK_TENDN_ST, 5); 1678 + 1679 + if (spi->mode & SPI_LSB_FIRST) { 1680 + tmp = SSP_RX_LSB; 1681 + etx = SSP_TX_LSB; 1682 + } else { 1683 + tmp = SSP_RX_MSB; 1684 + etx = SSP_TX_MSB; 1685 + } 1686 + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); 1687 + SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); 1959 1688 SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, 1960 1689 SSP_CR1_MASK_RXIFLSEL_ST, 7); 1961 1690 SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, 1962 1691 SSP_CR1_MASK_TXIFLSEL_ST, 10); 1963 1692 } else { 1964 - 
SSP_WRITE_BITS(chip->cr0, chip_info->data_size, 1693 + SSP_WRITE_BITS(chip->cr0, bits - 1, 1965 1694 SSP_CR0_MASK_DSS, 0); 1966 1695 SSP_WRITE_BITS(chip->cr0, chip_info->iface, 1967 1696 SSP_CR0_MASK_FRF, 4); 1968 1697 } 1698 + 1969 1699 /* Stuff that is common for all versions */ 1970 - SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); 1971 - SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); 1972 - SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); 1700 + if (spi->mode & SPI_CPOL) 1701 + tmp = SSP_CLK_POL_IDLE_HIGH; 1702 + else 1703 + tmp = SSP_CLK_POL_IDLE_LOW; 1704 + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); 1705 + 1706 + if (spi->mode & SPI_CPHA) 1707 + tmp = SSP_CLK_SECOND_EDGE; 1708 + else 1709 + tmp = SSP_CLK_FIRST_EDGE; 1710 + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); 1711 + 1712 + SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); 1973 1713 /* Loopback is available on all versions except PL023 */ 1974 - if (!pl022->vendor->pl023) 1975 - SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); 1714 + if (!pl022->vendor->pl023) { 1715 + if (spi->mode & SPI_LOOP) 1716 + tmp = LOOPBACK_ENABLED; 1717 + else 1718 + tmp = LOOPBACK_DISABLED; 1719 + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); 1720 + } 1976 1721 SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); 1977 1722 SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); 1978 1723 SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); ··· 2003 1704 spi_set_ctldata(spi, chip); 2004 1705 return status; 2005 1706 err_config_params: 2006 - err_first_setup: 1707 + spi_set_ctldata(spi, NULL); 2007 1708 kfree(chip); 2008 1709 return status; 2009 1710 } ··· 2065 1766 master->setup = pl022_setup; 2066 1767 master->transfer = pl022_transfer; 2067 1768 1769 + /* 1770 + * Supports mode 0-3, loopback, and active low CS. 
Transfers are 1771 + * always MS bit first on the original pl022. 1772 + */ 1773 + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 1774 + if (pl022->vendor->extended_cr) 1775 + master->mode_bits |= SPI_LSB_FIRST; 1776 + 2068 1777 dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); 2069 1778 2070 1779 status = amba_request_regions(adev, NULL); 2071 1780 if (status) 2072 1781 goto err_no_ioregion; 2073 1782 1783 + pl022->phybase = adev->res.start; 2074 1784 pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); 2075 1785 if (pl022->virtbase == NULL) { 2076 1786 status = -ENOMEM; ··· 2106 1798 dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); 2107 1799 goto err_no_irq; 2108 1800 } 1801 + 1802 + /* Get DMA channels */ 1803 + if (platform_info->enable_dma) { 1804 + status = pl022_dma_probe(pl022); 1805 + if (status != 0) 1806 + goto err_no_dma; 1807 + } 1808 + 2109 1809 /* Initialize and start queue */ 2110 1810 status = init_queue(pl022); 2111 1811 if (status != 0) { ··· 2142 1826 err_start_queue: 2143 1827 err_init_queue: 2144 1828 destroy_queue(pl022); 1829 + pl022_dma_remove(pl022); 1830 + err_no_dma: 2145 1831 free_irq(adev->irq[0], pl022); 2146 1832 err_no_irq: 2147 1833 clk_put(pl022->clk); ··· 2174 1856 return status; 2175 1857 } 2176 1858 load_ssp_default_config(pl022); 1859 + pl022_dma_remove(pl022); 2177 1860 free_irq(adev->irq[0], pl022); 2178 1861 clk_disable(pl022->clk); 2179 1862 clk_put(pl022->clk);
+13 -1
drivers/spi/atmel_spi.c
··· 654 654 struct spi_transfer *xfer; 655 655 unsigned long flags; 656 656 struct device *controller = spi->master->dev.parent; 657 + u8 bits; 658 + struct atmel_spi_device *asd; 657 659 658 660 as = spi_master_get_devdata(spi->master); 659 661 ··· 674 672 return -EINVAL; 675 673 } 676 674 675 + if (xfer->bits_per_word) { 676 + asd = spi->controller_state; 677 + bits = (asd->csr >> 4) & 0xf; 678 + if (bits != xfer->bits_per_word - 8) { 679 + dev_dbg(&spi->dev, "you can't yet change " 680 + "bits_per_word in transfers\n"); 681 + return -ENOPROTOOPT; 682 + } 683 + } 684 + 677 685 /* FIXME implement these protocol options!! */ 678 - if (xfer->bits_per_word || xfer->speed_hz) { 686 + if (xfer->speed_hz) { 679 687 dev_dbg(&spi->dev, "no protocol options yet\n"); 680 688 return -ENOPROTOOPT; 681 689 }
+41 -40
drivers/spi/omap2_mcspi.c
··· 296 296 return 0; 297 297 } 298 298 299 + static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) 300 + { 301 + unsigned long timeout; 302 + 303 + timeout = jiffies + msecs_to_jiffies(1000); 304 + while (!(__raw_readl(reg) & bit)) { 305 + if (time_after(jiffies, timeout)) 306 + return -1; 307 + cpu_relax(); 308 + } 309 + return 0; 310 + } 311 + 299 312 static unsigned 300 313 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) 301 314 { ··· 322 309 u32 l; 323 310 u8 * rx; 324 311 const u8 * tx; 312 + void __iomem *chstat_reg; 325 313 326 314 mcspi = spi_master_get_devdata(spi->master); 327 315 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 328 316 l = mcspi_cached_chconf0(spi); 317 + 318 + chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; 329 319 330 320 count = xfer->len; 331 321 c = count; ··· 398 382 if (tx != NULL) { 399 383 wait_for_completion(&mcspi_dma->dma_tx_completion); 400 384 dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); 385 + 386 + /* for TX_ONLY mode, be sure all words have shifted out */ 387 + if (rx == NULL) { 388 + if (mcspi_wait_for_reg_bit(chstat_reg, 389 + OMAP2_MCSPI_CHSTAT_TXS) < 0) 390 + dev_err(&spi->dev, "TXS timed out\n"); 391 + else if (mcspi_wait_for_reg_bit(chstat_reg, 392 + OMAP2_MCSPI_CHSTAT_EOT) < 0) 393 + dev_err(&spi->dev, "EOT timed out\n"); 394 + } 401 395 } 402 396 403 397 if (rx != NULL) { ··· 461 435 return count; 462 436 } 463 437 464 - static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) 465 - { 466 - unsigned long timeout; 467 - 468 - timeout = jiffies + msecs_to_jiffies(1000); 469 - while (!(__raw_readl(reg) & bit)) { 470 - if (time_after(jiffies, timeout)) 471 - return -1; 472 - cpu_relax(); 473 - } 474 - return 0; 475 - } 476 - 477 438 static unsigned 478 439 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) 479 440 { ··· 502 489 dev_err(&spi->dev, "TXS timed out\n"); 503 490 goto out; 504 491 } 505 - #ifdef VERBOSE 506 - 
dev_dbg(&spi->dev, "write-%d %02x\n", 492 + dev_vdbg(&spi->dev, "write-%d %02x\n", 507 493 word_len, *tx); 508 - #endif 509 494 __raw_writel(*tx++, tx_reg); 510 495 } 511 496 if (rx != NULL) { ··· 517 506 (l & OMAP2_MCSPI_CHCONF_TURBO)) { 518 507 omap2_mcspi_set_enable(spi, 0); 519 508 *rx++ = __raw_readl(rx_reg); 520 - #ifdef VERBOSE 521 - dev_dbg(&spi->dev, "read-%d %02x\n", 509 + dev_vdbg(&spi->dev, "read-%d %02x\n", 522 510 word_len, *(rx - 1)); 523 - #endif 524 511 if (mcspi_wait_for_reg_bit(chstat_reg, 525 512 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 526 513 dev_err(&spi->dev, ··· 531 522 } 532 523 533 524 *rx++ = __raw_readl(rx_reg); 534 - #ifdef VERBOSE 535 - dev_dbg(&spi->dev, "read-%d %02x\n", 525 + dev_vdbg(&spi->dev, "read-%d %02x\n", 536 526 word_len, *(rx - 1)); 537 - #endif 538 527 } 539 528 } while (c); 540 529 } else if (word_len <= 16) { ··· 549 542 dev_err(&spi->dev, "TXS timed out\n"); 550 543 goto out; 551 544 } 552 - #ifdef VERBOSE 553 - dev_dbg(&spi->dev, "write-%d %04x\n", 545 + dev_vdbg(&spi->dev, "write-%d %04x\n", 554 546 word_len, *tx); 555 - #endif 556 547 __raw_writel(*tx++, tx_reg); 557 548 } 558 549 if (rx != NULL) { ··· 564 559 (l & OMAP2_MCSPI_CHCONF_TURBO)) { 565 560 omap2_mcspi_set_enable(spi, 0); 566 561 *rx++ = __raw_readl(rx_reg); 567 - #ifdef VERBOSE 568 - dev_dbg(&spi->dev, "read-%d %04x\n", 562 + dev_vdbg(&spi->dev, "read-%d %04x\n", 569 563 word_len, *(rx - 1)); 570 - #endif 571 564 if (mcspi_wait_for_reg_bit(chstat_reg, 572 565 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 573 566 dev_err(&spi->dev, ··· 578 575 } 579 576 580 577 *rx++ = __raw_readl(rx_reg); 581 - #ifdef VERBOSE 582 - dev_dbg(&spi->dev, "read-%d %04x\n", 578 + dev_vdbg(&spi->dev, "read-%d %04x\n", 583 579 word_len, *(rx - 1)); 584 - #endif 585 580 } 586 581 } while (c); 587 582 } else if (word_len <= 32) { ··· 596 595 dev_err(&spi->dev, "TXS timed out\n"); 597 596 goto out; 598 597 } 599 - #ifdef VERBOSE 600 - dev_dbg(&spi->dev, "write-%d %08x\n", 598 + dev_vdbg(&spi->dev, 
"write-%d %08x\n", 601 599 word_len, *tx); 602 - #endif 603 600 __raw_writel(*tx++, tx_reg); 604 601 } 605 602 if (rx != NULL) { ··· 611 612 (l & OMAP2_MCSPI_CHCONF_TURBO)) { 612 613 omap2_mcspi_set_enable(spi, 0); 613 614 *rx++ = __raw_readl(rx_reg); 614 - #ifdef VERBOSE 615 - dev_dbg(&spi->dev, "read-%d %08x\n", 615 + dev_vdbg(&spi->dev, "read-%d %08x\n", 616 616 word_len, *(rx - 1)); 617 - #endif 618 617 if (mcspi_wait_for_reg_bit(chstat_reg, 619 618 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 620 619 dev_err(&spi->dev, ··· 625 628 } 626 629 627 630 *rx++ = __raw_readl(rx_reg); 628 - #ifdef VERBOSE 629 - dev_dbg(&spi->dev, "read-%d %08x\n", 631 + dev_vdbg(&spi->dev, "read-%d %08x\n", 630 632 word_len, *(rx - 1)); 631 - #endif 632 633 } 633 634 } while (c); 634 635 } ··· 639 644 } else if (mcspi_wait_for_reg_bit(chstat_reg, 640 645 OMAP2_MCSPI_CHSTAT_EOT) < 0) 641 646 dev_err(&spi->dev, "EOT timed out\n"); 647 + 648 + /* disable chan to purge rx datas received in TX_ONLY transfer, 649 + * otherwise these rx datas will affect the direct following 650 + * RX_ONLY transfer. 651 + */ 652 + omap2_mcspi_set_enable(spi, 0); 642 653 } 643 654 out: 644 655 omap2_mcspi_set_enable(spi, 1);
+2 -2
drivers/spi/orion_spi.c
··· 404 404 goto msg_rejected; 405 405 } 406 406 407 - if ((t != NULL) && t->bits_per_word) 407 + if (t->bits_per_word) 408 408 bits_per_word = t->bits_per_word; 409 409 410 410 if ((bits_per_word != 8) && (bits_per_word != 16)) { ··· 415 415 goto msg_rejected; 416 416 } 417 417 /*make sure buffer length is even when working in 16 bit mode*/ 418 - if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) { 418 + if ((t->bits_per_word == 16) && (t->len & 1)) { 419 419 dev_err(&spi->dev, 420 420 "message rejected : " 421 421 "odd data length (%d) while in 16 bit mode\n",
+453 -423
drivers/spi/spi_bfin5xx.c
··· 1 1 /* 2 2 * Blackfin On-Chip SPI Driver 3 3 * 4 - * Copyright 2004-2007 Analog Devices Inc. 4 + * Copyright 2004-2010 Analog Devices Inc. 5 5 * 6 6 * Enter bugs at http://blackfin.uclinux.org/ 7 7 * ··· 41 41 #define RUNNING_STATE ((void *)1) 42 42 #define DONE_STATE ((void *)2) 43 43 #define ERROR_STATE ((void *)-1) 44 - #define QUEUE_RUNNING 0 45 - #define QUEUE_STOPPED 1 46 44 47 - /* Value to send if no TX value is supplied */ 48 - #define SPI_IDLE_TXVAL 0x0000 45 + struct bfin_spi_master_data; 49 46 50 - struct driver_data { 47 + struct bfin_spi_transfer_ops { 48 + void (*write) (struct bfin_spi_master_data *); 49 + void (*read) (struct bfin_spi_master_data *); 50 + void (*duplex) (struct bfin_spi_master_data *); 51 + }; 52 + 53 + struct bfin_spi_master_data { 51 54 /* Driver model hookup */ 52 55 struct platform_device *pdev; 53 56 ··· 72 69 spinlock_t lock; 73 70 struct list_head queue; 74 71 int busy; 75 - int run; 72 + bool running; 76 73 77 74 /* Message Transfer pump */ 78 75 struct tasklet_struct pump_transfers; ··· 80 77 /* Current message transfer state info */ 81 78 struct spi_message *cur_msg; 82 79 struct spi_transfer *cur_transfer; 83 - struct chip_data *cur_chip; 80 + struct bfin_spi_slave_data *cur_chip; 84 81 size_t len_in_bytes; 85 82 size_t len; 86 83 void *tx; ··· 95 92 dma_addr_t rx_dma; 96 93 dma_addr_t tx_dma; 97 94 95 + int irq_requested; 96 + int spi_irq; 97 + 98 98 size_t rx_map_len; 99 99 size_t tx_map_len; 100 100 u8 n_bytes; 101 + u16 ctrl_reg; 102 + u16 flag_reg; 103 + 101 104 int cs_change; 102 - void (*write) (struct driver_data *); 103 - void (*read) (struct driver_data *); 104 - void (*duplex) (struct driver_data *); 105 + const struct bfin_spi_transfer_ops *ops; 105 106 }; 106 107 107 - struct chip_data { 108 + struct bfin_spi_slave_data { 108 109 u16 ctl_reg; 109 110 u16 baud; 110 111 u16 flag; 111 112 112 113 u8 chip_select_num; 113 - u8 n_bytes; 114 - u8 width; /* 0 or 1 */ 115 114 u8 enable_dma; 116 - u8 
bits_per_word; /* 8 or 16 */ 117 - u8 cs_change_per_word; 118 115 u16 cs_chg_udelay; /* Some devices require > 255usec delay */ 119 116 u32 cs_gpio; 120 117 u16 idle_tx_val; 121 - void (*write) (struct driver_data *); 122 - void (*read) (struct driver_data *); 123 - void (*duplex) (struct driver_data *); 118 + u8 pio_interrupt; /* use spi data irq */ 119 + const struct bfin_spi_transfer_ops *ops; 124 120 }; 125 121 126 122 #define DEFINE_SPI_REG(reg, off) \ 127 - static inline u16 read_##reg(struct driver_data *drv_data) \ 123 + static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ 128 124 { return bfin_read16(drv_data->regs_base + off); } \ 129 - static inline void write_##reg(struct driver_data *drv_data, u16 v) \ 125 + static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ 130 126 { bfin_write16(drv_data->regs_base + off, v); } 131 127 132 128 DEFINE_SPI_REG(CTRL, 0x00) ··· 136 134 DEFINE_SPI_REG(BAUD, 0x14) 137 135 DEFINE_SPI_REG(SHAW, 0x18) 138 136 139 - static void bfin_spi_enable(struct driver_data *drv_data) 137 + static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) 140 138 { 141 139 u16 cr; 142 140 ··· 144 142 write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); 145 143 } 146 144 147 - static void bfin_spi_disable(struct driver_data *drv_data) 145 + static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) 148 146 { 149 147 u16 cr; 150 148 ··· 167 165 return spi_baud; 168 166 } 169 167 170 - static int bfin_spi_flush(struct driver_data *drv_data) 168 + static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) 171 169 { 172 170 unsigned long limit = loops_per_jiffy << 1; 173 171 ··· 181 179 } 182 180 183 181 /* Chip select operation functions for cs_change flag */ 184 - static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip) 182 + static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) 185 183 { 186 - if 
(likely(chip->chip_select_num)) { 184 + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { 187 185 u16 flag = read_FLAG(drv_data); 188 186 189 - flag |= chip->flag; 190 - flag &= ~(chip->flag << 8); 187 + flag &= ~chip->flag; 191 188 192 189 write_FLAG(drv_data, flag); 193 190 } else { ··· 194 193 } 195 194 } 196 195 197 - static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip) 196 + static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, 197 + struct bfin_spi_slave_data *chip) 198 198 { 199 - if (likely(chip->chip_select_num)) { 199 + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { 200 200 u16 flag = read_FLAG(drv_data); 201 201 202 - flag &= ~chip->flag; 203 - flag |= (chip->flag << 8); 202 + flag |= chip->flag; 204 203 205 204 write_FLAG(drv_data, flag); 206 205 } else { ··· 212 211 udelay(chip->cs_chg_udelay); 213 212 } 214 213 215 - /* stop controller and re-config current chip*/ 216 - static void bfin_spi_restore_state(struct driver_data *drv_data) 214 + /* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ 215 + static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, 216 + struct bfin_spi_slave_data *chip) 217 217 { 218 - struct chip_data *chip = drv_data->cur_chip; 218 + if (chip->chip_select_num < MAX_CTRL_CS) { 219 + u16 flag = read_FLAG(drv_data); 220 + 221 + flag |= (chip->flag >> 8); 222 + 223 + write_FLAG(drv_data, flag); 224 + } 225 + } 226 + 227 + static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, 228 + struct bfin_spi_slave_data *chip) 229 + { 230 + if (chip->chip_select_num < MAX_CTRL_CS) { 231 + u16 flag = read_FLAG(drv_data); 232 + 233 + flag &= ~(chip->flag >> 8); 234 + 235 + write_FLAG(drv_data, flag); 236 + } 237 + } 238 + 239 + /* stop controller and re-config current chip*/ 240 + static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) 241 + { 242 + struct bfin_spi_slave_data *chip = drv_data->cur_chip; 219 
243 220 244 /* Clear status and disable clock */ 221 245 write_STAT(drv_data, BIT_STAT_CLR); 222 246 bfin_spi_disable(drv_data); 223 247 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); 248 + 249 + SSYNC(); 224 250 225 251 /* Load the registers */ 226 252 write_CTRL(drv_data, chip->ctl_reg); ··· 258 230 } 259 231 260 232 /* used to kick off transfer in rx mode and read unwanted RX data */ 261 - static inline void bfin_spi_dummy_read(struct driver_data *drv_data) 233 + static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) 262 234 { 263 235 (void) read_RDBR(drv_data); 264 236 } 265 237 266 - static void bfin_spi_null_writer(struct driver_data *drv_data) 267 - { 268 - u8 n_bytes = drv_data->n_bytes; 269 - u16 tx_val = drv_data->cur_chip->idle_tx_val; 270 - 271 - /* clear RXS (we check for RXS inside the loop) */ 272 - bfin_spi_dummy_read(drv_data); 273 - 274 - while (drv_data->tx < drv_data->tx_end) { 275 - write_TDBR(drv_data, tx_val); 276 - drv_data->tx += n_bytes; 277 - /* wait until transfer finished. 
278 - checking SPIF or TXS may not guarantee transfer completion */ 279 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 280 - cpu_relax(); 281 - /* discard RX data and clear RXS */ 282 - bfin_spi_dummy_read(drv_data); 283 - } 284 - } 285 - 286 - static void bfin_spi_null_reader(struct driver_data *drv_data) 287 - { 288 - u8 n_bytes = drv_data->n_bytes; 289 - u16 tx_val = drv_data->cur_chip->idle_tx_val; 290 - 291 - /* discard old RX data and clear RXS */ 292 - bfin_spi_dummy_read(drv_data); 293 - 294 - while (drv_data->rx < drv_data->rx_end) { 295 - write_TDBR(drv_data, tx_val); 296 - drv_data->rx += n_bytes; 297 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 298 - cpu_relax(); 299 - bfin_spi_dummy_read(drv_data); 300 - } 301 - } 302 - 303 - static void bfin_spi_u8_writer(struct driver_data *drv_data) 238 + static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) 304 239 { 305 240 /* clear RXS (we check for RXS inside the loop) */ 306 241 bfin_spi_dummy_read(drv_data); ··· 279 288 } 280 289 } 281 290 282 - static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data) 283 - { 284 - struct chip_data *chip = drv_data->cur_chip; 285 - 286 - /* clear RXS (we check for RXS inside the loop) */ 287 - bfin_spi_dummy_read(drv_data); 288 - 289 - while (drv_data->tx < drv_data->tx_end) { 290 - bfin_spi_cs_active(drv_data, chip); 291 - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); 292 - /* make sure transfer finished before deactiving CS */ 293 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 294 - cpu_relax(); 295 - bfin_spi_dummy_read(drv_data); 296 - bfin_spi_cs_deactive(drv_data, chip); 297 - } 298 - } 299 - 300 - static void bfin_spi_u8_reader(struct driver_data *drv_data) 291 + static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) 301 292 { 302 293 u16 tx_val = drv_data->cur_chip->idle_tx_val; 303 294 ··· 294 321 } 295 322 } 296 323 297 - static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data) 298 - { 299 - struct 
chip_data *chip = drv_data->cur_chip; 300 - u16 tx_val = chip->idle_tx_val; 301 - 302 - /* discard old RX data and clear RXS */ 303 - bfin_spi_dummy_read(drv_data); 304 - 305 - while (drv_data->rx < drv_data->rx_end) { 306 - bfin_spi_cs_active(drv_data, chip); 307 - write_TDBR(drv_data, tx_val); 308 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 309 - cpu_relax(); 310 - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); 311 - bfin_spi_cs_deactive(drv_data, chip); 312 - } 313 - } 314 - 315 - static void bfin_spi_u8_duplex(struct driver_data *drv_data) 324 + static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) 316 325 { 317 326 /* discard old RX data and clear RXS */ 318 327 bfin_spi_dummy_read(drv_data); ··· 307 352 } 308 353 } 309 354 310 - static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data) 311 - { 312 - struct chip_data *chip = drv_data->cur_chip; 355 + static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { 356 + .write = bfin_spi_u8_writer, 357 + .read = bfin_spi_u8_reader, 358 + .duplex = bfin_spi_u8_duplex, 359 + }; 313 360 314 - /* discard old RX data and clear RXS */ 315 - bfin_spi_dummy_read(drv_data); 316 - 317 - while (drv_data->rx < drv_data->rx_end) { 318 - bfin_spi_cs_active(drv_data, chip); 319 - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); 320 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 321 - cpu_relax(); 322 - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); 323 - bfin_spi_cs_deactive(drv_data, chip); 324 - } 325 - } 326 - 327 - static void bfin_spi_u16_writer(struct driver_data *drv_data) 361 + static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) 328 362 { 329 363 /* clear RXS (we check for RXS inside the loop) */ 330 364 bfin_spi_dummy_read(drv_data); ··· 330 386 } 331 387 } 332 388 333 - static void bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data) 334 - { 335 - struct chip_data *chip = drv_data->cur_chip; 336 - 337 - /* clear RXS (we check for RXS inside the 
loop) */ 338 - bfin_spi_dummy_read(drv_data); 339 - 340 - while (drv_data->tx < drv_data->tx_end) { 341 - bfin_spi_cs_active(drv_data, chip); 342 - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 343 - drv_data->tx += 2; 344 - /* make sure transfer finished before deactiving CS */ 345 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 346 - cpu_relax(); 347 - bfin_spi_dummy_read(drv_data); 348 - bfin_spi_cs_deactive(drv_data, chip); 349 - } 350 - } 351 - 352 - static void bfin_spi_u16_reader(struct driver_data *drv_data) 389 + static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) 353 390 { 354 391 u16 tx_val = drv_data->cur_chip->idle_tx_val; 355 392 ··· 346 421 } 347 422 } 348 423 349 - static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data) 350 - { 351 - struct chip_data *chip = drv_data->cur_chip; 352 - u16 tx_val = chip->idle_tx_val; 353 - 354 - /* discard old RX data and clear RXS */ 355 - bfin_spi_dummy_read(drv_data); 356 - 357 - while (drv_data->rx < drv_data->rx_end) { 358 - bfin_spi_cs_active(drv_data, chip); 359 - write_TDBR(drv_data, tx_val); 360 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 361 - cpu_relax(); 362 - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 363 - drv_data->rx += 2; 364 - bfin_spi_cs_deactive(drv_data, chip); 365 - } 366 - } 367 - 368 - static void bfin_spi_u16_duplex(struct driver_data *drv_data) 424 + static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) 369 425 { 370 426 /* discard old RX data and clear RXS */ 371 427 bfin_spi_dummy_read(drv_data); ··· 361 455 } 362 456 } 363 457 364 - static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data) 365 - { 366 - struct chip_data *chip = drv_data->cur_chip; 458 + static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { 459 + .write = bfin_spi_u16_writer, 460 + .read = bfin_spi_u16_reader, 461 + .duplex = bfin_spi_u16_duplex, 462 + }; 367 463 368 - /* discard old RX data and clear RXS */ 369 - 
bfin_spi_dummy_read(drv_data); 370 - 371 - while (drv_data->rx < drv_data->rx_end) { 372 - bfin_spi_cs_active(drv_data, chip); 373 - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 374 - drv_data->tx += 2; 375 - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 376 - cpu_relax(); 377 - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 378 - drv_data->rx += 2; 379 - bfin_spi_cs_deactive(drv_data, chip); 380 - } 381 - } 382 - 383 - /* test if ther is more transfer to be done */ 384 - static void *bfin_spi_next_transfer(struct driver_data *drv_data) 464 + /* test if there is more transfer to be done */ 465 + static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) 385 466 { 386 467 struct spi_message *msg = drv_data->cur_msg; 387 468 struct spi_transfer *trans = drv_data->cur_transfer; ··· 387 494 * caller already set message->status; 388 495 * dma and pio irqs are blocked give finished message back 389 496 */ 390 - static void bfin_spi_giveback(struct driver_data *drv_data) 497 + static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) 391 498 { 392 - struct chip_data *chip = drv_data->cur_chip; 499 + struct bfin_spi_slave_data *chip = drv_data->cur_chip; 393 500 struct spi_transfer *last_transfer; 394 501 unsigned long flags; 395 502 struct spi_message *msg; ··· 418 525 msg->complete(msg->context); 419 526 } 420 527 528 + /* spi data irq handler */ 529 + static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) 530 + { 531 + struct bfin_spi_master_data *drv_data = dev_id; 532 + struct bfin_spi_slave_data *chip = drv_data->cur_chip; 533 + struct spi_message *msg = drv_data->cur_msg; 534 + int n_bytes = drv_data->n_bytes; 535 + 536 + /* wait until transfer finished. 
*/ 537 + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 538 + cpu_relax(); 539 + 540 + if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || 541 + (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { 542 + /* last read */ 543 + if (drv_data->rx) { 544 + dev_dbg(&drv_data->pdev->dev, "last read\n"); 545 + if (n_bytes == 2) 546 + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 547 + else if (n_bytes == 1) 548 + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); 549 + drv_data->rx += n_bytes; 550 + } 551 + 552 + msg->actual_length += drv_data->len_in_bytes; 553 + if (drv_data->cs_change) 554 + bfin_spi_cs_deactive(drv_data, chip); 555 + /* Move to next transfer */ 556 + msg->state = bfin_spi_next_transfer(drv_data); 557 + 558 + disable_irq_nosync(drv_data->spi_irq); 559 + 560 + /* Schedule transfer tasklet */ 561 + tasklet_schedule(&drv_data->pump_transfers); 562 + return IRQ_HANDLED; 563 + } 564 + 565 + if (drv_data->rx && drv_data->tx) { 566 + /* duplex */ 567 + dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); 568 + if (drv_data->n_bytes == 2) { 569 + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 570 + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 571 + } else if (drv_data->n_bytes == 1) { 572 + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); 573 + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); 574 + } 575 + } else if (drv_data->rx) { 576 + /* read */ 577 + dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); 578 + if (drv_data->n_bytes == 2) 579 + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 580 + else if (drv_data->n_bytes == 1) 581 + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); 582 + write_TDBR(drv_data, chip->idle_tx_val); 583 + } else if (drv_data->tx) { 584 + /* write */ 585 + dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); 586 + bfin_spi_dummy_read(drv_data); 587 + if (drv_data->n_bytes == 2) 588 + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 589 + else if (drv_data->n_bytes == 1) 590 + write_TDBR(drv_data, (*(u8 *) 
(drv_data->tx))); 591 + } 592 + 593 + if (drv_data->tx) 594 + drv_data->tx += n_bytes; 595 + if (drv_data->rx) 596 + drv_data->rx += n_bytes; 597 + 598 + return IRQ_HANDLED; 599 + } 600 + 421 601 static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) 422 602 { 423 - struct driver_data *drv_data = dev_id; 424 - struct chip_data *chip = drv_data->cur_chip; 603 + struct bfin_spi_master_data *drv_data = dev_id; 604 + struct bfin_spi_slave_data *chip = drv_data->cur_chip; 425 605 struct spi_message *msg = drv_data->cur_msg; 426 606 unsigned long timeout; 427 607 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); ··· 506 540 507 541 clear_dma_irqstat(drv_data->dma_channel); 508 542 509 - /* Wait for DMA to complete */ 510 - while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN) 511 - cpu_relax(); 512 - 513 543 /* 514 544 * wait for the last transaction shifted out. HRM states: 515 545 * at this point there may still be data in the SPI DMA FIFO waiting ··· 513 551 * register until it goes low for 2 successive reads 514 552 */ 515 553 if (drv_data->tx != NULL) { 516 - while ((read_STAT(drv_data) & TXS) || 517 - (read_STAT(drv_data) & TXS)) 554 + while ((read_STAT(drv_data) & BIT_STAT_TXS) || 555 + (read_STAT(drv_data) & BIT_STAT_TXS)) 518 556 cpu_relax(); 519 557 } 520 558 ··· 523 561 dmastat, read_STAT(drv_data)); 524 562 525 563 timeout = jiffies + HZ; 526 - while (!(read_STAT(drv_data) & SPIF)) 564 + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) 527 565 if (!time_before(jiffies, timeout)) { 528 566 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); 529 567 break; 530 568 } else 531 569 cpu_relax(); 532 570 533 - if ((dmastat & DMA_ERR) && (spistat & RBSY)) { 571 + if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { 534 572 msg->state = ERROR_STATE; 535 573 dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); 536 574 } else { ··· 550 588 dev_dbg(&drv_data->pdev->dev, 551 589 "disable dma channel 
irq%d\n", 552 590 drv_data->dma_channel); 553 - dma_disable_irq(drv_data->dma_channel); 591 + dma_disable_irq_nosync(drv_data->dma_channel); 554 592 555 593 return IRQ_HANDLED; 556 594 } 557 595 558 596 static void bfin_spi_pump_transfers(unsigned long data) 559 597 { 560 - struct driver_data *drv_data = (struct driver_data *)data; 598 + struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; 561 599 struct spi_message *message = NULL; 562 600 struct spi_transfer *transfer = NULL; 563 601 struct spi_transfer *previous = NULL; 564 - struct chip_data *chip = NULL; 565 - u8 width; 566 - u16 cr, dma_width, dma_config; 602 + struct bfin_spi_slave_data *chip = NULL; 603 + unsigned int bits_per_word; 604 + u16 cr, cr_width, dma_width, dma_config; 567 605 u32 tranf_success = 1; 568 606 u8 full_duplex = 0; 569 607 ··· 601 639 udelay(previous->delay_usecs); 602 640 } 603 641 604 - /* Setup the transfer state based on the type of transfer */ 642 + /* Flush any existing transfers that may be sitting in the hardware */ 605 643 if (bfin_spi_flush(drv_data) == 0) { 606 644 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 607 645 message->status = -EIO; ··· 641 679 drv_data->cs_change = transfer->cs_change; 642 680 643 681 /* Bits per word setup */ 644 - switch (transfer->bits_per_word) { 645 - case 8: 682 + bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; 683 + if (bits_per_word == 8) { 646 684 drv_data->n_bytes = 1; 647 - width = CFG_SPI_WORDSIZE8; 648 - drv_data->read = chip->cs_change_per_word ? 649 - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; 650 - drv_data->write = chip->cs_change_per_word ? 651 - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; 652 - drv_data->duplex = chip->cs_change_per_word ? 
653 - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; 654 - break; 655 - 656 - case 16: 685 + drv_data->len = transfer->len; 686 + cr_width = 0; 687 + drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; 688 + } else if (bits_per_word == 16) { 657 689 drv_data->n_bytes = 2; 658 - width = CFG_SPI_WORDSIZE16; 659 - drv_data->read = chip->cs_change_per_word ? 660 - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; 661 - drv_data->write = chip->cs_change_per_word ? 662 - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; 663 - drv_data->duplex = chip->cs_change_per_word ? 664 - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; 665 - break; 666 - 667 - default: 668 - /* No change, the same as default setting */ 669 - drv_data->n_bytes = chip->n_bytes; 670 - width = chip->width; 671 - drv_data->write = drv_data->tx ? chip->write : bfin_spi_null_writer; 672 - drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader; 673 - drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer; 674 - break; 690 + drv_data->len = (transfer->len) >> 1; 691 + cr_width = BIT_CTL_WORDSIZE; 692 + drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; 693 + } else { 694 + dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); 695 + message->status = -EINVAL; 696 + bfin_spi_giveback(drv_data); 697 + return; 675 698 } 676 - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); 677 - cr |= (width << 8); 699 + cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); 700 + cr |= cr_width; 678 701 write_CTRL(drv_data, cr); 679 702 680 - if (width == CFG_SPI_WORDSIZE16) { 681 - drv_data->len = (transfer->len) >> 1; 682 - } else { 683 - drv_data->len = transfer->len; 684 - } 685 703 dev_dbg(&drv_data->pdev->dev, 686 - "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", 687 - drv_data->write, chip->write, bfin_spi_null_writer); 704 + "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", 705 + drv_data->ops, chip->ops, 
&bfin_bfin_spi_transfer_ops_u8); 688 706 689 - /* speed and width has been set on per message */ 690 707 message->state = RUNNING_STATE; 691 708 dma_config = 0; 692 709 ··· 676 735 write_BAUD(drv_data, chip->baud); 677 736 678 737 write_STAT(drv_data, BIT_STAT_CLR); 679 - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); 680 - if (drv_data->cs_change) 681 - bfin_spi_cs_active(drv_data, chip); 738 + bfin_spi_cs_active(drv_data, chip); 682 739 683 740 dev_dbg(&drv_data->pdev->dev, 684 741 "now pumping a transfer: width is %d, len is %d\n", 685 - width, transfer->len); 742 + cr_width, transfer->len); 686 743 687 744 /* 688 745 * Try to map dma buffer and do a dma transfer. If successful use, ··· 699 760 /* config dma channel */ 700 761 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); 701 762 set_dma_x_count(drv_data->dma_channel, drv_data->len); 702 - if (width == CFG_SPI_WORDSIZE16) { 763 + if (cr_width == BIT_CTL_WORDSIZE) { 703 764 set_dma_x_modify(drv_data->dma_channel, 2); 704 765 dma_width = WDSIZE_16; 705 766 } else { ··· 785 846 dma_enable_irq(drv_data->dma_channel); 786 847 local_irq_restore(flags); 787 848 788 - } else { 789 - /* IO mode write then read */ 790 - dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); 791 - 792 - /* we always use SPI_WRITE mode. SPI_READ mode 793 - seems to have problems with setting up the 794 - output value in TDBR prior to the transfer. 
*/ 795 - write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); 796 - 797 - if (full_duplex) { 798 - /* full duplex mode */ 799 - BUG_ON((drv_data->tx_end - drv_data->tx) != 800 - (drv_data->rx_end - drv_data->rx)); 801 - dev_dbg(&drv_data->pdev->dev, 802 - "IO duplex: cr is 0x%x\n", cr); 803 - 804 - drv_data->duplex(drv_data); 805 - 806 - if (drv_data->tx != drv_data->tx_end) 807 - tranf_success = 0; 808 - } else if (drv_data->tx != NULL) { 809 - /* write only half duplex */ 810 - dev_dbg(&drv_data->pdev->dev, 811 - "IO write: cr is 0x%x\n", cr); 812 - 813 - drv_data->write(drv_data); 814 - 815 - if (drv_data->tx != drv_data->tx_end) 816 - tranf_success = 0; 817 - } else if (drv_data->rx != NULL) { 818 - /* read only half duplex */ 819 - dev_dbg(&drv_data->pdev->dev, 820 - "IO read: cr is 0x%x\n", cr); 821 - 822 - drv_data->read(drv_data); 823 - if (drv_data->rx != drv_data->rx_end) 824 - tranf_success = 0; 825 - } 826 - 827 - if (!tranf_success) { 828 - dev_dbg(&drv_data->pdev->dev, 829 - "IO write error!\n"); 830 - message->state = ERROR_STATE; 831 - } else { 832 - /* Update total byte transfered */ 833 - message->actual_length += drv_data->len_in_bytes; 834 - /* Move to next transfer of this msg */ 835 - message->state = bfin_spi_next_transfer(drv_data); 836 - if (drv_data->cs_change) 837 - bfin_spi_cs_deactive(drv_data, chip); 838 - } 839 - /* Schedule next transfer tasklet */ 840 - tasklet_schedule(&drv_data->pump_transfers); 849 + return; 841 850 } 851 + 852 + /* 853 + * We always use SPI_WRITE mode (transfer starts with TDBR write). 854 + * SPI_READ mode (transfer starts with RDBR read) seems to have 855 + * problems with setting up the output value in TDBR prior to the 856 + * start of the transfer. 
857 + */ 858 + write_CTRL(drv_data, cr | BIT_CTL_TXMOD); 859 + 860 + if (chip->pio_interrupt) { 861 + /* SPI irq should have been disabled by now */ 862 + 863 + /* discard old RX data and clear RXS */ 864 + bfin_spi_dummy_read(drv_data); 865 + 866 + /* start transfer */ 867 + if (drv_data->tx == NULL) 868 + write_TDBR(drv_data, chip->idle_tx_val); 869 + else { 870 + if (bits_per_word == 8) 871 + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); 872 + else 873 + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 874 + drv_data->tx += drv_data->n_bytes; 875 + } 876 + 877 + /* once TDBR is empty, interrupt is triggered */ 878 + enable_irq(drv_data->spi_irq); 879 + return; 880 + } 881 + 882 + /* IO mode */ 883 + dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); 884 + 885 + if (full_duplex) { 886 + /* full duplex mode */ 887 + BUG_ON((drv_data->tx_end - drv_data->tx) != 888 + (drv_data->rx_end - drv_data->rx)); 889 + dev_dbg(&drv_data->pdev->dev, 890 + "IO duplex: cr is 0x%x\n", cr); 891 + 892 + drv_data->ops->duplex(drv_data); 893 + 894 + if (drv_data->tx != drv_data->tx_end) 895 + tranf_success = 0; 896 + } else if (drv_data->tx != NULL) { 897 + /* write only half duplex */ 898 + dev_dbg(&drv_data->pdev->dev, 899 + "IO write: cr is 0x%x\n", cr); 900 + 901 + drv_data->ops->write(drv_data); 902 + 903 + if (drv_data->tx != drv_data->tx_end) 904 + tranf_success = 0; 905 + } else if (drv_data->rx != NULL) { 906 + /* read only half duplex */ 907 + dev_dbg(&drv_data->pdev->dev, 908 + "IO read: cr is 0x%x\n", cr); 909 + 910 + drv_data->ops->read(drv_data); 911 + if (drv_data->rx != drv_data->rx_end) 912 + tranf_success = 0; 913 + } 914 + 915 + if (!tranf_success) { 916 + dev_dbg(&drv_data->pdev->dev, 917 + "IO write error!\n"); 918 + message->state = ERROR_STATE; 919 + } else { 920 + /* Update total byte transfered */ 921 + message->actual_length += drv_data->len_in_bytes; 922 + /* Move to next transfer of this msg */ 923 + message->state = bfin_spi_next_transfer(drv_data); 
924 + if (drv_data->cs_change) 925 + bfin_spi_cs_deactive(drv_data, chip); 926 + } 927 + 928 + /* Schedule next transfer tasklet */ 929 + tasklet_schedule(&drv_data->pump_transfers); 842 930 } 843 931 844 932 /* pop a msg from queue and kick off real transfer */ 845 933 static void bfin_spi_pump_messages(struct work_struct *work) 846 934 { 847 - struct driver_data *drv_data; 935 + struct bfin_spi_master_data *drv_data; 848 936 unsigned long flags; 849 937 850 - drv_data = container_of(work, struct driver_data, pump_messages); 938 + drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); 851 939 852 940 /* Lock queue and check for queue work */ 853 941 spin_lock_irqsave(&drv_data->lock, flags); 854 - if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { 942 + if (list_empty(&drv_data->queue) || !drv_data->running) { 855 943 /* pumper kicked off but no work to do */ 856 944 drv_data->busy = 0; 857 945 spin_unlock_irqrestore(&drv_data->lock, flags); ··· 928 962 */ 929 963 static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) 930 964 { 931 - struct driver_data *drv_data = spi_master_get_devdata(spi->master); 965 + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 932 966 unsigned long flags; 933 967 934 968 spin_lock_irqsave(&drv_data->lock, flags); 935 969 936 - if (drv_data->run == QUEUE_STOPPED) { 970 + if (!drv_data->running) { 937 971 spin_unlock_irqrestore(&drv_data->lock, flags); 938 972 return -ESHUTDOWN; 939 973 } ··· 945 979 dev_dbg(&spi->dev, "adding an msg in transfer() \n"); 946 980 list_add_tail(&msg->queue, &drv_data->queue); 947 981 948 - if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) 982 + if (drv_data->running && !drv_data->busy) 949 983 queue_work(drv_data->workqueue, &drv_data->pump_messages); 950 984 951 985 spin_unlock_irqrestore(&drv_data->lock, flags); ··· 969 1003 P_SPI2_SSEL6, P_SPI2_SSEL7}, 970 1004 }; 971 1005 972 - /* first setup for new devices */ 
1006 + /* setup for devices (may be called multiple times -- not just first setup) */ 973 1007 static int bfin_spi_setup(struct spi_device *spi) 974 1008 { 975 - struct bfin5xx_spi_chip *chip_info = NULL; 976 - struct chip_data *chip; 977 - struct driver_data *drv_data = spi_master_get_devdata(spi->master); 978 - int ret; 979 - 980 - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) 981 - return -EINVAL; 1009 + struct bfin5xx_spi_chip *chip_info; 1010 + struct bfin_spi_slave_data *chip = NULL; 1011 + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 1012 + u16 bfin_ctl_reg; 1013 + int ret = -EINVAL; 982 1014 983 1015 /* Only alloc (or use chip_info) on first setup */ 1016 + chip_info = NULL; 984 1017 chip = spi_get_ctldata(spi); 985 1018 if (chip == NULL) { 986 - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 987 - if (!chip) 988 - return -ENOMEM; 1019 + chip = kzalloc(sizeof(*chip), GFP_KERNEL); 1020 + if (!chip) { 1021 + dev_err(&spi->dev, "cannot allocate chip data\n"); 1022 + ret = -ENOMEM; 1023 + goto error; 1024 + } 989 1025 990 1026 chip->enable_dma = 0; 991 1027 chip_info = spi->controller_data; 992 1028 } 993 1029 1030 + /* Let people set non-standard bits directly */ 1031 + bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | 1032 + BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; 1033 + 994 1034 /* chip_info isn't always needed */ 995 1035 if (chip_info) { 996 1036 /* Make sure people stop trying to set fields via ctl_reg 997 1037 * when they should actually be using common SPI framework. 998 - * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. 1038 + * Currently we let through: WOM EMISO PSSE GM SZ. 999 1039 * Not sure if a user actually needs/uses any of these, 1000 1040 * but let's assume (for now) they do. 
1001 1041 */ 1002 - if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { 1042 + if (chip_info->ctl_reg & ~bfin_ctl_reg) { 1003 1043 dev_err(&spi->dev, "do not set bits in ctl_reg " 1004 1044 "that the SPI framework manages\n"); 1005 - return -EINVAL; 1045 + goto error; 1006 1046 } 1007 - 1008 1047 chip->enable_dma = chip_info->enable_dma != 0 1009 1048 && drv_data->master_info->enable_dma; 1010 1049 chip->ctl_reg = chip_info->ctl_reg; 1011 - chip->bits_per_word = chip_info->bits_per_word; 1012 - chip->cs_change_per_word = chip_info->cs_change_per_word; 1013 1050 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 1014 - chip->cs_gpio = chip_info->cs_gpio; 1015 1051 chip->idle_tx_val = chip_info->idle_tx_val; 1052 + chip->pio_interrupt = chip_info->pio_interrupt; 1053 + spi->bits_per_word = chip_info->bits_per_word; 1054 + } else { 1055 + /* force a default base state */ 1056 + chip->ctl_reg &= bfin_ctl_reg; 1057 + } 1058 + 1059 + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { 1060 + dev_err(&spi->dev, "%d bits_per_word is not supported\n", 1061 + spi->bits_per_word); 1062 + goto error; 1016 1063 } 1017 1064 1018 1065 /* translate common spi framework into our register */ 1019 - if (spi->mode & SPI_CPOL) 1020 - chip->ctl_reg |= CPOL; 1021 - if (spi->mode & SPI_CPHA) 1022 - chip->ctl_reg |= CPHA; 1023 - if (spi->mode & SPI_LSB_FIRST) 1024 - chip->ctl_reg |= LSBF; 1025 - /* we dont support running in slave mode (yet?) 
*/ 1026 - chip->ctl_reg |= MSTR; 1027 - 1028 - /* 1029 - * if any one SPI chip is registered and wants DMA, request the 1030 - * DMA channel for it 1031 - */ 1032 - if (chip->enable_dma && !drv_data->dma_requested) { 1033 - /* register dma irq handler */ 1034 - if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) { 1035 - dev_dbg(&spi->dev, 1036 - "Unable to request BlackFin SPI DMA channel\n"); 1037 - return -ENODEV; 1038 - } 1039 - if (set_dma_callback(drv_data->dma_channel, 1040 - bfin_spi_dma_irq_handler, drv_data) < 0) { 1041 - dev_dbg(&spi->dev, "Unable to set dma callback\n"); 1042 - return -EPERM; 1043 - } 1044 - dma_disable_irq(drv_data->dma_channel); 1045 - drv_data->dma_requested = 1; 1066 + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { 1067 + dev_err(&spi->dev, "unsupported spi modes detected\n"); 1068 + goto error; 1046 1069 } 1070 + if (spi->mode & SPI_CPOL) 1071 + chip->ctl_reg |= BIT_CTL_CPOL; 1072 + if (spi->mode & SPI_CPHA) 1073 + chip->ctl_reg |= BIT_CTL_CPHA; 1074 + if (spi->mode & SPI_LSB_FIRST) 1075 + chip->ctl_reg |= BIT_CTL_LSBF; 1076 + /* we dont support running in slave mode (yet?) 
*/ 1077 + chip->ctl_reg |= BIT_CTL_MASTER; 1047 1078 1048 1079 /* 1049 1080 * Notice: for blackfin, the speed_hz is the value of register 1050 1081 * SPI_BAUD, not the real baudrate 1051 1082 */ 1052 1083 chip->baud = hz_to_spi_baud(spi->max_speed_hz); 1053 - chip->flag = 1 << (spi->chip_select); 1054 1084 chip->chip_select_num = spi->chip_select; 1085 + if (chip->chip_select_num < MAX_CTRL_CS) { 1086 + if (!(spi->mode & SPI_CPHA)) 1087 + dev_warn(&spi->dev, "Warning: SPI CPHA not set:" 1088 + " Slave Select not under software control!\n" 1089 + " See Documentation/blackfin/bfin-spi-notes.txt"); 1055 1090 1056 - if (chip->chip_select_num == 0) { 1091 + chip->flag = (1 << spi->chip_select) << 8; 1092 + } else 1093 + chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; 1094 + 1095 + if (chip->enable_dma && chip->pio_interrupt) { 1096 + dev_err(&spi->dev, "enable_dma is set, " 1097 + "do not set pio_interrupt\n"); 1098 + goto error; 1099 + } 1100 + /* 1101 + * if any one SPI chip is registered and wants DMA, request the 1102 + * DMA channel for it 1103 + */ 1104 + if (chip->enable_dma && !drv_data->dma_requested) { 1105 + /* register dma irq handler */ 1106 + ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); 1107 + if (ret) { 1108 + dev_err(&spi->dev, 1109 + "Unable to request BlackFin SPI DMA channel\n"); 1110 + goto error; 1111 + } 1112 + drv_data->dma_requested = 1; 1113 + 1114 + ret = set_dma_callback(drv_data->dma_channel, 1115 + bfin_spi_dma_irq_handler, drv_data); 1116 + if (ret) { 1117 + dev_err(&spi->dev, "Unable to set dma callback\n"); 1118 + goto error; 1119 + } 1120 + dma_disable_irq(drv_data->dma_channel); 1121 + } 1122 + 1123 + if (chip->pio_interrupt && !drv_data->irq_requested) { 1124 + ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, 1125 + IRQF_DISABLED, "BFIN_SPI", drv_data); 1126 + if (ret) { 1127 + dev_err(&spi->dev, "Unable to register spi IRQ\n"); 1128 + goto error; 1129 + } 1130 + drv_data->irq_requested = 1; 1131 + /* we 
use write mode, spi irq has to be disabled here */ 1132 + disable_irq(drv_data->spi_irq); 1133 + } 1134 + 1135 + if (chip->chip_select_num >= MAX_CTRL_CS) { 1057 1136 ret = gpio_request(chip->cs_gpio, spi->modalias); 1058 1137 if (ret) { 1059 - if (drv_data->dma_requested) 1060 - free_dma(drv_data->dma_channel); 1061 - return ret; 1138 + dev_err(&spi->dev, "gpio_request() error\n"); 1139 + goto pin_error; 1062 1140 } 1063 1141 gpio_direction_output(chip->cs_gpio, 1); 1064 1142 } 1065 1143 1066 - switch (chip->bits_per_word) { 1067 - case 8: 1068 - chip->n_bytes = 1; 1069 - chip->width = CFG_SPI_WORDSIZE8; 1070 - chip->read = chip->cs_change_per_word ? 1071 - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; 1072 - chip->write = chip->cs_change_per_word ? 1073 - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; 1074 - chip->duplex = chip->cs_change_per_word ? 1075 - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; 1076 - break; 1077 - 1078 - case 16: 1079 - chip->n_bytes = 2; 1080 - chip->width = CFG_SPI_WORDSIZE16; 1081 - chip->read = chip->cs_change_per_word ? 1082 - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; 1083 - chip->write = chip->cs_change_per_word ? 1084 - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; 1085 - chip->duplex = chip->cs_change_per_word ? 
1086 - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; 1087 - break; 1088 - 1089 - default: 1090 - dev_err(&spi->dev, "%d bits_per_word is not supported\n", 1091 - chip->bits_per_word); 1092 - if (chip_info) 1093 - kfree(chip); 1094 - return -ENODEV; 1095 - } 1096 - 1097 1144 dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", 1098 - spi->modalias, chip->width, chip->enable_dma); 1145 + spi->modalias, spi->bits_per_word, chip->enable_dma); 1099 1146 dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", 1100 1147 chip->ctl_reg, chip->flag); 1101 1148 1102 1149 spi_set_ctldata(spi, chip); 1103 1150 1104 1151 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); 1105 - if ((chip->chip_select_num > 0) 1106 - && (chip->chip_select_num <= spi->master->num_chipselect)) 1107 - peripheral_request(ssel[spi->master->bus_num] 1108 - [chip->chip_select_num-1], spi->modalias); 1152 + if (chip->chip_select_num < MAX_CTRL_CS) { 1153 + ret = peripheral_request(ssel[spi->master->bus_num] 1154 + [chip->chip_select_num-1], spi->modalias); 1155 + if (ret) { 1156 + dev_err(&spi->dev, "peripheral_request() error\n"); 1157 + goto pin_error; 1158 + } 1159 + } 1109 1160 1161 + bfin_spi_cs_enable(drv_data, chip); 1110 1162 bfin_spi_cs_deactive(drv_data, chip); 1111 1163 1112 1164 return 0; 1165 + 1166 + pin_error: 1167 + if (chip->chip_select_num >= MAX_CTRL_CS) 1168 + gpio_free(chip->cs_gpio); 1169 + else 1170 + peripheral_free(ssel[spi->master->bus_num] 1171 + [chip->chip_select_num - 1]); 1172 + error: 1173 + if (chip) { 1174 + if (drv_data->dma_requested) 1175 + free_dma(drv_data->dma_channel); 1176 + drv_data->dma_requested = 0; 1177 + 1178 + kfree(chip); 1179 + /* prevent free 'chip' twice */ 1180 + spi_set_ctldata(spi, NULL); 1181 + } 1182 + 1183 + return ret; 1113 1184 } 1114 1185 1115 1186 /* ··· 1155 1152 */ 1156 1153 static void bfin_spi_cleanup(struct spi_device *spi) 1157 1154 { 1158 - struct chip_data *chip = spi_get_ctldata(spi); 1155 + 
struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); 1156 + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 1159 1157 1160 1158 if (!chip) 1161 1159 return; 1162 1160 1163 - if ((chip->chip_select_num > 0) 1164 - && (chip->chip_select_num <= spi->master->num_chipselect)) 1161 + if (chip->chip_select_num < MAX_CTRL_CS) { 1165 1162 peripheral_free(ssel[spi->master->bus_num] 1166 1163 [chip->chip_select_num-1]); 1167 - 1168 - if (chip->chip_select_num == 0) 1164 + bfin_spi_cs_disable(drv_data, chip); 1165 + } else 1169 1166 gpio_free(chip->cs_gpio); 1170 1167 1171 1168 kfree(chip); 1169 + /* prevent free 'chip' twice */ 1170 + spi_set_ctldata(spi, NULL); 1172 1171 } 1173 1172 1174 - static inline int bfin_spi_init_queue(struct driver_data *drv_data) 1173 + static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) 1175 1174 { 1176 1175 INIT_LIST_HEAD(&drv_data->queue); 1177 1176 spin_lock_init(&drv_data->lock); 1178 1177 1179 - drv_data->run = QUEUE_STOPPED; 1178 + drv_data->running = false; 1180 1179 drv_data->busy = 0; 1181 1180 1182 1181 /* init transfer tasklet */ ··· 1195 1190 return 0; 1196 1191 } 1197 1192 1198 - static inline int bfin_spi_start_queue(struct driver_data *drv_data) 1193 + static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) 1199 1194 { 1200 1195 unsigned long flags; 1201 1196 1202 1197 spin_lock_irqsave(&drv_data->lock, flags); 1203 1198 1204 - if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { 1199 + if (drv_data->running || drv_data->busy) { 1205 1200 spin_unlock_irqrestore(&drv_data->lock, flags); 1206 1201 return -EBUSY; 1207 1202 } 1208 1203 1209 - drv_data->run = QUEUE_RUNNING; 1204 + drv_data->running = true; 1210 1205 drv_data->cur_msg = NULL; 1211 1206 drv_data->cur_transfer = NULL; 1212 1207 drv_data->cur_chip = NULL; ··· 1217 1212 return 0; 1218 1213 } 1219 1214 1220 - static inline int bfin_spi_stop_queue(struct driver_data *drv_data) 1215 + static 
inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) 1221 1216 { 1222 1217 unsigned long flags; 1223 1218 unsigned limit = 500; ··· 1231 1226 * execution path (pump_messages) would be required to call wake_up or 1232 1227 * friends on every SPI message. Do this instead 1233 1228 */ 1234 - drv_data->run = QUEUE_STOPPED; 1229 + drv_data->running = false; 1235 1230 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { 1236 1231 spin_unlock_irqrestore(&drv_data->lock, flags); 1237 1232 msleep(10); ··· 1246 1241 return status; 1247 1242 } 1248 1243 1249 - static inline int bfin_spi_destroy_queue(struct driver_data *drv_data) 1244 + static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) 1250 1245 { 1251 1246 int status; 1252 1247 ··· 1264 1259 struct device *dev = &pdev->dev; 1265 1260 struct bfin5xx_spi_master *platform_info; 1266 1261 struct spi_master *master; 1267 - struct driver_data *drv_data = 0; 1262 + struct bfin_spi_master_data *drv_data; 1268 1263 struct resource *res; 1269 1264 int status = 0; 1270 1265 1271 1266 platform_info = dev->platform_data; 1272 1267 1273 1268 /* Allocate master with space for drv_data */ 1274 - master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); 1269 + master = spi_alloc_master(dev, sizeof(*drv_data)); 1275 1270 if (!master) { 1276 1271 dev_err(&pdev->dev, "can not alloc spi_master\n"); 1277 1272 return -ENOMEM; ··· 1307 1302 goto out_error_ioremap; 1308 1303 } 1309 1304 1310 - drv_data->dma_channel = platform_get_irq(pdev, 0); 1311 - if (drv_data->dma_channel < 0) { 1305 + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1306 + if (res == NULL) { 1312 1307 dev_err(dev, "No DMA channel specified\n"); 1313 1308 status = -ENOENT; 1314 - goto out_error_no_dma_ch; 1309 + goto out_error_free_io; 1310 + } 1311 + drv_data->dma_channel = res->start; 1312 + 1313 + drv_data->spi_irq = platform_get_irq(pdev, 0); 1314 + if (drv_data->spi_irq < 0) { 1315 + dev_err(dev, "No 
spi pio irq specified\n"); 1316 + status = -ENOENT; 1317 + goto out_error_free_io; 1315 1318 } 1316 1319 1317 1320 /* Initial and start queue */ ··· 1341 1328 goto out_error_queue_alloc; 1342 1329 } 1343 1330 1331 + /* Reset SPI registers. If these registers were used by the boot loader, 1332 + * the sky may fall on your head if you enable the dma controller. 1333 + */ 1334 + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); 1335 + write_FLAG(drv_data, 0xFF00); 1336 + 1344 1337 /* Register with the SPI framework */ 1345 1338 platform_set_drvdata(pdev, drv_data); 1346 1339 status = spi_register_master(master); ··· 1362 1343 1363 1344 out_error_queue_alloc: 1364 1345 bfin_spi_destroy_queue(drv_data); 1365 - out_error_no_dma_ch: 1346 + out_error_free_io: 1366 1347 iounmap((void *) drv_data->regs_base); 1367 1348 out_error_ioremap: 1368 1349 out_error_get_res: ··· 1374 1355 /* stop hardware and remove the driver */ 1375 1356 static int __devexit bfin_spi_remove(struct platform_device *pdev) 1376 1357 { 1377 - struct driver_data *drv_data = platform_get_drvdata(pdev); 1358 + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1378 1359 int status = 0; 1379 1360 1380 1361 if (!drv_data) ··· 1394 1375 free_dma(drv_data->dma_channel); 1395 1376 } 1396 1377 1378 + if (drv_data->irq_requested) { 1379 + free_irq(drv_data->spi_irq, drv_data); 1380 + drv_data->irq_requested = 0; 1381 + } 1382 + 1397 1383 /* Disconnect from the SPI framework */ 1398 1384 spi_unregister_master(drv_data->master); 1399 1385 ··· 1413 1389 #ifdef CONFIG_PM 1414 1390 static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) 1415 1391 { 1416 - struct driver_data *drv_data = platform_get_drvdata(pdev); 1392 + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1417 1393 int status = 0; 1418 1394 1419 1395 status = bfin_spi_stop_queue(drv_data); 1420 1396 if (status != 0) 1421 1397 return status; 1422 1398 1423 - /* stop hardware */ 1424 - 
bfin_spi_disable(drv_data); 1399 + drv_data->ctrl_reg = read_CTRL(drv_data); 1400 + drv_data->flag_reg = read_FLAG(drv_data); 1401 + 1402 + /* 1403 + * reset SPI_CTL and SPI_FLG registers 1404 + */ 1405 + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); 1406 + write_FLAG(drv_data, 0xFF00); 1425 1407 1426 1408 return 0; 1427 1409 } 1428 1410 1429 1411 static int bfin_spi_resume(struct platform_device *pdev) 1430 1412 { 1431 - struct driver_data *drv_data = platform_get_drvdata(pdev); 1413 + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1432 1414 int status = 0; 1433 1415 1434 - /* Enable the SPI interface */ 1435 - bfin_spi_enable(drv_data); 1416 + write_CTRL(drv_data, drv_data->ctrl_reg); 1417 + write_FLAG(drv_data, drv_data->flag_reg); 1436 1418 1437 1419 /* Start the queue running */ 1438 1420 status = bfin_spi_start_queue(drv_data); ··· 1469 1439 { 1470 1440 return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); 1471 1441 } 1472 - module_init(bfin_spi_init); 1442 + subsys_initcall(bfin_spi_init); 1473 1443 1474 1444 static void __exit bfin_spi_exit(void) 1475 1445 {
+748
drivers/spi/spi_fsl_espi.c
··· 1 + /* 2 + * Freescale eSPI controller driver. 3 + * 4 + * Copyright 2010 Freescale Semiconductor, Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + */ 11 + #include <linux/module.h> 12 + #include <linux/delay.h> 13 + #include <linux/irq.h> 14 + #include <linux/spi/spi.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/fsl_devices.h> 17 + #include <linux/mm.h> 18 + #include <linux/of.h> 19 + #include <linux/of_platform.h> 20 + #include <linux/of_spi.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/err.h> 23 + #include <sysdev/fsl_soc.h> 24 + 25 + #include "spi_fsl_lib.h" 26 + 27 + /* eSPI Controller registers */ 28 + struct fsl_espi_reg { 29 + __be32 mode; /* 0x000 - eSPI mode register */ 30 + __be32 event; /* 0x004 - eSPI event register */ 31 + __be32 mask; /* 0x008 - eSPI mask register */ 32 + __be32 command; /* 0x00c - eSPI command register */ 33 + __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ 34 + __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ 35 + u8 res[8]; /* 0x018 - 0x01c reserved */ 36 + __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ 37 + }; 38 + 39 + struct fsl_espi_transfer { 40 + const void *tx_buf; 41 + void *rx_buf; 42 + unsigned len; 43 + unsigned n_tx; 44 + unsigned n_rx; 45 + unsigned actual_length; 46 + int status; 47 + }; 48 + 49 + /* eSPI Controller mode register definitions */ 50 + #define SPMODE_ENABLE (1 << 31) 51 + #define SPMODE_LOOP (1 << 30) 52 + #define SPMODE_TXTHR(x) ((x) << 8) 53 + #define SPMODE_RXTHR(x) ((x) << 0) 54 + 55 + /* eSPI Controller CS mode register definitions */ 56 + #define CSMODE_CI_INACTIVEHIGH (1 << 31) 57 + #define CSMODE_CP_BEGIN_EDGECLK (1 << 30) 58 + #define CSMODE_REV (1 << 29) 59 + #define CSMODE_DIV16 (1 << 28) 
60 + #define CSMODE_PM(x) ((x) << 24) 61 + #define CSMODE_POL_1 (1 << 20) 62 + #define CSMODE_LEN(x) ((x) << 16) 63 + #define CSMODE_BEF(x) ((x) << 12) 64 + #define CSMODE_AFT(x) ((x) << 8) 65 + #define CSMODE_CG(x) ((x) << 3) 66 + 67 + /* Default mode/csmode for eSPI controller */ 68 + #define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) 69 + #define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ 70 + | CSMODE_AFT(0) | CSMODE_CG(1)) 71 + 72 + /* SPIE register values */ 73 + #define SPIE_NE 0x00000200 /* Not empty */ 74 + #define SPIE_NF 0x00000100 /* Not full */ 75 + 76 + /* SPIM register values */ 77 + #define SPIM_NE 0x00000200 /* Not empty */ 78 + #define SPIM_NF 0x00000100 /* Not full */ 79 + #define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) 80 + #define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) 81 + 82 + /* SPCOM register values */ 83 + #define SPCOM_CS(x) ((x) << 30) 84 + #define SPCOM_TRANLEN(x) ((x) << 0) 85 + #define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ 86 + 87 + static void fsl_espi_change_mode(struct spi_device *spi) 88 + { 89 + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); 90 + struct spi_mpc8xxx_cs *cs = spi->controller_state; 91 + struct fsl_espi_reg *reg_base = mspi->reg_base; 92 + __be32 __iomem *mode = &reg_base->csmode[spi->chip_select]; 93 + __be32 __iomem *espi_mode = &reg_base->mode; 94 + u32 tmp; 95 + unsigned long flags; 96 + 97 + /* Turn off IRQs locally to minimize time that SPI is disabled. 
*/ 98 + local_irq_save(flags); 99 + 100 + /* Turn off SPI unit prior changing mode */ 101 + tmp = mpc8xxx_spi_read_reg(espi_mode); 102 + mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); 103 + mpc8xxx_spi_write_reg(mode, cs->hw_mode); 104 + mpc8xxx_spi_write_reg(espi_mode, tmp); 105 + 106 + local_irq_restore(flags); 107 + } 108 + 109 + static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) 110 + { 111 + u32 data; 112 + u16 data_h; 113 + u16 data_l; 114 + const u32 *tx = mpc8xxx_spi->tx; 115 + 116 + if (!tx) 117 + return 0; 118 + 119 + data = *tx++ << mpc8xxx_spi->tx_shift; 120 + data_l = data & 0xffff; 121 + data_h = (data >> 16) & 0xffff; 122 + swab16s(&data_l); 123 + swab16s(&data_h); 124 + data = data_h | data_l; 125 + 126 + mpc8xxx_spi->tx = tx; 127 + return data; 128 + } 129 + 130 + static int fsl_espi_setup_transfer(struct spi_device *spi, 131 + struct spi_transfer *t) 132 + { 133 + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 134 + int bits_per_word = 0; 135 + u8 pm; 136 + u32 hz = 0; 137 + struct spi_mpc8xxx_cs *cs = spi->controller_state; 138 + 139 + if (t) { 140 + bits_per_word = t->bits_per_word; 141 + hz = t->speed_hz; 142 + } 143 + 144 + /* spi_transfer level calls that work per-word */ 145 + if (!bits_per_word) 146 + bits_per_word = spi->bits_per_word; 147 + 148 + /* Make sure its a bit width we support [4..16] */ 149 + if ((bits_per_word < 4) || (bits_per_word > 16)) 150 + return -EINVAL; 151 + 152 + if (!hz) 153 + hz = spi->max_speed_hz; 154 + 155 + cs->rx_shift = 0; 156 + cs->tx_shift = 0; 157 + cs->get_rx = mpc8xxx_spi_rx_buf_u32; 158 + cs->get_tx = mpc8xxx_spi_tx_buf_u32; 159 + if (bits_per_word <= 8) { 160 + cs->rx_shift = 8 - bits_per_word; 161 + } else if (bits_per_word <= 16) { 162 + cs->rx_shift = 16 - bits_per_word; 163 + if (spi->mode & SPI_LSB_FIRST) 164 + cs->get_tx = fsl_espi_tx_buf_lsb; 165 + } else { 166 + return -EINVAL; 167 + } 168 + 169 + mpc8xxx_spi->rx_shift = cs->rx_shift; 170 + 
mpc8xxx_spi->tx_shift = cs->tx_shift; 171 + mpc8xxx_spi->get_rx = cs->get_rx; 172 + mpc8xxx_spi->get_tx = cs->get_tx; 173 + 174 + bits_per_word = bits_per_word - 1; 175 + 176 + /* mask out bits we are going to set */ 177 + cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); 178 + 179 + cs->hw_mode |= CSMODE_LEN(bits_per_word); 180 + 181 + if ((mpc8xxx_spi->spibrg / hz) > 64) { 182 + cs->hw_mode |= CSMODE_DIV16; 183 + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; 184 + 185 + WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " 186 + "Will use %d Hz instead.\n", dev_name(&spi->dev), 187 + hz, mpc8xxx_spi->spibrg / 1024); 188 + if (pm > 16) 189 + pm = 16; 190 + } else { 191 + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; 192 + } 193 + if (pm) 194 + pm--; 195 + 196 + cs->hw_mode |= CSMODE_PM(pm); 197 + 198 + fsl_espi_change_mode(spi); 199 + return 0; 200 + } 201 + 202 + static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, 203 + unsigned int len) 204 + { 205 + u32 word; 206 + struct fsl_espi_reg *reg_base = mspi->reg_base; 207 + 208 + mspi->count = len; 209 + 210 + /* enable rx ints */ 211 + mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE); 212 + 213 + /* transmit word */ 214 + word = mspi->get_tx(mspi); 215 + mpc8xxx_spi_write_reg(&reg_base->transmit, word); 216 + 217 + return 0; 218 + } 219 + 220 + static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) 221 + { 222 + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 223 + struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; 224 + unsigned int len = t->len; 225 + u8 bits_per_word; 226 + int ret; 227 + 228 + bits_per_word = spi->bits_per_word; 229 + if (t->bits_per_word) 230 + bits_per_word = t->bits_per_word; 231 + 232 + mpc8xxx_spi->len = t->len; 233 + len = roundup(len, 4) / 4; 234 + 235 + mpc8xxx_spi->tx = t->tx_buf; 236 + mpc8xxx_spi->rx = t->rx_buf; 237 + 238 + INIT_COMPLETION(mpc8xxx_spi->done); 239 + 240 + /* Set SPCOM[CS] 
and SPCOM[TRANLEN] field */ 241 + if ((t->len - 1) > SPCOM_TRANLEN_MAX) { 242 + dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" 243 + " beyond the SPCOM[TRANLEN] field\n", t->len); 244 + return -EINVAL; 245 + } 246 + mpc8xxx_spi_write_reg(&reg_base->command, 247 + (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); 248 + 249 + ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); 250 + if (ret) 251 + return ret; 252 + 253 + wait_for_completion(&mpc8xxx_spi->done); 254 + 255 + /* disable rx ints */ 256 + mpc8xxx_spi_write_reg(&reg_base->mask, 0); 257 + 258 + return mpc8xxx_spi->count; 259 + } 260 + 261 + static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) 262 + { 263 + if (cmd[1] && cmd[2] && cmd[3]) { 264 + cmd[1] = (u8)(addr >> 16); 265 + cmd[2] = (u8)(addr >> 8); 266 + cmd[3] = (u8)(addr >> 0); 267 + } 268 + } 269 + 270 + static unsigned int fsl_espi_cmd2addr(u8 *cmd) 271 + { 272 + if (cmd[1] && cmd[2] && cmd[3]) 273 + return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; 274 + 275 + return 0; 276 + } 277 + 278 + static void fsl_espi_do_trans(struct spi_message *m, 279 + struct fsl_espi_transfer *tr) 280 + { 281 + struct spi_device *spi = m->spi; 282 + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); 283 + struct fsl_espi_transfer *espi_trans = tr; 284 + struct spi_message message; 285 + struct spi_transfer *t, *first, trans; 286 + int status = 0; 287 + 288 + spi_message_init(&message); 289 + memset(&trans, 0, sizeof(trans)); 290 + 291 + first = list_first_entry(&m->transfers, struct spi_transfer, 292 + transfer_list); 293 + list_for_each_entry(t, &m->transfers, transfer_list) { 294 + if ((first->bits_per_word != t->bits_per_word) || 295 + (first->speed_hz != t->speed_hz)) { 296 + espi_trans->status = -EINVAL; 297 + dev_err(mspi->dev, "bits_per_word/speed_hz should be" 298 + " same for the same SPI transfer\n"); 299 + return; 300 + } 301 + 302 + trans.speed_hz = t->speed_hz; 303 + trans.bits_per_word = t->bits_per_word; 304 + trans.delay_usecs 
= max(first->delay_usecs, t->delay_usecs); 305 + } 306 + 307 + trans.len = espi_trans->len; 308 + trans.tx_buf = espi_trans->tx_buf; 309 + trans.rx_buf = espi_trans->rx_buf; 310 + spi_message_add_tail(&trans, &message); 311 + 312 + list_for_each_entry(t, &message.transfers, transfer_list) { 313 + if (t->bits_per_word || t->speed_hz) { 314 + status = -EINVAL; 315 + 316 + status = fsl_espi_setup_transfer(spi, t); 317 + if (status < 0) 318 + break; 319 + } 320 + 321 + if (t->len) 322 + status = fsl_espi_bufs(spi, t); 323 + 324 + if (status) { 325 + status = -EMSGSIZE; 326 + break; 327 + } 328 + 329 + if (t->delay_usecs) 330 + udelay(t->delay_usecs); 331 + } 332 + 333 + espi_trans->status = status; 334 + fsl_espi_setup_transfer(spi, NULL); 335 + } 336 + 337 + static void fsl_espi_cmd_trans(struct spi_message *m, 338 + struct fsl_espi_transfer *trans, u8 *rx_buff) 339 + { 340 + struct spi_transfer *t; 341 + u8 *local_buf; 342 + int i = 0; 343 + struct fsl_espi_transfer *espi_trans = trans; 344 + 345 + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); 346 + if (!local_buf) { 347 + espi_trans->status = -ENOMEM; 348 + return; 349 + } 350 + 351 + list_for_each_entry(t, &m->transfers, transfer_list) { 352 + if (t->tx_buf) { 353 + memcpy(local_buf + i, t->tx_buf, t->len); 354 + i += t->len; 355 + } 356 + } 357 + 358 + espi_trans->tx_buf = local_buf; 359 + espi_trans->rx_buf = local_buf + espi_trans->n_tx; 360 + fsl_espi_do_trans(m, espi_trans); 361 + 362 + espi_trans->actual_length = espi_trans->len; 363 + kfree(local_buf); 364 + } 365 + 366 + static void fsl_espi_rw_trans(struct spi_message *m, 367 + struct fsl_espi_transfer *trans, u8 *rx_buff) 368 + { 369 + struct fsl_espi_transfer *espi_trans = trans; 370 + unsigned int n_tx = espi_trans->n_tx; 371 + unsigned int n_rx = espi_trans->n_rx; 372 + struct spi_transfer *t; 373 + u8 *local_buf; 374 + u8 *rx_buf = rx_buff; 375 + unsigned int trans_len; 376 + unsigned int addr; 377 + int i, pos, loop; 378 + 379 + local_buf = 
kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); 380 + if (!local_buf) { 381 + espi_trans->status = -ENOMEM; 382 + return; 383 + } 384 + 385 + for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { 386 + trans_len = n_rx - pos; 387 + if (trans_len > SPCOM_TRANLEN_MAX - n_tx) 388 + trans_len = SPCOM_TRANLEN_MAX - n_tx; 389 + 390 + i = 0; 391 + list_for_each_entry(t, &m->transfers, transfer_list) { 392 + if (t->tx_buf) { 393 + memcpy(local_buf + i, t->tx_buf, t->len); 394 + i += t->len; 395 + } 396 + } 397 + 398 + addr = fsl_espi_cmd2addr(local_buf); 399 + addr += pos; 400 + fsl_espi_addr2cmd(addr, local_buf); 401 + 402 + espi_trans->n_tx = n_tx; 403 + espi_trans->n_rx = trans_len; 404 + espi_trans->len = trans_len + n_tx; 405 + espi_trans->tx_buf = local_buf; 406 + espi_trans->rx_buf = local_buf + n_tx; 407 + fsl_espi_do_trans(m, espi_trans); 408 + 409 + memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); 410 + 411 + if (loop > 0) 412 + espi_trans->actual_length += espi_trans->len - n_tx; 413 + else 414 + espi_trans->actual_length += espi_trans->len; 415 + } 416 + 417 + kfree(local_buf); 418 + } 419 + 420 + static void fsl_espi_do_one_msg(struct spi_message *m) 421 + { 422 + struct spi_transfer *t; 423 + u8 *rx_buf = NULL; 424 + unsigned int n_tx = 0; 425 + unsigned int n_rx = 0; 426 + struct fsl_espi_transfer espi_trans; 427 + 428 + list_for_each_entry(t, &m->transfers, transfer_list) { 429 + if (t->tx_buf) 430 + n_tx += t->len; 431 + if (t->rx_buf) { 432 + n_rx += t->len; 433 + rx_buf = t->rx_buf; 434 + } 435 + } 436 + 437 + espi_trans.n_tx = n_tx; 438 + espi_trans.n_rx = n_rx; 439 + espi_trans.len = n_tx + n_rx; 440 + espi_trans.actual_length = 0; 441 + espi_trans.status = 0; 442 + 443 + if (!rx_buf) 444 + fsl_espi_cmd_trans(m, &espi_trans, NULL); 445 + else 446 + fsl_espi_rw_trans(m, &espi_trans, rx_buf); 447 + 448 + m->actual_length = espi_trans.actual_length; 449 + m->status = espi_trans.status; 450 + m->complete(m->context); 451 + } 452 + 453 + static 
int fsl_espi_setup(struct spi_device *spi) 454 + { 455 + struct mpc8xxx_spi *mpc8xxx_spi; 456 + struct fsl_espi_reg *reg_base; 457 + int retval; 458 + u32 hw_mode; 459 + u32 loop_mode; 460 + struct spi_mpc8xxx_cs *cs = spi->controller_state; 461 + 462 + if (!spi->max_speed_hz) 463 + return -EINVAL; 464 + 465 + if (!cs) { 466 + cs = kzalloc(sizeof *cs, GFP_KERNEL); 467 + if (!cs) 468 + return -ENOMEM; 469 + spi->controller_state = cs; 470 + } 471 + 472 + mpc8xxx_spi = spi_master_get_devdata(spi->master); 473 + reg_base = mpc8xxx_spi->reg_base; 474 + 475 + hw_mode = cs->hw_mode; /* Save orginal settings */ 476 + cs->hw_mode = mpc8xxx_spi_read_reg( 477 + &reg_base->csmode[spi->chip_select]); 478 + /* mask out bits we are going to set */ 479 + cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH 480 + | CSMODE_REV); 481 + 482 + if (spi->mode & SPI_CPHA) 483 + cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; 484 + if (spi->mode & SPI_CPOL) 485 + cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; 486 + if (!(spi->mode & SPI_LSB_FIRST)) 487 + cs->hw_mode |= CSMODE_REV; 488 + 489 + /* Handle the loop mode */ 490 + loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode); 491 + loop_mode &= ~SPMODE_LOOP; 492 + if (spi->mode & SPI_LOOP) 493 + loop_mode |= SPMODE_LOOP; 494 + mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode); 495 + 496 + retval = fsl_espi_setup_transfer(spi, NULL); 497 + if (retval < 0) { 498 + cs->hw_mode = hw_mode; /* Restore settings */ 499 + return retval; 500 + } 501 + return 0; 502 + } 503 + 504 + void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) 505 + { 506 + struct fsl_espi_reg *reg_base = mspi->reg_base; 507 + 508 + /* We need handle RX first */ 509 + if (events & SPIE_NE) { 510 + u32 rx_data; 511 + 512 + /* Spin until RX is done */ 513 + while (SPIE_RXCNT(events) < min(4, mspi->len)) { 514 + cpu_relax(); 515 + events = mpc8xxx_spi_read_reg(&reg_base->event); 516 + } 517 + mspi->len -= 4; 518 + 519 + rx_data = mpc8xxx_spi_read_reg(&reg_base->receive); 520 
+ 521 + if (mspi->rx) 522 + mspi->get_rx(rx_data, mspi); 523 + } 524 + 525 + if (!(events & SPIE_NF)) { 526 + int ret; 527 + 528 + /* spin until TX is done */ 529 + ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( 530 + &reg_base->event)) & SPIE_NF) == 0, 1000, 0); 531 + if (!ret) { 532 + dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); 533 + return; 534 + } 535 + } 536 + 537 + /* Clear the events */ 538 + mpc8xxx_spi_write_reg(&reg_base->event, events); 539 + 540 + mspi->count -= 1; 541 + if (mspi->count) { 542 + u32 word = mspi->get_tx(mspi); 543 + 544 + mpc8xxx_spi_write_reg(&reg_base->transmit, word); 545 + } else { 546 + complete(&mspi->done); 547 + } 548 + } 549 + 550 + static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) 551 + { 552 + struct mpc8xxx_spi *mspi = context_data; 553 + struct fsl_espi_reg *reg_base = mspi->reg_base; 554 + irqreturn_t ret = IRQ_NONE; 555 + u32 events; 556 + 557 + /* Get interrupt events(tx/rx) */ 558 + events = mpc8xxx_spi_read_reg(&reg_base->event); 559 + if (events) 560 + ret = IRQ_HANDLED; 561 + 562 + dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); 563 + 564 + fsl_espi_cpu_irq(mspi, events); 565 + 566 + return ret; 567 + } 568 + 569 + static void fsl_espi_remove(struct mpc8xxx_spi *mspi) 570 + { 571 + iounmap(mspi->reg_base); 572 + } 573 + 574 + static struct spi_master * __devinit fsl_espi_probe(struct device *dev, 575 + struct resource *mem, unsigned int irq) 576 + { 577 + struct fsl_spi_platform_data *pdata = dev->platform_data; 578 + struct spi_master *master; 579 + struct mpc8xxx_spi *mpc8xxx_spi; 580 + struct fsl_espi_reg *reg_base; 581 + u32 regval; 582 + int i, ret = 0; 583 + 584 + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); 585 + if (!master) { 586 + ret = -ENOMEM; 587 + goto err; 588 + } 589 + 590 + dev_set_drvdata(dev, master); 591 + 592 + ret = mpc8xxx_spi_probe(dev, mem, irq); 593 + if (ret) 594 + goto err_probe; 595 + 596 + master->setup = fsl_espi_setup; 597 + 598 + 
mpc8xxx_spi = spi_master_get_devdata(master); 599 + mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; 600 + mpc8xxx_spi->spi_remove = fsl_espi_remove; 601 + 602 + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); 603 + if (!mpc8xxx_spi->reg_base) { 604 + ret = -ENOMEM; 605 + goto err_probe; 606 + } 607 + 608 + reg_base = mpc8xxx_spi->reg_base; 609 + 610 + /* Register for SPI Interrupt */ 611 + ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, 612 + 0, "fsl_espi", mpc8xxx_spi); 613 + if (ret) 614 + goto free_irq; 615 + 616 + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { 617 + mpc8xxx_spi->rx_shift = 16; 618 + mpc8xxx_spi->tx_shift = 24; 619 + } 620 + 621 + /* SPI controller initializations */ 622 + mpc8xxx_spi_write_reg(&reg_base->mode, 0); 623 + mpc8xxx_spi_write_reg(&reg_base->mask, 0); 624 + mpc8xxx_spi_write_reg(&reg_base->command, 0); 625 + mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); 626 + 627 + /* Init eSPI CS mode register */ 628 + for (i = 0; i < pdata->max_chipselect; i++) 629 + mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL); 630 + 631 + /* Enable SPI interface */ 632 + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 633 + 634 + mpc8xxx_spi_write_reg(&reg_base->mode, regval); 635 + 636 + ret = spi_register_master(master); 637 + if (ret < 0) 638 + goto unreg_master; 639 + 640 + dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); 641 + 642 + return master; 643 + 644 + unreg_master: 645 + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 646 + free_irq: 647 + iounmap(mpc8xxx_spi->reg_base); 648 + err_probe: 649 + spi_master_put(master); 650 + err: 651 + return ERR_PTR(ret); 652 + } 653 + 654 + static int of_fsl_espi_get_chipselects(struct device *dev) 655 + { 656 + struct device_node *np = dev->of_node; 657 + struct fsl_spi_platform_data *pdata = dev->platform_data; 658 + const u32 *prop; 659 + int len; 660 + 661 + prop = of_get_property(np, "fsl,espi-num-chipselects", &len); 662 + if (!prop || len < 
sizeof(*prop)) { 663 + dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); 664 + return -EINVAL; 665 + } 666 + 667 + pdata->max_chipselect = *prop; 668 + pdata->cs_control = NULL; 669 + 670 + return 0; 671 + } 672 + 673 + static int __devinit of_fsl_espi_probe(struct platform_device *ofdev, 674 + const struct of_device_id *ofid) 675 + { 676 + struct device *dev = &ofdev->dev; 677 + struct device_node *np = ofdev->dev.of_node; 678 + struct spi_master *master; 679 + struct resource mem; 680 + struct resource irq; 681 + int ret = -ENOMEM; 682 + 683 + ret = of_mpc8xxx_spi_probe(ofdev, ofid); 684 + if (ret) 685 + return ret; 686 + 687 + ret = of_fsl_espi_get_chipselects(dev); 688 + if (ret) 689 + goto err; 690 + 691 + ret = of_address_to_resource(np, 0, &mem); 692 + if (ret) 693 + goto err; 694 + 695 + ret = of_irq_to_resource(np, 0, &irq); 696 + if (!ret) { 697 + ret = -EINVAL; 698 + goto err; 699 + } 700 + 701 + master = fsl_espi_probe(dev, &mem, irq.start); 702 + if (IS_ERR(master)) { 703 + ret = PTR_ERR(master); 704 + goto err; 705 + } 706 + 707 + return 0; 708 + 709 + err: 710 + return ret; 711 + } 712 + 713 + static int __devexit of_fsl_espi_remove(struct platform_device *dev) 714 + { 715 + return mpc8xxx_spi_remove(&dev->dev); 716 + } 717 + 718 + static const struct of_device_id of_fsl_espi_match[] = { 719 + { .compatible = "fsl,mpc8536-espi" }, 720 + {} 721 + }; 722 + MODULE_DEVICE_TABLE(of, of_fsl_espi_match); 723 + 724 + static struct of_platform_driver fsl_espi_driver = { 725 + .driver = { 726 + .name = "fsl_espi", 727 + .owner = THIS_MODULE, 728 + .of_match_table = of_fsl_espi_match, 729 + }, 730 + .probe = of_fsl_espi_probe, 731 + .remove = __devexit_p(of_fsl_espi_remove), 732 + }; 733 + 734 + static int __init fsl_espi_init(void) 735 + { 736 + return of_register_platform_driver(&fsl_espi_driver); 737 + } 738 + module_init(fsl_espi_init); 739 + 740 + static void __exit fsl_espi_exit(void) 741 + { 742 + of_unregister_platform_driver(&fsl_espi_driver); 
743 + } 744 + module_exit(fsl_espi_exit); 745 + 746 + MODULE_AUTHOR("Mingkai Hu"); 747 + MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); 748 + MODULE_LICENSE("GPL");
+237
drivers/spi/spi_fsl_lib.c
··· 1 + /* 2 + * Freescale SPI/eSPI controller driver library. 3 + * 4 + * Maintainer: Kumar Gala 5 + * 6 + * Copyright (C) 2006 Polycom, Inc. 7 + * 8 + * CPM SPI and QE buffer descriptors mode support: 9 + * Copyright (c) 2009 MontaVista Software, Inc. 10 + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> 11 + * 12 + * Copyright 2010 Freescale Semiconductor, Inc. 13 + * 14 + * This program is free software; you can redistribute it and/or modify it 15 + * under the terms of the GNU General Public License as published by the 16 + * Free Software Foundation; either version 2 of the License, or (at your 17 + * option) any later version. 18 + */ 19 + #include <linux/kernel.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/fsl_devices.h> 22 + #include <linux/dma-mapping.h> 23 + #include <linux/mm.h> 24 + #include <linux/of_platform.h> 25 + #include <linux/of_spi.h> 26 + #include <sysdev/fsl_soc.h> 27 + 28 + #include "spi_fsl_lib.h" 29 + 30 + #define MPC8XXX_SPI_RX_BUF(type) \ 31 + void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ 32 + { \ 33 + type *rx = mpc8xxx_spi->rx; \ 34 + *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ 35 + mpc8xxx_spi->rx = rx; \ 36 + } 37 + 38 + #define MPC8XXX_SPI_TX_BUF(type) \ 39 + u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ 40 + { \ 41 + u32 data; \ 42 + const type *tx = mpc8xxx_spi->tx; \ 43 + if (!tx) \ 44 + return 0; \ 45 + data = *tx++ << mpc8xxx_spi->tx_shift; \ 46 + mpc8xxx_spi->tx = tx; \ 47 + return data; \ 48 + } 49 + 50 + MPC8XXX_SPI_RX_BUF(u8) 51 + MPC8XXX_SPI_RX_BUF(u16) 52 + MPC8XXX_SPI_RX_BUF(u32) 53 + MPC8XXX_SPI_TX_BUF(u8) 54 + MPC8XXX_SPI_TX_BUF(u16) 55 + MPC8XXX_SPI_TX_BUF(u32) 56 + 57 + struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) 58 + { 59 + return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); 60 + } 61 + 62 + void mpc8xxx_spi_work(struct work_struct *work) 63 + { 64 + struct mpc8xxx_spi *mpc8xxx_spi = 
container_of(work, struct mpc8xxx_spi, 65 + work); 66 + 67 + spin_lock_irq(&mpc8xxx_spi->lock); 68 + while (!list_empty(&mpc8xxx_spi->queue)) { 69 + struct spi_message *m = container_of(mpc8xxx_spi->queue.next, 70 + struct spi_message, queue); 71 + 72 + list_del_init(&m->queue); 73 + spin_unlock_irq(&mpc8xxx_spi->lock); 74 + 75 + if (mpc8xxx_spi->spi_do_one_msg) 76 + mpc8xxx_spi->spi_do_one_msg(m); 77 + 78 + spin_lock_irq(&mpc8xxx_spi->lock); 79 + } 80 + spin_unlock_irq(&mpc8xxx_spi->lock); 81 + } 82 + 83 + int mpc8xxx_spi_transfer(struct spi_device *spi, 84 + struct spi_message *m) 85 + { 86 + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 87 + unsigned long flags; 88 + 89 + m->actual_length = 0; 90 + m->status = -EINPROGRESS; 91 + 92 + spin_lock_irqsave(&mpc8xxx_spi->lock, flags); 93 + list_add_tail(&m->queue, &mpc8xxx_spi->queue); 94 + queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); 95 + spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); 96 + 97 + return 0; 98 + } 99 + 100 + void mpc8xxx_spi_cleanup(struct spi_device *spi) 101 + { 102 + kfree(spi->controller_state); 103 + } 104 + 105 + const char *mpc8xxx_spi_strmode(unsigned int flags) 106 + { 107 + if (flags & SPI_QE_CPU_MODE) { 108 + return "QE CPU"; 109 + } else if (flags & SPI_CPM_MODE) { 110 + if (flags & SPI_QE) 111 + return "QE"; 112 + else if (flags & SPI_CPM2) 113 + return "CPM2"; 114 + else 115 + return "CPM1"; 116 + } 117 + return "CPU"; 118 + } 119 + 120 + int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 121 + unsigned int irq) 122 + { 123 + struct fsl_spi_platform_data *pdata = dev->platform_data; 124 + struct spi_master *master; 125 + struct mpc8xxx_spi *mpc8xxx_spi; 126 + int ret = 0; 127 + 128 + master = dev_get_drvdata(dev); 129 + 130 + /* the spi->mode bits understood by this driver: */ 131 + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH 132 + | SPI_LSB_FIRST | SPI_LOOP; 133 + 134 + master->transfer = mpc8xxx_spi_transfer; 135 + 
master->cleanup = mpc8xxx_spi_cleanup; 136 + master->dev.of_node = dev->of_node; 137 + 138 + mpc8xxx_spi = spi_master_get_devdata(master); 139 + mpc8xxx_spi->dev = dev; 140 + mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; 141 + mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; 142 + mpc8xxx_spi->flags = pdata->flags; 143 + mpc8xxx_spi->spibrg = pdata->sysclk; 144 + mpc8xxx_spi->irq = irq; 145 + 146 + mpc8xxx_spi->rx_shift = 0; 147 + mpc8xxx_spi->tx_shift = 0; 148 + 149 + init_completion(&mpc8xxx_spi->done); 150 + 151 + master->bus_num = pdata->bus_num; 152 + master->num_chipselect = pdata->max_chipselect; 153 + 154 + spin_lock_init(&mpc8xxx_spi->lock); 155 + init_completion(&mpc8xxx_spi->done); 156 + INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); 157 + INIT_LIST_HEAD(&mpc8xxx_spi->queue); 158 + 159 + mpc8xxx_spi->workqueue = create_singlethread_workqueue( 160 + dev_name(master->dev.parent)); 161 + if (mpc8xxx_spi->workqueue == NULL) { 162 + ret = -EBUSY; 163 + goto err; 164 + } 165 + 166 + return 0; 167 + 168 + err: 169 + return ret; 170 + } 171 + 172 + int __devexit mpc8xxx_spi_remove(struct device *dev) 173 + { 174 + struct mpc8xxx_spi *mpc8xxx_spi; 175 + struct spi_master *master; 176 + 177 + master = dev_get_drvdata(dev); 178 + mpc8xxx_spi = spi_master_get_devdata(master); 179 + 180 + flush_workqueue(mpc8xxx_spi->workqueue); 181 + destroy_workqueue(mpc8xxx_spi->workqueue); 182 + spi_unregister_master(master); 183 + 184 + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 185 + 186 + if (mpc8xxx_spi->spi_remove) 187 + mpc8xxx_spi->spi_remove(mpc8xxx_spi); 188 + 189 + return 0; 190 + } 191 + 192 + int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, 193 + const struct of_device_id *ofid) 194 + { 195 + struct device *dev = &ofdev->dev; 196 + struct device_node *np = ofdev->dev.of_node; 197 + struct mpc8xxx_spi_probe_info *pinfo; 198 + struct fsl_spi_platform_data *pdata; 199 + const void *prop; 200 + int ret = -ENOMEM; 201 + 202 + pinfo = kzalloc(sizeof(*pinfo), 
GFP_KERNEL); 203 + if (!pinfo) 204 + return -ENOMEM; 205 + 206 + pdata = &pinfo->pdata; 207 + dev->platform_data = pdata; 208 + 209 + /* Allocate bus num dynamically. */ 210 + pdata->bus_num = -1; 211 + 212 + /* SPI controller is either clocked from QE or SoC clock. */ 213 + pdata->sysclk = get_brgfreq(); 214 + if (pdata->sysclk == -1) { 215 + pdata->sysclk = fsl_get_sys_freq(); 216 + if (pdata->sysclk == -1) { 217 + ret = -ENODEV; 218 + goto err; 219 + } 220 + } 221 + 222 + prop = of_get_property(np, "mode", NULL); 223 + if (prop && !strcmp(prop, "cpu-qe")) 224 + pdata->flags = SPI_QE_CPU_MODE; 225 + else if (prop && !strcmp(prop, "qe")) 226 + pdata->flags = SPI_CPM_MODE | SPI_QE; 227 + else if (of_device_is_compatible(np, "fsl,cpm2-spi")) 228 + pdata->flags = SPI_CPM_MODE | SPI_CPM2; 229 + else if (of_device_is_compatible(np, "fsl,cpm1-spi")) 230 + pdata->flags = SPI_CPM_MODE | SPI_CPM1; 231 + 232 + return 0; 233 + 234 + err: 235 + kfree(pinfo); 236 + return ret; 237 + }
+124
drivers/spi/spi_fsl_lib.h
··· 1 + /* 2 + * Freescale SPI/eSPI controller driver library. 3 + * 4 + * Maintainer: Kumar Gala 5 + * 6 + * Copyright 2010 Freescale Semiconductor, Inc. 7 + * Copyright (C) 2006 Polycom, Inc. 8 + * 9 + * CPM SPI and QE buffer descriptors mode support: 10 + * Copyright (c) 2009 MontaVista Software, Inc. 11 + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> 12 + * 13 + * This program is free software; you can redistribute it and/or modify it 14 + * under the terms of the GNU General Public License as published by the 15 + * Free Software Foundation; either version 2 of the License, or (at your 16 + * option) any later version. 17 + */ 18 + #ifndef __SPI_FSL_LIB_H__ 19 + #define __SPI_FSL_LIB_H__ 20 + 21 + #include <asm/io.h> 22 + 23 + /* SPI/eSPI Controller driver's private data. */ 24 + struct mpc8xxx_spi { 25 + struct device *dev; 26 + void *reg_base; 27 + 28 + /* rx & tx bufs from the spi_transfer */ 29 + const void *tx; 30 + void *rx; 31 + #ifdef CONFIG_SPI_FSL_ESPI 32 + int len; 33 + #endif 34 + 35 + int subblock; 36 + struct spi_pram __iomem *pram; 37 + struct cpm_buf_desc __iomem *tx_bd; 38 + struct cpm_buf_desc __iomem *rx_bd; 39 + 40 + struct spi_transfer *xfer_in_progress; 41 + 42 + /* dma addresses for CPM transfers */ 43 + dma_addr_t tx_dma; 44 + dma_addr_t rx_dma; 45 + bool map_tx_dma; 46 + bool map_rx_dma; 47 + 48 + dma_addr_t dma_dummy_tx; 49 + dma_addr_t dma_dummy_rx; 50 + 51 + /* functions to deal with different sized buffers */ 52 + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 53 + u32(*get_tx) (struct mpc8xxx_spi *); 54 + 55 + /* hooks for different controller driver */ 56 + void (*spi_do_one_msg) (struct spi_message *m); 57 + void (*spi_remove) (struct mpc8xxx_spi *mspi); 58 + 59 + unsigned int count; 60 + unsigned int irq; 61 + 62 + unsigned nsecs; /* (clock cycle time)/2 */ 63 + 64 + u32 spibrg; /* SPIBRG input clock */ 65 + u32 rx_shift; /* RX data reg shift when in qe mode */ 66 + u32 tx_shift; /* TX data reg shift when in qe mode 
*/ 67 + 68 + unsigned int flags; 69 + 70 + struct workqueue_struct *workqueue; 71 + struct work_struct work; 72 + 73 + struct list_head queue; 74 + spinlock_t lock; 75 + 76 + struct completion done; 77 + }; 78 + 79 + struct spi_mpc8xxx_cs { 80 + /* functions to deal with different sized buffers */ 81 + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 82 + u32 (*get_tx) (struct mpc8xxx_spi *); 83 + u32 rx_shift; /* RX data reg shift when in qe mode */ 84 + u32 tx_shift; /* TX data reg shift when in qe mode */ 85 + u32 hw_mode; /* Holds HW mode register settings */ 86 + }; 87 + 88 + static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) 89 + { 90 + out_be32(reg, val); 91 + } 92 + 93 + static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) 94 + { 95 + return in_be32(reg); 96 + } 97 + 98 + struct mpc8xxx_spi_probe_info { 99 + struct fsl_spi_platform_data pdata; 100 + int *gpios; 101 + bool *alow_flags; 102 + }; 103 + 104 + extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); 105 + extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); 106 + extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); 107 + extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); 108 + extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); 109 + extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); 110 + 111 + extern struct mpc8xxx_spi_probe_info *to_of_pinfo( 112 + struct fsl_spi_platform_data *pdata); 113 + extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, 114 + struct spi_transfer *t, unsigned int len); 115 + extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); 116 + extern void mpc8xxx_spi_cleanup(struct spi_device *spi); 117 + extern const char *mpc8xxx_spi_strmode(unsigned int flags); 118 + extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 119 + unsigned int irq); 120 + extern int 
mpc8xxx_spi_remove(struct device *dev); 121 + extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev, 122 + const struct of_device_id *ofid); 123 + 124 + #endif /* __SPI_FSL_LIB_H__ */
+160 -392
drivers/spi/spi_mpc8xxx.c drivers/spi/spi_fsl_spi.c
··· 1 1 /* 2 - * MPC8xxx SPI controller driver. 2 + * Freescale SPI controller driver. 3 3 * 4 4 * Maintainer: Kumar Gala 5 5 * 6 6 * Copyright (C) 2006 Polycom, Inc. 7 + * Copyright 2010 Freescale Semiconductor, Inc. 7 8 * 8 9 * CPM SPI and QE buffer descriptors mode support: 9 10 * Copyright (c) 2009 MontaVista Software, Inc. ··· 16 15 * option) any later version. 17 16 */ 18 17 #include <linux/module.h> 19 - #include <linux/init.h> 20 18 #include <linux/types.h> 21 19 #include <linux/kernel.h> 22 - #include <linux/bug.h> 23 - #include <linux/errno.h> 24 - #include <linux/err.h> 25 - #include <linux/io.h> 26 - #include <linux/completion.h> 27 20 #include <linux/interrupt.h> 28 21 #include <linux/delay.h> 29 22 #include <linux/irq.h> 30 - #include <linux/device.h> 31 23 #include <linux/spi/spi.h> 32 24 #include <linux/spi/spi_bitbang.h> 33 25 #include <linux/platform_device.h> ··· 32 38 #include <linux/of_platform.h> 33 39 #include <linux/gpio.h> 34 40 #include <linux/of_gpio.h> 35 - #include <linux/slab.h> 36 41 37 42 #include <sysdev/fsl_soc.h> 38 43 #include <asm/cpm.h> 39 44 #include <asm/qe.h> 40 - #include <asm/irq.h> 45 + 46 + #include "spi_fsl_lib.h" 41 47 42 48 /* CPM1 and CPM2 are mutually exclusive. */ 43 49 #ifdef CONFIG_CPM1 ··· 49 55 #endif 50 56 51 57 /* SPI Controller registers */ 52 - struct mpc8xxx_spi_reg { 58 + struct fsl_spi_reg { 53 59 u8 res1[0x20]; 54 60 __be32 mode; 55 61 __be32 event; ··· 74 80 75 81 /* 76 82 * Default for SPI Mode: 77 - * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk 83 + * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk 78 84 */ 79 85 #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ 80 86 SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) ··· 96 102 #define SPI_PRAM_SIZE 0x100 97 103 #define SPI_MRBLR ((unsigned int)PAGE_SIZE) 98 104 99 - /* SPI Controller driver's private data. 
*/ 100 - struct mpc8xxx_spi { 101 - struct device *dev; 102 - struct mpc8xxx_spi_reg __iomem *base; 105 + static void *fsl_dummy_rx; 106 + static DEFINE_MUTEX(fsl_dummy_rx_lock); 107 + static int fsl_dummy_rx_refcnt; 103 108 104 - /* rx & tx bufs from the spi_transfer */ 105 - const void *tx; 106 - void *rx; 107 - 108 - int subblock; 109 - struct spi_pram __iomem *pram; 110 - struct cpm_buf_desc __iomem *tx_bd; 111 - struct cpm_buf_desc __iomem *rx_bd; 112 - 113 - struct spi_transfer *xfer_in_progress; 114 - 115 - /* dma addresses for CPM transfers */ 116 - dma_addr_t tx_dma; 117 - dma_addr_t rx_dma; 118 - bool map_tx_dma; 119 - bool map_rx_dma; 120 - 121 - dma_addr_t dma_dummy_tx; 122 - dma_addr_t dma_dummy_rx; 123 - 124 - /* functions to deal with different sized buffers */ 125 - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 126 - u32(*get_tx) (struct mpc8xxx_spi *); 127 - 128 - unsigned int count; 129 - unsigned int irq; 130 - 131 - unsigned nsecs; /* (clock cycle time)/2 */ 132 - 133 - u32 spibrg; /* SPIBRG input clock */ 134 - u32 rx_shift; /* RX data reg shift when in qe mode */ 135 - u32 tx_shift; /* TX data reg shift when in qe mode */ 136 - 137 - unsigned int flags; 138 - 139 - struct workqueue_struct *workqueue; 140 - struct work_struct work; 141 - 142 - struct list_head queue; 143 - spinlock_t lock; 144 - 145 - struct completion done; 146 - }; 147 - 148 - static void *mpc8xxx_dummy_rx; 149 - static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock); 150 - static int mpc8xxx_dummy_rx_refcnt; 151 - 152 - struct spi_mpc8xxx_cs { 153 - /* functions to deal with different sized buffers */ 154 - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 155 - u32 (*get_tx) (struct mpc8xxx_spi *); 156 - u32 rx_shift; /* RX data reg shift when in qe mode */ 157 - u32 tx_shift; /* TX data reg shift when in qe mode */ 158 - u32 hw_mode; /* Holds HW mode register settings */ 159 - }; 160 - 161 - static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) 162 - { 163 - 
out_be32(reg, val); 164 - } 165 - 166 - static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) 167 - { 168 - return in_be32(reg); 169 - } 170 - 171 - #define MPC83XX_SPI_RX_BUF(type) \ 172 - static \ 173 - void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ 174 - { \ 175 - type *rx = mpc8xxx_spi->rx; \ 176 - *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ 177 - mpc8xxx_spi->rx = rx; \ 178 - } 179 - 180 - #define MPC83XX_SPI_TX_BUF(type) \ 181 - static \ 182 - u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ 183 - { \ 184 - u32 data; \ 185 - const type *tx = mpc8xxx_spi->tx; \ 186 - if (!tx) \ 187 - return 0; \ 188 - data = *tx++ << mpc8xxx_spi->tx_shift; \ 189 - mpc8xxx_spi->tx = tx; \ 190 - return data; \ 191 - } 192 - 193 - MPC83XX_SPI_RX_BUF(u8) 194 - MPC83XX_SPI_RX_BUF(u16) 195 - MPC83XX_SPI_RX_BUF(u32) 196 - MPC83XX_SPI_TX_BUF(u8) 197 - MPC83XX_SPI_TX_BUF(u16) 198 - MPC83XX_SPI_TX_BUF(u32) 199 - 200 - static void mpc8xxx_spi_change_mode(struct spi_device *spi) 109 + static void fsl_spi_change_mode(struct spi_device *spi) 201 110 { 202 111 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); 203 112 struct spi_mpc8xxx_cs *cs = spi->controller_state; 204 - __be32 __iomem *mode = &mspi->base->mode; 113 + struct fsl_spi_reg *reg_base = mspi->reg_base; 114 + __be32 __iomem *mode = &reg_base->mode; 205 115 unsigned long flags; 206 116 207 117 if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) ··· 136 238 local_irq_restore(flags); 137 239 } 138 240 139 - static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) 241 + static void fsl_spi_chipselect(struct spi_device *spi, int value) 140 242 { 141 243 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 142 244 struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; ··· 154 256 mpc8xxx_spi->get_rx = cs->get_rx; 155 257 mpc8xxx_spi->get_tx = cs->get_tx; 156 258 157 - mpc8xxx_spi_change_mode(spi); 259 + 
fsl_spi_change_mode(spi); 158 260 159 261 if (pdata->cs_control) 160 262 pdata->cs_control(spi, pol); 161 263 } 162 264 } 163 265 164 - static int 165 - mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, 166 - struct spi_device *spi, 167 - struct mpc8xxx_spi *mpc8xxx_spi, 168 - int bits_per_word) 266 + static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, 267 + struct spi_device *spi, 268 + struct mpc8xxx_spi *mpc8xxx_spi, 269 + int bits_per_word) 169 270 { 170 271 cs->rx_shift = 0; 171 272 cs->tx_shift = 0; ··· 204 307 return bits_per_word; 205 308 } 206 309 207 - static int 208 - mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, 209 - struct spi_device *spi, 210 - int bits_per_word) 310 + static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, 311 + struct spi_device *spi, 312 + int bits_per_word) 211 313 { 212 314 /* QE uses Little Endian for words > 8 213 315 * so transform all words > 8 into 8 bits ··· 222 326 return bits_per_word; 223 327 } 224 328 225 - static 226 - int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 329 + static int fsl_spi_setup_transfer(struct spi_device *spi, 330 + struct spi_transfer *t) 227 331 { 228 332 struct mpc8xxx_spi *mpc8xxx_spi; 229 - int bits_per_word; 333 + int bits_per_word = 0; 230 334 u8 pm; 231 - u32 hz; 335 + u32 hz = 0; 232 336 struct spi_mpc8xxx_cs *cs = spi->controller_state; 233 337 234 338 mpc8xxx_spi = spi_master_get_devdata(spi->master); ··· 236 340 if (t) { 237 341 bits_per_word = t->bits_per_word; 238 342 hz = t->speed_hz; 239 - } else { 240 - bits_per_word = 0; 241 - hz = 0; 242 343 } 243 344 244 345 /* spi_transfer level calls that work per-word */ ··· 281 388 hz, mpc8xxx_spi->spibrg / 1024); 282 389 if (pm > 16) 283 390 pm = 16; 284 - } else 391 + } else { 285 392 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; 393 + } 286 394 if (pm) 287 395 pm--; 288 396 289 397 cs->hw_mode |= SPMODE_PM(pm); 290 398 291 - mpc8xxx_spi_change_mode(spi); 399 + 
fsl_spi_change_mode(spi); 292 400 return 0; 293 401 } 294 402 295 - static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) 403 + static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) 296 404 { 297 405 struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; 298 406 struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; 299 407 unsigned int xfer_len = min(mspi->count, SPI_MRBLR); 300 408 unsigned int xfer_ofs; 409 + struct fsl_spi_reg *reg_base = mspi->reg_base; 301 410 302 411 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 303 412 ··· 319 424 BD_SC_LAST); 320 425 321 426 /* start transfer */ 322 - mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR); 427 + mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR); 323 428 } 324 429 325 - static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, 430 + static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, 326 431 struct spi_transfer *t, bool is_dma_mapped) 327 432 { 328 433 struct device *dev = mspi->dev; 434 + struct fsl_spi_reg *reg_base = mspi->reg_base; 329 435 330 436 if (is_dma_mapped) { 331 437 mspi->map_tx_dma = 0; ··· 371 475 } 372 476 373 477 /* enable rx ints */ 374 - mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB); 478 + mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB); 375 479 376 480 mspi->xfer_in_progress = t; 377 481 mspi->count = t->len; 378 482 379 483 /* start CPM transfers */ 380 - mpc8xxx_spi_cpm_bufs_start(mspi); 484 + fsl_spi_cpm_bufs_start(mspi); 381 485 382 486 return 0; 383 487 ··· 387 491 return -ENOMEM; 388 492 } 389 493 390 - static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) 494 + static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) 391 495 { 392 496 struct device *dev = mspi->dev; 393 497 struct spi_transfer *t = mspi->xfer_in_progress; ··· 399 503 mspi->xfer_in_progress = NULL; 400 504 } 401 505 402 - static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi, 506 + static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, 403 507 struct spi_transfer 
*t, unsigned int len) 404 508 { 405 509 u32 word; 510 + struct fsl_spi_reg *reg_base = mspi->reg_base; 406 511 407 512 mspi->count = len; 408 513 409 514 /* enable rx ints */ 410 - mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE); 515 + mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE); 411 516 412 517 /* transmit word */ 413 518 word = mspi->get_tx(mspi); 414 - mpc8xxx_spi_write_reg(&mspi->base->transmit, word); 519 + mpc8xxx_spi_write_reg(&reg_base->transmit, word); 415 520 416 521 return 0; 417 522 } 418 523 419 - static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, 524 + static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, 420 525 bool is_dma_mapped) 421 526 { 422 527 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 528 + struct fsl_spi_reg *reg_base; 423 529 unsigned int len = t->len; 424 530 u8 bits_per_word; 425 531 int ret; 426 532 533 + reg_base = mpc8xxx_spi->reg_base; 427 534 bits_per_word = spi->bits_per_word; 428 535 if (t->bits_per_word) 429 536 bits_per_word = t->bits_per_word; ··· 450 551 INIT_COMPLETION(mpc8xxx_spi->done); 451 552 452 553 if (mpc8xxx_spi->flags & SPI_CPM_MODE) 453 - ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); 554 + ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); 454 555 else 455 - ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len); 556 + ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); 456 557 if (ret) 457 558 return ret; 458 559 459 560 wait_for_completion(&mpc8xxx_spi->done); 460 561 461 562 /* disable rx ints */ 462 - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 563 + mpc8xxx_spi_write_reg(&reg_base->mask, 0); 463 564 464 565 if (mpc8xxx_spi->flags & SPI_CPM_MODE) 465 - mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi); 566 + fsl_spi_cpm_bufs_complete(mpc8xxx_spi); 466 567 467 568 return mpc8xxx_spi->count; 468 569 } 469 570 470 - static void mpc8xxx_spi_do_one_msg(struct spi_message *m) 571 + static void fsl_spi_do_one_msg(struct spi_message *m) 471 
572 { 472 573 struct spi_device *spi = m->spi; 473 574 struct spi_transfer *t; ··· 483 584 status = -EINVAL; 484 585 485 586 if (cs_change) 486 - status = mpc8xxx_spi_setup_transfer(spi, t); 587 + status = fsl_spi_setup_transfer(spi, t); 487 588 if (status < 0) 488 589 break; 489 590 } 490 591 491 592 if (cs_change) { 492 - mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE); 593 + fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); 493 594 ndelay(nsecs); 494 595 } 495 596 cs_change = t->cs_change; 496 597 if (t->len) 497 - status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped); 598 + status = fsl_spi_bufs(spi, t, m->is_dma_mapped); 498 599 if (status) { 499 600 status = -EMSGSIZE; 500 601 break; ··· 506 607 507 608 if (cs_change) { 508 609 ndelay(nsecs); 509 - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); 610 + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); 510 611 ndelay(nsecs); 511 612 } 512 613 } ··· 516 617 517 618 if (status || !cs_change) { 518 619 ndelay(nsecs); 519 - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); 620 + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); 520 621 } 521 622 522 - mpc8xxx_spi_setup_transfer(spi, NULL); 623 + fsl_spi_setup_transfer(spi, NULL); 523 624 } 524 625 525 - static void mpc8xxx_spi_work(struct work_struct *work) 526 - { 527 - struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, 528 - work); 529 - 530 - spin_lock_irq(&mpc8xxx_spi->lock); 531 - while (!list_empty(&mpc8xxx_spi->queue)) { 532 - struct spi_message *m = container_of(mpc8xxx_spi->queue.next, 533 - struct spi_message, queue); 534 - 535 - list_del_init(&m->queue); 536 - spin_unlock_irq(&mpc8xxx_spi->lock); 537 - 538 - mpc8xxx_spi_do_one_msg(m); 539 - 540 - spin_lock_irq(&mpc8xxx_spi->lock); 541 - } 542 - spin_unlock_irq(&mpc8xxx_spi->lock); 543 - } 544 - 545 - static int mpc8xxx_spi_setup(struct spi_device *spi) 626 + static int fsl_spi_setup(struct spi_device *spi) 546 627 { 547 628 struct mpc8xxx_spi *mpc8xxx_spi; 629 + struct fsl_spi_reg *reg_base; 548 
630 int retval; 549 631 u32 hw_mode; 550 632 struct spi_mpc8xxx_cs *cs = spi->controller_state; ··· 541 661 } 542 662 mpc8xxx_spi = spi_master_get_devdata(spi->master); 543 663 664 + reg_base = mpc8xxx_spi->reg_base; 665 + 544 666 hw_mode = cs->hw_mode; /* Save original settings */ 545 - cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); 667 + cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode); 546 668 /* mask out bits we are going to set */ 547 669 cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH 548 670 | SPMODE_REV | SPMODE_LOOP); ··· 558 676 if (spi->mode & SPI_LOOP) 559 677 cs->hw_mode |= SPMODE_LOOP; 560 678 561 - retval = mpc8xxx_spi_setup_transfer(spi, NULL); 679 + retval = fsl_spi_setup_transfer(spi, NULL); 562 680 if (retval < 0) { 563 681 cs->hw_mode = hw_mode; /* Restore settings */ 564 682 return retval; ··· 566 684 return 0; 567 685 } 568 686 569 - static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) 687 + static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) 570 688 { 571 689 u16 len; 690 + struct fsl_spi_reg *reg_base = mspi->reg_base; 572 691 573 692 dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, 574 693 in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); ··· 581 698 } 582 699 583 700 /* Clear the events */ 584 - mpc8xxx_spi_write_reg(&mspi->base->event, events); 701 + mpc8xxx_spi_write_reg(&reg_base->event, events); 585 702 586 703 mspi->count -= len; 587 704 if (mspi->count) 588 - mpc8xxx_spi_cpm_bufs_start(mspi); 705 + fsl_spi_cpm_bufs_start(mspi); 589 706 else 590 707 complete(&mspi->done); 591 708 } 592 709 593 - static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) 710 + static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) 594 711 { 712 + struct fsl_spi_reg *reg_base = mspi->reg_base; 713 + 595 714 /* We need handle RX first */ 596 715 if (events & SPIE_NE) { 597 - u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive); 716 + u32 rx_data = 
mpc8xxx_spi_read_reg(&reg_base->receive); 598 717 599 718 if (mspi->rx) 600 719 mspi->get_rx(rx_data, mspi); ··· 605 720 if ((events & SPIE_NF) == 0) 606 721 /* spin until TX is done */ 607 722 while (((events = 608 - mpc8xxx_spi_read_reg(&mspi->base->event)) & 723 + mpc8xxx_spi_read_reg(&reg_base->event)) & 609 724 SPIE_NF) == 0) 610 725 cpu_relax(); 611 726 612 727 /* Clear the events */ 613 - mpc8xxx_spi_write_reg(&mspi->base->event, events); 728 + mpc8xxx_spi_write_reg(&reg_base->event, events); 614 729 615 730 mspi->count -= 1; 616 731 if (mspi->count) { 617 732 u32 word = mspi->get_tx(mspi); 618 733 619 - mpc8xxx_spi_write_reg(&mspi->base->transmit, word); 734 + mpc8xxx_spi_write_reg(&reg_base->transmit, word); 620 735 } else { 621 736 complete(&mspi->done); 622 737 } 623 738 } 624 739 625 - static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) 740 + static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) 626 741 { 627 742 struct mpc8xxx_spi *mspi = context_data; 628 743 irqreturn_t ret = IRQ_NONE; 629 744 u32 events; 745 + struct fsl_spi_reg *reg_base = mspi->reg_base; 630 746 631 747 /* Get interrupt events(tx/rx) */ 632 - events = mpc8xxx_spi_read_reg(&mspi->base->event); 748 + events = mpc8xxx_spi_read_reg(&reg_base->event); 633 749 if (events) 634 750 ret = IRQ_HANDLED; 635 751 636 752 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); 637 753 638 754 if (mspi->flags & SPI_CPM_MODE) 639 - mpc8xxx_spi_cpm_irq(mspi, events); 755 + fsl_spi_cpm_irq(mspi, events); 640 756 else 641 - mpc8xxx_spi_cpu_irq(mspi, events); 757 + fsl_spi_cpu_irq(mspi, events); 642 758 643 759 return ret; 644 760 } 645 761 646 - static int mpc8xxx_spi_transfer(struct spi_device *spi, 647 - struct spi_message *m) 762 + static void *fsl_spi_alloc_dummy_rx(void) 648 763 { 649 - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 650 - unsigned long flags; 764 + mutex_lock(&fsl_dummy_rx_lock); 651 765 652 - m->actual_length = 0; 653 - m->status = 
-EINPROGRESS; 766 + if (!fsl_dummy_rx) 767 + fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); 768 + if (fsl_dummy_rx) 769 + fsl_dummy_rx_refcnt++; 654 770 655 - spin_lock_irqsave(&mpc8xxx_spi->lock, flags); 656 - list_add_tail(&m->queue, &mpc8xxx_spi->queue); 657 - queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); 658 - spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); 771 + mutex_unlock(&fsl_dummy_rx_lock); 659 772 660 - return 0; 773 + return fsl_dummy_rx; 661 774 } 662 775 663 - 664 - static void mpc8xxx_spi_cleanup(struct spi_device *spi) 776 + static void fsl_spi_free_dummy_rx(void) 665 777 { 666 - kfree(spi->controller_state); 667 - } 778 + mutex_lock(&fsl_dummy_rx_lock); 668 779 669 - static void *mpc8xxx_spi_alloc_dummy_rx(void) 670 - { 671 - mutex_lock(&mpc8xxx_dummy_rx_lock); 672 - 673 - if (!mpc8xxx_dummy_rx) 674 - mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); 675 - if (mpc8xxx_dummy_rx) 676 - mpc8xxx_dummy_rx_refcnt++; 677 - 678 - mutex_unlock(&mpc8xxx_dummy_rx_lock); 679 - 680 - return mpc8xxx_dummy_rx; 681 - } 682 - 683 - static void mpc8xxx_spi_free_dummy_rx(void) 684 - { 685 - mutex_lock(&mpc8xxx_dummy_rx_lock); 686 - 687 - switch (mpc8xxx_dummy_rx_refcnt) { 780 + switch (fsl_dummy_rx_refcnt) { 688 781 case 0: 689 782 WARN_ON(1); 690 783 break; 691 784 case 1: 692 - kfree(mpc8xxx_dummy_rx); 693 - mpc8xxx_dummy_rx = NULL; 785 + kfree(fsl_dummy_rx); 786 + fsl_dummy_rx = NULL; 694 787 /* fall through */ 695 788 default: 696 - mpc8xxx_dummy_rx_refcnt--; 789 + fsl_dummy_rx_refcnt--; 697 790 break; 698 791 } 699 792 700 - mutex_unlock(&mpc8xxx_dummy_rx_lock); 793 + mutex_unlock(&fsl_dummy_rx_lock); 701 794 } 702 795 703 - static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) 796 + static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) 704 797 { 705 798 struct device *dev = mspi->dev; 706 799 struct device_node *np = dev->of_node; ··· 732 869 return pram_ofs; 733 870 } 734 871 735 - static int 
mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) 872 + static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) 736 873 { 737 874 struct device *dev = mspi->dev; 738 875 struct device_node *np = dev->of_node; ··· 744 881 if (!(mspi->flags & SPI_CPM_MODE)) 745 882 return 0; 746 883 747 - if (!mpc8xxx_spi_alloc_dummy_rx()) 884 + if (!fsl_spi_alloc_dummy_rx()) 748 885 return -ENOMEM; 749 886 750 887 if (mspi->flags & SPI_QE) { ··· 765 902 } 766 903 } 767 904 768 - pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi); 905 + pram_ofs = fsl_spi_cpm_get_pram(mspi); 769 906 if (IS_ERR_VALUE(pram_ofs)) { 770 907 dev_err(dev, "can't allocate spi parameter ram\n"); 771 908 goto err_pram; ··· 785 922 goto err_dummy_tx; 786 923 } 787 924 788 - mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR, 925 + mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, 789 926 DMA_FROM_DEVICE); 790 927 if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { 791 928 dev_err(dev, "unable to map dummy rx buffer\n"); ··· 823 960 err_bds: 824 961 cpm_muram_free(pram_ofs); 825 962 err_pram: 826 - mpc8xxx_spi_free_dummy_rx(); 963 + fsl_spi_free_dummy_rx(); 827 964 return -ENOMEM; 828 965 } 829 966 830 - static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) 967 + static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) 831 968 { 832 969 struct device *dev = mspi->dev; 833 970 ··· 835 972 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); 836 973 cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); 837 974 cpm_muram_free(cpm_muram_offset(mspi->pram)); 838 - mpc8xxx_spi_free_dummy_rx(); 975 + fsl_spi_free_dummy_rx(); 839 976 } 840 977 841 - static const char *mpc8xxx_spi_strmode(unsigned int flags) 978 + static void fsl_spi_remove(struct mpc8xxx_spi *mspi) 842 979 { 843 - if (flags & SPI_QE_CPU_MODE) { 844 - return "QE CPU"; 845 - } else if (flags & SPI_CPM_MODE) { 846 - if (flags & SPI_QE) 847 - return "QE"; 848 - else if (flags & SPI_CPM2) 849 - return "CPM2"; 850 - else 851 - 
return "CPM1"; 852 - } 853 - return "CPU"; 980 + iounmap(mspi->reg_base); 981 + fsl_spi_cpm_free(mspi); 854 982 } 855 983 856 - static struct spi_master * __devinit 857 - mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) 984 + static struct spi_master * __devinit fsl_spi_probe(struct device *dev, 985 + struct resource *mem, unsigned int irq) 858 986 { 859 987 struct fsl_spi_platform_data *pdata = dev->platform_data; 860 988 struct spi_master *master; 861 989 struct mpc8xxx_spi *mpc8xxx_spi; 990 + struct fsl_spi_reg *reg_base; 862 991 u32 regval; 863 992 int ret = 0; 864 993 ··· 862 1007 863 1008 dev_set_drvdata(dev, master); 864 1009 865 - /* the spi->mode bits understood by this driver: */ 866 - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH 867 - | SPI_LSB_FIRST | SPI_LOOP; 1010 + ret = mpc8xxx_spi_probe(dev, mem, irq); 1011 + if (ret) 1012 + goto err_probe; 868 1013 869 - master->setup = mpc8xxx_spi_setup; 870 - master->transfer = mpc8xxx_spi_transfer; 871 - master->cleanup = mpc8xxx_spi_cleanup; 872 - master->dev.of_node = dev->of_node; 1014 + master->setup = fsl_spi_setup; 873 1015 874 1016 mpc8xxx_spi = spi_master_get_devdata(master); 875 - mpc8xxx_spi->dev = dev; 876 - mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; 877 - mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; 878 - mpc8xxx_spi->flags = pdata->flags; 879 - mpc8xxx_spi->spibrg = pdata->sysclk; 1017 + mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; 1018 + mpc8xxx_spi->spi_remove = fsl_spi_remove; 880 1019 881 - ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi); 1020 + 1021 + ret = fsl_spi_cpm_init(mpc8xxx_spi); 882 1022 if (ret) 883 1023 goto err_cpm_init; 884 1024 885 - mpc8xxx_spi->rx_shift = 0; 886 - mpc8xxx_spi->tx_shift = 0; 887 1025 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { 888 1026 mpc8xxx_spi->rx_shift = 16; 889 1027 mpc8xxx_spi->tx_shift = 24; 890 1028 } 891 1029 892 - init_completion(&mpc8xxx_spi->done); 893 - 894 - mpc8xxx_spi->base = ioremap(mem->start, 
resource_size(mem)); 895 - if (mpc8xxx_spi->base == NULL) { 1030 + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); 1031 + if (mpc8xxx_spi->reg_base == NULL) { 896 1032 ret = -ENOMEM; 897 1033 goto err_ioremap; 898 1034 } 899 1035 900 - mpc8xxx_spi->irq = irq; 901 - 902 1036 /* Register for SPI Interrupt */ 903 - ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq, 904 - 0, "mpc8xxx_spi", mpc8xxx_spi); 1037 + ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, 1038 + 0, "fsl_spi", mpc8xxx_spi); 905 1039 906 1040 if (ret != 0) 907 - goto unmap_io; 1041 + goto free_irq; 908 1042 909 - master->bus_num = pdata->bus_num; 910 - master->num_chipselect = pdata->max_chipselect; 1043 + reg_base = mpc8xxx_spi->reg_base; 911 1044 912 1045 /* SPI controller initializations */ 913 - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0); 914 - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 915 - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0); 916 - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff); 1046 + mpc8xxx_spi_write_reg(&reg_base->mode, 0); 1047 + mpc8xxx_spi_write_reg(&reg_base->mask, 0); 1048 + mpc8xxx_spi_write_reg(&reg_base->command, 0); 1049 + mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); 917 1050 918 1051 /* Enable SPI interface */ 919 1052 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 920 1053 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) 921 1054 regval |= SPMODE_OP; 922 1055 923 - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); 924 - spin_lock_init(&mpc8xxx_spi->lock); 925 - init_completion(&mpc8xxx_spi->done); 926 - INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); 927 - INIT_LIST_HEAD(&mpc8xxx_spi->queue); 928 - 929 - mpc8xxx_spi->workqueue = create_singlethread_workqueue( 930 - dev_name(master->dev.parent)); 931 - if (mpc8xxx_spi->workqueue == NULL) { 932 - ret = -EBUSY; 933 - goto free_irq; 934 - } 1056 + mpc8xxx_spi_write_reg(&reg_base->mode, regval); 935 1057 936 1058 ret = 
spi_register_master(master); 937 1059 if (ret < 0) 938 1060 goto unreg_master; 939 1061 940 - dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base, 1062 + dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, 941 1063 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); 942 1064 943 1065 return master; 944 1066 945 1067 unreg_master: 946 - destroy_workqueue(mpc8xxx_spi->workqueue); 947 - free_irq: 948 1068 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 949 - unmap_io: 950 - iounmap(mpc8xxx_spi->base); 1069 + free_irq: 1070 + iounmap(mpc8xxx_spi->reg_base); 951 1071 err_ioremap: 952 - mpc8xxx_spi_cpm_free(mpc8xxx_spi); 1072 + fsl_spi_cpm_free(mpc8xxx_spi); 953 1073 err_cpm_init: 1074 + err_probe: 954 1075 spi_master_put(master); 955 1076 err: 956 1077 return ERR_PTR(ret); 957 1078 } 958 1079 959 - static int __devexit mpc8xxx_spi_remove(struct device *dev) 960 - { 961 - struct mpc8xxx_spi *mpc8xxx_spi; 962 - struct spi_master *master; 963 - 964 - master = dev_get_drvdata(dev); 965 - mpc8xxx_spi = spi_master_get_devdata(master); 966 - 967 - flush_workqueue(mpc8xxx_spi->workqueue); 968 - destroy_workqueue(mpc8xxx_spi->workqueue); 969 - spi_unregister_master(master); 970 - 971 - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 972 - iounmap(mpc8xxx_spi->base); 973 - mpc8xxx_spi_cpm_free(mpc8xxx_spi); 974 - 975 - return 0; 976 - } 977 - 978 - struct mpc8xxx_spi_probe_info { 979 - struct fsl_spi_platform_data pdata; 980 - int *gpios; 981 - bool *alow_flags; 982 - }; 983 - 984 - static struct mpc8xxx_spi_probe_info * 985 - to_of_pinfo(struct fsl_spi_platform_data *pdata) 986 - { 987 - return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); 988 - } 989 - 990 - static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) 1080 + static void fsl_spi_cs_control(struct spi_device *spi, bool on) 991 1081 { 992 1082 struct device *dev = spi->dev.parent; 993 1083 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); ··· 943 1143 
gpio_set_value(gpio, on ^ alow); 944 1144 } 945 1145 946 - static int of_mpc8xxx_spi_get_chipselects(struct device *dev) 1146 + static int of_fsl_spi_get_chipselects(struct device *dev) 947 1147 { 948 1148 struct device_node *np = dev->of_node; 949 1149 struct fsl_spi_platform_data *pdata = dev->platform_data; ··· 1004 1204 } 1005 1205 1006 1206 pdata->max_chipselect = ngpios; 1007 - pdata->cs_control = mpc8xxx_spi_cs_control; 1207 + pdata->cs_control = fsl_spi_cs_control; 1008 1208 1009 1209 return 0; 1010 1210 ··· 1023 1223 return ret; 1024 1224 } 1025 1225 1026 - static int of_mpc8xxx_spi_free_chipselects(struct device *dev) 1226 + static int of_fsl_spi_free_chipselects(struct device *dev) 1027 1227 { 1028 1228 struct fsl_spi_platform_data *pdata = dev->platform_data; 1029 1229 struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); ··· 1042 1242 return 0; 1043 1243 } 1044 1244 1045 - static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, 1046 - const struct of_device_id *ofid) 1245 + static int __devinit of_fsl_spi_probe(struct platform_device *ofdev, 1246 + const struct of_device_id *ofid) 1047 1247 { 1048 1248 struct device *dev = &ofdev->dev; 1049 1249 struct device_node *np = ofdev->dev.of_node; 1050 - struct mpc8xxx_spi_probe_info *pinfo; 1051 - struct fsl_spi_platform_data *pdata; 1052 1250 struct spi_master *master; 1053 1251 struct resource mem; 1054 1252 struct resource irq; 1055 - const void *prop; 1056 1253 int ret = -ENOMEM; 1057 1254 1058 - pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); 1059 - if (!pinfo) 1060 - return -ENOMEM; 1255 + ret = of_mpc8xxx_spi_probe(ofdev, ofid); 1256 + if (ret) 1257 + return ret; 1061 1258 1062 - pdata = &pinfo->pdata; 1063 - dev->platform_data = pdata; 1064 - 1065 - /* Allocate bus num dynamically. */ 1066 - pdata->bus_num = -1; 1067 - 1068 - /* SPI controller is either clocked from QE or SoC clock. 
*/ 1069 - pdata->sysclk = get_brgfreq(); 1070 - if (pdata->sysclk == -1) { 1071 - pdata->sysclk = fsl_get_sys_freq(); 1072 - if (pdata->sysclk == -1) { 1073 - ret = -ENODEV; 1074 - goto err_clk; 1075 - } 1076 - } 1077 - 1078 - prop = of_get_property(np, "mode", NULL); 1079 - if (prop && !strcmp(prop, "cpu-qe")) 1080 - pdata->flags = SPI_QE_CPU_MODE; 1081 - else if (prop && !strcmp(prop, "qe")) 1082 - pdata->flags = SPI_CPM_MODE | SPI_QE; 1083 - else if (of_device_is_compatible(np, "fsl,cpm2-spi")) 1084 - pdata->flags = SPI_CPM_MODE | SPI_CPM2; 1085 - else if (of_device_is_compatible(np, "fsl,cpm1-spi")) 1086 - pdata->flags = SPI_CPM_MODE | SPI_CPM1; 1087 - 1088 - ret = of_mpc8xxx_spi_get_chipselects(dev); 1259 + ret = of_fsl_spi_get_chipselects(dev); 1089 1260 if (ret) 1090 1261 goto err; 1091 1262 ··· 1070 1299 goto err; 1071 1300 } 1072 1301 1073 - master = mpc8xxx_spi_probe(dev, &mem, irq.start); 1302 + master = fsl_spi_probe(dev, &mem, irq.start); 1074 1303 if (IS_ERR(master)) { 1075 1304 ret = PTR_ERR(master); 1076 1305 goto err; ··· 1079 1308 return 0; 1080 1309 1081 1310 err: 1082 - of_mpc8xxx_spi_free_chipselects(dev); 1083 - err_clk: 1084 - kfree(pinfo); 1311 + of_fsl_spi_free_chipselects(dev); 1085 1312 return ret; 1086 1313 } 1087 1314 1088 - static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev) 1315 + static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) 1089 1316 { 1090 1317 int ret; 1091 1318 1092 1319 ret = mpc8xxx_spi_remove(&ofdev->dev); 1093 1320 if (ret) 1094 1321 return ret; 1095 - of_mpc8xxx_spi_free_chipselects(&ofdev->dev); 1322 + of_fsl_spi_free_chipselects(&ofdev->dev); 1096 1323 return 0; 1097 1324 } 1098 1325 1099 - static const struct of_device_id of_mpc8xxx_spi_match[] = { 1326 + static const struct of_device_id of_fsl_spi_match[] = { 1100 1327 { .compatible = "fsl,spi" }, 1101 - {}, 1328 + {} 1102 1329 }; 1103 - MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); 1330 + MODULE_DEVICE_TABLE(of, 
of_fsl_spi_match); 1104 1331 1105 - static struct of_platform_driver of_mpc8xxx_spi_driver = { 1332 + static struct of_platform_driver of_fsl_spi_driver = { 1106 1333 .driver = { 1107 - .name = "mpc8xxx_spi", 1334 + .name = "fsl_spi", 1108 1335 .owner = THIS_MODULE, 1109 - .of_match_table = of_mpc8xxx_spi_match, 1336 + .of_match_table = of_fsl_spi_match, 1110 1337 }, 1111 - .probe = of_mpc8xxx_spi_probe, 1112 - .remove = __devexit_p(of_mpc8xxx_spi_remove), 1338 + .probe = of_fsl_spi_probe, 1339 + .remove = __devexit_p(of_fsl_spi_remove), 1113 1340 }; 1114 1341 1115 1342 #ifdef CONFIG_MPC832x_RDB 1116 1343 /* 1117 - * XXX XXX XXX 1344 + * XXX XXX XXX 1118 1345 * This is "legacy" platform driver, was used by the MPC8323E-RDB boards 1119 1346 * only. The driver should go away soon, since newer MPC8323E-RDB's device 1120 1347 * tree can work with OpenFirmware driver. But for now we support old trees ··· 1135 1366 if (irq <= 0) 1136 1367 return -EINVAL; 1137 1368 1138 - master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); 1369 + master = fsl_spi_probe(&pdev->dev, mem, irq); 1139 1370 if (IS_ERR(master)) 1140 1371 return PTR_ERR(master); 1141 1372 return 0; ··· 1174 1405 static void __exit legacy_driver_unregister(void) {} 1175 1406 #endif /* CONFIG_MPC832x_RDB */ 1176 1407 1177 - static int __init mpc8xxx_spi_init(void) 1408 + static int __init fsl_spi_init(void) 1178 1409 { 1179 1410 legacy_driver_register(); 1180 - return of_register_platform_driver(&of_mpc8xxx_spi_driver); 1411 + return of_register_platform_driver(&of_fsl_spi_driver); 1181 1412 } 1413 + module_init(fsl_spi_init); 1182 1414 1183 - static void __exit mpc8xxx_spi_exit(void) 1415 + static void __exit fsl_spi_exit(void) 1184 1416 { 1185 - of_unregister_platform_driver(&of_mpc8xxx_spi_driver); 1417 + of_unregister_platform_driver(&of_fsl_spi_driver); 1186 1418 legacy_driver_unregister(); 1187 1419 } 1188 - 1189 - module_init(mpc8xxx_spi_init); 1190 - module_exit(mpc8xxx_spi_exit); 1420 + 
module_exit(fsl_spi_exit); 1191 1421 1192 1422 MODULE_AUTHOR("Kumar Gala"); 1193 - MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver"); 1423 + MODULE_DESCRIPTION("Simple Freescale SPI Driver"); 1194 1424 MODULE_LICENSE("GPL");
+102 -50
drivers/spi/spi_s3c64xx.c
··· 261 261 chcfg |= S3C64XX_SPI_CH_TXCH_ON; 262 262 if (dma_mode) { 263 263 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; 264 - s3c2410_dma_config(sdd->tx_dmach, 1); 264 + s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); 265 265 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, 266 266 xfer->tx_dma, xfer->len); 267 267 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); 268 268 } else { 269 - unsigned char *buf = (unsigned char *) xfer->tx_buf; 270 - int i = 0; 271 - while (i < xfer->len) 272 - writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA); 269 + switch (sdd->cur_bpw) { 270 + case 32: 271 + iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, 272 + xfer->tx_buf, xfer->len / 4); 273 + break; 274 + case 16: 275 + iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, 276 + xfer->tx_buf, xfer->len / 2); 277 + break; 278 + default: 279 + iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, 280 + xfer->tx_buf, xfer->len); 281 + break; 282 + } 273 283 } 274 284 } 275 285 ··· 296 286 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) 297 287 | S3C64XX_SPI_PACKET_CNT_EN, 298 288 regs + S3C64XX_SPI_PACKET_CNT); 299 - s3c2410_dma_config(sdd->rx_dmach, 1); 289 + s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); 300 290 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, 301 291 xfer->rx_dma, xfer->len); 302 292 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); ··· 376 366 return -EIO; 377 367 } 378 368 } else { 379 - unsigned char *buf; 380 - int i; 381 - 382 369 /* If it was only Tx */ 383 370 if (xfer->rx_buf == NULL) { 384 371 sdd->state &= ~TXBUSY; 385 372 return 0; 386 373 } 387 374 388 - i = 0; 389 - buf = xfer->rx_buf; 390 - while (i < xfer->len) 391 - buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA); 392 - 375 + switch (sdd->cur_bpw) { 376 + case 32: 377 + ioread32_rep(regs + S3C64XX_SPI_RX_DATA, 378 + xfer->rx_buf, xfer->len / 4); 379 + break; 380 + case 16: 381 + ioread16_rep(regs + S3C64XX_SPI_RX_DATA, 382 + xfer->rx_buf, xfer->len / 2); 383 + break; 384 + default: 385 + ioread8_rep(regs + 
S3C64XX_SPI_RX_DATA, 386 + xfer->rx_buf, xfer->len); 387 + break; 388 + } 393 389 sdd->state &= ~RXBUSY; 394 390 } 395 391 ··· 415 399 416 400 static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) 417 401 { 402 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 418 403 void __iomem *regs = sdd->regs; 419 404 u32 val; 420 405 421 406 /* Disable Clock */ 422 - val = readl(regs + S3C64XX_SPI_CLK_CFG); 423 - val &= ~S3C64XX_SPI_ENCLK_ENABLE; 424 - writel(val, regs + S3C64XX_SPI_CLK_CFG); 407 + if (sci->clk_from_cmu) { 408 + clk_disable(sdd->src_clk); 409 + } else { 410 + val = readl(regs + S3C64XX_SPI_CLK_CFG); 411 + val &= ~S3C64XX_SPI_ENCLK_ENABLE; 412 + writel(val, regs + S3C64XX_SPI_CLK_CFG); 413 + } 425 414 426 415 /* Set Polarity and Phase */ 427 416 val = readl(regs + S3C64XX_SPI_CH_CFG); ··· 450 429 switch (sdd->cur_bpw) { 451 430 case 32: 452 431 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; 432 + val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; 453 433 break; 454 434 case 16: 455 435 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; 436 + val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; 456 437 break; 457 438 default: 458 439 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; 440 + val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; 459 441 break; 460 442 } 461 - val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */ 462 443 463 444 writel(val, regs + S3C64XX_SPI_MODE_CFG); 464 445 465 - /* Configure Clock */ 466 - val = readl(regs + S3C64XX_SPI_CLK_CFG); 467 - val &= ~S3C64XX_SPI_PSR_MASK; 468 - val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) 469 - & S3C64XX_SPI_PSR_MASK); 470 - writel(val, regs + S3C64XX_SPI_CLK_CFG); 446 + if (sci->clk_from_cmu) { 447 + /* Configure Clock */ 448 + /* There is half-multiplier before the SPI */ 449 + clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); 450 + /* Enable Clock */ 451 + clk_enable(sdd->src_clk); 452 + } else { 453 + /* Configure Clock */ 454 + val = readl(regs + S3C64XX_SPI_CLK_CFG); 455 + val &= ~S3C64XX_SPI_PSR_MASK; 456 + val |= 
((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) 457 + & S3C64XX_SPI_PSR_MASK); 458 + writel(val, regs + S3C64XX_SPI_CLK_CFG); 471 459 472 - /* Enable Clock */ 473 - val = readl(regs + S3C64XX_SPI_CLK_CFG); 474 - val |= S3C64XX_SPI_ENCLK_ENABLE; 475 - writel(val, regs + S3C64XX_SPI_CLK_CFG); 460 + /* Enable Clock */ 461 + val = readl(regs + S3C64XX_SPI_CLK_CFG); 462 + val |= S3C64XX_SPI_ENCLK_ENABLE; 463 + writel(val, regs + S3C64XX_SPI_CLK_CFG); 464 + } 476 465 } 477 466 478 467 static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, ··· 530 499 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, 531 500 struct spi_message *msg) 532 501 { 502 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 533 503 struct device *dev = &sdd->pdev->dev; 534 504 struct spi_transfer *xfer; 535 505 ··· 545 513 546 514 /* Map until end or first fail */ 547 515 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 516 + 517 + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) 518 + continue; 548 519 549 520 if (xfer->tx_buf != NULL) { 550 521 xfer->tx_dma = dma_map_single(dev, ··· 580 545 static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, 581 546 struct spi_message *msg) 582 547 { 548 + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 583 549 struct device *dev = &sdd->pdev->dev; 584 550 struct spi_transfer *xfer; 585 551 ··· 588 552 return; 589 553 590 554 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 555 + 556 + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) 557 + continue; 591 558 592 559 if (xfer->rx_buf != NULL 593 560 && xfer->rx_dma != XFER_DMAADDR_INVALID) ··· 646 607 /* Only BPW and Speed may change across transfers */ 647 608 bpw = xfer->bits_per_word ? : spi->bits_per_word; 648 609 speed = xfer->speed_hz ? 
: spi->max_speed_hz; 610 + 611 + if (xfer->len % (bpw / 8)) { 612 + dev_err(&spi->dev, 613 + "Xfer length(%u) not a multiple of word size(%u)\n", 614 + xfer->len, bpw / 8); 615 + status = -EIO; 616 + goto out; 617 + } 649 618 650 619 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { 651 620 sdd->cur_bpw = bpw; ··· 845 798 struct s3c64xx_spi_driver_data *sdd; 846 799 struct s3c64xx_spi_info *sci; 847 800 struct spi_message *msg; 848 - u32 psr, speed; 849 801 unsigned long flags; 850 802 int err = 0; 851 803 ··· 887 841 } 888 842 889 843 /* Check if we can provide the requested rate */ 890 - speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ 844 + if (!sci->clk_from_cmu) { 845 + u32 psr, speed; 891 846 892 - if (spi->max_speed_hz > speed) 893 - spi->max_speed_hz = speed; 847 + /* Max possible */ 848 + speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); 894 849 895 - psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; 896 - psr &= S3C64XX_SPI_PSR_MASK; 897 - if (psr == S3C64XX_SPI_PSR_MASK) 898 - psr--; 850 + if (spi->max_speed_hz > speed) 851 + spi->max_speed_hz = speed; 899 852 900 - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 901 - if (spi->max_speed_hz < speed) { 902 - if (psr+1 < S3C64XX_SPI_PSR_MASK) { 903 - psr++; 904 - } else { 905 - err = -EINVAL; 906 - goto setup_exit; 853 + psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; 854 + psr &= S3C64XX_SPI_PSR_MASK; 855 + if (psr == S3C64XX_SPI_PSR_MASK) 856 + psr--; 857 + 858 + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 859 + if (spi->max_speed_hz < speed) { 860 + if (psr+1 < S3C64XX_SPI_PSR_MASK) { 861 + psr++; 862 + } else { 863 + err = -EINVAL; 864 + goto setup_exit; 865 + } 907 866 } 908 - } 909 867 910 - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 911 - if (spi->max_speed_hz >= speed) 912 - spi->max_speed_hz = speed; 913 - else 914 - err = -EINVAL; 868 + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); 869 + if (spi->max_speed_hz >= speed) 
870 + spi->max_speed_hz = speed; 871 + else 872 + err = -EINVAL; 873 + } 915 874 916 875 setup_exit: 917 876 ··· 939 888 /* Disable Interrupts - we use Polling if not DMA mode */ 940 889 writel(0, regs + S3C64XX_SPI_INT_EN); 941 890 942 - writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, 891 + if (!sci->clk_from_cmu) 892 + writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, 943 893 regs + S3C64XX_SPI_CLK_CFG); 944 894 writel(0, regs + S3C64XX_SPI_MODE_CFG); 945 895 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+1303
drivers/spi/spi_topcliff_pch.c
··· 1 + /* 2 + * SPI bus driver for the Topcliff PCH used by Intel SoCs 3 + * 4 + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; version 2 of the License. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 18 + */ 19 + 20 + #include <linux/delay.h> 21 + #include <linux/pci.h> 22 + #include <linux/wait.h> 23 + #include <linux/spi/spi.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/sched.h> 26 + #include <linux/spi/spidev.h> 27 + #include <linux/module.h> 28 + #include <linux/device.h> 29 + 30 + /* Register offsets */ 31 + #define PCH_SPCR 0x00 /* SPI control register */ 32 + #define PCH_SPBRR 0x04 /* SPI baud rate register */ 33 + #define PCH_SPSR 0x08 /* SPI status register */ 34 + #define PCH_SPDWR 0x0C /* SPI write data register */ 35 + #define PCH_SPDRR 0x10 /* SPI read data register */ 36 + #define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ 37 + #define PCH_SRST 0x1C /* SPI reset register */ 38 + 39 + #define PCH_SPSR_TFD 0x000007C0 40 + #define PCH_SPSR_RFD 0x0000F800 41 + 42 + #define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) 43 + #define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) 44 + 45 + #define PCH_RX_THOLD 7 46 + #define PCH_RX_THOLD_MAX 15 47 + 48 + #define PCH_MAX_BAUDRATE 5000000 49 + #define PCH_MAX_FIFO_DEPTH 16 50 + 51 + #define STATUS_RUNNING 1 52 + #define STATUS_EXITING 2 53 + #define PCH_SLEEP_TIME 10 
54 + 55 + #define PCH_ADDRESS_SIZE 0x20 56 + 57 + #define SSN_LOW 0x02U 58 + #define SSN_NO_CONTROL 0x00U 59 + #define PCH_MAX_CS 0xFF 60 + #define PCI_DEVICE_ID_GE_SPI 0x8816 61 + 62 + #define SPCR_SPE_BIT (1 << 0) 63 + #define SPCR_MSTR_BIT (1 << 1) 64 + #define SPCR_LSBF_BIT (1 << 4) 65 + #define SPCR_CPHA_BIT (1 << 5) 66 + #define SPCR_CPOL_BIT (1 << 6) 67 + #define SPCR_TFIE_BIT (1 << 8) 68 + #define SPCR_RFIE_BIT (1 << 9) 69 + #define SPCR_FIE_BIT (1 << 10) 70 + #define SPCR_ORIE_BIT (1 << 11) 71 + #define SPCR_MDFIE_BIT (1 << 12) 72 + #define SPCR_FICLR_BIT (1 << 24) 73 + #define SPSR_TFI_BIT (1 << 0) 74 + #define SPSR_RFI_BIT (1 << 1) 75 + #define SPSR_FI_BIT (1 << 2) 76 + #define SPBRR_SIZE_BIT (1 << 10) 77 + 78 + #define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) 79 + 80 + #define SPCR_RFIC_FIELD 20 81 + #define SPCR_TFIC_FIELD 16 82 + 83 + #define SPSR_INT_BITS 0x1F 84 + #define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) 85 + #define MASK_RFIC_SPCR_BITS (~(0xf << 20)) 86 + #define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) 87 + 88 + #define PCH_CLOCK_HZ 50000000 89 + #define PCH_MAX_SPBR 1023 90 + 91 + 92 + /** 93 + * struct pch_spi_data - Holds the SPI channel specific details 94 + * @io_remap_addr: The remapped PCI base address 95 + * @master: Pointer to the SPI master structure 96 + * @work: Reference to work queue handler 97 + * @wk: Workqueue for carrying out execution of the 98 + * requests 99 + * @wait: Wait queue for waking up upon receiving an 100 + * interrupt. 
101 + * @transfer_complete: Status of SPI Transfer 102 + * @bcurrent_msg_processing: Status flag for message processing 103 + * @lock: Lock for protecting this structure 104 + * @queue: SPI Message queue 105 + * @status: Status of the SPI driver 106 + * @bpw_len: Length of data to be transferred in bits per 107 + * word 108 + * @transfer_active: Flag showing active transfer 109 + * @tx_index: Transmit data count; for bookkeeping during 110 + * transfer 111 + * @rx_index: Receive data count; for bookkeeping during 112 + * transfer 113 + * @tx_buff: Buffer for data to be transmitted 114 + * @rx_index: Buffer for Received data 115 + * @n_curnt_chip: The chip number that this SPI driver currently 116 + * operates on 117 + * @current_chip: Reference to the current chip that this SPI 118 + * driver currently operates on 119 + * @current_msg: The current message that this SPI driver is 120 + * handling 121 + * @cur_trans: The current transfer that this SPI driver is 122 + * handling 123 + * @board_dat: Reference to the SPI device data structure 124 + */ 125 + struct pch_spi_data { 126 + void __iomem *io_remap_addr; 127 + struct spi_master *master; 128 + struct work_struct work; 129 + struct workqueue_struct *wk; 130 + wait_queue_head_t wait; 131 + u8 transfer_complete; 132 + u8 bcurrent_msg_processing; 133 + spinlock_t lock; 134 + struct list_head queue; 135 + u8 status; 136 + u32 bpw_len; 137 + u8 transfer_active; 138 + u32 tx_index; 139 + u32 rx_index; 140 + u16 *pkt_tx_buff; 141 + u16 *pkt_rx_buff; 142 + u8 n_curnt_chip; 143 + struct spi_device *current_chip; 144 + struct spi_message *current_msg; 145 + struct spi_transfer *cur_trans; 146 + struct pch_spi_board_data *board_dat; 147 + }; 148 + 149 + /** 150 + * struct pch_spi_board_data - Holds the SPI device specific details 151 + * @pdev: Pointer to the PCI device 152 + * @irq_reg_sts: Status of IRQ registration 153 + * @pci_req_sts: Status of pci_request_regions 154 + * @suspend_sts: Status of suspend 155 + * @data: 
Pointer to SPI channel data structure 156 + */ 157 + struct pch_spi_board_data { 158 + struct pci_dev *pdev; 159 + u8 irq_reg_sts; 160 + u8 pci_req_sts; 161 + u8 suspend_sts; 162 + struct pch_spi_data *data; 163 + }; 164 + 165 + static struct pci_device_id pch_spi_pcidev_id[] = { 166 + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, 167 + {0,} 168 + }; 169 + 170 + /** 171 + * pch_spi_writereg() - Performs register writes 172 + * @master: Pointer to struct spi_master. 173 + * @idx: Register offset. 174 + * @val: Value to be written to register. 175 + */ 176 + static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) 177 + { 178 + struct pch_spi_data *data = spi_master_get_devdata(master); 179 + iowrite32(val, (data->io_remap_addr + idx)); 180 + } 181 + 182 + /** 183 + * pch_spi_readreg() - Performs register reads 184 + * @master: Pointer to struct spi_master. 185 + * @idx: Register offset. 186 + */ 187 + static inline u32 pch_spi_readreg(struct spi_master *master, int idx) 188 + { 189 + struct pch_spi_data *data = spi_master_get_devdata(master); 190 + return ioread32(data->io_remap_addr + idx); 191 + } 192 + 193 + static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, 194 + u32 set, u32 clr) 195 + { 196 + u32 tmp = pch_spi_readreg(master, idx); 197 + tmp = (tmp & ~clr) | set; 198 + pch_spi_writereg(master, idx, tmp); 199 + } 200 + 201 + static void pch_spi_set_master_mode(struct spi_master *master) 202 + { 203 + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); 204 + } 205 + 206 + /** 207 + * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs 208 + * @master: Pointer to struct spi_master. 
209 + */ 210 + static void pch_spi_clear_fifo(struct spi_master *master) 211 + { 212 + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); 213 + pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); 214 + } 215 + 216 + static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, 217 + void __iomem *io_remap_addr) 218 + { 219 + u32 n_read, tx_index, rx_index, bpw_len; 220 + u16 *pkt_rx_buffer, *pkt_tx_buff; 221 + int read_cnt; 222 + u32 reg_spcr_val; 223 + void __iomem *spsr; 224 + void __iomem *spdrr; 225 + void __iomem *spdwr; 226 + 227 + spsr = io_remap_addr + PCH_SPSR; 228 + iowrite32(reg_spsr_val, spsr); 229 + 230 + if (data->transfer_active) { 231 + rx_index = data->rx_index; 232 + tx_index = data->tx_index; 233 + bpw_len = data->bpw_len; 234 + pkt_rx_buffer = data->pkt_rx_buff; 235 + pkt_tx_buff = data->pkt_tx_buff; 236 + 237 + spdrr = io_remap_addr + PCH_SPDRR; 238 + spdwr = io_remap_addr + PCH_SPDWR; 239 + 240 + n_read = PCH_READABLE(reg_spsr_val); 241 + 242 + for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { 243 + pkt_rx_buffer[rx_index++] = ioread32(spdrr); 244 + if (tx_index < bpw_len) 245 + iowrite32(pkt_tx_buff[tx_index++], spdwr); 246 + } 247 + 248 + /* disable RFI if not needed */ 249 + if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { 250 + reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); 251 + reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ 252 + 253 + /* reset rx threshold */ 254 + reg_spcr_val &= MASK_RFIC_SPCR_BITS; 255 + reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); 256 + iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), 257 + (io_remap_addr + PCH_SPCR)); 258 + } 259 + 260 + /* update counts */ 261 + data->tx_index = tx_index; 262 + data->rx_index = rx_index; 263 + 264 + } 265 + 266 + /* if transfer complete interrupt */ 267 + if (reg_spsr_val & SPSR_FI_BIT) { 268 + /* disable FI & RFI interrupts */ 269 + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, 270 + SPCR_FIE_BIT | SPCR_TFIE_BIT); 271 + 272 + /* 
transfer is completed;inform pch_spi_process_messages */ 273 + data->transfer_complete = true; 274 + wake_up(&data->wait); 275 + } 276 + } 277 + 278 + /** 279 + * pch_spi_handler() - Interrupt handler 280 + * @irq: The interrupt number. 281 + * @dev_id: Pointer to struct pch_spi_board_data. 282 + */ 283 + static irqreturn_t pch_spi_handler(int irq, void *dev_id) 284 + { 285 + u32 reg_spsr_val; 286 + struct pch_spi_data *data; 287 + void __iomem *spsr; 288 + void __iomem *io_remap_addr; 289 + irqreturn_t ret = IRQ_NONE; 290 + struct pch_spi_board_data *board_dat = dev_id; 291 + 292 + if (board_dat->suspend_sts) { 293 + dev_dbg(&board_dat->pdev->dev, 294 + "%s returning due to suspend\n", __func__); 295 + return IRQ_NONE; 296 + } 297 + 298 + data = board_dat->data; 299 + io_remap_addr = data->io_remap_addr; 300 + spsr = io_remap_addr + PCH_SPSR; 301 + 302 + reg_spsr_val = ioread32(spsr); 303 + 304 + /* Check if the interrupt is for SPI device */ 305 + if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { 306 + pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); 307 + ret = IRQ_HANDLED; 308 + } 309 + 310 + dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", 311 + __func__, ret); 312 + 313 + return ret; 314 + } 315 + 316 + /** 317 + * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR 318 + * @master: Pointer to struct spi_master. 319 + * @speed_hz: Baud rate. 320 + */ 321 + static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) 322 + { 323 + u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); 324 + 325 + /* if baud rate is less than we can support limit it */ 326 + if (n_spbr > PCH_MAX_SPBR) 327 + n_spbr = PCH_MAX_SPBR; 328 + 329 + pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); 330 + } 331 + 332 + /** 333 + * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR 334 + * @master: Pointer to struct spi_master. 335 + * @bits_per_word: Bits per word for SPI transfer. 
336 + */ 337 + static void pch_spi_set_bits_per_word(struct spi_master *master, 338 + u8 bits_per_word) 339 + { 340 + if (bits_per_word == 8) 341 + pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); 342 + else 343 + pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); 344 + } 345 + 346 + /** 347 + * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer 348 + * @spi: Pointer to struct spi_device. 349 + */ 350 + static void pch_spi_setup_transfer(struct spi_device *spi) 351 + { 352 + u32 flags = 0; 353 + 354 + dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", 355 + __func__, pch_spi_readreg(spi->master, PCH_SPBRR), 356 + spi->max_speed_hz); 357 + pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); 358 + 359 + /* set bits per word */ 360 + pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); 361 + 362 + if (!(spi->mode & SPI_LSB_FIRST)) 363 + flags |= SPCR_LSBF_BIT; 364 + if (spi->mode & SPI_CPOL) 365 + flags |= SPCR_CPOL_BIT; 366 + if (spi->mode & SPI_CPHA) 367 + flags |= SPCR_CPHA_BIT; 368 + pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, 369 + (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); 370 + 371 + /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ 372 + pch_spi_clear_fifo(spi->master); 373 + } 374 + 375 + /** 376 + * pch_spi_reset() - Clears SPI registers 377 + * @master: Pointer to struct spi_master. 
378 + */ 379 + static void pch_spi_reset(struct spi_master *master) 380 + { 381 + /* write 1 to reset SPI */ 382 + pch_spi_writereg(master, PCH_SRST, 0x1); 383 + 384 + /* clear reset */ 385 + pch_spi_writereg(master, PCH_SRST, 0x0); 386 + } 387 + 388 + static int pch_spi_setup(struct spi_device *pspi) 389 + { 390 + /* check bits per word */ 391 + if (pspi->bits_per_word == 0) { 392 + pspi->bits_per_word = 8; 393 + dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); 394 + } 395 + 396 + if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { 397 + dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); 398 + return -EINVAL; 399 + } 400 + 401 + /* Check baud rate setting */ 402 + /* if baud rate of chip is greater than 403 + max we can support,return error */ 404 + if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) 405 + pspi->max_speed_hz = PCH_MAX_BAUDRATE; 406 + 407 + dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, 408 + (pspi->mode) & (SPI_CPOL | SPI_CPHA)); 409 + 410 + return 0; 411 + } 412 + 413 + static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) 414 + { 415 + 416 + struct spi_transfer *transfer; 417 + struct pch_spi_data *data = spi_master_get_devdata(pspi->master); 418 + int retval; 419 + unsigned long flags; 420 + 421 + /* validate spi message and baud rate */ 422 + if (unlikely(list_empty(&pmsg->transfers) == 1)) { 423 + dev_err(&pspi->dev, "%s list empty\n", __func__); 424 + retval = -EINVAL; 425 + goto err_out; 426 + } 427 + 428 + if (unlikely(pspi->max_speed_hz == 0)) { 429 + dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", 430 + __func__, pspi->max_speed_hz); 431 + retval = -EINVAL; 432 + goto err_out; 433 + } 434 + 435 + dev_dbg(&pspi->dev, "%s Transfer List not empty. 
" 436 + "Transfer Speed is set.\n", __func__); 437 + 438 + /* validate Tx/Rx buffers and Transfer length */ 439 + list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { 440 + if (!transfer->tx_buf && !transfer->rx_buf) { 441 + dev_err(&pspi->dev, 442 + "%s Tx and Rx buffer NULL\n", __func__); 443 + retval = -EINVAL; 444 + goto err_out; 445 + } 446 + 447 + if (!transfer->len) { 448 + dev_err(&pspi->dev, "%s Transfer length invalid\n", 449 + __func__); 450 + retval = -EINVAL; 451 + goto err_out; 452 + } 453 + 454 + dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length" 455 + " valid\n", __func__); 456 + 457 + /* if baud rate hs been specified validate the same */ 458 + if (transfer->speed_hz > PCH_MAX_BAUDRATE) 459 + transfer->speed_hz = PCH_MAX_BAUDRATE; 460 + 461 + /* if bits per word has been specified validate the same */ 462 + if (transfer->bits_per_word) { 463 + if ((transfer->bits_per_word != 8) 464 + && (transfer->bits_per_word != 16)) { 465 + retval = -EINVAL; 466 + dev_err(&pspi->dev, 467 + "%s Invalid bits per word\n", __func__); 468 + goto err_out; 469 + } 470 + } 471 + } 472 + 473 + spin_lock_irqsave(&data->lock, flags); 474 + 475 + /* We won't process any messages if we have been asked to terminate */ 476 + if (data->status == STATUS_EXITING) { 477 + dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); 478 + retval = -ESHUTDOWN; 479 + goto err_return_spinlock; 480 + } 481 + 482 + /* If suspended ,return -EINVAL */ 483 + if (data->board_dat->suspend_sts) { 484 + dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); 485 + retval = -EINVAL; 486 + goto err_return_spinlock; 487 + } 488 + 489 + /* set status of message */ 490 + pmsg->actual_length = 0; 491 + dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); 492 + 493 + pmsg->status = -EINPROGRESS; 494 + 495 + /* add message to queue */ 496 + list_add_tail(&pmsg->queue, &data->queue); 497 + dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", 
__func__); 498 + 499 + /* schedule work queue to run */ 500 + queue_work(data->wk, &data->work); 501 + dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); 502 + 503 + retval = 0; 504 + 505 + err_return_spinlock: 506 + spin_unlock_irqrestore(&data->lock, flags); 507 + err_out: 508 + dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); 509 + return retval; 510 + } 511 + 512 + static inline void pch_spi_select_chip(struct pch_spi_data *data, 513 + struct spi_device *pspi) 514 + { 515 + if (data->current_chip != NULL) { 516 + if (pspi->chip_select != data->n_curnt_chip) { 517 + dev_dbg(&pspi->dev, "%s : different slave\n", __func__); 518 + data->current_chip = NULL; 519 + } 520 + } 521 + 522 + data->current_chip = pspi; 523 + 524 + data->n_curnt_chip = data->current_chip->chip_select; 525 + 526 + dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); 527 + pch_spi_setup_transfer(pspi); 528 + } 529 + 530 + static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, 531 + struct spi_message **ppmsg) 532 + { 533 + int size; 534 + u32 n_writes; 535 + int j; 536 + struct spi_message *pmsg; 537 + const u8 *tx_buf; 538 + const u16 *tx_sbuf; 539 + 540 + pmsg = *ppmsg; 541 + 542 + /* set baud rate if needed */ 543 + if (data->cur_trans->speed_hz) { 544 + dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); 545 + pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); 546 + } 547 + 548 + /* set bits per word if needed */ 549 + if (data->cur_trans->bits_per_word && 550 + (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { 551 + dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); 552 + pch_spi_set_bits_per_word(data->master, 553 + data->cur_trans->bits_per_word); 554 + *bpw = data->cur_trans->bits_per_word; 555 + } else { 556 + *bpw = data->current_msg->spi->bits_per_word; 557 + } 558 + 559 + /* reset Tx/Rx index */ 560 + data->tx_index = 0; 561 + data->rx_index = 0; 562 + 563 + 
data->bpw_len = data->cur_trans->len / (*bpw / 8); 564 + 565 + /* find alloc size */ 566 + size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); 567 + 568 + /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ 569 + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); 570 + if (data->pkt_tx_buff != NULL) { 571 + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); 572 + if (!data->pkt_rx_buff) 573 + kfree(data->pkt_tx_buff); 574 + } 575 + 576 + if (!data->pkt_rx_buff) { 577 + /* flush queue and set status of all transfers to -ENOMEM */ 578 + dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); 579 + list_for_each_entry(pmsg, data->queue.next, queue) { 580 + pmsg->status = -ENOMEM; 581 + 582 + if (pmsg->complete != 0) 583 + pmsg->complete(pmsg->context); 584 + 585 + /* delete from queue */ 586 + list_del_init(&pmsg->queue); 587 + } 588 + return; 589 + } 590 + 591 + /* copy Tx Data */ 592 + if (data->cur_trans->tx_buf != NULL) { 593 + if (*bpw == 8) { 594 + tx_buf = data->cur_trans->tx_buf; 595 + for (j = 0; j < data->bpw_len; j++) 596 + data->pkt_tx_buff[j] = *tx_buf++; 597 + } else { 598 + tx_sbuf = data->cur_trans->tx_buf; 599 + for (j = 0; j < data->bpw_len; j++) 600 + data->pkt_tx_buff[j] = *tx_sbuf++; 601 + } 602 + } 603 + 604 + /* if len greater than PCH_MAX_FIFO_DEPTH, write 16,else len bytes */ 605 + n_writes = data->bpw_len; 606 + if (n_writes > PCH_MAX_FIFO_DEPTH) 607 + n_writes = PCH_MAX_FIFO_DEPTH; 608 + 609 + dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " 610 + "0x2 to SSNXCR\n", __func__); 611 + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); 612 + 613 + for (j = 0; j < n_writes; j++) 614 + pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); 615 + 616 + /* update tx_index */ 617 + data->tx_index = j; 618 + 619 + /* reset transfer complete flag */ 620 + data->transfer_complete = false; 621 + data->transfer_active = true; 622 + } 623 + 624 + 625 + static void pch_spi_nomore_transfer(struct pch_spi_data *data, 626 
+ struct spi_message *pmsg) 627 + { 628 + dev_dbg(&data->master->dev, "%s called\n", __func__); 629 + /* Invoke complete callback 630 + * [To the spi core..indicating end of transfer] */ 631 + data->current_msg->status = 0; 632 + 633 + if (data->current_msg->complete != 0) { 634 + dev_dbg(&data->master->dev, 635 + "%s:Invoking callback of SPI core\n", __func__); 636 + data->current_msg->complete(data->current_msg->context); 637 + } 638 + 639 + /* update status in global variable */ 640 + data->bcurrent_msg_processing = false; 641 + 642 + dev_dbg(&data->master->dev, 643 + "%s:data->bcurrent_msg_processing = false\n", __func__); 644 + 645 + data->current_msg = NULL; 646 + data->cur_trans = NULL; 647 + 648 + /* check if we have items in list and not suspending 649 + * return 1 if list empty */ 650 + if ((list_empty(&data->queue) == 0) && 651 + (!data->board_dat->suspend_sts) && 652 + (data->status != STATUS_EXITING)) { 653 + /* We have some more work to do (either there are more transfer 654 + * requests in the current message or there are 655 + * more messages) 656 + */ 657 + dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); 658 + queue_work(data->wk, &data->work); 659 + } else if (data->board_dat->suspend_sts || 660 + data->status == STATUS_EXITING) { 661 + dev_dbg(&data->master->dev, 662 + "%s suspend/remove initiated, flushing queue\n", 663 + __func__); 664 + list_for_each_entry(pmsg, data->queue.next, queue) { 665 + pmsg->status = -EIO; 666 + 667 + if (pmsg->complete) 668 + pmsg->complete(pmsg->context); 669 + 670 + /* delete from queue */ 671 + list_del_init(&pmsg->queue); 672 + } 673 + } 674 + } 675 + 676 + static void pch_spi_set_ir(struct pch_spi_data *data) 677 + { 678 + /* enable interrupts */ 679 + if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { 680 + /* set receive threshold to PCH_RX_THOLD */ 681 + pch_spi_setclr_reg(data->master, PCH_SPCR, 682 + PCH_RX_THOLD << SPCR_TFIC_FIELD, 683 + ~MASK_TFIC_SPCR_BITS); 684 + /* enable FI and RFI 
interrupts */ 685 + pch_spi_setclr_reg(data->master, PCH_SPCR, 686 + SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0); 687 + } else { 688 + /* set receive threshold to maximum */ 689 + pch_spi_setclr_reg(data->master, PCH_SPCR, 690 + PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, 691 + ~MASK_TFIC_SPCR_BITS); 692 + /* enable FI interrupt */ 693 + pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); 694 + } 695 + 696 + dev_dbg(&data->master->dev, 697 + "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); 698 + 699 + /* SPI set enable */ 700 + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); 701 + 702 + /* Wait until the transfer completes; go to sleep after 703 + initiating the transfer. */ 704 + dev_dbg(&data->master->dev, 705 + "%s:waiting for transfer to get over\n", __func__); 706 + 707 + wait_event_interruptible(data->wait, data->transfer_complete); 708 + 709 + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); 710 + dev_dbg(&data->master->dev, 711 + "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); 712 + 713 + data->transfer_active = false; 714 + dev_dbg(&data->master->dev, 715 + "%s set data->transfer_active = false\n", __func__); 716 + 717 + /* clear all interrupts */ 718 + pch_spi_writereg(data->master, PCH_SPSR, 719 + pch_spi_readreg(data->master, PCH_SPSR)); 720 + /* disable interrupts */ 721 + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); 722 + } 723 + 724 + static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) 725 + { 726 + int j; 727 + u8 *rx_buf; 728 + u16 *rx_sbuf; 729 + 730 + /* copy Rx Data */ 731 + if (!data->cur_trans->rx_buf) 732 + return; 733 + 734 + if (bpw == 8) { 735 + rx_buf = data->cur_trans->rx_buf; 736 + for (j = 0; j < data->bpw_len; j++) 737 + *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; 738 + } else { 739 + rx_sbuf = data->cur_trans->rx_buf; 740 + for (j = 0; j < data->bpw_len; j++) 741 + *rx_sbuf++ = data->pkt_rx_buff[j]; 742 + } 743 + } 744 + 745 + 746 + static void 
pch_spi_process_messages(struct work_struct *pwork) 747 + { 748 + struct spi_message *pmsg; 749 + struct pch_spi_data *data; 750 + int bpw; 751 + 752 + data = container_of(pwork, struct pch_spi_data, work); 753 + dev_dbg(&data->master->dev, "%s data initialized\n", __func__); 754 + 755 + spin_lock(&data->lock); 756 + 757 + /* check if suspend has been initiated;if yes flush queue */ 758 + if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { 759 + dev_dbg(&data->master->dev, 760 + "%s suspend/remove initiated,flushing queue\n", 761 + __func__); 762 + 763 + list_for_each_entry(pmsg, data->queue.next, queue) { 764 + pmsg->status = -EIO; 765 + 766 + if (pmsg->complete != 0) { 767 + spin_unlock(&data->lock); 768 + pmsg->complete(pmsg->context); 769 + spin_lock(&data->lock); 770 + } 771 + 772 + /* delete from queue */ 773 + list_del_init(&pmsg->queue); 774 + } 775 + 776 + spin_unlock(&data->lock); 777 + return; 778 + } 779 + 780 + data->bcurrent_msg_processing = true; 781 + dev_dbg(&data->master->dev, 782 + "%s Set data->bcurrent_msg_processing= true\n", __func__); 783 + 784 + /* Get the message from the queue and delete it from there. */ 785 + data->current_msg = list_entry(data->queue.next, struct spi_message, 786 + queue); 787 + 788 + list_del_init(&data->current_msg->queue); 789 + 790 + data->current_msg->status = 0; 791 + 792 + pch_spi_select_chip(data, data->current_msg->spi); 793 + 794 + spin_unlock(&data->lock); 795 + 796 + do { 797 + /* If we are already processing a message get the next 798 + transfer structure from the message otherwise retrieve 799 + the 1st transfer request from the message. */ 800 + spin_lock(&data->lock); 801 + 802 + if (data->cur_trans == NULL) { 803 + data->cur_trans = 804 + list_entry(data->current_msg->transfers. 
805 + next, struct spi_transfer, 806 + transfer_list); 807 + dev_dbg(&data->master->dev, 808 + "%s :Getting 1st transfer message\n", __func__); 809 + } else { 810 + data->cur_trans = 811 + list_entry(data->cur_trans->transfer_list.next, 812 + struct spi_transfer, 813 + transfer_list); 814 + dev_dbg(&data->master->dev, 815 + "%s :Getting next transfer message\n", 816 + __func__); 817 + } 818 + 819 + spin_unlock(&data->lock); 820 + 821 + pch_spi_set_tx(data, &bpw, &pmsg); 822 + 823 + /* Control interrupt*/ 824 + pch_spi_set_ir(data); 825 + 826 + /* Disable SPI transfer */ 827 + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, 828 + SPCR_SPE_BIT); 829 + 830 + /* clear FIFO */ 831 + pch_spi_clear_fifo(data->master); 832 + 833 + /* copy Rx Data */ 834 + pch_spi_copy_rx_data(data, bpw); 835 + 836 + /* free memory */ 837 + kfree(data->pkt_rx_buff); 838 + data->pkt_rx_buff = NULL; 839 + 840 + kfree(data->pkt_tx_buff); 841 + data->pkt_tx_buff = NULL; 842 + 843 + /* increment message count */ 844 + data->current_msg->actual_length += data->cur_trans->len; 845 + 846 + dev_dbg(&data->master->dev, 847 + "%s:data->current_msg->actual_length=%d\n", 848 + __func__, data->current_msg->actual_length); 849 + 850 + /* check for delay */ 851 + if (data->cur_trans->delay_usecs) { 852 + dev_dbg(&data->master->dev, "%s:" 853 + "delay in usec=%d\n", __func__, 854 + data->cur_trans->delay_usecs); 855 + udelay(data->cur_trans->delay_usecs); 856 + } 857 + 858 + spin_lock(&data->lock); 859 + 860 + /* No more transfer in this message. 
*/ 861 + if ((data->cur_trans->transfer_list.next) == 862 + &(data->current_msg->transfers)) { 863 + pch_spi_nomore_transfer(data, pmsg); 864 + } 865 + 866 + spin_unlock(&data->lock); 867 + 868 + } while (data->cur_trans != NULL); 869 + } 870 + 871 + static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) 872 + { 873 + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); 874 + 875 + /* free workqueue */ 876 + if (board_dat->data->wk != NULL) { 877 + destroy_workqueue(board_dat->data->wk); 878 + board_dat->data->wk = NULL; 879 + dev_dbg(&board_dat->pdev->dev, 880 + "%s destroy_workqueue invoked successfully\n", 881 + __func__); 882 + } 883 + 884 + /* disable interrupts & free IRQ */ 885 + if (board_dat->irq_reg_sts) { 886 + /* disable interrupts */ 887 + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, 888 + PCH_ALL); 889 + 890 + /* free IRQ */ 891 + free_irq(board_dat->pdev->irq, board_dat); 892 + 893 + dev_dbg(&board_dat->pdev->dev, 894 + "%s free_irq invoked successfully\n", __func__); 895 + 896 + board_dat->irq_reg_sts = false; 897 + } 898 + 899 + /* unmap PCI base address */ 900 + if (board_dat->data->io_remap_addr != 0) { 901 + pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); 902 + 903 + board_dat->data->io_remap_addr = 0; 904 + 905 + dev_dbg(&board_dat->pdev->dev, 906 + "%s pci_iounmap invoked successfully\n", __func__); 907 + } 908 + 909 + /* release PCI region */ 910 + if (board_dat->pci_req_sts) { 911 + pci_release_regions(board_dat->pdev); 912 + dev_dbg(&board_dat->pdev->dev, 913 + "%s pci_release_regions invoked successfully\n", 914 + __func__); 915 + board_dat->pci_req_sts = false; 916 + } 917 + } 918 + 919 + static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) 920 + { 921 + void __iomem *io_remap_addr; 922 + int retval; 923 + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); 924 + 925 + /* create workqueue */ 926 + board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); 927 + 
if (!board_dat->data->wk) { 928 + dev_err(&board_dat->pdev->dev, 929 + "%s create_singlet hread_workqueue failed\n", __func__); 930 + retval = -EBUSY; 931 + goto err_return; 932 + } 933 + 934 + dev_dbg(&board_dat->pdev->dev, 935 + "%s create_singlethread_workqueue success\n", __func__); 936 + 937 + retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); 938 + if (retval != 0) { 939 + dev_err(&board_dat->pdev->dev, 940 + "%s request_region failed\n", __func__); 941 + goto err_return; 942 + } 943 + 944 + board_dat->pci_req_sts = true; 945 + 946 + io_remap_addr = pci_iomap(board_dat->pdev, 1, 0); 947 + if (io_remap_addr == 0) { 948 + dev_err(&board_dat->pdev->dev, 949 + "%s pci_iomap failed\n", __func__); 950 + retval = -ENOMEM; 951 + goto err_return; 952 + } 953 + 954 + /* calculate base address for all channels */ 955 + board_dat->data->io_remap_addr = io_remap_addr; 956 + 957 + /* reset PCH SPI h/w */ 958 + pch_spi_reset(board_dat->data->master); 959 + dev_dbg(&board_dat->pdev->dev, 960 + "%s pch_spi_reset invoked successfully\n", __func__); 961 + 962 + /* register IRQ */ 963 + retval = request_irq(board_dat->pdev->irq, pch_spi_handler, 964 + IRQF_SHARED, KBUILD_MODNAME, board_dat); 965 + if (retval != 0) { 966 + dev_err(&board_dat->pdev->dev, 967 + "%s request_irq failed\n", __func__); 968 + goto err_return; 969 + } 970 + 971 + dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", 972 + __func__, retval); 973 + 974 + board_dat->irq_reg_sts = true; 975 + dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); 976 + 977 + err_return: 978 + if (retval != 0) { 979 + dev_err(&board_dat->pdev->dev, 980 + "%s FAIL:invoking pch_spi_free_resources\n", __func__); 981 + pch_spi_free_resources(board_dat); 982 + } 983 + 984 + dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); 985 + 986 + return retval; 987 + } 988 + 989 + static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) 990 + { 991 + 992 + struct 
spi_master *master; 993 + 994 + struct pch_spi_board_data *board_dat; 995 + int retval; 996 + 997 + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 998 + 999 + /* allocate memory for private data */ 1000 + board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); 1001 + if (board_dat == NULL) { 1002 + dev_err(&pdev->dev, 1003 + " %s memory allocation for private data failed\n", 1004 + __func__); 1005 + retval = -ENOMEM; 1006 + goto err_kmalloc; 1007 + } 1008 + 1009 + dev_dbg(&pdev->dev, 1010 + "%s memory allocation for private data success\n", __func__); 1011 + 1012 + /* enable PCI device */ 1013 + retval = pci_enable_device(pdev); 1014 + if (retval != 0) { 1015 + dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); 1016 + 1017 + goto err_pci_en_device; 1018 + } 1019 + 1020 + dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", 1021 + __func__, retval); 1022 + 1023 + board_dat->pdev = pdev; 1024 + 1025 + /* alllocate memory for SPI master */ 1026 + master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); 1027 + if (master == NULL) { 1028 + retval = -ENOMEM; 1029 + dev_err(&pdev->dev, "%s Fail.\n", __func__); 1030 + goto err_spi_alloc_master; 1031 + } 1032 + 1033 + dev_dbg(&pdev->dev, 1034 + "%s spi_alloc_master returned non NULL\n", __func__); 1035 + 1036 + /* initialize members of SPI master */ 1037 + master->bus_num = -1; 1038 + master->num_chipselect = PCH_MAX_CS; 1039 + master->setup = pch_spi_setup; 1040 + master->transfer = pch_spi_transfer; 1041 + dev_dbg(&pdev->dev, 1042 + "%s transfer member of SPI master initialized\n", __func__); 1043 + 1044 + board_dat->data = spi_master_get_devdata(master); 1045 + 1046 + board_dat->data->master = master; 1047 + board_dat->data->n_curnt_chip = 255; 1048 + board_dat->data->board_dat = board_dat; 1049 + board_dat->data->status = STATUS_RUNNING; 1050 + 1051 + INIT_LIST_HEAD(&board_dat->data->queue); 1052 + spin_lock_init(&board_dat->data->lock); 1053 + INIT_WORK(&board_dat->data->work, 
pch_spi_process_messages); 1054 + init_waitqueue_head(&board_dat->data->wait); 1055 + 1056 + /* allocate resources for PCH SPI */ 1057 + retval = pch_spi_get_resources(board_dat); 1058 + if (retval) { 1059 + dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); 1060 + goto err_spi_get_resources; 1061 + } 1062 + 1063 + dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", 1064 + __func__, retval); 1065 + 1066 + /* save private data in dev */ 1067 + pci_set_drvdata(pdev, board_dat); 1068 + dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); 1069 + 1070 + /* set master mode */ 1071 + pch_spi_set_master_mode(master); 1072 + dev_dbg(&pdev->dev, 1073 + "%s invoked pch_spi_set_master_mode\n", __func__); 1074 + 1075 + /* Register the controller with the SPI core. */ 1076 + retval = spi_register_master(master); 1077 + if (retval != 0) { 1078 + dev_err(&pdev->dev, 1079 + "%s spi_register_master FAILED\n", __func__); 1080 + goto err_spi_reg_master; 1081 + } 1082 + 1083 + dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", 1084 + __func__, retval); 1085 + 1086 + 1087 + return 0; 1088 + 1089 + err_spi_reg_master: 1090 + spi_unregister_master(master); 1091 + err_spi_get_resources: 1092 + err_spi_alloc_master: 1093 + spi_master_put(master); 1094 + pci_disable_device(pdev); 1095 + err_pci_en_device: 1096 + kfree(board_dat); 1097 + err_kmalloc: 1098 + return retval; 1099 + } 1100 + 1101 + static void pch_spi_remove(struct pci_dev *pdev) 1102 + { 1103 + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); 1104 + int count; 1105 + 1106 + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1107 + 1108 + if (!board_dat) { 1109 + dev_err(&pdev->dev, 1110 + "%s pci_get_drvdata returned NULL\n", __func__); 1111 + return; 1112 + } 1113 + 1114 + /* check for any pending messages; no action is taken if the queue 1115 + * is still full; but at least we tried. 
Unload anyway */ 1116 + count = 500; 1117 + spin_lock(&board_dat->data->lock); 1118 + board_dat->data->status = STATUS_EXITING; 1119 + while ((list_empty(&board_dat->data->queue) == 0) && --count) { 1120 + dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", 1121 + __func__); 1122 + spin_unlock(&board_dat->data->lock); 1123 + msleep(PCH_SLEEP_TIME); 1124 + spin_lock(&board_dat->data->lock); 1125 + } 1126 + spin_unlock(&board_dat->data->lock); 1127 + 1128 + /* Free resources allocated for PCH SPI */ 1129 + pch_spi_free_resources(board_dat); 1130 + 1131 + spi_unregister_master(board_dat->data->master); 1132 + 1133 + /* free memory for private data */ 1134 + kfree(board_dat); 1135 + 1136 + pci_set_drvdata(pdev, NULL); 1137 + 1138 + /* disable PCI device */ 1139 + pci_disable_device(pdev); 1140 + 1141 + dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); 1142 + } 1143 + 1144 + #ifdef CONFIG_PM 1145 + static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) 1146 + { 1147 + u8 count; 1148 + int retval; 1149 + 1150 + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); 1151 + 1152 + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1153 + 1154 + if (!board_dat) { 1155 + dev_err(&pdev->dev, 1156 + "%s pci_get_drvdata returned NULL\n", __func__); 1157 + return -EFAULT; 1158 + } 1159 + 1160 + retval = 0; 1161 + board_dat->suspend_sts = true; 1162 + 1163 + /* check if the current message is processed: 1164 + Only after that's done the transfer will be suspended */ 1165 + count = 255; 1166 + while ((--count) > 0) { 1167 + if (!(board_dat->data->bcurrent_msg_processing)) { 1168 + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" 1169 + "msg_processing = false\n", __func__); 1170 + break; 1171 + } else { 1172 + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" 1173 + "processing = true\n", __func__); 1174 + } 1175 + msleep(PCH_SLEEP_TIME); 1176 + } 1177 + 1178 + /* Free IRQ */ 1179 + if (board_dat->irq_reg_sts) { 1180 + /* disable all 
interrupts */ 1181 + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, 1182 + PCH_ALL); 1183 + pch_spi_reset(board_dat->data->master); 1184 + 1185 + free_irq(board_dat->pdev->irq, board_dat); 1186 + 1187 + board_dat->irq_reg_sts = false; 1188 + dev_dbg(&pdev->dev, 1189 + "%s free_irq invoked successfully.\n", __func__); 1190 + } 1191 + 1192 + /* save config space */ 1193 + retval = pci_save_state(pdev); 1194 + 1195 + if (retval == 0) { 1196 + dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", 1197 + __func__, retval); 1198 + /* disable PM notifications */ 1199 + pci_enable_wake(pdev, PCI_D3hot, 0); 1200 + dev_dbg(&pdev->dev, 1201 + "%s pci_enable_wake invoked successfully\n", __func__); 1202 + /* disable PCI device */ 1203 + pci_disable_device(pdev); 1204 + dev_dbg(&pdev->dev, 1205 + "%s pci_disable_device invoked successfully\n", 1206 + __func__); 1207 + /* move device to D3hot state */ 1208 + pci_set_power_state(pdev, PCI_D3hot); 1209 + dev_dbg(&pdev->dev, 1210 + "%s pci_set_power_state invoked successfully\n", 1211 + __func__); 1212 + } else { 1213 + dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); 1214 + } 1215 + 1216 + dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); 1217 + 1218 + return retval; 1219 + } 1220 + 1221 + static int pch_spi_resume(struct pci_dev *pdev) 1222 + { 1223 + int retval; 1224 + 1225 + struct pch_spi_board_data *board = pci_get_drvdata(pdev); 1226 + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); 1227 + 1228 + if (!board) { 1229 + dev_err(&pdev->dev, 1230 + "%s pci_get_drvdata returned NULL\n", __func__); 1231 + return -EFAULT; 1232 + } 1233 + 1234 + /* move device to DO power state */ 1235 + pci_set_power_state(pdev, PCI_D0); 1236 + 1237 + /* restore state */ 1238 + pci_restore_state(pdev); 1239 + 1240 + retval = pci_enable_device(pdev); 1241 + if (retval < 0) { 1242 + dev_err(&pdev->dev, 1243 + "%s pci_enable_device failed\n", __func__); 1244 + } else { 1245 + /* disable PM notifications */ 1246 + 
pci_enable_wake(pdev, PCI_D3hot, 0); 1247 + 1248 + /* register IRQ handler */ 1249 + if (!board->irq_reg_sts) { 1250 + /* register IRQ */ 1251 + retval = request_irq(board->pdev->irq, pch_spi_handler, 1252 + IRQF_SHARED, KBUILD_MODNAME, 1253 + board); 1254 + if (retval < 0) { 1255 + dev_err(&pdev->dev, 1256 + "%s request_irq failed\n", __func__); 1257 + return retval; 1258 + } 1259 + board->irq_reg_sts = true; 1260 + 1261 + /* reset PCH SPI h/w */ 1262 + pch_spi_reset(board->data->master); 1263 + pch_spi_set_master_mode(board->data->master); 1264 + 1265 + /* set suspend status to false */ 1266 + board->suspend_sts = false; 1267 + 1268 + } 1269 + } 1270 + 1271 + dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); 1272 + 1273 + return retval; 1274 + } 1275 + #else 1276 + #define pch_spi_suspend NULL 1277 + #define pch_spi_resume NULL 1278 + 1279 + #endif 1280 + 1281 + static struct pci_driver pch_spi_pcidev = { 1282 + .name = "pch_spi", 1283 + .id_table = pch_spi_pcidev_id, 1284 + .probe = pch_spi_probe, 1285 + .remove = pch_spi_remove, 1286 + .suspend = pch_spi_suspend, 1287 + .resume = pch_spi_resume, 1288 + }; 1289 + 1290 + static int __init pch_spi_init(void) 1291 + { 1292 + return pci_register_driver(&pch_spi_pcidev); 1293 + } 1294 + module_init(pch_spi_init); 1295 + 1296 + static void __exit pch_spi_exit(void) 1297 + { 1298 + pci_unregister_driver(&pch_spi_pcidev); 1299 + } 1300 + module_exit(pch_spi_exit); 1301 + 1302 + MODULE_LICENSE("GPL"); 1303 + MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");
+6 -7
include/linux/amba/pl022.h
··· 228 228 }; 229 229 230 230 231 + struct dma_chan; 231 232 /** 232 233 * struct pl022_ssp_master - device.platform_data for SPI controller devices. 233 234 * @num_chipselect: chipselects are used to distinguish individual ··· 236 235 * each slave has a chipselect signal, but it's common that not 237 236 * every chipselect is connected to a slave. 238 237 * @enable_dma: if true enables DMA driven transfers. 238 + * @dma_rx_param: parameter to locate an RX DMA channel. 239 + * @dma_tx_param: parameter to locate a TX DMA channel. 239 240 */ 240 241 struct pl022_ssp_controller { 241 242 u16 bus_id; 242 243 u8 num_chipselect; 243 244 u8 enable_dma:1; 245 + bool (*dma_filter)(struct dma_chan *chan, void *filter_param); 246 + void *dma_rx_param; 247 + void *dma_tx_param; 244 248 }; 245 249 246 250 /** ··· 276 270 * @dma_config: DMA configuration for SSP controller and peripheral 277 271 */ 278 272 struct pl022_config_chip { 279 - struct device *dev; 280 - enum ssp_loopback lbm; 281 273 enum ssp_interface iface; 282 274 enum ssp_hierarchy hierarchy; 283 275 bool slave_tx_disable; 284 276 struct ssp_clock_params clk_freq; 285 - enum ssp_rx_endian endian_rx; 286 - enum ssp_tx_endian endian_tx; 287 - enum ssp_data_size data_size; 288 277 enum ssp_mode com_mode; 289 278 enum ssp_rx_level_trig rx_lev_trig; 290 279 enum ssp_tx_level_trig tx_lev_trig; 291 - enum ssp_spi_clk_phase clk_phase; 292 - enum ssp_spi_clk_pol clk_pol; 293 280 enum ssp_microwire_ctrl_len ctrl_len; 294 281 enum ssp_microwire_wait_state wait_state; 295 282 enum ssp_duplex duplex;