Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6

* 'next-spi' of git://git.secretlab.ca/git/linux-2.6: (77 commits)
spi/omap: Fix DMA API usage in OMAP MCSPI driver
spi/imx: correct the test on platform_get_irq() return value
spi/topcliff: Typo fix threhold to threshold
spi/dw_spi Typo change diable to disable.
spi/fsl_espi: change the read behaviour of the SPIRF
spi/mpc52xx-psc-spi: move probe/remove to proper sections
spi/dw_spi: add DMA support
spi/dw_spi: change to EXPORT_SYMBOL_GPL for exported APIs
spi/dw_spi: Fix too short timeout in spi polling loop
spi/pl022: convert running variable
spi/pl022: convert busy flag to a bool
spi/pl022: pass the returned sglen to the DMA engine
spi/pl022: map the buffers on the DMA engine
spi/topcliff_pch: Fix data transfer issue
spi/imx: remove autodetection
spi/pxa2xx: pass of_node to spi device and set a parent device
spi/pxa2xx: Modify RX-Tresh instead of busy-loop for the remaining RX bytes.
spi/pxa2xx: Add chipselect support for Sodaville
spi/pxa2xx: Consider CE4100's FIFO depth
spi/pxa2xx: Add CE4100 support
...

Overall diffstat: +1607 -1376
+2 -2
Documentation/spi/pxa2xx
···
 -----------------------------------
 Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
 "platform device". The master configuration is passed to the driver via a table
-found in arch/arm/mach-pxa/include/mach/pxa2xx_spi.h:
+found in include/linux/spi/pxa2xx_spi.h:
 
 struct pxa2xx_spi_master {
         enum pxa_ssp_type ssp_type;
···
 
 Each slave device attached to the PXA must provide slave specific configuration
 information via the structure "pxa2xx_spi_chip" found in
-"arch/arm/mach-pxa/include/mach/pxa2xx_spi.h". The pxa2xx_spi master controller driver
+"include/linux/spi/pxa2xx_spi.h". The pxa2xx_spi master controller driver
 will uses the configuration whenever the driver communicates with the slave
 device. All fields are optional.
 
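For reference, board code consuming the relocated header looks roughly like the sketch below. It is a hypothetical example (board name, SSP port number and field values are invented); the structure fields and the pxa2xx_set_spi_info() helper are the ones visible in the removed mach/pxa2xx_spi.h later in this diff.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

/* Controller-wide settings for one SSP port acting as SPI master. */
static struct pxa2xx_spi_master example_ssp1_master_info = {
        .clock_enable   = 0,    /* SoC-specific clock enable bits, illustrative */
        .num_chipselect = 1,
        .enable_dma     = 1,    /* let the driver use DMA for transfers */
};

static void __init example_board_init_spi(void)
{
        /* register SSP port 1 as an SPI master with the platform data above */
        pxa2xx_set_spi_info(1, &example_ssp1_master_info);
}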
+1 -6
arch/arm/mach-davinci/dm355.c
···
 static struct davinci_spi_platform_data dm355_spi0_pdata = {
         .version        = SPI_VERSION_1,
         .num_chipselect = 2,
-        .clk_internal   = 1,
-        .cs_hold        = 1,
-        .intr_level     = 0,
-        .poll_mode      = 1,    /* 0 -> interrupt mode 1-> polling mode */
-        .c2tdelay       = 0,
-        .t2cdelay       = 0,
+        .cshold_bug     = true,
 };
 static struct platform_device dm355_spi0_device = {
         .name = "spi_davinci",
-6
arch/arm/mach-davinci/dm365.c
···
 static struct davinci_spi_platform_data dm365_spi0_pdata = {
         .version        = SPI_VERSION_1,
         .num_chipselect = 2,
-        .clk_internal   = 1,
-        .cs_hold        = 1,
-        .intr_level     = 0,
-        .poll_mode      = 1,    /* 0 -> interrupt mode 1-> polling mode */
-        .c2tdelay       = 0,
-        .t2cdelay       = 0,
 };
 
 static struct resource dm365_spi0_resources[] = {
+46 -6
arch/arm/mach-davinci/include/mach/spi.h
···
 #ifndef __ARCH_ARM_DAVINCI_SPI_H
 #define __ARCH_ARM_DAVINCI_SPI_H
 
+#define SPI_INTERN_CS   0xFF
+
 enum {
         SPI_VERSION_1, /* For DM355/DM365/DM6467 */
         SPI_VERSION_2, /* For DA8xx */
 };
 
+/**
+ * davinci_spi_platform_data - Platform data for SPI master device on DaVinci
+ *
+ * @version:    version of the SPI IP. Different DaVinci devices have slightly
+ *              varying versions of the same IP.
+ * @num_chipselect: number of chipselects supported by this SPI master
+ * @intr_line:  interrupt line used to connect the SPI IP to the ARM interrupt
+ *              controller withn the SoC. Possible values are 0 and 1.
+ * @chip_sel:   list of GPIOs which can act as chip-selects for the SPI.
+ *              SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
+ *              to populate if all chip-selects are internal.
+ * @cshold_bug: set this to true if the SPI controller on your chip requires
+ *              a write to CSHOLD bit in between transfers (like in DM355).
+ */
 struct davinci_spi_platform_data {
         u8      version;
         u8      num_chipselect;
+        u8      intr_line;
+        u8      *chip_sel;
+        bool    cshold_bug;
+};
+
+/**
+ * davinci_spi_config - Per-chip-select configuration for SPI slave devices
+ *
+ * @wdelay:     amount of delay between transmissions. Measured in number of
+ *              SPI module clocks.
+ * @odd_parity: polarity of parity flag at the end of transmit data stream.
+ *              0 - odd parity, 1 - even parity.
+ * @parity_enable: enable transmission of parity at end of each transmit
+ *              data stream.
+ * @io_type:    type of IO transfer. Choose between polled, interrupt and DMA.
+ * @timer_disable: disable chip-select timers (setup and hold)
+ * @c2tdelay:   chip-select setup time. Measured in number of SPI module clocks.
+ * @t2cdelay:   chip-select hold time. Measured in number of SPI module clocks.
+ * @t2edelay:   transmit data finished to SPI ENAn pin inactive time. Measured
+ *              in number of SPI clocks.
+ * @c2edelay:   chip-select active to SPI ENAn signal active time. Measured in
+ *              number of SPI clocks.
+ */
+struct davinci_spi_config {
         u8      wdelay;
         u8      odd_parity;
         u8      parity_enable;
-        u8      wait_enable;
+#define SPI_IO_TYPE_INTR        0
+#define SPI_IO_TYPE_POLL        1
+#define SPI_IO_TYPE_DMA         2
+        u8      io_type;
         u8      timer_disable;
-        u8      clk_internal;
-        u8      cs_hold;
-        u8      intr_level;
-        u8      poll_mode;
-        u8      use_dma;
         u8      c2tdelay;
         u8      t2cdelay;
+        u8      t2edelay;
+        u8      c2edelay;
 };
 
 #endif  /* __ARCH_ARM_DAVINCI_SPI_H */
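Taken together with the dm355.c change above, a board using the reworked split (controller-wide davinci_spi_platform_data, per-device davinci_spi_config passed via spi_board_info.controller_data, as the driver rework below expects) would look roughly like this sketch. Device names, the GPIO number and the clock rate are invented for illustration; the fields and SPI_IO_TYPE_* constants are the ones added in this header.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <mach/spi.h>

static u8 example_spi0_chip_sel[] = {
        SPI_INTERN_CS,          /* CS0: controller-internal chip select */
        42,                     /* CS1: driven by GPIO 42 (hypothetical) */
};

static struct davinci_spi_platform_data example_spi0_pdata = {
        .version        = SPI_VERSION_1,
        .num_chipselect = ARRAY_SIZE(example_spi0_chip_sel),
        .intr_line      = 0,
        .chip_sel       = example_spi0_chip_sel,
        .cshold_bug     = true,         /* required on DM355-class silicon */
};

static struct davinci_spi_config example_flash_cfg = {
        .io_type        = SPI_IO_TYPE_DMA,
        .wdelay         = 0,
        .c2tdelay       = 8,            /* chip-select setup, in SPI module clocks */
        .t2cdelay       = 8,            /* chip-select hold, in SPI module clocks */
};

static struct spi_board_info example_spi0_board_info[] = {
        {
                .modalias        = "m25p80",    /* hypothetical SPI flash slave */
                .controller_data = &example_flash_cfg,
                .max_speed_hz    = 20 * 1000 * 1000,
                .bus_num         = 0,
                .chip_select     = 0,
        },
};

static void __init example_board_init_spi(void)
{
        /* hand the per-slave config to the SPI core; the davinci driver
         * reads it back through spi->controller_data */
        spi_register_board_info(example_spi0_board_info,
                                ARRAY_SIZE(example_spi0_board_info));
}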
+1 -1
arch/arm/mach-pxa/cm-x255.c
···
 #include <linux/mtd/nand-gpio.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
 
 #include <mach/pxa25x.h>
-#include <mach/pxa2xx_spi.h>
 
 #include "generic.h"
 
+1 -1
arch/arm/mach-pxa/cm-x270.c
···
 #include <video/mbxfb.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/spi/libertas_spi.h>
 
 #include <mach/pxa27x.h>
 #include <mach/ohci.h>
 #include <mach/mmc.h>
-#include <mach/pxa2xx_spi.h>
 
 #include "generic.h"
 
+1 -1
arch/arm/mach-pxa/corgi.c
···
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
 #include <linux/input/matrix_keypad.h>
 #include <video/w100fb.h>
···
 #include <mach/irda.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/corgi.h>
 #include <mach/sharpsl_pm.h>
 
+1 -1
arch/arm/mach-pxa/devices.c
···
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/spi/pxa2xx_spi.h>
 
 #include <asm/pmu.h>
 #include <mach/udc.h>
···
 #include <mach/irda.h>
 #include <mach/ohci.h>
 #include <plat/pxa27x_keypad.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/camera.h>
 #include <mach/audio.h>
 #include <mach/hardware.h>
+1 -1
arch/arm/mach-pxa/em-x270.c
···
 #include <linux/spi/spi.h>
 #include <linux/spi/tdo24m.h>
 #include <linux/spi/libertas_spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/power_supply.h>
 #include <linux/apm-emulation.h>
 #include <linux/i2c.h>
···
 #include <plat/pxa27x_keypad.h>
 #include <plat/i2c.h>
 #include <mach/camera.h>
-#include <mach/pxa2xx_spi.h>
 
 #include "generic.h"
 #include "devices.h"
+1 -1
arch/arm/mach-pxa/hx4700.c
···
 #include <linux/regulator/max1586.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/usb/gpio_vbus.h>
 
 #include <mach/hardware.h>
···
 #include <mach/hx4700.h>
 #include <plat/i2c.h>
 #include <mach/irda.h>
-#include <mach/pxa2xx_spi.h>
 
 #include <video/platform_lcd.h>
 #include <video/w100fb.h>
+1 -1
arch/arm/mach-pxa/icontrol.c
···
 #include <mach/mxm8x10.h>
 
 #include <linux/spi/spi.h>
-#include <mach/pxa2xx_spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/can/platform/mcp251x.h>
 
 #include "generic.h"
-47
arch/arm/mach-pxa/include/mach/pxa2xx_spi.h
···
-/*
- * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef PXA2XX_SPI_H_
-#define PXA2XX_SPI_H_
-
-#define PXA2XX_CS_ASSERT (0x01)
-#define PXA2XX_CS_DEASSERT (0x02)
-
-/* device.platform_data for SSP controller devices */
-struct pxa2xx_spi_master {
-        u32 clock_enable;
-        u16 num_chipselect;
-        u8 enable_dma;
-};
-
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
- */
-struct pxa2xx_spi_chip {
-        u8 tx_threshold;
-        u8 rx_threshold;
-        u8 dma_burst_size;
-        u32 timeout;
-        u8 enable_loopback;
-        int gpio_cs;
-        void (*cs_control)(u32 command);
-};
-
-extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
-
-#endif /*PXA2XX_SPI_H_*/
+1 -1
arch/arm/mach-pxa/littleton.c
···
 #include <linux/clk.h>
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/smc91x.h>
 #include <linux/i2c.h>
 #include <linux/leds.h>
···
 #include <mach/pxa300.h>
 #include <mach/pxafb.h>
 #include <mach/mmc.h>
-#include <mach/pxa2xx_spi.h>
 #include <plat/pxa27x_keypad.h>
 #include <mach/littleton.h>
 #include <plat/i2c.h>
+1 -1
arch/arm/mach-pxa/lubbock.c
···
 
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
-#include <mach/pxa2xx_spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 
 #include <asm/setup.h>
 #include <asm/memory.h>
+1 -1
arch/arm/mach-pxa/pcm027.c
···
 #include <linux/mtd/physmap.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/max7301.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/leds.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <mach/pxa27x.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/pcm027.h>
 #include "generic.h"
 
+1 -1
arch/arm/mach-pxa/poodle.c
···
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
 
 #include <mach/hardware.h>
···
 #include <mach/irda.h>
 #include <mach/poodle.h>
 #include <mach/pxafb.h>
-#include <mach/pxa2xx_spi.h>
 #include <plat/i2c.h>
 
 #include <asm/hardware/scoop.h>
+1 -2
arch/arm/mach-pxa/spitz.c
···
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 #include <linux/spi/corgi_lcd.h>
-#include <linux/mtd/physmap.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/sharpsl.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/regulator/machine.h>
···
 #include <mach/mmc.h>
 #include <mach/ohci.h>
 #include <mach/pxafb.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/spitz.h>
 #include <mach/sharpsl_pm.h>
 #include <mach/smemc.h>
+1 -1
arch/arm/mach-pxa/stargate2.c
···
 #include <plat/i2c.h>
 #include <mach/mmc.h>
 #include <mach/udc.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/pxa27x-udc.h>
 #include <mach/smemc.h>
 
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mfd/da903x.h>
 #include <linux/sht15.h>
 
+1 -1
arch/arm/mach-pxa/tosa.c
···
 #include <linux/gpio.h>
 #include <linux/pda_power.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/input/matrix_keypad.h>
 
 #include <asm/setup.h>
···
 #include <mach/mmc.h>
 #include <mach/udc.h>
 #include <mach/tosa_bt.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/audio.h>
 #include <mach/smemc.h>
-1
arch/arm/mach-pxa/trizeps4.c
···
 #include <asm/mach/flash.h>
 
 #include <mach/pxa27x.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/trizeps4.h>
 #include <mach/audio.h>
 #include <mach/pxafb.h>
+1 -1
arch/arm/mach-pxa/z2.c
···
 #include <linux/z2_battery.h>
 #include <linux/dma-mapping.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/spi/libertas_spi.h>
 #include <linux/spi/lms283gf05.h>
 #include <linux/power_supply.h>
···
 #include <mach/pxafb.h>
 #include <mach/mmc.h>
 #include <plat/pxa27x_keypad.h>
-#include <mach/pxa2xx_spi.h>
 
 #include <plat/i2c.h>
 
+1 -1
arch/arm/mach-pxa/zeus.c
···
 #include <linux/dm9000.h>
 #include <linux/mmc/host.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
···
 #include <mach/pxa27x-udc.h>
 #include <mach/udc.h>
 #include <mach/pxafb.h>
-#include <mach/pxa2xx_spi.h>
 #include <mach/mfp-pxa27x.h>
 #include <mach/pm.h>
 #include <mach/audio.h>
+31 -8
arch/arm/plat-pxa/include/plat/ssp.h => include/linux/pxa2xx_ssp.h
···
 /*
- * ssp.h
+ * pxa2xx_ssp.h
  *
  * Copyright (C) 2003 Russell King, All Rights Reserved.
  *
···
  *  PXA3xx     SSP1, SSP2, SSP3, SSP4
  */
 
-#ifndef __ASM_ARCH_SSP_H
-#define __ASM_ARCH_SSP_H
+#ifndef __LINUX_SSP_H
+#define __LINUX_SSP_H
 
 #include <linux/list.h>
 #include <linux/io.h>
···
 #define SSCR1_SPO       (1 << 3)        /* Motorola SPI SSPSCLK polarity setting */
 #define SSCR1_SPH       (1 << 4)        /* Motorola SPI SSPSCLK phase setting */
 #define SSCR1_MWDS      (1 << 5)        /* Microwire Transmit Data Size */
-#define SSCR1_TFT       (0x000003c0)    /* Transmit FIFO Threshold (mask) */
-#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
-#define SSCR1_RFT       (0x00003c00)    /* Receive FIFO Threshold (mask) */
-#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
 
+#define SSSR_ALT_FRM_MASK       3       /* Masks the SFRM signal number */
 #define SSSR_TNF        (1 << 2)        /* Transmit FIFO Not Full */
 #define SSSR_RNE        (1 << 3)        /* Receive FIFO Not Empty */
 #define SSSR_BSY        (1 << 4)        /* SSP Busy */
···
 #define SSSR_RFS        (1 << 6)        /* Receive FIFO Service Request */
 #define SSSR_ROR        (1 << 7)        /* Receive FIFO Overrun */
 
+#ifdef CONFIG_ARCH_PXA
+#define RX_THRESH_DFLT  8
+#define TX_THRESH_DFLT  8
+
+#define SSSR_TFL_MASK   (0xf << 8)      /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK   (0xf << 12)     /* Receive FIFO Level mask */
+
+#define SSCR1_TFT       (0x000003c0)    /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT       (0x00003c00)    /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#else
+
+#define RX_THRESH_DFLT  2
+#define TX_THRESH_DFLT  2
+
+#define SSSR_TFL_MASK   (0x3 << 8)      /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK   (0x3 << 12)     /* Receive FIFO Level mask */
+
+#define SSCR1_TFT       (0x000000c0)    /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
+#define SSCR1_RFT       (0x00000c00)    /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+#endif
 
 /* extra bits in PXA255, PXA26x and PXA27x SSP ports */
 #define SSCR0_TISSP             (1 << 4)        /* TI Sync Serial Protocol */
···
         PXA25x_NSSP,    /* pxa 255, 26x (including ASSP) */
         PXA27x_SSP,
         PXA168_SSP,
+        CE4100_SSP,
 };
 
 struct ssp_device {
···
 
 struct ssp_device *pxa_ssp_request(int port, const char *label);
 void pxa_ssp_free(struct ssp_device *);
-#endif  /* __ASM_ARCH_SSP_H */
+#endif
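The ifdef split above means the FIFO-threshold macros expand differently on PXA (16-entry FIFOs) and on CE4100/Sodaville (4-entry FIFOs). A minimal sketch of how a driver could program the thresholds portably with these macros follows; the helper name, the "regs" parameter and the 0x04 SSCR1 offset are illustrative assumptions, not part of this patch.

#include <linux/io.h>
#include <linux/pxa2xx_ssp.h>

/* Sketch: set the default RX/TX FIFO service-request thresholds for an SSP
 * port.  RX_THRESH_DFLT/TX_THRESH_DFLT are 8 under CONFIG_ARCH_PXA and 2
 * otherwise, so the same code copes with either FIFO depth.
 */
static void example_set_default_thresholds(void __iomem *regs)
{
        u32 sscr1 = readl(regs + 0x04);                 /* current SSCR1 value */

        sscr1 &= ~(SSCR1_RFT | SSCR1_TFT);              /* clear threshold fields */
        sscr1 |= SSCR1_RxTresh(RX_THRESH_DFLT) |        /* RX service level */
                 SSCR1_TxTresh(TX_THRESH_DFLT);         /* TX service level */
        writel(sscr1, regs + 0x04);
}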
+1 -1
arch/arm/plat-pxa/ssp.c
···
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
+#include <linux/spi/pxa2xx_spi.h>
 #include <linux/io.h>
 
 #include <asm/irq.h>
 #include <mach/hardware.h>
-#include <plat/ssp.h>
 
 static DEFINE_MUTEX(ssp_lock);
 static LIST_HEAD(ssp_list);
+17 -21
drivers/spi/Kconfig
···
           will be called coldfire_qspi.
 
 config SPI_DAVINCI
-        tristate "SPI controller driver for DaVinci/DA8xx SoC's"
+        tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
         depends on SPI_MASTER && ARCH_DAVINCI
         select SPI_BITBANG
         help
-          SPI master controller for DaVinci and DA8xx SPI modules.
+          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+
+          This driver can also be built as a module. The module will be called
+          davinci_spi.
 
 config SPI_EP93XX
         tristate "Cirrus Logic EP93xx SPI controller"
···
 
 config SPI_PXA2XX
         tristate "PXA2xx SSP SPI master"
-        depends on ARCH_PXA && EXPERIMENTAL
-        select PXA_SSP
+        depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL
+        select PXA_SSP if ARCH_PXA
         help
-          This enables using a PXA2xx SSP port as a SPI master controller.
-          The driver can be configured to use any SSP port and additional
-          documentation can be found a Documentation/spi/pxa2xx.
+          This enables using a PXA2xx or Sodaville SSP port as a SPI master
+          controller. The driver can be configured to use any SSP port and
+          additional documentation can be found a Documentation/spi/pxa2xx.
+
+config SPI_PXA2XX_PCI
+        def_bool SPI_PXA2XX && X86_32 && PCI
 
 config SPI_S3C24XX
         tristate "Samsung S3C24XX series SPI"
···
         tristate "Xilinx SPI controller common module"
         depends on HAS_IOMEM && EXPERIMENTAL
         select SPI_BITBANG
-        select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE)
         help
           This exposes the SPI controller IP from the Xilinx EDK.
 
···
           Product Specification document (DS464) for hardware details.
 
           Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
-
-config SPI_XILINX_OF
-        tristate "Xilinx SPI controller OF device"
-        depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE)
-        help
-          This is the OF driver for the SPI controller IP from the Xilinx EDK.
-
-config SPI_XILINX_PLTFM
-        tristate "Xilinx SPI controller platform device"
-        depends on SPI_XILINX
-        help
-          This is the platform driver for the SPI controller IP
-          from the Xilinx EDK.
 
 config SPI_NUC900
         tristate "Nuvoton NUC900 series SPI"
···
 config SPI_DW_PCI
         tristate "PCI interface driver for DW SPI core"
         depends on SPI_DESIGNWARE && PCI
+
+config SPI_DW_MID_DMA
+        bool "DMA support for DW SPI controller on Intel Moorestown platform"
+        depends on SPI_DW_PCI && INTEL_MID_DMAC
 
 config SPI_DW_MMIO
         tristate "Memory-mapped io interface driver for DW SPI core"
+3 -3
drivers/spi/Makefile
···
 obj-$(CONFIG_SPI_COLDFIRE_QSPI)         += coldfire_qspi.o
 obj-$(CONFIG_SPI_DAVINCI)               += davinci_spi.o
 obj-$(CONFIG_SPI_DESIGNWARE)            += dw_spi.o
-obj-$(CONFIG_SPI_DW_PCI)                += dw_spi_pci.o
+obj-$(CONFIG_SPI_DW_PCI)                += dw_spi_midpci.o
+dw_spi_midpci-objs                      := dw_spi_pci.o dw_spi_mid.o
 obj-$(CONFIG_SPI_DW_MMIO)               += dw_spi_mmio.o
 obj-$(CONFIG_SPI_EP93XX)                += ep93xx_spi.o
 obj-$(CONFIG_SPI_GPIO)                  += spi_gpio.o
 obj-$(CONFIG_SPI_IMX)                   += spi_imx.o
 obj-$(CONFIG_SPI_LM70_LLP)              += spi_lm70llp.o
 obj-$(CONFIG_SPI_PXA2XX)                += pxa2xx_spi.o
+obj-$(CONFIG_SPI_PXA2XX_PCI)            += pxa2xx_spi_pci.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)            += omap_uwire.o
 obj-$(CONFIG_SPI_OMAP24XX)              += omap2_mcspi.o
 obj-$(CONFIG_SPI_OMAP_100K)             += omap_spi_100k.o
···
 obj-$(CONFIG_SPI_TOPCLIFF_PCH)          += spi_topcliff_pch.o
 obj-$(CONFIG_SPI_TXX9)                  += spi_txx9.o
 obj-$(CONFIG_SPI_XILINX)                += xilinx_spi.o
-obj-$(CONFIG_SPI_XILINX_OF)             += xilinx_spi_of.o
-obj-$(CONFIG_SPI_XILINX_PLTFM)          += xilinx_spi_pltfm.o
 obj-$(CONFIG_SPI_SH_SCI)                += spi_sh_sci.o
 obj-$(CONFIG_SPI_SH_MSIOF)              += spi_sh_msiof.o
 obj-$(CONFIG_SPI_STMP3XXX)              += spi_stmp.o
+26 -30
drivers/spi/amba-pl022.c
···
 #define STATE_ERROR                     ((void *) -1)
 
 /*
- * Queue State
- */
-#define QUEUE_RUNNING                   (0)
-#define QUEUE_STOPPED                   (1)
-/*
  * SSP State - Whether Enabled or Disabled
  */
 #define SSP_DISABLED                    (0)
···
  * @lock: spinlock to syncronise access to driver data
  * @workqueue: a workqueue on which any spi_message request is queued
  * @busy: workqueue is busy
- * @run: workqueue is running
+ * @running: workqueue is running
  * @pump_transfers: Tasklet used in Interrupt Transfer mode
  * @cur_msg: Pointer to current spi_message being processed
  * @cur_transfer: Pointer to current spi_transfer
···
         struct work_struct              pump_messages;
         spinlock_t                      queue_lock;
         struct list_head                queue;
-        int                             busy;
-        int                             run;
+        bool                            busy;
+        bool                            running;
         /* Message transfer pump */
         struct tasklet_struct           pump_transfers;
         struct spi_message              *cur_msg;
···
 static void unmap_free_dma_scatter(struct pl022 *pl022)
 {
         /* Unmap and free the SG tables */
-        dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+        dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
                      pl022->sgt_tx.nents, DMA_TO_DEVICE);
-        dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+        dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
                      pl022->sgt_rx.nents, DMA_FROM_DEVICE);
         sg_free_table(&pl022->sgt_rx);
         sg_free_table(&pl022->sgt_tx);
···
         };
         unsigned int pages;
         int ret;
-        int sglen;
+        int rx_sglen, tx_sglen;
         struct dma_chan *rxchan = pl022->dma_rx_channel;
         struct dma_chan *txchan = pl022->dma_tx_channel;
         struct dma_async_tx_descriptor *rxdesc;
···
                 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                 break;
         case WRITING_U32:
-                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;;
+                tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                 break;
         }
···
                       pl022->cur_transfer->len, &pl022->sgt_tx);
 
         /* Map DMA buffers */
-        sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+        rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
                            pl022->sgt_rx.nents, DMA_FROM_DEVICE);
-        if (!sglen)
+        if (!rx_sglen)
                 goto err_rx_sgmap;
 
-        sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+        tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
                            pl022->sgt_tx.nents, DMA_TO_DEVICE);
-        if (!sglen)
+        if (!tx_sglen)
                 goto err_tx_sgmap;
 
         /* Send both scatterlists */
         rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
                                       pl022->sgt_rx.sgl,
-                                      pl022->sgt_rx.nents,
+                                      rx_sglen,
                                       DMA_FROM_DEVICE,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
         if (!rxdesc)
···
 
         txdesc = txchan->device->device_prep_slave_sg(txchan,
                                       pl022->sgt_tx.sgl,
-                                      pl022->sgt_tx.nents,
+                                      tx_sglen,
                                       DMA_TO_DEVICE,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
         if (!txdesc)
···
         txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
 err_rxdesc:
         rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
-        dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl,
+        dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
                      pl022->sgt_tx.nents, DMA_TO_DEVICE);
 err_tx_sgmap:
-        dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl,
+        dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
                      pl022->sgt_tx.nents, DMA_FROM_DEVICE);
 err_rx_sgmap:
         sg_free_table(&pl022->sgt_tx);
···
 
         /* Lock queue and check for queue work */
         spin_lock_irqsave(&pl022->queue_lock, flags);
-        if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) {
-                pl022->busy = 0;
+        if (list_empty(&pl022->queue) || !pl022->running) {
+                pl022->busy = false;
                 spin_unlock_irqrestore(&pl022->queue_lock, flags);
                 return;
         }
···
                 list_entry(pl022->queue.next, struct spi_message, queue);
 
         list_del_init(&pl022->cur_msg->queue);
-        pl022->busy = 1;
+        pl022->busy = true;
         spin_unlock_irqrestore(&pl022->queue_lock, flags);
 
         /* Initial message state */
···
         INIT_LIST_HEAD(&pl022->queue);
         spin_lock_init(&pl022->queue_lock);
 
-        pl022->run = QUEUE_STOPPED;
-        pl022->busy = 0;
+        pl022->running = false;
+        pl022->busy = false;
 
         tasklet_init(&pl022->pump_transfers,
                      pump_transfers, (unsigned long)pl022);
···
 
         spin_lock_irqsave(&pl022->queue_lock, flags);
 
-        if (pl022->run == QUEUE_RUNNING || pl022->busy) {
+        if (pl022->running || pl022->busy) {
                 spin_unlock_irqrestore(&pl022->queue_lock, flags);
                 return -EBUSY;
         }
 
-        pl022->run = QUEUE_RUNNING;
+        pl022->running = true;
         pl022->cur_msg = NULL;
         pl022->cur_transfer = NULL;
         pl022->cur_chip = NULL;
···
 
         if (!list_empty(&pl022->queue) || pl022->busy)
                 status = -EBUSY;
-        else pl022->run = QUEUE_STOPPED;
+        else
+                pl022->running = false;
 
         spin_unlock_irqrestore(&pl022->queue_lock, flags);
···
 
         spin_lock_irqsave(&pl022->queue_lock, flags);
 
-        if (pl022->run == QUEUE_STOPPED) {
+        if (!pl022->running) {
                 spin_unlock_irqrestore(&pl022->queue_lock, flags);
                 return -ESHUTDOWN;
         }
···
         msg->state = STATE_START;
 
         list_add_tail(&msg->queue, &pl022->queue);
-        if (pl022->run == QUEUE_RUNNING && !pl022->busy)
+        if (pl022->running && !pl022->busy)
                 queue_work(pl022->workqueue, &pl022->pump_messages);
 
         spin_unlock_irqrestore(&pl022->queue_lock, flags);
+546 -790
drivers/spi/davinci_spi.c
··· 1 1 /* 2 2 * Copyright (C) 2009 Texas Instruments. 3 + * Copyright (C) 2010 EF Johnson Technologies 3 4 * 4 5 * This program is free software; you can redistribute it and/or modify 5 6 * it under the terms of the GNU General Public License as published by ··· 39 38 40 39 #define CS_DEFAULT 0xFF 41 40 42 - #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) 43 - #define DAVINCI_DMA_DATA_TYPE_S8 0x01 44 - #define DAVINCI_DMA_DATA_TYPE_S16 0x02 45 - #define DAVINCI_DMA_DATA_TYPE_S32 0x04 46 - 47 41 #define SPIFMT_PHASE_MASK BIT(16) 48 42 #define SPIFMT_POLARITY_MASK BIT(17) 49 43 #define SPIFMT_DISTIMER_MASK BIT(18) ··· 48 52 #define SPIFMT_ODD_PARITY_MASK BIT(23) 49 53 #define SPIFMT_WDELAY_MASK 0x3f000000u 50 54 #define SPIFMT_WDELAY_SHIFT 24 51 - #define SPIFMT_CHARLEN_MASK 0x0000001Fu 52 - 53 - /* SPIGCR1 */ 54 - #define SPIGCR1_SPIENA_MASK 0x01000000u 55 + #define SPIFMT_PRESCALE_SHIFT 8 55 56 56 57 /* SPIPC0 */ 57 58 #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ 58 59 #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ 59 60 #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ 60 61 #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ 61 - #define SPIPC0_EN1FUN_MASK BIT(1) 62 - #define SPIPC0_EN0FUN_MASK BIT(0) 63 62 64 63 #define SPIINT_MASKALL 0x0101035F 65 - #define SPI_INTLVL_1 0x000001FFu 66 - #define SPI_INTLVL_0 0x00000000u 64 + #define SPIINT_MASKINT 0x0000015F 65 + #define SPI_INTLVL_1 0x000001FF 66 + #define SPI_INTLVL_0 0x00000000 67 67 68 - /* SPIDAT1 */ 69 - #define SPIDAT1_CSHOLD_SHIFT 28 70 - #define SPIDAT1_CSNR_SHIFT 16 68 + /* SPIDAT1 (upper 16 bit defines) */ 69 + #define SPIDAT1_CSHOLD_MASK BIT(12) 70 + 71 + /* SPIGCR1 */ 71 72 #define SPIGCR1_CLKMOD_MASK BIT(1) 72 73 #define SPIGCR1_MASTER_MASK BIT(0) 74 + #define SPIGCR1_POWERDOWN_MASK BIT(8) 73 75 #define SPIGCR1_LOOPBACK_MASK BIT(16) 76 + #define SPIGCR1_SPIENA_MASK BIT(24) 74 77 75 78 /* SPIBUF */ 76 79 #define SPIBUF_TXFULL_MASK BIT(29) 77 80 #define SPIBUF_RXEMPTY_MASK BIT(31) 81 + 82 + /* SPIDELAY */ 83 + #define SPIDELAY_C2TDELAY_SHIFT 24 84 + #define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) 85 + #define SPIDELAY_T2CDELAY_SHIFT 16 86 + #define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) 87 + #define SPIDELAY_T2EDELAY_SHIFT 8 88 + #define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) 89 + #define SPIDELAY_C2EDELAY_SHIFT 0 90 + #define SPIDELAY_C2EDELAY_MASK 0xFF 78 91 79 92 /* Error Masks */ 80 93 #define SPIFLG_DLEN_ERR_MASK BIT(0) ··· 92 87 #define SPIFLG_DESYNC_MASK BIT(3) 93 88 #define SPIFLG_BITERR_MASK BIT(4) 94 89 #define SPIFLG_OVRRUN_MASK BIT(6) 95 - #define SPIFLG_RX_INTR_MASK BIT(8) 96 - #define SPIFLG_TX_INTR_MASK BIT(9) 97 90 #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) 98 - #define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ 91 + #define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ 99 92 | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ 100 93 | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ 101 - | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ 102 - | SPIFLG_TX_INTR_MASK \ 103 - | SPIFLG_BUF_INIT_ACTIVE_MASK) 94 + | SPIFLG_OVRRUN_MASK) 104 95 105 - #define SPIINT_DLEN_ERR_INTR BIT(0) 106 - #define SPIINT_TIMEOUT_INTR BIT(1) 107 - #define SPIINT_PARERR_INTR BIT(2) 108 - #define SPIINT_DESYNC_INTR BIT(3) 109 - #define SPIINT_BITERR_INTR BIT(4) 110 - #define SPIINT_OVRRUN_INTR BIT(6) 111 - #define SPIINT_RX_INTR BIT(8) 112 - #define SPIINT_TX_INTR BIT(9) 113 96 #define SPIINT_DMA_REQ_EN BIT(16) 114 - #define SPIINT_ENABLE_HIGHZ BIT(24) 115 - 116 - #define SPI_T2CDELAY_SHIFT 16 117 - #define SPI_C2TDELAY_SHIFT 24 
118 97 119 98 /* SPI Controller registers */ 120 99 #define SPIGCR0 0x00 ··· 107 118 #define SPILVL 0x0c 108 119 #define SPIFLG 0x10 109 120 #define SPIPC0 0x14 110 - #define SPIPC1 0x18 111 - #define SPIPC2 0x1c 112 - #define SPIPC3 0x20 113 - #define SPIPC4 0x24 114 - #define SPIPC5 0x28 115 - #define SPIPC6 0x2c 116 - #define SPIPC7 0x30 117 - #define SPIPC8 0x34 118 - #define SPIDAT0 0x38 119 121 #define SPIDAT1 0x3c 120 122 #define SPIBUF 0x40 121 - #define SPIEMU 0x44 122 123 #define SPIDELAY 0x48 123 124 #define SPIDEF 0x4c 124 125 #define SPIFMT0 0x50 125 - #define SPIFMT1 0x54 126 - #define SPIFMT2 0x58 127 - #define SPIFMT3 0x5c 128 - #define TGINTVEC0 0x60 129 - #define TGINTVEC1 0x64 130 - 131 - struct davinci_spi_slave { 132 - u32 cmd_to_write; 133 - u32 clk_ctrl_to_write; 134 - u32 bytes_per_word; 135 - u8 active_cs; 136 - }; 137 126 138 127 /* We have 2 DMA channels per CS, one for RX and one for TX */ 139 128 struct davinci_spi_dma { 140 - int dma_tx_channel; 141 - int dma_rx_channel; 142 - int dma_tx_sync_dev; 143 - int dma_rx_sync_dev; 129 + int tx_channel; 130 + int rx_channel; 131 + int dummy_param_slot; 144 132 enum dma_event_q eventq; 145 - 146 - struct completion dma_tx_completion; 147 - struct completion dma_rx_completion; 148 133 }; 149 134 150 135 /* SPI Controller driver's private data. */ ··· 129 166 u8 version; 130 167 resource_size_t pbase; 131 168 void __iomem *base; 132 - size_t region_size; 133 169 u32 irq; 134 170 struct completion done; 135 171 136 172 const void *tx; 137 173 void *rx; 138 - u8 *tmp_buf; 139 - int count; 140 - struct davinci_spi_dma *dma_channels; 141 - struct davinci_spi_platform_data *pdata; 174 + #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) 175 + u8 rx_tmp_buf[SPI_TMP_BUFSZ]; 176 + int rcount; 177 + int wcount; 178 + struct davinci_spi_dma dma; 179 + struct davinci_spi_platform_data *pdata; 142 180 143 181 void (*get_rx)(u32 rx_data, struct davinci_spi *); 144 182 u32 (*get_tx)(struct davinci_spi *); 145 183 146 - struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; 184 + u8 bytes_per_word[SPI_MAX_CHIPSELECT]; 147 185 }; 148 186 149 - static unsigned use_dma; 187 + static struct davinci_spi_config davinci_spi_default_cfg; 150 188 151 - static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) 189 + static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) 152 190 { 153 - u8 *rx = davinci_spi->rx; 154 - 155 - *rx++ = (u8)data; 156 - davinci_spi->rx = rx; 191 + if (dspi->rx) { 192 + u8 *rx = dspi->rx; 193 + *rx++ = (u8)data; 194 + dspi->rx = rx; 195 + } 157 196 } 158 197 159 - static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) 198 + static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) 160 199 { 161 - u16 *rx = davinci_spi->rx; 162 - 163 - *rx++ = (u16)data; 164 - davinci_spi->rx = rx; 200 + if (dspi->rx) { 201 + u16 *rx = dspi->rx; 202 + *rx++ = (u16)data; 203 + dspi->rx = rx; 204 + } 165 205 } 166 206 167 - static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) 207 + static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) 168 208 { 169 - u32 data; 170 - const u8 *tx = davinci_spi->tx; 171 - 172 - data = *tx++; 173 - davinci_spi->tx = tx; 209 + u32 data = 0; 210 + if (dspi->tx) { 211 + const u8 *tx = dspi->tx; 212 + data = *tx++; 213 + dspi->tx = tx; 214 + } 174 215 return data; 175 216 } 176 217 177 - static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) 218 + static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) 178 219 { 179 - 
u32 data; 180 - const u16 *tx = davinci_spi->tx; 181 - 182 - data = *tx++; 183 - davinci_spi->tx = tx; 220 + u32 data = 0; 221 + if (dspi->tx) { 222 + const u16 *tx = dspi->tx; 223 + data = *tx++; 224 + dspi->tx = tx; 225 + } 184 226 return data; 185 227 } 186 228 ··· 205 237 iowrite32(v, addr); 206 238 } 207 239 208 - static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) 209 - { 210 - set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); 211 - } 212 - 213 - static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) 214 - { 215 - clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); 216 - } 217 - 218 - static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) 219 - { 220 - struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); 221 - 222 - if (enable) 223 - set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); 224 - else 225 - clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); 226 - } 227 - 228 240 /* 229 241 * Interface to control the chip select signal 230 242 */ 231 243 static void davinci_spi_chipselect(struct spi_device *spi, int value) 232 244 { 233 - struct davinci_spi *davinci_spi; 245 + struct davinci_spi *dspi; 234 246 struct davinci_spi_platform_data *pdata; 235 - u32 data1_reg_val = 0; 247 + u8 chip_sel = spi->chip_select; 248 + u16 spidat1 = CS_DEFAULT; 249 + bool gpio_chipsel = false; 236 250 237 - davinci_spi = spi_master_get_devdata(spi->master); 238 - pdata = davinci_spi->pdata; 251 + dspi = spi_master_get_devdata(spi->master); 252 + pdata = dspi->pdata; 253 + 254 + if (pdata->chip_sel && chip_sel < pdata->num_chipselect && 255 + pdata->chip_sel[chip_sel] != SPI_INTERN_CS) 256 + gpio_chipsel = true; 239 257 240 258 /* 241 259 * Board specific chip select logic decides the polarity and cs 242 260 * line for the controller 243 261 */ 244 - if (value == BITBANG_CS_INACTIVE) { 245 - set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); 262 + if (gpio_chipsel) { 263 + if (value == BITBANG_CS_ACTIVE) 264 + gpio_set_value(pdata->chip_sel[chip_sel], 0); 265 + else 266 + gpio_set_value(pdata->chip_sel[chip_sel], 1); 267 + } else { 268 + if (value == BITBANG_CS_ACTIVE) { 269 + spidat1 |= SPIDAT1_CSHOLD_MASK; 270 + spidat1 &= ~(0x1 << chip_sel); 271 + } 246 272 247 - data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; 248 - iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 249 - 250 - while ((ioread32(davinci_spi->base + SPIBUF) 251 - & SPIBUF_RXEMPTY_MASK) == 0) 252 - cpu_relax(); 273 + iowrite16(spidat1, dspi->base + SPIDAT1 + 2); 253 274 } 275 + } 276 + 277 + /** 278 + * davinci_spi_get_prescale - Calculates the correct prescale value 279 + * @maxspeed_hz: the maximum rate the SPI clock can run at 280 + * 281 + * This function calculates the prescale value that generates a clock rate 282 + * less than or equal to the specified maximum. 283 + * 284 + * Returns: calculated prescale - 1 for easy programming into SPI registers 285 + * or negative error number if valid prescalar cannot be updated. 
286 + */ 287 + static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, 288 + u32 max_speed_hz) 289 + { 290 + int ret; 291 + 292 + ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); 293 + 294 + if (ret < 3 || ret > 256) 295 + return -EINVAL; 296 + 297 + return ret - 1; 254 298 } 255 299 256 300 /** ··· 278 298 struct spi_transfer *t) 279 299 { 280 300 281 - struct davinci_spi *davinci_spi; 282 - struct davinci_spi_platform_data *pdata; 301 + struct davinci_spi *dspi; 302 + struct davinci_spi_config *spicfg; 283 303 u8 bits_per_word = 0; 284 - u32 hz = 0, prescale = 0, clkspeed; 304 + u32 hz = 0, spifmt = 0, prescale = 0; 285 305 286 - davinci_spi = spi_master_get_devdata(spi->master); 287 - pdata = davinci_spi->pdata; 306 + dspi = spi_master_get_devdata(spi->master); 307 + spicfg = (struct davinci_spi_config *)spi->controller_data; 308 + if (!spicfg) 309 + spicfg = &davinci_spi_default_cfg; 288 310 289 311 if (t) { 290 312 bits_per_word = t->bits_per_word; ··· 302 320 * 8bit, 16bit or 32bit transfer 303 321 */ 304 322 if (bits_per_word <= 8 && bits_per_word >= 2) { 305 - davinci_spi->get_rx = davinci_spi_rx_buf_u8; 306 - davinci_spi->get_tx = davinci_spi_tx_buf_u8; 307 - davinci_spi->slave[spi->chip_select].bytes_per_word = 1; 323 + dspi->get_rx = davinci_spi_rx_buf_u8; 324 + dspi->get_tx = davinci_spi_tx_buf_u8; 325 + dspi->bytes_per_word[spi->chip_select] = 1; 308 326 } else if (bits_per_word <= 16 && bits_per_word >= 2) { 309 - davinci_spi->get_rx = davinci_spi_rx_buf_u16; 310 - davinci_spi->get_tx = davinci_spi_tx_buf_u16; 311 - davinci_spi->slave[spi->chip_select].bytes_per_word = 2; 327 + dspi->get_rx = davinci_spi_rx_buf_u16; 328 + dspi->get_tx = davinci_spi_tx_buf_u16; 329 + dspi->bytes_per_word[spi->chip_select] = 2; 312 330 } else 313 331 return -EINVAL; 314 332 315 333 if (!hz) 316 334 hz = spi->max_speed_hz; 317 335 318 - clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, 319 - spi->chip_select); 320 - set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, 321 - spi->chip_select); 336 + /* Set up SPIFMTn register, unique to this chipselect. 
*/ 322 337 323 - clkspeed = clk_get_rate(davinci_spi->clk); 324 - if (hz > clkspeed / 2) 325 - prescale = 1 << 8; 326 - if (hz < clkspeed / 256) 327 - prescale = 255 << 8; 328 - if (!prescale) 329 - prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00; 338 + prescale = davinci_spi_get_prescale(dspi, hz); 339 + if (prescale < 0) 340 + return prescale; 330 341 331 - clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); 332 - set_fmt_bits(davinci_spi->base, prescale, spi->chip_select); 342 + spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); 333 343 334 - return 0; 335 - } 336 - 337 - static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) 338 - { 339 - struct spi_device *spi = (struct spi_device *)data; 340 - struct davinci_spi *davinci_spi; 341 - struct davinci_spi_dma *davinci_spi_dma; 342 - struct davinci_spi_platform_data *pdata; 343 - 344 - davinci_spi = spi_master_get_devdata(spi->master); 345 - davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); 346 - pdata = davinci_spi->pdata; 347 - 348 - if (ch_status == DMA_COMPLETE) 349 - edma_stop(davinci_spi_dma->dma_rx_channel); 350 - else 351 - edma_clean_channel(davinci_spi_dma->dma_rx_channel); 352 - 353 - complete(&davinci_spi_dma->dma_rx_completion); 354 - /* We must disable the DMA RX request */ 355 - davinci_spi_set_dma_req(spi, 0); 356 - } 357 - 358 - static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) 359 - { 360 - struct spi_device *spi = (struct spi_device *)data; 361 - struct davinci_spi *davinci_spi; 362 - struct davinci_spi_dma *davinci_spi_dma; 363 - struct davinci_spi_platform_data *pdata; 364 - 365 - davinci_spi = spi_master_get_devdata(spi->master); 366 - davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); 367 - pdata = davinci_spi->pdata; 368 - 369 - if (ch_status == DMA_COMPLETE) 370 - edma_stop(davinci_spi_dma->dma_tx_channel); 371 - else 372 - edma_clean_channel(davinci_spi_dma->dma_tx_channel); 373 - 374 - complete(&davinci_spi_dma->dma_tx_completion); 375 - /* We must disable the DMA TX request */ 376 - davinci_spi_set_dma_req(spi, 0); 377 - } 378 - 379 - static int davinci_spi_request_dma(struct spi_device *spi) 380 - { 381 - struct davinci_spi *davinci_spi; 382 - struct davinci_spi_dma *davinci_spi_dma; 383 - struct davinci_spi_platform_data *pdata; 384 - struct device *sdev; 385 - int r; 386 - 387 - davinci_spi = spi_master_get_devdata(spi->master); 388 - davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 389 - pdata = davinci_spi->pdata; 390 - sdev = davinci_spi->bitbang.master->dev.parent; 391 - 392 - r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, 393 - davinci_spi_dma_rx_callback, spi, 394 - davinci_spi_dma->eventq); 395 - if (r < 0) { 396 - dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); 397 - return -EAGAIN; 398 - } 399 - davinci_spi_dma->dma_rx_channel = r; 400 - r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, 401 - davinci_spi_dma_tx_callback, spi, 402 - davinci_spi_dma->eventq); 403 - if (r < 0) { 404 - edma_free_channel(davinci_spi_dma->dma_rx_channel); 405 - davinci_spi_dma->dma_rx_channel = -1; 406 - dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); 407 - return -EAGAIN; 408 - } 409 - davinci_spi_dma->dma_tx_channel = r; 410 - 411 - return 0; 412 - } 413 - 414 - /** 415 - * davinci_spi_setup - This functions will set default transfer method 416 - * @spi: spi device on which data transfer to be done 417 - * 418 - * This functions sets the 
default transfer method. 419 - */ 420 - 421 - static int davinci_spi_setup(struct spi_device *spi) 422 - { 423 - int retval; 424 - struct davinci_spi *davinci_spi; 425 - struct davinci_spi_dma *davinci_spi_dma; 426 - struct device *sdev; 427 - 428 - davinci_spi = spi_master_get_devdata(spi->master); 429 - sdev = davinci_spi->bitbang.master->dev.parent; 430 - 431 - /* if bits per word length is zero then set it default 8 */ 432 - if (!spi->bits_per_word) 433 - spi->bits_per_word = 8; 434 - 435 - davinci_spi->slave[spi->chip_select].cmd_to_write = 0; 436 - 437 - if (use_dma && davinci_spi->dma_channels) { 438 - davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 439 - 440 - if ((davinci_spi_dma->dma_rx_channel == -1) 441 - || (davinci_spi_dma->dma_tx_channel == -1)) { 442 - retval = davinci_spi_request_dma(spi); 443 - if (retval < 0) 444 - return retval; 445 - } 446 - } 447 - 448 - /* 449 - * SPI in DaVinci and DA8xx operate between 450 - * 600 KHz and 50 MHz 451 - */ 452 - if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { 453 - dev_dbg(sdev, "Operating frequency is not in acceptable " 454 - "range\n"); 455 - return -EINVAL; 456 - } 457 - 458 - /* 459 - * Set up SPIFMTn register, unique to this chipselect. 460 - * 461 - * NOTE: we could do all of these with one write. Also, some 462 - * of the "version 2" features are found in chips that don't 463 - * support all of them... 464 - */ 465 344 if (spi->mode & SPI_LSB_FIRST) 466 - set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, 467 - spi->chip_select); 468 - else 469 - clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, 470 - spi->chip_select); 345 + spifmt |= SPIFMT_SHIFTDIR_MASK; 471 346 472 347 if (spi->mode & SPI_CPOL) 473 - set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, 474 - spi->chip_select); 475 - else 476 - clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, 477 - spi->chip_select); 348 + spifmt |= SPIFMT_POLARITY_MASK; 478 349 479 350 if (!(spi->mode & SPI_CPHA)) 480 - set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, 481 - spi->chip_select); 482 - else 483 - clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, 484 - spi->chip_select); 351 + spifmt |= SPIFMT_PHASE_MASK; 485 352 486 353 /* 487 354 * Version 1 hardware supports two basic SPI modes: ··· 345 514 * - 4 pin with enable is (SPI_READY | SPI_NO_CS) 346 515 */ 347 516 348 - if (davinci_spi->version == SPI_VERSION_2) { 349 - clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, 350 - spi->chip_select); 351 - set_fmt_bits(davinci_spi->base, 352 - (davinci_spi->pdata->wdelay 353 - << SPIFMT_WDELAY_SHIFT) 354 - & SPIFMT_WDELAY_MASK, 355 - spi->chip_select); 517 + if (dspi->version == SPI_VERSION_2) { 356 518 357 - if (davinci_spi->pdata->odd_parity) 358 - set_fmt_bits(davinci_spi->base, 359 - SPIFMT_ODD_PARITY_MASK, 360 - spi->chip_select); 361 - else 362 - clear_fmt_bits(davinci_spi->base, 363 - SPIFMT_ODD_PARITY_MASK, 364 - spi->chip_select); 519 + u32 delay = 0; 365 520 366 - if (davinci_spi->pdata->parity_enable) 367 - set_fmt_bits(davinci_spi->base, 368 - SPIFMT_PARITYENA_MASK, 369 - spi->chip_select); 370 - else 371 - clear_fmt_bits(davinci_spi->base, 372 - SPIFMT_PARITYENA_MASK, 373 - spi->chip_select); 521 + spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) 522 + & SPIFMT_WDELAY_MASK); 374 523 375 - if (davinci_spi->pdata->wait_enable) 376 - set_fmt_bits(davinci_spi->base, 377 - SPIFMT_WAITENA_MASK, 378 - spi->chip_select); 379 - else 380 - clear_fmt_bits(davinci_spi->base, 381 - SPIFMT_WAITENA_MASK, 382 - spi->chip_select); 
524 + if (spicfg->odd_parity) 525 + spifmt |= SPIFMT_ODD_PARITY_MASK; 383 526 384 - if (davinci_spi->pdata->timer_disable) 385 - set_fmt_bits(davinci_spi->base, 386 - SPIFMT_DISTIMER_MASK, 387 - spi->chip_select); 388 - else 389 - clear_fmt_bits(davinci_spi->base, 390 - SPIFMT_DISTIMER_MASK, 391 - spi->chip_select); 392 - } 527 + if (spicfg->parity_enable) 528 + spifmt |= SPIFMT_PARITYENA_MASK; 393 529 394 - retval = davinci_spi_setup_transfer(spi, NULL); 395 - 396 - return retval; 397 - } 398 - 399 - static void davinci_spi_cleanup(struct spi_device *spi) 400 - { 401 - struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); 402 - struct davinci_spi_dma *davinci_spi_dma; 403 - 404 - davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 405 - 406 - if (use_dma && davinci_spi->dma_channels) { 407 - davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 408 - 409 - if ((davinci_spi_dma->dma_rx_channel != -1) 410 - && (davinci_spi_dma->dma_tx_channel != -1)) { 411 - edma_free_channel(davinci_spi_dma->dma_tx_channel); 412 - edma_free_channel(davinci_spi_dma->dma_rx_channel); 530 + if (spicfg->timer_disable) { 531 + spifmt |= SPIFMT_DISTIMER_MASK; 532 + } else { 533 + delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) 534 + & SPIDELAY_C2TDELAY_MASK; 535 + delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) 536 + & SPIDELAY_T2CDELAY_MASK; 413 537 } 538 + 539 + if (spi->mode & SPI_READY) { 540 + spifmt |= SPIFMT_WAITENA_MASK; 541 + delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) 542 + & SPIDELAY_T2EDELAY_MASK; 543 + delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) 544 + & SPIDELAY_C2EDELAY_MASK; 545 + } 546 + 547 + iowrite32(delay, dspi->base + SPIDELAY); 414 548 } 415 - } 416 549 417 - static int davinci_spi_bufs_prep(struct spi_device *spi, 418 - struct davinci_spi *davinci_spi) 419 - { 420 - int op_mode = 0; 421 - 422 - /* 423 - * REVISIT unless devices disagree about SPI_LOOP or 424 - * SPI_READY (SPI_NO_CS only allows one device!), this 425 - * should not need to be done before each message... 426 - * optimize for both flags staying cleared. 427 - */ 428 - 429 - op_mode = SPIPC0_DIFUN_MASK 430 - | SPIPC0_DOFUN_MASK 431 - | SPIPC0_CLKFUN_MASK; 432 - if (!(spi->mode & SPI_NO_CS)) 433 - op_mode |= 1 << spi->chip_select; 434 - if (spi->mode & SPI_READY) 435 - op_mode |= SPIPC0_SPIENA_MASK; 436 - 437 - iowrite32(op_mode, davinci_spi->base + SPIPC0); 438 - 439 - if (spi->mode & SPI_LOOP) 440 - set_io_bits(davinci_spi->base + SPIGCR1, 441 - SPIGCR1_LOOPBACK_MASK); 442 - else 443 - clear_io_bits(davinci_spi->base + SPIGCR1, 444 - SPIGCR1_LOOPBACK_MASK); 550 + iowrite32(spifmt, dspi->base + SPIFMT0); 445 551 446 552 return 0; 447 553 } 448 554 449 - static int davinci_spi_check_error(struct davinci_spi *davinci_spi, 450 - int int_status) 555 + /** 556 + * davinci_spi_setup - This functions will set default transfer method 557 + * @spi: spi device on which data transfer to be done 558 + * 559 + * This functions sets the default transfer method. 
560 + */ 561 + static int davinci_spi_setup(struct spi_device *spi) 451 562 { 452 - struct device *sdev = davinci_spi->bitbang.master->dev.parent; 563 + int retval = 0; 564 + struct davinci_spi *dspi; 565 + struct davinci_spi_platform_data *pdata; 566 + 567 + dspi = spi_master_get_devdata(spi->master); 568 + pdata = dspi->pdata; 569 + 570 + /* if bits per word length is zero then set it default 8 */ 571 + if (!spi->bits_per_word) 572 + spi->bits_per_word = 8; 573 + 574 + if (!(spi->mode & SPI_NO_CS)) { 575 + if ((pdata->chip_sel == NULL) || 576 + (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)) 577 + set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); 578 + 579 + } 580 + 581 + if (spi->mode & SPI_READY) 582 + set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); 583 + 584 + if (spi->mode & SPI_LOOP) 585 + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); 586 + else 587 + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); 588 + 589 + return retval; 590 + } 591 + 592 + static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) 593 + { 594 + struct device *sdev = dspi->bitbang.master->dev.parent; 453 595 454 596 if (int_status & SPIFLG_TIMEOUT_MASK) { 455 597 dev_dbg(sdev, "SPI Time-out Error\n"); ··· 437 633 return -EIO; 438 634 } 439 635 440 - if (davinci_spi->version == SPI_VERSION_2) { 636 + if (dspi->version == SPI_VERSION_2) { 441 637 if (int_status & SPIFLG_DLEN_ERR_MASK) { 442 638 dev_dbg(sdev, "SPI Data Length Error\n"); 443 639 return -EIO; ··· 450 646 dev_dbg(sdev, "SPI Data Overrun error\n"); 451 647 return -EIO; 452 648 } 453 - if (int_status & SPIFLG_TX_INTR_MASK) { 454 - dev_dbg(sdev, "SPI TX intr bit set\n"); 455 - return -EIO; 456 - } 457 649 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { 458 650 dev_dbg(sdev, "SPI Buffer Init Active\n"); 459 651 return -EBUSY; ··· 457 657 } 458 658 459 659 return 0; 660 + } 661 + 662 + /** 663 + * davinci_spi_process_events - check for and handle any SPI controller events 664 + * @dspi: the controller data 665 + * 666 + * This function will check the SPIFLG register and handle any events that are 667 + * detected there 668 + */ 669 + static int davinci_spi_process_events(struct davinci_spi *dspi) 670 + { 671 + u32 buf, status, errors = 0, spidat1; 672 + 673 + buf = ioread32(dspi->base + SPIBUF); 674 + 675 + if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { 676 + dspi->get_rx(buf & 0xFFFF, dspi); 677 + dspi->rcount--; 678 + } 679 + 680 + status = ioread32(dspi->base + SPIFLG); 681 + 682 + if (unlikely(status & SPIFLG_ERROR_MASK)) { 683 + errors = status & SPIFLG_ERROR_MASK; 684 + goto out; 685 + } 686 + 687 + if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { 688 + spidat1 = ioread32(dspi->base + SPIDAT1); 689 + dspi->wcount--; 690 + spidat1 &= ~0xFFFF; 691 + spidat1 |= 0xFFFF & dspi->get_tx(dspi); 692 + iowrite32(spidat1, dspi->base + SPIDAT1); 693 + } 694 + 695 + out: 696 + return errors; 697 + } 698 + 699 + static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) 700 + { 701 + struct davinci_spi *dspi = data; 702 + struct davinci_spi_dma *dma = &dspi->dma; 703 + 704 + edma_stop(lch); 705 + 706 + if (status == DMA_COMPLETE) { 707 + if (lch == dma->rx_channel) 708 + dspi->rcount = 0; 709 + if (lch == dma->tx_channel) 710 + dspi->wcount = 0; 711 + } 712 + 713 + if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) 714 + complete(&dspi->done); 460 715 } 461 716 462 717 /** ··· 523 668 * of SPI controller and then wait until the completion will be marked 524 
669 * by the IRQ Handler. 525 670 */ 526 - static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) 671 + static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 527 672 { 528 - struct davinci_spi *davinci_spi; 529 - int int_status, count, ret; 530 - u8 conv, tmp; 531 - u32 tx_data, data1_reg_val; 532 - u32 buf_val, flg_val; 673 + struct davinci_spi *dspi; 674 + int data_type, ret; 675 + u32 tx_data, spidat1; 676 + u32 errors = 0; 677 + struct davinci_spi_config *spicfg; 533 678 struct davinci_spi_platform_data *pdata; 534 - 535 - davinci_spi = spi_master_get_devdata(spi->master); 536 - pdata = davinci_spi->pdata; 537 - 538 - davinci_spi->tx = t->tx_buf; 539 - davinci_spi->rx = t->rx_buf; 540 - 541 - /* convert len to words based on bits_per_word */ 542 - conv = davinci_spi->slave[spi->chip_select].bytes_per_word; 543 - davinci_spi->count = t->len / conv; 544 - 545 - INIT_COMPLETION(davinci_spi->done); 546 - 547 - ret = davinci_spi_bufs_prep(spi, davinci_spi); 548 - if (ret) 549 - return ret; 550 - 551 - /* Enable SPI */ 552 - set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 553 - 554 - iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | 555 - (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), 556 - davinci_spi->base + SPIDELAY); 557 - 558 - count = davinci_spi->count; 559 - data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; 560 - tmp = ~(0x1 << spi->chip_select); 561 - 562 - clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); 563 - 564 - data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; 565 - 566 - while ((ioread32(davinci_spi->base + SPIBUF) 567 - & SPIBUF_RXEMPTY_MASK) == 0) 568 - cpu_relax(); 569 - 570 - /* Determine the command to execute READ or WRITE */ 571 - if (t->tx_buf) { 572 - clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); 573 - 574 - while (1) { 575 - tx_data = davinci_spi->get_tx(davinci_spi); 576 - 577 - data1_reg_val &= ~(0xFFFF); 578 - data1_reg_val |= (0xFFFF & tx_data); 579 - 580 - buf_val = ioread32(davinci_spi->base + SPIBUF); 581 - if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { 582 - iowrite32(data1_reg_val, 583 - davinci_spi->base + SPIDAT1); 584 - 585 - count--; 586 - } 587 - while (ioread32(davinci_spi->base + SPIBUF) 588 - & SPIBUF_RXEMPTY_MASK) 589 - cpu_relax(); 590 - 591 - /* getting the returned byte */ 592 - if (t->rx_buf) { 593 - buf_val = ioread32(davinci_spi->base + SPIBUF); 594 - davinci_spi->get_rx(buf_val, davinci_spi); 595 - } 596 - if (count <= 0) 597 - break; 598 - } 599 - } else { 600 - if (pdata->poll_mode) { 601 - while (1) { 602 - /* keeps the serial clock going */ 603 - if ((ioread32(davinci_spi->base + SPIBUF) 604 - & SPIBUF_TXFULL_MASK) == 0) 605 - iowrite32(data1_reg_val, 606 - davinci_spi->base + SPIDAT1); 607 - 608 - while (ioread32(davinci_spi->base + SPIBUF) & 609 - SPIBUF_RXEMPTY_MASK) 610 - cpu_relax(); 611 - 612 - flg_val = ioread32(davinci_spi->base + SPIFLG); 613 - buf_val = ioread32(davinci_spi->base + SPIBUF); 614 - 615 - davinci_spi->get_rx(buf_val, davinci_spi); 616 - 617 - count--; 618 - if (count <= 0) 619 - break; 620 - } 621 - } else { /* Receive in Interrupt mode */ 622 - int i; 623 - 624 - for (i = 0; i < davinci_spi->count; i++) { 625 - set_io_bits(davinci_spi->base + SPIINT, 626 - SPIINT_BITERR_INTR 627 - | SPIINT_OVRRUN_INTR 628 - | SPIINT_RX_INTR); 629 - 630 - iowrite32(data1_reg_val, 631 - davinci_spi->base + SPIDAT1); 632 - 633 - while (ioread32(davinci_spi->base + SPIINT) & 634 - SPIINT_RX_INTR) 635 - cpu_relax(); 636 - } 637 - iowrite32((data1_reg_val & 
0x0ffcffff), 638 - davinci_spi->base + SPIDAT1); 639 - } 640 - } 641 - 642 - /* 643 - * Check for bit error, desync error,parity error,timeout error and 644 - * receive overflow errors 645 - */ 646 - int_status = ioread32(davinci_spi->base + SPIFLG); 647 - 648 - ret = davinci_spi_check_error(davinci_spi, int_status); 649 - if (ret != 0) 650 - return ret; 651 - 652 - /* SPI Framework maintains the count only in bytes so convert back */ 653 - davinci_spi->count *= conv; 654 - 655 - return t->len; 656 - } 657 - 658 - #define DAVINCI_DMA_DATA_TYPE_S8 0x01 659 - #define DAVINCI_DMA_DATA_TYPE_S16 0x02 660 - #define DAVINCI_DMA_DATA_TYPE_S32 0x04 661 - 662 - static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) 663 - { 664 - struct davinci_spi *davinci_spi; 665 - int int_status = 0; 666 - int count, temp_count; 667 - u8 conv = 1; 668 - u8 tmp; 669 - u32 data1_reg_val; 670 - struct davinci_spi_dma *davinci_spi_dma; 671 - int word_len, data_type, ret; 672 - unsigned long tx_reg, rx_reg; 673 - struct davinci_spi_platform_data *pdata; 679 + unsigned uninitialized_var(rx_buf_count); 674 680 struct device *sdev; 675 681 676 - davinci_spi = spi_master_get_devdata(spi->master); 677 - pdata = davinci_spi->pdata; 678 - sdev = davinci_spi->bitbang.master->dev.parent; 679 - 680 - davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 681 - 682 - tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; 683 - rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; 684 - 685 - davinci_spi->tx = t->tx_buf; 686 - davinci_spi->rx = t->rx_buf; 682 + dspi = spi_master_get_devdata(spi->master); 683 + pdata = dspi->pdata; 684 + spicfg = (struct davinci_spi_config *)spi->controller_data; 685 + if (!spicfg) 686 + spicfg = &davinci_spi_default_cfg; 687 + sdev = dspi->bitbang.master->dev.parent; 687 688 688 689 /* convert len to words based on bits_per_word */ 689 - conv = davinci_spi->slave[spi->chip_select].bytes_per_word; 690 - davinci_spi->count = t->len / conv; 690 + data_type = dspi->bytes_per_word[spi->chip_select]; 691 691 692 - INIT_COMPLETION(davinci_spi->done); 692 + dspi->tx = t->tx_buf; 693 + dspi->rx = t->rx_buf; 694 + dspi->wcount = t->len / data_type; 695 + dspi->rcount = dspi->wcount; 693 696 694 - init_completion(&davinci_spi_dma->dma_rx_completion); 695 - init_completion(&davinci_spi_dma->dma_tx_completion); 697 + spidat1 = ioread32(dspi->base + SPIDAT1); 696 698 697 - word_len = conv * 8; 699 + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); 700 + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 698 701 699 - if (word_len <= 8) 700 - data_type = DAVINCI_DMA_DATA_TYPE_S8; 701 - else if (word_len <= 16) 702 - data_type = DAVINCI_DMA_DATA_TYPE_S16; 703 - else if (word_len <= 32) 704 - data_type = DAVINCI_DMA_DATA_TYPE_S32; 705 - else 706 - return -EINVAL; 702 + INIT_COMPLETION(dspi->done); 707 703 708 - ret = davinci_spi_bufs_prep(spi, davinci_spi); 709 - if (ret) 710 - return ret; 704 + if (spicfg->io_type == SPI_IO_TYPE_INTR) 705 + set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); 711 706 712 - /* Put delay val if required */ 713 - iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | 714 - (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), 715 - davinci_spi->base + SPIDELAY); 716 - 717 - count = davinci_spi->count; /* the number of elements */ 718 - data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; 719 - 720 - /* CS default = 0xFF */ 721 - tmp = ~(0x1 << spi->chip_select); 722 - 723 - clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); 724 - 725 - 
data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; 726 - 727 - /* disable all interrupts for dma transfers */ 728 - clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); 729 - /* Disable SPI to write configuration bits in SPIDAT */ 730 - clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 731 - iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 732 - /* Enable SPI */ 733 - set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 734 - 735 - while ((ioread32(davinci_spi->base + SPIBUF) 736 - & SPIBUF_RXEMPTY_MASK) == 0) 737 - cpu_relax(); 738 - 739 - 740 - if (t->tx_buf) { 741 - t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, 742 - DMA_TO_DEVICE); 743 - if (dma_mapping_error(&spi->dev, t->tx_dma)) { 744 - dev_dbg(sdev, "Unable to DMA map a %d bytes" 745 - " TX buffer\n", count); 746 - return -ENOMEM; 747 - } 748 - temp_count = count; 707 + if (spicfg->io_type != SPI_IO_TYPE_DMA) { 708 + /* start the transfer */ 709 + dspi->wcount--; 710 + tx_data = dspi->get_tx(dspi); 711 + spidat1 &= 0xFFFF0000; 712 + spidat1 |= tx_data & 0xFFFF; 713 + iowrite32(spidat1, dspi->base + SPIDAT1); 749 714 } else { 750 - /* We need TX clocking for RX transaction */ 751 - t->tx_dma = dma_map_single(&spi->dev, 752 - (void *)davinci_spi->tmp_buf, count + 1, 753 - DMA_TO_DEVICE); 754 - if (dma_mapping_error(&spi->dev, t->tx_dma)) { 755 - dev_dbg(sdev, "Unable to DMA map a %d bytes" 756 - " TX tmp buffer\n", count); 757 - return -ENOMEM; 715 + struct davinci_spi_dma *dma; 716 + unsigned long tx_reg, rx_reg; 717 + struct edmacc_param param; 718 + void *rx_buf; 719 + 720 + dma = &dspi->dma; 721 + 722 + tx_reg = (unsigned long)dspi->pbase + SPIDAT1; 723 + rx_reg = (unsigned long)dspi->pbase + SPIBUF; 724 + 725 + /* 726 + * Transmit DMA setup 727 + * 728 + * If there is transmit data, map the transmit buffer, set it 729 + * as the source of data and set the source B index to data 730 + * size. If there is no transmit data, set the transmit register 731 + * as the source of data, and set the source B index to zero. 732 + * 733 + * The destination is always the transmit register itself. And 734 + * the destination never increments. 735 + */ 736 + 737 + if (t->tx_buf) { 738 + t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, 739 + dspi->wcount, DMA_TO_DEVICE); 740 + if (dma_mapping_error(&spi->dev, t->tx_dma)) { 741 + dev_dbg(sdev, "Unable to DMA map %d bytes" 742 + "TX buffer\n", dspi->wcount); 743 + return -ENOMEM; 744 + } 758 745 } 759 - temp_count = count + 1; 760 - } 761 746 762 - edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, 763 - data_type, temp_count, 1, 0, ASYNC); 764 - edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); 765 - edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); 766 - edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); 767 - edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); 747 + param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); 748 + param.src = t->tx_buf ? t->tx_dma : tx_reg; 749 + param.a_b_cnt = dspi->wcount << 16 | data_type; 750 + param.dst = tx_reg; 751 + param.src_dst_bidx = t->tx_buf ? 
data_type : 0; 752 + param.link_bcntrld = 0xffff; 753 + param.src_dst_cidx = 0; 754 + param.ccnt = 1; 755 + edma_write_slot(dma->tx_channel, &param); 756 + edma_link(dma->tx_channel, dma->dummy_param_slot); 768 757 769 - if (t->rx_buf) { 770 - /* initiate transaction */ 771 - iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 758 + /* 759 + * Receive DMA setup 760 + * 761 + * If there is receive buffer, use it to receive data. If there 762 + * is none provided, use a temporary receive buffer. Set the 763 + * destination B index to 0 so effectively only one byte is used 764 + * in the temporary buffer (address does not increment). 765 + * 766 + * The source of receive data is the receive data register. The 767 + * source address never increments. 768 + */ 772 769 773 - t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, 774 - DMA_FROM_DEVICE); 770 + if (t->rx_buf) { 771 + rx_buf = t->rx_buf; 772 + rx_buf_count = dspi->rcount; 773 + } else { 774 + rx_buf = dspi->rx_tmp_buf; 775 + rx_buf_count = sizeof(dspi->rx_tmp_buf); 776 + } 777 + 778 + t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, 779 + DMA_FROM_DEVICE); 775 780 if (dma_mapping_error(&spi->dev, t->rx_dma)) { 776 781 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", 777 - count); 778 - if (t->tx_buf != NULL) 779 - dma_unmap_single(NULL, t->tx_dma, 780 - count, DMA_TO_DEVICE); 782 + rx_buf_count); 783 + if (t->tx_buf) 784 + dma_unmap_single(NULL, t->tx_dma, dspi->wcount, 785 + DMA_TO_DEVICE); 781 786 return -ENOMEM; 782 787 } 783 - edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, 784 - data_type, count, 1, 0, ASYNC); 785 - edma_set_src(davinci_spi_dma->dma_rx_channel, 786 - rx_reg, INCR, W8BIT); 787 - edma_set_dest(davinci_spi_dma->dma_rx_channel, 788 - t->rx_dma, INCR, W8BIT); 789 - edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); 790 - edma_set_dest_index(davinci_spi_dma->dma_rx_channel, 791 - data_type, 0); 788 + 789 + param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); 790 + param.src = rx_reg; 791 + param.a_b_cnt = dspi->rcount << 16 | data_type; 792 + param.dst = t->rx_dma; 793 + param.src_dst_bidx = (t->rx_buf ? 
data_type : 0) << 16; 794 + param.link_bcntrld = 0xffff; 795 + param.src_dst_cidx = 0; 796 + param.ccnt = 1; 797 + edma_write_slot(dma->rx_channel, &param); 798 + 799 + if (pdata->cshold_bug) 800 + iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); 801 + 802 + edma_start(dma->rx_channel); 803 + edma_start(dma->tx_channel); 804 + set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); 792 805 } 793 806 794 - if ((t->tx_buf) || (t->rx_buf)) 795 - edma_start(davinci_spi_dma->dma_tx_channel); 807 + /* Wait for the transfer to complete */ 808 + if (spicfg->io_type != SPI_IO_TYPE_POLL) { 809 + wait_for_completion_interruptible(&(dspi->done)); 810 + } else { 811 + while (dspi->rcount > 0 || dspi->wcount > 0) { 812 + errors = davinci_spi_process_events(dspi); 813 + if (errors) 814 + break; 815 + cpu_relax(); 816 + } 817 + } 796 818 797 - if (t->rx_buf) 798 - edma_start(davinci_spi_dma->dma_rx_channel); 819 + clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); 820 + if (spicfg->io_type == SPI_IO_TYPE_DMA) { 799 821 800 - if ((t->rx_buf) || (t->tx_buf)) 801 - davinci_spi_set_dma_req(spi, 1); 822 + if (t->tx_buf) 823 + dma_unmap_single(NULL, t->tx_dma, dspi->wcount, 824 + DMA_TO_DEVICE); 802 825 803 - if (t->tx_buf) 804 - wait_for_completion_interruptible( 805 - &davinci_spi_dma->dma_tx_completion); 826 + dma_unmap_single(NULL, t->rx_dma, rx_buf_count, 827 + DMA_FROM_DEVICE); 806 828 807 - if (t->rx_buf) 808 - wait_for_completion_interruptible( 809 - &davinci_spi_dma->dma_rx_completion); 829 + clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); 830 + } 810 831 811 - dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); 812 - 813 - if (t->rx_buf) 814 - dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); 832 + clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); 833 + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); 815 834 816 835 /* 817 836 * Check for bit error, desync error,parity error,timeout error and 818 837 * receive overflow errors 819 838 */ 820 - int_status = ioread32(davinci_spi->base + SPIFLG); 821 - 822 - ret = davinci_spi_check_error(davinci_spi, int_status); 823 - if (ret != 0) 839 + if (errors) { 840 + ret = davinci_spi_check_error(dspi, errors); 841 + WARN(!ret, "%s: error reported but no error found!\n", 842 + dev_name(&spi->dev)); 824 843 return ret; 844 + } 825 845 826 - /* SPI Framework maintains the count only in bytes so convert back */ 827 - davinci_spi->count *= conv; 846 + if (dspi->rcount != 0 || dspi->wcount != 0) { 847 + dev_err(sdev, "SPI data transfer error\n"); 848 + return -EIO; 849 + } 828 850 829 851 return t->len; 830 852 } 831 853 832 854 /** 833 - * davinci_spi_irq - IRQ handler for DaVinci SPI 855 + * davinci_spi_irq - Interrupt handler for SPI Master Controller 834 856 * @irq: IRQ number for this SPI Master 835 857 * @context_data: structure for SPI Master controller davinci_spi 858 + * 859 + * ISR will determine that interrupt arrives either for READ or WRITE command. 860 + * According to command it will do the appropriate action. It will check 861 + * transfer length and if it is not zero then dispatch transfer command again. 862 + * If transfer length is zero then it will indicate the COMPLETION so that 863 + * davinci_spi_bufs function can go ahead. 
836 864 */ 837 - static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) 865 + static irqreturn_t davinci_spi_irq(s32 irq, void *data) 838 866 { 839 - struct davinci_spi *davinci_spi = context_data; 840 - u32 int_status, rx_data = 0; 841 - irqreturn_t ret = IRQ_NONE; 867 + struct davinci_spi *dspi = data; 868 + int status; 842 869 843 - int_status = ioread32(davinci_spi->base + SPIFLG); 870 + status = davinci_spi_process_events(dspi); 871 + if (unlikely(status != 0)) 872 + clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); 844 873 845 - while ((int_status & SPIFLG_RX_INTR_MASK)) { 846 - if (likely(int_status & SPIFLG_RX_INTR_MASK)) { 847 - ret = IRQ_HANDLED; 874 + if ((!dspi->rcount && !dspi->wcount) || status) 875 + complete(&dspi->done); 848 876 849 - rx_data = ioread32(davinci_spi->base + SPIBUF); 850 - davinci_spi->get_rx(rx_data, davinci_spi); 877 + return IRQ_HANDLED; 878 + } 851 879 852 - /* Disable Receive Interrupt */ 853 - iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), 854 - davinci_spi->base + SPIINT); 855 - } else 856 - (void)davinci_spi_check_error(davinci_spi, int_status); 880 + static int davinci_spi_request_dma(struct davinci_spi *dspi) 881 + { 882 + int r; 883 + struct davinci_spi_dma *dma = &dspi->dma; 857 884 858 - int_status = ioread32(davinci_spi->base + SPIFLG); 885 + r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, 886 + dma->eventq); 887 + if (r < 0) { 888 + pr_err("Unable to request DMA channel for SPI RX\n"); 889 + r = -EAGAIN; 890 + goto rx_dma_failed; 859 891 } 860 892 861 - return ret; 893 + r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, 894 + dma->eventq); 895 + if (r < 0) { 896 + pr_err("Unable to request DMA channel for SPI TX\n"); 897 + r = -EAGAIN; 898 + goto tx_dma_failed; 899 + } 900 + 901 + r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); 902 + if (r < 0) { 903 + pr_err("Unable to request SPI TX DMA param slot\n"); 904 + r = -EAGAIN; 905 + goto param_failed; 906 + } 907 + dma->dummy_param_slot = r; 908 + edma_link(dma->dummy_param_slot, dma->dummy_param_slot); 909 + 910 + return 0; 911 + param_failed: 912 + edma_free_channel(dma->tx_channel); 913 + tx_dma_failed: 914 + edma_free_channel(dma->rx_channel); 915 + rx_dma_failed: 916 + return r; 862 917 } 863 918 864 919 /** 865 920 * davinci_spi_probe - probe function for SPI Master Controller 866 921 * @pdev: platform_device structure which contains plateform specific data 922 + * 923 + * According to Linux Device Model this function will be invoked by Linux 924 + * with platform_device struct which contains the device specific info. 925 + * This function will map the SPI controller's memory, register IRQ, 926 + * Reset SPI controller and setting its registers to default value. 927 + * It will invoke spi_bitbang_start to create work queue so that client driver 928 + * can register transfer method to work queue. 
867 929 */ 868 930 static int davinci_spi_probe(struct platform_device *pdev) 869 931 { 870 932 struct spi_master *master; 871 - struct davinci_spi *davinci_spi; 933 + struct davinci_spi *dspi; 872 934 struct davinci_spi_platform_data *pdata; 873 935 struct resource *r, *mem; 874 936 resource_size_t dma_rx_chan = SPI_NO_RESOURCE; 875 937 resource_size_t dma_tx_chan = SPI_NO_RESOURCE; 876 938 resource_size_t dma_eventq = SPI_NO_RESOURCE; 877 939 int i = 0, ret = 0; 940 + u32 spipc0; 878 941 879 942 pdata = pdev->dev.platform_data; 880 943 if (pdata == NULL) { ··· 808 1035 809 1036 dev_set_drvdata(&pdev->dev, master); 810 1037 811 - davinci_spi = spi_master_get_devdata(master); 812 - if (davinci_spi == NULL) { 1038 + dspi = spi_master_get_devdata(master); 1039 + if (dspi == NULL) { 813 1040 ret = -ENOENT; 814 1041 goto free_master; 815 1042 } ··· 820 1047 goto free_master; 821 1048 } 822 1049 823 - davinci_spi->pbase = r->start; 824 - davinci_spi->region_size = resource_size(r); 825 - davinci_spi->pdata = pdata; 1050 + dspi->pbase = r->start; 1051 + dspi->pdata = pdata; 826 1052 827 - mem = request_mem_region(r->start, davinci_spi->region_size, 828 - pdev->name); 1053 + mem = request_mem_region(r->start, resource_size(r), pdev->name); 829 1054 if (mem == NULL) { 830 1055 ret = -EBUSY; 831 1056 goto free_master; 832 1057 } 833 1058 834 - davinci_spi->base = (struct davinci_spi_reg __iomem *) 835 - ioremap(r->start, davinci_spi->region_size); 836 - if (davinci_spi->base == NULL) { 1059 + dspi->base = ioremap(r->start, resource_size(r)); 1060 + if (dspi->base == NULL) { 837 1061 ret = -ENOMEM; 838 1062 goto release_region; 839 1063 } 840 1064 841 - davinci_spi->irq = platform_get_irq(pdev, 0); 842 - if (davinci_spi->irq <= 0) { 1065 + dspi->irq = platform_get_irq(pdev, 0); 1066 + if (dspi->irq <= 0) { 843 1067 ret = -EINVAL; 844 1068 goto unmap_io; 845 1069 } 846 1070 847 - ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, 848 - dev_name(&pdev->dev), davinci_spi); 1071 + ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), 1072 + dspi); 849 1073 if (ret) 850 1074 goto unmap_io; 851 1075 852 - /* Allocate tmp_buf for tx_buf */ 853 - davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); 854 - if (davinci_spi->tmp_buf == NULL) { 855 - ret = -ENOMEM; 1076 + dspi->bitbang.master = spi_master_get(master); 1077 + if (dspi->bitbang.master == NULL) { 1078 + ret = -ENODEV; 856 1079 goto irq_free; 857 1080 } 858 1081 859 - davinci_spi->bitbang.master = spi_master_get(master); 860 - if (davinci_spi->bitbang.master == NULL) { 861 - ret = -ENODEV; 862 - goto free_tmp_buf; 863 - } 864 - 865 - davinci_spi->clk = clk_get(&pdev->dev, NULL); 866 - if (IS_ERR(davinci_spi->clk)) { 1082 + dspi->clk = clk_get(&pdev->dev, NULL); 1083 + if (IS_ERR(dspi->clk)) { 867 1084 ret = -ENODEV; 868 1085 goto put_master; 869 1086 } 870 - clk_enable(davinci_spi->clk); 871 - 1087 + clk_enable(dspi->clk); 872 1088 873 1089 master->bus_num = pdev->id; 874 1090 master->num_chipselect = pdata->num_chipselect; 875 1091 master->setup = davinci_spi_setup; 876 - master->cleanup = davinci_spi_cleanup; 877 1092 878 - davinci_spi->bitbang.chipselect = davinci_spi_chipselect; 879 - davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; 1093 + dspi->bitbang.chipselect = davinci_spi_chipselect; 1094 + dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; 880 1095 881 - davinci_spi->version = pdata->version; 882 - use_dma = pdata->use_dma; 1096 + dspi->version = pdata->version; 883 1097 884 - 
davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; 885 - if (davinci_spi->version == SPI_VERSION_2) 886 - davinci_spi->bitbang.flags |= SPI_READY; 1098 + dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; 1099 + if (dspi->version == SPI_VERSION_2) 1100 + dspi->bitbang.flags |= SPI_READY; 887 1101 888 - if (use_dma) { 889 - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 890 - if (r) 891 - dma_rx_chan = r->start; 892 - r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 893 - if (r) 894 - dma_tx_chan = r->start; 895 - r = platform_get_resource(pdev, IORESOURCE_DMA, 2); 896 - if (r) 897 - dma_eventq = r->start; 898 - } 1102 + r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1103 + if (r) 1104 + dma_rx_chan = r->start; 1105 + r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1106 + if (r) 1107 + dma_tx_chan = r->start; 1108 + r = platform_get_resource(pdev, IORESOURCE_DMA, 2); 1109 + if (r) 1110 + dma_eventq = r->start; 899 1111 900 - if (!use_dma || 901 - dma_rx_chan == SPI_NO_RESOURCE || 902 - dma_tx_chan == SPI_NO_RESOURCE || 903 - dma_eventq == SPI_NO_RESOURCE) { 904 - davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; 905 - use_dma = 0; 906 - } else { 907 - davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; 908 - davinci_spi->dma_channels = kzalloc(master->num_chipselect 909 - * sizeof(struct davinci_spi_dma), GFP_KERNEL); 910 - if (davinci_spi->dma_channels == NULL) { 911 - ret = -ENOMEM; 1112 + dspi->bitbang.txrx_bufs = davinci_spi_bufs; 1113 + if (dma_rx_chan != SPI_NO_RESOURCE && 1114 + dma_tx_chan != SPI_NO_RESOURCE && 1115 + dma_eventq != SPI_NO_RESOURCE) { 1116 + dspi->dma.rx_channel = dma_rx_chan; 1117 + dspi->dma.tx_channel = dma_tx_chan; 1118 + dspi->dma.eventq = dma_eventq; 1119 + 1120 + ret = davinci_spi_request_dma(dspi); 1121 + if (ret) 912 1122 goto free_clk; 913 - } 914 1123 915 - for (i = 0; i < master->num_chipselect; i++) { 916 - davinci_spi->dma_channels[i].dma_rx_channel = -1; 917 - davinci_spi->dma_channels[i].dma_rx_sync_dev = 918 - dma_rx_chan; 919 - davinci_spi->dma_channels[i].dma_tx_channel = -1; 920 - davinci_spi->dma_channels[i].dma_tx_sync_dev = 921 - dma_tx_chan; 922 - davinci_spi->dma_channels[i].eventq = dma_eventq; 923 - } 924 - dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" 925 - "Using RX channel = %d , TX channel = %d and " 926 - "event queue = %d", dma_rx_chan, dma_tx_chan, 1124 + dev_info(&pdev->dev, "DMA: supported\n"); 1125 + dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " 1126 + "event queue: %d\n", dma_rx_chan, dma_tx_chan, 927 1127 dma_eventq); 928 1128 } 929 1129 930 - davinci_spi->get_rx = davinci_spi_rx_buf_u8; 931 - davinci_spi->get_tx = davinci_spi_tx_buf_u8; 1130 + dspi->get_rx = davinci_spi_rx_buf_u8; 1131 + dspi->get_tx = davinci_spi_tx_buf_u8; 932 1132 933 - init_completion(&davinci_spi->done); 1133 + init_completion(&dspi->done); 934 1134 935 1135 /* Reset In/OUT SPI module */ 936 - iowrite32(0, davinci_spi->base + SPIGCR0); 1136 + iowrite32(0, dspi->base + SPIGCR0); 937 1137 udelay(100); 938 - iowrite32(1, davinci_spi->base + SPIGCR0); 1138 + iowrite32(1, dspi->base + SPIGCR0); 939 1139 940 - /* Clock internal */ 941 - if (davinci_spi->pdata->clk_internal) 942 - set_io_bits(davinci_spi->base + SPIGCR1, 943 - SPIGCR1_CLKMOD_MASK); 1140 + /* Set up SPIPC0. 
CS and ENA init is done in davinci_spi_setup */ 1141 + spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; 1142 + iowrite32(spipc0, dspi->base + SPIPC0); 1143 + 1144 + /* initialize chip selects */ 1145 + if (pdata->chip_sel) { 1146 + for (i = 0; i < pdata->num_chipselect; i++) { 1147 + if (pdata->chip_sel[i] != SPI_INTERN_CS) 1148 + gpio_direction_output(pdata->chip_sel[i], 1); 1149 + } 1150 + } 1151 + 1152 + if (pdata->intr_line) 1153 + iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); 944 1154 else 945 - clear_io_bits(davinci_spi->base + SPIGCR1, 946 - SPIGCR1_CLKMOD_MASK); 1155 + iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); 1156 + 1157 + iowrite32(CS_DEFAULT, dspi->base + SPIDEF); 947 1158 948 1159 /* master mode default */ 949 - set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); 1160 + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); 1161 + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); 1162 + set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); 950 1163 951 - if (davinci_spi->pdata->intr_level) 952 - iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); 953 - else 954 - iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); 955 - 956 - ret = spi_bitbang_start(&davinci_spi->bitbang); 1164 + ret = spi_bitbang_start(&dspi->bitbang); 957 1165 if (ret) 958 - goto free_clk; 1166 + goto free_dma; 959 1167 960 - dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); 961 - 962 - if (!pdata->poll_mode) 963 - dev_info(&pdev->dev, "Operating in interrupt mode" 964 - " using IRQ %d\n", davinci_spi->irq); 1168 + dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); 965 1169 966 1170 return ret; 967 1171 1172 + free_dma: 1173 + edma_free_channel(dspi->dma.tx_channel); 1174 + edma_free_channel(dspi->dma.rx_channel); 1175 + edma_free_slot(dspi->dma.dummy_param_slot); 968 1176 free_clk: 969 - clk_disable(davinci_spi->clk); 970 - clk_put(davinci_spi->clk); 1177 + clk_disable(dspi->clk); 1178 + clk_put(dspi->clk); 971 1179 put_master: 972 1180 spi_master_put(master); 973 - free_tmp_buf: 974 - kfree(davinci_spi->tmp_buf); 975 1181 irq_free: 976 - free_irq(davinci_spi->irq, davinci_spi); 1182 + free_irq(dspi->irq, dspi); 977 1183 unmap_io: 978 - iounmap(davinci_spi->base); 1184 + iounmap(dspi->base); 979 1185 release_region: 980 - release_mem_region(davinci_spi->pbase, davinci_spi->region_size); 1186 + release_mem_region(dspi->pbase, resource_size(r)); 981 1187 free_master: 982 1188 kfree(master); 983 1189 err: ··· 974 1222 */ 975 1223 static int __exit davinci_spi_remove(struct platform_device *pdev) 976 1224 { 977 - struct davinci_spi *davinci_spi; 1225 + struct davinci_spi *dspi; 978 1226 struct spi_master *master; 1227 + struct resource *r; 979 1228 980 1229 master = dev_get_drvdata(&pdev->dev); 981 - davinci_spi = spi_master_get_devdata(master); 1230 + dspi = spi_master_get_devdata(master); 982 1231 983 - spi_bitbang_stop(&davinci_spi->bitbang); 1232 + spi_bitbang_stop(&dspi->bitbang); 984 1233 985 - clk_disable(davinci_spi->clk); 986 - clk_put(davinci_spi->clk); 1234 + clk_disable(dspi->clk); 1235 + clk_put(dspi->clk); 987 1236 spi_master_put(master); 988 - kfree(davinci_spi->tmp_buf); 989 - free_irq(davinci_spi->irq, davinci_spi); 990 - iounmap(davinci_spi->base); 991 - release_mem_region(davinci_spi->pbase, davinci_spi->region_size); 1237 + free_irq(dspi->irq, dspi); 1238 + iounmap(dspi->base); 1239 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1240 + release_mem_region(dspi->pbase, resource_size(r)); 992 1241 993 1242 return 0; 
994 1243 } 995 1244 996 1245 static struct platform_driver davinci_spi_driver = { 997 - .driver.name = "spi_davinci", 1246 + .driver = { 1247 + .name = "spi_davinci", 1248 + .owner = THIS_MODULE, 1249 + }, 998 1250 .remove = __exit_p(davinci_spi_remove), 999 1251 }; 1000 1252
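Note on the davinci rework above: per-slave settings now travel in a struct davinci_spi_config hung off spi->controller_data (the driver falls back to davinci_spi_default_cfg when none is supplied), while the platform data keeps only controller-wide fields. A minimal board-code sketch, assuming the usual spi_board_info registration path; the m25p80 device and every value shown are illustrative, not taken from this series:

static struct davinci_spi_config example_flash_cfg = {
	/* pick polled, interrupt or DMA IO per slave device */
	.io_type	= SPI_IO_TYPE_DMA,
};

static struct spi_board_info example_spi_info[] __initdata = {
	{
		.modalias		= "m25p80",
		.controller_data	= &example_flash_cfg,
		.max_speed_hz		= 30000000,
		.bus_num		= 0,
		.chip_select		= 0,
	},
};

/* Registered from board init with spi_register_board_info(example_spi_info,
 * ARRAY_SIZE(example_spi_info)); note davinci_spi_setup() defaults
 * bits_per_word to 8 when the slave leaves it at zero. */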
+32 -20
drivers/spi/dw_spi.c
··· 164 164 165 165 static void wait_till_not_busy(struct dw_spi *dws) 166 166 { 167 - unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); 167 + unsigned long end = jiffies + 1 + usecs_to_jiffies(5000); 168 168 169 169 while (time_before(jiffies, end)) { 170 170 if (!(dw_readw(dws, sr) & SR_BUSY)) 171 171 return; 172 + cpu_relax(); 172 173 } 173 174 dev_err(&dws->master->dev, 174 - "DW SPI: Status keeps busy for 1000us after a read/write!\n"); 175 + "DW SPI: Status keeps busy for 5000us after a read/write!\n"); 175 176 } 176 177 177 178 static void flush(struct dw_spi *dws) 178 179 { 179 - while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) 180 + while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) { 180 181 dw_readw(dws, dr); 182 + cpu_relax(); 183 + } 181 184 182 185 wait_till_not_busy(dws); 183 186 } ··· 288 285 */ 289 286 static int map_dma_buffers(struct dw_spi *dws) 290 287 { 291 - if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited 292 - || !dws->cur_chip->enable_dma) 288 + if (!dws->cur_msg->is_dma_mapped 289 + || !dws->dma_inited 290 + || !dws->cur_chip->enable_dma 291 + || !dws->dma_ops) 293 292 return 0; 294 293 295 294 if (dws->cur_transfer->tx_dma) ··· 343 338 tasklet_schedule(&dws->pump_transfers); 344 339 } 345 340 346 - static void transfer_complete(struct dw_spi *dws) 341 + void dw_spi_xfer_done(struct dw_spi *dws) 347 342 { 348 343 /* Update total byte transfered return count actual bytes read */ 349 344 dws->cur_msg->actual_length += dws->len; ··· 358 353 } else 359 354 tasklet_schedule(&dws->pump_transfers); 360 355 } 356 + EXPORT_SYMBOL_GPL(dw_spi_xfer_done); 361 357 362 358 static irqreturn_t interrupt_transfer(struct dw_spi *dws) 363 359 { ··· 390 384 if (dws->tx_end > dws->tx) 391 385 spi_umask_intr(dws, SPI_INT_TXEI); 392 386 else 393 - transfer_complete(dws); 387 + dw_spi_xfer_done(dws); 394 388 } 395 389 396 390 return IRQ_HANDLED; ··· 425 419 */ 426 420 dws->read(dws); 427 421 428 - transfer_complete(dws); 429 - } 430 - 431 - static void dma_transfer(struct dw_spi *dws, int cs_change) 432 - { 422 + dw_spi_xfer_done(dws); 433 423 } 434 424 435 425 static void pump_transfers(unsigned long data) ··· 594 592 spi_set_clk(dws, clk_div ? 
clk_div : chip->clk_div); 595 593 spi_chip_sel(dws, spi->chip_select); 596 594 597 - /* Set the interrupt mask, for poll mode just diable all int */ 595 + /* Set the interrupt mask, for poll mode just disable all int */ 598 596 spi_mask_intr(dws, 0xff); 599 597 if (imask) 600 598 spi_umask_intr(dws, imask); ··· 607 605 } 608 606 609 607 if (dws->dma_mapped) 610 - dma_transfer(dws, cs_change); 608 + dws->dma_ops->dma_transfer(dws, cs_change); 611 609 612 610 if (chip->poll_mode) 613 611 poll_transfer(dws); ··· 903 901 master->setup = dw_spi_setup; 904 902 master->transfer = dw_spi_transfer; 905 903 906 - dws->dma_inited = 0; 907 - 908 904 /* Basic HW init */ 909 905 spi_hw_init(dws); 906 + 907 + if (dws->dma_ops && dws->dma_ops->dma_init) { 908 + ret = dws->dma_ops->dma_init(dws); 909 + if (ret) { 910 + dev_warn(&master->dev, "DMA init failed\n"); 911 + dws->dma_inited = 0; 912 + } 913 + } 910 914 911 915 /* Initial and start queue */ 912 916 ret = init_queue(dws); ··· 938 930 939 931 err_queue_alloc: 940 932 destroy_queue(dws); 933 + if (dws->dma_ops && dws->dma_ops->dma_exit) 934 + dws->dma_ops->dma_exit(dws); 941 935 err_diable_hw: 942 936 spi_enable_chip(dws, 0); 943 937 free_irq(dws->irq, dws); ··· 948 938 exit: 949 939 return ret; 950 940 } 951 - EXPORT_SYMBOL(dw_spi_add_host); 941 + EXPORT_SYMBOL_GPL(dw_spi_add_host); 952 942 953 943 void __devexit dw_spi_remove_host(struct dw_spi *dws) 954 944 { ··· 964 954 dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " 965 955 "complete, message memory not freed\n"); 966 956 957 + if (dws->dma_ops && dws->dma_ops->dma_exit) 958 + dws->dma_ops->dma_exit(dws); 967 959 spi_enable_chip(dws, 0); 968 960 /* Disable clk */ 969 961 spi_set_clk(dws, 0); ··· 974 962 /* Disconnect from the SPI framework */ 975 963 spi_unregister_master(dws->master); 976 964 } 977 - EXPORT_SYMBOL(dw_spi_remove_host); 965 + EXPORT_SYMBOL_GPL(dw_spi_remove_host); 978 966 979 967 int dw_spi_suspend_host(struct dw_spi *dws) 980 968 { ··· 987 975 spi_set_clk(dws, 0); 988 976 return ret; 989 977 } 990 - EXPORT_SYMBOL(dw_spi_suspend_host); 978 + EXPORT_SYMBOL_GPL(dw_spi_suspend_host); 991 979 992 980 int dw_spi_resume_host(struct dw_spi *dws) 993 981 { ··· 999 987 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 1000 988 return ret; 1001 989 } 1002 - EXPORT_SYMBOL(dw_spi_resume_host); 990 + EXPORT_SYMBOL_GPL(dw_spi_resume_host); 1003 991 1004 992 MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); 1005 993 MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
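Note on the dw_spi core changes above: DMA is now delegated through dws->dma_ops (dma_init/dma_exit/dma_transfer) and dw_spi_xfer_done() is exported so a back-end can signal completion from its own callback. A sketch of the shape such a back-end takes; everything prefixed example_ is hypothetical, only the hooks used by the core are assumed:

static int example_dma_init(struct dw_spi *dws)
{
	/* acquire channels and private data; on failure the core only
	 * warns and clears dws->dma_inited, so PIO keeps working */
	dws->dma_inited = 1;
	return 0;
}

static void example_dma_exit(struct dw_spi *dws)
{
	/* release whatever example_dma_init acquired */
}

static int example_dma_transfer(struct dw_spi *dws, int cs_change)
{
	/* program descriptors for dws->tx_dma / dws->rx_dma and, from the
	 * final DMA completion callback, call dw_spi_xfer_done(dws) */
	return 0;
}

static struct dw_spi_dma_ops example_dma_ops = {
	.dma_init	= example_dma_init,
	.dma_exit	= example_dma_exit,
	.dma_transfer	= example_dma_transfer,
};

/* The platform or PCI glue sets dws->dma_ops = &example_dma_ops before
 * calling dw_spi_add_host(dws), as dw_spi_mid.c does below. */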
+223
drivers/spi/dw_spi_mid.c
··· 1 + /* 2 + * dw_spi_mid.c - special handling for DW core on Intel MID platform 3 + * 4 + * Copyright (c) 2009, Intel Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License along 16 + * with this program; if not, write to the Free Software Foundation, 17 + * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 + */ 19 + 20 + #include <linux/dma-mapping.h> 21 + #include <linux/dmaengine.h> 22 + #include <linux/interrupt.h> 23 + #include <linux/slab.h> 24 + #include <linux/spi/spi.h> 25 + #include <linux/spi/dw_spi.h> 26 + 27 + #ifdef CONFIG_SPI_DW_MID_DMA 28 + #include <linux/intel_mid_dma.h> 29 + #include <linux/pci.h> 30 + 31 + struct mid_dma { 32 + struct intel_mid_dma_slave dmas_tx; 33 + struct intel_mid_dma_slave dmas_rx; 34 + }; 35 + 36 + static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) 37 + { 38 + struct dw_spi *dws = param; 39 + 40 + return dws->dmac && (&dws->dmac->dev == chan->device->dev); 41 + } 42 + 43 + static int mid_spi_dma_init(struct dw_spi *dws) 44 + { 45 + struct mid_dma *dw_dma = dws->dma_priv; 46 + struct intel_mid_dma_slave *rxs, *txs; 47 + dma_cap_mask_t mask; 48 + 49 + /* 50 + * Get pci device for DMA controller, currently it could only 51 + * be the DMA controller of either Moorestown or Medfield 52 + */ 53 + dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); 54 + if (!dws->dmac) 55 + dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); 56 + 57 + dma_cap_zero(mask); 58 + dma_cap_set(DMA_SLAVE, mask); 59 + 60 + /* 1. Init rx channel */ 61 + dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); 62 + if (!dws->rxchan) 63 + goto err_exit; 64 + rxs = &dw_dma->dmas_rx; 65 + rxs->hs_mode = LNW_DMA_HW_HS; 66 + rxs->cfg_mode = LNW_DMA_PER_TO_MEM; 67 + dws->rxchan->private = rxs; 68 + 69 + /* 2. Init tx channel */ 70 + dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); 71 + if (!dws->txchan) 72 + goto free_rxchan; 73 + txs = &dw_dma->dmas_tx; 74 + txs->hs_mode = LNW_DMA_HW_HS; 75 + txs->cfg_mode = LNW_DMA_MEM_TO_PER; 76 + dws->txchan->private = txs; 77 + 78 + dws->dma_inited = 1; 79 + return 0; 80 + 81 + free_rxchan: 82 + dma_release_channel(dws->rxchan); 83 + err_exit: 84 + return -1; 85 + 86 + } 87 + 88 + static void mid_spi_dma_exit(struct dw_spi *dws) 89 + { 90 + dma_release_channel(dws->txchan); 91 + dma_release_channel(dws->rxchan); 92 + } 93 + 94 + /* 95 + * dws->dma_chan_done is cleared before the dma transfer starts, 96 + * callback for rx/tx channel will each increment it by 1. 97 + * Reaching 2 means the whole spi transaction is done. 
98 + */ 99 + static void dw_spi_dma_done(void *arg) 100 + { 101 + struct dw_spi *dws = arg; 102 + 103 + if (++dws->dma_chan_done != 2) 104 + return; 105 + dw_spi_xfer_done(dws); 106 + } 107 + 108 + static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) 109 + { 110 + struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; 111 + struct dma_chan *txchan, *rxchan; 112 + struct dma_slave_config txconf, rxconf; 113 + u16 dma_ctrl = 0; 114 + 115 + /* 1. setup DMA related registers */ 116 + if (cs_change) { 117 + spi_enable_chip(dws, 0); 118 + dw_writew(dws, dmardlr, 0xf); 119 + dw_writew(dws, dmatdlr, 0x10); 120 + if (dws->tx_dma) 121 + dma_ctrl |= 0x2; 122 + if (dws->rx_dma) 123 + dma_ctrl |= 0x1; 124 + dw_writew(dws, dmacr, dma_ctrl); 125 + spi_enable_chip(dws, 1); 126 + } 127 + 128 + dws->dma_chan_done = 0; 129 + txchan = dws->txchan; 130 + rxchan = dws->rxchan; 131 + 132 + /* 2. Prepare the TX dma transfer */ 133 + txconf.direction = DMA_TO_DEVICE; 134 + txconf.dst_addr = dws->dma_addr; 135 + txconf.dst_maxburst = LNW_DMA_MSIZE_16; 136 + txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 137 + txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 138 + 139 + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, 140 + (unsigned long) &txconf); 141 + 142 + memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); 143 + dws->tx_sgl.dma_address = dws->tx_dma; 144 + dws->tx_sgl.length = dws->len; 145 + 146 + txdesc = txchan->device->device_prep_slave_sg(txchan, 147 + &dws->tx_sgl, 148 + 1, 149 + DMA_TO_DEVICE, 150 + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 151 + txdesc->callback = dw_spi_dma_done; 152 + txdesc->callback_param = dws; 153 + 154 + /* 3. Prepare the RX dma transfer */ 155 + rxconf.direction = DMA_FROM_DEVICE; 156 + rxconf.src_addr = dws->dma_addr; 157 + rxconf.src_maxburst = LNW_DMA_MSIZE_16; 158 + rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 159 + rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 160 + 161 + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, 162 + (unsigned long) &rxconf); 163 + 164 + memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); 165 + dws->rx_sgl.dma_address = dws->rx_dma; 166 + dws->rx_sgl.length = dws->len; 167 + 168 + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 169 + &dws->rx_sgl, 170 + 1, 171 + DMA_FROM_DEVICE, 172 + DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); 173 + rxdesc->callback = dw_spi_dma_done; 174 + rxdesc->callback_param = dws; 175 + 176 + /* rx must be started before tx due to spi instinct */ 177 + rxdesc->tx_submit(rxdesc); 178 + txdesc->tx_submit(txdesc); 179 + return 0; 180 + } 181 + 182 + static struct dw_spi_dma_ops mid_dma_ops = { 183 + .dma_init = mid_spi_dma_init, 184 + .dma_exit = mid_spi_dma_exit, 185 + .dma_transfer = mid_spi_dma_transfer, 186 + }; 187 + #endif 188 + 189 + /* Some specific info for SPI0 controller on Moorestown */ 190 + 191 + /* HW info for MRST CLk Control Unit, one 32b reg */ 192 + #define MRST_SPI_CLK_BASE 100000000 /* 100m */ 193 + #define MRST_CLK_SPI0_REG 0xff11d86c 194 + #define CLK_SPI_BDIV_OFFSET 0 195 + #define CLK_SPI_BDIV_MASK 0x00000007 196 + #define CLK_SPI_CDIV_OFFSET 9 197 + #define CLK_SPI_CDIV_MASK 0x00000e00 198 + #define CLK_SPI_DISABLE_OFFSET 8 199 + 200 + int dw_spi_mid_init(struct dw_spi *dws) 201 + { 202 + u32 *clk_reg, clk_cdiv; 203 + 204 + clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); 205 + if (!clk_reg) 206 + return -ENOMEM; 207 + 208 + /* get SPI controller operating freq info */ 209 + clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> 
CLK_SPI_CDIV_OFFSET; 210 + dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); 211 + iounmap(clk_reg); 212 + 213 + dws->num_cs = 16; 214 + dws->fifo_len = 40; /* FIFO has 40 words buffer */ 215 + 216 + #ifdef CONFIG_SPI_DW_MID_DMA 217 + dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); 218 + if (!dws->dma_priv) 219 + return -ENOMEM; 220 + dws->dma_ops = &mid_dma_ops; 221 + #endif 222 + return 0; 223 + }
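Worked example for the clock math in dw_spi_mid_init() above (the CDIV value is assumed, not read from real hardware):

	u32 clk_cdiv = 3;	/* example CDIV field from MRST_CLK_SPI0_REG */
	u32 max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
	/* 100000000 / 4 = 25000000, i.e. the 25 MHz the PCI glue used to
	 * hard-code before this series */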
+14 -6
drivers/spi/dw_spi_pci.c
··· 1 1 /* 2 - * mrst_spi_pci.c - PCI interface driver for DW SPI Core 2 + * dw_spi_pci.c - PCI interface driver for DW SPI Core 3 3 * 4 4 * Copyright (c) 2009, Intel Corporation. 5 5 * ··· 26 26 #define DRIVER_NAME "dw_spi_pci" 27 27 28 28 struct dw_spi_pci { 29 - struct pci_dev *pdev; 30 - struct dw_spi dws; 29 + struct pci_dev *pdev; 30 + struct dw_spi dws; 31 31 }; 32 32 33 33 static int __devinit spi_pci_probe(struct pci_dev *pdev, ··· 72 72 dws->parent_dev = &pdev->dev; 73 73 dws->bus_num = 0; 74 74 dws->num_cs = 4; 75 - dws->max_freq = 25000000; /* for Moorestwon */ 76 75 dws->irq = pdev->irq; 77 - dws->fifo_len = 40; /* FIFO has 40 words buffer */ 76 + 77 + /* 78 + * Specific handling for Intel MID paltforms, like dma setup, 79 + * clock rate, FIFO depth. 80 + */ 81 + if (pdev->device == 0x0800) { 82 + ret = dw_spi_mid_init(dws); 83 + if (ret) 84 + goto err_unmap; 85 + } 78 86 79 87 ret = dw_spi_add_host(dws); 80 88 if (ret) ··· 148 140 #endif 149 141 150 142 static const struct pci_device_id pci_ids[] __devinitdata = { 151 - /* Intel Moorestown platform SPI controller 0 */ 143 + /* Intel MID platform SPI controller 0 */ 152 144 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, 153 145 {}, 154 146 };
+15 -20
drivers/spi/mpc52xx_psc_spi.c
··· 363 363 } 364 364 365 365 /* bus_num is used only for the case dev->platform_data == NULL */ 366 - static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, 366 + static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, 367 367 u32 size, unsigned int irq, s16 bus_num) 368 368 { 369 369 struct fsl_spi_platform_data *pdata = dev->platform_data; ··· 450 450 return ret; 451 451 } 452 452 453 - static int __exit mpc52xx_psc_spi_do_remove(struct device *dev) 454 - { 455 - struct spi_master *master = dev_get_drvdata(dev); 456 - struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); 457 - 458 - flush_workqueue(mps->workqueue); 459 - destroy_workqueue(mps->workqueue); 460 - spi_unregister_master(master); 461 - free_irq(mps->irq, mps); 462 - if (mps->psc) 463 - iounmap(mps->psc); 464 - 465 - return 0; 466 - } 467 - 468 - static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op, 453 + static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op, 469 454 const struct of_device_id *match) 470 455 { 471 456 const u32 *regaddr_p; ··· 480 495 irq_of_parse_and_map(op->dev.of_node, 0), id); 481 496 } 482 497 483 - static int __exit mpc52xx_psc_spi_of_remove(struct platform_device *op) 498 + static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op) 484 499 { 485 - return mpc52xx_psc_spi_do_remove(&op->dev); 500 + struct spi_master *master = dev_get_drvdata(&op->dev); 501 + struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); 502 + 503 + flush_workqueue(mps->workqueue); 504 + destroy_workqueue(mps->workqueue); 505 + spi_unregister_master(master); 506 + free_irq(mps->irq, mps); 507 + if (mps->psc) 508 + iounmap(mps->psc); 509 + 510 + return 0; 486 511 } 487 512 488 513 static const struct of_device_id mpc52xx_psc_spi_of_match[] = { ··· 505 510 506 511 static struct of_platform_driver mpc52xx_psc_spi_of_driver = { 507 512 .probe = mpc52xx_psc_spi_of_probe, 508 - .remove = __exit_p(mpc52xx_psc_spi_of_remove), 513 + .remove = __devexit_p(mpc52xx_psc_spi_of_remove), 509 514 .driver = { 510 515 .name = "mpc52xx-psc-spi", 511 516 .owner = THIS_MODULE,
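The section changes above matter because __init text is discarded after boot, while an of_platform driver may still be asked to bind a device registered later; probe therefore belongs in __devinit and remove in __devexit. Reduced to its skeleton (the example_ names are placeholders):

static int __devinit example_of_probe(struct platform_device *op,
				      const struct of_device_id *match)
{
	return 0;
}

static int __devexit example_of_remove(struct platform_device *op)
{
	return 0;
}

static struct of_platform_driver example_of_driver = {
	.probe	= example_of_probe,
	.remove	= __devexit_p(example_of_remove),
};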
+3 -8
drivers/spi/omap2_mcspi.c
··· 397 397 398 398 if (tx != NULL) { 399 399 wait_for_completion(&mcspi_dma->dma_tx_completion); 400 - dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); 400 + dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE); 401 401 402 402 /* for TX_ONLY mode, be sure all words have shifted out */ 403 403 if (rx == NULL) { ··· 412 412 413 413 if (rx != NULL) { 414 414 wait_for_completion(&mcspi_dma->dma_rx_completion); 415 - dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); 415 + dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE); 416 416 omap2_mcspi_set_enable(spi, 0); 417 417 418 418 if (l & OMAP2_MCSPI_CHCONF_TURBO) { ··· 1025 1025 if (m->is_dma_mapped || len < DMA_MIN_BYTES) 1026 1026 continue; 1027 1027 1028 - /* Do DMA mapping "early" for better error reporting and 1029 - * dcache use. Note that if dma_unmap_single() ever starts 1030 - * to do real work on ARM, we'd need to clean up mappings 1031 - * for previous transfers on *ALL* exits of this loop... 1032 - */ 1033 1028 if (tx_buf != NULL) { 1034 1029 t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, 1035 1030 len, DMA_TO_DEVICE); ··· 1041 1046 dev_dbg(&spi->dev, "dma %cX %d bytes error\n", 1042 1047 'R', len); 1043 1048 if (tx_buf != NULL) 1044 - dma_unmap_single(NULL, t->tx_dma, 1049 + dma_unmap_single(&spi->dev, t->tx_dma, 1045 1050 len, DMA_TO_DEVICE); 1046 1051 return -EINVAL; 1047 1052 }
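The omap2_mcspi fix above is about handing the same struct device to both ends of the streaming DMA API; the required pairing as a minimal sketch (dev, buf and len are illustrative names):

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... perform the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	/* map, error check and unmap must all use the same device; the
	 * NULL previously passed to dma_unmap_single() is what broke here */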
+134 -56
drivers/spi/pxa2xx_spi.c
··· 23 23 #include <linux/errno.h> 24 24 #include <linux/interrupt.h> 25 25 #include <linux/platform_device.h> 26 + #include <linux/spi/pxa2xx_spi.h> 26 27 #include <linux/dma-mapping.h> 27 28 #include <linux/spi/spi.h> 28 29 #include <linux/workqueue.h> 29 30 #include <linux/delay.h> 30 - #include <linux/clk.h> 31 31 #include <linux/gpio.h> 32 32 #include <linux/slab.h> 33 33 ··· 35 35 #include <asm/irq.h> 36 36 #include <asm/delay.h> 37 37 38 - #include <mach/dma.h> 39 - #include <plat/ssp.h> 40 - #include <mach/pxa2xx_spi.h> 41 38 42 39 MODULE_AUTHOR("Stephen Street"); 43 40 MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); ··· 43 46 44 47 #define MAX_BUSES 3 45 48 46 - #define RX_THRESH_DFLT 8 47 - #define TX_THRESH_DFLT 8 48 49 #define TIMOUT_DFLT 1000 49 50 50 51 #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) ··· 163 168 u8 enable_dma; 164 169 u8 bits_per_word; 165 170 u32 speed_hz; 166 - int gpio_cs; 171 + union { 172 + int gpio_cs; 173 + unsigned int frm; 174 + }; 167 175 int gpio_cs_inverted; 168 176 int (*write)(struct driver_data *drv_data); 169 177 int (*read)(struct driver_data *drv_data); ··· 178 180 static void cs_assert(struct driver_data *drv_data) 179 181 { 180 182 struct chip_data *chip = drv_data->cur_chip; 183 + 184 + if (drv_data->ssp_type == CE4100_SSP) { 185 + write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); 186 + return; 187 + } 181 188 182 189 if (chip->cs_control) { 183 190 chip->cs_control(PXA2XX_CS_ASSERT); ··· 197 194 { 198 195 struct chip_data *chip = drv_data->cur_chip; 199 196 197 + if (drv_data->ssp_type == CE4100_SSP) 198 + return; 199 + 200 200 if (chip->cs_control) { 201 201 chip->cs_control(PXA2XX_CS_DEASSERT); 202 202 return; ··· 207 201 208 202 if (gpio_is_valid(chip->gpio_cs)) 209 203 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); 204 + } 205 + 206 + static void write_SSSR_CS(struct driver_data *drv_data, u32 val) 207 + { 208 + void __iomem *reg = drv_data->ioaddr; 209 + 210 + if (drv_data->ssp_type == CE4100_SSP) 211 + val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; 212 + 213 + write_SSSR(val, reg); 214 + } 215 + 216 + static int pxa25x_ssp_comp(struct driver_data *drv_data) 217 + { 218 + if (drv_data->ssp_type == PXA25x_SSP) 219 + return 1; 220 + if (drv_data->ssp_type == CE4100_SSP) 221 + return 1; 222 + return 0; 210 223 } 211 224 212 225 static int flush(struct driver_data *drv_data) ··· 239 214 read_SSDR(reg); 240 215 } 241 216 } while ((read_SSSR(reg) & SSSR_BSY) && --limit); 242 - write_SSSR(SSSR_ROR, reg); 217 + write_SSSR_CS(drv_data, SSSR_ROR); 243 218 244 219 return limit; 245 220 } ··· 249 224 void __iomem *reg = drv_data->ioaddr; 250 225 u8 n_bytes = drv_data->n_bytes; 251 226 252 - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 227 + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 253 228 || (drv_data->tx == drv_data->tx_end)) 254 229 return 0; 255 230 ··· 277 252 { 278 253 void __iomem *reg = drv_data->ioaddr; 279 254 280 - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 255 + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 281 256 || (drv_data->tx == drv_data->tx_end)) 282 257 return 0; 283 258 ··· 304 279 { 305 280 void __iomem *reg = drv_data->ioaddr; 306 281 307 - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 282 + if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 308 283 || (drv_data->tx == drv_data->tx_end)) 309 284 return 0; 310 285 ··· 331 306 { 332 307 void __iomem *reg = drv_data->ioaddr; 333 308 334 - if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 309 + if 
(((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) 335 310 || (drv_data->tx == drv_data->tx_end)) 336 311 return 0; 337 312 ··· 532 507 /* Stop and reset */ 533 508 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 534 509 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 535 - write_SSSR(drv_data->clear_sr, reg); 510 + write_SSSR_CS(drv_data, drv_data->clear_sr); 536 511 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 537 - if (drv_data->ssp_type != PXA25x_SSP) 512 + if (!pxa25x_ssp_comp(drv_data)) 538 513 write_SSTO(0, reg); 539 514 flush(drv_data); 540 515 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); ··· 554 529 555 530 /* Clear and disable interrupts on SSP and DMA channels*/ 556 531 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 557 - write_SSSR(drv_data->clear_sr, reg); 532 + write_SSSR_CS(drv_data, drv_data->clear_sr); 558 533 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 559 534 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 560 535 ··· 647 622 648 623 /* Clear and disable timeout interrupt, do the rest in 649 624 * dma_transfer_complete */ 650 - if (drv_data->ssp_type != PXA25x_SSP) 625 + if (!pxa25x_ssp_comp(drv_data)) 651 626 write_SSTO(0, reg); 652 627 653 628 /* finish this transfer, start the next */ ··· 660 635 return IRQ_NONE; 661 636 } 662 637 638 + static void reset_sccr1(struct driver_data *drv_data) 639 + { 640 + void __iomem *reg = drv_data->ioaddr; 641 + struct chip_data *chip = drv_data->cur_chip; 642 + u32 sccr1_reg; 643 + 644 + sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; 645 + sccr1_reg &= ~SSCR1_RFT; 646 + sccr1_reg |= chip->threshold; 647 + write_SSCR1(sccr1_reg, reg); 648 + } 649 + 663 650 static void int_error_stop(struct driver_data *drv_data, const char* msg) 664 651 { 665 652 void __iomem *reg = drv_data->ioaddr; 666 653 667 654 /* Stop and reset SSP */ 668 - write_SSSR(drv_data->clear_sr, reg); 669 - write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 670 - if (drv_data->ssp_type != PXA25x_SSP) 655 + write_SSSR_CS(drv_data, drv_data->clear_sr); 656 + reset_sccr1(drv_data); 657 + if (!pxa25x_ssp_comp(drv_data)) 671 658 write_SSTO(0, reg); 672 659 flush(drv_data); 673 660 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); ··· 695 658 void __iomem *reg = drv_data->ioaddr; 696 659 697 660 /* Stop SSP */ 698 - write_SSSR(drv_data->clear_sr, reg); 699 - write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 700 - if (drv_data->ssp_type != PXA25x_SSP) 661 + write_SSSR_CS(drv_data, drv_data->clear_sr); 662 + reset_sccr1(drv_data); 663 + if (!pxa25x_ssp_comp(drv_data)) 701 664 write_SSTO(0, reg); 702 665 703 666 /* Update total byte transfered return count actual bytes read */ ··· 751 714 } 752 715 753 716 if (drv_data->tx == drv_data->tx_end) { 754 - write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); 755 - /* PXA25x_SSP has no timeout, read trailing bytes */ 756 - if (drv_data->ssp_type == PXA25x_SSP) { 757 - if (!wait_ssp_rx_stall(reg)) 758 - { 759 - int_error_stop(drv_data, "interrupt_transfer: " 760 - "rx stall failed"); 761 - return IRQ_HANDLED; 717 + u32 bytes_left; 718 + u32 sccr1_reg; 719 + 720 + sccr1_reg = read_SSCR1(reg); 721 + sccr1_reg &= ~SSCR1_TIE; 722 + 723 + /* 724 + * PXA25x_SSP has no timeout, set up rx threshould for the 725 + * remaing RX bytes. 
726 + */ 727 + if (pxa25x_ssp_comp(drv_data)) { 728 + 729 + sccr1_reg &= ~SSCR1_RFT; 730 + 731 + bytes_left = drv_data->rx_end - drv_data->rx; 732 + switch (drv_data->n_bytes) { 733 + case 4: 734 + bytes_left >>= 1; 735 + case 2: 736 + bytes_left >>= 1; 762 737 } 763 - if (!drv_data->read(drv_data)) 764 - { 765 - int_error_stop(drv_data, 766 - "interrupt_transfer: " 767 - "trailing byte read failed"); 768 - return IRQ_HANDLED; 769 - } 770 - int_transfer_complete(drv_data); 738 + 739 + if (bytes_left > RX_THRESH_DFLT) 740 + bytes_left = RX_THRESH_DFLT; 741 + 742 + sccr1_reg |= SSCR1_RxTresh(bytes_left); 771 743 } 744 + write_SSCR1(sccr1_reg, reg); 772 745 } 773 746 774 747 /* We did something */ ··· 789 742 { 790 743 struct driver_data *drv_data = dev_id; 791 744 void __iomem *reg = drv_data->ioaddr; 745 + u32 sccr1_reg = read_SSCR1(reg); 746 + u32 mask = drv_data->mask_sr; 747 + u32 status; 748 + 749 + status = read_SSSR(reg); 750 + 751 + /* Ignore possible writes if we don't need to write */ 752 + if (!(sccr1_reg & SSCR1_TIE)) 753 + mask &= ~SSSR_TFS; 754 + 755 + if (!(status & mask)) 756 + return IRQ_NONE; 792 757 793 758 if (!drv_data->cur_msg) { 794 759 795 760 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 796 761 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 797 - if (drv_data->ssp_type != PXA25x_SSP) 762 + if (!pxa25x_ssp_comp(drv_data)) 798 763 write_SSTO(0, reg); 799 - write_SSSR(drv_data->clear_sr, reg); 764 + write_SSSR_CS(drv_data, drv_data->clear_sr); 800 765 801 766 dev_err(&drv_data->pdev->dev, "bad message state " 802 767 "in interrupt handler\n"); ··· 921 862 { 922 863 unsigned long ssp_clk = clk_get_rate(ssp->clk); 923 864 924 - if (ssp->type == PXA25x_SSP) 865 + if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) 925 866 return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; 926 867 else 927 868 return ((ssp_clk / rate - 1) & 0xfff) << 8; ··· 1147 1088 1148 1089 /* Clear status */ 1149 1090 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; 1150 - write_SSSR(drv_data->clear_sr, reg); 1091 + write_SSSR_CS(drv_data, drv_data->clear_sr); 1151 1092 } 1152 1093 1153 1094 /* see if we need to reload the config registers */ ··· 1157 1098 1158 1099 /* stop the SSP, and update the other bits */ 1159 1100 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 1160 - if (drv_data->ssp_type != PXA25x_SSP) 1101 + if (!pxa25x_ssp_comp(drv_data)) 1161 1102 write_SSTO(chip->timeout, reg); 1162 1103 /* first set CR1 without interrupt and service enables */ 1163 1104 write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); ··· 1165 1106 write_SSCR0(cr0, reg); 1166 1107 1167 1108 } else { 1168 - if (drv_data->ssp_type != PXA25x_SSP) 1109 + if (!pxa25x_ssp_comp(drv_data)) 1169 1110 write_SSTO(chip->timeout, reg); 1170 1111 } 1171 1112 ··· 1292 1233 uint tx_thres = TX_THRESH_DFLT; 1293 1234 uint rx_thres = RX_THRESH_DFLT; 1294 1235 1295 - if (drv_data->ssp_type != PXA25x_SSP 1236 + if (!pxa25x_ssp_comp(drv_data) 1296 1237 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { 1297 1238 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " 1298 1239 "b/w not 4-32 for type non-PXA25x_SSP\n", 1299 1240 drv_data->ssp_type, spi->bits_per_word); 1300 1241 return -EINVAL; 1301 - } 1302 - else if (drv_data->ssp_type == PXA25x_SSP 1242 + } else if (pxa25x_ssp_comp(drv_data) 1303 1243 && (spi->bits_per_word < 4 1304 1244 || spi->bits_per_word > 16)) { 1305 1245 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " ··· 1317 1259 return -ENOMEM; 1318 1260 } 1319 1261 1320 - chip->gpio_cs = -1; 1262 + if 
(drv_data->ssp_type == CE4100_SSP) { 1263 + if (spi->chip_select > 4) { 1264 + dev_err(&spi->dev, "failed setup: " 1265 + "cs number must not be > 4.\n"); 1266 + kfree(chip); 1267 + return -EINVAL; 1268 + } 1269 + 1270 + chip->frm = spi->chip_select; 1271 + } else 1272 + chip->gpio_cs = -1; 1321 1273 chip->enable_dma = 0; 1322 1274 chip->timeout = TIMOUT_DFLT; 1323 1275 chip->dma_burst_size = drv_data->master_info->enable_dma ? ··· 1383 1315 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); 1384 1316 1385 1317 /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 1386 - if (drv_data->ssp_type != PXA25x_SSP) 1318 + if (!pxa25x_ssp_comp(drv_data)) 1387 1319 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 1388 1320 clk_get_rate(ssp->clk) 1389 1321 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), ··· 1418 1350 1419 1351 spi_set_ctldata(spi, chip); 1420 1352 1353 + if (drv_data->ssp_type == CE4100_SSP) 1354 + return 0; 1355 + 1421 1356 return setup_cs(spi, chip, chip_info); 1422 1357 } 1423 1358 1424 1359 static void cleanup(struct spi_device *spi) 1425 1360 { 1426 1361 struct chip_data *chip = spi_get_ctldata(spi); 1362 + struct driver_data *drv_data = spi_master_get_devdata(spi->master); 1427 1363 1428 1364 if (!chip) 1429 1365 return; 1430 1366 1431 - if (gpio_is_valid(chip->gpio_cs)) 1367 + if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) 1432 1368 gpio_free(chip->gpio_cs); 1433 1369 1434 1370 kfree(chip); 1435 1371 } 1436 1372 1437 - static int __init init_queue(struct driver_data *drv_data) 1373 + static int __devinit init_queue(struct driver_data *drv_data) 1438 1374 { 1439 1375 INIT_LIST_HEAD(&drv_data->queue); 1440 1376 spin_lock_init(&drv_data->lock); ··· 1526 1454 return 0; 1527 1455 } 1528 1456 1529 - static int __init pxa2xx_spi_probe(struct platform_device *pdev) 1457 + static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) 1530 1458 { 1531 1459 struct device *dev = &pdev->dev; 1532 1460 struct pxa2xx_spi_master *platform_info; ··· 1556 1484 drv_data->pdev = pdev; 1557 1485 drv_data->ssp = ssp; 1558 1486 1487 + master->dev.parent = &pdev->dev; 1488 + #ifdef CONFIG_OF 1489 + master->dev.of_node = pdev->dev.of_node; 1490 + #endif 1559 1491 /* the spi->mode bits understood by this driver: */ 1560 1492 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1561 1493 ··· 1576 1500 1577 1501 drv_data->ioaddr = ssp->mmio_base; 1578 1502 drv_data->ssdr_physical = ssp->phys_base + SSDR; 1579 - if (ssp->type == PXA25x_SSP) { 1503 + if (pxa25x_ssp_comp(drv_data)) { 1580 1504 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; 1581 1505 drv_data->dma_cr1 = 0; 1582 1506 drv_data->clear_sr = SSSR_ROR; ··· 1588 1512 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; 1589 1513 } 1590 1514 1591 - status = request_irq(ssp->irq, ssp_int, 0, dev_name(dev), drv_data); 1515 + status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), 1516 + drv_data); 1592 1517 if (status < 0) { 1593 1518 dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); 1594 1519 goto out_error_master_alloc; ··· 1638 1561 | SSCR0_Motorola 1639 1562 | SSCR0_DataSize(8), 1640 1563 drv_data->ioaddr); 1641 - if (drv_data->ssp_type != PXA25x_SSP) 1564 + if (!pxa25x_ssp_comp(drv_data)) 1642 1565 write_SSTO(0, drv_data->ioaddr); 1643 1566 write_SSPSP(0, drv_data->ioaddr); 1644 1567 ··· 1800 1723 .pm = &pxa2xx_spi_pm_ops, 1801 1724 #endif 1802 1725 }, 1726 + .probe = pxa2xx_spi_probe, 1803 1727 .remove = pxa2xx_spi_remove, 1804 1728 .shutdown = pxa2xx_spi_shutdown, 1805 1729 }; 1806 1730 1807 
1731 static int __init pxa2xx_spi_init(void) 1808 1732 { 1809 - return platform_driver_probe(&driver, pxa2xx_spi_probe); 1733 + return platform_driver_register(&driver); 1810 1734 } 1811 1735 subsys_initcall(pxa2xx_spi_init); 1812 1736
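Worked example for the end-of-transfer RX threshold logic added above (the byte count and word size are assumed values):

	u32 bytes_left = 10;	/* e.g. drv_data->rx_end - drv_data->rx */
	u32 n_bytes = 2;	/* 16-bit words on the wire */

	if (n_bytes == 4)
		bytes_left >>= 2;
	else if (n_bytes == 2)
		bytes_left >>= 1;
	if (bytes_left > RX_THRESH_DFLT)	/* default threshold, 8 here */
		bytes_left = RX_THRESH_DFLT;
	/* bytes_left == 5, so SSCR1_RxTresh(5) raises the RX interrupt once
	 * exactly the five outstanding words have arrived, instead of the
	 * old busy-wait for the trailing bytes */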
+201
drivers/spi/pxa2xx_spi_pci.c
··· 1 + /* 2 + * CE4100's SPI device is more or less the same one as found on PXA 3 + * 4 + */ 5 + #include <linux/pci.h> 6 + #include <linux/platform_device.h> 7 + #include <linux/of_device.h> 8 + #include <linux/spi/pxa2xx_spi.h> 9 + 10 + struct awesome_struct { 11 + struct ssp_device ssp; 12 + struct platform_device spi_pdev; 13 + struct pxa2xx_spi_master spi_pdata; 14 + }; 15 + 16 + static DEFINE_MUTEX(ssp_lock); 17 + static LIST_HEAD(ssp_list); 18 + 19 + struct ssp_device *pxa_ssp_request(int port, const char *label) 20 + { 21 + struct ssp_device *ssp = NULL; 22 + 23 + mutex_lock(&ssp_lock); 24 + 25 + list_for_each_entry(ssp, &ssp_list, node) { 26 + if (ssp->port_id == port && ssp->use_count == 0) { 27 + ssp->use_count++; 28 + ssp->label = label; 29 + break; 30 + } 31 + } 32 + 33 + mutex_unlock(&ssp_lock); 34 + 35 + if (&ssp->node == &ssp_list) 36 + return NULL; 37 + 38 + return ssp; 39 + } 40 + EXPORT_SYMBOL_GPL(pxa_ssp_request); 41 + 42 + void pxa_ssp_free(struct ssp_device *ssp) 43 + { 44 + mutex_lock(&ssp_lock); 45 + if (ssp->use_count) { 46 + ssp->use_count--; 47 + ssp->label = NULL; 48 + } else 49 + dev_err(&ssp->pdev->dev, "device already free\n"); 50 + mutex_unlock(&ssp_lock); 51 + } 52 + EXPORT_SYMBOL_GPL(pxa_ssp_free); 53 + 54 + static void plat_dev_release(struct device *dev) 55 + { 56 + struct awesome_struct *as = container_of(dev, 57 + struct awesome_struct, spi_pdev.dev); 58 + 59 + of_device_node_put(&as->spi_pdev.dev); 60 + } 61 + 62 + static int __devinit ce4100_spi_probe(struct pci_dev *dev, 63 + const struct pci_device_id *ent) 64 + { 65 + int ret; 66 + resource_size_t phys_beg; 67 + resource_size_t phys_len; 68 + struct awesome_struct *spi_info; 69 + struct platform_device *pdev; 70 + struct pxa2xx_spi_master *spi_pdata; 71 + struct ssp_device *ssp; 72 + 73 + ret = pci_enable_device(dev); 74 + if (ret) 75 + return ret; 76 + 77 + phys_beg = pci_resource_start(dev, 0); 78 + phys_len = pci_resource_len(dev, 0); 79 + 80 + if (!request_mem_region(phys_beg, phys_len, 81 + "CE4100 SPI")) { 82 + dev_err(&dev->dev, "Can't request register space.\n"); 83 + ret = -EBUSY; 84 + return ret; 85 + } 86 + 87 + spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); 88 + if (!spi_info) { 89 + ret = -ENOMEM; 90 + goto err_kz; 91 + } 92 + ssp = &spi_info->ssp; 93 + pdev = &spi_info->spi_pdev; 94 + spi_pdata = &spi_info->spi_pdata; 95 + 96 + pdev->name = "pxa2xx-spi"; 97 + pdev->id = dev->devfn; 98 + pdev->dev.parent = &dev->dev; 99 + pdev->dev.platform_data = &spi_info->spi_pdata; 100 + 101 + #ifdef CONFIG_OF 102 + pdev->dev.of_node = dev->dev.of_node; 103 + #endif 104 + pdev->dev.release = plat_dev_release; 105 + 106 + spi_pdata->num_chipselect = dev->devfn; 107 + 108 + ssp->phys_base = pci_resource_start(dev, 0); 109 + ssp->mmio_base = ioremap(phys_beg, phys_len); 110 + if (!ssp->mmio_base) { 111 + dev_err(&pdev->dev, "failed to ioremap() registers\n"); 112 + ret = -EIO; 113 + goto err_remap; 114 + } 115 + ssp->irq = dev->irq; 116 + ssp->port_id = pdev->id; 117 + ssp->type = PXA25x_SSP; 118 + 119 + mutex_lock(&ssp_lock); 120 + list_add(&ssp->node, &ssp_list); 121 + mutex_unlock(&ssp_lock); 122 + 123 + pci_set_drvdata(dev, spi_info); 124 + 125 + ret = platform_device_register(pdev); 126 + if (ret) 127 + goto err_dev_add; 128 + 129 + return ret; 130 + 131 + err_dev_add: 132 + pci_set_drvdata(dev, NULL); 133 + mutex_lock(&ssp_lock); 134 + list_del(&ssp->node); 135 + mutex_unlock(&ssp_lock); 136 + iounmap(ssp->mmio_base); 137 + 138 + err_remap: 139 + kfree(spi_info); 140 + 141 + err_kz: 142 + 
release_mem_region(phys_beg, phys_len); 143 + 144 + return ret; 145 + } 146 + 147 + static void __devexit ce4100_spi_remove(struct pci_dev *dev) 148 + { 149 + struct awesome_struct *spi_info; 150 + struct platform_device *pdev; 151 + struct ssp_device *ssp; 152 + 153 + spi_info = pci_get_drvdata(dev); 154 + 155 + ssp = &spi_info->ssp; 156 + pdev = &spi_info->spi_pdev; 157 + 158 + platform_device_unregister(pdev); 159 + 160 + iounmap(ssp->mmio_base); 161 + release_mem_region(pci_resource_start(dev, 0), 162 + pci_resource_len(dev, 0)); 163 + 164 + mutex_lock(&ssp_lock); 165 + list_del(&ssp->node); 166 + mutex_unlock(&ssp_lock); 167 + 168 + pci_set_drvdata(dev, NULL); 169 + pci_disable_device(dev); 170 + kfree(spi_info); 171 + } 172 + 173 + static struct pci_device_id ce4100_spi_devices[] __devinitdata = { 174 + 175 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, 176 + { }, 177 + }; 178 + MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); 179 + 180 + static struct pci_driver ce4100_spi_driver = { 181 + .name = "ce4100_spi", 182 + .id_table = ce4100_spi_devices, 183 + .probe = ce4100_spi_probe, 184 + .remove = __devexit_p(ce4100_spi_remove), 185 + }; 186 + 187 + static int __init ce4100_spi_init(void) 188 + { 189 + return pci_register_driver(&ce4100_spi_driver); 190 + } 191 + module_init(ce4100_spi_init); 192 + 193 + static void __exit ce4100_spi_exit(void) 194 + { 195 + pci_unregister_driver(&ce4100_spi_driver); 196 + } 197 + module_exit(ce4100_spi_exit); 198 + 199 + MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); 200 + MODULE_LICENSE("GPL v2"); 201 + MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
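The glue above also exports pxa_ssp_request()/pxa_ssp_free(), which is how the existing pxa2xx_spi core finds the SSP port this PCI probe adds to ssp_list. A minimal usage sketch, assuming only the two exported calls shown in the hunk; the consumer function and port number are hypothetical and not part of the patch:

#include <linux/errno.h>
#include <linux/spi/pxa2xx_spi.h>	/* pulls in the ssp_device declaration */

static int example_claim_ssp_port(int port)
{
	struct ssp_device *ssp;

	/* succeeds only if the port is on ssp_list and currently unused */
	ssp = pxa_ssp_request(port, "pxa2xx-spi");
	if (!ssp)
		return -ENODEV;

	/* ... use ssp->mmio_base, ssp->irq and ssp->type for transfers ... */

	/* drop the use count so the port can be claimed again */
	pxa_ssp_free(ssp);
	return 0;
}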
+3 -29
drivers/spi/spi_imx.c
··· 66 66 SPI_IMX_VER_0_5, 67 67 SPI_IMX_VER_0_7, 68 68 SPI_IMX_VER_2_3, 69 - SPI_IMX_VER_AUTODETECT, 70 69 }; 71 70 72 71 struct spi_imx_data; ··· 719 720 720 721 static struct platform_device_id spi_imx_devtype[] = { 721 722 { 722 - .name = DRIVER_NAME, 723 - .driver_data = SPI_IMX_VER_AUTODETECT, 724 - }, { 725 723 .name = "imx1-cspi", 726 724 .driver_data = SPI_IMX_VER_IMX1, 727 725 }, { ··· 798 802 799 803 init_completion(&spi_imx->xfer_done); 800 804 801 - if (pdev->id_entry->driver_data == SPI_IMX_VER_AUTODETECT) { 802 - if (cpu_is_mx25() || cpu_is_mx35()) 803 - spi_imx->devtype_data = 804 - spi_imx_devtype_data[SPI_IMX_VER_0_7]; 805 - else if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) 806 - spi_imx->devtype_data = 807 - spi_imx_devtype_data[SPI_IMX_VER_0_4]; 808 - else if (cpu_is_mx27() || cpu_is_mx21()) 809 - spi_imx->devtype_data = 810 - spi_imx_devtype_data[SPI_IMX_VER_0_0]; 811 - else if (cpu_is_mx1()) 812 - spi_imx->devtype_data = 813 - spi_imx_devtype_data[SPI_IMX_VER_IMX1]; 814 - else 815 - BUG(); 816 - } else 817 - spi_imx->devtype_data = 818 - spi_imx_devtype_data[pdev->id_entry->driver_data]; 819 - 820 - if (!spi_imx->devtype_data.intctrl) { 821 - dev_err(&pdev->dev, "no support for this device compiled in\n"); 822 - ret = -ENODEV; 823 - goto out_gpio_free; 824 - } 805 + spi_imx->devtype_data = 806 + spi_imx_devtype_data[pdev->id_entry->driver_data]; 825 807 826 808 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 827 809 if (!res) { ··· 821 847 } 822 848 823 849 spi_imx->irq = platform_get_irq(pdev, 0); 824 - if (spi_imx->irq <= 0) { 850 + if (spi_imx->irq < 0) { 825 851 ret = -EINVAL; 826 852 goto out_iounmap; 827 853 }
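With autodetection removed, the device must be registered under one of the explicit names in spi_imx_devtype so the driver can pick the matching devtype_data. A hypothetical board-side sketch using the only name visible in this hunk ("imx1-cspi"); the register window and IRQ are placeholders, and real SoC support code normally goes through its own registration helpers rather than open-coding this:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_cspi_resources[] = {
	{
		.start	= 0x00213000,		/* placeholder register window */
		.end	= 0x00213fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 13,			/* placeholder interrupt line */
		.end	= 13,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_cspi_device = {
	.name		= "imx1-cspi",		/* matches spi_imx_devtype, selects SPI_IMX_VER_IMX1 */
	.id		= 0,
	.resource	= example_cspi_resources,
	.num_resources	= ARRAY_SIZE(example_cspi_resources),
};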
+1 -1
drivers/spi/spi_nuc900.c
··· 449 449 release_mem_region(hw->res->start, resource_size(hw->res)); 450 450 kfree(hw->ioarea); 451 451 err_pdata: 452 - spi_master_put(hw->master);; 452 + spi_master_put(hw->master); 453 453 454 454 err_nomem: 455 455 return err;
+6 -6
drivers/spi/spi_topcliff_pch.c
··· 267 267 if (reg_spsr_val & SPSR_FI_BIT) { 268 268 /* disable FI & RFI interrupts */ 269 269 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, 270 - SPCR_FIE_BIT | SPCR_TFIE_BIT); 270 + SPCR_FIE_BIT | SPCR_RFIE_BIT); 271 271 272 272 /* transfer is completed;inform pch_spi_process_messages */ 273 273 data->transfer_complete = true; ··· 677 677 { 678 678 /* enable interrupts */ 679 679 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { 680 - /* set receive threhold to PCH_RX_THOLD */ 680 + /* set receive threshold to PCH_RX_THOLD */ 681 681 pch_spi_setclr_reg(data->master, PCH_SPCR, 682 - PCH_RX_THOLD << SPCR_TFIC_FIELD, 683 - ~MASK_TFIC_SPCR_BITS); 682 + PCH_RX_THOLD << SPCR_RFIC_FIELD, 683 + ~MASK_RFIC_SPCR_BITS); 684 684 /* enable FI and RFI interrupts */ 685 685 pch_spi_setclr_reg(data->master, PCH_SPCR, 686 - SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0); 686 + SPCR_RFIE_BIT | SPCR_FIE_BIT, 0); 687 687 } else { 688 - /* set receive threhold to maximum */ 688 + /* set receive threshold to maximum */ 689 689 pch_spi_setclr_reg(data->master, PCH_SPCR, 690 690 PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, 691 691 ~MASK_TFIC_SPCR_BITS);
+114 -19
drivers/spi/xilinx_spi.c
··· 1 1 /* 2 - * xilinx_spi.c 3 - * 4 2 * Xilinx SPI controller driver (master mode only) 5 3 * 6 4 * Author: MontaVista Software, Inc. 7 5 * source@mvista.com 8 6 * 9 - * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the 10 - * terms of the GNU General Public License version 2. This program is licensed 11 - * "as is" without any warranty of any kind, whether express or implied. 7 + * Copyright (c) 2010 Secret Lab Technologies, Ltd. 8 + * Copyright (c) 2009 Intel Corporation 9 + * 2002-2007 (c) MontaVista Software, Inc. 10 + 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License version 2 as 13 + * published by the Free Software Foundation. 12 14 */ 13 15 14 16 #include <linux/module.h> 15 17 #include <linux/init.h> 16 18 #include <linux/interrupt.h> 17 - 19 + #include <linux/of.h> 20 + #include <linux/platform_device.h> 18 21 #include <linux/spi/spi.h> 19 22 #include <linux/spi/spi_bitbang.h> 20 - #include <linux/io.h> 21 - 22 - #include "xilinx_spi.h" 23 23 #include <linux/spi/xilinx_spi.h> 24 + #include <linux/io.h> 24 25 25 26 #define XILINX_SPI_NAME "xilinx_spi" 26 27 ··· 351 350 return IRQ_HANDLED; 352 351 } 353 352 353 + #ifdef CONFIG_OF 354 + static const struct of_device_id xilinx_spi_of_match[] = { 355 + { .compatible = "xlnx,xps-spi-2.00.a", }, 356 + { .compatible = "xlnx,xps-spi-2.00.b", }, 357 + {} 358 + }; 359 + MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); 360 + #endif 361 + 354 362 struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, 355 - u32 irq, s16 bus_num) 363 + u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word) 356 364 { 357 365 struct spi_master *master; 358 366 struct xilinx_spi *xspi; 359 - struct xspi_platform_data *pdata = dev->platform_data; 360 367 int ret; 361 - 362 - if (!pdata) { 363 - dev_err(dev, "No platform data attached\n"); 364 - return NULL; 365 - } 366 368 367 369 master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); 368 370 if (!master) ··· 393 389 } 394 390 395 391 master->bus_num = bus_num; 396 - master->num_chipselect = pdata->num_chipselect; 392 + master->num_chipselect = num_cs; 397 393 #ifdef CONFIG_OF 398 394 master->dev.of_node = dev->of_node; 399 395 #endif 400 396 401 397 xspi->mem = *mem; 402 398 xspi->irq = irq; 403 - if (pdata->little_endian) { 399 + if (little_endian) { 404 400 xspi->read_fn = xspi_read32; 405 401 xspi->write_fn = xspi_write32; 406 402 } else { 407 403 xspi->read_fn = xspi_read32_be; 408 404 xspi->write_fn = xspi_write32_be; 409 405 } 410 - xspi->bits_per_word = pdata->bits_per_word; 406 + xspi->bits_per_word = bits_per_word; 411 407 if (xspi->bits_per_word == 8) { 412 408 xspi->tx_fn = xspi_tx8; 413 409 xspi->rx_fn = xspi_rx8; ··· 465 461 spi_master_put(xspi->bitbang.master); 466 462 } 467 463 EXPORT_SYMBOL(xilinx_spi_deinit); 464 + 465 + static int __devinit xilinx_spi_probe(struct platform_device *dev) 466 + { 467 + struct xspi_platform_data *pdata; 468 + struct resource *r; 469 + int irq, num_cs = 0, little_endian = 0, bits_per_word = 8; 470 + struct spi_master *master; 471 + u8 i; 472 + 473 + pdata = dev->dev.platform_data; 474 + if (pdata) { 475 + num_cs = pdata->num_chipselect; 476 + little_endian = pdata->little_endian; 477 + bits_per_word = pdata->bits_per_word; 478 + } 479 + 480 + #ifdef CONFIG_OF 481 + if (dev->dev.of_node) { 482 + const __be32 *prop; 483 + int len; 484 + 485 + /* number of slave select bits is required */ 486 + prop = 
of_get_property(dev->dev.of_node, "xlnx,num-ss-bits", 487 + &len); 488 + if (prop && len >= sizeof(*prop)) 489 + num_cs = __be32_to_cpup(prop); 490 + } 491 + #endif 492 + 493 + if (!num_cs) { 494 + dev_err(&dev->dev, "Missing slave select configuration data\n"); 495 + return -EINVAL; 496 + } 497 + 498 + 499 + r = platform_get_resource(dev, IORESOURCE_MEM, 0); 500 + if (!r) 501 + return -ENODEV; 502 + 503 + irq = platform_get_irq(dev, 0); 504 + if (irq < 0) 505 + return -ENXIO; 506 + 507 + master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs, 508 + little_endian, bits_per_word); 509 + if (!master) 510 + return -ENODEV; 511 + 512 + if (pdata) { 513 + for (i = 0; i < pdata->num_devices; i++) 514 + spi_new_device(master, pdata->devices + i); 515 + } 516 + 517 + platform_set_drvdata(dev, master); 518 + return 0; 519 + } 520 + 521 + static int __devexit xilinx_spi_remove(struct platform_device *dev) 522 + { 523 + xilinx_spi_deinit(platform_get_drvdata(dev)); 524 + platform_set_drvdata(dev, 0); 525 + 526 + return 0; 527 + } 528 + 529 + /* work with hotplug and coldplug */ 530 + MODULE_ALIAS("platform:" XILINX_SPI_NAME); 531 + 532 + static struct platform_driver xilinx_spi_driver = { 533 + .probe = xilinx_spi_probe, 534 + .remove = __devexit_p(xilinx_spi_remove), 535 + .driver = { 536 + .name = XILINX_SPI_NAME, 537 + .owner = THIS_MODULE, 538 + #ifdef CONFIG_OF 539 + .of_match_table = xilinx_spi_of_match, 540 + #endif 541 + }, 542 + }; 543 + 544 + static int __init xilinx_spi_pltfm_init(void) 545 + { 546 + return platform_driver_register(&xilinx_spi_driver); 547 + } 548 + module_init(xilinx_spi_pltfm_init); 549 + 550 + static void __exit xilinx_spi_pltfm_exit(void) 551 + { 552 + platform_driver_unregister(&xilinx_spi_driver); 553 + } 554 + module_exit(xilinx_spi_pltfm_exit); 468 555 469 556 MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); 470 557 MODULE_DESCRIPTION("Xilinx SPI driver");
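With the OF and platform variants folded into one driver, the probe above takes its configuration either from the "xlnx,num-ss-bits" device-tree property or from xspi_platform_data. A sketch of the non-OF side, assuming only the platform-data fields the probe dereferences (num_chipselect, little_endian, bits_per_word, devices/num_devices); the slave entry and clock rate are placeholders:

#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>

static struct spi_board_info example_xspi_slaves[] = {
	{
		.modalias	= "spidev",	/* placeholder slave */
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data example_xspi_pdata = {
	.num_chipselect	= 1,			/* required: probe fails without it */
	.little_endian	= 0,			/* big-endian register access */
	.bits_per_word	= 8,
	.devices	= example_xspi_slaves,
	.num_devices	= ARRAY_SIZE(example_xspi_slaves),
};

/* attach as .platform_data of a platform_device named "xilinx_spi" */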
-32
drivers/spi/xilinx_spi.h
··· 1 - /* 2 - * Xilinx SPI device driver API and platform data header file 3 - * 4 - * Copyright (c) 2009 Intel Corporation 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 - */ 19 - 20 - #ifndef _XILINX_SPI_H_ 21 - #define _XILINX_SPI_H_ 22 - 23 - #include <linux/spi/spi.h> 24 - #include <linux/spi/spi_bitbang.h> 25 - 26 - #define XILINX_SPI_NAME "xilinx_spi" 27 - 28 - struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, 29 - u32 irq, s16 bus_num); 30 - 31 - void xilinx_spi_deinit(struct spi_master *master); 32 - #endif
-133
drivers/spi/xilinx_spi_of.c
··· 1 - /* 2 - * Xilinx SPI OF device driver 3 - * 4 - * Copyright (c) 2009 Intel Corporation 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - * 10 - * This program is distributed in the hope that it will be useful, 11 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 - * GNU General Public License for more details. 14 - * 15 - * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 - */ 19 - 20 - /* Supports: 21 - * Xilinx SPI devices as OF devices 22 - * 23 - * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc. 24 - */ 25 - 26 - #include <linux/module.h> 27 - #include <linux/init.h> 28 - #include <linux/interrupt.h> 29 - #include <linux/io.h> 30 - #include <linux/slab.h> 31 - 32 - #include <linux/of_address.h> 33 - #include <linux/of_platform.h> 34 - #include <linux/of_device.h> 35 - #include <linux/of_spi.h> 36 - 37 - #include <linux/spi/xilinx_spi.h> 38 - #include "xilinx_spi.h" 39 - 40 - 41 - static int __devinit xilinx_spi_of_probe(struct platform_device *ofdev, 42 - const struct of_device_id *match) 43 - { 44 - struct spi_master *master; 45 - struct xspi_platform_data *pdata; 46 - struct resource r_mem; 47 - struct resource r_irq; 48 - int rc = 0; 49 - const u32 *prop; 50 - int len; 51 - 52 - rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); 53 - if (rc) { 54 - dev_warn(&ofdev->dev, "invalid address\n"); 55 - return rc; 56 - } 57 - 58 - rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); 59 - if (rc == NO_IRQ) { 60 - dev_warn(&ofdev->dev, "no IRQ found\n"); 61 - return -ENODEV; 62 - } 63 - 64 - ofdev->dev.platform_data = 65 - kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL); 66 - pdata = ofdev->dev.platform_data; 67 - if (!pdata) 68 - return -ENOMEM; 69 - 70 - /* number of slave select bits is required */ 71 - prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len); 72 - if (!prop || len < sizeof(*prop)) { 73 - dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); 74 - return -EINVAL; 75 - } 76 - pdata->num_chipselect = *prop; 77 - pdata->bits_per_word = 8; 78 - master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1); 79 - if (!master) 80 - return -ENODEV; 81 - 82 - dev_set_drvdata(&ofdev->dev, master); 83 - 84 - return 0; 85 - } 86 - 87 - static int __devexit xilinx_spi_remove(struct platform_device *ofdev) 88 - { 89 - xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev)); 90 - dev_set_drvdata(&ofdev->dev, 0); 91 - kfree(ofdev->dev.platform_data); 92 - ofdev->dev.platform_data = NULL; 93 - return 0; 94 - } 95 - 96 - static int __exit xilinx_spi_of_remove(struct platform_device *op) 97 - { 98 - return xilinx_spi_remove(op); 99 - } 100 - 101 - static const struct of_device_id xilinx_spi_of_match[] = { 102 - { .compatible = "xlnx,xps-spi-2.00.a", }, 103 - { .compatible = "xlnx,xps-spi-2.00.b", }, 104 - {} 105 - }; 106 - 107 - MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); 108 - 109 - static struct of_platform_driver xilinx_spi_of_driver = { 110 - .probe = xilinx_spi_of_probe, 111 - .remove = __exit_p(xilinx_spi_of_remove), 112 - .driver = { 113 - .name = "xilinx-xps-spi", 114 - .owner = THIS_MODULE, 115 - .of_match_table = xilinx_spi_of_match, 116 
- }, 117 - }; 118 - 119 - static int __init xilinx_spi_of_init(void) 120 - { 121 - return of_register_platform_driver(&xilinx_spi_of_driver); 122 - } 123 - module_init(xilinx_spi_of_init); 124 - 125 - static void __exit xilinx_spi_of_exit(void) 126 - { 127 - of_unregister_platform_driver(&xilinx_spi_of_driver); 128 - } 129 - module_exit(xilinx_spi_of_exit); 130 - 131 - MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 132 - MODULE_DESCRIPTION("Xilinx SPI platform driver"); 133 - MODULE_LICENSE("GPL v2");
-102
drivers/spi/xilinx_spi_pltfm.c
··· 1 - /* 2 - * Support for Xilinx SPI platform devices 3 - * Copyright (c) 2009 Intel Corporation 4 - * 5 - * This program is free software; you can redistribute it and/or modify 6 - * it under the terms of the GNU General Public License version 2 as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 - */ 18 - 19 - /* Supports: 20 - * Xilinx SPI devices as platform devices 21 - * 22 - * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc. 23 - */ 24 - 25 - #include <linux/module.h> 26 - #include <linux/init.h> 27 - #include <linux/interrupt.h> 28 - #include <linux/io.h> 29 - #include <linux/platform_device.h> 30 - 31 - #include <linux/spi/spi.h> 32 - #include <linux/spi/spi_bitbang.h> 33 - #include <linux/spi/xilinx_spi.h> 34 - 35 - #include "xilinx_spi.h" 36 - 37 - static int __devinit xilinx_spi_probe(struct platform_device *dev) 38 - { 39 - struct xspi_platform_data *pdata; 40 - struct resource *r; 41 - int irq; 42 - struct spi_master *master; 43 - u8 i; 44 - 45 - pdata = dev->dev.platform_data; 46 - if (!pdata) 47 - return -ENODEV; 48 - 49 - r = platform_get_resource(dev, IORESOURCE_MEM, 0); 50 - if (!r) 51 - return -ENODEV; 52 - 53 - irq = platform_get_irq(dev, 0); 54 - if (irq < 0) 55 - return -ENXIO; 56 - 57 - master = xilinx_spi_init(&dev->dev, r, irq, dev->id); 58 - if (!master) 59 - return -ENODEV; 60 - 61 - for (i = 0; i < pdata->num_devices; i++) 62 - spi_new_device(master, pdata->devices + i); 63 - 64 - platform_set_drvdata(dev, master); 65 - return 0; 66 - } 67 - 68 - static int __devexit xilinx_spi_remove(struct platform_device *dev) 69 - { 70 - xilinx_spi_deinit(platform_get_drvdata(dev)); 71 - platform_set_drvdata(dev, 0); 72 - 73 - return 0; 74 - } 75 - 76 - /* work with hotplug and coldplug */ 77 - MODULE_ALIAS("platform:" XILINX_SPI_NAME); 78 - 79 - static struct platform_driver xilinx_spi_driver = { 80 - .probe = xilinx_spi_probe, 81 - .remove = __devexit_p(xilinx_spi_remove), 82 - .driver = { 83 - .name = XILINX_SPI_NAME, 84 - .owner = THIS_MODULE, 85 - }, 86 - }; 87 - 88 - static int __init xilinx_spi_pltfm_init(void) 89 - { 90 - return platform_driver_register(&xilinx_spi_driver); 91 - } 92 - module_init(xilinx_spi_pltfm_init); 93 - 94 - static void __exit xilinx_spi_pltfm_exit(void) 95 - { 96 - platform_driver_unregister(&xilinx_spi_driver); 97 - } 98 - module_exit(xilinx_spi_pltfm_exit); 99 - 100 - MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 101 - MODULE_DESCRIPTION("Xilinx SPI platform driver"); 102 - MODULE_LICENSE("GPL v2");
+19 -5
include/linux/spi/dw_spi.h
··· 1 1 #ifndef DW_SPI_HEADER_H 2 2 #define DW_SPI_HEADER_H 3 + 3 4 #include <linux/io.h> 4 5 5 6 /* Bit fields in CTRLR0 */ ··· 83 82 though only low 16 bits matters */ 84 83 } __packed; 85 84 85 + struct dw_spi; 86 + struct dw_spi_dma_ops { 87 + int (*dma_init)(struct dw_spi *dws); 88 + void (*dma_exit)(struct dw_spi *dws); 89 + int (*dma_transfer)(struct dw_spi *dws, int cs_change); 90 + }; 91 + 86 92 struct dw_spi { 87 93 struct spi_master *master; 88 94 struct spi_device *cur_dev; ··· 144 136 /* Dma info */ 145 137 int dma_inited; 146 138 struct dma_chan *txchan; 139 + struct scatterlist tx_sgl; 147 140 struct dma_chan *rxchan; 148 - int txdma_done; 149 - int rxdma_done; 150 - u64 tx_param; 151 - u64 rx_param; 141 + struct scatterlist rx_sgl; 142 + int dma_chan_done; 152 143 struct device *dma_dev; 153 - dma_addr_t dma_addr; 144 + dma_addr_t dma_addr; /* phy address of the Data register */ 145 + struct dw_spi_dma_ops *dma_ops; 146 + void *dma_priv; /* platform relate info */ 147 + struct pci_dev *dmac; 154 148 155 149 /* Bus interface info */ 156 150 void *priv; ··· 226 216 extern void dw_spi_remove_host(struct dw_spi *dws); 227 217 extern int dw_spi_suspend_host(struct dw_spi *dws); 228 218 extern int dw_spi_resume_host(struct dw_spi *dws); 219 + extern void dw_spi_xfer_done(struct dw_spi *dws); 220 + 221 + /* platform related setup */ 222 + extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ 229 223 #endif /* DW_SPI_HEADER_H */
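The header now carries the dw_spi_dma_ops hooks used by the new DMA support; a bus glue fills them in before handing the controller to the core. A minimal sketch of that wiring, assuming only the callback signatures and dw_spi fields shown above (the example_* names and the surrounding probe context are hypothetical):

#include <linux/spi/dw_spi.h>

static int example_dma_init(struct dw_spi *dws)
{
	/* request DMA channels here and flag success for the core */
	dws->dma_inited = 1;
	return 0;
}

static void example_dma_exit(struct dw_spi *dws)
{
	/* release whatever example_dma_init() acquired */
}

static int example_dma_transfer(struct dw_spi *dws, int cs_change)
{
	/* map dws->tx_sgl/dws->rx_sgl and start both channels */
	return 0;
}

static struct dw_spi_dma_ops example_dma_ops = {
	.dma_init	= example_dma_init,
	.dma_exit	= example_dma_exit,
	.dma_transfer	= example_dma_transfer,
};

/* in the bus glue, before registering the host with the core: */
/*	dws->dma_ops  = &example_dma_ops;	*/
/*	dws->dma_priv = platform_state;		*/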
+152
include/linux/spi/pxa2xx_spi.h
··· 1 + /* 2 + * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License as published by 6 + * the Free Software Foundation; either version 2 of the License, or 7 + * (at your option) any later version. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 + */ 18 + #ifndef __linux_pxa2xx_spi_h 19 + #define __linux_pxa2xx_spi_h 20 + 21 + #include <linux/pxa2xx_ssp.h> 22 + 23 + #define PXA2XX_CS_ASSERT (0x01) 24 + #define PXA2XX_CS_DEASSERT (0x02) 25 + 26 + /* device.platform_data for SSP controller devices */ 27 + struct pxa2xx_spi_master { 28 + u32 clock_enable; 29 + u16 num_chipselect; 30 + u8 enable_dma; 31 + }; 32 + 33 + /* spi_board_info.controller_data for SPI slave devices, 34 + * copied to spi_device.platform_data ... mostly for dma tuning 35 + */ 36 + struct pxa2xx_spi_chip { 37 + u8 tx_threshold; 38 + u8 rx_threshold; 39 + u8 dma_burst_size; 40 + u32 timeout; 41 + u8 enable_loopback; 42 + int gpio_cs; 43 + void (*cs_control)(u32 command); 44 + }; 45 + 46 + #ifdef CONFIG_ARCH_PXA 47 + 48 + #include <linux/clk.h> 49 + #include <mach/dma.h> 50 + 51 + extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); 52 + 53 + #else 54 + /* 55 + * This is the implemtation for CE4100 on x86. ARM defines them in mach/ or 56 + * plat/ include path. 57 + * The CE4100 does not provide DMA support. This bits are here to let the driver 58 + * compile and will never be used. Maybe we get DMA support at a later point in 59 + * time. 60 + */ 61 + 62 + #define DCSR(n) (n) 63 + #define DSADR(n) (n) 64 + #define DTADR(n) (n) 65 + #define DCMD(n) (n) 66 + #define DRCMR(n) (n) 67 + 68 + #define DCSR_RUN (1 << 31) /* Run Bit */ 69 + #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch */ 70 + #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable */ 71 + #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ 72 + #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ 73 + #define DCSR_ENDINTR (1 << 2) /* End Interrupt */ 74 + #define DCSR_STARTINTR (1 << 1) /* Start Interrupt */ 75 + #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt */ 76 + 77 + #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable */ 78 + #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ 79 + #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ 80 + #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ 81 + #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ 82 + #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ 83 + #define DCSR_EORINTR (1 << 9) /* The end of Receive */ 84 + 85 + #define DRCMR_MAPVLD (1 << 7) /* Map Valid */ 86 + #define DRCMR_CHLNUM 0x1f /* mask for Channel Number */ 87 + 88 + #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor */ 89 + #define DDADR_STOP (1 << 0) /* Stop */ 90 + 91 + #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ 92 + #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. 
*/ 93 + #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ 94 + #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ 95 + #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ 96 + #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ 97 + #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ 98 + #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ 99 + #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ 100 + #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ 101 + #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ 102 + #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ 103 + #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ 104 + #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ 105 + 106 + /* 107 + * Descriptor structure for PXA's DMA engine 108 + * Note: this structure must always be aligned to a 16-byte boundary. 109 + */ 110 + 111 + typedef enum { 112 + DMA_PRIO_HIGH = 0, 113 + DMA_PRIO_MEDIUM = 1, 114 + DMA_PRIO_LOW = 2 115 + } pxa_dma_prio; 116 + 117 + /* 118 + * DMA registration 119 + */ 120 + 121 + static inline int pxa_request_dma(char *name, 122 + pxa_dma_prio prio, 123 + void (*irq_handler)(int, void *), 124 + void *data) 125 + { 126 + return -ENODEV; 127 + } 128 + 129 + static inline void pxa_free_dma(int dma_ch) 130 + { 131 + } 132 + 133 + /* 134 + * The CE4100 does not have the clk framework implemented and SPI clock can 135 + * not be switched on/off or the divider changed. 136 + */ 137 + static inline void clk_disable(struct clk *clk) 138 + { 139 + } 140 + 141 + static inline int clk_enable(struct clk *clk) 142 + { 143 + return 0; 144 + } 145 + 146 + static inline unsigned long clk_get_rate(struct clk *clk) 147 + { 148 + return 3686400; 149 + } 150 + 151 + #endif 152 + #endif
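Since the header now lives in include/linux/spi/, board files include <linux/spi/pxa2xx_spi.h> directly. A hedged wiring sketch using only the fields defined above; the thresholds, timeout and bus number are placeholders, not recommended values:

#include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h>

static struct pxa2xx_spi_master example_master_info = {
	.num_chipselect	= 1,
	.enable_dma	= 1,		/* no-op on CE4100, see the stubs above */
};

static struct pxa2xx_spi_chip example_chip_info = {
	.tx_threshold	= 8,
	.rx_threshold	= 8,
	.dma_burst_size	= 8,
	.timeout	= 235,
	.gpio_cs	= -1,		/* placeholder: let the controller drive chip-select */
};

static struct spi_board_info example_board_info[] = {
	{
		.modalias	 = "spidev",	/* placeholder slave */
		.max_speed_hz	 = 1000000,
		.bus_num	 = 1,
		.chip_select	 = 0,
		.controller_data = &example_chip_info,
	},
};

/* pass &example_master_info as platform_data of the "pxa2xx-spi" device */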
+1 -1
sound/soc/pxa/pxa-ssp.c
··· 20 20 #include <linux/platform_device.h> 21 21 #include <linux/clk.h> 22 22 #include <linux/io.h> 23 + #include <linux/pxa2xx_ssp.h> 23 24 24 25 #include <asm/irq.h> 25 26 ··· 34 33 #include <mach/hardware.h> 35 34 #include <mach/dma.h> 36 35 #include <mach/audio.h> 37 - #include <plat/ssp.h> 38 36 39 37 #include "../../arm/pxa2xx-pcm.h" 40 38 #include "pxa-ssp.h"