Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.17-rc3 852 lines 24 kB view raw
/*
 * SPI bus driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

#define DRIVER_NAME "sirfsoc_spi"

/* Controller register offsets (from sspi->base) */
#define SIRFSOC_SPI_CTRL		0x0000
#define SIRFSOC_SPI_CMD			0x0004
#define SIRFSOC_SPI_TX_RX_EN		0x0008
#define SIRFSOC_SPI_INT_EN		0x000C
#define SIRFSOC_SPI_INT_STATUS		0x0010
#define SIRFSOC_SPI_TX_DMA_IO_CTRL	0x0100
#define SIRFSOC_SPI_TX_DMA_IO_LEN	0x0104
#define SIRFSOC_SPI_TXFIFO_CTRL		0x0108
#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK	0x010C
#define SIRFSOC_SPI_TXFIFO_OP		0x0110
#define SIRFSOC_SPI_TXFIFO_STATUS	0x0114
#define SIRFSOC_SPI_TXFIFO_DATA		0x0118
#define SIRFSOC_SPI_RX_DMA_IO_CTRL	0x0120
#define SIRFSOC_SPI_RX_DMA_IO_LEN	0x0124
#define SIRFSOC_SPI_RXFIFO_CTRL		0x0128
#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK	0x012C
#define SIRFSOC_SPI_RXFIFO_OP		0x0130
#define SIRFSOC_SPI_RXFIFO_STATUS	0x0134
#define SIRFSOC_SPI_RXFIFO_DATA		0x0138
#define SIRFSOC_SPI_DUMMY_DELAY_CTL	0x0144

/* SPI CTRL register defines */
#define SIRFSOC_SPI_SLV_MODE		BIT(16)
#define SIRFSOC_SPI_CMD_MODE		BIT(17)
#define SIRFSOC_SPI_CS_IO_OUT		BIT(18)
#define SIRFSOC_SPI_CS_IO_MODE		BIT(19)
#define SIRFSOC_SPI_CLK_IDLE_STAT	BIT(20)
#define SIRFSOC_SPI_CS_IDLE_STAT	BIT(21)
#define SIRFSOC_SPI_TRAN_MSB		BIT(22)
#define SIRFSOC_SPI_DRV_POS_EDGE	BIT(23)
#define SIRFSOC_SPI_CS_HOLD_TIME	BIT(24)
#define SIRFSOC_SPI_CLK_SAMPLE_MODE	BIT(25)
/* Transfer data format: 2-bit field at bits 27:26 of CTRL */
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8	(0 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12	(1 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16	(2 << 26)
#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32	(3 << 26)
#define SIRFSOC_SPI_CMD_BYTE_NUM(x)	((x & 3) << 28)
#define SIRFSOC_SPI_ENA_AUTO_CLR	BIT(30)
#define SIRFSOC_SPI_MUL_DAT_MODE	BIT(31)

/* Interrupt Enable */
#define SIRFSOC_SPI_RX_DONE_INT_EN	BIT(0)
#define SIRFSOC_SPI_TX_DONE_INT_EN	BIT(1)
#define SIRFSOC_SPI_RX_OFLOW_INT_EN	BIT(2)
#define SIRFSOC_SPI_TX_UFLOW_INT_EN	BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA_INT_EN	BIT(4)
#define SIRFSOC_SPI_TX_IO_DMA_INT_EN	BIT(5)
#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_INT_EN	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_INT_EN	BIT(9)
#define SIRFSOC_SPI_FRM_END_INT_EN	BIT(10)

/* Mask covering all interrupt status bits (write to INT_STATUS to clear) */
#define SIRFSOC_SPI_INT_MASK_ALL	0x1FFF

/* Interrupt status */
#define SIRFSOC_SPI_RX_DONE		BIT(0)
#define SIRFSOC_SPI_TX_DONE		BIT(1)
#define SIRFSOC_SPI_RX_OFLOW		BIT(2)
#define SIRFSOC_SPI_TX_UFLOW		BIT(3)
#define SIRFSOC_SPI_RX_IO_DMA		BIT(4)
#define SIRFSOC_SPI_RX_FIFO_FULL	BIT(6)
#define SIRFSOC_SPI_TXFIFO_EMPTY	BIT(7)
#define SIRFSOC_SPI_RXFIFO_THD_REACH	BIT(8)
#define SIRFSOC_SPI_TXFIFO_THD_REACH	BIT(9)
#define SIRFSOC_SPI_FRM_END		BIT(10)

/* TX RX enable */
#define SIRFSOC_SPI_RX_EN		BIT(0)
#define SIRFSOC_SPI_TX_EN		BIT(1)
#define SIRFSOC_SPI_CMD_TX_EN		BIT(2)

/* TX/RX DMA_IO_CTRL register bits */
#define SIRFSOC_SPI_IO_MODE_SEL		BIT(0)
#define SIRFSOC_SPI_RX_DMA_FLUSH	BIT(2)

/* FIFO OPs */
#define SIRFSOC_SPI_FIFO_RESET		BIT(0)
#define SIRFSOC_SPI_FIFO_START		BIT(1)

/* FIFO CTRL */
#define SIRFSOC_SPI_FIFO_WIDTH_BYTE	(0 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_WORD	(1 << 0)
#define SIRFSOC_SPI_FIFO_WIDTH_DWORD	(2 << 0)

/* FIFO Status */
#define SIRFSOC_SPI_FIFO_LEVEL_MASK	0xFF
#define SIRFSOC_SPI_FIFO_FULL		BIT(8)
#define SIRFSOC_SPI_FIFO_EMPTY		BIT(9)
/* 256 bytes rx/tx FIFO */
#define SIRFSOC_SPI_FIFO_SIZE		256
#define SIRFSOC_SPI_DAT_FRM_LEN_MAX	(64 * 1024)

/* FIFO level-check thresholds: start/low/high watermark fields */
#define SIRFSOC_SPI_FIFO_SC(x)	((x) & 0x3F)
#define SIRFSOC_SPI_FIFO_LC(x)	(((x) & 0x3F) << 10)
#define SIRFSOC_SPI_FIFO_HC(x)	(((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x)	(((x) & 0xFF) << 2)

/*
 * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
 * due to the limitation of dma controller
 */

/*
 * NOTE(review): 'x' is used unparenthesized and cast to u32; this is only
 * safe because the argument is always a plain pointer/len and the SoC is
 * 32-bit. Not portable to 64-bit — confirm before reuse.
 */
#define ALIGNED(x) (!((u32)x & 0x3))
#define IS_DMA_VALID(x)	(x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
	ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))

#define SIRFSOC_MAX_CMD_BYTES	4

/* Per-controller driver state, allocated as spi_master devdata */
struct sirfsoc_spi {
	struct spi_bitbang bitbang;
	struct completion rx_done;
	struct completion tx_done;

	void __iomem *base;
	u32 ctrl_freq;  /* SPI controller clock speed */
	struct clk *clk;

	/* rx & tx bufs from the spi_transfer */
	const void *tx;
	void *rx;

	/* place received word into rx buffer */
	void (*rx_word) (struct sirfsoc_spi *);
	/* get word from tx buffer for sending */
	void (*tx_word) (struct sirfsoc_spi *);

	/* number of words left to be transmitted/received */
	unsigned int left_tx_word;
	unsigned int left_rx_word;

	/* rx & tx DMA channels */
	struct dma_chan *rx_chan;
	struct dma_chan *tx_chan;
	dma_addr_t src_start;
	dma_addr_t dst_start;
	void *dummypage;	/* stand-in buffer for one-directional transfers */
	int word_width; /* in bytes */

	/*
	 * if tx size is not more than 4 and rx size is NULL, use
	 * command model
	 */
	bool tx_by_cmd;

	/* per-CS gpio numbers (0 = use controller-native CS); flexible array */
	int chipselect[0];
};

/* Pop one byte from the RX FIFO into sspi->rx (if a buffer is set). */
static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data;
	u8 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u8) data;
		sspi->rx = rx;
	}

	/* counted down even when rx is NULL (data discarded) */
	sspi->left_rx_word--;
}

/* Push one byte from sspi->tx (or 0 if no buffer) into the TX FIFO. */
static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u8 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

/* Pop one 16-bit word from the RX FIFO (used for 12/16 bits_per_word). */
static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data;
	u16 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u16) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;
}

/* Push one 16-bit word into the TX FIFO (used for 12/16 bits_per_word). */
static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u16 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

/* Pop one 32-bit word from the RX FIFO. */
static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data;
	u32 *rx = sspi->rx;

	data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);

	if (rx) {
		*rx++ = (u32) data;
		sspi->rx = rx;
	}

	sspi->left_rx_word--;

}

/* Push one 32-bit word into the TX FIFO. */
static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
{
	u32 data = 0;
	const u32 *tx = sspi->tx;

	if (tx) {
		data = *tx++;
		sspi->tx = tx;
	}

	writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
	sspi->left_tx_word--;
}

/*
 * Interrupt handler: completes tx_done/rx_done for the three transfer
 * paths (command-register, error, PIO) and masks/acks all interrupts
 * before returning.
 */
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
{
	struct sirfsoc_spi *sspi = dev_id;
	u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
	/* command-register transfer finished */
	if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
		complete(&sspi->tx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}

	/* Error Conditions */
	if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
			spi_stat & SIRFSOC_SPI_TX_UFLOW) {
		/* wake both waiters so the transfer path can unwind */
		complete(&sspi->tx_done);
		complete(&sspi->rx_done);
		writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
				sspi->base + SIRFSOC_SPI_INT_STATUS);
		return IRQ_HANDLED;
	}
	if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
		complete(&sspi->tx_done);
	/*
	 * NOTE(review): busy-wait inside the interrupt handler for the
	 * RX_IO_DMA status bit; unbounded if the bit never sets — confirm
	 * the hardware guarantees it follows TXFIFO_EMPTY promptly.
	 */
	while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
		SIRFSOC_SPI_RX_IO_DMA))
		cpu_relax();
	complete(&sspi->rx_done);
	writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);

	return IRQ_HANDLED;
}

/* dmaengine completion callback: signal the waiting transfer path. */
static void spi_sirfsoc_dma_fini_callback(void *data)
{
	struct completion *dma_complete = data;

	complete(dma_complete);
}

/*
 * Transfer up to SIRFSOC_MAX_CMD_BYTES of tx-only data through the CMD
 * register (tx_by_cmd path). Returns bytes transferred, or 0 on timeout.
 * The byte-swapping below matches MSB-first wire order for 8/16-bit words.
 */
static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	/*
	 * NOTE(review): timeout is passed to wait_for_completion_timeout()
	 * in jiffies but computed directly from the byte count without
	 * msecs_to_jiffies() — confirm the intended units.
	 */
	int timeout = t->len * 10;
	u32 cmd;

	sspi = spi_master_get_devdata(spi->master);
	memcpy(&cmd, sspi->tx, t->len);	/* t->len <= 4, checked in setup */
	if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
		cmd = cpu_to_be32(cmd) >>
			((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
	if (sspi->word_width == 2 && t->len == 4 &&
			(!(spi->mode & SPI_LSB_FIRST)))
		cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
	writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
	writel(SIRFSOC_SPI_FRM_END_INT_EN,
		sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_CMD_TX_EN,
		sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "cmd transfer timeout\n");
		return 0;
	}

	return t->len;
}

/*
 * Full-duplex transfer using the rx/tx dmaengine channels. Maps both
 * buffers, submits slave descriptors, waits for completion (or times out
 * and terminates the channel), then unmaps and stops the FIFOs.
 */
static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
	struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;
	int timeout = t->len * 10;	/* see units note in cmd_transfer */

	sspi = spi_master_get_devdata(spi->master);
	/* reset + start both FIFOs, quiesce and ack all interrupts */
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
	writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
	if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
		/* frame length fits the DMA_IO_LEN registers */
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(sspi->left_tx_word - 1,
				sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	} else {
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
			sspi->base + SIRFSOC_SPI_CTRL);
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
	}
	/* BIDIRECTIONAL when tx and rx share one buffer (dummypage case) */
	sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
	rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
		sspi->dst_start, t->len, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	rx_desc->callback = spi_sirfsoc_dma_fini_callback;
	rx_desc->callback_param = &sspi->rx_done;

	sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
					(t->tx_buf != t->rx_buf) ?
					DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
		sspi->src_start, t->len, DMA_MEM_TO_DEV,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	tx_desc->callback = spi_sirfsoc_dma_fini_callback;
	tx_desc->callback_param = &sspi->tx_done;

	/*
	 * NOTE(review): dmaengine_prep_slave_single() can return NULL;
	 * rx_desc/tx_desc are dereferenced unchecked above.
	 */
	dmaengine_submit(tx_desc);
	dmaengine_submit(rx_desc);
	dma_async_issue_pending(sspi->tx_chan);
	dma_async_issue_pending(sspi->rx_chan);
	writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
	if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->rx_chan);
	} else
		sspi->left_rx_word = 0;	/* caller derives bytes done from this */
	/*
	 * we only wait tx-done event if transferring by DMA. for PIO,
	 * we get rx data by writing tx data, so if rx is done, tx has
	 * done earlier
	 */
	if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
		dev_err(&spi->dev, "transfer timeout\n");
		dmaengine_terminate_all(sspi->tx_chan);
	}
	dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
	dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
	/* TX, RX FIFO stop */
	writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
		writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
}

/*
 * PIO transfer path: feed/drain the 256-byte FIFOs in chunks, relying on
 * the TXFIFO_EMPTY interrupt (tx_done) plus the irq handler's RX poll
 * (rx_done) to pace each chunk. Loops until all words moved or timeout.
 */
static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	int timeout = t->len * 10;	/* see units note in cmd_transfer */

	sspi = spi_master_get_devdata(spi->master);
	do {
		/* re-arm FIFOs and interrupt state for this chunk */
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_RESET,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(SIRFSOC_SPI_FIFO_START,
			sspi->base + SIRFSOC_SPI_TXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_INT_MASK_ALL,
			sspi->base + SIRFSOC_SPI_INT_STATUS);
		writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
			SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
			sspi->base + SIRFSOC_SPI_CTRL);
		/* chunk size capped at one FIFO's worth of words */
		writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
		writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
				- 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
		/* prefill TX FIFO before enabling the transfer */
		while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
			sspi->tx_word(sspi);
		writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
			SIRFSOC_SPI_TX_UFLOW_INT_EN |
			SIRFSOC_SPI_RX_OFLOW_INT_EN,
			sspi->base + SIRFSOC_SPI_INT_EN);
		writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
			sspi->base + SIRFSOC_SPI_TX_RX_EN);
		if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
			!wait_for_completion_timeout(&sspi->rx_done, timeout)) {
			dev_err(&spi->dev, "transfer timeout\n");
			break;
		}
		/* drain whatever the chunk produced */
		while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
			& SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
			sspi->rx_word(sspi);
		writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
		writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	} while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
}

/*
 * spi_bitbang txrx_bufs hook: dispatch to command/DMA/PIO path and return
 * the number of bytes actually transferred.
 */
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	sspi = spi_master_get_devdata(spi->master);

	/* missing buffers are substituted with the shared dummy page */
	sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
	sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
	sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
	reinit_completion(&sspi->rx_done);
	reinit_completion(&sspi->tx_done);
	/*
	 * in the transfer, if transfer data using command register with rx_buf
	 * null, just fill command data into command register and wait for its
	 * completion.
	 */
	if (sspi->tx_by_cmd)
		spi_sirfsoc_cmd_transfer(spi, t);
	else if (IS_DMA_VALID(t))
		spi_sirfsoc_dma_transfer(spi, t);
	else
		spi_sirfsoc_pio_transfer(spi, t);

	return t->len - sspi->left_rx_word * sspi->word_width;
}

/*
 * spi_bitbang chipselect hook: drive CS either through the controller's
 * CS_IO_OUT bit (chipselect[] entry == 0) or through a GPIO, honouring
 * SPI_CS_HIGH polarity.
 */
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
{
	struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);

	if (sspi->chipselect[spi->chip_select] == 0) {
		/* native CS, controlled via the CTRL register */
		u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
		switch (value) {
		case BITBANG_CS_ACTIVE:
			if (spi->mode & SPI_CS_HIGH)
				regval |= SIRFSOC_SPI_CS_IO_OUT;
			else
				regval &= ~SIRFSOC_SPI_CS_IO_OUT;
			break;
		case BITBANG_CS_INACTIVE:
			if (spi->mode & SPI_CS_HIGH)
				regval &= ~SIRFSOC_SPI_CS_IO_OUT;
			else
				regval |= SIRFSOC_SPI_CS_IO_OUT;
			break;
		}
		writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
	} else {
		/* GPIO chip select */
		int gpio = sspi->chipselect[spi->chip_select];
		switch (value) {
		case BITBANG_CS_ACTIVE:
			gpio_direction_output(gpio,
					spi->mode & SPI_CS_HIGH ? 1 : 0);
			break;
		case BITBANG_CS_INACTIVE:
			gpio_direction_output(gpio,
					spi->mode & SPI_CS_HIGH ? 0 : 1);
			break;
		}
	}
}

/*
 * Program clock divisor, word format, mode bits, FIFO thresholds and the
 * transfer path (command/DMA/IO mode) for a device or a single transfer.
 * Called with t == NULL from spi_sirfsoc_setup() for device defaults.
 * Returns 0 or -EINVAL for an unreachable clock rate.
 */
static int
spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
{
	struct sirfsoc_spi *sspi;
	u8 bits_per_word = 0;
	int hz = 0;
	u32 regval;
	u32 txfifo_ctrl, rxfifo_ctrl;
	u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;	/* in 32-bit units */

	sspi = spi_master_get_devdata(spi->master);

	bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
	hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;

	/* divisor: SPI clock = ctrl_freq / (2 * (regval + 1)) */
	regval = (sspi->ctrl_freq / (2 * hz)) - 1;
	/*
	 * NOTE(review): regval is u32, so 'regval < 0' is always false;
	 * only the > 0xFFFF test is effective. An underflow from
	 * ctrl_freq/(2*hz) == 0 wraps to 0xFFFFFFFF and is caught by the
	 * first test, but the dead comparison should be removed.
	 */
	if (regval > 0xFFFF || regval < 0) {
		dev_err(&spi->dev, "Speed %d not supported\n", hz);
		return -EINVAL;
	}

	switch (bits_per_word) {
	case 8:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
		sspi->rx_word = spi_sirfsoc_rx_word_u8;
		sspi->tx_word = spi_sirfsoc_tx_word_u8;
		break;
	case 12:
	case 16:
		regval |= (bits_per_word ==  12) ?
			SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
			SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
		sspi->rx_word = spi_sirfsoc_rx_word_u16;
		sspi->tx_word = spi_sirfsoc_tx_word_u16;
		break;
	case 32:
		regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
		sspi->rx_word = spi_sirfsoc_rx_word_u32;
		sspi->tx_word = spi_sirfsoc_tx_word_u32;
		break;
	default:
		/* unreachable: core filters via bits_per_word_mask */
		BUG();
	}

	sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
	txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
					   sspi->word_width;
	rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
					   sspi->word_width;

	if (!(spi->mode & SPI_CS_HIGH))
		regval |= SIRFSOC_SPI_CS_IDLE_STAT;
	if (!(spi->mode & SPI_LSB_FIRST))
		regval |= SIRFSOC_SPI_TRAN_MSB;
	if (spi->mode & SPI_CPOL)
		regval |= SIRFSOC_SPI_CLK_IDLE_STAT;

	/*
	 * Data should be driven at least 1/2 cycle before the fetch edge
	 * to make sure that data gets stable at the fetch edge.
	 */
	if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
	    (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
		regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
	else
		regval |= SIRFSOC_SPI_DRV_POS_EDGE;

	writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
			SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
			SIRFSOC_SPI_FIFO_HC(2),
		sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
	writel(SIRFSOC_SPI_FIFO_SC(2) |
			SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
			SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
		sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
	writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
	writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);

	/* short tx-only transfers go through the CMD register */
	if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
		regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
				SIRFSOC_SPI_CMD_MODE);
		sspi->tx_by_cmd = true;
	} else {
		regval &= ~SIRFSOC_SPI_CMD_MODE;
		sspi->tx_by_cmd = false;
	}
	/*
	 * set spi controller in RISC chipselect mode, we are controlling CS by
	 * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
	 */
	regval |= SIRFSOC_SPI_CS_IO_MODE;
	writel(regval, sspi->base + SIRFSOC_SPI_CTRL);

	if (IS_DMA_VALID(t)) {
		/* Enable DMA mode for RX, TX */
		writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
		writel(SIRFSOC_SPI_RX_DMA_FLUSH,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
	} else {
		/* Enable IO mode for RX, TX */
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
		writel(SIRFSOC_SPI_IO_MODE_SEL,
			sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
	}

	return 0;
}

/* spi core setup hook: validate speed and apply device defaults. */
static int spi_sirfsoc_setup(struct spi_device *spi)
{
	if (!spi->max_speed_hz)
		return -EINVAL;

	return spi_sirfsoc_setup_transfer(spi, NULL);
}

/*
 * Probe: read chip-select layout from DT, claim cs gpios, map registers,
 * request irq and DMA channels, enable the clock, init FIFOs and register
 * the bitbang master. Unwinds via goto labels in reverse acquisition order.
 */
static int spi_sirfsoc_probe(struct platform_device *pdev)
{
	struct sirfsoc_spi *sspi;
	struct spi_master *master;
	struct resource *mem_res;
	int num_cs, cs_gpio, irq;
	int i;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node,
			"sirf,spi-num-chipselects", &num_cs);
	if (ret < 0) {
		dev_err(&pdev->dev, "Unable to get chip select number\n");
		goto err_cs;
	}

	/* chipselect[] flexible array sized by num_cs */
	master = spi_alloc_master(&pdev->dev,
			sizeof(*sspi) + sizeof(int) * num_cs);
	if (!master) {
		dev_err(&pdev->dev, "Unable to allocate SPI master\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, master);
	sspi = spi_master_get_devdata(master);

	master->num_chipselect = num_cs;

	for (i = 0; i < master->num_chipselect; i++) {
		cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
		if (cs_gpio < 0) {
			dev_err(&pdev->dev, "can't get cs gpio from DT\n");
			ret = -ENODEV;
			goto free_master;
		}

		sspi->chipselect[i] = cs_gpio;
		if (cs_gpio == 0)
			continue; /* use cs from spi controller */

		ret = gpio_request(cs_gpio, DRIVER_NAME);
		if (ret) {
			/* roll back gpios requested so far */
			while (i > 0) {
				i--;
				if (sspi->chipselect[i] > 0)
					gpio_free(sspi->chipselect[i]);
			}
			dev_err(&pdev->dev, "fail to request cs gpios\n");
			goto free_master;
		}
	}

	/*
	 * NOTE(review): failures below jump to free_master without freeing
	 * the cs gpios requested above (they are only freed in _remove) —
	 * looks like a leak on these error paths; confirm.
	 */
	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sspi->base)) {
		ret = PTR_ERR(sspi->base);
		goto free_master;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto free_master;
	}
	ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
			DRIVER_NAME, sspi);
	if (ret)
		goto free_master;

	sspi->bitbang.master = master;
	sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
	sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
	sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
	sspi->bitbang.master->setup = spi_sirfsoc_setup;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
					SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
	sspi->bitbang.master->dev.of_node = pdev->dev.of_node;

	/* request DMA channels */
	sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!sspi->rx_chan) {
		dev_err(&pdev->dev, "can not allocate rx dma channel\n");
		ret = -ENODEV;
		goto free_master;
	}
	sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!sspi->tx_chan) {
		dev_err(&pdev->dev, "can not allocate tx dma channel\n");
		ret = -ENODEV;
		goto free_rx_dma;
	}

	sspi->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sspi->clk)) {
		ret = PTR_ERR(sspi->clk);
		goto free_tx_dma;
	}
	clk_prepare_enable(sspi->clk);
	sspi->ctrl_freq = clk_get_rate(sspi->clk);

	init_completion(&sspi->rx_done);
	init_completion(&sspi->tx_done);

	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	/* We are not using dummy delay between command and data */
	writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);

	/* 2*PAGE_SIZE matches the IS_DMA_VALID() len < 2*PAGE_SIZE bound */
	sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
	if (!sspi->dummypage) {
		ret = -ENOMEM;
		goto free_clk;
	}

	ret = spi_bitbang_start(&sspi->bitbang);
	if (ret)
		goto free_dummypage;

	/* NOTE(review): "registerred" typo in the log message — left as-is */
	dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);

	return 0;
free_dummypage:
	kfree(sspi->dummypage);
free_clk:
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
free_tx_dma:
	dma_release_channel(sspi->tx_chan);
free_rx_dma:
	dma_release_channel(sspi->rx_chan);
free_master:
	spi_master_put(master);
err_cs:
	return ret;
}

/* Remove: release everything probe acquired, in reverse order. */
static int spi_sirfsoc_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct sirfsoc_spi *sspi;
	int i;

	master = platform_get_drvdata(pdev);
	sspi = spi_master_get_devdata(master);

	spi_bitbang_stop(&sspi->bitbang);
	for (i = 0; i < master->num_chipselect; i++) {
		if (sspi->chipselect[i] > 0)
			gpio_free(sspi->chipselect[i]);
	}
	kfree(sspi->dummypage);
	clk_disable_unprepare(sspi->clk);
	clk_put(sspi->clk);
	dma_release_channel(sspi->rx_chan);
	dma_release_channel(sspi->tx_chan);
	spi_master_put(master);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the SPI core, then gate the clock. */
static int spi_sirfsoc_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	clk_disable(sspi->clk);
	return 0;
}

/* System resume: re-enable the clock and re-initialize both FIFOs. */
static int spi_sirfsoc_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct sirfsoc_spi *sspi = spi_master_get_devdata(master);

	clk_enable(sspi->clk);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
	writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);

	return spi_master_resume(master);
}
#endif

static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
			 spi_sirfsoc_resume);

static const struct of_device_id spi_sirfsoc_of_match[] = {
	{ .compatible = "sirf,prima2-spi", },
	{ .compatible = "sirf,marco-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);

static struct platform_driver spi_sirfsoc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm     = &spi_sirfsoc_pm_ops,
		.of_match_table = spi_sirfsoc_of_match,
	},
	.probe = spi_sirfsoc_probe,
	.remove = spi_sirfsoc_remove,
};
module_platform_driver(spi_sirfsoc_driver);
MODULE_DESCRIPTION("SiRF SoC SPI master driver");
MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
MODULE_LICENSE("GPL v2");