Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

atmel_spi.c at commit 989a7241df87526bfef0396567e71ebe53a84ae4 (865 lines, 22 kB)
/*
 * Driver for Atmel AT32 and AT91 SPI Controllers
 *
 * Copyright (C) 2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>

#include <asm/io.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>
#include <asm/arch/cpu.h>

#include "atmel_spi.h"

/*
 * The core SPI transfer engine just talks to a register bank to set up
 * DMA transfers; transfer queue progress is driven by IRQs.  The clock
 * framework provides the base clock, subdivided for each spi_device.
 *
 * Newer controllers, marked with "new_1" flag, have:
 *  - CR.LASTXFER
 *  - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
 *  - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
 *  - SPI_CSRx.CSAAT
 *  - SPI_CSRx.SBCR allows faster clocking
 */
struct atmel_spi {
	spinlock_t		lock;

	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	struct platform_device	*pdev;
	unsigned		new_1:1;
	struct spi_device	*stay;

	u8			stopping;
	struct list_head	queue;
	struct spi_transfer	*current_transfer;
	unsigned long		current_remaining_bytes;
	struct spi_transfer	*next_transfer;
	unsigned long		next_remaining_bytes;

	void			*buffer;
	dma_addr_t		buffer_dma;
};

#define BUFFER_SIZE		PAGE_SIZE
#define INVALID_DMA_ADDRESS	0xffffffff

/*
 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
 * they assume that spi slave device state will not change on deselect, so
 * that automagic deselection is OK.  ("NPCSx rises if no data is to be
 * transmitted")  Not so!  Workaround uses nCSx pins as GPIOs; or newer
 * controllers have CSAAT and friends.
 *
 * Since the CSAAT functionality is a bit weird on newer controllers as
 * well, we use GPIO to control nCSx pins on all controllers, updating
 * MR.PCS to avoid confusing the controller.  Using GPIOs also lets us
 * support active-high chipselects despite the controller's belief that
 * only active-low devices/systems exist.
 *
 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
 * right when driven with GPIO.  ("Mode Fault does not allow more than one
 * Master on Chip Select 0.")  No workaround exists for that ... so for
 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
 * and (c) will trigger that first erratum in some cases.
 */

static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
	unsigned gpio = (unsigned) spi->controller_data;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;
	int i;
	u32 csr;
	u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;

	/* Make sure clock polarity is correct */
	for (i = 0; i < spi->master->num_chipselect; i++) {
		csr = spi_readl(as, CSR0 + 4 * i);
		if ((csr ^ cpol) & SPI_BIT(CPOL))
			spi_writel(as, CSR0 + 4 * i, csr ^ SPI_BIT(CPOL));
	}

	mr = spi_readl(as, MR);
	mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);

	dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
			gpio, active ? " (high)" : "",
			mr);

	if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
		gpio_set_value(gpio, active);
	spi_writel(as, MR, mr);
}

static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
	unsigned gpio = (unsigned) spi->controller_data;
	unsigned active = spi->mode & SPI_CS_HIGH;
	u32 mr;

	/* only deactivate *this* device; sometimes transfers to
	 * another device may be active when this routine is called.
	 */
	mr = spi_readl(as, MR);
	if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
		mr = SPI_BFINS(PCS, 0xf, mr);
		spi_writel(as, MR, mr);
	}

	dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
			gpio, active ? " (low)" : "",
			mr);

	if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
		gpio_set_value(gpio, !active);
}

static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
					struct spi_transfer *xfer)
{
	return msg->transfers.prev == &xfer->transfer_list;
}

static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
{
	return xfer->delay_usecs == 0 && !xfer->cs_change;
}

static void atmel_spi_next_xfer_data(struct spi_master *master,
				struct spi_transfer *xfer,
				dma_addr_t *tx_dma,
				dma_addr_t *rx_dma,
				u32 *plen)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	u32			len = *plen;

	/* use scratch buffer only when rx or tx data is unspecified */
	if (xfer->rx_buf)
		*rx_dma = xfer->rx_dma + xfer->len - len;
	else {
		*rx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
	}
	if (xfer->tx_buf)
		*tx_dma = xfer->tx_dma + xfer->len - len;
	else {
		*tx_dma = as->buffer_dma;
		if (len > BUFFER_SIZE)
			len = BUFFER_SIZE;
		memset(as->buffer, 0, len);
		dma_sync_single_for_device(&as->pdev->dev,
				as->buffer_dma, len, DMA_TO_DEVICE);
	}

	*plen = len;
}

/*
 * Submit next transfer for DMA.
 * lock is held, spi irq is blocked
 */
static void atmel_spi_next_xfer(struct spi_master *master,
				struct spi_message *msg)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_transfer	*xfer;
	u32			len, remaining, total;
	dma_addr_t		tx_dma, rx_dma;

	if (!as->current_transfer)
		xfer = list_entry(msg->transfers.next,
				struct spi_transfer, transfer_list);
	else if (!as->next_transfer)
		xfer = list_entry(as->current_transfer->transfer_list.next,
				struct spi_transfer, transfer_list);
	else
		xfer = NULL;

	if (xfer) {
		len = xfer->len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		remaining = xfer->len - len;

		spi_writel(as, RPR, rx_dma);
		spi_writel(as, TPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RCR, len);
		spi_writel(as, TCR, len);

		dev_dbg(&msg->spi->dev,
			"  start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	} else {
		xfer = as->next_transfer;
		remaining = as->next_remaining_bytes;
	}

	as->current_transfer = xfer;
	as->current_remaining_bytes = remaining;

	if (remaining > 0)
		len = remaining;
	else if (!atmel_spi_xfer_is_last(msg, xfer)
			&& atmel_spi_xfer_can_be_chained(xfer)) {
		xfer = list_entry(xfer->transfer_list.next,
				struct spi_transfer, transfer_list);
		len = xfer->len;
	} else
		xfer = NULL;

	as->next_transfer = xfer;

	if (xfer) {
		total = len;
		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
		as->next_remaining_bytes = total - len;

		spi_writel(as, RNPR, rx_dma);
		spi_writel(as, TNPR, tx_dma);

		if (msg->spi->bits_per_word > 8)
			len >>= 1;
		spi_writel(as, RNCR, len);
		spi_writel(as, TNCR, len);

		dev_dbg(&msg->spi->dev,
			"  next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	} else {
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
	}

	/* REVISIT: We're waiting for ENDRX before we start the next
	 * transfer because we need to handle some difficult timing
	 * issues otherwise. If we wait for ENDTX in one transfer and
	 * then start waiting for ENDRX in the next, it's difficult
	 * to tell the difference between the ENDRX interrupt we're
	 * actually waiting for and the ENDRX interrupt of the
	 * previous transfer.
	 *
	 * It should be doable, though. Just not now...
	 */
	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}

static void atmel_spi_next_message(struct spi_master *master)
{
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_device	*spi;

	BUG_ON(as->current_transfer);

	msg = list_entry(as->queue.next, struct spi_message, queue);
	spi = msg->spi;

	dev_dbg(master->dev.parent, "start message %p for %s\n",
			msg, spi->dev.bus_id);

	/* select chip if it's not still active */
	if (as->stay) {
		if (as->stay != spi) {
			cs_deactivate(as, as->stay);
			cs_activate(as, spi);
		}
		as->stay = NULL;
	} else
		cs_activate(as, spi);

	atmel_spi_next_xfer(master, msg);
}

/*
 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
 *  - The buffer is either valid for CPU access, else NULL
 *  - If the buffer is valid, so is its DMA address
 *
 * This driver manages the dma address unless message->is_dma_mapped.
 */
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
{
	struct device	*dev = &as->pdev->dev;

	xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
	if (xfer->tx_buf) {
		xfer->tx_dma = dma_map_single(dev,
				(void *) xfer->tx_buf, xfer->len,
				DMA_TO_DEVICE);
		if (dma_mapping_error(xfer->tx_dma))
			return -ENOMEM;
	}
	if (xfer->rx_buf) {
		xfer->rx_dma = dma_map_single(dev,
				xfer->rx_buf, xfer->len,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(xfer->rx_dma)) {
			if (xfer->tx_buf)
				dma_unmap_single(dev,
						xfer->tx_dma, xfer->len,
						DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}
	return 0;
}

static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	if (xfer->tx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->tx_dma,
				 xfer->len, DMA_TO_DEVICE);
	if (xfer->rx_dma != INVALID_DMA_ADDRESS)
		dma_unmap_single(master->dev.parent, xfer->rx_dma,
				 xfer->len, DMA_FROM_DEVICE);
}

static void
atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
		struct spi_message *msg, int status, int stay)
{
	if (!stay || status < 0)
		cs_deactivate(as, msg->spi);
	else
		as->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	spin_unlock(&as->lock);
	msg->complete(msg->context);
	spin_lock(&as->lock);

	as->current_transfer = NULL;
	as->next_transfer = NULL;

	/* continue if needed */
	if (list_empty(&as->queue) || as->stopping)
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	else
		atmel_spi_next_message(master);
}

static irqreturn_t
atmel_spi_interrupt(int irq, void *dev_id)
{
	struct spi_master	*master = dev_id;
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;
	struct spi_transfer	*xfer;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	spin_lock(&as->lock);

	xfer = as->current_transfer;
	msg = list_entry(as->queue.next, struct spi_message, queue);

	imr = spi_readl(as, IMR);
	status = spi_readl(as, SR);
	pending = status & imr;

	if (pending & SPI_BIT(OVRES)) {
		int timeout;

		ret = IRQ_HANDLED;

		spi_writel(as, IDR, (SPI_BIT(ENDTX) | SPI_BIT(ENDRX)
				     | SPI_BIT(OVRES)));

		/*
		 * When we get an overrun, we disregard the current
		 * transfer. Data will not be copied back from any
		 * bounce buffer and msg->actual_length will not be
		 * updated with the last xfer.
		 *
		 * We will also not process any remaining transfers in
		 * the message.
		 *
		 * First, stop the transfer and unmap the DMA buffers.
		 */
		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
		if (!msg->is_dma_mapped)
			atmel_spi_dma_unmap_xfer(master, xfer);

		/* REVISIT: udelay in irq is unfriendly */
		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		dev_warn(master->dev.parent, "fifo overrun (%u/%u remaining)\n",
			 spi_readl(as, TCR), spi_readl(as, RCR));

		/*
		 * Clean up DMA registers and make sure the data
		 * registers are empty.
		 */
		spi_writel(as, RNCR, 0);
		spi_writel(as, TNCR, 0);
		spi_writel(as, RCR, 0);
		spi_writel(as, TCR, 0);
		for (timeout = 1000; timeout; timeout--)
			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
				break;
		if (!timeout)
			dev_warn(master->dev.parent,
				 "timeout waiting for TXEMPTY\n");
		while (spi_readl(as, SR) & SPI_BIT(RDRF))
			spi_readl(as, RDR);

		/* Clear any overrun happening while cleaning up */
		spi_readl(as, SR);

		atmel_spi_msg_done(master, as, msg, -EIO, 0);
	} else if (pending & SPI_BIT(ENDRX)) {
		ret = IRQ_HANDLED;

		spi_writel(as, IDR, pending);

		if (as->current_remaining_bytes == 0) {
			msg->actual_length += xfer->len;

			if (!msg->is_dma_mapped)
				atmel_spi_dma_unmap_xfer(master, xfer);

			/* REVISIT: udelay in irq is unfriendly */
			if (xfer->delay_usecs)
				udelay(xfer->delay_usecs);

			if (atmel_spi_xfer_is_last(msg, xfer)) {
				/* report completed message */
				atmel_spi_msg_done(master, as, msg, 0,
						xfer->cs_change);
			} else {
				if (xfer->cs_change) {
					cs_deactivate(as, msg->spi);
					udelay(1);
					cs_activate(as, msg->spi);
				}

				/*
				 * Not done yet. Submit the next transfer.
				 *
				 * FIXME handle protocol options for xfer
				 */
				atmel_spi_next_xfer(master, msg);
			}
		} else {
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			atmel_spi_next_xfer(master, msg);
		}
	}

	spin_unlock(&as->lock);

	return ret;
}

/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)

static int atmel_spi_setup(struct spi_device *spi)
{
	struct atmel_spi	*as;
	u32			scbr, csr;
	unsigned int		bits = spi->bits_per_word;
	unsigned long		bus_hz, sck_hz;
	unsigned int		npcs_pin;
	int			ret;

	as = spi_master_get_devdata(spi->master);

	if (as->stopping)
		return -ESHUTDOWN;

	if (spi->chip_select > spi->master->num_chipselect) {
		dev_dbg(&spi->dev,
				"setup: invalid chipselect %u (%u defined)\n",
				spi->chip_select, spi->master->num_chipselect);
		return -EINVAL;
	}

	if (bits == 0)
		bits = 8;
	if (bits < 8 || bits > 16) {
		dev_dbg(&spi->dev,
				"setup: invalid bits_per_word %u (8 to 16)\n",
				bits);
		return -EINVAL;
	}

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	/* see notes above re chipselect */
	if (cpu_is_at91rm9200()
			&& spi->chip_select == 0
			&& (spi->mode & SPI_CS_HIGH)) {
		dev_dbg(&spi->dev, "setup: can't be active-high\n");
		return -EINVAL;
	}

	/* speed zero convention is used by some upper layers */
	bus_hz = clk_get_rate(as->clk);
	if (spi->max_speed_hz) {
		/* assume div32/fdiv/mbz == 0 */
		if (!as->new_1)
			bus_hz /= 2;
		scbr = ((bus_hz + spi->max_speed_hz - 1)
			/ spi->max_speed_hz);
		if (scbr >= (1 << SPI_SCBR_SIZE)) {
			dev_dbg(&spi->dev,
				"setup: %d Hz too slow, scbr %u; min %ld Hz\n",
				spi->max_speed_hz, scbr, bus_hz/255);
			return -EINVAL;
		}
	} else
		scbr = 0xff;
	sck_hz = bus_hz / scbr;

	csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
	if (spi->mode & SPI_CPOL)
		csr |= SPI_BIT(CPOL);
	if (!(spi->mode & SPI_CPHA))
		csr |= SPI_BIT(NCPHA);

	/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
	 *
	 * DLYBCT would add delays between words, slowing down transfers.
	 * It could potentially be useful to cope with DMA bottlenecks, but
	 * in those cases it's probably best to just use a lower bitrate.
	 */
	csr |= SPI_BF(DLYBS, 0);
	csr |= SPI_BF(DLYBCT, 0);

	/* chipselect must have been muxed as GPIO (e.g. in board setup) */
	npcs_pin = (unsigned int)spi->controller_data;
	if (!spi->controller_state) {
		ret = gpio_request(npcs_pin, spi->dev.bus_id);
		if (ret)
			return ret;
		spi->controller_state = (void *)npcs_pin;
		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
	} else {
		unsigned long		flags;

		spin_lock_irqsave(&as->lock, flags);
		if (as->stay == spi)
			as->stay = NULL;
		cs_deactivate(as, spi);
		spin_unlock_irqrestore(&as->lock, flags);
	}

	dev_dbg(&spi->dev,
		"setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
		sck_hz, bits, spi->mode, spi->chip_select, csr);

	spi_writel(as, CSR0 + 4 * spi->chip_select, csr);

	return 0;
}

static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct atmel_spi	*as;
	struct spi_transfer	*xfer;
	unsigned long		flags;
	struct device		*controller = spi->master->dev.parent;

	as = spi_master_get_devdata(spi->master);

	dev_dbg(controller, "new message %p submitted for %s\n",
			msg, spi->dev.bus_id);

	if (unlikely(list_empty(&msg->transfers)
			|| !spi->max_speed_hz))
		return -EINVAL;

	if (as->stopping)
		return -ESHUTDOWN;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!(xfer->tx_buf || xfer->rx_buf)) {
			dev_dbg(&spi->dev, "missing rx or tx buf\n");
			return -EINVAL;
		}

		/* FIXME implement these protocol options!! */
		if (xfer->bits_per_word || xfer->speed_hz) {
			dev_dbg(&spi->dev, "no protocol options yet\n");
			return -ENOPROTOOPT;
		}

		/*
		 * DMA map early, for performance (empties dcache ASAP) and
		 * better fault reporting. This is a DMA-only driver.
		 *
		 * NOTE that if dma_unmap_single() ever starts to do work on
		 * platforms supported by this driver, we would need to clean
		 * up mappings for previously-mapped transfers.
		 */
		if (!msg->is_dma_mapped) {
			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
				return -ENOMEM;
		}
	}

#ifdef VERBOSE
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		dev_dbg(controller,
			"  xfer %p: len %u tx %p/%08x rx %p/%08x\n",
			xfer, xfer->len,
			xfer->tx_buf, xfer->tx_dma,
			xfer->rx_buf, xfer->rx_dma);
	}
#endif

	msg->status = -EINPROGRESS;
	msg->actual_length = 0;

	spin_lock_irqsave(&as->lock, flags);
	list_add_tail(&msg->queue, &as->queue);
	if (!as->current_transfer)
		atmel_spi_next_message(spi->master);
	spin_unlock_irqrestore(&as->lock, flags);

	return 0;
}

static void atmel_spi_cleanup(struct spi_device *spi)
{
	struct atmel_spi	*as = spi_master_get_devdata(spi->master);
	unsigned		gpio = (unsigned) spi->controller_data;
	unsigned long		flags;

	if (!spi->controller_state)
		return;

	spin_lock_irqsave(&as->lock, flags);
	if (as->stay == spi) {
		as->stay = NULL;
		cs_deactivate(as, spi);
	}
	spin_unlock_irqrestore(&as->lock, flags);

	gpio_free(gpio);
}

/*-------------------------------------------------------------------------*/

static int __init atmel_spi_probe(struct platform_device *pdev)
{
	struct resource		*regs;
	int			irq;
	struct clk		*clk;
	int			ret;
	struct spi_master	*master;
	struct atmel_spi	*as;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	clk = clk_get(&pdev->dev, "spi_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* setup spi core then atmel-specific driver state */
	ret = -ENOMEM;
	master = spi_alloc_master(&pdev->dev, sizeof *as);
	if (!master)
		goto out_free;

	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->setup = atmel_spi_setup;
	master->transfer = atmel_spi_transfer;
	master->cleanup = atmel_spi_cleanup;
	platform_set_drvdata(pdev, master);

	as = spi_master_get_devdata(master);

	/*
	 * Scratch buffer is used for throwaway rx and tx data.
	 * It's coherent to minimize dcache pollution.
	 */
	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
					&as->buffer_dma, GFP_KERNEL);
	if (!as->buffer)
		goto out_free;

	spin_lock_init(&as->lock);
	INIT_LIST_HEAD(&as->queue);
	as->pdev = pdev;
	as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
	if (!as->regs)
		goto out_free_buffer;
	as->irq = irq;
	as->clk = clk;
	if (!cpu_is_at91rm9200())
		as->new_1 = 1;

	ret = request_irq(irq, atmel_spi_interrupt, 0,
			pdev->dev.bus_id, master);
	if (ret)
		goto out_unmap_regs;

	/* Initialize the hardware */
	clk_enable(clk);
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
	spi_writel(as, CR, SPI_BIT(SPIEN));

	/* go! */
	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
			(unsigned long)regs->start, irq);

	ret = spi_register_master(master);
	if (ret)
		goto out_reset_hw;

	return 0;

out_reset_hw:
	spi_writel(as, CR, SPI_BIT(SWRST));
	clk_disable(clk);
	free_irq(irq, master);
out_unmap_regs:
	iounmap(as->regs);
out_free_buffer:
	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);
out_free:
	clk_put(clk);
	spi_master_put(master);
	return ret;
}

static int __exit atmel_spi_remove(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);
	struct spi_message	*msg;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&as->lock);
	as->stopping = 1;
	spi_writel(as, CR, SPI_BIT(SWRST));
	spi_readl(as, SR);
	spin_unlock_irq(&as->lock);

	/* Terminate remaining queued transfers */
	list_for_each_entry(msg, &as->queue, queue) {
		/* REVISIT unmapping the dma is a NOP on ARM and AVR32
		 * but we shouldn't depend on that...
		 */
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
			as->buffer_dma);

	clk_disable(as->clk);
	clk_put(as->clk);
	free_irq(as->irq, master);
	iounmap(as->regs);

	spi_unregister_master(master);

	return 0;
}

#ifdef CONFIG_PM

static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	clk_disable(as->clk);
	return 0;
}

static int atmel_spi_resume(struct platform_device *pdev)
{
	struct spi_master	*master = platform_get_drvdata(pdev);
	struct atmel_spi	*as = spi_master_get_devdata(master);

	clk_enable(as->clk);
	return 0;
}

#else
#define atmel_spi_suspend	NULL
#define atmel_spi_resume	NULL
#endif


static struct platform_driver atmel_spi_driver = {
	.driver		= {
		.name	= "atmel_spi",
		.owner	= THIS_MODULE,
	},
	.suspend	= atmel_spi_suspend,
	.resume		= atmel_spi_resume,
	.remove		= __exit_p(atmel_spi_remove),
};

static int __init atmel_spi_init(void)
{
	return platform_driver_probe(&atmel_spi_driver, atmel_spi_probe);
}
module_init(atmel_spi_init);

static void __exit atmel_spi_exit(void)
{
	platform_driver_unregister(&atmel_spi_driver);
}
module_exit(atmel_spi_exit);

MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_LICENSE("GPL");
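
For context, the comment in atmel_spi_setup() notes that each chipselect "must have been muxed as GPIO (e.g. in board setup)". The following is a minimal sketch of what such a board file of this era might look like; ek_spi_devices and ek_board_init are hypothetical names, and at91_add_device_spi() is the AT91 platform helper that registers the "atmel_spi" platform device and stores each chip select's GPIO pin in spi_board_info.controller_data, which cs_activate()/cs_deactivate() above then drive directly.

/*
 * Sketch of era-appropriate AT91 board setup (names illustrative).
 * Registers one SPI slave on this controller; the platform code picks
 * the NPCSx GPIO and passes it to the driver via controller_data.
 */
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <asm/arch/board.h>

static struct spi_board_info ek_spi_devices[] = {
	{
		.modalias	= "mtd_dataflash",	/* bound by the DataFlash driver */
		.chip_select	= 1,			/* NPCS1; NPCS0 is quirky on at91rm9200 */
		.bus_num	= 0,			/* matches pdev->id of the controller */
		.max_speed_hz	= 15 * 1000 * 1000,	/* driver rounds this via SCBR */
	},
};

static void __init ek_board_init(void)
{
	/* registers the atmel_spi controller and the board's SPI slaves */
	at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
}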
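On clocking, note that atmel_spi_setup() rounds the SCBR divider up, so the programmed rate never exceeds max_speed_hz. Assuming, say, a 60 MHz spi_clk on a "new_1" controller, a device requesting 8 MHz gets scbr = ceil(60/8) = 8 and thus sck_hz = 7.5 MHz; on at91rm9200 the base clock is halved before the division.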