Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.38-rc7 1490 lines 41 kB view raw
1/* 2 * Blackfin On-Chip SPI Driver 3 * 4 * Copyright 2004-2010 Analog Devices Inc. 5 * 6 * Enter bugs at http://blackfin.uclinux.org/ 7 * 8 * Licensed under the GPL-2 or later. 9 */ 10 11#include <linux/init.h> 12#include <linux/module.h> 13#include <linux/delay.h> 14#include <linux/device.h> 15#include <linux/slab.h> 16#include <linux/io.h> 17#include <linux/ioport.h> 18#include <linux/irq.h> 19#include <linux/errno.h> 20#include <linux/interrupt.h> 21#include <linux/platform_device.h> 22#include <linux/dma-mapping.h> 23#include <linux/spi/spi.h> 24#include <linux/workqueue.h> 25 26#include <asm/dma.h> 27#include <asm/portmux.h> 28#include <asm/bfin5xx_spi.h> 29#include <asm/cacheflush.h> 30 31#define DRV_NAME "bfin-spi" 32#define DRV_AUTHOR "Bryan Wu, Luke Yang" 33#define DRV_DESC "Blackfin on-chip SPI Controller Driver" 34#define DRV_VERSION "1.0" 35 36MODULE_AUTHOR(DRV_AUTHOR); 37MODULE_DESCRIPTION(DRV_DESC); 38MODULE_LICENSE("GPL"); 39 40#define START_STATE ((void *)0) 41#define RUNNING_STATE ((void *)1) 42#define DONE_STATE ((void *)2) 43#define ERROR_STATE ((void *)-1) 44 45struct bfin_spi_master_data; 46 47struct bfin_spi_transfer_ops { 48 void (*write) (struct bfin_spi_master_data *); 49 void (*read) (struct bfin_spi_master_data *); 50 void (*duplex) (struct bfin_spi_master_data *); 51}; 52 53struct bfin_spi_master_data { 54 /* Driver model hookup */ 55 struct platform_device *pdev; 56 57 /* SPI framework hookup */ 58 struct spi_master *master; 59 60 /* Regs base of SPI controller */ 61 void __iomem *regs_base; 62 63 /* Pin request list */ 64 u16 *pin_req; 65 66 /* BFIN hookup */ 67 struct bfin5xx_spi_master *master_info; 68 69 /* Driver message queue */ 70 struct workqueue_struct *workqueue; 71 struct work_struct pump_messages; 72 spinlock_t lock; 73 struct list_head queue; 74 int busy; 75 bool running; 76 77 /* Message Transfer pump */ 78 struct tasklet_struct pump_transfers; 79 80 /* Current message transfer state info */ 81 struct spi_message *cur_msg; 82 struct spi_transfer *cur_transfer; 83 struct bfin_spi_slave_data *cur_chip; 84 size_t len_in_bytes; 85 size_t len; 86 void *tx; 87 void *tx_end; 88 void *rx; 89 void *rx_end; 90 91 /* DMA stuffs */ 92 int dma_channel; 93 int dma_mapped; 94 int dma_requested; 95 dma_addr_t rx_dma; 96 dma_addr_t tx_dma; 97 98 int irq_requested; 99 int spi_irq; 100 101 size_t rx_map_len; 102 size_t tx_map_len; 103 u8 n_bytes; 104 u16 ctrl_reg; 105 u16 flag_reg; 106 107 int cs_change; 108 const struct bfin_spi_transfer_ops *ops; 109}; 110 111struct bfin_spi_slave_data { 112 u16 ctl_reg; 113 u16 baud; 114 u16 flag; 115 116 u8 chip_select_num; 117 u8 enable_dma; 118 u16 cs_chg_udelay; /* Some devices require > 255usec delay */ 119 u32 cs_gpio; 120 u16 idle_tx_val; 121 u8 pio_interrupt; /* use spi data irq */ 122 const struct bfin_spi_transfer_ops *ops; 123}; 124 125#define DEFINE_SPI_REG(reg, off) \ 126static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ 127 { return bfin_read16(drv_data->regs_base + off); } \ 128static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ 129 { bfin_write16(drv_data->regs_base + off, v); } 130 131DEFINE_SPI_REG(CTRL, 0x00) 132DEFINE_SPI_REG(FLAG, 0x04) 133DEFINE_SPI_REG(STAT, 0x08) 134DEFINE_SPI_REG(TDBR, 0x0C) 135DEFINE_SPI_REG(RDBR, 0x10) 136DEFINE_SPI_REG(BAUD, 0x14) 137DEFINE_SPI_REG(SHAW, 0x18) 138 139static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) 140{ 141 u16 cr; 142 143 cr = read_CTRL(drv_data); 144 write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); 145} 146 147static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) 148{ 149 u16 cr; 150 151 cr = read_CTRL(drv_data); 152 write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE))); 153} 154 155/* Caculate the SPI_BAUD register value based on input HZ */ 156static u16 hz_to_spi_baud(u32 speed_hz) 157{ 158 u_long sclk = get_sclk(); 159 u16 spi_baud = (sclk / (2 * speed_hz)); 160 161 if ((sclk % (2 * speed_hz)) > 0) 162 spi_baud++; 163 164 if (spi_baud < MIN_SPI_BAUD_VAL) 165 spi_baud = MIN_SPI_BAUD_VAL; 166 167 return spi_baud; 168} 169 170static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) 171{ 172 unsigned long limit = loops_per_jiffy << 1; 173 174 /* wait for stop and clear stat */ 175 while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit) 176 cpu_relax(); 177 178 write_STAT(drv_data, BIT_STAT_CLR); 179 180 return limit; 181} 182 183/* Chip select operation functions for cs_change flag */ 184static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) 185{ 186 if (likely(chip->chip_select_num < MAX_CTRL_CS)) { 187 u16 flag = read_FLAG(drv_data); 188 189 flag &= ~chip->flag; 190 191 write_FLAG(drv_data, flag); 192 } else { 193 gpio_set_value(chip->cs_gpio, 0); 194 } 195} 196 197static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, 198 struct bfin_spi_slave_data *chip) 199{ 200 if (likely(chip->chip_select_num < MAX_CTRL_CS)) { 201 u16 flag = read_FLAG(drv_data); 202 203 flag |= chip->flag; 204 205 write_FLAG(drv_data, flag); 206 } else { 207 gpio_set_value(chip->cs_gpio, 1); 208 } 209 210 /* Move delay here for consistency */ 211 if (chip->cs_chg_udelay) 212 udelay(chip->cs_chg_udelay); 213} 214 215/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ 216static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, 217 struct bfin_spi_slave_data *chip) 218{ 219 if (chip->chip_select_num < MAX_CTRL_CS) { 220 u16 flag = read_FLAG(drv_data); 221 222 flag |= (chip->flag >> 8); 223 224 write_FLAG(drv_data, flag); 225 } 226} 227 228static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, 229 struct bfin_spi_slave_data *chip) 230{ 231 if (chip->chip_select_num < MAX_CTRL_CS) { 232 u16 flag = read_FLAG(drv_data); 233 234 flag &= ~(chip->flag >> 8); 235 236 write_FLAG(drv_data, flag); 237 } 238} 239 240/* stop controller and re-config current chip*/ 241static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) 242{ 243 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 244 245 /* Clear status and disable clock */ 246 write_STAT(drv_data, BIT_STAT_CLR); 247 bfin_spi_disable(drv_data); 248 dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); 249 250 SSYNC(); 251 252 /* Load the registers */ 253 write_CTRL(drv_data, chip->ctl_reg); 254 write_BAUD(drv_data, chip->baud); 255 256 bfin_spi_enable(drv_data); 257 bfin_spi_cs_active(drv_data, chip); 258} 259 260/* used to kick off transfer in rx mode and read unwanted RX data */ 261static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) 262{ 263 (void) read_RDBR(drv_data); 264} 265 266static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) 267{ 268 /* clear RXS (we check for RXS inside the loop) */ 269 bfin_spi_dummy_read(drv_data); 270 271 while (drv_data->tx < drv_data->tx_end) { 272 write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); 273 /* wait until transfer finished. 274 checking SPIF or TXS may not guarantee transfer completion */ 275 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 276 cpu_relax(); 277 /* discard RX data and clear RXS */ 278 bfin_spi_dummy_read(drv_data); 279 } 280} 281 282static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) 283{ 284 u16 tx_val = drv_data->cur_chip->idle_tx_val; 285 286 /* discard old RX data and clear RXS */ 287 bfin_spi_dummy_read(drv_data); 288 289 while (drv_data->rx < drv_data->rx_end) { 290 write_TDBR(drv_data, tx_val); 291 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 292 cpu_relax(); 293 *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); 294 } 295} 296 297static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) 298{ 299 /* discard old RX data and clear RXS */ 300 bfin_spi_dummy_read(drv_data); 301 302 while (drv_data->rx < drv_data->rx_end) { 303 write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); 304 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 305 cpu_relax(); 306 *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); 307 } 308} 309 310static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { 311 .write = bfin_spi_u8_writer, 312 .read = bfin_spi_u8_reader, 313 .duplex = bfin_spi_u8_duplex, 314}; 315 316static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) 317{ 318 /* clear RXS (we check for RXS inside the loop) */ 319 bfin_spi_dummy_read(drv_data); 320 321 while (drv_data->tx < drv_data->tx_end) { 322 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 323 drv_data->tx += 2; 324 /* wait until transfer finished. 325 checking SPIF or TXS may not guarantee transfer completion */ 326 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 327 cpu_relax(); 328 /* discard RX data and clear RXS */ 329 bfin_spi_dummy_read(drv_data); 330 } 331} 332 333static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) 334{ 335 u16 tx_val = drv_data->cur_chip->idle_tx_val; 336 337 /* discard old RX data and clear RXS */ 338 bfin_spi_dummy_read(drv_data); 339 340 while (drv_data->rx < drv_data->rx_end) { 341 write_TDBR(drv_data, tx_val); 342 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 343 cpu_relax(); 344 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 345 drv_data->rx += 2; 346 } 347} 348 349static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) 350{ 351 /* discard old RX data and clear RXS */ 352 bfin_spi_dummy_read(drv_data); 353 354 while (drv_data->rx < drv_data->rx_end) { 355 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 356 drv_data->tx += 2; 357 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 358 cpu_relax(); 359 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 360 drv_data->rx += 2; 361 } 362} 363 364static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { 365 .write = bfin_spi_u16_writer, 366 .read = bfin_spi_u16_reader, 367 .duplex = bfin_spi_u16_duplex, 368}; 369 370/* test if there is more transfer to be done */ 371static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) 372{ 373 struct spi_message *msg = drv_data->cur_msg; 374 struct spi_transfer *trans = drv_data->cur_transfer; 375 376 /* Move to next transfer */ 377 if (trans->transfer_list.next != &msg->transfers) { 378 drv_data->cur_transfer = 379 list_entry(trans->transfer_list.next, 380 struct spi_transfer, transfer_list); 381 return RUNNING_STATE; 382 } else 383 return DONE_STATE; 384} 385 386/* 387 * caller already set message->status; 388 * dma and pio irqs are blocked give finished message back 389 */ 390static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) 391{ 392 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 393 struct spi_transfer *last_transfer; 394 unsigned long flags; 395 struct spi_message *msg; 396 397 spin_lock_irqsave(&drv_data->lock, flags); 398 msg = drv_data->cur_msg; 399 drv_data->cur_msg = NULL; 400 drv_data->cur_transfer = NULL; 401 drv_data->cur_chip = NULL; 402 queue_work(drv_data->workqueue, &drv_data->pump_messages); 403 spin_unlock_irqrestore(&drv_data->lock, flags); 404 405 last_transfer = list_entry(msg->transfers.prev, 406 struct spi_transfer, transfer_list); 407 408 msg->state = NULL; 409 410 if (!drv_data->cs_change) 411 bfin_spi_cs_deactive(drv_data, chip); 412 413 /* Not stop spi in autobuffer mode */ 414 if (drv_data->tx_dma != 0xFFFF) 415 bfin_spi_disable(drv_data); 416 417 if (msg->complete) 418 msg->complete(msg->context); 419} 420 421/* spi data irq handler */ 422static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) 423{ 424 struct bfin_spi_master_data *drv_data = dev_id; 425 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 426 struct spi_message *msg = drv_data->cur_msg; 427 int n_bytes = drv_data->n_bytes; 428 429 /* wait until transfer finished. */ 430 while (!(read_STAT(drv_data) & BIT_STAT_RXS)) 431 cpu_relax(); 432 433 if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || 434 (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { 435 /* last read */ 436 if (drv_data->rx) { 437 dev_dbg(&drv_data->pdev->dev, "last read\n"); 438 if (n_bytes == 2) 439 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 440 else if (n_bytes == 1) 441 *(u8 *) (drv_data->rx) = read_RDBR(drv_data); 442 drv_data->rx += n_bytes; 443 } 444 445 msg->actual_length += drv_data->len_in_bytes; 446 if (drv_data->cs_change) 447 bfin_spi_cs_deactive(drv_data, chip); 448 /* Move to next transfer */ 449 msg->state = bfin_spi_next_transfer(drv_data); 450 451 disable_irq_nosync(drv_data->spi_irq); 452 453 /* Schedule transfer tasklet */ 454 tasklet_schedule(&drv_data->pump_transfers); 455 return IRQ_HANDLED; 456 } 457 458 if (drv_data->rx && drv_data->tx) { 459 /* duplex */ 460 dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); 461 if (drv_data->n_bytes == 2) { 462 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 463 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 464 } else if (drv_data->n_bytes == 1) { 465 *(u8 *) (drv_data->rx) = read_RDBR(drv_data); 466 write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); 467 } 468 } else if (drv_data->rx) { 469 /* read */ 470 dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); 471 if (drv_data->n_bytes == 2) 472 *(u16 *) (drv_data->rx) = read_RDBR(drv_data); 473 else if (drv_data->n_bytes == 1) 474 *(u8 *) (drv_data->rx) = read_RDBR(drv_data); 475 write_TDBR(drv_data, chip->idle_tx_val); 476 } else if (drv_data->tx) { 477 /* write */ 478 dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); 479 bfin_spi_dummy_read(drv_data); 480 if (drv_data->n_bytes == 2) 481 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 482 else if (drv_data->n_bytes == 1) 483 write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); 484 } 485 486 if (drv_data->tx) 487 drv_data->tx += n_bytes; 488 if (drv_data->rx) 489 drv_data->rx += n_bytes; 490 491 return IRQ_HANDLED; 492} 493 494static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) 495{ 496 struct bfin_spi_master_data *drv_data = dev_id; 497 struct bfin_spi_slave_data *chip = drv_data->cur_chip; 498 struct spi_message *msg = drv_data->cur_msg; 499 unsigned long timeout; 500 unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); 501 u16 spistat = read_STAT(drv_data); 502 503 dev_dbg(&drv_data->pdev->dev, 504 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", 505 dmastat, spistat); 506 507 if (drv_data->rx != NULL) { 508 u16 cr = read_CTRL(drv_data); 509 /* discard old RX data and clear RXS */ 510 bfin_spi_dummy_read(drv_data); 511 write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ 512 write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ 513 write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ 514 } 515 516 clear_dma_irqstat(drv_data->dma_channel); 517 518 /* 519 * wait for the last transaction shifted out. HRM states: 520 * at this point there may still be data in the SPI DMA FIFO waiting 521 * to be transmitted ... software needs to poll TXS in the SPI_STAT 522 * register until it goes low for 2 successive reads 523 */ 524 if (drv_data->tx != NULL) { 525 while ((read_STAT(drv_data) & BIT_STAT_TXS) || 526 (read_STAT(drv_data) & BIT_STAT_TXS)) 527 cpu_relax(); 528 } 529 530 dev_dbg(&drv_data->pdev->dev, 531 "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", 532 dmastat, read_STAT(drv_data)); 533 534 timeout = jiffies + HZ; 535 while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) 536 if (!time_before(jiffies, timeout)) { 537 dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); 538 break; 539 } else 540 cpu_relax(); 541 542 if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { 543 msg->state = ERROR_STATE; 544 dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); 545 } else { 546 msg->actual_length += drv_data->len_in_bytes; 547 548 if (drv_data->cs_change) 549 bfin_spi_cs_deactive(drv_data, chip); 550 551 /* Move to next transfer */ 552 msg->state = bfin_spi_next_transfer(drv_data); 553 } 554 555 /* Schedule transfer tasklet */ 556 tasklet_schedule(&drv_data->pump_transfers); 557 558 /* free the irq handler before next transfer */ 559 dev_dbg(&drv_data->pdev->dev, 560 "disable dma channel irq%d\n", 561 drv_data->dma_channel); 562 dma_disable_irq_nosync(drv_data->dma_channel); 563 564 return IRQ_HANDLED; 565} 566 567static void bfin_spi_pump_transfers(unsigned long data) 568{ 569 struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; 570 struct spi_message *message = NULL; 571 struct spi_transfer *transfer = NULL; 572 struct spi_transfer *previous = NULL; 573 struct bfin_spi_slave_data *chip = NULL; 574 unsigned int bits_per_word; 575 u16 cr, cr_width, dma_width, dma_config; 576 u32 tranf_success = 1; 577 u8 full_duplex = 0; 578 579 /* Get current state information */ 580 message = drv_data->cur_msg; 581 transfer = drv_data->cur_transfer; 582 chip = drv_data->cur_chip; 583 584 /* 585 * if msg is error or done, report it back using complete() callback 586 */ 587 588 /* Handle for abort */ 589 if (message->state == ERROR_STATE) { 590 dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n"); 591 message->status = -EIO; 592 bfin_spi_giveback(drv_data); 593 return; 594 } 595 596 /* Handle end of message */ 597 if (message->state == DONE_STATE) { 598 dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); 599 message->status = 0; 600 bfin_spi_giveback(drv_data); 601 return; 602 } 603 604 /* Delay if requested at end of transfer */ 605 if (message->state == RUNNING_STATE) { 606 dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n"); 607 previous = list_entry(transfer->transfer_list.prev, 608 struct spi_transfer, transfer_list); 609 if (previous->delay_usecs) 610 udelay(previous->delay_usecs); 611 } 612 613 /* Flush any existing transfers that may be sitting in the hardware */ 614 if (bfin_spi_flush(drv_data) == 0) { 615 dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); 616 message->status = -EIO; 617 bfin_spi_giveback(drv_data); 618 return; 619 } 620 621 if (transfer->len == 0) { 622 /* Move to next transfer of this msg */ 623 message->state = bfin_spi_next_transfer(drv_data); 624 /* Schedule next transfer tasklet */ 625 tasklet_schedule(&drv_data->pump_transfers); 626 } 627 628 if (transfer->tx_buf != NULL) { 629 drv_data->tx = (void *)transfer->tx_buf; 630 drv_data->tx_end = drv_data->tx + transfer->len; 631 dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n", 632 transfer->tx_buf, drv_data->tx_end); 633 } else { 634 drv_data->tx = NULL; 635 } 636 637 if (transfer->rx_buf != NULL) { 638 full_duplex = transfer->tx_buf != NULL; 639 drv_data->rx = transfer->rx_buf; 640 drv_data->rx_end = drv_data->rx + transfer->len; 641 dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n", 642 transfer->rx_buf, drv_data->rx_end); 643 } else { 644 drv_data->rx = NULL; 645 } 646 647 drv_data->rx_dma = transfer->rx_dma; 648 drv_data->tx_dma = transfer->tx_dma; 649 drv_data->len_in_bytes = transfer->len; 650 drv_data->cs_change = transfer->cs_change; 651 652 /* Bits per word setup */ 653 bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; 654 if (bits_per_word == 8) { 655 drv_data->n_bytes = 1; 656 drv_data->len = transfer->len; 657 cr_width = 0; 658 drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; 659 } else if (bits_per_word == 16) { 660 drv_data->n_bytes = 2; 661 drv_data->len = (transfer->len) >> 1; 662 cr_width = BIT_CTL_WORDSIZE; 663 drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; 664 } else { 665 dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); 666 message->status = -EINVAL; 667 bfin_spi_giveback(drv_data); 668 return; 669 } 670 cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); 671 cr |= cr_width; 672 write_CTRL(drv_data, cr); 673 674 dev_dbg(&drv_data->pdev->dev, 675 "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", 676 drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); 677 678 message->state = RUNNING_STATE; 679 dma_config = 0; 680 681 /* Speed setup (surely valid because already checked) */ 682 if (transfer->speed_hz) 683 write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz)); 684 else 685 write_BAUD(drv_data, chip->baud); 686 687 write_STAT(drv_data, BIT_STAT_CLR); 688 bfin_spi_cs_active(drv_data, chip); 689 690 dev_dbg(&drv_data->pdev->dev, 691 "now pumping a transfer: width is %d, len is %d\n", 692 cr_width, transfer->len); 693 694 /* 695 * Try to map dma buffer and do a dma transfer. If successful use, 696 * different way to r/w according to the enable_dma settings and if 697 * we are not doing a full duplex transfer (since the hardware does 698 * not support full duplex DMA transfers). 699 */ 700 if (!full_duplex && drv_data->cur_chip->enable_dma 701 && drv_data->len > 6) { 702 703 unsigned long dma_start_addr, flags; 704 705 disable_dma(drv_data->dma_channel); 706 clear_dma_irqstat(drv_data->dma_channel); 707 708 /* config dma channel */ 709 dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); 710 set_dma_x_count(drv_data->dma_channel, drv_data->len); 711 if (cr_width == BIT_CTL_WORDSIZE) { 712 set_dma_x_modify(drv_data->dma_channel, 2); 713 dma_width = WDSIZE_16; 714 } else { 715 set_dma_x_modify(drv_data->dma_channel, 1); 716 dma_width = WDSIZE_8; 717 } 718 719 /* poll for SPI completion before start */ 720 while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) 721 cpu_relax(); 722 723 /* dirty hack for autobuffer DMA mode */ 724 if (drv_data->tx_dma == 0xFFFF) { 725 dev_dbg(&drv_data->pdev->dev, 726 "doing autobuffer DMA out.\n"); 727 728 /* no irq in autobuffer mode */ 729 dma_config = 730 (DMAFLOW_AUTO | RESTART | dma_width | DI_EN); 731 set_dma_config(drv_data->dma_channel, dma_config); 732 set_dma_start_addr(drv_data->dma_channel, 733 (unsigned long)drv_data->tx); 734 enable_dma(drv_data->dma_channel); 735 736 /* start SPI transfer */ 737 write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX); 738 739 /* just return here, there can only be one transfer 740 * in this mode 741 */ 742 message->status = 0; 743 bfin_spi_giveback(drv_data); 744 return; 745 } 746 747 /* In dma mode, rx or tx must be NULL in one transfer */ 748 dma_config = (RESTART | dma_width | DI_EN); 749 if (drv_data->rx != NULL) { 750 /* set transfer mode, and enable SPI */ 751 dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n", 752 drv_data->rx, drv_data->len_in_bytes); 753 754 /* invalidate caches, if needed */ 755 if (bfin_addr_dcacheable((unsigned long) drv_data->rx)) 756 invalidate_dcache_range((unsigned long) drv_data->rx, 757 (unsigned long) (drv_data->rx + 758 drv_data->len_in_bytes)); 759 760 dma_config |= WNR; 761 dma_start_addr = (unsigned long)drv_data->rx; 762 cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT; 763 764 } else if (drv_data->tx != NULL) { 765 dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n"); 766 767 /* flush caches, if needed */ 768 if (bfin_addr_dcacheable((unsigned long) drv_data->tx)) 769 flush_dcache_range((unsigned long) drv_data->tx, 770 (unsigned long) (drv_data->tx + 771 drv_data->len_in_bytes)); 772 773 dma_start_addr = (unsigned long)drv_data->tx; 774 cr |= BIT_CTL_TIMOD_DMA_TX; 775 776 } else 777 BUG(); 778 779 /* oh man, here there be monsters ... and i dont mean the 780 * fluffy cute ones from pixar, i mean the kind that'll eat 781 * your data, kick your dog, and love it all. do *not* try 782 * and change these lines unless you (1) heavily test DMA 783 * with SPI flashes on a loaded system (e.g. ping floods), 784 * (2) know just how broken the DMA engine interaction with 785 * the SPI peripheral is, and (3) have someone else to blame 786 * when you screw it all up anyways. 787 */ 788 set_dma_start_addr(drv_data->dma_channel, dma_start_addr); 789 set_dma_config(drv_data->dma_channel, dma_config); 790 local_irq_save(flags); 791 SSYNC(); 792 write_CTRL(drv_data, cr); 793 enable_dma(drv_data->dma_channel); 794 dma_enable_irq(drv_data->dma_channel); 795 local_irq_restore(flags); 796 797 return; 798 } 799 800 /* 801 * We always use SPI_WRITE mode (transfer starts with TDBR write). 802 * SPI_READ mode (transfer starts with RDBR read) seems to have 803 * problems with setting up the output value in TDBR prior to the 804 * start of the transfer. 805 */ 806 write_CTRL(drv_data, cr | BIT_CTL_TXMOD); 807 808 if (chip->pio_interrupt) { 809 /* SPI irq should have been disabled by now */ 810 811 /* discard old RX data and clear RXS */ 812 bfin_spi_dummy_read(drv_data); 813 814 /* start transfer */ 815 if (drv_data->tx == NULL) 816 write_TDBR(drv_data, chip->idle_tx_val); 817 else { 818 if (bits_per_word == 8) 819 write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); 820 else 821 write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); 822 drv_data->tx += drv_data->n_bytes; 823 } 824 825 /* once TDBR is empty, interrupt is triggered */ 826 enable_irq(drv_data->spi_irq); 827 return; 828 } 829 830 /* IO mode */ 831 dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); 832 833 if (full_duplex) { 834 /* full duplex mode */ 835 BUG_ON((drv_data->tx_end - drv_data->tx) != 836 (drv_data->rx_end - drv_data->rx)); 837 dev_dbg(&drv_data->pdev->dev, 838 "IO duplex: cr is 0x%x\n", cr); 839 840 drv_data->ops->duplex(drv_data); 841 842 if (drv_data->tx != drv_data->tx_end) 843 tranf_success = 0; 844 } else if (drv_data->tx != NULL) { 845 /* write only half duplex */ 846 dev_dbg(&drv_data->pdev->dev, 847 "IO write: cr is 0x%x\n", cr); 848 849 drv_data->ops->write(drv_data); 850 851 if (drv_data->tx != drv_data->tx_end) 852 tranf_success = 0; 853 } else if (drv_data->rx != NULL) { 854 /* read only half duplex */ 855 dev_dbg(&drv_data->pdev->dev, 856 "IO read: cr is 0x%x\n", cr); 857 858 drv_data->ops->read(drv_data); 859 if (drv_data->rx != drv_data->rx_end) 860 tranf_success = 0; 861 } 862 863 if (!tranf_success) { 864 dev_dbg(&drv_data->pdev->dev, 865 "IO write error!\n"); 866 message->state = ERROR_STATE; 867 } else { 868 /* Update total byte transfered */ 869 message->actual_length += drv_data->len_in_bytes; 870 /* Move to next transfer of this msg */ 871 message->state = bfin_spi_next_transfer(drv_data); 872 if (drv_data->cs_change) 873 bfin_spi_cs_deactive(drv_data, chip); 874 } 875 876 /* Schedule next transfer tasklet */ 877 tasklet_schedule(&drv_data->pump_transfers); 878} 879 880/* pop a msg from queue and kick off real transfer */ 881static void bfin_spi_pump_messages(struct work_struct *work) 882{ 883 struct bfin_spi_master_data *drv_data; 884 unsigned long flags; 885 886 drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); 887 888 /* Lock queue and check for queue work */ 889 spin_lock_irqsave(&drv_data->lock, flags); 890 if (list_empty(&drv_data->queue) || !drv_data->running) { 891 /* pumper kicked off but no work to do */ 892 drv_data->busy = 0; 893 spin_unlock_irqrestore(&drv_data->lock, flags); 894 return; 895 } 896 897 /* Make sure we are not already running a message */ 898 if (drv_data->cur_msg) { 899 spin_unlock_irqrestore(&drv_data->lock, flags); 900 return; 901 } 902 903 /* Extract head of queue */ 904 drv_data->cur_msg = list_entry(drv_data->queue.next, 905 struct spi_message, queue); 906 907 /* Setup the SSP using the per chip configuration */ 908 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 909 bfin_spi_restore_state(drv_data); 910 911 list_del_init(&drv_data->cur_msg->queue); 912 913 /* Initial message state */ 914 drv_data->cur_msg->state = START_STATE; 915 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 916 struct spi_transfer, transfer_list); 917 918 dev_dbg(&drv_data->pdev->dev, "got a message to pump, " 919 "state is set to: baud %d, flag 0x%x, ctl 0x%x\n", 920 drv_data->cur_chip->baud, drv_data->cur_chip->flag, 921 drv_data->cur_chip->ctl_reg); 922 923 dev_dbg(&drv_data->pdev->dev, 924 "the first transfer len is %d\n", 925 drv_data->cur_transfer->len); 926 927 /* Mark as busy and launch transfers */ 928 tasklet_schedule(&drv_data->pump_transfers); 929 930 drv_data->busy = 1; 931 spin_unlock_irqrestore(&drv_data->lock, flags); 932} 933 934/* 935 * got a msg to transfer, queue it in drv_data->queue. 936 * And kick off message pumper 937 */ 938static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) 939{ 940 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 941 unsigned long flags; 942 943 spin_lock_irqsave(&drv_data->lock, flags); 944 945 if (!drv_data->running) { 946 spin_unlock_irqrestore(&drv_data->lock, flags); 947 return -ESHUTDOWN; 948 } 949 950 msg->actual_length = 0; 951 msg->status = -EINPROGRESS; 952 msg->state = START_STATE; 953 954 dev_dbg(&spi->dev, "adding an msg in transfer() \n"); 955 list_add_tail(&msg->queue, &drv_data->queue); 956 957 if (drv_data->running && !drv_data->busy) 958 queue_work(drv_data->workqueue, &drv_data->pump_messages); 959 960 spin_unlock_irqrestore(&drv_data->lock, flags); 961 962 return 0; 963} 964 965#define MAX_SPI_SSEL 7 966 967static u16 ssel[][MAX_SPI_SSEL] = { 968 {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3, 969 P_SPI0_SSEL4, P_SPI0_SSEL5, 970 P_SPI0_SSEL6, P_SPI0_SSEL7}, 971 972 {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3, 973 P_SPI1_SSEL4, P_SPI1_SSEL5, 974 P_SPI1_SSEL6, P_SPI1_SSEL7}, 975 976 {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3, 977 P_SPI2_SSEL4, P_SPI2_SSEL5, 978 P_SPI2_SSEL6, P_SPI2_SSEL7}, 979}; 980 981/* setup for devices (may be called multiple times -- not just first setup) */ 982static int bfin_spi_setup(struct spi_device *spi) 983{ 984 struct bfin5xx_spi_chip *chip_info; 985 struct bfin_spi_slave_data *chip = NULL; 986 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 987 u16 bfin_ctl_reg; 988 int ret = -EINVAL; 989 990 /* Only alloc (or use chip_info) on first setup */ 991 chip_info = NULL; 992 chip = spi_get_ctldata(spi); 993 if (chip == NULL) { 994 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 995 if (!chip) { 996 dev_err(&spi->dev, "cannot allocate chip data\n"); 997 ret = -ENOMEM; 998 goto error; 999 } 1000 1001 chip->enable_dma = 0; 1002 chip_info = spi->controller_data; 1003 } 1004 1005 /* Let people set non-standard bits directly */ 1006 bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | 1007 BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; 1008 1009 /* chip_info isn't always needed */ 1010 if (chip_info) { 1011 /* Make sure people stop trying to set fields via ctl_reg 1012 * when they should actually be using common SPI framework. 1013 * Currently we let through: WOM EMISO PSSE GM SZ. 1014 * Not sure if a user actually needs/uses any of these, 1015 * but let's assume (for now) they do. 1016 */ 1017 if (chip_info->ctl_reg & ~bfin_ctl_reg) { 1018 dev_err(&spi->dev, "do not set bits in ctl_reg " 1019 "that the SPI framework manages\n"); 1020 goto error; 1021 } 1022 chip->enable_dma = chip_info->enable_dma != 0 1023 && drv_data->master_info->enable_dma; 1024 chip->ctl_reg = chip_info->ctl_reg; 1025 chip->cs_chg_udelay = chip_info->cs_chg_udelay; 1026 chip->idle_tx_val = chip_info->idle_tx_val; 1027 chip->pio_interrupt = chip_info->pio_interrupt; 1028 spi->bits_per_word = chip_info->bits_per_word; 1029 } else { 1030 /* force a default base state */ 1031 chip->ctl_reg &= bfin_ctl_reg; 1032 } 1033 1034 if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { 1035 dev_err(&spi->dev, "%d bits_per_word is not supported\n", 1036 spi->bits_per_word); 1037 goto error; 1038 } 1039 1040 /* translate common spi framework into our register */ 1041 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { 1042 dev_err(&spi->dev, "unsupported spi modes detected\n"); 1043 goto error; 1044 } 1045 if (spi->mode & SPI_CPOL) 1046 chip->ctl_reg |= BIT_CTL_CPOL; 1047 if (spi->mode & SPI_CPHA) 1048 chip->ctl_reg |= BIT_CTL_CPHA; 1049 if (spi->mode & SPI_LSB_FIRST) 1050 chip->ctl_reg |= BIT_CTL_LSBF; 1051 /* we dont support running in slave mode (yet?) */ 1052 chip->ctl_reg |= BIT_CTL_MASTER; 1053 1054 /* 1055 * Notice: for blackfin, the speed_hz is the value of register 1056 * SPI_BAUD, not the real baudrate 1057 */ 1058 chip->baud = hz_to_spi_baud(spi->max_speed_hz); 1059 chip->chip_select_num = spi->chip_select; 1060 if (chip->chip_select_num < MAX_CTRL_CS) { 1061 if (!(spi->mode & SPI_CPHA)) 1062 dev_warn(&spi->dev, "Warning: SPI CPHA not set:" 1063 " Slave Select not under software control!\n" 1064 " See Documentation/blackfin/bfin-spi-notes.txt"); 1065 1066 chip->flag = (1 << spi->chip_select) << 8; 1067 } else 1068 chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; 1069 1070 if (chip->enable_dma && chip->pio_interrupt) { 1071 dev_err(&spi->dev, "enable_dma is set, " 1072 "do not set pio_interrupt\n"); 1073 goto error; 1074 } 1075 /* 1076 * if any one SPI chip is registered and wants DMA, request the 1077 * DMA channel for it 1078 */ 1079 if (chip->enable_dma && !drv_data->dma_requested) { 1080 /* register dma irq handler */ 1081 ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); 1082 if (ret) { 1083 dev_err(&spi->dev, 1084 "Unable to request BlackFin SPI DMA channel\n"); 1085 goto error; 1086 } 1087 drv_data->dma_requested = 1; 1088 1089 ret = set_dma_callback(drv_data->dma_channel, 1090 bfin_spi_dma_irq_handler, drv_data); 1091 if (ret) { 1092 dev_err(&spi->dev, "Unable to set dma callback\n"); 1093 goto error; 1094 } 1095 dma_disable_irq(drv_data->dma_channel); 1096 } 1097 1098 if (chip->pio_interrupt && !drv_data->irq_requested) { 1099 ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, 1100 IRQF_DISABLED, "BFIN_SPI", drv_data); 1101 if (ret) { 1102 dev_err(&spi->dev, "Unable to register spi IRQ\n"); 1103 goto error; 1104 } 1105 drv_data->irq_requested = 1; 1106 /* we use write mode, spi irq has to be disabled here */ 1107 disable_irq(drv_data->spi_irq); 1108 } 1109 1110 if (chip->chip_select_num >= MAX_CTRL_CS) { 1111 /* Only request on first setup */ 1112 if (spi_get_ctldata(spi) == NULL) { 1113 ret = gpio_request(chip->cs_gpio, spi->modalias); 1114 if (ret) { 1115 dev_err(&spi->dev, "gpio_request() error\n"); 1116 goto pin_error; 1117 } 1118 gpio_direction_output(chip->cs_gpio, 1); 1119 } 1120 } 1121 1122 dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", 1123 spi->modalias, spi->bits_per_word, chip->enable_dma); 1124 dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", 1125 chip->ctl_reg, chip->flag); 1126 1127 spi_set_ctldata(spi, chip); 1128 1129 dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); 1130 if (chip->chip_select_num < MAX_CTRL_CS) { 1131 ret = peripheral_request(ssel[spi->master->bus_num] 1132 [chip->chip_select_num-1], spi->modalias); 1133 if (ret) { 1134 dev_err(&spi->dev, "peripheral_request() error\n"); 1135 goto pin_error; 1136 } 1137 } 1138 1139 bfin_spi_cs_enable(drv_data, chip); 1140 bfin_spi_cs_deactive(drv_data, chip); 1141 1142 return 0; 1143 1144 pin_error: 1145 if (chip->chip_select_num >= MAX_CTRL_CS) 1146 gpio_free(chip->cs_gpio); 1147 else 1148 peripheral_free(ssel[spi->master->bus_num] 1149 [chip->chip_select_num - 1]); 1150 error: 1151 if (chip) { 1152 if (drv_data->dma_requested) 1153 free_dma(drv_data->dma_channel); 1154 drv_data->dma_requested = 0; 1155 1156 kfree(chip); 1157 /* prevent free 'chip' twice */ 1158 spi_set_ctldata(spi, NULL); 1159 } 1160 1161 return ret; 1162} 1163 1164/* 1165 * callback for spi framework. 1166 * clean driver specific data 1167 */ 1168static void bfin_spi_cleanup(struct spi_device *spi) 1169{ 1170 struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); 1171 struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); 1172 1173 if (!chip) 1174 return; 1175 1176 if (chip->chip_select_num < MAX_CTRL_CS) { 1177 peripheral_free(ssel[spi->master->bus_num] 1178 [chip->chip_select_num-1]); 1179 bfin_spi_cs_disable(drv_data, chip); 1180 } else 1181 gpio_free(chip->cs_gpio); 1182 1183 kfree(chip); 1184 /* prevent free 'chip' twice */ 1185 spi_set_ctldata(spi, NULL); 1186} 1187 1188static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) 1189{ 1190 INIT_LIST_HEAD(&drv_data->queue); 1191 spin_lock_init(&drv_data->lock); 1192 1193 drv_data->running = false; 1194 drv_data->busy = 0; 1195 1196 /* init transfer tasklet */ 1197 tasklet_init(&drv_data->pump_transfers, 1198 bfin_spi_pump_transfers, (unsigned long)drv_data); 1199 1200 /* init messages workqueue */ 1201 INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages); 1202 drv_data->workqueue = create_singlethread_workqueue( 1203 dev_name(drv_data->master->dev.parent)); 1204 if (drv_data->workqueue == NULL) 1205 return -EBUSY; 1206 1207 return 0; 1208} 1209 1210static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) 1211{ 1212 unsigned long flags; 1213 1214 spin_lock_irqsave(&drv_data->lock, flags); 1215 1216 if (drv_data->running || drv_data->busy) { 1217 spin_unlock_irqrestore(&drv_data->lock, flags); 1218 return -EBUSY; 1219 } 1220 1221 drv_data->running = true; 1222 drv_data->cur_msg = NULL; 1223 drv_data->cur_transfer = NULL; 1224 drv_data->cur_chip = NULL; 1225 spin_unlock_irqrestore(&drv_data->lock, flags); 1226 1227 queue_work(drv_data->workqueue, &drv_data->pump_messages); 1228 1229 return 0; 1230} 1231 1232static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) 1233{ 1234 unsigned long flags; 1235 unsigned limit = 500; 1236 int status = 0; 1237 1238 spin_lock_irqsave(&drv_data->lock, flags); 1239 1240 /* 1241 * This is a bit lame, but is optimized for the common execution path. 1242 * A wait_queue on the drv_data->busy could be used, but then the common 1243 * execution path (pump_messages) would be required to call wake_up or 1244 * friends on every SPI message. Do this instead 1245 */ 1246 drv_data->running = false; 1247 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { 1248 spin_unlock_irqrestore(&drv_data->lock, flags); 1249 msleep(10); 1250 spin_lock_irqsave(&drv_data->lock, flags); 1251 } 1252 1253 if (!list_empty(&drv_data->queue) || drv_data->busy) 1254 status = -EBUSY; 1255 1256 spin_unlock_irqrestore(&drv_data->lock, flags); 1257 1258 return status; 1259} 1260 1261static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) 1262{ 1263 int status; 1264 1265 status = bfin_spi_stop_queue(drv_data); 1266 if (status != 0) 1267 return status; 1268 1269 destroy_workqueue(drv_data->workqueue); 1270 1271 return 0; 1272} 1273 1274static int __init bfin_spi_probe(struct platform_device *pdev) 1275{ 1276 struct device *dev = &pdev->dev; 1277 struct bfin5xx_spi_master *platform_info; 1278 struct spi_master *master; 1279 struct bfin_spi_master_data *drv_data; 1280 struct resource *res; 1281 int status = 0; 1282 1283 platform_info = dev->platform_data; 1284 1285 /* Allocate master with space for drv_data */ 1286 master = spi_alloc_master(dev, sizeof(*drv_data)); 1287 if (!master) { 1288 dev_err(&pdev->dev, "can not alloc spi_master\n"); 1289 return -ENOMEM; 1290 } 1291 1292 drv_data = spi_master_get_devdata(master); 1293 drv_data->master = master; 1294 drv_data->master_info = platform_info; 1295 drv_data->pdev = pdev; 1296 drv_data->pin_req = platform_info->pin_req; 1297 1298 /* the spi->mode bits supported by this driver: */ 1299 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; 1300 1301 master->bus_num = pdev->id; 1302 master->num_chipselect = platform_info->num_chipselect; 1303 master->cleanup = bfin_spi_cleanup; 1304 master->setup = bfin_spi_setup; 1305 master->transfer = bfin_spi_transfer; 1306 1307 /* Find and map our resources */ 1308 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1309 if (res == NULL) { 1310 dev_err(dev, "Cannot get IORESOURCE_MEM\n"); 1311 status = -ENOENT; 1312 goto out_error_get_res; 1313 } 1314 1315 drv_data->regs_base = ioremap(res->start, resource_size(res)); 1316 if (drv_data->regs_base == NULL) { 1317 dev_err(dev, "Cannot map IO\n"); 1318 status = -ENXIO; 1319 goto out_error_ioremap; 1320 } 1321 1322 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1323 if (res == NULL) { 1324 dev_err(dev, "No DMA channel specified\n"); 1325 status = -ENOENT; 1326 goto out_error_free_io; 1327 } 1328 drv_data->dma_channel = res->start; 1329 1330 drv_data->spi_irq = platform_get_irq(pdev, 0); 1331 if (drv_data->spi_irq < 0) { 1332 dev_err(dev, "No spi pio irq specified\n"); 1333 status = -ENOENT; 1334 goto out_error_free_io; 1335 } 1336 1337 /* Initial and start queue */ 1338 status = bfin_spi_init_queue(drv_data); 1339 if (status != 0) { 1340 dev_err(dev, "problem initializing queue\n"); 1341 goto out_error_queue_alloc; 1342 } 1343 1344 status = bfin_spi_start_queue(drv_data); 1345 if (status != 0) { 1346 dev_err(dev, "problem starting queue\n"); 1347 goto out_error_queue_alloc; 1348 } 1349 1350 status = peripheral_request_list(drv_data->pin_req, DRV_NAME); 1351 if (status != 0) { 1352 dev_err(&pdev->dev, ": Requesting Peripherals failed\n"); 1353 goto out_error_queue_alloc; 1354 } 1355 1356 /* Reset SPI registers. If these registers were used by the boot loader, 1357 * the sky may fall on your head if you enable the dma controller. 1358 */ 1359 write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); 1360 write_FLAG(drv_data, 0xFF00); 1361 1362 /* Register with the SPI framework */ 1363 platform_set_drvdata(pdev, drv_data); 1364 status = spi_register_master(master); 1365 if (status != 0) { 1366 dev_err(dev, "problem registering spi master\n"); 1367 goto out_error_queue_alloc; 1368 } 1369 1370 dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n", 1371 DRV_DESC, DRV_VERSION, drv_data->regs_base, 1372 drv_data->dma_channel); 1373 return status; 1374 1375out_error_queue_alloc: 1376 bfin_spi_destroy_queue(drv_data); 1377out_error_free_io: 1378 iounmap((void *) drv_data->regs_base); 1379out_error_ioremap: 1380out_error_get_res: 1381 spi_master_put(master); 1382 1383 return status; 1384} 1385 1386/* stop hardware and remove the driver */ 1387static int __devexit bfin_spi_remove(struct platform_device *pdev) 1388{ 1389 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1390 int status = 0; 1391 1392 if (!drv_data) 1393 return 0; 1394 1395 /* Remove the queue */ 1396 status = bfin_spi_destroy_queue(drv_data); 1397 if (status != 0) 1398 return status; 1399 1400 /* Disable the SSP at the peripheral and SOC level */ 1401 bfin_spi_disable(drv_data); 1402 1403 /* Release DMA */ 1404 if (drv_data->master_info->enable_dma) { 1405 if (dma_channel_active(drv_data->dma_channel)) 1406 free_dma(drv_data->dma_channel); 1407 } 1408 1409 if (drv_data->irq_requested) { 1410 free_irq(drv_data->spi_irq, drv_data); 1411 drv_data->irq_requested = 0; 1412 } 1413 1414 /* Disconnect from the SPI framework */ 1415 spi_unregister_master(drv_data->master); 1416 1417 peripheral_free_list(drv_data->pin_req); 1418 1419 /* Prevent double remove */ 1420 platform_set_drvdata(pdev, NULL); 1421 1422 return 0; 1423} 1424 1425#ifdef CONFIG_PM 1426static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) 1427{ 1428 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1429 int status = 0; 1430 1431 status = bfin_spi_stop_queue(drv_data); 1432 if (status != 0) 1433 return status; 1434 1435 drv_data->ctrl_reg = read_CTRL(drv_data); 1436 drv_data->flag_reg = read_FLAG(drv_data); 1437 1438 /* 1439 * reset SPI_CTL and SPI_FLG registers 1440 */ 1441 write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); 1442 write_FLAG(drv_data, 0xFF00); 1443 1444 return 0; 1445} 1446 1447static int bfin_spi_resume(struct platform_device *pdev) 1448{ 1449 struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); 1450 int status = 0; 1451 1452 write_CTRL(drv_data, drv_data->ctrl_reg); 1453 write_FLAG(drv_data, drv_data->flag_reg); 1454 1455 /* Start the queue running */ 1456 status = bfin_spi_start_queue(drv_data); 1457 if (status != 0) { 1458 dev_err(&pdev->dev, "problem starting queue (%d)\n", status); 1459 return status; 1460 } 1461 1462 return 0; 1463} 1464#else 1465#define bfin_spi_suspend NULL 1466#define bfin_spi_resume NULL 1467#endif /* CONFIG_PM */ 1468 1469MODULE_ALIAS("platform:bfin-spi"); 1470static struct platform_driver bfin_spi_driver = { 1471 .driver = { 1472 .name = DRV_NAME, 1473 .owner = THIS_MODULE, 1474 }, 1475 .suspend = bfin_spi_suspend, 1476 .resume = bfin_spi_resume, 1477 .remove = __devexit_p(bfin_spi_remove), 1478}; 1479 1480static int __init bfin_spi_init(void) 1481{ 1482 return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); 1483} 1484subsys_initcall(bfin_spi_init); 1485 1486static void __exit bfin_spi_exit(void) 1487{ 1488 platform_driver_unregister(&bfin_spi_driver); 1489} 1490module_exit(bfin_spi_exit);