Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: spi-ep93xx: use the default master transfer queueing mechanism

Update this driver to the default implementation of transfer_one_message().

Signed-off-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Signed-off-by: Mark Brown <broonie@kernel.org>

Authored by H Hartley Sweeten and committed by Mark Brown.
d9a01771 c7a909cf

+108 -214
drivers/spi/spi-ep93xx.c
··· 73 73 * @clk: clock for the controller 74 74 * @mmio: pointer to ioremap()'d registers 75 75 * @sspdr_phys: physical address of the SSPDR register 76 - * @wait: wait here until given transfer is completed 77 76 * @tx: current byte in transfer to transmit 78 77 * @rx: current byte in transfer to receive 79 78 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one ··· 90 91 struct clk *clk; 91 92 void __iomem *mmio; 92 93 unsigned long sspdr_phys; 93 - struct completion wait; 94 94 size_t tx; 95 95 size_t rx; 96 96 size_t fifo_level; ··· 121 123 122 124 /* 123 125 * Make sure that max value is between values supported by the 124 - * controller. Note that minimum value is already checked in 125 - * ep93xx_spi_transfer_one_message(). 126 + * controller. 126 127 */ 127 128 rate = clamp(rate, master->min_speed_hz, master->max_speed_hz); 128 129 ··· 144 147 } 145 148 146 149 return -EINVAL; 147 - } 148 - 149 - static void ep93xx_spi_cs_control(struct spi_device *spi, bool enable) 150 - { 151 - if (spi->mode & SPI_CS_HIGH) 152 - enable = !enable; 153 - 154 - if (gpio_is_valid(spi->cs_gpio)) 155 - gpio_set_value(spi->cs_gpio, !enable); 156 150 } 157 151 158 152 static int ep93xx_spi_chip_setup(struct spi_master *master, ··· 176 188 return 0; 177 189 } 178 190 179 - static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t) 191 + static void ep93xx_do_write(struct spi_master *master) 180 192 { 193 + struct ep93xx_spi *espi = spi_master_get_devdata(master); 194 + struct spi_transfer *xfer = master->cur_msg->state; 181 195 u32 val = 0; 182 196 183 - if (t->bits_per_word > 8) { 184 - if (t->tx_buf) 185 - val = ((u16 *)t->tx_buf)[espi->tx]; 197 + if (xfer->bits_per_word > 8) { 198 + if (xfer->tx_buf) 199 + val = ((u16 *)xfer->tx_buf)[espi->tx]; 186 200 espi->tx += 2; 187 201 } else { 188 - if (t->tx_buf) 189 - val = ((u8 *)t->tx_buf)[espi->tx]; 202 + if (xfer->tx_buf) 203 + val = ((u8 *)xfer->tx_buf)[espi->tx]; 190 204 espi->tx += 1; 191 
205 } 192 206 writel(val, espi->mmio + SSPDR); 193 207 } 194 208 195 - static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t) 209 + static void ep93xx_do_read(struct spi_master *master) 196 210 { 211 + struct ep93xx_spi *espi = spi_master_get_devdata(master); 212 + struct spi_transfer *xfer = master->cur_msg->state; 197 213 u32 val; 198 214 199 215 val = readl(espi->mmio + SSPDR); 200 - if (t->bits_per_word > 8) { 201 - if (t->rx_buf) 202 - ((u16 *)t->rx_buf)[espi->rx] = val; 216 + if (xfer->bits_per_word > 8) { 217 + if (xfer->rx_buf) 218 + ((u16 *)xfer->rx_buf)[espi->rx] = val; 203 219 espi->rx += 2; 204 220 } else { 205 - if (t->rx_buf) 206 - ((u8 *)t->rx_buf)[espi->rx] = val; 221 + if (xfer->rx_buf) 222 + ((u8 *)xfer->rx_buf)[espi->rx] = val; 207 223 espi->rx += 1; 208 224 } 209 225 } ··· 226 234 static int ep93xx_spi_read_write(struct spi_master *master) 227 235 { 228 236 struct ep93xx_spi *espi = spi_master_get_devdata(master); 229 - struct spi_transfer *t = master->cur_msg->state; 237 + struct spi_transfer *xfer = master->cur_msg->state; 230 238 231 239 /* read as long as RX FIFO has frames in it */ 232 240 while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) { 233 - ep93xx_do_read(espi, t); 241 + ep93xx_do_read(master); 234 242 espi->fifo_level--; 235 243 } 236 244 237 245 /* write as long as TX FIFO has room */ 238 - while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) { 239 - ep93xx_do_write(espi, t); 246 + while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) { 247 + ep93xx_do_write(master); 240 248 espi->fifo_level++; 241 249 } 242 250 243 - if (espi->rx == t->len) 251 + if (espi->rx == xfer->len) 244 252 return 0; 245 253 246 254 return -EINPROGRESS; 247 - } 248 - 249 - static void ep93xx_spi_pio_transfer(struct spi_master *master) 250 - { 251 - struct ep93xx_spi *espi = spi_master_get_devdata(master); 252 - 253 - /* 254 - * Now everything is set up for the current transfer. 
We prime the TX 255 - * FIFO, enable interrupts, and wait for the transfer to complete. 256 - */ 257 - if (ep93xx_spi_read_write(master)) { 258 - u32 val; 259 - 260 - val = readl(espi->mmio + SSPCR1); 261 - val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); 262 - writel(val, espi->mmio + SSPCR1); 263 - 264 - wait_for_completion(&espi->wait); 265 - } 266 255 } 267 256 268 257 /** ··· 260 287 enum dma_transfer_direction dir) 261 288 { 262 289 struct ep93xx_spi *espi = spi_master_get_devdata(master); 263 - struct spi_transfer *t = master->cur_msg->state; 290 + struct spi_transfer *xfer = master->cur_msg->state; 264 291 struct dma_async_tx_descriptor *txd; 265 292 enum dma_slave_buswidth buswidth; 266 293 struct dma_slave_config conf; ··· 268 295 struct sg_table *sgt; 269 296 struct dma_chan *chan; 270 297 const void *buf, *pbuf; 271 - size_t len = t->len; 298 + size_t len = xfer->len; 272 299 int i, ret, nents; 273 300 274 - if (t->bits_per_word > 8) 301 + if (xfer->bits_per_word > 8) 275 302 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; 276 303 else 277 304 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; ··· 281 308 282 309 if (dir == DMA_DEV_TO_MEM) { 283 310 chan = espi->dma_rx; 284 - buf = t->rx_buf; 311 + buf = xfer->rx_buf; 285 312 sgt = &espi->rx_sgt; 286 313 287 314 conf.src_addr = espi->sspdr_phys; 288 315 conf.src_addr_width = buswidth; 289 316 } else { 290 317 chan = espi->dma_tx; 291 - buf = t->tx_buf; 318 + buf = xfer->tx_buf; 292 319 sgt = &espi->tx_sgt; 293 320 294 321 conf.dst_addr = espi->sspdr_phys; ··· 379 406 380 407 static void ep93xx_spi_dma_callback(void *callback_param) 381 408 { 382 - complete(callback_param); 409 + struct spi_master *master = callback_param; 410 + 411 + ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV); 412 + ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); 413 + 414 + spi_finalize_current_transfer(master); 383 415 } 384 416 385 - static void ep93xx_spi_dma_transfer(struct spi_master *master) 417 + static int ep93xx_spi_dma_transfer(struct spi_master 
*master) 386 418 { 387 419 struct ep93xx_spi *espi = spi_master_get_devdata(master); 388 420 struct dma_async_tx_descriptor *rxd, *txd; ··· 395 417 rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM); 396 418 if (IS_ERR(rxd)) { 397 419 dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); 398 - master->cur_msg->status = PTR_ERR(rxd); 399 - return; 420 + return PTR_ERR(rxd); 400 421 } 401 422 402 423 txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV); 403 424 if (IS_ERR(txd)) { 404 425 ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); 405 426 dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd)); 406 - master->cur_msg->status = PTR_ERR(txd); 407 - return; 427 + return PTR_ERR(txd); 408 428 } 409 429 410 430 /* We are ready when RX is done */ 411 431 rxd->callback = ep93xx_spi_dma_callback; 412 - rxd->callback_param = &espi->wait; 432 + rxd->callback_param = master; 413 433 414 - /* Now submit both descriptors and wait while they finish */ 434 + /* Now submit both descriptors and start DMA */ 415 435 dmaengine_submit(rxd); 416 436 dmaengine_submit(txd); 417 437 418 438 dma_async_issue_pending(espi->dma_rx); 419 439 dma_async_issue_pending(espi->dma_tx); 420 440 421 - wait_for_completion(&espi->wait); 422 - 423 - ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV); 424 - ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM); 425 - } 426 - 427 - /** 428 - * ep93xx_spi_process_transfer() - processes one SPI transfer 429 - * @master: SPI master 430 - * @msg: current message 431 - * @t: transfer to process 432 - * 433 - * This function processes one SPI transfer given in @t. Function waits until 434 - * transfer is complete (may sleep) and updates @msg->status based on whether 435 - * transfer was successfully processed or not. 
436 - */ 437 - static void ep93xx_spi_process_transfer(struct spi_master *master, 438 - struct spi_message *msg, 439 - struct spi_transfer *t) 440 - { 441 - struct ep93xx_spi *espi = spi_master_get_devdata(master); 442 - int err; 443 - 444 - msg->state = t; 445 - 446 - err = ep93xx_spi_chip_setup(master, msg->spi, t); 447 - if (err) { 448 - dev_err(&master->dev, 449 - "failed to setup chip for transfer\n"); 450 - msg->status = err; 451 - return; 452 - } 453 - 454 - espi->rx = 0; 455 - espi->tx = 0; 456 - 457 - /* 458 - * There is no point of setting up DMA for the transfers which will 459 - * fit into the FIFO and can be transferred with a single interrupt. 460 - * So in these cases we will be using PIO and don't bother for DMA. 461 - */ 462 - if (espi->dma_rx && t->len > SPI_FIFO_SIZE) 463 - ep93xx_spi_dma_transfer(master); 464 - else 465 - ep93xx_spi_pio_transfer(master); 466 - 467 - /* 468 - * In case of error during transmit, we bail out from processing 469 - * the message. 470 - */ 471 - if (msg->status) 472 - return; 473 - 474 - msg->actual_length += t->len; 475 - 476 - /* 477 - * After this transfer is finished, perform any possible 478 - * post-transfer actions requested by the protocol driver. 479 - */ 480 - if (t->delay_usecs) { 481 - set_current_state(TASK_UNINTERRUPTIBLE); 482 - schedule_timeout(usecs_to_jiffies(t->delay_usecs)); 483 - } 484 - if (t->cs_change) { 485 - if (!list_is_last(&t->transfer_list, &msg->transfers)) { 486 - /* 487 - * In case protocol driver is asking us to drop the 488 - * chipselect briefly, we let the scheduler to handle 489 - * any "delay" here. 490 - */ 491 - ep93xx_spi_cs_control(msg->spi, false); 492 - cond_resched(); 493 - ep93xx_spi_cs_control(msg->spi, true); 494 - } 495 - } 496 - } 497 - 498 - /* 499 - * ep93xx_spi_process_message() - process one SPI message 500 - * @master: SPI master 501 - * @msg: message to process 502 - * 503 - * This function processes a single SPI message. 
We go through all transfers in 504 - * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is 505 - * asserted during the whole message (unless per transfer cs_change is set). 506 - * 507 - * @msg->status contains %0 in case of success or negative error code in case of 508 - * failure. 509 - */ 510 - static void ep93xx_spi_process_message(struct spi_master *master, 511 - struct spi_message *msg) 512 - { 513 - struct ep93xx_spi *espi = spi_master_get_devdata(master); 514 - unsigned long timeout; 515 - struct spi_transfer *t; 516 - 517 - /* 518 - * Just to be sure: flush any data from RX FIFO. 519 - */ 520 - timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); 521 - while (readl(espi->mmio + SSPSR) & SSPSR_RNE) { 522 - if (time_after(jiffies, timeout)) { 523 - dev_warn(&master->dev, 524 - "timeout while flushing RX FIFO\n"); 525 - msg->status = -ETIMEDOUT; 526 - return; 527 - } 528 - readl(espi->mmio + SSPDR); 529 - } 530 - 531 - /* 532 - * We explicitly handle FIFO level. This way we don't have to check TX 533 - * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 534 - */ 535 - espi->fifo_level = 0; 536 - 537 - /* 538 - * Assert the chipselect. 539 - */ 540 - ep93xx_spi_cs_control(msg->spi, true); 541 - 542 - list_for_each_entry(t, &msg->transfers, transfer_list) { 543 - ep93xx_spi_process_transfer(master, msg, t); 544 - if (msg->status) 545 - break; 546 - } 547 - 548 - /* 549 - * Now the whole message is transferred (or failed for some reason). We 550 - * deselect the device and disable the SPI controller. 
551 - */ 552 - ep93xx_spi_cs_control(msg->spi, false); 553 - } 554 - 555 - static int ep93xx_spi_transfer_one_message(struct spi_master *master, 556 - struct spi_message *msg) 557 - { 558 - struct ep93xx_spi *espi = spi_master_get_devdata(master); 559 - 560 - msg->state = NULL; 561 - msg->status = 0; 562 - msg->actual_length = 0; 563 - 564 - ep93xx_spi_process_message(master, msg); 565 - 566 - spi_finalize_current_message(master); 567 - 568 - return 0; 441 + /* signal that we need to wait for completion */ 442 + return 1; 569 443 } 570 444 571 445 static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id) ··· 460 630 val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); 461 631 writel(val, espi->mmio + SSPCR1); 462 632 463 - complete(&espi->wait); 633 + spi_finalize_current_transfer(master); 464 634 465 635 return IRQ_HANDLED; 636 + } 637 + 638 + static int ep93xx_spi_transfer_one(struct spi_master *master, 639 + struct spi_device *spi, 640 + struct spi_transfer *xfer) 641 + { 642 + struct ep93xx_spi *espi = spi_master_get_devdata(master); 643 + u32 val; 644 + int ret; 645 + 646 + ret = ep93xx_spi_chip_setup(master, spi, xfer); 647 + if (ret) { 648 + dev_err(&master->dev, "failed to setup chip for transfer\n"); 649 + return ret; 650 + } 651 + 652 + master->cur_msg->state = xfer; 653 + espi->rx = 0; 654 + espi->tx = 0; 655 + 656 + /* 657 + * There is no point of setting up DMA for the transfers which will 658 + * fit into the FIFO and can be transferred with a single interrupt. 659 + * So in these cases we will be using PIO and don't bother for DMA. 
660 + */ 661 + if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE) 662 + return ep93xx_spi_dma_transfer(master); 663 + 664 + /* Using PIO so prime the TX FIFO and enable interrupts */ 665 + ep93xx_spi_read_write(master); 666 + 667 + val = readl(espi->mmio + SSPCR1); 668 + val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE); 669 + writel(val, espi->mmio + SSPCR1); 670 + 671 + /* signal that we need to wait for completion */ 672 + return 1; 673 + } 674 + 675 + static int ep93xx_spi_prepare_message(struct spi_master *master, 676 + struct spi_message *msg) 677 + { 678 + struct ep93xx_spi *espi = spi_master_get_devdata(master); 679 + unsigned long timeout; 680 + 681 + /* 682 + * Just to be sure: flush any data from RX FIFO. 683 + */ 684 + timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT); 685 + while (readl(espi->mmio + SSPSR) & SSPSR_RNE) { 686 + if (time_after(jiffies, timeout)) { 687 + dev_warn(&master->dev, 688 + "timeout while flushing RX FIFO\n"); 689 + return -ETIMEDOUT; 690 + } 691 + readl(espi->mmio + SSPDR); 692 + } 693 + 694 + /* 695 + * We explicitly handle FIFO level. This way we don't have to check TX 696 + * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns. 
697 + */ 698 + espi->fifo_level = 0; 699 + 700 + return 0; 466 701 } 467 702 468 703 static int ep93xx_spi_prepare_hardware(struct spi_master *master) ··· 664 769 665 770 master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware; 666 771 master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware; 667 - master->transfer_one_message = ep93xx_spi_transfer_one_message; 772 + master->prepare_message = ep93xx_spi_prepare_message; 773 + master->transfer_one = ep93xx_spi_transfer_one; 668 774 master->bus_num = pdev->id; 669 775 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 670 776 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); ··· 705 809 error = PTR_ERR(espi->clk); 706 810 goto fail_release_master; 707 811 } 708 - 709 - init_completion(&espi->wait); 710 812 711 813 /* 712 814 * Calculate maximum and minimum supported clock rates