Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: qup: Add DMA capabilities

This patch adds DMA capabilities to the spi-qup driver. If DMA channels are
present, the QUP will use DMA instead of block mode for transfers to/from SPI
peripherals for transactions larger than the length of a block.

Signed-off-by: Andy Gross <agross@codeaurora.org>
Signed-off-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Reviewed-by: Ivan T. Ivanov <iivanov@mm-sol.com>
Signed-off-by: Mark Brown <broonie@kernel.org>

authored by

Andy Gross and committed by
Mark Brown
612762e8 c517d838

+312 -32
+8
Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
··· 33 33 nodes. If unspecified, a single SPI device without a chip 34 34 select can be used. 35 35 36 + - dmas: Two DMA channel specifiers following the convention outlined 37 + in bindings/dma/dma.txt 38 + - dma-names: Names for the dma channels, if present. There must be at 39 + least one channel named "tx" for transmit and named "rx" for 40 + receive. 36 41 37 42 SPI slave nodes must be children of the SPI master node and can contain 38 43 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt ··· 55 50 56 51 clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; 57 52 clock-names = "core", "iface"; 53 + 54 + dmas = <&blsp1_bam 13>, <&blsp1_bam 12>; 55 + dma-names = "rx", "tx"; 58 56 59 57 pinctrl-names = "default"; 60 58 pinctrl-0 = <&spi8_default>;
+304 -32
drivers/spi/spi-qup.c
··· 22 22 #include <linux/platform_device.h> 23 23 #include <linux/pm_runtime.h> 24 24 #include <linux/spi/spi.h> 25 + #include <linux/dmaengine.h> 26 + #include <linux/dma-mapping.h> 25 27 26 28 #define QUP_CONFIG 0x0000 27 29 #define QUP_STATE 0x0004 ··· 118 116 119 117 #define SPI_NUM_CHIPSELECTS 4 120 118 119 + #define SPI_MAX_DMA_XFER (SZ_64K - 64) 120 + 121 121 /* high speed mode is when bus rate is greater then 26MHz */ 122 122 #define SPI_HS_MIN_RATE 26000000 123 123 #define SPI_MAX_RATE 50000000 ··· 144 140 struct completion done; 145 141 int error; 146 142 int w_size; /* bytes per SPI word */ 143 + int n_words; 147 144 int tx_bytes; 148 145 int rx_bytes; 149 146 int qup_v1; 147 + 148 + int use_dma; 149 + struct dma_slave_config rx_conf; 150 + struct dma_slave_config tx_conf; 150 151 }; 151 152 152 153 ··· 206 197 207 198 return 0; 208 199 } 209 - 210 200 211 201 static void spi_qup_fifo_read(struct spi_qup *controller, 212 202 struct spi_transfer *xfer) ··· 274 266 } 275 267 } 276 268 269 + static void spi_qup_dma_done(void *data) 270 + { 271 + struct spi_qup *qup = data; 272 + 273 + complete(&qup->done); 274 + } 275 + 276 + static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer, 277 + enum dma_transfer_direction dir, 278 + dma_async_tx_callback callback) 279 + { 280 + struct spi_qup *qup = spi_master_get_devdata(master); 281 + unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE; 282 + struct dma_async_tx_descriptor *desc; 283 + struct scatterlist *sgl; 284 + struct dma_chan *chan; 285 + dma_cookie_t cookie; 286 + unsigned int nents; 287 + 288 + if (dir == DMA_MEM_TO_DEV) { 289 + chan = master->dma_tx; 290 + nents = xfer->tx_sg.nents; 291 + sgl = xfer->tx_sg.sgl; 292 + } else { 293 + chan = master->dma_rx; 294 + nents = xfer->rx_sg.nents; 295 + sgl = xfer->rx_sg.sgl; 296 + } 297 + 298 + desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags); 299 + if (!desc) 300 + return -EINVAL; 301 + 302 + desc->callback = callback; 
303 + desc->callback_param = qup; 304 + 305 + cookie = dmaengine_submit(desc); 306 + 307 + return dma_submit_error(cookie); 308 + } 309 + 310 + static void spi_qup_dma_terminate(struct spi_master *master, 311 + struct spi_transfer *xfer) 312 + { 313 + if (xfer->tx_buf) 314 + dmaengine_terminate_all(master->dma_tx); 315 + if (xfer->rx_buf) 316 + dmaengine_terminate_all(master->dma_rx); 317 + } 318 + 319 + static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer) 320 + { 321 + dma_async_tx_callback rx_done = NULL, tx_done = NULL; 322 + int ret; 323 + 324 + if (xfer->rx_buf) 325 + rx_done = spi_qup_dma_done; 326 + else if (xfer->tx_buf) 327 + tx_done = spi_qup_dma_done; 328 + 329 + if (xfer->rx_buf) { 330 + ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done); 331 + if (ret) 332 + return ret; 333 + 334 + dma_async_issue_pending(master->dma_rx); 335 + } 336 + 337 + if (xfer->tx_buf) { 338 + ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done); 339 + if (ret) 340 + return ret; 341 + 342 + dma_async_issue_pending(master->dma_tx); 343 + } 344 + 345 + return 0; 346 + } 347 + 348 + static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer) 349 + { 350 + struct spi_qup *qup = spi_master_get_devdata(master); 351 + int ret; 352 + 353 + ret = spi_qup_set_state(qup, QUP_STATE_RUN); 354 + if (ret) { 355 + dev_warn(qup->dev, "cannot set RUN state\n"); 356 + return ret; 357 + } 358 + 359 + ret = spi_qup_set_state(qup, QUP_STATE_PAUSE); 360 + if (ret) { 361 + dev_warn(qup->dev, "cannot set PAUSE state\n"); 362 + return ret; 363 + } 364 + 365 + spi_qup_fifo_write(qup, xfer); 366 + 367 + return 0; 368 + } 369 + 277 370 static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id) 278 371 { 279 372 struct spi_qup *controller = dev_id; ··· 424 315 error = -EIO; 425 316 } 426 317 427 - if (opflags & QUP_OP_IN_SERVICE_FLAG) 428 - spi_qup_fifo_read(controller, xfer); 318 + if (!controller->use_dma) { 319 + if (opflags & 
QUP_OP_IN_SERVICE_FLAG) 320 + spi_qup_fifo_read(controller, xfer); 429 321 430 - if (opflags & QUP_OP_OUT_SERVICE_FLAG) 431 - spi_qup_fifo_write(controller, xfer); 322 + if (opflags & QUP_OP_OUT_SERVICE_FLAG) 323 + spi_qup_fifo_write(controller, xfer); 324 + } 432 325 433 326 spin_lock_irqsave(&controller->lock, flags); 434 327 controller->error = error; ··· 443 332 return IRQ_HANDLED; 444 333 } 445 334 335 + static u32 336 + spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer) 337 + { 338 + struct spi_qup *qup = spi_master_get_devdata(master); 339 + u32 mode; 340 + 341 + qup->w_size = 4; 342 + 343 + if (xfer->bits_per_word <= 8) 344 + qup->w_size = 1; 345 + else if (xfer->bits_per_word <= 16) 346 + qup->w_size = 2; 347 + 348 + qup->n_words = xfer->len / qup->w_size; 349 + 350 + if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32))) 351 + mode = QUP_IO_M_MODE_FIFO; 352 + else 353 + mode = QUP_IO_M_MODE_BLOCK; 354 + 355 + return mode; 356 + } 446 357 447 358 /* set clock freq ... 
bits per word */ 448 359 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) 449 360 { 450 361 struct spi_qup *controller = spi_master_get_devdata(spi->master); 451 362 u32 config, iomode, mode, control; 452 - int ret, n_words, w_size; 363 + int ret, n_words; 453 364 454 365 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { 455 366 dev_err(controller->dev, "too big size for loopback %d > %d\n", ··· 491 358 return -EIO; 492 359 } 493 360 494 - w_size = 4; 495 - if (xfer->bits_per_word <= 8) 496 - w_size = 1; 497 - else if (xfer->bits_per_word <= 16) 498 - w_size = 2; 361 + mode = spi_qup_get_mode(spi->master, xfer); 362 + n_words = controller->n_words; 499 363 500 - n_words = xfer->len / w_size; 501 - controller->w_size = w_size; 502 - 503 - if (n_words <= (controller->in_fifo_sz / sizeof(u32))) { 504 - mode = QUP_IO_M_MODE_FIFO; 364 + if (mode == QUP_IO_M_MODE_FIFO) { 505 365 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); 506 366 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); 507 367 /* must be zero for FIFO */ 508 368 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); 509 369 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); 510 - } else { 511 - mode = QUP_IO_M_MODE_BLOCK; 370 + } else if (!controller->use_dma) { 512 371 writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT); 513 372 writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT); 514 373 /* must be zero for BLOCK and BAM */ 515 374 writel_relaxed(0, controller->base + QUP_MX_READ_CNT); 516 375 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); 376 + } else { 377 + mode = QUP_IO_M_MODE_BAM; 378 + writel_relaxed(0, controller->base + QUP_MX_READ_CNT); 379 + writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); 380 + 381 + if (!controller->qup_v1) { 382 + void __iomem *input_cnt; 383 + 384 + input_cnt = controller->base + QUP_MX_INPUT_CNT; 385 + /* 386 + * for DMA transfers, both QUP_MX_INPUT_CNT and 387 
+ * QUP_MX_OUTPUT_CNT must be zero to all cases but one. 388 + * That case is a non-balanced transfer when there is 389 + * only a rx_buf. 390 + */ 391 + if (xfer->tx_buf) 392 + writel_relaxed(0, input_cnt); 393 + else 394 + writel_relaxed(n_words, input_cnt); 395 + 396 + writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); 397 + } 517 398 } 518 399 519 400 iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); 520 401 /* Set input and output transfer mode */ 521 402 iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK); 522 - iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); 403 + 404 + if (!controller->use_dma) 405 + iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN); 406 + else 407 + iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN; 408 + 523 409 iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); 524 410 iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); 525 411 ··· 580 428 config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N); 581 429 config |= xfer->bits_per_word - 1; 582 430 config |= QUP_CONFIG_SPI_MODE; 431 + 432 + if (controller->use_dma) { 433 + if (!xfer->tx_buf) 434 + config |= QUP_CONFIG_NO_OUTPUT; 435 + if (!xfer->rx_buf) 436 + config |= QUP_CONFIG_NO_INPUT; 437 + } 438 + 583 439 writel_relaxed(config, controller->base + QUP_CONFIG); 584 440 585 441 /* only write to OPERATIONAL_MASK when register is present */ 586 - if (!controller->qup_v1) 587 - writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK); 442 + if (!controller->qup_v1) { 443 + u32 mask = 0; 444 + 445 + /* 446 + * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO 447 + * status change in BAM mode 448 + */ 449 + 450 + if (mode == QUP_IO_M_MODE_BAM) 451 + mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG; 452 + 453 + writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); 454 + } 455 + 588 456 return 0; 589 457 } 590 458 ··· 633 461 controller->tx_bytes = 0; 634 462 spin_unlock_irqrestore(&controller->lock, flags); 635 463 
636 - if (spi_qup_set_state(controller, QUP_STATE_RUN)) { 637 - dev_warn(controller->dev, "cannot set RUN state\n"); 638 - goto exit; 639 - } 464 + if (controller->use_dma) 465 + ret = spi_qup_do_dma(master, xfer); 466 + else 467 + ret = spi_qup_do_pio(master, xfer); 640 468 641 - if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) { 642 - dev_warn(controller->dev, "cannot set PAUSE state\n"); 469 + if (ret) 643 470 goto exit; 644 - } 645 - 646 - spi_qup_fifo_write(controller, xfer); 647 471 648 472 if (spi_qup_set_state(controller, QUP_STATE_RUN)) { 649 473 dev_warn(controller->dev, "cannot set EXECUTE state\n"); ··· 648 480 649 481 if (!wait_for_completion_timeout(&controller->done, timeout)) 650 482 ret = -ETIMEDOUT; 483 + 651 484 exit: 652 485 spi_qup_set_state(controller, QUP_STATE_RESET); 653 486 spin_lock_irqsave(&controller->lock, flags); ··· 656 487 if (!ret) 657 488 ret = controller->error; 658 489 spin_unlock_irqrestore(&controller->lock, flags); 490 + 491 + if (ret && controller->use_dma) 492 + spi_qup_dma_terminate(master, xfer); 493 + 494 + return ret; 495 + } 496 + 497 + static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi, 498 + struct spi_transfer *xfer) 499 + { 500 + struct spi_qup *qup = spi_master_get_devdata(master); 501 + size_t dma_align = dma_get_cache_alignment(); 502 + u32 mode; 503 + 504 + qup->use_dma = 0; 505 + 506 + if (xfer->rx_buf && (xfer->len % qup->in_blk_sz || 507 + IS_ERR_OR_NULL(master->dma_rx) || 508 + !IS_ALIGNED((size_t)xfer->rx_buf, dma_align))) 509 + return false; 510 + 511 + if (xfer->tx_buf && (xfer->len % qup->out_blk_sz || 512 + IS_ERR_OR_NULL(master->dma_tx) || 513 + !IS_ALIGNED((size_t)xfer->tx_buf, dma_align))) 514 + return false; 515 + 516 + mode = spi_qup_get_mode(master, xfer); 517 + if (mode == QUP_IO_M_MODE_FIFO) 518 + return false; 519 + 520 + qup->use_dma = 1; 521 + 522 + return true; 523 + } 524 + 525 + static void spi_qup_release_dma(struct spi_master *master) 526 + { 527 + if 
(!IS_ERR_OR_NULL(master->dma_rx)) 528 + dma_release_channel(master->dma_rx); 529 + if (!IS_ERR_OR_NULL(master->dma_tx)) 530 + dma_release_channel(master->dma_tx); 531 + } 532 + 533 + static int spi_qup_init_dma(struct spi_master *master, resource_size_t base) 534 + { 535 + struct spi_qup *spi = spi_master_get_devdata(master); 536 + struct dma_slave_config *rx_conf = &spi->rx_conf, 537 + *tx_conf = &spi->tx_conf; 538 + struct device *dev = spi->dev; 539 + int ret; 540 + 541 + /* allocate dma resources, if available */ 542 + master->dma_rx = dma_request_slave_channel_reason(dev, "rx"); 543 + if (IS_ERR(master->dma_rx)) 544 + return PTR_ERR(master->dma_rx); 545 + 546 + master->dma_tx = dma_request_slave_channel_reason(dev, "tx"); 547 + if (IS_ERR(master->dma_tx)) { 548 + ret = PTR_ERR(master->dma_tx); 549 + goto err_tx; 550 + } 551 + 552 + /* set DMA parameters */ 553 + rx_conf->direction = DMA_DEV_TO_MEM; 554 + rx_conf->device_fc = 1; 555 + rx_conf->src_addr = base + QUP_INPUT_FIFO; 556 + rx_conf->src_maxburst = spi->in_blk_sz; 557 + 558 + tx_conf->direction = DMA_MEM_TO_DEV; 559 + tx_conf->device_fc = 1; 560 + tx_conf->dst_addr = base + QUP_OUTPUT_FIFO; 561 + tx_conf->dst_maxburst = spi->out_blk_sz; 562 + 563 + ret = dmaengine_slave_config(master->dma_rx, rx_conf); 564 + if (ret) { 565 + dev_err(dev, "failed to configure RX channel\n"); 566 + goto err; 567 + } 568 + 569 + ret = dmaengine_slave_config(master->dma_tx, tx_conf); 570 + if (ret) { 571 + dev_err(dev, "failed to configure TX channel\n"); 572 + goto err; 573 + } 574 + 575 + return 0; 576 + 577 + err: 578 + dma_release_channel(master->dma_tx); 579 + err_tx: 580 + dma_release_channel(master->dma_rx); 659 581 return ret; 660 582 } 661 583 ··· 822 562 master->transfer_one = spi_qup_transfer_one; 823 563 master->dev.of_node = pdev->dev.of_node; 824 564 master->auto_runtime_pm = true; 565 + master->dma_alignment = dma_get_cache_alignment(); 566 + master->max_dma_len = SPI_MAX_DMA_XFER; 825 567 826 568 
platform_set_drvdata(pdev, master); 827 569 ··· 834 572 controller->iclk = iclk; 835 573 controller->cclk = cclk; 836 574 controller->irq = irq; 575 + 576 + ret = spi_qup_init_dma(master, res->start); 577 + if (ret == -EPROBE_DEFER) 578 + goto error; 579 + else if (!ret) 580 + master->can_dma = spi_qup_can_dma; 837 581 838 582 /* set v1 flag if device is version 1 */ 839 583 if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1")) ··· 877 609 ret = spi_qup_set_state(controller, QUP_STATE_RESET); 878 610 if (ret) { 879 611 dev_err(dev, "cannot set RESET state\n"); 880 - goto error; 612 + goto error_dma; 881 613 } 882 614 883 615 writel_relaxed(0, base + QUP_OPERATIONAL); ··· 901 633 ret = devm_request_irq(dev, irq, spi_qup_qup_irq, 902 634 IRQF_TRIGGER_HIGH, pdev->name, controller); 903 635 if (ret) 904 - goto error; 636 + goto error_dma; 905 637 906 638 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); 907 639 pm_runtime_use_autosuspend(dev); ··· 916 648 917 649 disable_pm: 918 650 pm_runtime_disable(&pdev->dev); 651 + error_dma: 652 + spi_qup_release_dma(master); 919 653 error: 920 654 clk_disable_unprepare(cclk); 921 655 clk_disable_unprepare(iclk); ··· 1008 738 ret = spi_qup_set_state(controller, QUP_STATE_RESET); 1009 739 if (ret) 1010 740 return ret; 741 + 742 + spi_qup_release_dma(master); 1011 743 1012 744 clk_disable_unprepare(controller->cclk); 1013 745 clk_disable_unprepare(controller->iclk);