Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: uniphier: Add DMA transfer mode support

This adds DMA transfer mode support for UniPhier SPI controller.

Since this controller requires simultaneous transmission and reception,
this sets SPI_CONTROLLER_MUST_RX and SPI_CONTROLLER_MUST_TX.

Because the supported DMA controller has an alignment restriction,
there is also a restriction that the 'maxburst' parameter in dma_slave_config
must correspond to one word width.

Signed-off-by: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
Link: https://lore.kernel.org/r/1577149107-30670-6-git-send-email-hayashi.kunihiko@socionext.com
Signed-off-by: Mark Brown <broonie@kernel.org>

authored by

Kunihiko Hayashi and committed by
Mark Brown
28d1dddc 790514ed

+198 -2
+198 -2
drivers/spi/spi-uniphier.c
··· 8 8 #include <linux/bitops.h> 9 9 #include <linux/clk.h> 10 10 #include <linux/delay.h> 11 + #include <linux/dmaengine.h> 11 12 #include <linux/interrupt.h> 12 13 #include <linux/io.h> 13 14 #include <linux/module.h> ··· 24 23 25 24 struct uniphier_spi_priv { 26 25 void __iomem *base; 26 + dma_addr_t base_dma_addr; 27 27 struct clk *clk; 28 28 struct spi_master *master; 29 29 struct completion xfer_done; ··· 34 32 unsigned int rx_bytes; 35 33 const u8 *tx_buf; 36 34 u8 *rx_buf; 35 + atomic_t dma_busy; 37 36 38 37 bool is_save_param; 39 38 u8 bits_per_word; ··· 64 61 #define SSI_FPS_FSTRT BIT(14) 65 62 66 63 #define SSI_SR 0x14 64 + #define SSI_SR_BUSY BIT(7) 67 65 #define SSI_SR_RNE BIT(0) 68 66 69 67 #define SSI_IE 0x18 68 + #define SSI_IE_TCIE BIT(4) 70 69 #define SSI_IE_RCIE BIT(3) 70 + #define SSI_IE_TXRE BIT(2) 71 + #define SSI_IE_RXRE BIT(1) 71 72 #define SSI_IE_RORIE BIT(0) 73 + #define SSI_IE_ALL_MASK GENMASK(4, 0) 72 74 73 75 #define SSI_IS 0x1c 74 76 #define SSI_IS_RXRS BIT(9) ··· 95 87 #define SSI_RXDR 0x24 96 88 97 89 #define SSI_FIFO_DEPTH 8U 90 + #define SSI_FIFO_BURST_NUM 1 91 + 92 + #define SSI_DMA_RX_BUSY BIT(1) 93 + #define SSI_DMA_TX_BUSY BIT(0) 98 94 99 95 static inline unsigned int bytes_per_word(unsigned int bits) 100 96 { ··· 346 334 writel(val, priv->base + SSI_FPS); 347 335 } 348 336 337 + static bool uniphier_spi_can_dma(struct spi_master *master, 338 + struct spi_device *spi, 339 + struct spi_transfer *t) 340 + { 341 + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); 342 + unsigned int bpw = bytes_per_word(priv->bits_per_word); 343 + 344 + if ((!master->dma_tx && !master->dma_rx) 345 + || (!master->dma_tx && t->tx_buf) 346 + || (!master->dma_rx && t->rx_buf)) 347 + return false; 348 + 349 + return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH; 350 + } 351 + 352 + static void uniphier_spi_dma_rxcb(void *data) 353 + { 354 + struct spi_master *master = data; 355 + struct uniphier_spi_priv *priv = 
spi_master_get_devdata(master); 356 + int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); 357 + 358 + uniphier_spi_irq_disable(priv, SSI_IE_RXRE); 359 + 360 + if (!(state & SSI_DMA_TX_BUSY)) 361 + spi_finalize_current_transfer(master); 362 + } 363 + 364 + static void uniphier_spi_dma_txcb(void *data) 365 + { 366 + struct spi_master *master = data; 367 + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); 368 + int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); 369 + 370 + uniphier_spi_irq_disable(priv, SSI_IE_TXRE); 371 + 372 + if (!(state & SSI_DMA_RX_BUSY)) 373 + spi_finalize_current_transfer(master); 374 + } 375 + 376 + static int uniphier_spi_transfer_one_dma(struct spi_master *master, 377 + struct spi_device *spi, 378 + struct spi_transfer *t) 379 + { 380 + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); 381 + struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; 382 + int buswidth; 383 + 384 + atomic_set(&priv->dma_busy, 0); 385 + 386 + uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM); 387 + 388 + if (priv->bits_per_word <= 8) 389 + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; 390 + else if (priv->bits_per_word <= 16) 391 + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; 392 + else 393 + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; 394 + 395 + if (priv->rx_buf) { 396 + struct dma_slave_config rxconf = { 397 + .direction = DMA_DEV_TO_MEM, 398 + .src_addr = priv->base_dma_addr + SSI_RXDR, 399 + .src_addr_width = buswidth, 400 + .src_maxburst = SSI_FIFO_BURST_NUM, 401 + }; 402 + 403 + dmaengine_slave_config(master->dma_rx, &rxconf); 404 + 405 + rxdesc = dmaengine_prep_slave_sg( 406 + master->dma_rx, 407 + t->rx_sg.sgl, t->rx_sg.nents, 408 + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 409 + if (!rxdesc) 410 + goto out_err_prep; 411 + 412 + rxdesc->callback = uniphier_spi_dma_rxcb; 413 + rxdesc->callback_param = master; 414 + 415 + uniphier_spi_irq_enable(priv, SSI_IE_RXRE); 416 + 
atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy); 417 + 418 + dmaengine_submit(rxdesc); 419 + dma_async_issue_pending(master->dma_rx); 420 + } 421 + 422 + if (priv->tx_buf) { 423 + struct dma_slave_config txconf = { 424 + .direction = DMA_MEM_TO_DEV, 425 + .dst_addr = priv->base_dma_addr + SSI_TXDR, 426 + .dst_addr_width = buswidth, 427 + .dst_maxburst = SSI_FIFO_BURST_NUM, 428 + }; 429 + 430 + dmaengine_slave_config(master->dma_tx, &txconf); 431 + 432 + txdesc = dmaengine_prep_slave_sg( 433 + master->dma_tx, 434 + t->tx_sg.sgl, t->tx_sg.nents, 435 + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 436 + if (!txdesc) 437 + goto out_err_prep; 438 + 439 + txdesc->callback = uniphier_spi_dma_txcb; 440 + txdesc->callback_param = master; 441 + 442 + uniphier_spi_irq_enable(priv, SSI_IE_TXRE); 443 + atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy); 444 + 445 + dmaengine_submit(txdesc); 446 + dma_async_issue_pending(master->dma_tx); 447 + } 448 + 449 + /* signal that we need to wait for completion */ 450 + return (priv->tx_buf || priv->rx_buf); 451 + 452 + out_err_prep: 453 + if (rxdesc) 454 + dmaengine_terminate_sync(master->dma_rx); 455 + 456 + return -EINVAL; 457 + } 458 + 349 459 static int uniphier_spi_transfer_one_irq(struct spi_master *master, 350 460 struct spi_device *spi, 351 461 struct spi_transfer *t) ··· 529 395 { 530 396 struct uniphier_spi_priv *priv = spi_master_get_devdata(master); 531 397 unsigned long threshold; 398 + bool use_dma; 532 399 533 400 /* Terminate and return success for 0 byte length transfer */ 534 401 if (!t->len) 535 402 return 0; 536 403 537 404 uniphier_spi_setup_transfer(spi, t); 405 + 406 + use_dma = master->can_dma ? 
master->can_dma(master, spi, t) : false; 407 + if (use_dma) 408 + return uniphier_spi_transfer_one_dma(master, spi, t); 538 409 539 410 /* 540 411 * If the transfer operation will take longer than ··· 584 445 val = SSI_FC_TXFFL | SSI_FC_RXFFL; 585 446 writel(val, priv->base + SSI_FC); 586 447 587 - uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE); 448 + uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK); 449 + 450 + if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) { 451 + dmaengine_terminate_async(master->dma_tx); 452 + atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); 453 + } 454 + 455 + if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) { 456 + dmaengine_terminate_async(master->dma_rx); 457 + atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); 458 + } 588 459 } 589 460 590 461 static irqreturn_t uniphier_spi_handler(int irq, void *dev_id) ··· 642 493 { 643 494 struct uniphier_spi_priv *priv; 644 495 struct spi_master *master; 496 + struct resource *res; 497 + struct dma_slave_caps caps; 498 + u32 dma_tx_burst = 0, dma_rx_burst = 0; 645 499 unsigned long clk_rate; 646 500 int irq; 647 501 int ret; ··· 659 507 priv->master = master; 660 508 priv->is_save_param = false; 661 509 662 - priv->base = devm_platform_ioremap_resource(pdev, 0); 510 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 511 + priv->base = devm_ioremap_resource(&pdev->dev, res); 663 512 if (IS_ERR(priv->base)) { 664 513 ret = PTR_ERR(priv->base); 665 514 goto out_master_put; 666 515 } 516 + priv->base_dma_addr = res->start; 667 517 668 518 priv->clk = devm_clk_get(&pdev->dev, NULL); 669 519 if (IS_ERR(priv->clk)) { ··· 709 555 master->unprepare_transfer_hardware 710 556 = uniphier_spi_unprepare_transfer_hardware; 711 557 master->handle_err = uniphier_spi_handle_err; 558 + master->can_dma = uniphier_spi_can_dma; 559 + 712 560 master->num_chipselect = 1; 561 + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; 562 + 563 + master->dma_tx = dma_request_chan(&pdev->dev, 
"tx"); 564 + if (IS_ERR_OR_NULL(master->dma_tx)) { 565 + if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) 566 + goto out_disable_clk; 567 + master->dma_tx = NULL; 568 + dma_tx_burst = INT_MAX; 569 + } else { 570 + ret = dma_get_slave_caps(master->dma_tx, &caps); 571 + if (ret) { 572 + dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", 573 + ret); 574 + goto out_disable_clk; 575 + } 576 + dma_tx_burst = caps.max_burst; 577 + } 578 + 579 + master->dma_rx = dma_request_chan(&pdev->dev, "rx"); 580 + if (IS_ERR_OR_NULL(master->dma_rx)) { 581 + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) 582 + goto out_disable_clk; 583 + master->dma_rx = NULL; 584 + dma_rx_burst = INT_MAX; 585 + } else { 586 + ret = dma_get_slave_caps(master->dma_rx, &caps); 587 + if (ret) { 588 + dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", 589 + ret); 590 + goto out_disable_clk; 591 + } 592 + dma_rx_burst = caps.max_burst; 593 + } 594 + 595 + master->max_dma_len = min(dma_tx_burst, dma_rx_burst); 713 596 714 597 ret = devm_spi_register_master(&pdev->dev, master); 715 598 if (ret) ··· 765 574 static int uniphier_spi_remove(struct platform_device *pdev) 766 575 { 767 576 struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); 577 + 578 + if (priv->master->dma_tx) 579 + dma_release_channel(priv->master->dma_tx); 580 + if (priv->master->dma_rx) 581 + dma_release_channel(priv->master->dma_rx); 768 582 769 583 clk_disable_unprepare(priv->clk); 770 584