Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

spi: tegra210-quad: Add support for internal DMA

Add support for internal DMA in Tegra234 devices. Tegra234 has an
internal DMA controller, while Tegra241 continues to use an external
DMA controller (GPCDMA). This patch adds support for both internal
and external DMA controllers.

Signed-off-by: Vishwaroop A <va@nvidia.com>
Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
Link: https://patch.msgid.link/20250513200043.608292-2-va@nvidia.com
Signed-off-by: Mark Brown <broonie@kernel.org>

Authored by Vishwaroop A and committed by Mark Brown
017f1b0b 65cb56d4

+131 -94
drivers/spi/spi-tegra210-quad.c
··· 111 111 #define QSPI_DMA_BLK 0x024 112 112 #define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0) 113 113 114 + #define QSPI_DMA_MEM_ADDRESS 0x028 115 + #define QSPI_DMA_HI_ADDRESS 0x02c 116 + 114 117 #define QSPI_TX_FIFO 0x108 115 118 #define QSPI_RX_FIFO 0x188 116 119 ··· 170 167 }; 171 168 172 169 struct tegra_qspi_soc_data { 173 - bool has_dma; 174 170 bool cmb_xfer_capable; 175 171 bool supports_tpm; 172 + bool has_ext_dma; 176 173 unsigned int cs_count; 177 174 }; 178 175 ··· 608 605 609 606 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; 610 607 611 - dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); 612 - dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); 608 + if (t->tx_buf) 609 + dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); 610 + if (t->rx_buf) 611 + dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); 613 612 } 614 613 615 614 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t) 616 615 { 617 616 struct dma_slave_config dma_sconfig = { 0 }; 617 + dma_addr_t rx_dma_phys, tx_dma_phys; 618 618 unsigned int len; 619 619 u8 dma_burst; 620 620 int ret = 0; ··· 640 634 len = tqspi->curr_dma_words * 4; 641 635 642 636 /* set attention level based on length of transfer */ 643 - val = 0; 644 - if (len & 0xf) { 645 - val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; 646 - dma_burst = 1; 647 - } else if (((len) >> 4) & 0x1) { 648 - val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; 649 - dma_burst = 4; 650 - } else { 651 - val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; 652 - dma_burst = 8; 637 + if (tqspi->soc_data->has_ext_dma) { 638 + val = 0; 639 + if (len & 0xf) { 640 + val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; 641 + dma_burst = 1; 642 + } else if (((len) >> 4) & 0x1) { 643 + val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; 644 + dma_burst = 4; 645 + } else { 646 + val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; 647 + dma_burst = 8; 648 + } 649 + 650 + tegra_qspi_writel(tqspi, val, 
QSPI_DMA_CTL); 653 651 } 654 652 655 - tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); 656 653 tqspi->dma_control_reg = val; 657 654 658 655 dma_sconfig.device_fc = true; 659 - if (tqspi->cur_direction & DATA_DIR_TX) { 660 - dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; 661 - dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 662 - dma_sconfig.dst_maxburst = dma_burst; 663 - ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); 664 - if (ret < 0) { 665 - dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); 666 - return ret; 667 - } 668 656 669 - tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); 670 - ret = tegra_qspi_start_tx_dma(tqspi, t, len); 671 - if (ret < 0) { 672 - dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret); 673 - return ret; 657 + if (tqspi->cur_direction & DATA_DIR_TX) { 658 + if (tqspi->tx_dma_chan) { 659 + dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; 660 + dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 661 + dma_sconfig.dst_maxburst = dma_burst; 662 + ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); 663 + if (ret < 0) { 664 + dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); 665 + return ret; 666 + } 667 + 668 + tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); 669 + ret = tegra_qspi_start_tx_dma(tqspi, t, len); 670 + if (ret < 0) { 671 + dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret); 672 + return ret; 673 + } 674 + } else { 675 + if (tqspi->is_packed) 676 + tx_dma_phys = t->tx_dma; 677 + else 678 + tx_dma_phys = tqspi->tx_dma_phys; 679 + tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); 680 + tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys), 681 + QSPI_DMA_MEM_ADDRESS); 682 + tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff), 683 + QSPI_DMA_HI_ADDRESS); 674 684 } 675 685 } 676 686 677 687 if (tqspi->cur_direction & DATA_DIR_RX) { 678 - dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; 679 - dma_sconfig.src_addr_width = 
DMA_SLAVE_BUSWIDTH_4_BYTES; 680 - dma_sconfig.src_maxburst = dma_burst; 681 - ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); 682 - if (ret < 0) { 683 - dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); 684 - return ret; 685 - } 688 + if (tqspi->rx_dma_chan) { 689 + dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; 690 + dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 691 + dma_sconfig.src_maxburst = dma_burst; 692 + ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); 693 + if (ret < 0) { 694 + dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); 695 + return ret; 696 + } 686 697 687 - dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, 688 - tqspi->dma_buf_size, 689 - DMA_FROM_DEVICE); 698 + dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, 699 + tqspi->dma_buf_size, DMA_FROM_DEVICE); 700 + ret = tegra_qspi_start_rx_dma(tqspi, t, len); 701 + if (ret < 0) { 702 + dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); 703 + if (tqspi->cur_direction & DATA_DIR_TX) 704 + dmaengine_terminate_all(tqspi->tx_dma_chan); 705 + return ret; 706 + } 707 + } else { 708 + if (tqspi->is_packed) 709 + rx_dma_phys = t->rx_dma; 710 + else 711 + rx_dma_phys = tqspi->rx_dma_phys; 690 712 691 - ret = tegra_qspi_start_rx_dma(tqspi, t, len); 692 - if (ret < 0) { 693 - dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); 694 - if (tqspi->cur_direction & DATA_DIR_TX) 695 - dmaengine_terminate_all(tqspi->tx_dma_chan); 696 - return ret; 713 + tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys), 714 + QSPI_DMA_MEM_ADDRESS); 715 + tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff), 716 + QSPI_DMA_HI_ADDRESS); 697 717 } 698 718 } 699 719 ··· 758 726 759 727 static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi) 760 728 { 761 - if (!tqspi->soc_data->has_dma) 762 - return; 763 - 764 729 if (tqspi->tx_dma_buf) { 765 730 dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, 766 731 tqspi->tx_dma_buf, 
tqspi->tx_dma_phys); ··· 788 759 u32 *dma_buf; 789 760 int err; 790 761 791 - if (!tqspi->soc_data->has_dma) 792 - return 0; 762 + if (tqspi->soc_data->has_ext_dma) { 763 + dma_chan = dma_request_chan(tqspi->dev, "rx"); 764 + if (IS_ERR(dma_chan)) { 765 + err = PTR_ERR(dma_chan); 766 + goto err_out; 767 + } 793 768 794 - dma_chan = dma_request_chan(tqspi->dev, "rx"); 795 - if (IS_ERR(dma_chan)) { 796 - err = PTR_ERR(dma_chan); 797 - goto err_out; 769 + tqspi->rx_dma_chan = dma_chan; 770 + 771 + dma_chan = dma_request_chan(tqspi->dev, "tx"); 772 + if (IS_ERR(dma_chan)) { 773 + err = PTR_ERR(dma_chan); 774 + goto err_out; 775 + } 776 + 777 + tqspi->tx_dma_chan = dma_chan; 778 + } else { 779 + if (!device_iommu_mapped(tqspi->dev)) { 780 + dev_warn(tqspi->dev, 781 + "IOMMU not enabled in device-tree, falling back to PIO mode\n"); 782 + return 0; 783 + } 798 784 } 799 - 800 - tqspi->rx_dma_chan = dma_chan; 801 785 802 786 dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); 803 787 if (!dma_buf) { ··· 820 778 821 779 tqspi->rx_dma_buf = dma_buf; 822 780 tqspi->rx_dma_phys = dma_phys; 823 - 824 - dma_chan = dma_request_chan(tqspi->dev, "tx"); 825 - if (IS_ERR(dma_chan)) { 826 - err = PTR_ERR(dma_chan); 827 - goto err_out; 828 - } 829 - 830 - tqspi->tx_dma_chan = dma_chan; 831 781 832 782 dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); 833 783 if (!dma_buf) { ··· 1162 1128 if (WARN_ON_ONCE(ret == 0)) { 1163 1129 dev_err_ratelimited(tqspi->dev, 1164 1130 "QSPI Transfer failed with timeout\n"); 1165 - if (tqspi->is_curr_dma_xfer && 1166 - (tqspi->cur_direction & DATA_DIR_TX)) 1167 - dmaengine_terminate_all 1168 - (tqspi->tx_dma_chan); 1169 - 1170 - if (tqspi->is_curr_dma_xfer && 1171 - (tqspi->cur_direction & DATA_DIR_RX)) 1172 - dmaengine_terminate_all 1173 - (tqspi->rx_dma_chan); 1131 + if (tqspi->is_curr_dma_xfer) { 1132 + if ((tqspi->cur_direction & DATA_DIR_TX) && 1133 + tqspi->tx_dma_chan) 1134 + 
dmaengine_terminate_all(tqspi->tx_dma_chan); 1135 + if ((tqspi->cur_direction & DATA_DIR_RX) && 1136 + tqspi->rx_dma_chan) 1137 + dmaengine_terminate_all(tqspi->rx_dma_chan); 1138 + } 1174 1139 1175 1140 /* Abort transfer by resetting pio/dma bit */ 1176 1141 if (!tqspi->is_curr_dma_xfer) { ··· 1284 1251 QSPI_DMA_TIMEOUT); 1285 1252 if (WARN_ON(ret == 0)) { 1286 1253 dev_err(tqspi->dev, "transfer timeout\n"); 1287 - if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) 1288 - dmaengine_terminate_all(tqspi->tx_dma_chan); 1289 - if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX)) 1290 - dmaengine_terminate_all(tqspi->rx_dma_chan); 1254 + if (tqspi->is_curr_dma_xfer) { 1255 + if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan) 1256 + dmaengine_terminate_all(tqspi->tx_dma_chan); 1257 + if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan) 1258 + dmaengine_terminate_all(tqspi->rx_dma_chan); 1259 + } 1291 1260 tegra_qspi_handle_error(tqspi); 1292 1261 ret = -EIO; 1293 1262 goto complete_xfer; ··· 1358 1323 return false; 1359 1324 xfer = list_next_entry(xfer, transfer_list); 1360 1325 } 1361 - if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) 1326 + if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) 1362 1327 return false; 1363 1328 1364 1329 return true; ··· 1419 1384 unsigned int total_fifo_words; 1420 1385 unsigned long flags; 1421 1386 long wait_status; 1422 - int err = 0; 1387 + int num_errors = 0; 1423 1388 1424 1389 if (tqspi->cur_direction & DATA_DIR_TX) { 1425 1390 if (tqspi->tx_status) { 1426 - dmaengine_terminate_all(tqspi->tx_dma_chan); 1427 - err += 1; 1428 - } else { 1391 + if (tqspi->tx_dma_chan) 1392 + dmaengine_terminate_all(tqspi->tx_dma_chan); 1393 + num_errors++; 1394 + } else if (tqspi->tx_dma_chan) { 1429 1395 wait_status = wait_for_completion_interruptible_timeout( 1430 1396 &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT); 1431 1397 if (wait_status <= 0) { 1432 
1398 dmaengine_terminate_all(tqspi->tx_dma_chan); 1433 1399 dev_err(tqspi->dev, "failed TX DMA transfer\n"); 1434 - err += 1; 1400 + num_errors++; 1435 1401 } 1436 1402 } 1437 1403 } 1438 1404 1439 1405 if (tqspi->cur_direction & DATA_DIR_RX) { 1440 1406 if (tqspi->rx_status) { 1441 - dmaengine_terminate_all(tqspi->rx_dma_chan); 1442 - err += 2; 1443 - } else { 1407 + if (tqspi->rx_dma_chan) 1408 + dmaengine_terminate_all(tqspi->rx_dma_chan); 1409 + num_errors++; 1410 + } else if (tqspi->rx_dma_chan) { 1444 1411 wait_status = wait_for_completion_interruptible_timeout( 1445 1412 &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT); 1446 1413 if (wait_status <= 0) { 1447 1414 dmaengine_terminate_all(tqspi->rx_dma_chan); 1448 1415 dev_err(tqspi->dev, "failed RX DMA transfer\n"); 1449 - err += 2; 1416 + num_errors++; 1450 1417 } 1451 1418 } 1452 1419 } 1453 1420 1454 1421 spin_lock_irqsave(&tqspi->lock, flags); 1455 1422 1456 - if (err) { 1423 + if (num_errors) { 1457 1424 tegra_qspi_dma_unmap_xfer(tqspi, t); 1458 1425 tegra_qspi_handle_error(tqspi); 1459 1426 complete(&tqspi->xfer_completion); ··· 1481 1444 /* continue transfer in current message */ 1482 1445 total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t); 1483 1446 if (total_fifo_words > QSPI_FIFO_DEPTH) 1484 - err = tegra_qspi_start_dma_based_transfer(tqspi, t); 1447 + num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t); 1485 1448 else 1486 - err = tegra_qspi_start_cpu_based_transfer(tqspi, t); 1449 + num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t); 1487 1450 1488 1451 exit: 1489 1452 spin_unlock_irqrestore(&tqspi->lock, flags); ··· 1511 1474 } 1512 1475 1513 1476 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = { 1514 - .has_dma = true, 1477 + .has_ext_dma = true, 1515 1478 .cmb_xfer_capable = false, 1516 1479 .supports_tpm = false, 1517 1480 .cs_count = 1, 1518 1481 }; 1519 1482 1520 1483 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = { 1521 - .has_dma = true, 1484 
+ .has_ext_dma = true, 1522 1485 .cmb_xfer_capable = true, 1523 1486 .supports_tpm = false, 1524 1487 .cs_count = 1, 1525 1488 }; 1526 1489 1527 1490 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = { 1528 - .has_dma = false, 1491 + .has_ext_dma = false, 1529 1492 .cmb_xfer_capable = true, 1530 1493 .supports_tpm = true, 1531 1494 .cs_count = 1, 1532 1495 }; 1533 1496 1534 1497 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = { 1535 - .has_dma = false, 1498 + .has_ext_dma = true, 1536 1499 .cmb_xfer_capable = true, 1537 1500 .supports_tpm = true, 1538 1501 .cs_count = 4,