Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge series "spi: stm32: various fixes & cleanup" from Alain Volmat <alain.volmat@foss.st.com>:

This series contains fixes & cleanups, mainly regarding the FIFO
and the way the end of transfer is triggered, when used with or
without DMA.
An additional patch cleans up the pm_runtime calls and another
one enables the autosuspend.

v2: - split pm_runtime fix patch into two
- correct revert commit subject line

Alain Volmat (6):
spi: stm32: fixes pm_runtime calls in probe/remove
spi: stm32: enable pm_runtime autosuspend
spi: stm32h7: fix full duplex irq handler handling
spi: stm32: Revert "properly handle 0 byte transfer"
spi: stm32h7: don't wait for EOT and flush fifo on disable
spi: stm32: finalize message either on dma callback or EOT

Amelie Delaunay (1):
spi: stm32h7: rework rx fifo read function

drivers/spi/spi-stm32.c | 146 +++++++++++++++++-----------------------
1 file changed, 61 insertions(+), 85 deletions(-)

--
2.25.1

+44 -78
+44 -78
drivers/spi/spi-stm32.c
··· 162 162 #define SPI_3WIRE_TX 3 163 163 #define SPI_3WIRE_RX 4 164 164 165 + #define STM32_SPI_AUTOSUSPEND_DELAY 1 /* 1 ms */ 166 + 165 167 /* 166 168 * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers 167 169 * without fifo buffers. ··· 570 568 /** 571 569 * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register 572 570 * @spi: pointer to the spi controller data structure 573 - * @flush: boolean indicating that FIFO should be flushed 574 571 * 575 572 * Write in rx_buf depends on remaining bytes to avoid to write beyond 576 573 * rx_buf end. 577 574 */ 578 - static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush) 575 + static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi) 579 576 { 580 577 u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR); 581 578 u32 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr); 582 579 583 580 while ((spi->rx_len > 0) && 584 581 ((sr & STM32H7_SPI_SR_RXP) || 585 - (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) { 582 + ((sr & STM32H7_SPI_SR_EOT) && 583 + ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) { 586 584 u32 offs = spi->cur_xferlen - spi->rx_len; 587 585 588 586 if ((spi->rx_len >= sizeof(u32)) || 589 - (flush && (sr & STM32H7_SPI_SR_RXWNE))) { 587 + (sr & STM32H7_SPI_SR_RXWNE)) { 590 588 u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs); 591 589 592 590 *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR); 593 591 spi->rx_len -= sizeof(u32); 594 592 } else if ((spi->rx_len >= sizeof(u16)) || 595 - (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) { 593 + (!(sr & STM32H7_SPI_SR_RXWNE) && 594 + (rxplvl >= 2 || spi->cur_bpw > 8))) { 596 595 u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); 597 596 598 597 *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR); ··· 609 606 rxplvl = FIELD_GET(STM32H7_SPI_SR_RXPLVL, sr); 610 607 } 611 608 612 - dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__, 613 - flush ? 
"(flush)" : "", spi->rx_len); 609 + dev_dbg(spi->dev, "%s: %d bytes left (sr=%08x)\n", 610 + __func__, spi->rx_len, sr); 614 611 } 615 612 616 613 /** ··· 677 674 * stm32h7_spi_disable - Disable SPI controller 678 675 * @spi: pointer to the spi controller data structure 679 676 * 680 - * RX-Fifo is flushed when SPI controller is disabled. To prevent any data 681 - * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in 682 - * RX-Fifo. 683 - * Normally, if TSIZE has been configured, we should relax the hardware at the 684 - * reception of the EOT interrupt. But in case of error, EOT will not be 685 - * raised. So the subsystem unprepare_message call allows us to properly 686 - * complete the transfer from an hardware point of view. 677 + * RX-Fifo is flushed when SPI controller is disabled. 687 678 */ 688 679 static void stm32h7_spi_disable(struct stm32_spi *spi) 689 680 { 690 681 unsigned long flags; 691 - u32 cr1, sr; 682 + u32 cr1; 692 683 693 684 dev_dbg(spi->dev, "disable controller\n"); 694 685 ··· 694 697 spin_unlock_irqrestore(&spi->lock, flags); 695 698 return; 696 699 } 697 - 698 - /* Wait on EOT or suspend the flow */ 699 - if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR, 700 - sr, !(sr & STM32H7_SPI_SR_EOT), 701 - 10, 100000) < 0) { 702 - if (cr1 & STM32H7_SPI_CR1_CSTART) { 703 - writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP, 704 - spi->base + STM32H7_SPI_CR1); 705 - if (readl_relaxed_poll_timeout_atomic( 706 - spi->base + STM32H7_SPI_SR, 707 - sr, !(sr & STM32H7_SPI_SR_SUSP), 708 - 10, 100000) < 0) 709 - dev_warn(spi->dev, 710 - "Suspend request timeout\n"); 711 - } 712 - } 713 - 714 - if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0)) 715 - stm32h7_spi_read_rxfifo(spi, true); 716 700 717 701 if (spi->cur_usedma && spi->dma_tx) 718 702 dmaengine_terminate_all(spi->dma_tx); ··· 889 911 if (__ratelimit(&rs)) 890 912 dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); 891 913 if (!spi->cur_usedma && 
(spi->rx_buf && (spi->rx_len > 0))) 892 - stm32h7_spi_read_rxfifo(spi, false); 914 + stm32h7_spi_read_rxfifo(spi); 893 915 /* 894 916 * If communication is suspended while using DMA, it means 895 917 * that something went wrong, so stop the current transfer ··· 910 932 911 933 if (sr & STM32H7_SPI_SR_EOT) { 912 934 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 913 - stm32h7_spi_read_rxfifo(spi, true); 914 - end = true; 935 + stm32h7_spi_read_rxfifo(spi); 936 + if (!spi->cur_usedma || 937 + (spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX || 938 + spi->cur_comm == SPI_3WIRE_TX))) 939 + end = true; 915 940 } 916 941 917 942 if (sr & STM32H7_SPI_SR_TXP) ··· 923 942 924 943 if (sr & STM32H7_SPI_SR_RXP) 925 944 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 926 - stm32h7_spi_read_rxfifo(spi, false); 945 + stm32h7_spi_read_rxfifo(spi); 927 946 928 947 writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR); 929 948 ··· 1022 1041 } 1023 1042 1024 1043 /** 1025 - * stm32f4_spi_dma_rx_cb - dma callback 1044 + * stm32_spi_dma_rx_cb - dma callback 1026 1045 * @data: pointer to the spi controller data structure 1027 1046 * 1028 1047 * DMA callback is called when the transfer is complete for DMA RX channel. 1029 1048 */ 1030 - static void stm32f4_spi_dma_rx_cb(void *data) 1049 + static void stm32_spi_dma_rx_cb(void *data) 1031 1050 { 1032 1051 struct stm32_spi *spi = data; 1033 1052 1034 1053 spi_finalize_current_transfer(spi->master); 1035 - stm32f4_spi_disable(spi); 1036 - } 1037 - 1038 - /** 1039 - * stm32h7_spi_dma_cb - dma callback 1040 - * @data: pointer to the spi controller data structure 1041 - * 1042 - * DMA callback is called when the transfer is complete or when an error 1043 - * occurs. If the transfer is complete, EOT flag is raised. 
1044 - */ 1045 - static void stm32h7_spi_dma_cb(void *data) 1046 - { 1047 - struct stm32_spi *spi = data; 1048 - unsigned long flags; 1049 - u32 sr; 1050 - 1051 - spin_lock_irqsave(&spi->lock, flags); 1052 - 1053 - sr = readl_relaxed(spi->base + STM32H7_SPI_SR); 1054 - 1055 - spin_unlock_irqrestore(&spi->lock, flags); 1056 - 1057 - if (!(sr & STM32H7_SPI_SR_EOT)) 1058 - dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr); 1059 - 1060 - /* Now wait for EOT, or SUSP or OVR in case of error */ 1054 + spi->cfg->disable(spi); 1061 1055 } 1062 1056 1063 1057 /** ··· 1198 1242 */ 1199 1243 static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) 1200 1244 { 1201 - /* Enable the interrupts relative to the end of transfer */ 1202 - stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE | 1203 - STM32H7_SPI_IER_TXTFIE | 1204 - STM32H7_SPI_IER_OVRIE | 1205 - STM32H7_SPI_IER_MODFIE); 1245 + uint32_t ier = STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE; 1246 + 1247 + /* Enable the interrupts */ 1248 + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) 1249 + ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE; 1250 + 1251 + stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier); 1206 1252 1207 1253 stm32_spi_enable(spi); 1208 1254 ··· 1603 1645 struct stm32_spi *spi = spi_master_get_devdata(master); 1604 1646 int ret; 1605 1647 1606 - /* Don't do anything on 0 bytes transfers */ 1607 - if (transfer->len == 0) 1608 - return 0; 1609 - 1610 1648 spi->tx_buf = transfer->tx_buf; 1611 1649 spi->rx_buf = transfer->rx_buf; 1612 1650 spi->tx_len = spi->tx_buf ? 
transfer->len : 0; ··· 1716 1762 .set_mode = stm32f4_spi_set_mode, 1717 1763 .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start, 1718 1764 .dma_tx_cb = stm32f4_spi_dma_tx_cb, 1719 - .dma_rx_cb = stm32f4_spi_dma_rx_cb, 1765 + .dma_rx_cb = stm32_spi_dma_rx_cb, 1720 1766 .transfer_one_irq = stm32f4_spi_transfer_one_irq, 1721 1767 .irq_handler_event = stm32f4_spi_irq_event, 1722 1768 .irq_handler_thread = stm32f4_spi_irq_thread, ··· 1736 1782 .set_data_idleness = stm32h7_spi_data_idleness, 1737 1783 .set_number_of_data = stm32h7_spi_number_of_data, 1738 1784 .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start, 1739 - .dma_rx_cb = stm32h7_spi_dma_cb, 1740 - .dma_tx_cb = stm32h7_spi_dma_cb, 1785 + .dma_rx_cb = stm32_spi_dma_rx_cb, 1786 + /* 1787 + * dma_tx_cb is not necessary since in case of TX, dma is followed by 1788 + * SPI access hence handling is performed within the SPI interrupt 1789 + */ 1741 1790 .transfer_one_irq = stm32h7_spi_transfer_one_irq, 1742 1791 .irq_handler_thread = stm32h7_spi_irq_thread, 1743 1792 .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, ··· 1884 1927 if (spi->dma_tx || spi->dma_rx) 1885 1928 master->can_dma = stm32_spi_can_dma; 1886 1929 1930 + pm_runtime_set_autosuspend_delay(&pdev->dev, 1931 + STM32_SPI_AUTOSUSPEND_DELAY); 1932 + pm_runtime_use_autosuspend(&pdev->dev); 1887 1933 pm_runtime_set_active(&pdev->dev); 1888 1934 pm_runtime_get_noresume(&pdev->dev); 1889 1935 pm_runtime_enable(&pdev->dev); ··· 1898 1938 goto err_pm_disable; 1899 1939 } 1900 1940 1941 + pm_runtime_mark_last_busy(&pdev->dev); 1942 + pm_runtime_put_autosuspend(&pdev->dev); 1943 + 1901 1944 dev_info(&pdev->dev, "driver initialized\n"); 1902 1945 1903 1946 return 0; ··· 1909 1946 pm_runtime_disable(&pdev->dev); 1910 1947 pm_runtime_put_noidle(&pdev->dev); 1911 1948 pm_runtime_set_suspended(&pdev->dev); 1949 + pm_runtime_dont_use_autosuspend(&pdev->dev); 1912 1950 err_dma_release: 1913 1951 if (spi->dma_tx) 1914 1952 dma_release_channel(spi->dma_tx); 
··· 1934 1970 pm_runtime_disable(&pdev->dev); 1935 1971 pm_runtime_put_noidle(&pdev->dev); 1936 1972 pm_runtime_set_suspended(&pdev->dev); 1973 + pm_runtime_dont_use_autosuspend(&pdev->dev); 1974 + 1937 1975 if (master->dma_tx) 1938 1976 dma_release_channel(master->dma_tx); 1939 1977 if (master->dma_rx)