spi: tegra210-quad: Protect curr_xfer check in IRQ handler

Now that all other accesses to curr_xfer are done under the lock,
protect the curr_xfer NULL check in tegra_qspi_isr_thread() with the
spinlock. Without this protection, the following race can occur:

CPU0 (ISR thread)                     CPU1 (timeout path)
-----------------                     -------------------
if (!tqspi->curr_xfer)
    // sees non-NULL
                                      spin_lock()
                                      tqspi->curr_xfer = NULL
                                      spin_unlock()
handle_*_xfer()
    spin_lock()
    t = tqspi->curr_xfer   // NULL!
    ... t->len ...         // NULL dereference!

With this patch, all curr_xfer accesses are now properly synchronized.
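
As a rough illustration of why the check itself has to sit under the lock,
here is a minimal userspace sketch (pthreads standing in for the driver's
spinlock; curr_xfer, isr_thread and timeout_path are reused from the diagram
purely as labels, this is not the driver code). With the check done under the
lock, the thread either sees the transfer and finishes with it, or sees NULL
and bails out; there is no window for the timeout path to slip in between:

/* Minimal userspace sketch of the locked check (illustrative only). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct xfer { size_t len; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct xfer *curr_xfer;          /* shared, protected by "lock" */

/* CPU1 in the diagram: the timeout path tears the transfer down under the lock. */
static void *timeout_path(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        free(curr_xfer);
        curr_xfer = NULL;
        pthread_mutex_unlock(&lock);
        return NULL;
}

/*
 * CPU0 in the diagram: because the NULL check is done under the same lock,
 * the pointer cannot be cleared between the check and the use.
 */
static void *isr_thread(void *arg)
{
        struct xfer *t;

        (void)arg;
        pthread_mutex_lock(&lock);
        t = curr_xfer;
        if (!t) {                       /* already cleared by the timeout path */
                pthread_mutex_unlock(&lock);
                return NULL;
        }
        printf("handling transfer, len=%zu\n", t->len);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        curr_xfer = calloc(1, sizeof(*curr_xfer));
        if (!curr_xfer)
                return 1;
        curr_xfer->len = 64;

        pthread_create(&a, NULL, isr_thread, NULL);
        pthread_create(&b, NULL, timeout_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

Built with something like "gcc -pthread sketch.c", this prints either the
transfer length or nothing depending on scheduling, but it can never
dereference a NULL pointer.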

Although all accesses to curr_xfer are now done under the lock,
tegra_qspi_isr_thread() checks the pointer for NULL, releases the lock, and
only reacquires it later in handle_cpu_based_xfer()/handle_dma_based_xfer().
The timeout path can clear curr_xfer in that window, which would lead to a
NULL pointer dereference in the handlers.

To handle this, add a NULL check inside the handlers after acquiring
the lock. This ensures that if the timeout path has already cleared
curr_xfer, the handler will safely return without dereferencing the
NULL pointer.
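
Sketching that guard in the same userspace style (hypothetical names; the
real handlers use tqspi->lock and return IRQ_HANDLED): because the caller
dropped the lock after its own check, the handler must re-read and re-check
the pointer once it has the lock again, and back out if the transfer is
already gone.

/* Illustrative handler-side re-check (userspace analogue, not driver code). */
#include <pthread.h>
#include <stddef.h>

struct xfer { size_t len; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct xfer *curr_xfer;          /* cleared by the timeout path under "lock" */

/* Returns the transfer length, or 0 if the transfer was torn down meanwhile. */
size_t handle_xfer(void)
{
        struct xfer *t;
        size_t len;

        pthread_mutex_lock(&lock);
        t = curr_xfer;
        if (!t) {                       /* cleared while the caller did not hold the lock */
                pthread_mutex_unlock(&lock);
                return 0;               /* bail out; nothing left to dereference */
        }
        len = t->len;                   /* safe: checked under the same lock */
        pthread_mutex_unlock(&lock);
        return len;
}

This mirrors the "if (!t)" bail-out added to both handlers in the diff below.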

Fixes: b4e002d8a7ce ("spi: tegra210-quad: Fix timeout handling")
Signed-off-by: Breno Leitao <leitao@debian.org>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://patch.msgid.link/20260126-tegra_xfer-v2-6-6d2115e4f387@debian.org
Signed-off-by: Mark Brown <broonie@kernel.org>


drivers/spi/spi-tegra210-quad.c
···
        spin_lock_irqsave(&tqspi->lock, flags);
        t = tqspi->curr_xfer;

+       if (!t) {
+               spin_unlock_irqrestore(&tqspi->lock, flags);
+               return IRQ_HANDLED;
+       }
+
        if (tqspi->tx_status || tqspi->rx_status) {
                tegra_qspi_handle_error(tqspi);
                complete(&tqspi->xfer_completion);
···
        spin_lock_irqsave(&tqspi->lock, flags);
        t = tqspi->curr_xfer;

+       if (!t) {
+               spin_unlock_irqrestore(&tqspi->lock, flags);
+               return IRQ_HANDLED;
+       }
+
        if (num_errors) {
                tegra_qspi_dma_unmap_xfer(tqspi, t);
                tegra_qspi_handle_error(tqspi);
···
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
        struct tegra_qspi *tqspi = context_data;
+       unsigned long flags;
        u32 status;

        /*
···
         * If no transfer is in progress, check if this was a real interrupt
         * that the timeout handler already processed, or a spurious one.
         */
+       spin_lock_irqsave(&tqspi->lock, flags);
        if (!tqspi->curr_xfer) {
+               spin_unlock_irqrestore(&tqspi->lock, flags);
                /* Spurious interrupt - transfer not ready */
                if (!(status & QSPI_RDY))
                        return IRQ_NONE;
···
        tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);

        tegra_qspi_mask_clear_irq(tqspi);
+       spin_unlock_irqrestore(&tqspi->lock, flags);

+       /*
+        * Lock is released here but handlers safely re-check curr_xfer under
+        * lock before dereferencing.
+        * DMA handler also needs to sleep in wait_for_completion_*(), which
+        * cannot be done while holding spinlock.
+        */
        if (!tqspi->is_curr_dma_xfer)
                return handle_cpu_based_xfer(tqspi);