| author | Breno Leitao <leitao@debian.org> | 2026-01-26 09:50:31 -0800 |
|---|---|---|
| committer | Mark Brown <broonie@kernel.org> | 2026-01-30 13:53:17 +0000 |
| commit | edf9088b6e1d6d88982db7eb5e736a0e4fbcc09e (patch) | |
| tree | b4c89ce8e5c376175767acc477f88e32822d704f | |
| parent | 6d7723e8161f3c3f14125557e19dd080e9d882be (diff) | |
spi: tegra210-quad: Protect curr_xfer check in IRQ handler
Now that all other accesses to curr_xfer are done under the lock,
protect the curr_xfer NULL check in tegra_qspi_isr_thread() with the
spinlock. Without this protection, the following race can occur:
  CPU0 (ISR thread)                  CPU1 (timeout path)
  -----------------                  -------------------
  if (!tqspi->curr_xfer)
    // sees non-NULL
                                     spin_lock()
                                     tqspi->curr_xfer = NULL
                                     spin_unlock()
  handle_*_xfer()
    spin_lock()
    t = tqspi->curr_xfer    // NULL!
    ... t->len ...          // NULL dereference!
With this patch, all curr_xfer accesses are properly synchronized.

Note that even though every access to curr_xfer is now done under the
lock, tegra_qspi_isr_thread() checks it for NULL, releases the lock, and
only reacquires it later in handle_cpu_based_xfer()/handle_dma_based_xfer().
The timeout path can clear curr_xfer in that window, which would still
lead to a NULL pointer dereference.
To handle this, add a NULL check inside the handlers after acquiring
the lock. This ensures that if the timeout path has already cleared
curr_xfer, the handler will safely return without dereferencing the
NULL pointer.
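For illustration, the pattern the handlers now follow can be sketched as
below. This is a condensed, hypothetical example rather than the driver
code itself: the helper names qspi_timeout_path()/qspi_handle_xfer() and
the trimmed-down struct are made up for the sketch, while the lock and
curr_xfer fields mirror the ones in struct tegra_qspi.

```c
/*
 * Condensed sketch of the re-check-under-lock pattern (not the literal
 * driver code). Both the timeout path and the IRQ thread touch curr_xfer
 * only while holding the lock.
 */
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

struct tegra_qspi_sketch {
	spinlock_t lock;
	struct spi_transfer *curr_xfer;
};

/* Timeout path (simplified): abandon the transfer under the lock. */
static void qspi_timeout_path(struct tegra_qspi_sketch *tqspi)
{
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);
	tqspi->curr_xfer = NULL;
	spin_unlock_irqrestore(&tqspi->lock, flags);
}

/* Handler side (simplified): re-check curr_xfer after taking the lock. */
static irqreturn_t qspi_handle_xfer(struct tegra_qspi_sketch *tqspi)
{
	struct spi_transfer *t;
	unsigned long flags;

	spin_lock_irqsave(&tqspi->lock, flags);
	t = tqspi->curr_xfer;
	if (!t) {
		/* Timeout already cleared it; nothing to dereference. */
		spin_unlock_irqrestore(&tqspi->lock, flags);
		return IRQ_HANDLED;
	}
	/* t is valid here; fields such as t->len can be read safely. */
	spin_unlock_irqrestore(&tqspi->lock, flags);
	return IRQ_HANDLED;
}
```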
Fixes: b4e002d8a7ce ("spi: tegra210-quad: Fix timeout handling")
Signed-off-by: Breno Leitao <leitao@debian.org>
Tested-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Jon Hunter <jonathanh@nvidia.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://patch.msgid.link/20260126-tegra_xfer-v2-6-6d2115e4f387@debian.org
Signed-off-by: Mark Brown <broonie@kernel.org>
| -rw-r--r-- | drivers/spi/spi-tegra210-quad.c | 20 |
1 files changed, 20 insertions, 0 deletions
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 79aeb80aa4a7..f425d62e0c27 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1457,6 +1457,11 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
 	spin_lock_irqsave(&tqspi->lock, flags);
 	t = tqspi->curr_xfer;
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
+
 	if (tqspi->tx_status || tqspi->rx_status) {
 		tegra_qspi_handle_error(tqspi);
 		complete(&tqspi->xfer_completion);
@@ -1527,6 +1532,11 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	spin_lock_irqsave(&tqspi->lock, flags);
 	t = tqspi->curr_xfer;
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
+
 	if (num_errors) {
 		tegra_qspi_dma_unmap_xfer(tqspi, t);
 		tegra_qspi_handle_error(tqspi);
@@ -1565,6 +1575,7 @@ exit:
 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 {
 	struct tegra_qspi *tqspi = context_data;
+	unsigned long flags;
 	u32 status;
 
 	/*
@@ -1582,7 +1593,9 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 	 * If no transfer is in progress, check if this was a real interrupt
 	 * that the timeout handler already processed, or a spurious one.
 	 */
+	spin_lock_irqsave(&tqspi->lock, flags);
 	if (!tqspi->curr_xfer) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
 		/* Spurious interrupt - transfer not ready */
 		if (!(status & QSPI_RDY))
 			return IRQ_NONE;
@@ -1599,7 +1612,14 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 	tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
 
 	tegra_qspi_mask_clear_irq(tqspi);
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 
+	/*
+	 * Lock is released here but handlers safely re-check curr_xfer under
+	 * lock before dereferencing.
+	 * DMA handler also needs to sleep in wait_for_completion_*(), which
+	 * cannot be done while holding spinlock.
+	 */
 	if (!tqspi->is_curr_dma_xfer)
 		return handle_cpu_based_xfer(tqspi);
