author     Linus Torvalds <torvalds@linux-foundation.org>   2026-02-07 09:37:34 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2026-02-07 09:37:34 -0800
commit     e7aa57247700733e52a8e2e4dee6a52c2a76de02 (patch)
tree       e0ab6f0656284e0b8e17577e8e72cd7049c7cf40 /drivers
parent     142fdd7bb7095c114d027b1ee36878a67b869228 (diff)
parent     a0a75b40c919b9f6d3a0b6c978e6ccf344c1be5a (diff)

Merge tag 'spi-fix-v6.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi fixes from Mark Brown:
 "One final batch of fixes for the Tegra SPI drivers, the main one is a
  batch of fixes for races with the interrupts in the Tegra210 QSPI
  driver that Breno has been working on for a while"

* tag 'spi-fix-v6.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi: tegra114: Preserve SPI mode bits in def_command1_reg
  spi: tegra: Fix a memory leak in tegra_slink_probe()
  spi: tegra210-quad: Protect curr_xfer check in IRQ handler
  spi: tegra210-quad: Protect curr_xfer clearing in tegra_qspi_non_combined_seq_xfer
  spi: tegra210-quad: Protect curr_xfer in tegra_qspi_combined_seq_xfer
  spi: tegra210-quad: Protect curr_xfer assignment in tegra_qspi_setup_transfer_one
  spi: tegra210-quad: Move curr_xfer read inside spinlock
  spi: tegra210-quad: Return IRQ_HANDLED when timeout already processed transfer
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/spi/spi-tegra114.c        3
-rw-r--r--   drivers/spi/spi-tegra20-slink.c   6
-rw-r--r--   drivers/spi/spi-tegra210-quad.c  56
3 files changed, 59 insertions, 6 deletions
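
The common thread in the tegra210-quad patches below is that the IRQ thread and the transfer-timeout path both touch tqspi->curr_xfer, so the pointer has to be set, read and cleared under the same lock. A minimal userspace sketch of that pattern, with a pthread mutex standing in for the driver's spinlock and purely illustrative type and function names:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct xfer {
	int len;
};

struct ctrl {
	pthread_mutex_t lock;		/* plays the role of tqspi->lock */
	struct xfer *curr_xfer;		/* plays the role of tqspi->curr_xfer */
};

/* "IRQ thread" side: read and test curr_xfer only while holding the lock. */
static int handle_irq(struct ctrl *c)
{
	struct xfer *t;

	pthread_mutex_lock(&c->lock);
	t = c->curr_xfer;
	if (!t) {
		/* Timeout path already reaped the transfer: bail out early. */
		pthread_mutex_unlock(&c->lock);
		return 1;
	}
	printf("processing %d-byte transfer\n", t->len);
	pthread_mutex_unlock(&c->lock);
	return 0;
}

/* Timeout side: clear curr_xfer under the same lock, never outside it. */
static void handle_timeout(struct ctrl *c)
{
	pthread_mutex_lock(&c->lock);
	c->curr_xfer = NULL;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct xfer t = { .len = 64 };
	struct ctrl c = { .lock = PTHREAD_MUTEX_INITIALIZER, .curr_xfer = &t };

	handle_timeout(&c);	/* the timeout wins the race ... */
	handle_irq(&c);		/* ... and the late IRQ handler exits safely */
	return 0;
}

In the driver itself the late IRQ thread additionally distinguishes a spurious interrupt from one whose transfer the timeout handler already reaped; that part is shown in the last hunk of spi-tegra210-quad.c.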
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 795a8482c2c7..48fb11fea55f 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -978,11 +978,14 @@ static int tegra_spi_setup(struct spi_device *spi)
if (spi_get_csgpiod(spi, 0))
gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
+ /* Update default register to include CS polarity and SPI mode */
val = tspi->def_command1_reg;
if (spi->mode & SPI_CS_HIGH)
val &= ~SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
else
val |= SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
+ val &= ~SPI_CONTROL_MODE_MASK;
+ val |= SPI_MODE_SEL(spi->mode & 0x3);
tspi->def_command1_reg = val;
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
spin_unlock_irqrestore(&tspi->lock, flags);
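
The spi-tegra114.c hunk above is a plain read-modify-write of the cached SPI_COMMAND1 value: clear the controller's mode field first, then OR in the CPOL/CPHA bits taken from spi->mode, so a mode left over from an earlier setup cannot linger in def_command1_reg. A self-contained sketch of that bit manipulation, using placeholder mask and shift values rather than the real register layout:

#include <stdint.h>
#include <stdio.h>

#define MODE_SHIFT	28u
#define MODE_MASK	(0x3u << MODE_SHIFT)		/* placeholder field position */
#define MODE_SEL(x)	(((uint32_t)(x) & 0x3u) << MODE_SHIFT)

static uint32_t update_mode(uint32_t command1, unsigned int spi_mode)
{
	command1 &= ~MODE_MASK;				/* drop any previously set mode */
	command1 |= MODE_SEL(spi_mode & 0x3);		/* install CPOL/CPHA from spi->mode */
	return command1;
}

int main(void)
{
	uint32_t reg = MODE_SEL(3) | 0xffu;		/* stale mode 3 plus unrelated bits */

	reg = update_mode(reg, 0);			/* switch to SPI mode 0 */
	printf("command1 = 0x%08x\n", (unsigned int)reg); /* unrelated bits kept: 0x000000ff */
	return 0;
}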
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index fe452d03c1ee..709669610840 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1086,8 +1086,10 @@ static int tegra_slink_probe(struct platform_device *pdev)
reset_control_deassert(tspi->rst);
spi_irq = platform_get_irq(pdev, 0);
- if (spi_irq < 0)
- return spi_irq;
+ if (spi_irq < 0) {
+ ret = spi_irq;
+ goto exit_pm_put;
+ }
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
tegra_slink_isr_thread, IRQF_ONESHOT,
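
The spi-tegra20-slink.c hunk above swaps an early return for a jump to the existing unwind label, so that whatever probe has already set up (the commit message calls this out as a memory leak) is released when platform_get_irq() fails. A compact userspace sketch of that goto-unwind pattern, with hypothetical acquire()/release()/get_irq() helpers standing in for the driver's real setup, cleanup and IRQ lookup calls:

#include <stdio.h>

static int acquire(void)  { printf("acquire\n"); return 0; }
static void release(void) { printf("release\n"); }
static int get_irq(void)  { return -1; /* simulate a platform_get_irq() failure */ }

static int fake_probe(void)
{
	int ret;

	ret = acquire();		/* resource taken early in probe */
	if (ret)
		return ret;

	ret = get_irq();
	if (ret < 0)
		goto exit_release;	/* a bare "return ret" here would leak the acquire */

	/* ... rest of probe would go here ... */
	return 0;

exit_release:
	release();
	return ret;
}

int main(void)
{
	printf("probe returned %d\n", fake_probe());
	return 0;
}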
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index cdc3cb7c01f9..f425d62e0c27 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -839,6 +839,7 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
u32 command1, command2, speed = t->speed_hz;
u8 bits_per_word = t->bits_per_word;
u32 tx_tap = 0, rx_tap = 0;
+ unsigned long flags;
int req_mode;
if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
@@ -846,10 +847,12 @@ static u32 tegra_qspi_setup_transfer_one(struct spi_device *spi, struct spi_tran
tqspi->cur_speed = speed;
}
+ spin_lock_irqsave(&tqspi->lock, flags);
tqspi->cur_pos = 0;
tqspi->cur_rx_pos = 0;
tqspi->cur_tx_pos = 0;
tqspi->curr_xfer = t;
+ spin_unlock_irqrestore(&tqspi->lock, flags);
if (is_first_of_msg) {
tegra_qspi_mask_clear_irq(tqspi);
@@ -1158,6 +1161,7 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
u32 address_value = 0;
u32 cmd_config = 0, addr_config = 0;
u8 cmd_value = 0, val = 0;
+ unsigned long flags;
/* Enable Combined sequence mode */
val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
@@ -1261,13 +1265,17 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
tegra_qspi_transfer_end(spi);
spi_transfer_delay_exec(xfer);
}
+ spin_lock_irqsave(&tqspi->lock, flags);
tqspi->curr_xfer = NULL;
+ spin_unlock_irqrestore(&tqspi->lock, flags);
transfer_phase++;
}
ret = 0;
exit:
+ spin_lock_irqsave(&tqspi->lock, flags);
tqspi->curr_xfer = NULL;
+ spin_unlock_irqrestore(&tqspi->lock, flags);
msg->status = ret;
return ret;
@@ -1280,6 +1288,7 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_transfer *transfer;
bool is_first_msg = true;
int ret = 0, val = 0;
+ unsigned long flags;
msg->status = 0;
msg->actual_length = 0;
@@ -1360,7 +1369,9 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
msg->actual_length += xfer->len + dummy_bytes;
complete_xfer:
+ spin_lock_irqsave(&tqspi->lock, flags);
tqspi->curr_xfer = NULL;
+ spin_unlock_irqrestore(&tqspi->lock, flags);
if (ret < 0) {
tegra_qspi_transfer_end(spi);
@@ -1440,10 +1451,16 @@ static int tegra_qspi_transfer_one_message(struct spi_controller *host,
static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
{
- struct spi_transfer *t = tqspi->curr_xfer;
+ struct spi_transfer *t;
unsigned long flags;
spin_lock_irqsave(&tqspi->lock, flags);
+ t = tqspi->curr_xfer;
+
+ if (!t) {
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ return IRQ_HANDLED;
+ }
if (tqspi->tx_status || tqspi->rx_status) {
tegra_qspi_handle_error(tqspi);
@@ -1474,7 +1491,7 @@ exit:
static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
{
- struct spi_transfer *t = tqspi->curr_xfer;
+ struct spi_transfer *t;
unsigned int total_fifo_words;
unsigned long flags;
long wait_status;
@@ -1513,6 +1530,12 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
}
spin_lock_irqsave(&tqspi->lock, flags);
+ t = tqspi->curr_xfer;
+
+ if (!t) {
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ return IRQ_HANDLED;
+ }
if (num_errors) {
tegra_qspi_dma_unmap_xfer(tqspi, t);
@@ -1552,15 +1575,33 @@ exit:
static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
{
struct tegra_qspi *tqspi = context_data;
+ unsigned long flags;
+ u32 status;
+
+ /*
+ * Read transfer status to check if interrupt was triggered by transfer
+ * completion
+ */
+ status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
/*
* Occasionally the IRQ thread takes a long time to wake up (usually
* when the CPU that it's running on is excessively busy) and we have
* already reached the timeout before and cleaned up the timed out
* transfer. Avoid any processing in that case and bail out early.
+ *
+ * If no transfer is in progress, check if this was a real interrupt
+ * that the timeout handler already processed, or a spurious one.
*/
- if (!tqspi->curr_xfer)
- return IRQ_NONE;
+ spin_lock_irqsave(&tqspi->lock, flags);
+ if (!tqspi->curr_xfer) {
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ /* Spurious interrupt - transfer not ready */
+ if (!(status & QSPI_RDY))
+ return IRQ_NONE;
+ /* Real interrupt, already handled by timeout path */
+ return IRQ_HANDLED;
+ }
tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
@@ -1571,7 +1612,14 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
tegra_qspi_mask_clear_irq(tqspi);
+ spin_unlock_irqrestore(&tqspi->lock, flags);
+ /*
+ * Lock is released here but handlers safely re-check curr_xfer under
+ * lock before dereferencing.
+ * DMA handler also needs to sleep in wait_for_completion_*(), which
+ * cannot be done while holding spinlock.
+ */
if (!tqspi->is_curr_dma_xfer)
return handle_cpu_based_xfer(tqspi);
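
The last hunk shown has the ISR thread sample QSPI_TRANS_STATUS before taking the lock so that, when curr_xfer is already NULL, it can tell a spurious interrupt (IRQ_NONE) from a real completion that the timeout path has already processed (IRQ_HANDLED). A small sketch of that classification, with a placeholder ready bit standing in for QSPI_RDY:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

#define XFER_READY	(1u << 1)	/* placeholder for the QSPI_RDY status bit */

static enum irqreturn classify_late_irq(uint32_t trans_status, bool have_xfer)
{
	if (have_xfer)
		return IRQ_HANDLED;	/* normal case: the transfer is still ours */

	if (!(trans_status & XFER_READY))
		return IRQ_NONE;	/* nothing completed: spurious interrupt */

	return IRQ_HANDLED;		/* completed, but the timeout path already cleaned up */
}

int main(void)
{
	printf("%d\n", classify_late_irq(0, false));		/* prints 0 (IRQ_NONE)    */
	printf("%d\n", classify_late_irq(XFER_READY, false));	/* prints 1 (IRQ_HANDLED) */
	return 0;
}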