summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMagnus Kalland <magnus@dolphinics.com>2026-04-02 09:42:50 +0200
committerJoerg Roedel <joerg.roedel@amd.com>2026-04-02 11:42:45 +0200
commit5aac28784dca6819e96e5f93e644cdee59e50f6e (patch)
treeb7c737e5066b87e5ab5c9137f35fe964e5239065
parentfaad224fe0f0857a04ff2eb3c90f0de57f47d0f3 (diff)
iommu/amd: Invalidate IRT cache for DMA aliases
DMA aliasing causes interrupt remapping table entries (IRTEs) to be shared between multiple device IDs. See commit 3c124435e8dd ("iommu/amd: Support multiple PCI DMA aliases in IRQ Remapping") for more information on this.

However, the AMD IOMMU driver currently invalidates IRTE cache entries on a per-device basis whenever an IRTE is updated, not for each alias. This approach leaves stale IRTE cache entries when an IRTE is cached under one DMA alias but later updated and invalidated through a different alias. In such cases, the original device ID is never invalidated, since it is programmed via aliasing.

This incoherency bug has been observed when IRTEs are cached for one Non-Transparent Bridge (NTB) DMA alias, later updated via another.

Fix this by invalidating the interrupt remapping table cache for all DMA aliases when updating an IRTE.

Co-developed-by: Lars B. Kristiansen <larsk@dolphinics.com>
Signed-off-by: Lars B. Kristiansen <larsk@dolphinics.com>
Co-developed-by: Jonas Markussen <jonas@dolphinics.com>
Signed-off-by: Jonas Markussen <jonas@dolphinics.com>
Co-developed-by: Tore H. Larsen <torel@simula.no>
Signed-off-by: Tore H. Larsen <torel@simula.no>
Signed-off-by: Magnus Kalland <magnus@dolphinics.com>
Link: https://lore.kernel.org/linux-iommu/9204da81-f821-4034-b8ad-501e43383b56@amd.com/
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--drivers/iommu/amd/iommu.c28
1 file changed, 23 insertions, 5 deletions
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b6acb519fea5..340ae2150f91 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3167,26 +3167,44 @@ const struct iommu_ops amd_iommu_ops = {
static struct irq_chip amd_ir_chip;
static DEFINE_SPINLOCK(iommu_table_lock);
+static int iommu_flush_dev_irt(struct pci_dev *unused, u16 devid, void *data)
+{
+ int ret;
+ struct iommu_cmd cmd;
+ struct amd_iommu *iommu = data;
+
+ build_inv_irt(&cmd, devid);
+ ret = __iommu_queue_command_sync(iommu, &cmd, true);
+ return ret;
+}
+
static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
{
int ret;
u64 data;
unsigned long flags;
- struct iommu_cmd cmd, cmd2;
+ struct iommu_cmd cmd;
+ struct pci_dev *pdev = NULL;
+ struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
if (iommu->irtcachedis_enabled)
return;
- build_inv_irt(&cmd, devid);
+ if (dev_data && dev_data->dev && dev_is_pci(dev_data->dev))
+ pdev = to_pci_dev(dev_data->dev);
raw_spin_lock_irqsave(&iommu->lock, flags);
data = get_cmdsem_val(iommu);
- build_completion_wait(&cmd2, iommu, data);
+ build_completion_wait(&cmd, iommu, data);
- ret = __iommu_queue_command_sync(iommu, &cmd, true);
+ if (pdev)
+ ret = pci_for_each_dma_alias(pdev, iommu_flush_dev_irt, iommu);
+ else
+ ret = iommu_flush_dev_irt(NULL, devid, iommu);
if (ret)
goto out_err;
- ret = __iommu_queue_command_sync(iommu, &cmd2, false);
+
+ ret = __iommu_queue_command_sync(iommu, &cmd, false);
if (ret)
goto out_err;
raw_spin_unlock_irqrestore(&iommu->lock, flags);