author     Suresh Siddha <suresh.b.siddha@intel.com>    2008-10-16 16:31:54 -0700
committer  David Woodhouse <David.Woodhouse@intel.com>  2008-10-17 08:03:05 +0100
commit     f05810c9962bba3e809f07619bda1bfdebbfbfb9 (patch)
tree       d226f13a0d93cda208f9aea85d2a9ac29086f3e1 /drivers
parent     2e532d68a2b3e2aa6b19731501222069735c741c (diff)
dmar: use spin_lock_irqsave() in qi_submit_sync()
The next patch in the series will use the queued invalidation interface qi_submit_sync() for DMA remapping as well, which can be called from interrupt context. So use spin_lock_irqsave() instead of spin_lock() in qi_submit_sync().

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Youquan Song <youquan.song@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
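For context, a minimal sketch of the locking pattern the patch switches to (hypothetical struct cmd_queue and submit_cmd(), not the actual dmar.c code): a path that may be entered from interrupt context has to take its lock with spin_lock_irqsave(), otherwise an interrupt arriving on the same CPU could try to re-acquire a lock that CPU already holds and deadlock.

#include <linux/spinlock.h>

/* Hypothetical queue whose lock is also taken from IRQ context. */
struct cmd_queue {
	spinlock_t	lock;
	unsigned int	free_cnt;
};

/* May be called from process or interrupt context. */
static void submit_cmd(struct cmd_queue *q)
{
	unsigned long flags;

	/*
	 * Disable local interrupts and remember their previous state in
	 * 'flags'; an IRQ on this CPU can no longer preempt us and spin
	 * on q->lock while we hold it.
	 */
	spin_lock_irqsave(&q->lock, flags);

	/* ... place the descriptor, update q->free_cnt, ring the HW ... */

	spin_unlock_irqrestore(&q->lock, flags);
}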
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/pci/dmar.c  |  19
1 file changed, 13 insertions, 6 deletions
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index e842e756308a..b64cec190542 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -580,11 +580,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	hw = qi->desc;
-	spin_lock(&qi->q_lock);
+	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
-		spin_unlock(&qi->q_lock);
+		spin_unlock_irqrestore(&qi->q_lock, flags);
 		cpu_relax();
-		spin_lock(&qi->q_lock);
+		spin_lock_irqsave(&qi->q_lock, flags);
 	}
 	index = qi->free_head;
@@ -605,15 +605,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
-	spin_lock_irqsave(&iommu->register_lock, flags);
+	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock_irqrestore(&iommu->register_lock, flags);
+	spin_unlock(&iommu->register_lock);
 	while (qi->desc_status[wait_index] != QI_DONE) {
+		/*
+		 * We will leave the interrupts disabled, to prevent interrupt
+		 * context to queue another cmd while a cmd is already submitted
+		 * and waiting for completion on this cpu. This is to avoid
+		 * a deadlock where the interrupt context can wait indefinitely
+		 * for free slots in the queue.
+		 */
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
@@ -622,7 +629,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->desc_status[index] = QI_DONE;
 	reclaim_free_desc(qi);
-	spin_unlock(&qi->q_lock);
+	spin_unlock_irqrestore(&qi->q_lock, flags);
 }
 /*
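The comment added in the wait loop above is the heart of the change: while a descriptor is in flight the lock is dropped and re-taken around cpu_relax(), but local interrupts stay off for the whole submit-and-wait sequence, since an interrupt handler on this CPU that also called qi_submit_sync() could otherwise spin forever waiting for queue slots that only the interrupted submission can release. A rough sketch of that shape, again with hypothetical names and no claim to match the driver's exact queue handling:

#include <linux/spinlock.h>
#include <asm/processor.h>	/* cpu_relax() */

#define CMD_DONE	1

struct cmd_queue {
	spinlock_t	lock;
	int		status;	/* set to CMD_DONE by the completion path */
};

static void submit_and_wait(struct cmd_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);

	/* ... queue the command plus a wait descriptor ... */

	while (q->status != CMD_DONE) {
		/*
		 * Drop only the lock, not the IRQ-off state, so the
		 * completion path running elsewhere can make progress while
		 * this CPU stays closed to re-entrant submissions.
		 */
		spin_unlock(&q->lock);
		cpu_relax();
		spin_lock(&q->lock);
	}

	spin_unlock_irqrestore(&q->lock, flags);
}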