diff options
author | Hiroshi Doyu <hdoyu@nvidia.com> | 2013-09-06 13:31:32 +0300 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2013-09-16 17:41:20 -0700 |
commit | e93755b1375becddbdeb2fc8a478a2277673308e (patch) | |
tree | bfb183a511e6bf64ff5c9719f9835a8bd925b3c1 /drivers/iommu | |
parent | 413820dee9865421530c7523170baa00251790ba (diff) |
iommu/tegra: smmu: smaller preempt latency for map_pages
Reduce preemption latency in map_pages by taking the lock per loop
iteration; the larger lock range gives no measurable perf improvement.
Bug 1290869
Change-Id: Ic7579fe9ffe89d01ad6e7fc3e18404b742b38b50
Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
Reviewed-on: http://git-master/r/271447
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 4ec354968df4..c44ad13be4f3 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -1125,7 +1125,6 @@ static int smmu_iommu_map_pages(struct iommu_domain *domain, unsigned long iova, { struct smmu_as *as = domain->priv; struct smmu_device *smmu = as->smmu; - unsigned long flags; u32 *pdir = page_address(as->pdir_page); int err = 0; unsigned long iova_base = iova; @@ -1137,8 +1136,6 @@ static int smmu_iommu_map_pages(struct iommu_domain *domain, unsigned long iova, else if (dma_get_attr(DMA_ATTR_WRITE_ONLY, (struct dma_attrs *)prot)) attrs &= ~_READABLE; - spin_lock_irqsave(&as->lock, flags); - while (total > 0) { int pdn = SMMU_ADDR_TO_PDN(iova); int ptn = SMMU_ADDR_TO_PTN(iova); @@ -1148,11 +1145,15 @@ static int smmu_iommu_map_pages(struct iommu_domain *domain, unsigned long iova, u32 *ptbl; u32 *pte; int i; + unsigned long flags; + + spin_lock_irqsave(&as->lock, flags); if (pdir[pdn] == _PDE_VACANT(pdn)) { tbl_page = alloc_ptbl(as, iova, !flush_all); if (!tbl_page) { err = -ENOMEM; + spin_unlock_irqrestore(&as->lock, flags); goto out; } @@ -1182,14 +1183,14 @@ skip: iova += PAGE_SIZE * count; total -= count; pages += count; + + spin_unlock_irqrestore(&as->lock, flags); } out: if (flush_all) flush_ptc_and_tlb_as(as, iova_base, iova_base + total * PAGE_SIZE); - - spin_unlock_irqrestore(&as->lock, flags); return err; } |