From afeeb7cebbd223ffee303fd8de4ba97458b13581 Mon Sep 17 00:00:00 2001
From: "Zhao, Yu"
Date: Fri, 13 Feb 2009 17:55:49 +0800
Subject: intel-iommu: Fix address wrap on 32-bit kernel.

The problem is in dma_pte_clear_range and dma_pte_free_pagetable. When
intel_unmap_single and intel_unmap_sg call them, the end address may be
zero if 'start_addr + size' rounds up, so no PTE gets cleared. The
uncleared PTE fires the BUG_ON when it's used again to create new
mappings.

After I modified dma_pte_clear_range a bit, the BUG_ON is gone.

Tested both 32-bit and 32-bit PAE modes on Intel X58 and Q35 platforms.

Signed-off-by: Yu Zhao
Signed-off-by: David Woodhouse
---
 drivers/pci/intel-iommu.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/pci/intel-iommu.c')

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 0c12d06bade6..002c8b95edf8 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -718,15 +718,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 {
 	int addr_width = agaw_to_width(domain->agaw);
+	int npages;
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 	/* in case it's partial page */
 	start = PAGE_ALIGN(start);
 	end &= PAGE_MASK;
+	npages = (end - start) / VTD_PAGE_SIZE;
 
 	/* we don't need lock here, nobody else touches the iova range */
-	while (start < end) {
+	while (npages--) {
 		dma_pte_clear_one(domain, start);
 		start += VTD_PAGE_SIZE;
 	}
--
cgit v1.2.3
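
A minimal user-space sketch of the wrap described in the commit message,
for illustration only (not part of the patch above). It assumes a 32-bit
dma_addr_t and a 4KB page size; all names are hypothetical stand-ins, not
the driver's. The old 'while (start < end)' comparison sees a wrapped end
of zero and clears nothing; iterating a page count instead (computed here
from the known size, for simplicity) avoids comparing against the wrapped
end address.

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096u		/* stand-in for VTD_PAGE_SIZE */

int main(void)
{
	/* Highest page below 4GB, as a top-down IOVA allocator on a
	 * 32-bit kernel hands out first. */
	uint32_t start = 0xfffff000u;
	uint32_t size = 0x1000u;

	/* 'start + size' rounds up past 2^32 and wraps to zero. */
	uint32_t end = start + size;
	printf("end = 0x%08x\n", (unsigned)end);		/* prints 0 */

	/* Old-style loop: condition is false immediately, so no
	 * page is cleared and a stale PTE is left behind. */
	int cleared = 0;
	for (uint32_t addr = start; addr < end; addr += FAKE_PAGE_SIZE)
		cleared++;
	printf("old loop cleared %d page(s)\n", cleared);	/* 0 */

	/* Counted loop in the spirit of the fix: iterate a page
	 * count rather than comparing against the wrapped end. */
	int npages = size / FAKE_PAGE_SIZE;
	while (npages--) {
		/* dma_pte_clear_one(domain, start) would go here */
		cleared++;
		start += FAKE_PAGE_SIZE;
	}
	printf("counted loop cleared %d page(s)\n", cleared);	/* 1 */
	return 0;
}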