author		David Woodhouse <David.Woodhouse@intel.com>	2009-06-28 11:55:58 +0100
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-06-29 13:33:59 +0100
commit		61df744314079e8cb8cdec75f517cf0e704e41ef (patch)
tree		55fd23fd2b1e909e203876d3d3b0d2eec879c675 /drivers/pci
parent		1c5a46ed49e37f56f8aa9000bb1c2ac59670c372 (diff)
intel-iommu: Introduce domain_pfn_mapping()
... and use it in the trivial cases; the other callers want individual
(and bisectable) attention, since I screwed them up the first time...

Make the BUG_ON() happen on too-large virtual address rather than
physical address, too. That's the one we care about.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
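For orientation, here is a small standalone sketch (an editor's illustration, not code from the patch) of how a byte-addressed mapping request becomes the pfn-based arguments that domain_pfn_mapping() takes, mirroring the new domain_page_mapping() wrapper in the diff below. VTD_PAGE_SHIFT is assumed to be the driver's 4KiB page shift; the example addresses are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12	/* 4KiB VT-d pages, as in intel-iommu.c */

int main(void)
{
	uint64_t iova = 0xfee00000;	/* example DMA virtual address */
	uint64_t hpa  = 0x80001000;	/* example host physical address */
	uint64_t size = 0x3000;		/* three pages */

	/* The wrapper's conversion: shift the addresses down to pfns
	 * and compute an inclusive page count. */
	uint64_t iov_pfn   = iova >> VTD_PAGE_SHIFT;
	uint64_t first_pfn = hpa >> VTD_PAGE_SHIFT;
	uint64_t last_pfn  = (hpa + size - 1) >> VTD_PAGE_SHIFT;
	uint64_t nr_pages  = last_pfn - first_pfn + 1;

	/* The relocated BUG_ON() now checks iov_pfn + nr_pages - 1,
	 * i.e. the *virtual* end of the range, against the domain's
	 * address width, since that is what indexes the page tables. */
	printf("iov_pfn=%#llx phys_pfn=%#llx nr_pages=%llu\n",
	       (unsigned long long)iov_pfn,
	       (unsigned long long)first_pfn,
	       (unsigned long long)nr_pages);
	return 0;
}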
Diffstat (limited to 'drivers/pci')
-rw-r--r--	drivers/pci/intel-iommu.c	38

1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f08d7865fe00..7540ef91d5f7 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1643,40 +1643,48 @@ static int domain_context_mapped(struct pci_dev *pdev)
 					     tmp->devfn);
 }
 
-static int
-domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
-		    u64 hpa, size_t size, int prot)
+static int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+			      unsigned long phys_pfn, unsigned long nr_pages,
+			      int prot)
 {
-	unsigned long start_pfn = hpa >> VTD_PAGE_SHIFT;
-	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;
 	struct dma_pte *pte;
-	int index = 0;
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
-	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
 
-	while (start_pfn <= last_pfn) {
-		pte = pfn_to_dma_pte(domain, (iova >> VTD_PAGE_SHIFT) + index);
+	while (nr_pages--) {
+		pte = pfn_to_dma_pte(domain, iov_pfn);
 		if (!pte)
 			return -ENOMEM;
 		/* We don't need lock here, nobody else
 		 * touches the iova range
 		 */
 		BUG_ON(dma_pte_addr(pte));
-		dma_set_pte_pfn(pte, start_pfn);
+		dma_set_pte_pfn(pte, phys_pfn);
 		dma_set_pte_prot(pte, prot);
 		if (prot & DMA_PTE_SNP)
 			dma_set_pte_snp(pte);
 		domain_flush_cache(domain, pte, sizeof(*pte));
-		start_pfn++;
-		index++;
+		iov_pfn++;
+		phys_pfn++;
 	}
 	return 0;
 }
 
+static int domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
+			       u64 hpa, size_t size, int prot)
+{
+	unsigned long first_pfn = hpa >> VTD_PAGE_SHIFT;
+	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;
+
+	return domain_pfn_mapping(domain, iova >> VTD_PAGE_SHIFT, first_pfn,
+				  last_pfn - first_pfn + 1, prot);
+
+}
+
 static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
 	if (!iommu)
@@ -1893,8 +1901,10 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
 	dma_pte_clear_range(domain, base >> VTD_PAGE_SHIFT,
 			    (base + size - 1) >> VTD_PAGE_SHIFT);
 
-	return domain_page_mapping(domain, base, base, size,
-				   DMA_PTE_READ|DMA_PTE_WRITE);
+	return domain_pfn_mapping(domain, base >> VTD_PAGE_SHIFT,
+				  base >> VTD_PAGE_SHIFT,
+				  size >> VTD_PAGE_SHIFT,
+				  DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
 static int iommu_prepare_identity_map(struct pci_dev *pdev,
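A closing note on the iommu_domain_identity_map() conversion above (an editor's sketch, not part of the patch): the old domain_page_mapping() computed an inclusive page count, last_pfn - first_pfn + 1, while the new call passes size >> VTD_PAGE_SHIFT directly. The two agree only when base and size are page-aligned, as they are on this path. A quick userspace check of that equivalence, again assuming the driver's 4KiB VTD_PAGE_SHIFT:

#include <assert.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1ULL << VTD_PAGE_SHIFT)

/* Inclusive page count, as domain_page_mapping() computes it. */
static uint64_t pages_inclusive(uint64_t base, uint64_t size)
{
	uint64_t first_pfn = base >> VTD_PAGE_SHIFT;
	uint64_t last_pfn  = (base + size - 1) >> VTD_PAGE_SHIFT;

	return last_pfn - first_pfn + 1;
}

int main(void)
{
	uint64_t base = 16 * VTD_PAGE_SIZE;
	uint64_t size = 3 * VTD_PAGE_SIZE;

	/* Page-aligned base and size: the shortcut matches. */
	assert(pages_inclusive(base, size) == size >> VTD_PAGE_SHIFT);

	/* An unaligned size would make the shortcut undercount by one. */
	assert(pages_inclusive(base, size + 1) == (size >> VTD_PAGE_SHIFT) + 1);
	return 0;
}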