author     David Woodhouse <David.Woodhouse@intel.com>  2009-08-08 11:25:28 +0100
committer  David Woodhouse <David.Woodhouse@intel.com>  2009-08-08 11:26:15 +0100
commit     a131bc185528331451a93db6c50a7d2070376a61 (patch)
tree       18cccd206d4835ee8df147ac3b0c0e30cc00680d /drivers/pci/intel-iommu.c
parent     19943b0e30b05d42e494ae6fef78156ebc8c637e (diff)
parent     ff1649ff780fb7c0bfbf42d05ffc9b56336b9aa3 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Pull fixes in from 2.6.31 so that people testing the iommu-2.6.git tree no longer trip over bugs which were already fixed (sorry, Horms).
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c  28
1 files changed, 16 insertions, 12 deletions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 147b3b960b61..3f256b8d83c1 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1503,7 +1503,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 		}
 
 		set_bit(num, iommu->domain_ids);
-		set_bit(iommu->seq_id, &domain->iommu_bmp);
 		iommu->domains[num] = domain;
 		id = num;
 	}
@@ -1646,6 +1645,14 @@ static int domain_context_mapped(struct pci_dev *pdev)
 				     tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
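aligned_nrpages() masks host_addr down to its offset within an MM page, rounds offset+size up to a whole number of MM pages, and returns that length counted in VT-d pages. A minimal userspace sketch of the same arithmetic, assuming 4KiB pages on both sides (PAGE_SHIFT == VTD_PAGE_SHIFT == 12, as on x86); the macros here are local stand-ins for the kernel ones:

#include <stdio.h>

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT 12

/* same arithmetic as the kernel helper above */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;	/* keep only the offset within an MM page */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", aligned_nrpages(0x1008, 0x100)); /* 1: fits in one page   */
	printf("%lu\n", aligned_nrpages(0x1fff, 2));     /* 2: spills over a page */
	return 0;
}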
@@ -1673,7 +1680,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
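The replaced expression rounded sg->offset + sg->length up to VT-d page granularity only; aligned_nrpages() first reduces the offset modulo the MM page size and rounds the span out to whole MM pages, matching how the IOVA range for the list was sized. On builds where the MM page is larger than the 4KiB VT-d page the two disagree. A hypothetical demo of the divergence, assuming a 16KiB PAGE_SIZE build:

#include <stdio.h>

#define PAGE_SHIFT     14		/* hypothetical 16KiB MM page build */
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1UL << VTD_PAGE_SHIFT)

int main(void)
{
	unsigned long offset = 0, length = 1;	/* a one-byte scatterlist entry */

	/* old: round up to VT-d pages only */
	unsigned long before = (offset + length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
	/* new: round out to whole MM pages, then count VT-d pages */
	unsigned long after = PAGE_ALIGN((offset & ~PAGE_MASK) + length) >> VTD_PAGE_SHIFT;

	printf("before=%lu after=%lu\n", before, after);	/* before=1 after=4 */
	return 0;
}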
@@ -2389,14 +2396,6 @@ error:
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
@@ -2539,6 +2538,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
@@ -2573,7 +2573,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
 
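Both arguments to domain_pfn_mapping() are DMA (VT-d) pfns. The IOVA side comes from mm_to_dma_pfn(iova->pfn_lo) and is therefore MM-page aligned, so the physical side should be derived the same way rather than by shifting paddr by VTD_PAGE_SHIFT directly. A sketch of the difference, assuming mm_to_dma_pfn() is the usual shift by (PAGE_SHIFT - VTD_PAGE_SHIFT) and a 16KiB MM page; all values are hypothetical:

#include <stdio.h>

#define PAGE_SHIFT     14	/* hypothetical 16KiB MM page build */
#define VTD_PAGE_SHIFT 12

/* assumed shape of the kernel helper: MM pfn -> VT-d pfn */
static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	unsigned long long paddr = 0x5000;	/* 4KiB into the second MM page */

	/* old: sub-MM-page bits leak into the VT-d pfn */
	printf("old=%#llx\n", paddr >> VTD_PAGE_SHIFT);           /* 0x5 */
	/* new: aligned down to the MM page, like the IOVA side */
	printf("new=%#lx\n", mm_to_dma_pfn(paddr >> PAGE_SHIFT)); /* 0x4 */
	return 0;
}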
@@ -2864,7 +2864,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
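Here `size` has already been accumulated in VT-d pages (via aligned_nrpages() over the scatterlist), so wrapping it in mm_to_dma_pfn() converted it a second time and over-counted the range to map. A small sketch of the double conversion, under the same assumed mm_to_dma_pfn() definition as above:

#include <stdio.h>

#define PAGE_SHIFT     14	/* hypothetical 16KiB MM page build */
#define VTD_PAGE_SHIFT 12

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	unsigned long size = 4;	/* already a VT-d page count */

	/* the removed wrapper scaled an already-converted count */
	printf("buggy=%lu fixed=%lu\n", mm_to_dma_pfn(size), size);	/* 16 vs 4 */
	return 0;
}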
@@ -3390,6 +3390,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
@@ -3582,6 +3583,9 @@ static void intel_iommu_unmap_range(struct iommu_domain *domain,
 				    unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 
+	if (!size)
+		return;
+
 	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
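Without the guard, a zero size underflows the unsigned last-pfn computation: iova + 0 - 1 wraps below iova (or to ~0 when iova is 0), handing dma_pte_clear_range() an inverted or enormous range. A userspace sketch of the wraparound, assuming 4KiB VT-d pages:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
	unsigned long long iova = 0x2000, size = 0;

	unsigned long long start = iova >> VTD_PAGE_SHIFT;
	unsigned long long last  = (iova + size - 1) >> VTD_PAGE_SHIFT;

	/* last lands below start; with iova == 0 it would be ~0 */
	printf("start=%#llx last=%#llx\n", start, last);	/* start=0x2 last=0x1 */
	return 0;
}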