author | David S. Miller <davem@huronp11.davemloft.net> | 2008-02-06 03:50:26 -0800
committer | David S. Miller <davem@davemloft.net> | 2008-02-06 04:12:25 -0800
commit | 38192d52f159bc06b7f523800c10b583cdd661d5 (patch)
tree | 4cf695d583c0a657133642c0299cbfa536e25663 /arch/sparc64/kernel/pci_sun4v.c
parent | b3ff81dd8ae29ec431f6cc91aff601a51ef6fb8c (diff)
[SPARC64]: Temporarily remove IOMMU merging code.
Changeset fde6a3c82d67f592eb587be4d12222b0ae6d4321 ("iommu sg merging:
sparc64: make iommu respect the segment size limits") broke sparc64
because, whilst it added the segment-limiting code to the first pass of
SG mapping (in prepare_sg()), it did not add matching code to the
second pass (in fill_sg()).

As a result, the two passes disagree about where the segment boundaries
should be, resulting in OOPSes, DMA corruption, and corrupted
superblocks.
Signed-off-by: David S. Miller <davem@davemloft.net>
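
The failure mode is easy to reproduce in miniature. The following standalone C model is illustrative only: the entry lengths, the 8192-byte limit, and the merging rule are invented for the example rather than taken from the patch. It shows how applying a segment-size limit in one pass but not the other makes the two passes count different numbers of DMA segments for the same scatterlist:

#include <stdio.h>

/* Illustrative model only: three physically contiguous scatterlist
 * entries of 4096 bytes each, and an 8192-byte segment size limit.
 * None of these numbers comes from the patch itself. */
#define NENTS 3

int main(void)
{
        unsigned long len[NENTS] = { 4096UL, 4096UL, 4096UL };
        unsigned long max_seg = 8192UL; /* limit honored by pass 1 only */
        unsigned long seg_bytes = 0UL;
        int pass1_segs = 0, pass2_segs = 1, i;

        /* Pass 1 (prepare_sg() with the segment limit): close the
         * current DMA segment whenever adding the next entry would
         * exceed the limit. */
        for (i = 0; i < NENTS; i++) {
                if (seg_bytes + len[i] > max_seg) {
                        pass1_segs++;
                        seg_bytes = 0UL;
                }
                seg_bytes += len[i];
        }
        pass1_segs++;   /* close the final segment */

        /* Pass 2 (fill_sg() without the limit) merges all contiguous
         * entries into a single DMA segment, hence pass2_segs = 1. */

        printf("pass 1 sees %d segments, pass 2 sees %d\n",
               pass1_segs, pass2_segs); /* 2 vs. 1: the passes disagree */
        return 0;
}

With the counts out of step, the second pass writes IOMMU translations at slots the first pass never allocated, which is exactly the corruption described above.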
Diffstat (limited to 'arch/sparc64/kernel/pci_sun4v.c')
-rw-r--r-- | arch/sparc64/kernel/pci_sun4v.c | 170
1 files changed, 39 insertions, 131 deletions
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 5ea2eab1ccda..61baf8dc095e 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -365,113 +365,14 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
         spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG)        (__pa(sg_virt((SG))))
-
-static long fill_sg(long entry, struct device *dev,
-                    struct scatterlist *sg,
-                    int nused, int nelems, unsigned long prot)
-{
-        struct scatterlist *dma_sg = sg;
-        unsigned long flags;
-        int i;
-
-        local_irq_save(flags);
-
-        iommu_batch_start(dev, prot, entry);
-
-        for (i = 0; i < nused; i++) {
-                unsigned long pteval = ~0UL;
-                u32 dma_npages;
-
-                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
-                              dma_sg->dma_length +
-                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
-                do {
-                        unsigned long offset;
-                        signed int len;
-
-                        /* If we are here, we know we have at least one
-                         * more page to map.  So walk forward until we
-                         * hit a page crossing, and begin creating new
-                         * mappings from that spot.
-                         */
-                        for (;;) {
-                                unsigned long tmp;
-
-                                tmp = SG_ENT_PHYS_ADDRESS(sg);
-                                len = sg->length;
-                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
-                                        pteval = tmp & IO_PAGE_MASK;
-                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
-                                        break;
-                                }
-                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
-                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
-                                        offset = 0UL;
-                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
-                                        break;
-                                }
-                                sg = sg_next(sg);
-                                nelems--;
-                        }
-
-                        pteval = (pteval & IOPTE_PAGE);
-                        while (len > 0) {
-                                long err;
-
-                                err = iommu_batch_add(pteval);
-                                if (unlikely(err < 0L))
-                                        goto iommu_map_failed;
-
-                                pteval += IO_PAGE_SIZE;
-                                len -= (IO_PAGE_SIZE - offset);
-                                offset = 0;
-                                dma_npages--;
-                        }
-
-                        pteval = (pteval & IOPTE_PAGE) + len;
-                        sg = sg_next(sg);
-                        nelems--;
-
-                        /* Skip over any tail mappings we've fully mapped,
-                         * adjusting pteval along the way.  Stop when we
-                         * detect a page crossing event.
-                         */
-                        while (nelems &&
-                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
-                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
-                               ((pteval ^
-                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
-                                pteval += sg->length;
-                                sg = sg_next(sg);
-                                nelems--;
-                        }
-                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
-                                pteval = ~0UL;
-                } while (dma_npages != 0);
-                dma_sg = sg_next(dma_sg);
-        }
-
-        if (unlikely(iommu_batch_end() < 0L))
-                goto iommu_map_failed;
-
-        local_irq_restore(flags);
-        return 0;
-
-iommu_map_failed:
-        local_irq_restore(flags);
-        return -1L;
-}
-
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nelems, enum dma_data_direction direction)
 {
+        unsigned long flags, npages, i, prot;
+        struct scatterlist *sg;
         struct iommu *iommu;
-        unsigned long flags, npages, prot;
-        u32 dma_base;
-        struct scatterlist *sgtmp;
         long entry, err;
-        int used;
+        u32 dma_base;
 
         /* Fast path single entry scatterlists. */
         if (nelems == 1) {
@@ -489,10 +390,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
         if (unlikely(direction == DMA_NONE))
                 goto bad;
 
-        /* Step 1: Prepare scatter list. */
-        npages = prepare_sg(dev, sglist, nelems);
+        npages = calc_npages(sglist, nelems);
 
-        /* Step 2: Allocate a cluster and context, if necessary. */
         spin_lock_irqsave(&iommu->lock, flags);
         entry = arena_alloc(&iommu->arena, npages);
         spin_unlock_irqrestore(&iommu->lock, flags);
@@ -503,27 +402,45 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
         dma_base = iommu->page_table_map_base +
                 (entry << IO_PAGE_SHIFT);
 
-        /* Step 3: Normalize DMA addresses. */
-        used = nelems;
-
-        sgtmp = sglist;
-        while (used && sgtmp->dma_length) {
-                sgtmp->dma_address += dma_base;
-                sgtmp = sg_next(sgtmp);
-                used--;
-        }
-        used = nelems - used;
-
-        /* Step 4: Create the mappings. */
         prot = HV_PCI_MAP_ATTR_READ;
         if (direction != DMA_TO_DEVICE)
                 prot |= HV_PCI_MAP_ATTR_WRITE;
 
-        err = fill_sg(entry, dev, sglist, used, nelems, prot);
+        local_irq_save(flags);
+
+        iommu_batch_start(dev, prot, entry);
+
+        for_each_sg(sglist, sg, nelems, i) {
+                unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+                unsigned long slen = sg->length;
+                unsigned long this_npages;
+
+                this_npages = iommu_num_pages(paddr, slen);
+
+                sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
+                sg->dma_length = slen;
+
+                paddr &= IO_PAGE_MASK;
+                while (this_npages--) {
+                        err = iommu_batch_add(paddr);
+                        if (unlikely(err < 0L)) {
+                                local_irq_restore(flags);
+                                goto iommu_map_failed;
+                        }
+
+                        paddr += IO_PAGE_SIZE;
+                        dma_base += IO_PAGE_SIZE;
+                }
+        }
+
+        err = iommu_batch_end();
+
+        local_irq_restore(flags);
+
         if (unlikely(err < 0L))
                 goto iommu_map_failed;
 
-        return used;
+        return nelems;
 
 bad:
         if (printk_ratelimit())
@@ -541,12 +458,11 @@ iommu_map_failed:
 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                             int nelems, enum dma_data_direction direction)
 {
+        unsigned long flags, npages;
         struct pci_pbm_info *pbm;
+        u32 devhandle, bus_addr;
         struct iommu *iommu;
-        unsigned long flags, i, npages;
-        struct scatterlist *sg, *sgprv;
         long entry;
-        u32 devhandle, bus_addr;
 
         if (unlikely(direction == DMA_NONE)) {
                 if (printk_ratelimit())
@@ -558,16 +474,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
         devhandle = pbm->devhandle;
 
         bus_addr = sglist->dma_address & IO_PAGE_MASK;
-        sgprv = NULL;
-        for_each_sg(sglist, sg, nelems, i) {
-                if (sg->dma_length == 0)
-                        break;
-
-                sgprv = sg;
-        }
-        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
-                  bus_addr) >> IO_PAGE_SHIFT;
+        npages = calc_npages(sglist, nelems);
 
         entry = ((bus_addr - iommu->page_table_map_base) >>
                  IO_PAGE_SHIFT);
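
Both dma_4v_map_sg() and dma_4v_unmap_sg() now take their page counts from calc_npages(), which is not defined in this file. The following standalone C sketch shows the arithmetic plausibly behind it, assuming sparc64's 8 KB I/O pages; the macro definitions and the main() harness are illustrative, not the kernel's. iommu_num_pages() counts the I/O pages spanned by the byte range [paddr, paddr + slen), and calc_npages() would simply sum that over every scatterlist entry:

#include <stdio.h>

/* sparc64 IOMMU page geometry: 8 KB I/O pages (IO_PAGE_SHIFT = 13). */
#define IO_PAGE_SHIFT   13UL
#define IO_PAGE_SIZE    (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK    (~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(addr)     (((addr) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

/* I/O pages spanned by [paddr, paddr + slen): align the end up,
 * truncate the start down, divide by the page size. */
static unsigned long iommu_num_pages(unsigned long paddr, unsigned long slen)
{
        return (IO_PAGE_ALIGN(paddr + slen) - (paddr & IO_PAGE_MASK))
                >> IO_PAGE_SHIFT;
}

int main(void)
{
        /* A 100-byte buffer straddling a page boundary needs two entries. */
        printf("%lu\n", iommu_num_pages(IO_PAGE_SIZE - 50UL, 100UL)); /* 2 */

        /* An aligned, exactly page-sized buffer needs one. */
        printf("%lu\n", iommu_num_pages(0UL, IO_PAGE_SIZE));          /* 1 */
        return 0;
}

Since the mapping loop emits exactly iommu_num_pages(paddr, slen) translations per entry, an arena allocation sized by the same sum cannot disagree with the fill pass, which is precisely the two-pass mismatch this patch removes.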