author | Sowmini Varadhan <sowmini.varadhan@oracle.com> | 2015-04-09 15:33:31 -0400
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2015-04-18 12:32:59 -0700
commit | bb620c3d3925aec0ed4f21010c86df08ec18a8c7 (patch) |
tree | f3adfa12c95f9aa6dd844fd47fd486332bbd0a2e | /arch/sparc/kernel/pci_sun4v.c
parent | ff7d37a502022149655c18035b99a53391be0383 (diff) |
sparc: Make sparc64 use scalable lib/iommu-common.c functions
iperf experiments running Linux as the Tx side (TCP client) with
10 threads show a severe performance drop when TSO is disabled,
pointing to a software bottleneck (the single IOMMU allocator lock)
that can be avoided by using the scalable IOMMU arena DMA allocation.
Baseline numbers before this patch:
with default settings (TSO enabled): 9-9.5 Gbps
with TSO disabled via ethtool: drops badly, to 2-3 Gbps
After this patch, an iperf client with 10 threads can sustain a
throughput of at least 8.5 Gbps, even when TSO is disabled.
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
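
The scalability win comes from replacing one allocator-wide spinlock with a set of pools that are locked independently, so map/unmap calls issued on different CPUs rarely contend. The userspace sketch below (see also the before/after summary following the diff) illustrates only that idea; the pool count, next-fit policy, and pool-selection hash are invented for the example and are not the lib/iommu-common.c implementation.

/* Illustrative userspace model of a pooled bitmap allocator; the real
 * kernel code lives in lib/iommu-common.c and differs in detail.
 * Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NPOOLS    4             /* hypothetical pool count */
#define POOL_BITS 1024          /* entries per pool */

struct pool {
        pthread_mutex_t lock;   /* per-pool lock: the scalability win */
        unsigned long   hint;   /* next-fit start point within the pool */
        uint8_t         map[POOL_BITS / 8];
};

static struct pool pools[NPOOLS];

/* Pick a pool by caller identity so concurrent callers usually hit
 * different locks (the kernel keys off the executing CPU instead).
 * Casting pthread_t to an integer is a glibc-only convenience here. */
static struct pool *pick_pool(void)
{
        return &pools[(uintptr_t)pthread_self() % NPOOLS];
}

/* Next-fit search for npages contiguous free bits, under this pool's
 * lock only; other pools remain available to other threads. */
static long pool_alloc(struct pool *p, unsigned long npages)
{
        unsigned long i, run = 0;

        pthread_mutex_lock(&p->lock);
        for (i = p->hint; i < POOL_BITS; i++) {
                if (p->map[i / 8] & (1 << (i % 8))) {
                        run = 0;        /* occupied bit breaks the run */
                } else if (++run == npages) {
                        unsigned long start = i - npages + 1, j;

                        for (j = start; j <= i; j++)
                                p->map[j / 8] |= 1 << (j % 8);
                        p->hint = i + 1;
                        pthread_mutex_unlock(&p->lock);
                        return (long)start;
                }
        }
        p->hint = 0;            /* wrapped: rescan from 0 next time */
        pthread_mutex_unlock(&p->lock);
        return -1;              /* caller could fall back to another pool */
}

int main(void)
{
        long e;

        for (e = 0; e < NPOOLS; e++)
                pthread_mutex_init(&pools[e].lock, NULL);
        e = pool_alloc(pick_pool(), 8);
        printf("allocated entry %ld\n", e);
        return 0;
}

Keying the pool choice off the caller's identity is the design point: with one lock, 10 iperf threads serialize on every DMA map; with per-pool locks they proceed mostly in parallel.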
Diffstat (limited to 'arch/sparc/kernel/pci_sun4v.c')
-rw-r--r-- | arch/sparc/kernel/pci_sun4v.c | 183 |
1 file changed, 82 insertions, 101 deletions
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 47ddbd496a1e..d2fe57dad433 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
+#include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -155,15 +156,13 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
         iommu = dev->archdata.iommu;
 
-        spin_lock_irqsave(&iommu->lock, flags);
-        entry = iommu_range_alloc(dev, iommu, npages, NULL);
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+                                      (unsigned long)(-1), 0);
 
         if (unlikely(entry == DMA_ERROR_CODE))
                 goto range_alloc_fail;
 
-        *dma_addrp = (iommu->page_table_map_base +
-                      (entry << IO_PAGE_SHIFT));
+        *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
         ret = (void *) first_page;
         first_page = __pa(first_page);
 
@@ -188,45 +187,46 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
         return ret;
 
 iommu_map_fail:
-        /* Interrupts are disabled.  */
-        spin_lock(&iommu->lock);
-        iommu_range_free(iommu, *dma_addrp, npages);
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
 
 range_alloc_fail:
         free_pages(first_page, order);
         return NULL;
 }
 
+static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
+                               unsigned long npages)
+{
+        u32 devhandle = *(u32 *)demap_arg;
+        unsigned long num, flags;
+
+        local_irq_save(flags);
+        do {
+                num = pci_sun4v_iommu_demap(devhandle,
+                                            HV_PCI_TSBID(0, entry),
+                                            npages);
+
+                entry += num;
+                npages -= num;
+        } while (npages != 0);
+        local_irq_restore(flags);
+}
+
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
                                  dma_addr_t dvma, struct dma_attrs *attrs)
 {
         struct pci_pbm_info *pbm;
         struct iommu *iommu;
-        unsigned long flags, order, npages, entry;
+        unsigned long order, npages, entry;
         u32 devhandle;
 
         npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
         iommu = dev->archdata.iommu;
         pbm = dev->archdata.host_controller;
         devhandle = pbm->devhandle;
-        entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-
-        spin_lock_irqsave(&iommu->lock, flags);
-
-        iommu_range_free(iommu, dvma, npages);
-
-        do {
-                unsigned long num;
-
-                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                            npages);
-                entry += num;
-                npages -= num;
-        } while (npages != 0);
-
-        spin_unlock_irqrestore(&iommu->lock, flags);
-
+        entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
+        dma_4v_iommu_demap(&devhandle, entry, npages);
+        iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
         order = get_order(size);
         if (order < 10)
                 free_pages((unsigned long)cpu, order);
@@ -253,15 +253,13 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
         npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
         npages >>= IO_PAGE_SHIFT;
 
-        spin_lock_irqsave(&iommu->lock, flags);
-        entry = iommu_range_alloc(dev, iommu, npages, NULL);
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+                                      (unsigned long)(-1), 0);
 
         if (unlikely(entry == DMA_ERROR_CODE))
                 goto bad;
 
-        bus_addr = (iommu->page_table_map_base +
-                    (entry << IO_PAGE_SHIFT));
+        bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
         ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
         base_paddr = __pa(oaddr & IO_PAGE_MASK);
         prot = HV_PCI_MAP_ATTR_READ;
@@ -290,11 +288,7 @@ bad:
         return DMA_ERROR_CODE;
 
 iommu_map_fail:
-        /* Interrupts are disabled.  */
-        spin_lock(&iommu->lock);
-        iommu_range_free(iommu, bus_addr, npages);
-        spin_unlock_irqrestore(&iommu->lock, flags);
-
+        iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
         return DMA_ERROR_CODE;
 }
 
@@ -304,7 +298,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 {
         struct pci_pbm_info *pbm;
         struct iommu *iommu;
-        unsigned long flags, npages;
+        unsigned long npages;
         long entry;
         u32 devhandle;
 
@@ -321,22 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
         npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
         npages >>= IO_PAGE_SHIFT;
         bus_addr &= IO_PAGE_MASK;
-
-        spin_lock_irqsave(&iommu->lock, flags);
-
-        iommu_range_free(iommu, bus_addr, npages);
-
-        entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
-        do {
-                unsigned long num;
-
-                num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                            npages);
-                entry += num;
-                npages -= num;
-        } while (npages != 0);
-
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
+        dma_4v_iommu_demap(&devhandle, entry, npages);
+        iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -371,14 +352,14 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
         /* Init first segment length for backout at failure */
         outs->dma_length = 0;
 
-        spin_lock_irqsave(&iommu->lock, flags);
+        local_irq_save(flags);
 
         iommu_batch_start(dev, prot, ~0UL);
 
         max_seg_size = dma_get_max_seg_size(dev);
         seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                   IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-        base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
+        base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
         for_each_sg(sglist, s, nelems, i) {
                 unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -391,7 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                 /* Allocate iommu entries for that segment */
                 paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-                entry = iommu_range_alloc(dev, iommu, npages, &handle);
+                entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+                                              &handle, (unsigned long)(-1), 0);
 
                 /* Handle failure */
                 if (unlikely(entry == DMA_ERROR_CODE)) {
@@ -404,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                 iommu_batch_new_entry(entry);
 
                 /* Convert entry to a dma_addr_t */
-                dma_addr = iommu->page_table_map_base +
-                        (entry << IO_PAGE_SHIFT);
+                dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
                 dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
                 /* Insert into HW table */
@@ -451,7 +432,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
         if (unlikely(err < 0L))
                 goto iommu_map_failed;
 
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        local_irq_restore(flags);
 
         if (outcount < incount) {
                 outs = sg_next(outs);
@@ -469,7 +450,8 @@ iommu_map_failed:
                         vaddr = s->dma_address & IO_PAGE_MASK;
                         npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                  IO_PAGE_SIZE);
-                        iommu_range_free(iommu, vaddr, npages);
+                        iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+                                             DMA_ERROR_CODE);
                         /* XXX demap? XXX */
                         s->dma_address = DMA_ERROR_CODE;
                         s->dma_length = 0;
@@ -477,7 +459,7 @@ iommu_map_failed:
                 if (s == outs)
                         break;
         }
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        local_irq_restore(flags);
 
         return 0;
 }
@@ -489,7 +471,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
         struct pci_pbm_info *pbm;
         struct scatterlist *sg;
         struct iommu *iommu;
-        unsigned long flags;
+        unsigned long flags, entry;
         u32 devhandle;
 
         BUG_ON(direction == DMA_NONE);
@@ -498,33 +480,27 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
         pbm = dev->archdata.host_controller;
         devhandle = pbm->devhandle;
 
-        spin_lock_irqsave(&iommu->lock, flags);
+        local_irq_save(flags);
 
         sg = sglist;
         while (nelems--) {
                 dma_addr_t dma_handle = sg->dma_address;
                 unsigned int len = sg->dma_length;
-                unsigned long npages, entry;
+                unsigned long npages;
+                struct iommu_map_table *tbl = &iommu->tbl;
+                unsigned long shift = IO_PAGE_SHIFT;
 
                 if (!len)
                         break;
                 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-                iommu_range_free(iommu, dma_handle, npages);
-
-                entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
-                while (npages) {
-                        unsigned long num;
-
-                        num = pci_sun4v_iommu_demap(devhandle, HV_PCI_TSBID(0, entry),
-                                                    npages);
-                        entry += num;
-                        npages -= num;
-                }
-
+                entry = ((dma_handle - tbl->table_map_base) >> shift);
+                dma_4v_iommu_demap(&devhandle, entry, npages);
+                iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+                                     DMA_ERROR_CODE);
                 sg = sg_next(sg);
         }
 
-        spin_unlock_irqrestore(&iommu->lock, flags);
+        local_irq_restore(flags);
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
@@ -550,30 +526,33 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-                                            struct iommu *iommu)
+                                            struct iommu_map_table *iommu)
 {
-        struct iommu_arena *arena = &iommu->arena;
-        unsigned long i, cnt = 0;
+        struct iommu_pool *pool;
+        unsigned long i, pool_nr, cnt = 0;
         u32 devhandle;
 
         devhandle = pbm->devhandle;
-        for (i = 0; i < arena->limit; i++) {
-                unsigned long ret, io_attrs, ra;
-
-                ret = pci_sun4v_iommu_getmap(devhandle,
-                                             HV_PCI_TSBID(0, i),
-                                             &io_attrs, &ra);
-                if (ret == HV_EOK) {
-                        if (page_in_phys_avail(ra)) {
-                                pci_sun4v_iommu_demap(devhandle,
-                                                      HV_PCI_TSBID(0, i), 1);
-                        } else {
-                                cnt++;
-                                __set_bit(i, arena->map);
+        for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
+                pool = &(iommu->pools[pool_nr]);
+                for (i = pool->start; i <= pool->end; i++) {
+                        unsigned long ret, io_attrs, ra;
+
+                        ret = pci_sun4v_iommu_getmap(devhandle,
+                                                     HV_PCI_TSBID(0, i),
+                                                     &io_attrs, &ra);
+                        if (ret == HV_EOK) {
+                                if (page_in_phys_avail(ra)) {
+                                        pci_sun4v_iommu_demap(devhandle,
+                                                              HV_PCI_TSBID(0,
                                                              i), 1);
+                                } else {
+                                        cnt++;
+                                        __set_bit(i, iommu->map);
+                                }
                         }
                 }
         }
-
         return cnt;
 }
 
@@ -603,20 +582,22 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
         /* Setup initial software IOMMU state. */
         spin_lock_init(&iommu->lock);
         iommu->ctx_lowest_free = 1;
-        iommu->page_table_map_base = dma_offset;
+        iommu->tbl.table_map_base = dma_offset;
         iommu->dma_addr_mask = dma_mask;
 
         /* Allocate and initialize the free area map.  */
         sz = (num_tsb_entries + 7) / 8;
         sz = (sz + 7UL) & ~7UL;
-        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
-        if (!iommu->arena.map) {
+        iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
+        if (!iommu->tbl.map) {
                 printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
                 return -ENOMEM;
         }
-        iommu->arena.limit = num_tsb_entries;
-
-        sz = probe_existing_entries(pbm, iommu);
+        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
+                            NULL, false /* no large_pool */,
+                            0 /* default npools */,
+                            false /* want span boundary checking */);
+        sz = probe_existing_entries(pbm, &iommu->tbl);
         if (sz)
                 printk("%s: Imported %lu TSB entries from OBP\n",
                        pbm->name, sz);
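
Read together, the hunks follow one pattern. The condensed before/after below is lifted from the map-page path in the diff above (no new code), with one semantic noted as I read the new API: passing DMA_ERROR_CODE as the last argument of iommu_tbl_range_free() asks the library to derive the table entry from the DMA address.

/* Before: every allocation serializes on iommu->lock. */
spin_lock_irqsave(&iommu->lock, flags);
entry = iommu_range_alloc(dev, iommu, npages, NULL);
spin_unlock_irqrestore(&iommu->lock, flags);
bus_addr = (iommu->page_table_map_base + (entry << IO_PAGE_SHIFT));

/* After: the pooled allocator locks internally; the caller passes a
 * mask of (unsigned long)(-1) (no addressing limit) and align order 0,
 * exactly as in the hunks above. */
entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
                              (unsigned long)(-1), 0);
bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));

/* Freeing: DMA_ERROR_CODE here means "compute the entry from bus_addr". */
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);

The demap hypercall loop, previously duplicated under the lock in the free, unmap-page, and unmap-sg paths, is factored into dma_4v_iommu_demap() and runs with only local interrupts disabled, since the hypervisor call itself needs no allocator state.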