author     Alex Williamson <alex.williamson@hp.com>  2005-09-06 11:20:49 -0600
committer  Tony Luck <tony.luck@intel.com>           2005-09-14 16:22:11 -0700
commit     0b9afede3d9c66fef06f1d5ef5ff15c4b97730fc
tree       95f55e1219eed9c3fd2458ddbf960307c631114e /arch/ia64/lib
parent     1619cca2921f6927f4240e03f413d4165c7002fc
[IA64] more robust zx1/sx1000 machvec support
Machine vector selection has always been a bit of a hack given how
early in system boot it needs to be done. Services like ACPI namespace
are not available, and there are non-trivial problems with moving them to
early boot. However, there's no reason we can't change to a different
machvec later in boot when the services we need are available. By
adding an entry point for later initialization of the swiotlb, we can add
an error path for the hpzx1 machvec initialization and fall back to the
DIG machine vector if IOMMU hardware isn't found in the system. Since
ia64 uses 4GB for zone DMA (no ISA support), it's trivial to allocate a
contiguous range from the slab for bounce buffer usage.
Signed-off-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
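
The fallback path itself lives outside this diffstat (which is limited
to arch/ia64/lib), but its shape follows from the description above.
A minimal sketch, assuming a hypothetical sba_ioc_found() probe and an
illustrative panic message; only swiotlb_late_init_with_default_size()
(added by this patch) and machvec_init() are real interfaces here:

static int __init
hpzx1_sba_init (void)
{
	/*
	 * If no zx1/sx1000 IOC (IOMMU) was discovered, set up bounce
	 * buffers from the slab and fall back to the DIG machvec.
	 */
	if (!sba_ioc_found()) {
		/* 64MB default, mirroring swiotlb_init() */
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to initialize software I/O TLB:"
			      " Try machvec=dig boot option");
		machvec_init("dig");
	}
	return 0;
}
subsys_initcall(hpzx1_sba_init);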
Diffstat (limited to 'arch/ia64/lib')
-rw-r--r--  arch/ia64/lib/swiotlb.c | 102
 1 file changed, 102 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index dbc0b3e449c5..875b0c16250c 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -49,6 +49,15 @@
  */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -154,6 +163,99 @@ swiotlb_init (void)
 	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned int order;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
+		if (io_tlb_start)
+			break;
+		order--;
+	}
+
+	if (!io_tlb_start)
+		goto cleanup1;
+
+	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+		io_tlb_nslabs = SLABS_PER_PAGE << order;
+	}
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+	/*
+	 * Allocate and initialize the free list array.  This array is used
+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+	 * between io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+	                              get_order(io_tlb_nslabs * sizeof(int)));
+	if (!io_tlb_list)
+		goto cleanup2;
+
+	for (i = 0; i < io_tlb_nslabs; i++)
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_index = 0;
+
+	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+	                   get_order(io_tlb_nslabs * sizeof(char *)));
+	if (!io_tlb_orig_addr)
+		goto cleanup3;
+
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+	                                          get_order(io_tlb_overflow));
+	if (!io_tlb_overflow_buffer)
+		goto cleanup4;
+
+	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+	return 0;
+
+cleanup4:
+	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+	                                                      sizeof(char *)));
+	io_tlb_orig_addr = NULL;
+cleanup3:
+	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+	                                                 sizeof(int)));
+	io_tlb_list = NULL;
+	io_tlb_end = NULL;
+cleanup2:
+	free_pages((unsigned long)io_tlb_start, order);
+	io_tlb_start = NULL;
+cleanup1:
+	io_tlb_nslabs = req_nslabs;
+	return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
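
For a feel of the sizes involved: with IO_TLB_SHIFT = 11 each slab is
2KB, so IO_TLB_MIN_SLABS = (1<<20) >> 11 = 512 slabs, i.e. the loop
refuses to settle for less than 1MB of bounce buffer.  Below is a
standalone userspace sketch of the same back-off ladder, assuming
ia64's default 16KB pages (PAGE_SHIFT = 14, so SLABS_PER_PAGE = 8),
skipping the ALIGN step, and pretending every allocation fails so the
whole ladder prints; get_order() here is a simplified stand-in for the
kernel helper:

#include <stdio.h>
#include <stddef.h>

/* Illustrative constants matching the patch (16KB ia64 pages assumed). */
#define PAGE_SHIFT	14
#define IO_TLB_SHIFT	11
#define SLABS_PER_PAGE	(1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)

/* Stand-in for the kernel's get_order(): smallest page order covering size. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while (((size_t)1 << (order + PAGE_SHIFT)) < size)
		order++;
	return order;
}

int main(void)
{
	size_t default_size = 64 * (1 << 20);	/* 64MB, as in swiotlb_init() */
	unsigned long nslabs = default_size >> IO_TLB_SHIFT;
	unsigned int order = get_order(nslabs * (1 << IO_TLB_SHIFT));

	/*
	 * Same back-off as the patch: halve the request until an
	 * allocation would succeed, but never accept less than
	 * IO_TLB_MIN_SLABS (1MB worth of 2KB slabs).
	 */
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		printf("try order %2u: %6lu slabs = %2lu MB\n", order,
		       (unsigned long)SLABS_PER_PAGE << order,
		       ((unsigned long)SLABS_PER_PAGE << order << IO_TLB_SHIFT) >> 20);
		order--;	/* pretend __get_free_pages() failed */
	}
	return 0;
}

Run as-is this prints the ladder from order 12 (32768 slabs, 64MB)
down to order 7 (1024 slabs, 2MB) before giving up, which is the same
range the real function would search before returning -ENOMEM.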