Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig              |   4
 arch/arm/mm/cache-feroceon-l2.c  |  45
 arch/arm/mm/cache-tauros2.c      |  29
 arch/arm/mm/dma-mapping.c        | 144
 arch/arm/mm/init.c               |   2
 5 files changed, 163 insertions(+), 61 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ca8ecdee47d8..f5ad9ee70426 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -264,7 +264,7 @@ config CPU_ARM1026
 # SA110
 config CPU_SA110
-        bool "Support StrongARM(R) SA-110 processor" if ARCH_RPC
+        bool
         select CPU_32v3 if ARCH_RPC
         select CPU_32v4 if !ARCH_RPC
         select CPU_ABRT_EV4
@@ -854,7 +854,7 @@ config OUTER_CACHE_SYNC
 
 config CACHE_FEROCEON_L2
         bool "Enable the Feroceon L2 cache controller"
-        depends on ARCH_KIRKWOOD || ARCH_MV78XX0
+        depends on ARCH_KIRKWOOD || ARCH_MV78XX0 || ARCH_MVEBU
         default y
         select OUTER_CACHE
         help
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index aae891820f8f..dc814a548056 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,10 +13,15 @@
  */
 #include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/highmem.h>
+#include <linux/io.h>
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
-#include <plat/cache-feroceon-l2.h>
+#include <asm/hardware/cache-feroceon-l2.h>
+
+#define L2_WRITETHROUGH_KIRKWOOD        BIT(4)
 
 /*
  * Low-level cache maintenance operations.
  */
@@ -352,3 +357,41 @@ void __init feroceon_l2_init(int __l2_wt_override)
         printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
                          l2_wt_override ? ", in WT override mode" : "");
 }
+#ifdef CONFIG_OF
+static const struct of_device_id feroceon_ids[] __initconst = {
+        { .compatible = "marvell,kirkwood-cache"},
+        { .compatible = "marvell,feroceon-cache"},
+        {}
+};
+
+int __init feroceon_of_init(void)
+{
+        struct device_node *node;
+        void __iomem *base;
+        bool l2_wt_override = false;
+        struct resource res;
+
+#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
+        l2_wt_override = true;
+#endif
+
+        node = of_find_matching_node(NULL, feroceon_ids);
+        if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
+                if (of_address_to_resource(node, 0, &res))
+                        return -ENODEV;
+
+                base = ioremap(res.start, resource_size(&res));
+                if (!base)
+                        return -ENOMEM;
+
+                if (l2_wt_override)
+                        writel(readl(base) | L2_WRITETHROUGH_KIRKWOOD, base);
+                else
+                        writel(readl(base) & ~L2_WRITETHROUGH_KIRKWOOD, base);
+        }
+
+        feroceon_l2_init(l2_wt_override);
+
+        return 0;
+}
+#endif
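The new feroceon_of_init() entry point lets a DT-based platform probe the cache through its "marvell,kirkwood-cache" or "marvell,feroceon-cache" node instead of calling feroceon_l2_init() with an explicit write-through flag. A minimal caller sketch follows; the board hook name is an assumption for illustration, not part of this diff:

#include <asm/hardware/cache-feroceon-l2.h>

/* Hypothetical DT board init; the function name is illustrative only. */
static void __init example_board_init_machine(void)
{
        /*
         * Probes for a matching cache node, applies the Kirkwood
         * write-through override when
         * CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH is set, then calls
         * feroceon_l2_init() as before.
         */
        feroceon_of_init();
}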
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 1be0f4e5e6eb..b273739e6359 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -33,7 +33,7 @@
  * outer cache operations into the kernel image if the kernel has been
  * configured to support a pre-v7 CPU.
  */
-#if __LINUX_ARM_ARCH__ < 7
+#ifdef CONFIG_CPU_32v5
 /*
  * Low-level cache maintenance operations.
  */
@@ -229,33 +229,6 @@ static void __init tauros2_internal_init(unsigned int features)
         }
 #endif
 
-#ifdef CONFIG_CPU_32v6
-        /*
-         * Check whether this CPU lacks support for the v7 hierarchical
-         * cache ops. (PJ4 is in its v6 personality mode if the MMFR3
-         * register indicates no support for the v7 hierarchical cache
-         * ops.)
-         */
-        if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
-                /*
-                 * When Tauros2 is used in an ARMv6 system, the L2
-                 * enable bit is in the ARMv6 ARM-mandated position
-                 * (bit [26] of the System Control Register).
-                 */
-                if (!(get_cr() & 0x04000000)) {
-                        printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
-                        adjust_cr(0x04000000, 0x04000000);
-                }
-
-                mode = "ARMv6";
-                outer_cache.inv_range = tauros2_inv_range;
-                outer_cache.clean_range = tauros2_clean_range;
-                outer_cache.flush_range = tauros2_flush_range;
-                outer_cache.disable = tauros2_disable;
-                outer_cache.resume = tauros2_resume;
-        }
-#endif
-
 #ifdef CONFIG_CPU_32v7
         /*
          * Check whether this CPU has support for the v7 hierarchical
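For context on the deleted ARMv6 branch: the outer_cache function pointers it used to install are only ever reached through NULL-checked wrappers, so leaving them unset on a PJ4 running in its v6 personality mode is harmless. A paraphrased sketch of such a wrapper, modeled on arch/arm/include/asm/outercache.h (exact argument types vary across kernel versions):

/* Modeled on the mainline wrapper; illustrative, not part of this diff. */
static inline void outer_inv_range_sketch(phys_addr_t start, phys_addr_t end)
{
        if (outer_cache.inv_range)      /* NULL when no outer-cache driver registered */
                outer_cache.inv_range(start, end);
}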
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c9c6acdf90cc..f62aa0677e5c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1066,6 +1066,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
                                       size_t size)
 {
@@ -1073,41 +1075,87 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
         unsigned int align = 0;
         unsigned int count, start;
         unsigned long flags;
+        dma_addr_t iova;
+        int i;
 
         if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
                 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
 
-        count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
-                 (1 << mapping->order) - 1) >> mapping->order;
-
-        if (order > mapping->order)
-                align = (1 << (order - mapping->order)) - 1;
+        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+        align = (1 << order) - 1;
 
         spin_lock_irqsave(&mapping->lock, flags);
-        start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-                                           count, align);
-        if (start > mapping->bits) {
-                spin_unlock_irqrestore(&mapping->lock, flags);
-                return DMA_ERROR_CODE;
+        for (i = 0; i < mapping->nr_bitmaps; i++) {
+                start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+                                mapping->bits, 0, count, align);
+
+                if (start > mapping->bits)
+                        continue;
+
+                bitmap_set(mapping->bitmaps[i], start, count);
+                break;
         }
 
-        bitmap_set(mapping->bitmap, start, count);
+        /*
+         * No unused range found. Try to extend the existing mapping
+         * and perform a second attempt to reserve an IO virtual
+         * address range of size bytes.
+         */
+        if (i == mapping->nr_bitmaps) {
+                if (extend_iommu_mapping(mapping)) {
+                        spin_unlock_irqrestore(&mapping->lock, flags);
+                        return DMA_ERROR_CODE;
+                }
+
+                start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+                                mapping->bits, 0, count, align);
+
+                if (start > mapping->bits) {
+                        spin_unlock_irqrestore(&mapping->lock, flags);
+                        return DMA_ERROR_CODE;
+                }
+
+                bitmap_set(mapping->bitmaps[i], start, count);
+        }
         spin_unlock_irqrestore(&mapping->lock, flags);
 
-        return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+        iova = mapping->base + (mapping->size * i);
+        iova += start << PAGE_SHIFT;
+
+        return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
                                dma_addr_t addr, size_t size)
 {
-        unsigned int start = (addr - mapping->base) >>
-                             (mapping->order + PAGE_SHIFT);
-        unsigned int count = ((size >> PAGE_SHIFT) +
-                              (1 << mapping->order) - 1) >> mapping->order;
+        unsigned int start, count;
         unsigned long flags;
+        dma_addr_t bitmap_base;
+        u32 bitmap_index;
+
+        if (!size)
+                return;
+
+        bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+        BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+        bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+        start = (addr - bitmap_base) >> PAGE_SHIFT;
+
+        if (addr + size > bitmap_base + mapping->size) {
+                /*
+                 * The address range to be freed reaches into the iova
+                 * range of the next bitmap. This should not happen as
+                 * we don't allow this in __alloc_iova (at the
+                 * moment).
+                 */
+                BUG();
+        } else
+                count = size >> PAGE_SHIFT;
 
         spin_lock_irqsave(&mapping->lock, flags);
-        bitmap_clear(mapping->bitmap, start, count);
+        bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
         spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -1872,8 +1920,7 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
  * @base: start address of the valid IO address space
- * @size: size of the valid IO address space
- * @order: accuracy of the IO addresses allocations
+ * @size: maximum size of the valid IO address space
  *
  * Creates a mapping structure which holds information about used/unused
  * IO address ranges, which is required to perform memory allocation and
@@ -1883,38 +1930,54 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_attach_device function.
  */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-                         int order)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 {
-        unsigned int count = size >> (PAGE_SHIFT + order);
-        unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+        unsigned int bits = size >> PAGE_SHIFT;
+        unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
         struct dma_iommu_mapping *mapping;
+        int extensions = 1;
         int err = -ENOMEM;
 
-        if (!count)
+        if (!bitmap_size)
                 return ERR_PTR(-EINVAL);
 
+        if (bitmap_size > PAGE_SIZE) {
+                extensions = bitmap_size / PAGE_SIZE;
+                bitmap_size = PAGE_SIZE;
+        }
+
         mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
         if (!mapping)
                 goto err;
 
-        mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-        if (!mapping->bitmap)
+        mapping->bitmap_size = bitmap_size;
+        mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
+                                GFP_KERNEL);
+        if (!mapping->bitmaps)
                 goto err2;
 
+        mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+        if (!mapping->bitmaps[0])
+                goto err3;
+
+        mapping->nr_bitmaps = 1;
+        mapping->extensions = extensions;
         mapping->base = base;
+        mapping->size = bitmap_size << PAGE_SHIFT;
         mapping->bits = BITS_PER_BYTE * bitmap_size;
-        mapping->order = order;
+
         spin_lock_init(&mapping->lock);
 
         mapping->domain = iommu_domain_alloc(bus);
         if (!mapping->domain)
-                goto err3;
+                goto err4;
 
         kref_init(&mapping->kref);
         return mapping;
+err4:
+        kfree(mapping->bitmaps[0]);
 err3:
-        kfree(mapping->bitmap);
+        kfree(mapping->bitmaps);
 err2:
         kfree(mapping);
 err:
@@ -1924,14 +1987,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+        int i;
         struct dma_iommu_mapping *mapping =
                 container_of(kref, struct dma_iommu_mapping, kref);
 
         iommu_domain_free(mapping->domain);
-        kfree(mapping->bitmap);
+        for (i = 0; i < mapping->nr_bitmaps; i++)
+                kfree(mapping->bitmaps[i]);
+        kfree(mapping->bitmaps);
         kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+        int next_bitmap;
+
+        if (mapping->nr_bitmaps > mapping->extensions)
+                return -EINVAL;
+
+        next_bitmap = mapping->nr_bitmaps;
+        mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+                                                GFP_ATOMIC);
+        if (!mapping->bitmaps[next_bitmap])
+                return -ENOMEM;
+
+        mapping->nr_bitmaps++;
+
+        return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
         if (mapping)
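The allocator rework replaces the single bitmap, whose granularity was set by the order argument, with an array of PAGE_SIZE bitmaps: each covers mapping->size bytes of IO virtual address space and is allocated lazily by extend_iommu_mapping(), which uses GFP_ATOMIC because it runs under mapping->lock with interrupts disabled. Callers now pass only a base and a maximum size. A hedged usage sketch; the device, base address, and size below are made up for illustration:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Hypothetical caller of the changed API; values are illustrative only. */
static int example_attach(struct device *dev)
{
        struct dma_iommu_mapping *mapping;

        /*
         * 256 MiB of IO virtual address space starting at 0x80000000.
         * Only one PAGE_SIZE bitmap is allocated up front; further
         * chunks are added on demand as allocations fill the space.
         */
        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x80000000, SZ_256M);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        return arm_iommu_attach_device(dev, mapping);
}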
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 804d61566a53..2a77ba8796ae 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -323,6 +323,8 @@ void __init arm_memblock_init(struct meminfo *mi,
         if (mdesc->reserve)
                 mdesc->reserve();
 
+        early_init_fdt_scan_reserved_mem();
+
         /*
          * reserve memory for DMA contigouos allocations,
          * must come from DMA area inside low memory
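The new call is placed after mdesc->reserve() and before the CMA carve-out below it, so regions claimed by the device tree are already known to memblock when the contiguous DMA area is reserved. A simplified sketch of what the helper does with the FDT's /memreserve/ entries, paraphrased from drivers/of/fdt.c (the real helper also walks the /reserved-memory node, omitted here):

#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

/* Paraphrased and simplified; illustrative, not the exact mainline body. */
static void __init fdt_scan_memreserve_sketch(void)
{
        u64 base, size;
        int n;

        for (n = 0; ; n++) {
                fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
                if (!size)      /* the /memreserve/ map is zero-terminated */
                        break;
                memblock_reserve(base, size);
        }
}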