Diffstat (limited to 'arch/arm/mm')

 -rw-r--r--  arch/arm/mm/Kconfig      | 22
 -rw-r--r--  arch/arm/mm/alignment.c  | 53
 -rw-r--r--  arch/arm/mm/cache-l2x0.c | 39
 -rw-r--r--  arch/arm/mm/fault-armv.c |  2
 -rw-r--r--  arch/arm/mm/fault.c      |  5
 -rw-r--r--  arch/arm/mm/init.c       | 34
 -rw-r--r--  arch/arm/mm/mm.h         |  3
 -rw-r--r--  arch/arm/mm/mmu.c        | 39
 8 files changed, 124 insertions(+), 73 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f646..33027301639e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -754,7 +754,8 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
+ ARCH_NOMADIK || ARCH_OMAP4 || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
default y
select OUTER_CACHE
help
@@ -781,3 +782,22 @@ config ARM_L1_CACHE_SHIFT
int
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
+
+config ARM_DMA_MEM_BUFFERABLE
+ bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+ default y if CPU_V6 || CPU_V7
+ help
+ Historically, the kernel has used strongly ordered mappings to
+ provide DMA coherent memory. With the advent of ARMv7, mapping
+ memory with differing types results in unpredictable behaviour,
+ so on these CPUs, this option is forced on.
+
+ Multiple mappings with differing attributes are also unpredictable
+ on ARMv6 CPUs, but since they do not have aggressive speculative
+ prefetch, no harm appears to occur.
+
+ However, drivers may be missing the necessary barriers for ARMv6,
+ so turning this on may result in unpredictable driver
+ behaviour. For this reason, it is offered as an option.
+
+ You are recommended to say 'Y' here and debug any affected drivers.
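
The barrier problem the help text above describes is the classic descriptor/doorbell
ordering issue. A minimal sketch, assuming a hypothetical device with one DMA
descriptor in a dma_alloc_coherent() buffer and a hypothetical MYDEV_KICK doorbell
register; none of these names come from this patch:

#include <linux/dma-mapping.h>
#include <linux/io.h>

#define MYDEV_KICK	0x10		/* hypothetical doorbell register offset */

struct my_desc {			/* hypothetical descriptor layout */
	u32 addr;
	u32 len;
};

/*
 * 'desc' points into coherent memory.  With ARM_DMA_MEM_BUFFERABLE=y that
 * memory is Normal/non-cacheable rather than strongly ordered, so the
 * descriptor writes may be reordered against the MMIO doorbell write
 * unless the driver places a write barrier between them.
 */
static void mydev_submit(void __iomem *regs, struct my_desc *desc,
			 dma_addr_t buf, u32 len)
{
	desc->addr = buf;
	desc->len  = len;
	wmb();				/* descriptor visible before doorbell */
	writel(1, regs + MYDEV_KICK);
}
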
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index edddd66faac6..28b7c2776198 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -94,36 +95,29 @@ static const char *usermode_action[] = {
"signal+warn"
};

-static int
-proc_alignment_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int alignment_proc_show(struct seq_file *m, void *v)
{
- char *p = page;
- int len;
-
- p += sprintf(p, "User:\t\t%lu\n", ai_user);
- p += sprintf(p, "System:\t\t%lu\n", ai_sys);
- p += sprintf(p, "Skipped:\t%lu\n", ai_skipped);
- p += sprintf(p, "Half:\t\t%lu\n", ai_half);
- p += sprintf(p, "Word:\t\t%lu\n", ai_word);
+ seq_printf(m, "User:\t\t%lu\n", ai_user);
+ seq_printf(m, "System:\t\t%lu\n", ai_sys);
+ seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
+ seq_printf(m, "Half:\t\t%lu\n", ai_half);
+ seq_printf(m, "Word:\t\t%lu\n", ai_word);
if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
- p += sprintf(p, "DWord:\t\t%lu\n", ai_dword);
- p += sprintf(p, "Multi:\t\t%lu\n", ai_multi);
- p += sprintf(p, "User faults:\t%i (%s)\n", ai_usermode,
+ seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
usermode_action[ai_usermode]);
- len = (p - page) - off;
- if (len < 0)
- len = 0;
-
- *eof = (len <= count) ? 1 : 0;
- *start = page + off;
+ return 0;
+}

- return len;
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
}

-static int proc_alignment_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
char mode;
@@ -136,6 +130,13 @@ static int proc_alignment_write(struct file *file, const char __user *buffer,
return count;
}

+static const struct file_operations alignment_proc_fops = {
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
#endif /* CONFIG_PROC_FS */

union offset_union {
@@ -901,12 +902,10 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;

- res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
+ res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
+ &alignment_proc_fops);
if (!res)
return -ENOMEM;
-
- res->read_proc = proc_alignment_read;
- res->write_proc = proc_alignment_write;
#endif

/*
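
The conversion above is the stock seq_file pattern for small proc files. For
reference, the same shape reduced to a self-contained sketch; the "cpu/example"
entry and its counter are hypothetical, not part of this patch:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static unsigned long example_count;	/* hypothetical counter */

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Count:\t%lu\n", example_count);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* single_open(): the whole file comes from one show() call */
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	return proc_create("cpu/example", S_IRUGO, NULL,
			   &example_proc_fops) ? 0 : -ENOMEM;
}
device_initcall(example_init);
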
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e2..78f0fc8595e2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -27,6 +27,7 @@
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
+static uint32_t l2x0_way_mask; /* Bitmask of active ways */

static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
@@ -99,8 +100,8 @@ static inline void l2x0_inv_all(void)
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
- writel(0xff, l2x0_base + L2X0_INV_WAY);
- cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
+ writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+ cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -199,9 +200,37 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
+ __u32 cache_id;
+ int ways;
+ const char *type;

l2x0_base = base;

+ cache_id = readl(l2x0_base + L2X0_CACHE_ID);
+ aux = readl(l2x0_base + L2X0_AUX_CTRL);
+
+ /* Determine the number of ways */
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L310:
+ if (aux & (1 << 16))
+ ways = 16;
+ else
+ ways = 8;
+ type = "L310";
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+ ways = (aux >> 13) & 0xf;
+ type = "L210";
+ break;
+ default:
+ /* Assume unknown chips have 8 ways */
+ ways = 8;
+ type = "L2x0 series";
+ break;
+ }
+
+ l2x0_way_mask = (1 << ways) - 1;
+
/*
* Check if l2x0 controller is already enabled.
* If you are booting from non-secure mode
@@ -210,8 +239,6 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */
-
- aux = readl(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
@@ -226,5 +253,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
- printk(KERN_INFO "L2X0 cache controller enabled\n");
+ printk(KERN_INFO "%s cache controller enabled\n", type);
+ printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
+ ways, cache_id, aux);
}
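
The mask computed above is simply one bit per active way, which is what writes
to L2X0_INV_WAY (and the other by-way registers) operate on. Worked through for
the cases in the switch:

/* way_mask(8)  == 0x00ff: an L210 reporting 8 ways, an unknown part,
 *                         or an L310 with aux bit 16 clear.
 * way_mask(16) == 0xffff: an L310 with aux bit 16 set.               */
static inline uint32_t way_mask(int ways)
{
	return (1U << ways) - 1;
}
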
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c9b97e9836a2..82df01a72f4a 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -133,8 +133,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_unlock(mapping);
if (aliases)
do_adjust_pte(vma, addr, pfn, ptep);
- else
- flush_cache_page(vma, addr, pfn);
}

/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9d40c341e07e..92f5801f99c1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -463,7 +463,12 @@ static struct fsr_info {
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGILL, BUS_ADRALN, "alignment exception" },
+/* Do we need a runtime check? */
+#if __LINUX_ARM_ARCH__ < 6
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
+#else
+ { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" },
+#endif
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
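
On the "runtime check" question in the hunk above: one alternative to the
compile-time #if would be patching the table entry during boot, so a single
image covers both encodings. A hypothetical sketch, assuming it sits in
fault.c next to fsr_info[] and that entry 4 (fault status 0b00100) is the
slot being switched, as in this patch:

/* Hypothetical boot-time alternative to the #if above. */
static int __init fsr_info_fixup(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		fsr_info[4].fn   = do_translation_fault;
		fsr_info[4].sig  = SIGSEGV;
		fsr_info[4].code = SEGV_MAPERR;
		fsr_info[4].name = "I-cache maintenance fault";
	}
	return 0;
}
early_initcall(fsr_info_fixup);
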
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7829cb5425f5..105d1d4f420b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,7 +15,6 @@
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
-#include <linux/sort.h>
#include <linux/highmem.h>
#include <asm/mach-types.h>
@@ -226,20 +225,6 @@ static int __init check_initrd(struct meminfo *mi)
return initrd_node;
}

-static inline void map_memory_bank(struct membank *bank)
-{
-#ifdef CONFIG_MMU
- struct map_desc map;
-
- map.pfn = bank_pfn_start(bank);
- map.virtual = __phys_to_virt(bank_phys_start(bank));
- map.length = bank_phys_size(bank);
- map.type = MT_MEMORY;
-
- create_mapping(&map);
-#endif
-}
-
static void __init bootmem_init_node(int node, struct meminfo *mi,
unsigned long start_pfn, unsigned long end_pfn)
{
@@ -249,16 +234,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
int i;

/*
- * Map the memory banks for this node.
- */
- for_each_nodebank(i, mi, node) {
- struct membank *bank = &mi->bank[i];
-
- if (!bank->highmem)
- map_memory_bank(bank);
- }
-
- /*
* Allocate the bootmem bitmap page.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
@@ -387,21 +362,12 @@ static void arm_memory_present(struct meminfo *mi, int node)
}
#endif

-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
- const struct membank *a = _a, *b = _b;
- long cmp = bank_pfn_start(a) - bank_pfn_start(b);
- return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long min, max_low, max_high;
int node, initrd_node;

- sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL);
-
/*
* Locate which node contains the ramdisk image, if any.
*/
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index a888363398f8..815d08eecbb0 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,10 +28,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif

-struct map_desc;
-struct meminfo;
struct pglist_data;

-void __init create_mapping(struct map_desc *md);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28eb..69852003675f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -14,6 +14,7 @@
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
+#include <linux/sort.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
@@ -599,7 +600,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
* offsets, and we take full advantage of sections and
* supersections.
*/
-void __init create_mapping(struct map_desc *md)
+static void __init create_mapping(struct map_desc *md)
{
unsigned long phys, addr, length, end;
const struct mem_type *type;
@@ -1013,6 +1014,39 @@ static void __init kmap_init(void)
#endif
}

+static inline void map_memory_bank(struct membank *bank)
+{
+ struct map_desc map;
+
+ map.pfn = bank_pfn_start(bank);
+ map.virtual = __phys_to_virt(bank_phys_start(bank));
+ map.length = bank_phys_size(bank);
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+}
+
+static void __init map_lowmem(void)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ /* Map all the lowmem memory banks. */
+ for (i = 0; i < mi->nr_banks; i++) {
+ struct membank *bank = &mi->bank[i];
+
+ if (!bank->highmem)
+ map_memory_bank(bank);
+ }
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1021,9 +1055,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;

+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
build_mem_type_table();
sanity_check_meminfo();
prepare_page_table();
+ map_lowmem();
bootmem_init();
devicemaps_init(mdesc);
kmap_init();
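
A note on meminfo_cmp(), which moved here unchanged: bank pfns are page-frame
numbers, small enough that the subtraction cannot overflow a long, and the
result is collapsed to -1/0/1 so the int return value is always well-defined.
The same comparator shape, exercised standalone in userspace with made-up pfn
values (the kernel calls it through sort() from linux/sort.h rather than qsort):

#include <stdio.h>
#include <stdlib.h>

struct membank { unsigned long pfn; };	/* stand-in: bank_pfn_start() value */

static int bank_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = a->pfn - b->pfn;

	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

int main(void)
{
	struct membank banks[] = { { 0x80000 }, { 0x00000 }, { 0x40000 } };
	int i;

	qsort(banks, 3, sizeof(banks[0]), bank_cmp);
	for (i = 0; i < 3; i++)
		printf("bank %d: pfn 0x%05lx\n", i, banks[i].pfn);
	return 0;
}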