Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/consistent.c     |   1
-rw-r--r--  arch/sh/mm/hugetlbpage.c    |   1
-rw-r--r--  arch/sh/mm/init.c           |   1
-rw-r--r--  arch/sh/mm/ioremap.c        |  71
-rw-r--r--  arch/sh/mm/ioremap_fixed.c  |  12
-rw-r--r--  arch/sh/mm/numa.c           |   3
-rw-r--r--  arch/sh/mm/pgtable.c        |   1
-rw-r--r--  arch/sh/mm/pmb.c            | 417
-rw-r--r--  arch/sh/mm/tlb-pteaex.c     |  30
-rw-r--r--  arch/sh/mm/tlb-sh3.c        |  19
-rw-r--r--  arch/sh/mm/tlb-sh4.c        |  28
-rw-r--r--  arch/sh/mm/tlb-urb.c        |  22
-rw-r--r--  arch/sh/mm/tlbflush_32.c    |  19
-rw-r--r--  arch/sh/mm/uncached.c       |   9
14 files changed, 429 insertions, 205 deletions
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 902967e3f841..c86a08540258 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -16,6 +16,7 @@
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/gfp.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 9304117039c4..9163db3e8d15 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -13,7 +13,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
-#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 68028e8f26ce..c505de61a5ca 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
+#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index c68d2d7d00a9..0c99ec2e7ed8 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -14,6 +14,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
@@ -34,11 +35,12 @@
* caller shouldn't need to know that small detail.
*/
void __iomem * __init_refok
-__ioremap_caller(unsigned long phys_addr, unsigned long size,
+__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
pgprot_t pgprot, void *caller)
{
struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
+ void __iomem *mapped;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
@@ -46,6 +48,20 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
return NULL;
/*
+ * If we can't yet use the regular approach, go the fixmap route.
+ */
+ if (!mem_init_done)
+ return ioremap_fixed(phys_addr, size, pgprot);
+
+ /*
+ * First try to remap through the PMB.
+ * PMB entries are all pre-faulted.
+ */
+ mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+ if (mapped && !IS_ERR(mapped))
+ return mapped;
+
+ /*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
@@ -53,12 +69,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
size = PAGE_ALIGN(last_addr+1) - phys_addr;
/*
- * If we can't yet use the regular approach, go the fixmap route.
- */
- if (!mem_init_done)
- return ioremap_fixed(phys_addr, offset, size, pgprot);
-
- /*
* Ok, go for it..
*/
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -67,33 +77,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
area->phys_addr = phys_addr;
orig_addr = addr = (unsigned long)area->addr;
-#ifdef CONFIG_PMB
- /*
- * First try to remap through the PMB once a valid VMA has been
- * established. Smaller allocations (or the rest of the size
- * remaining after a PMB mapping due to the size not being
- * perfectly aligned on a PMB size boundary) are then mapped
- * through the UTLB using conventional page tables.
- *
- * PMB entries are all pre-faulted.
- */
- if (unlikely(phys_addr >= P1SEG)) {
- unsigned long mapped;
-
- mapped = pmb_remap(addr, phys_addr, size, pgprot);
- if (likely(mapped)) {
- addr += mapped;
- phys_addr += mapped;
- size -= mapped;
- }
+ if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+ vunmap((void *)orig_addr);
+ return NULL;
}
-#endif
-
- if (likely(size))
- if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
- vunmap((void *)orig_addr);
- return NULL;
- }
return (void __iomem *)(offset + (char *)orig_addr);
}
@@ -133,23 +120,11 @@ void __iounmap(void __iomem *addr)
if (iounmap_fixed(addr) == 0)
return;
-#ifdef CONFIG_PMB
/*
- * Purge any PMB entries that may have been established for this
- * mapping, then proceed with conventional VMA teardown.
- *
- * XXX: Note that due to the way that remove_vm_area() does
- * matching of the resultant VMA, we aren't able to fast-forward
- * the address past the PMB space until the end of the VMA where
- * the page tables reside. As such, unmap_vm_area() will be
- * forced to linearly scan over the area until it finds the page
- * tables where PTEs that need to be unmapped actually reside,
- * which is far from optimal. Perhaps we need to use a separate
- * VMA for the PMB mappings?
- * -- PFM.
+ * If the PMB handled it, there's nothing else to do.
*/
- pmb_unmap(vaddr);
-#endif
+ if (pmb_unmap(addr) == 0)
+ return;
p = remove_vm_area((void *)(vaddr & PAGE_MASK));
if (!p) {
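
The reworked __ioremap_caller() above settles on a three-step lookup order: fall back to the fixmap while mem_init_done is still false, then try a pre-faulted PMB mapping, and only build page tables with ioremap_page_range() as a last resort. A minimal sketch of that ordering, with stand-in stubs rather than the real kernel helpers:

    #include <stdbool.h>

    /* Stand-in stubs; the real helpers live in arch/sh/mm and asm/io.h. */
    extern bool mem_init_done;
    void *fixmap_remap_stub(unsigned long long phys, unsigned long size);
    void *pmb_remap_stub(unsigned long long phys, unsigned long size);
    void *pagetable_remap_stub(unsigned long long phys, unsigned long size);

    /* Model of the new decision order in __ioremap_caller(). */
    static void *ioremap_model(unsigned long long phys, unsigned long size)
    {
        void *mapped;

        if (!mem_init_done)                  /* too early: fixmap route only */
            return fixmap_remap_stub(phys, size);

        mapped = pmb_remap_stub(phys, size); /* PMB entries are pre-faulted */
        if (mapped)
            return mapped;

        return pagetable_remap_stub(phys, size); /* conventional page tables */
    }
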
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 0b78b1e20ef1..efbe84af9983 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
-#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -45,14 +44,21 @@ void __init ioremap_fixed_init(void)
}
void __init __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
- unsigned long size, pgprot_t prot)
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
enum fixed_addresses idx0, idx;
struct ioremap_map *map;
unsigned int nrpages;
+ unsigned long offset;
int i, slot;
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(phys_addr + size) - phys_addr;
+
slot = -1;
for (i = 0; i < FIX_N_IOREMAPS; i++) {
map = &ioremap_maps[i];
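
Both __ioremap_caller() and the reworked ioremap_fixed() above now split a request into a page-aligned base plus an in-page offset before mapping. A small user-space model of that split (using the last_addr form from __ioremap_caller(), and assuming a 4 KiB page size in place of the kernel's PAGE_SIZE/PAGE_MASK macros):

    #include <stdio.h>

    #define PAGE_SIZE  4096UL               /* assumed; really from <asm/page.h> */
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
        unsigned long phys = 0xfd000123UL, size = 0x2000UL;
        unsigned long last_addr = phys + size - 1;
        unsigned long offset = phys & ~PAGE_MASK;  /* kept for the returned cookie */

        phys &= PAGE_MASK;                         /* page-aligned base to map */
        size  = PAGE_ALIGN(last_addr + 1) - phys;  /* whole pages covering the request */

        printf("base=%#lx offset=%#lx size=%#lx\n", phys, offset, size);
        return 0;
    }
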
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 422e92721878..961b34085e3b 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -74,6 +74,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
+ pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+ PAGE_KERNEL);
+
lmb_add(start, end - start);
__add_active_range(nid, start_pfn, end_pfn);
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index 6f21fb1d8726..26e03a1f7ca4 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -1,4 +1,5 @@
#include <linux/mm.h>
+#include <linux/slab.h>
#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 198bcff5e96f..e43ec600afcf 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -15,7 +15,6 @@
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
@@ -23,7 +22,8 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -52,12 +52,24 @@ struct pmb_entry {
struct pmb_entry *link;
};
+static struct {
+ unsigned long size;
+ int flag;
+} pmb_sizes[] = {
+ { .size = SZ_512M, .flag = PMB_SZ_512M, },
+ { .size = SZ_128M, .flag = PMB_SZ_128M, },
+ { .size = SZ_64M, .flag = PMB_SZ_64M, },
+ { .size = SZ_16M, .flag = PMB_SZ_16M, },
+};
+
static void pmb_unmap_entry(struct pmb_entry *, int depth);
static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
+static unsigned int pmb_iomapping_enabled;
+
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -73,6 +85,142 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
return mk_pmb_entry(entry) | PMB_DATA;
}
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+ return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+ unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+ flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+ flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ flags |= PMB_C;
+#endif
+
+ return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+ unsigned long pmb_flags = 0;
+ u64 flags = pgprot_val(prot);
+
+ if (flags & _PAGE_CACHABLE)
+ pmb_flags |= PMB_C;
+ if (flags & _PAGE_WT)
+ pmb_flags |= PMB_WT | PMB_UB;
+
+ return pmb_flags;
+}
+
+static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+ return (b->vpn == (a->vpn + a->size)) &&
+ (b->ppn == (a->ppn + a->size)) &&
+ (b->flags == a->flags);
+}
+
+static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size)
+{
+ int i;
+
+ read_lock(&pmb_rwlock);
+
+ for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
+ struct pmb_entry *pmbe, *iter;
+ unsigned long span;
+
+ if (!test_bit(i, pmb_map))
+ continue;
+
+ pmbe = &pmb_entry_list[i];
+
+ /*
+ * See if VPN and PPN are bounded by an existing mapping.
+ */
+ if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
+ continue;
+ if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
+ continue;
+
+ /*
+ * Now see if we're in range of a simple mapping.
+ */
+ if (size <= pmbe->size) {
+ read_unlock(&pmb_rwlock);
+ return true;
+ }
+
+ span = pmbe->size;
+
+ /*
+ * Finally for sizes that involve compound mappings, walk
+ * the chain.
+ */
+ for (iter = pmbe->link; iter; iter = iter->link)
+ span += iter->size;
+
+ /*
+ * Nothing else to do if the range requirements are met.
+ */
+ if (size <= span) {
+ read_unlock(&pmb_rwlock);
+ return true;
+ }
+ }
+
+ read_unlock(&pmb_rwlock);
+ return false;
+}
+
+static bool pmb_size_valid(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return true;
+
+ return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+ return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+ return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return pmb_sizes[i].flag;
+
+ return 0;
+}
+
static int pmb_alloc_entry(void)
{
int pos;
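
The pmb_mapping_exists() helper added above decides whether a request is already covered: the start address must fall inside an existing entry, both by VPN and PPN, and that entry plus its linked compound chain must span at least the requested size. A condensed user-space model of that containment test (struct entry is a stand-in for the kernel's struct pmb_entry, and the locking is omitted):

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        unsigned long vpn, ppn, size;
        struct entry *link;     /* next piece of a compound mapping */
    };

    static bool covered_by(const struct entry *e, unsigned long vaddr,
                           unsigned long phys, unsigned long size)
    {
        unsigned long span = e->size;
        const struct entry *iter;

        /* The request has to start inside this entry, both VPN and PPN. */
        if (vaddr < e->vpn || vaddr >= e->vpn + e->size)
            return false;
        if (phys < e->ppn || phys >= e->ppn + e->size)
            return false;

        /* Compound mappings: add up the linked chain behind it. */
        for (iter = e->link; iter; iter = iter->link)
            span += iter->size;

        return size <= span;
    }
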
@@ -140,33 +288,22 @@ static void pmb_free(struct pmb_entry *pmbe)
}
/*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
+ * Must be run uncached.
*/
-static __always_inline unsigned long pmb_cache_flags(void)
+static void __set_pmb_entry(struct pmb_entry *pmbe)
{
- unsigned long flags = 0;
+ unsigned long addr, data;
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
- flags |= PMB_C;
-#endif
+ addr = mk_pmb_addr(pmbe->entry);
+ data = mk_pmb_data(pmbe->entry);
- return flags;
-}
+ jump_to_uncached();
-/*
- * Must be run uncached.
- */
-static void __set_pmb_entry(struct pmb_entry *pmbe)
-{
- writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
- writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
- mk_pmb_data(pmbe->entry));
+ /* Set V-bit */
+ __raw_writel(pmbe->vpn | PMB_V, addr);
+ __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
+
+ back_to_cached();
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -185,6 +322,7 @@ static void __clear_pmb_entry(struct pmb_entry *pmbe)
writel_uncached(data_val & ~PMB_V, data);
}
+#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
unsigned long flags;
@@ -193,145 +331,157 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
__set_pmb_entry(pmbe);
spin_unlock_irqrestore(&pmbe->lock, flags);
}
+#endif /* CONFIG_PM */
-static struct {
- unsigned long size;
- int flag;
-} pmb_sizes[] = {
- { .size = SZ_512M, .flag = PMB_SZ_512M, },
- { .size = SZ_128M, .flag = PMB_SZ_128M, },
- { .size = SZ_64M, .flag = PMB_SZ_64M, },
- { .size = SZ_16M, .flag = PMB_SZ_16M, },
-};
-
-long pmb_remap(unsigned long vaddr, unsigned long phys,
- unsigned long size, pgprot_t prot)
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size, pgprot_t prot)
{
struct pmb_entry *pmbp, *pmbe;
- unsigned long wanted;
- int pmb_flags, i;
- long err;
- u64 flags;
-
- flags = pgprot_val(prot);
+ unsigned long orig_addr, orig_size;
+ unsigned long flags, pmb_flags;
+ int i, mapped;
- pmb_flags = PMB_WT | PMB_UB;
+ if (!pmb_addr_valid(vaddr, size))
+ return -EFAULT;
+ if (pmb_mapping_exists(vaddr, phys, size))
+ return 0;
- /* Convert typical pgprot value to the PMB equivalent */
- if (flags & _PAGE_CACHABLE) {
- pmb_flags |= PMB_C;
+ orig_addr = vaddr;
+ orig_size = size;
- if ((flags & _PAGE_WT) == 0)
- pmb_flags &= ~(PMB_WT | PMB_UB);
- }
+ flush_tlb_kernel_range(vaddr, vaddr + size);
+ pmb_flags = pgprot_to_pmb_flags(prot);
pmbp = NULL;
- wanted = size;
-again:
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
- unsigned long flags;
+ do {
+ for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+ if (size < pmb_sizes[i].size)
+ continue;
+
+ pmbe = pmb_alloc(vaddr, phys, pmb_flags |
+ pmb_sizes[i].flag, PMB_NO_ENTRY);
+ if (IS_ERR(pmbe)) {
+ pmb_unmap_entry(pmbp, mapped);
+ return PTR_ERR(pmbe);
+ }
- if (size < pmb_sizes[i].size)
- continue;
+ spin_lock_irqsave(&pmbe->lock, flags);
- pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
- PMB_NO_ENTRY);
- if (IS_ERR(pmbe)) {
- err = PTR_ERR(pmbe);
- goto out;
- }
+ pmbe->size = pmb_sizes[i].size;
- spin_lock_irqsave(&pmbe->lock, flags);
+ __set_pmb_entry(pmbe);
- __set_pmb_entry(pmbe);
+ phys += pmbe->size;
+ vaddr += pmbe->size;
+ size -= pmbe->size;
- phys += pmb_sizes[i].size;
- vaddr += pmb_sizes[i].size;
- size -= pmb_sizes[i].size;
+ /*
+ * Link adjacent entries that span multiple PMB
+ * entries for easier tear-down.
+ */
+ if (likely(pmbp)) {
+ spin_lock(&pmbp->lock);
+ pmbp->link = pmbe;
+ spin_unlock(&pmbp->lock);
+ }
- pmbe->size = pmb_sizes[i].size;
+ pmbp = pmbe;
- /*
- * Link adjacent entries that span multiple PMB entries
- * for easier tear-down.
- */
- if (likely(pmbp)) {
- spin_lock(&pmbp->lock);
- pmbp->link = pmbe;
- spin_unlock(&pmbp->lock);
+ /*
+ * Instead of trying smaller sizes on every
+ * iteration (even if we succeed in allocating
+ * space), try using pmb_sizes[i].size again.
+ */
+ i--;
+ mapped++;
+
+ spin_unlock_irqrestore(&pmbe->lock, flags);
}
+ } while (size >= SZ_16M);
- pmbp = pmbe;
+ flush_cache_vmap(orig_addr, orig_addr + orig_size);
- /*
- * Instead of trying smaller sizes on every iteration
- * (even if we succeed in allocating space), try using
- * pmb_sizes[i].size again.
- */
- i--;
+ return 0;
+}
- spin_unlock_irqrestore(&pmbe->lock, flags);
- }
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller)
+{
+ unsigned long vaddr;
+ phys_addr_t offset, last_addr;
+ phys_addr_t align_mask;
+ unsigned long aligned;
+ struct vm_struct *area;
+ int i, ret;
- if (size >= SZ_16M)
- goto again;
+ if (!pmb_iomapping_enabled)
+ return NULL;
- return wanted - size;
+ /*
+ * Small mappings need to go through the TLB.
+ */
+ if (size < SZ_16M)
+ return ERR_PTR(-EINVAL);
+ if (!pmb_prot_valid(prot))
+ return ERR_PTR(-EINVAL);
-out:
- pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (size >= pmb_sizes[i].size)
+ break;
+
+ last_addr = phys + size;
+ align_mask = ~(pmb_sizes[i].size - 1);
+ offset = phys & ~align_mask;
+ phys &= align_mask;
+ aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
+
+ /*
+ * XXX: This should really start from uncached_end, but this
+ * causes the MMU to reset, so for now we restrict it to the
+ * 0xb000...0xc000 range.
+ */
+ area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
+ P3SEG, caller);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys;
+ vaddr = (unsigned long)area->addr;
- return err;
+ ret = pmb_bolt_mapping(vaddr, phys, size, prot);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ return (void __iomem *)(offset + (char *)vaddr);
}
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
{
struct pmb_entry *pmbe = NULL;
- int i;
+ unsigned long vaddr = (unsigned long __force)addr;
+ int i, found = 0;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
if (test_bit(i, pmb_map)) {
pmbe = &pmb_entry_list[i];
- if (pmbe->vpn == addr)
+ if (pmbe->vpn == vaddr) {
+ found = 1;
break;
+ }
}
}
read_unlock(&pmb_rwlock);
- pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
- return (b->vpn == (a->vpn + a->size)) &&
- (b->ppn == (a->ppn + a->size)) &&
- (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return true;
-
- return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return pmb_sizes[i].flag;
+ if (found) {
+ pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+ return 0;
+ }
- return 0;
+ return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
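
pmb_bolt_mapping() above carves a request into PMB entries greedily: it walks pmb_sizes[] largest-first and, after a successful allocation, retries the same size (the i-- in the inner loop) before falling through to the next smaller one, stopping once less than 16 MB remains. A compilable model of just that size split (entry sizes match pmb_sizes[]; everything else is simplified away):

    #include <stdio.h>

    /* The four PMB entry sizes, largest first, as in pmb_sizes[] above. */
    static const unsigned long pmb_size[] = {
        512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
    };

    /* Print the sequence of PMB entries a request of 'size' bytes would use. */
    static void split(unsigned long size)
    {
        int i;

        while (size >= (16UL << 20)) {
            for (i = 0; i < 4; i++) {
                if (size < pmb_size[i])
                    continue;
                printf("entry: %lu MiB\n", pmb_size[i] >> 20);
                size -= pmb_size[i];
                i--;    /* retry the same size before trying a smaller one */
            }
        }
    }

    int main(void)
    {
        split(192UL << 20);     /* -> one 128 MiB entry + one 64 MiB entry */
        return 0;
    }
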
@@ -351,6 +501,8 @@ static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
*/
__clear_pmb_entry(pmbe);
+ flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
+
pmbe = pmblink->link;
pmb_free(pmblink);
@@ -369,11 +521,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
write_unlock_irqrestore(&pmb_rwlock, flags);
}
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
- return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
static void __init pmb_notify(void)
{
int i;
@@ -625,6 +772,18 @@ static void __init pmb_resize(void)
}
#endif
+static int __init early_pmb(char *p)
+{
+ if (!p)
+ return 0;
+
+ if (strstr(p, "iomap"))
+ pmb_iomapping_enabled = 1;
+
+ return 0;
+}
+early_param("pmb", early_pmb);
+
void __init pmb_init(void)
{
/* Synchronize software state */
@@ -644,7 +803,7 @@ void __init pmb_init(void)
writel_uncached(0, PMB_IRMCR);
/* Flush out the TLB */
- __raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
+ local_flush_tlb_all();
ctrl_barrier();
}
@@ -713,7 +872,7 @@ static int __init pmb_debugfs_init(void)
return 0;
}
-postcore_initcall(pmb_debugfs_init);
+subsys_initcall(pmb_debugfs_init);
#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
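
PMB-backed ioremap() is opt-in: pmb_remap_caller() returns NULL unless pmb_iomapping_enabled is set, and the only thing that sets it is the early parameter handler above, so large I/O windows are routed through the PMB only when the kernel is booted with something like

    pmb=iomap

on the command line. Without it, ioremap() keeps using the conventional page-table path and only the bolted RAM mappings go through the PMB.
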
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 32dc674c550c..b71db6af8060 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -73,5 +73,35 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
jump_to_uncached();
__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
+ __raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
+ __raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
back_to_cached();
}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+ int i;
+
+ /*
+ * Flush all the TLB.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ status = __raw_readl(MMUCR);
+ status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+ if (status == 0)
+ status = MMUCR_URB_NENTRIES;
+
+ for (i = 0; i < status; i++)
+ __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+ for (i = 0; i < 4; i++)
+ __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+ back_to_cached();
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
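
The SH-4A/PTEAEX variant of local_flush_tlb_all() above only clears the replaceable part of the UTLB: as the URB handling in tlb-urb.c suggests, wired entries sit at index URB and above, so flushing indices [0, URB) (or the whole array when URB is 0) leaves them intact, with the four ITLB slots cleared alongside. A tiny model of the loop bound, with a placeholder entry count:

    #define UTLB_ENTRIES 64     /* placeholder for MMUCR_URB_NENTRIES */

    /* Entries [0, bound) get invalidated; URB == 0 means nothing is wired. */
    static unsigned int flush_bound(unsigned int urb_field)
    {
        return urb_field ? urb_field : UTLB_ENTRIES;
    }
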
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 4f5f7cbdd508..7a940dbfc2e9 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -77,3 +77,22 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
for (i = 0; i < ways; i++)
__raw_writel(data, addr + (i << 8));
}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+
+ /*
+ * Flush all the TLB.
+ *
+ * Write to the MMU control register's bit:
+ * TF-bit for SH-3, TI-bit for SH-4.
+ * It's same position, bit #2.
+ */
+ local_irq_save(flags);
+ status = __raw_readl(MMUCR);
+ status |= 0x04;
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index ccac77f504a8..cfdf7930d294 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -80,3 +80,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
__raw_writel(data, addr);
back_to_cached();
}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+ int i;
+
+ /*
+ * Flush all the TLB.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ status = __raw_readl(MMUCR);
+ status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+ if (status == 0)
+ status = MMUCR_URB_NENTRIES;
+
+ for (i = 0; i < status; i++)
+ __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+ for (i = 0; i < 4; i++)
+ __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+ back_to_cached();
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
index bb5b9098956d..c92ce20db39b 100644
--- a/arch/sh/mm/tlb-urb.c
+++ b/arch/sh/mm/tlb-urb.c
@@ -24,13 +24,9 @@ void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
local_irq_save(flags);
- /* Load the entry into the TLB */
- __update_tlb(vma, addr, pte);
-
- /* ... and wire it up. */
status = __raw_readl(MMUCR);
urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
- status &= ~MMUCR_URB;
+ status &= ~MMUCR_URC;
/*
* Make sure we're not trying to wire the last TLB entry slot.
@@ -39,7 +35,23 @@ void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
urb = urb % MMUCR_URB_NENTRIES;
+ /*
+ * Insert this entry into the highest non-wired TLB slot (via
+ * the URC field).
+ */
+ status |= (urb << MMUCR_URC_SHIFT);
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+
+ /* Load the entry into the TLB */
+ __update_tlb(vma, addr, pte);
+
+ /* ... and wire it up. */
+ status = __raw_readl(MMUCR);
+
+ status &= ~MMUCR_URB;
status |= (urb << MMUCR_URB_SHIFT);
+
__raw_writel(status, MMUCR);
ctrl_barrier();
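
The tlb_wire_entry() change above is about ordering: rather than loading the PTE first and wiring afterwards, URC is pointed at the slot that is about to become wired, so the TLB load issued by __update_tlb() lands exactly there, and only then is URB lowered to take that slot out of the replaceable range. A sketch of that sequence on a simplified MMUCR model (field behaviour is inferred from the code above, not quoted from the SH manual):

    /* Simplified view of the two MMUCR fields involved. */
    struct mmucr_model {
        unsigned int urb;   /* first wired UTLB slot */
        unsigned int urc;   /* slot targeted by the next TLB load */
    };

    static void wire_next_slot(struct mmucr_model *m)
    {
        unsigned int slot = m->urb - 1;     /* slot that will become wired */

        m->urc = slot;      /* 1: steer the next TLB load into this slot   */
        /*                     2: __update_tlb() fills entry 'slot'        */
        m->urb = slot;      /* 3: shrink the replaceable region to wire it */
    }
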
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 004bb3f25b5f..3fbe03ce8fe3 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,22 +119,3 @@ void local_flush_tlb_mm(struct mm_struct *mm)
local_irq_restore(flags);
}
}
-
-void local_flush_tlb_all(void)
-{
- unsigned long flags, status;
-
- /*
- * Flush all the TLB.
- *
- * Write to the MMU control register's bit:
- * TF-bit for SH-3, TI-bit for SH-4.
- * It's same position, bit #2.
- */
- local_irq_save(flags);
- status = __raw_readl(MMUCR);
- status |= 0x04;
- __raw_writel(status, MMUCR);
- ctrl_barrier();
- local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index cf20a5c5136a..8a4eca551fc0 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -1,6 +1,8 @@
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/sizes.h>
#include <asm/page.h>
+#include <asm/addrspace.h>
/*
* This is the offset of the uncached section from its cached alias.
@@ -15,15 +17,22 @@
unsigned long cached_to_uncached = SZ_512M;
unsigned long uncached_size = SZ_512M;
unsigned long uncached_start, uncached_end;
+EXPORT_SYMBOL(uncached_start);
+EXPORT_SYMBOL(uncached_end);
int virt_addr_uncached(unsigned long kaddr)
{
return (kaddr >= uncached_start) && (kaddr < uncached_end);
}
+EXPORT_SYMBOL(virt_addr_uncached);
void __init uncached_init(void)
{
+#ifdef CONFIG_29BIT
+ uncached_start = P2SEG;
+#else
uncached_start = memory_end;
+#endif
uncached_end = uncached_start + uncached_size;
}
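
With uncached_start/uncached_end now exported, modules can ask whether a kernel virtual address lives in the uncached window, and the cached and uncached aliases of an address differ only by cached_to_uncached (per the comment at the top of this file). A small model; the helper names below are made up for illustration:

    /* Stand-ins for the variables exported above. */
    extern unsigned long uncached_start, uncached_end, cached_to_uncached;

    /* Same test as virt_addr_uncached(). */
    static int in_uncached_window(unsigned long kaddr)
    {
        return kaddr >= uncached_start && kaddr < uncached_end;
    }

    /* Hypothetical helper: cached address -> its uncached alias. */
    static unsigned long uncached_alias(unsigned long cached_kaddr)
    {
        return cached_kaddr + cached_to_uncached;
    }
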