Diffstat (limited to 'arch/ppc64/mm/init.c'):
-rw-r--r--  arch/ppc64/mm/init.c | 96
1 file changed, 54 insertions(+), 42 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index e58a24d42879..c02dc9809ca5 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -42,7 +42,6 @@
#include <asm/pgalloc.h>
#include <asm/page.h>
-#include <asm/abs_addr.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
@@ -66,6 +65,14 @@
#include <asm/vdso.h>
#include <asm/imalloc.h>
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
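The two #warning blocks added above are compile-time sanity checks on the address-space constants: they fire during the build, not at runtime. A minimal standalone sketch of the same idiom, using invented constants (MY_SPAN and MY_LIMIT are hypothetical, not from this file):

/* Hypothetical limits, for illustration only. */
#define MY_SPAN	 0x10000UL
#define MY_LIMIT 0x08000UL

/* The preprocessor evaluates the comparison; if it holds, the
 * compiler emits a diagnostic at build time. */
#if MY_SPAN > MY_LIMIT
#warning MY_SPAN exceeds MY_LIMIT, so part of the span is unusable
#endif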
@@ -159,7 +166,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
if (!ptep)
return -ENOMEM;
- pa = abs_to_phys(pa);
set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
spin_unlock(&init_mm.page_table_lock);
@@ -226,7 +232,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
* Before that, we map using addresses going
* up from ioremap_bot. imalloc will use
* the addresses from ioremap_bot through
- * IMALLOC_END (0xE000001fffffffff)
+ * IMALLOC_END
*
*/
pa = addr & PAGE_MASK;
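The caller-visible interface is unchanged by the abs_to_phys() removal: __ioremap (normally reached via ioremap) still returns a kernel virtual address for a physical MMIO range, mapped page by page through map_io_page above. A hedged usage sketch; the physical address and size are invented:

/* Map a hypothetical 4 KB MMIO region and read one register. */
void __iomem *regs = ioremap(0xf0001000UL, 0x1000);
if (regs) {
	u32 val = readl(regs);	/* first 32-bit register */
	/* ... use val ... */
	iounmap(regs);
}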
@@ -417,12 +423,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
int index;
int err;
-#ifdef CONFIG_HUGETLB_PAGE
- /* We leave htlb_segs as it was, but for a fork, we need to
- * clear the huge_pgdir. */
- mm->context.huge_pgdir = NULL;
-#endif
-
again:
if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
return -ENOMEM;
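What survives in init_new_context() is the classic idr retry loop of this era: idr_pre_get() preloads tree nodes outside the lock (it may sleep), idr_get_new_above() allocates the ID under the lock, and -EAGAIN sends the caller back around when another CPU has consumed the preallocation. A minimal sketch of the pattern; my_idr, my_lock and alloc_id are hypothetical names:

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int alloc_id(void *ptr)
{
	int id, err;

again:
	/* Preallocate idr nodes; may sleep, so done unlocked. */
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&my_lock);
	err = idr_get_new_above(&my_idr, ptr, 1, &id);
	spin_unlock(&my_lock);

	if (err == -EAGAIN)
		goto again;	/* preallocation raced away; retry */
	if (err)
		return err;

	return id;
}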
@@ -453,8 +453,6 @@ void destroy_context(struct mm_struct *mm)
spin_unlock(&mmu_context_lock);
mm->context.id = NO_CONTEXT;
-
- hugetlb_mm_free_pgd(mm);
}
/*
@@ -484,9 +482,9 @@ void __init mm_init_ppc64(void)
for (i = 1; i < lmb.memory.cnt; i++) {
unsigned long base, prevbase, prevsize;
- prevbase = lmb.memory.region[i-1].physbase;
+ prevbase = lmb.memory.region[i-1].base;
prevsize = lmb.memory.region[i-1].size;
- base = lmb.memory.region[i].physbase;
+ base = lmb.memory.region[i].base;
if (base > (prevbase + prevsize)) {
io_hole_start = prevbase + prevsize;
io_hole_size = base - (prevbase + prevsize);
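With the physbase field gone, an lmb region is described by base and size alone, so the hole detection above is a linear walk comparing each region's base against the end of its predecessor. The same walk answers other questions; a small sketch, where lmb_total_size is an invented name (the lmb fields are the ones used above):

/* Sum the sizes of all lmb memory regions. */
static unsigned long __init lmb_total_size(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < lmb.memory.cnt; i++)
		total += lmb.memory.region[i].size;
	return total;
}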
@@ -513,11 +511,8 @@ int page_is_ram(unsigned long pfn)
for (i=0; i < lmb.memory.cnt; i++) {
unsigned long base;
-#ifdef CONFIG_MSCHUNKS
- base = lmb.memory.region[i].physbase;
-#else
base = lmb.memory.region[i].base;
-#endif
+
if ((paddr >= base) &&
(paddr < (base + lmb.memory.region[i].size))) {
return 1;
@@ -547,7 +542,7 @@ void __init do_init_bootmem(void)
*/
bootmap_pages = bootmem_bootmap_pages(total_pages);
- start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE));
+ start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
BUG_ON(!start);
boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
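bootmem_bootmap_pages() sizes the boot allocator's bitmap at one bit per page, and with abs_to_phys() gone, lmb_alloc() hands back the bitmap's plain physical address directly. A worked example of the sizing, assuming 4 KB pages:

/* Assuming PAGE_SHIFT == 12 (4 KB pages) and 1 GB of RAM:
 *   total_pages   = 1 GB / 4 KB    = 262144 pages
 *   bitmap        = 1 bit per page = 262144 bits = 32 KB
 *   bootmap_pages = 32 KB / 4 KB   = 8 pages
 */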
@@ -558,25 +553,25 @@ void __init do_init_bootmem(void)
* present.
*/
for (i=0; i < lmb.memory.cnt; i++) {
- unsigned long physbase, size;
+ unsigned long base, size;
unsigned long start_pfn, end_pfn;
- physbase = lmb.memory.region[i].physbase;
+ base = lmb.memory.region[i].base;
size = lmb.memory.region[i].size;
- start_pfn = physbase >> PAGE_SHIFT;
+ start_pfn = base >> PAGE_SHIFT;
end_pfn = start_pfn + (size >> PAGE_SHIFT);
memory_present(0, start_pfn, end_pfn);
- free_bootmem(physbase, size);
+ free_bootmem(base, size);
}
/* reserve the sections we're already using */
for (i=0; i < lmb.reserved.cnt; i++) {
- unsigned long physbase = lmb.reserved.region[i].physbase;
+ unsigned long base = lmb.reserved.region[i].base;
unsigned long size = lmb.reserved.region[i].size;
- reserve_bootmem(physbase, size);
+ reserve_bootmem(base, size);
}
}
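The pfn conversions in the loops above are plain shifts by PAGE_SHIFT. A worked example with invented values, again assuming 4 KB pages:

/* base = 0x10000000 (256 MB), size = 0x08000000 (128 MB): */
start_pfn = 0x10000000UL >> 12;			/* 0x10000 = 65536 */
end_pfn   = start_pfn + (0x08000000UL >> 12);	/* 0x18000 = 98304 */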
@@ -615,10 +610,10 @@ static int __init setup_kcore(void)
int i;
for (i=0; i < lmb.memory.cnt; i++) {
- unsigned long physbase, size;
+ unsigned long base, size;
struct kcore_list *kcore_mem;
- physbase = lmb.memory.region[i].physbase;
+ base = lmb.memory.region[i].base;
size = lmb.memory.region[i].size;
/* GFP_ATOMIC to avoid might_sleep warnings during boot */
@@ -626,7 +621,7 @@ static int __init setup_kcore(void)
if (!kcore_mem)
panic("mem_init: kmalloc failed\n");
- kclist_add(kcore_mem, __va(physbase), size);
+ kclist_add(kcore_mem, __va(base), size);
}
kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
@@ -686,9 +681,6 @@ void __init mem_init(void)
mem_init_done = 1;
-#ifdef CONFIG_PPC_ISERIES
- iommu_vio_init();
-#endif
/* Initialize the vDSO */
vdso_init();
}
@@ -833,23 +825,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
return virt_addr;
}
-kmem_cache_t *zero_cache;
-
-static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
- memset(pte, 0, PAGE_SIZE);
+ memset(addr, 0, kmem_cache_size(cache));
}
+static const int pgtable_cache_size[2] = {
+ PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+ "pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
void pgtable_cache_init(void)
{
- zero_cache = kmem_cache_create("zero",
- PAGE_SIZE,
- 0,
- SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
- zero_ctor,
- NULL);
- if (!zero_cache)
- panic("pgtable_cache_init(): could not create zero_cache!\n");
+ int i;
+
+ BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+ BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+ BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+ BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+ for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+ int size = pgtable_cache_size[i];
+ const char *name = pgtable_cache_name[i];
+
+ pgtable_cache[i] = kmem_cache_create(name,
+ size, size,
+ SLAB_HWCACHE_ALIGN
+ | SLAB_MUST_HWCACHE_ALIGN,
+ zero_ctor,
+ NULL);
+ if (! pgtable_cache[i])
+ panic("pgtable_cache_init(): could not create %s!\n",
+ name);
+ }
}
pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
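The rewritten pgtable_cache_init() above replaces the single page-sized zero_cache with an array of slab caches, one per distinct page-table size. With old SLAB constructor semantics, zero_ctor runs when a slab is populated, so objects come out zeroed on first allocation, and callers must free tables back in a cleared state to keep that invariant. A hedged sketch of how such a cache is consumed; pte_table_alloc/pte_table_free are invented wrapper names, while pgtable_cache and PTE_CACHE_NUM come from the code above:

/* Allocate one zeroed PTE table from its slab cache. */
static pte_t *pte_table_alloc(void)
{
	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], GFP_KERNEL);
}

/* Return the (cleared) table to the cache on teardown. */
static void pte_table_free(pte_t *pte)
{
	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
}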