Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_low_32.S   |  2
-rw-r--r--  arch/powerpc/mm/hash_low_64.S   |  4
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 28
-rw-r--r--  arch/powerpc/mm/init_64.c       | 10
-rw-r--r--  arch/powerpc/mm/mem.c           |  3
-rw-r--r--  arch/powerpc/mm/pgtable_32.c    |  2
-rw-r--r--  arch/powerpc/mm/slb.c           | 16
-rw-r--r--  arch/powerpc/mm/slb_low.S       | 16
8 files changed, 65 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ddeaf9e38ad5..b9ba7d930801 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -1,6 +1,4 @@
 /*
- * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
- *
  * PowerPC version
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 21d248486479..70f4c833fa32 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -568,6 +568,10 @@ htab_inval_old_hpte:
 	ld	r7,STK_PARM(r9)(r1)	/* ssize */
 	ld	r8,STK_PARM(r8)(r1)	/* local */
 	bl	.flush_hash_page
+	/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
+	lis	r0,_PAGE_HPTE_SUB@h
+	ori	r0,r0,_PAGE_HPTE_SUB@l
+	andc	r30,r30,r0
 	b	htab_insert_pte
 
 htab_bail_ok:
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b5a399f6fa6..0f2d239d94c4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* We try to use 16M pages for vmemmap if that is supported
+	 * and we have at least 1G of RAM at boot
+	 */
+	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+	    lmb_phys_mem_size() >= 0x40000000)
+		mmu_vmemmap_psize = MMU_PAGE_16M;
+	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+		mmu_vmemmap_psize = MMU_PAGE_64K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-	       "virtual = %d, io = %d\n",
+	       "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ", vmemmap = %d"
+#endif
+	       "\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
 	       mmu_psize_defs[mmu_virtual_psize].shift,
-	       mmu_psize_defs[mmu_io_psize].shift);
+	       mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+	       );
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c5ac532a0161..6aa65375abf5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
  *
  */
 
+#undef DEBUG
+
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -208,12 +210,12 @@ int __meminit vmemmap_populated(unsigned long start, int page_size)
 }
 
 int __meminit vmemmap_populate(struct page *start_page,
-			       unsigned long nr_pages, int node)
+				unsigned long nr_pages, int node)
 {
 	unsigned long mode_rw;
 	unsigned long start = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + nr_pages);
-	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 			start, p, __pa(p));
 
 		mapped = htab_bolt_mapping(start, start + page_size,
-					   __pa(p), mode_rw, mmu_linear_psize,
+					   __pa(p), mode_rw, mmu_vmemmap_psize,
 					   mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f67e118116fa..51f82d83bf14 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -151,6 +151,7 @@ out:
 	return ret;
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
@@ -184,8 +185,6 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 }
 EXPORT_SYMBOL_GPL(walk_memory_resource);
 
-#endif /* CONFIG_MEMORY_HOTPLUG */
-
 void show_mem(void)
 {
 	unsigned long total = 0, reserved = 0;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 80d1babb230d..e0ff59f21135 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -402,7 +402,7 @@ void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
 		return;
 	}
 
-	map_page(address, phys, flags);
+	map_page(address, phys, pgprot_val(flags));
 	fixmaps++;
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index cf8705e32d60..89497fb04280 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,7 +28,7 @@
 #include <asm/udbg.h>
 
 #ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
 #else
 #define DBG pr_debug
 #endif
@@ -263,13 +263,19 @@ void slb_initialize(void)
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
 	extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	extern unsigned int *slb_miss_kernel_load_vmemmap;
+	unsigned long vmemmap_llp;
+#endif
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -281,6 +287,12 @@ void slb_initialize(void)
 
 		DBG("SLB: linear  LLP = %04lx\n", linear_llp);
 		DBG("SLB: io      LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+				   SLB_VSID_KERNEL | vmemmap_llp);
+		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 657f6b37e9df..bc44dc4b5c67 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
 	 * it to VSID 0, which is reserved as a bad VSID - one which
 	 * will never have any pages in it.  */
 
-	/* Check if hitting the linear mapping of the vmalloc/ioremap
-	 * kernel space
+	/* Check if hitting the linear mapping or some other kernel space
 	*/
 	bne	cr7,1f
 
@@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* Check virtual memmap region. To be patched at kernel boot */
+	cmpldi	cr0,r9,0xf
+	bne	1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+	li	r11,0
+	b	6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
 	 */
 BEGIN_FTR_SECTION
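
For reference, below is a minimal user-space C sketch of the vmemmap page-size selection that the hash_utils_64.c hunk above adds to htab_init_page_sizes(). It is not kernel code: the stripped-down mmu_psize_def struct, the pick_vmemmap_psize() helper, the mem_size parameter (standing in for lmb_phys_mem_size()) and the example configuration in main() are all hypothetical illustrations of the same decision logic.

/*
 * Sketch of the vmemmap page-size choice: prefer 16M pages when the
 * MMU supports them and at least 1G (0x40000000 bytes) of RAM is
 * present at boot, otherwise fall back to 64K, then 4K.
 */
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

struct mmu_psize_def {
        unsigned int shift;     /* 0 means this page size is unsupported */
};

static int pick_vmemmap_psize(const struct mmu_psize_def *defs,
                              unsigned long long mem_size)
{
        if (defs[MMU_PAGE_16M].shift && mem_size >= 0x40000000ULL)
                return MMU_PAGE_16M;
        else if (defs[MMU_PAGE_64K].shift)
                return MMU_PAGE_64K;
        return MMU_PAGE_4K;
}

int main(void)
{
        /* Hypothetical configuration: 4K, 64K and 16M pages, 2G of RAM */
        struct mmu_psize_def defs[MMU_PAGE_COUNT] = {
                [MMU_PAGE_4K]  = { .shift = 12 },
                [MMU_PAGE_64K] = { .shift = 16 },
                [MMU_PAGE_16M] = { .shift = 24 },
        };

        /* Prints 2, i.e. MMU_PAGE_16M, matching the kernel's preference */
        printf("vmemmap psize index = %d\n",
               pick_vmemmap_psize(defs, 2ULL << 30));
        return 0;
}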