author | Paul Mackerras <paulus@samba.org> | 2005-10-10 21:58:35 +1000
committer | Paul Mackerras <paulus@samba.org> | 2005-10-10 21:58:35 +1000
commit | ab1f9dac6eea25ee59e4c8e1cf0b7476afbbfe07 (patch)
tree | 03577652197b5e58c348ede3c474bc8dd47e046c /arch
parent | 70d64ceaa1a84d2502405422a4dfd3f87786a347 (diff)
powerpc: Merge arch/ppc64/mm to arch/powerpc/mm
This moves the remaining files in arch/ppc64/mm to arch/powerpc/mm,
and arranges that we use them when compiling with ARCH=ppc64.
Signed-off-by: Paul Mackerras <paulus@samba.org>
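In kbuild terms the arrangement is two small Makefile changes: arch/ppc64/Makefile now pulls arch/powerpc/mm/ into the build through core-y, and the new arch/powerpc/mm/Makefile selects the 64-bit objects with obj-$(CONFIG_PPC64). A condensed sketch of that mechanism, using only lines that appear in the diff below:

```make
# arch/ppc64/Makefile: build the merged MM code instead of arch/ppc64/mm/
core-y += arch/powerpc/mm/

# arch/powerpc/mm/Makefile: 64-bit objects are built only when CONFIG_PPC64=y;
# hash_native_64.o is added only for CONFIG_PPC_MULTIPLATFORM via $(hash-y)
hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
                       hash_utils_64.o hash_low_64.o tlb_64.o \
                       slb_low.o slb.o stab.o mmap.o imalloc.o \
                       $(hash-y)
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
```

The hash-$(CONFIG_...) indirection is the usual kbuild idiom: when CONFIG_PPC_MULTIPLATFORM=y the assignment lands in hash-y, so $(hash-y) expands to hash_native_64.o; otherwise it lands in hash-n (or hash-) and expands to nothing.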
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/mm/Makefile | 8
-rw-r--r-- | arch/powerpc/mm/hash_low_64.S (renamed from arch/ppc64/mm/hash_low.S) | 2
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c (renamed from arch/ppc64/mm/hash_native.c) | 0
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c (renamed from arch/ppc64/mm/hash_utils.c) | 2
-rw-r--r-- | arch/powerpc/mm/hugetlbpage.c (renamed from arch/ppc64/mm/hugetlbpage.c) | 0
-rw-r--r-- | arch/powerpc/mm/imalloc.c (renamed from arch/ppc64/mm/imalloc.c) | 0
-rw-r--r-- | arch/powerpc/mm/init_64.c | 36
-rw-r--r-- | arch/powerpc/mm/mem.c | 11
-rw-r--r-- | arch/powerpc/mm/mmap.c (renamed from arch/ppc64/mm/mmap.c) | 0
-rw-r--r-- | arch/powerpc/mm/mmu_decl.h | 18
-rw-r--r-- | arch/powerpc/mm/numa.c (renamed from arch/ppc64/mm/numa.c) | 0
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 34
-rw-r--r-- | arch/powerpc/mm/slb.c (renamed from arch/ppc64/mm/slb.c) | 0
-rw-r--r-- | arch/powerpc/mm/slb_low.S (renamed from arch/ppc64/mm/slb_low.S) | 0
-rw-r--r-- | arch/powerpc/mm/stab.c (renamed from arch/ppc64/mm/stab.c) | 0
-rw-r--r-- | arch/powerpc/mm/tlb_64.c (renamed from arch/ppc64/mm/tlb.c) | 0
-rw-r--r-- | arch/ppc64/Makefile | 2
-rw-r--r-- | arch/ppc64/mm/Makefile | 11
-rw-r--r-- | arch/ppc64/mm/fault.c | 333
-rw-r--r-- | arch/ppc64/mm/init.c | 870
20 files changed, 39 insertions, 1288 deletions
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 35497deeb4b2..612bc4ec72b1 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,8 +5,14 @@
 obj-y := fault.o mem.o lmb.o
 obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o \
 			tlb_32.o
-obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o
+hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
+obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
+			hash_utils_64.o hash_low_64.o tlb_64.o \
+			slb_low.o slb.o stab.o mmap.o imalloc.o \
+			$(hash-y)
 obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o
 obj-$(CONFIG_40x) += 4xx_mmu.o
 obj-$(CONFIG_44x) += 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
+obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/ppc64/mm/hash_low.S b/arch/powerpc/mm/hash_low_64.S
index ee5a5d36bfa8..d6ed9102eeea 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -10,7 +10,7 @@
  * described in the kernel's COPYING file.
  */

-#include <asm/processor.h>
+#include <asm/reg.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/page.h>
diff --git a/arch/ppc64/mm/hash_native.c b/arch/powerpc/mm/hash_native_64.c
index 174d14576c28..174d14576c28 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/powerpc/mm/hash_native_64.c
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/powerpc/mm/hash_utils_64.c
index 83507438d6a0..35dd93eeaf4b 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -78,7 +78,7 @@ extern unsigned long dart_tablebase;
 hpte_t *htab_address;
 unsigned long htab_hash_mask;
-extern unsigned long _SDR1;
+unsigned long _SDR1;

 #define KB (1024)
 #define MB (1024*KB)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ea0994ed974..0ea0994ed974 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
diff --git a/arch/ppc64/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index c65b87b92756..c65b87b92756 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c0ce6a7af3c7..b0fc822ec29f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -73,18 +73,8 @@
 #warning TASK_SIZE is smaller than it needs to be.
#endif -int mem_init_done; -unsigned long ioremap_bot = IMALLOC_BASE; -static unsigned long phbs_io_bot = PHBS_IO_BASE; - -extern pgd_t swapper_pg_dir[]; -extern struct task_struct *current_set[NR_CPUS]; - unsigned long klimit = (unsigned long)_end; -unsigned long _SDR1=0; -unsigned long _ASR=0; - /* max amount of RAM to use */ unsigned long __max_memory; @@ -193,19 +183,6 @@ static int __init setup_kcore(void) } module_init(setup_kcore); -void __iomem * reserve_phb_iospace(unsigned long size) -{ - void __iomem *virt_addr; - - if (phbs_io_bot >= IMALLOC_BASE) - panic("reserve_phb_iospace(): phb io space overflow\n"); - - virt_addr = (void __iomem *) phbs_io_bot; - phbs_io_bot += size; - - return virt_addr; -} - static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) { memset(addr, 0, kmem_cache_size(cache)); @@ -244,16 +221,3 @@ void pgtable_cache_init(void) name); } } - -pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, - unsigned long size, pgprot_t vma_prot) -{ - if (ppc_md.phys_mem_access_prot) - return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot); - - if (!page_is_ram(addr >> PAGE_SHIFT)) - vma_prot = __pgprot(pgprot_val(vma_prot) - | _PAGE_GUARDED | _PAGE_NO_CACHE); - return vma_prot; -} -EXPORT_SYMBOL(phys_mem_access_prot); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 0650de74d0b3..55b5860ed3c9 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -47,6 +47,9 @@ #include <asm/prom.h> #include <asm/lmb.h> #include <asm/sections.h> +#ifdef CONFIG_PPC64 +#include <asm/vdso.h> +#endif #include "mmu_decl.h" @@ -334,7 +337,7 @@ void flush_dcache_icache_page(struct page *page) void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); __flush_dcache_icache(start); kunmap_atomic(start, KM_PPC_SYNC_ICACHE); -#elif defined(CONFIG_8xx) +#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) /* On 8xx there is no need to kmap since highmem is not supported */ __flush_dcache_icache(page_address(page)); #else @@ -463,18 +466,18 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, if (pgdir == NULL) return; - ptep = find_linux_pte(pgdir, ea); + ptep = find_linux_pte(pgdir, address); if (!ptep) return; - vsid = get_vsid(vma->vm_mm->context.id, ea); + vsid = get_vsid(vma->vm_mm->context.id, address); local_irq_save(flags); tmp = cpumask_of_cpu(smp_processor_id()); if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp)) local = 1; - __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep, + __hash_page(address, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep, 0x300, local); local_irq_restore(flags); #endif diff --git a/arch/ppc64/mm/mmap.c b/arch/powerpc/mm/mmap.c index fe65f522aff3..fe65f522aff3 100644 --- a/arch/ppc64/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 06fe8af3af55..a4d7a327c0e5 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -22,11 +22,11 @@ #include <asm/tlbflush.h> #include <asm/mmu.h> +#ifdef CONFIG_PPC32 extern void mapin_ram(void); extern int map_page(unsigned long va, phys_addr_t pa, int flags); extern void setbat(int index, unsigned long virt, unsigned long phys, unsigned int size, int flags); -extern void reserve_phys_mem(unsigned long start, unsigned long size); extern void settlbcam(int index, unsigned long virt, phys_addr_t phys, unsigned int size, int flags, unsigned int pid); extern void invalidate_tlbcam_entry(int index); @@ -36,16 +36,16 @@ extern unsigned long ioremap_base; extern 
unsigned long ioremap_bot; extern unsigned int rtas_data, rtas_size; -extern unsigned long __max_low_memory; -extern unsigned long __initial_memory_limit; -extern unsigned long total_memory; -extern unsigned long total_lowmem; -extern int mem_init_done; - extern PTE *Hash, *Hash_end; extern unsigned long Hash_size, Hash_mask; extern unsigned int num_tlbcam_entries; +#endif + +extern unsigned long __max_low_memory; +extern unsigned long __initial_memory_limit; +extern unsigned long total_memory; +extern unsigned long total_lowmem; /* ...and now those things that may be slightly different between processor * architectures. -- Dan @@ -66,8 +66,8 @@ extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(void); extern void adjust_total_lowmem(void); -#else -/* anything except 4xx or 8xx */ +#elif defined(CONFIG_PPC32) +/* anything 32-bit except 4xx or 8xx */ extern void MMU_init_hw(void); extern unsigned long mmu_mapin_ram(void); diff --git a/arch/ppc64/mm/numa.c b/arch/powerpc/mm/numa.c index cb864b8f2750..cb864b8f2750 100644 --- a/arch/ppc64/mm/numa.c +++ b/arch/powerpc/mm/numa.c diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 724f97e5dee5..484d24f9208b 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -67,30 +67,9 @@ #include <asm/vdso.h> #include <asm/imalloc.h> -#if PGTABLE_RANGE > USER_VSID_RANGE -#warning Limited user VSID range means pagetable space is wasted -#endif - -#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) -#warning TASK_SIZE is smaller than it needs to be. -#endif - -int mem_init_done; unsigned long ioremap_bot = IMALLOC_BASE; static unsigned long phbs_io_bot = PHBS_IO_BASE; -extern pgd_t swapper_pg_dir[]; -extern struct task_struct *current_set[NR_CPUS]; - -unsigned long klimit = (unsigned long)_end; - -/* max amount of RAM to use */ -unsigned long __max_memory; - -/* info on what we think the IO hole is */ -unsigned long io_hole_start; -unsigned long io_hole_size; - #ifdef CONFIG_PPC_ISERIES void __iomem *ioremap(unsigned long addr, unsigned long size) @@ -355,3 +334,16 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size) EXPORT_SYMBOL(ioremap); EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(iounmap); + +void __iomem * reserve_phb_iospace(unsigned long size) +{ + void __iomem *virt_addr; + + if (phbs_io_bot >= IMALLOC_BASE) + panic("reserve_phb_iospace(): phb io space overflow\n"); + + virt_addr = (void __iomem *) phbs_io_bot; + phbs_io_bot += size; + + return virt_addr; +} diff --git a/arch/ppc64/mm/slb.c b/arch/powerpc/mm/slb.c index 0473953f6a37..0473953f6a37 100644 --- a/arch/ppc64/mm/slb.c +++ b/arch/powerpc/mm/slb.c diff --git a/arch/ppc64/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index a3a03da503bc..a3a03da503bc 100644 --- a/arch/ppc64/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S diff --git a/arch/ppc64/mm/stab.c b/arch/powerpc/mm/stab.c index 1b83f002bf27..1b83f002bf27 100644 --- a/arch/ppc64/mm/stab.c +++ b/arch/powerpc/mm/stab.c diff --git a/arch/ppc64/mm/tlb.c b/arch/powerpc/mm/tlb_64.c index 09ab81a10f4f..09ab81a10f4f 100644 --- a/arch/ppc64/mm/tlb.c +++ b/arch/powerpc/mm/tlb_64.c diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile index fa889204d6ae..4a9928ef3032 100644 --- a/arch/ppc64/Makefile +++ b/arch/ppc64/Makefile @@ -83,7 +83,7 @@ head-y := arch/ppc64/kernel/head.o libs-y += arch/ppc64/lib/ core-y += arch/ppc64/kernel/ arch/powerpc/kernel/ -core-y += arch/ppc64/mm/ +core-y += arch/powerpc/mm/ core-y += arch/powerpc/platforms/ 
core-$(CONFIG_XMON) += arch/ppc64/xmon/ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile deleted file mode 100644 index 3695d00d347f..000000000000 --- a/arch/ppc64/mm/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# -# Makefile for the linux ppc-specific parts of the memory manager. -# - -EXTRA_CFLAGS += -mno-minimal-toc - -obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \ - slb_low.o slb.o stab.o mmap.o -obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o -obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c deleted file mode 100644 index be3f25cf3e9f..000000000000 --- a/arch/ppc64/mm/fault.c +++ /dev/null @@ -1,333 +0,0 @@ -/* - * arch/ppc/mm/fault.c - * - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Derived from "arch/i386/mm/fault.c" - * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds - * - * Modified by Cort Dougan and Paul Mackerras. - * - * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <linux/config.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/smp_lock.h> -#include <linux/module.h> -#include <linux/kprobes.h> - -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/mmu.h> -#include <asm/mmu_context.h> -#include <asm/system.h> -#include <asm/uaccess.h> -#include <asm/kdebug.h> -#include <asm/siginfo.h> - -/* - * Check whether the instruction at regs->nip is a store using - * an update addressing form which will update r1. - */ -static int store_updates_sp(struct pt_regs *regs) -{ - unsigned int inst; - - if (get_user(inst, (unsigned int __user *)regs->nip)) - return 0; - /* check for 1 in the rA field */ - if (((inst >> 16) & 0x1f) != 1) - return 0; - /* check major opcode */ - switch (inst >> 26) { - case 37: /* stwu */ - case 39: /* stbu */ - case 45: /* sthu */ - case 53: /* stfsu */ - case 55: /* stfdu */ - return 1; - case 62: /* std or stdu */ - return (inst & 3) == 1; - case 31: - /* check minor opcode */ - switch ((inst >> 1) & 0x3ff) { - case 181: /* stdux */ - case 183: /* stwux */ - case 247: /* stbux */ - case 439: /* sthux */ - case 695: /* stfsux */ - case 759: /* stfdux */ - return 1; - } - } - return 0; -} - -static void do_dabr(struct pt_regs *regs, unsigned long error_code) -{ - siginfo_t info; - - if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, - 11, SIGSEGV) == NOTIFY_STOP) - return; - - if (debugger_dabr_match(regs)) - return; - - /* Clear the DABR */ - set_dabr(0); - - /* Deliver the signal to userspace */ - info.si_signo = SIGTRAP; - info.si_errno = 0; - info.si_code = TRAP_HWBKPT; - info.si_addr = (void __user *)regs->nip; - force_sig_info(SIGTRAP, &info, current); -} - -/* - * The error_code parameter is - * - DSISR for a non-SLB data access fault, - * - SRR1 & 0x08000000 for a non-SLB instruction access fault - * - 0 any SLB fault. 
- * The return value is 0 if the fault was handled, or the signal - * number if this is a kernel fault that can't be handled here. - */ -int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ - struct vm_area_struct * vma; - struct mm_struct *mm = current->mm; - siginfo_t info; - unsigned long code = SEGV_MAPERR; - unsigned long is_write = error_code & DSISR_ISSTORE; - unsigned long trap = TRAP(regs); - unsigned long is_exec = trap == 0x400; - - BUG_ON((trap == 0x380) || (trap == 0x480)); - - if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code, - 11, SIGSEGV) == NOTIFY_STOP) - return 0; - - if (trap == 0x300) { - if (debugger_fault_handler(regs)) - return 0; - } - - /* On a kernel SLB miss we can only check for a valid exception entry */ - if (!user_mode(regs) && (address >= TASK_SIZE)) - return SIGSEGV; - - if (error_code & DSISR_DABRMATCH) { - do_dabr(regs, error_code); - return 0; - } - - if (in_atomic() || mm == NULL) { - if (!user_mode(regs)) - return SIGSEGV; - /* in_atomic() in user mode is really bad, - as is current->mm == NULL. */ - printk(KERN_EMERG "Page fault in user mode with" - "in_atomic() = %d mm = %p\n", in_atomic(), mm); - printk(KERN_EMERG "NIP = %lx MSR = %lx\n", - regs->nip, regs->msr); - die("Weird page fault", regs, SIGSEGV); - } - - /* When running in the kernel we expect faults to occur only to - * addresses in user space. All other faults represent errors in the - * kernel and should generate an OOPS. Unfortunatly, in the case of an - * erroneous fault occuring in a code path which already holds mmap_sem - * we will deadlock attempting to validate the fault against the - * address space. Luckily the kernel only validly references user - * space from well defined areas of code, which are listed in the - * exceptions table. - * - * As the vast majority of faults will be valid we will only perform - * the source reference check when there is a possibilty of a deadlock. - * Attempt to lock the address space, if we cannot we then validate the - * source. If this is invalid we can skip the address space check, - * thus avoiding the deadlock. - */ - if (!down_read_trylock(&mm->mmap_sem)) { - if (!user_mode(regs) && !search_exception_tables(regs->nip)) - goto bad_area_nosemaphore; - - down_read(&mm->mmap_sem); - } - - vma = find_vma(mm, address); - if (!vma) - goto bad_area; - - if (vma->vm_start <= address) { - goto good_area; - } - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - - /* - * N.B. The POWER/Open ABI allows programs to access up to - * 288 bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB - * below the stack pointer (r1) before decrementing it. - * The exec code can write slightly over 640kB to the stack - * before setting the user r1. Thus we allow the stack to - * expand to 1MB without further checks. - */ - if (address + 0x100000 < vma->vm_end) { - /* get user regs even if this fault is in kernel mode */ - struct pt_regs *uregs = current->thread.regs; - if (uregs == NULL) - goto bad_area; - - /* - * A user-mode access to an address a long way below - * the stack pointer is only valid if the instruction - * is one which would update the stack pointer to the - * address accessed if the instruction completed, - * i.e. either stwu rs,n(r1) or stwux rs,r1,rb - * (or the byte, halfword, float or double forms). 
- * - * If we don't check this then any write to the area - * between the last mapped region and the stack will - * expand the stack rather than segfaulting. - */ - if (address + 2048 < uregs->gpr[1] - && (!user_mode(regs) || !store_updates_sp(regs))) - goto bad_area; - } - - if (expand_stack(vma, address)) - goto bad_area; - -good_area: - code = SEGV_ACCERR; - - if (is_exec) { - /* protection fault */ - if (error_code & DSISR_PROTFAULT) - goto bad_area; - if (!(vma->vm_flags & VM_EXEC)) - goto bad_area; - /* a write */ - } else if (is_write) { - if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; - /* a read */ - } else { - if (!(vma->vm_flags & VM_READ)) - goto bad_area; - } - - survive: - /* - * If for any reason at all we couldn't handle the fault, - * make sure we exit gracefully rather than endlessly redo - * the fault. - */ - switch (handle_mm_fault(mm, vma, address, is_write)) { - - case VM_FAULT_MINOR: - current->min_flt++; - break; - case VM_FAULT_MAJOR: - current->maj_flt++; - break; - case VM_FAULT_SIGBUS: - goto do_sigbus; - case VM_FAULT_OOM: - goto out_of_memory; - default: - BUG(); - } - - up_read(&mm->mmap_sem); - return 0; - -bad_area: - up_read(&mm->mmap_sem); - -bad_area_nosemaphore: - /* User mode accesses cause a SIGSEGV */ - if (user_mode(regs)) { - info.si_signo = SIGSEGV; - info.si_errno = 0; - info.si_code = code; - info.si_addr = (void __user *) address; - force_sig_info(SIGSEGV, &info, current); - return 0; - } - - if (trap == 0x400 && (error_code & DSISR_PROTFAULT) - && printk_ratelimit()) - printk(KERN_CRIT "kernel tried to execute NX-protected" - " page (%lx) - exploit attempt? (uid: %d)\n", - address, current->uid); - - return SIGSEGV; - -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ -out_of_memory: - up_read(&mm->mmap_sem); - if (current->pid == 1) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", current->comm); - if (user_mode(regs)) - do_exit(SIGKILL); - return SIGKILL; - -do_sigbus: - up_read(&mm->mmap_sem); - if (user_mode(regs)) { - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRERR; - info.si_addr = (void __user *)address; - force_sig_info(SIGBUS, &info, current); - return 0; - } - return SIGBUS; -} - -/* - * bad_page_fault is called when we have a bad access from the kernel. - * It is called from do_page_fault above and from some of the procedures - * in traps.c. - */ -void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) -{ - const struct exception_table_entry *entry; - - /* Are we prepared to handle this fault? */ - if ((entry = search_exception_tables(regs->nip)) != NULL) { - regs->nip = entry->fixup; - return; - } - - /* kernel has accessed a bad area */ - die("Kernel access of bad area", regs, sig); -} diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c deleted file mode 100644 index c2157c9c3acb..000000000000 --- a/arch/ppc64/mm/init.c +++ /dev/null @@ -1,870 +0,0 @@ -/* - * PowerPC version - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) - * - * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) - * and Cort Dougan (PReP) (cort@cs.nmt.edu) - * Copyright (C) 1996 Paul Mackerras - * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). - * - * Derived from "arch/i386/mm/init.c" - * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds - * - * Dave Engebretsen <engebret@us.ibm.com> - * Rework for PPC64 port. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include <linux/config.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/swap.h> -#include <linux/stddef.h> -#include <linux/vmalloc.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/bootmem.h> -#include <linux/highmem.h> -#include <linux/idr.h> -#include <linux/nodemask.h> -#include <linux/module.h> - -#include <asm/pgalloc.h> -#include <asm/page.h> -#include <asm/prom.h> -#include <asm/lmb.h> -#include <asm/rtas.h> -#include <asm/io.h> -#include <asm/mmu_context.h> -#include <asm/pgtable.h> -#include <asm/mmu.h> -#include <asm/uaccess.h> -#include <asm/smp.h> -#include <asm/machdep.h> -#include <asm/tlb.h> -#include <asm/eeh.h> -#include <asm/processor.h> -#include <asm/mmzone.h> -#include <asm/cputable.h> -#include <asm/ppcdebug.h> -#include <asm/sections.h> -#include <asm/system.h> -#include <asm/iommu.h> -#include <asm/abs_addr.h> -#include <asm/vdso.h> -#include <asm/imalloc.h> - -#if PGTABLE_RANGE > USER_VSID_RANGE -#warning Limited user VSID range means pagetable space is wasted -#endif - -#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) -#warning TASK_SIZE is smaller than it needs to be. -#endif - -int mem_init_done; -unsigned long ioremap_bot = IMALLOC_BASE; -static unsigned long phbs_io_bot = PHBS_IO_BASE; - -extern pgd_t swapper_pg_dir[]; -extern struct task_struct *current_set[NR_CPUS]; - -unsigned long klimit = (unsigned long)_end; - -unsigned long _SDR1=0; -unsigned long _ASR=0; - -/* max amount of RAM to use */ -unsigned long __max_memory; - -/* info on what we think the IO hole is */ -unsigned long io_hole_start; -unsigned long io_hole_size; - -void show_mem(void) -{ - unsigned long total = 0, reserved = 0; - unsigned long shared = 0, cached = 0; - struct page *page; - pg_data_t *pgdat; - unsigned long i; - - printk("Mem-info:\n"); - show_free_areas(); - printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { - for (i = 0; i < pgdat->node_spanned_pages; i++) { - page = pgdat_page_nr(pgdat, i); - total++; - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (page_count(page)) - shared += page_count(page) - 1; - } - } - printk("%ld pages of RAM\n", total); - printk("%ld reserved pages\n", reserved); - printk("%ld pages shared\n", shared); - printk("%ld pages swap cached\n", cached); -} - -#ifdef CONFIG_PPC_ISERIES - -void __iomem *ioremap(unsigned long addr, unsigned long size) -{ - return (void __iomem *)addr; -} - -extern void __iomem *__ioremap(unsigned long addr, unsigned long size, - unsigned long flags) -{ - return (void __iomem *)addr; -} - -void iounmap(volatile void __iomem *addr) -{ - return; -} - -#else - -/* - * map_io_page currently only called by __ioremap - * map_io_page adds an entry to the ioremap page table - * and adds an entry to the HPT, possibly bolting it - */ -static int map_io_page(unsigned long ea, unsigned long pa, int flags) -{ - pgd_t *pgdp; - pud_t *pudp; - pmd_t *pmdp; - pte_t *ptep; - unsigned long vsid; - - if (mem_init_done) { - spin_lock(&init_mm.page_table_lock); - pgdp = 
pgd_offset_k(ea); - pudp = pud_alloc(&init_mm, pgdp, ea); - if (!pudp) - return -ENOMEM; - pmdp = pmd_alloc(&init_mm, pudp, ea); - if (!pmdp) - return -ENOMEM; - ptep = pte_alloc_kernel(&init_mm, pmdp, ea); - if (!ptep) - return -ENOMEM; - set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, - __pgprot(flags))); - spin_unlock(&init_mm.page_table_lock); - } else { - unsigned long va, vpn, hash, hpteg; - - /* - * If the mm subsystem is not fully up, we cannot create a - * linux page table entry for this mapping. Simply bolt an - * entry in the hardware page table. - */ - vsid = get_kernel_vsid(ea); - va = (vsid << 28) | (ea & 0xFFFFFFF); - vpn = va >> PAGE_SHIFT; - - hash = hpt_hash(vpn, 0); - - hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); - - /* Panic if a pte grpup is full */ - if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, - HPTE_V_BOLTED, - _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX) - == -1) { - panic("map_io_page: could not insert mapping"); - } - } - return 0; -} - - -static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, - unsigned long ea, unsigned long size, - unsigned long flags) -{ - unsigned long i; - - if ((flags & _PAGE_PRESENT) == 0) - flags |= pgprot_val(PAGE_KERNEL); - - for (i = 0; i < size; i += PAGE_SIZE) - if (map_io_page(ea+i, pa+i, flags)) - return NULL; - - return (void __iomem *) (ea + (addr & ~PAGE_MASK)); -} - - -void __iomem * -ioremap(unsigned long addr, unsigned long size) -{ - return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); -} - -void __iomem * __ioremap(unsigned long addr, unsigned long size, - unsigned long flags) -{ - unsigned long pa, ea; - void __iomem *ret; - - /* - * Choose an address to map it to. - * Once the imalloc system is running, we use it. - * Before that, we map using addresses going - * up from ioremap_bot. 
imalloc will use - * the addresses from ioremap_bot through - * IMALLOC_END - * - */ - pa = addr & PAGE_MASK; - size = PAGE_ALIGN(addr + size) - pa; - - if (size == 0) - return NULL; - - if (mem_init_done) { - struct vm_struct *area; - area = im_get_free_area(size); - if (area == NULL) - return NULL; - ea = (unsigned long)(area->addr); - ret = __ioremap_com(addr, pa, ea, size, flags); - if (!ret) - im_free(area->addr); - } else { - ea = ioremap_bot; - ret = __ioremap_com(addr, pa, ea, size, flags); - if (ret) - ioremap_bot += size; - } - return ret; -} - -#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK)) - -int __ioremap_explicit(unsigned long pa, unsigned long ea, - unsigned long size, unsigned long flags) -{ - struct vm_struct *area; - void __iomem *ret; - - /* For now, require page-aligned values for pa, ea, and size */ - if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) || - !IS_PAGE_ALIGNED(size)) { - printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__); - return 1; - } - - if (!mem_init_done) { - /* Two things to consider in this case: - * 1) No records will be kept (imalloc, etc) that the region - * has been remapped - * 2) It won't be easy to iounmap() the region later (because - * of 1) - */ - ; - } else { - area = im_get_area(ea, size, - IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS); - if (area == NULL) { - /* Expected when PHB-dlpar is in play */ - return 1; - } - if (ea != (unsigned long) area->addr) { - printk(KERN_ERR "unexpected addr return from " - "im_get_area\n"); - return 1; - } - } - - ret = __ioremap_com(pa, pa, ea, size, flags); - if (ret == NULL) { - printk(KERN_ERR "ioremap_explicit() allocation failure !\n"); - return 1; - } - if (ret != (void *) ea) { - printk(KERN_ERR "__ioremap_com() returned unexpected addr\n"); - return 1; - } - - return 0; -} - -/* - * Unmap an IO region and remove it from imalloc'd list. - * Access to IO memory should be serialized by driver. - * This code is modeled after vmalloc code - unmap_vm_area() - * - * XXX what about calls before mem_init_done (ie python_countermeasures()) - */ -void iounmap(volatile void __iomem *token) -{ - void *addr; - - if (!mem_init_done) - return; - - addr = (void *) ((unsigned long __force) token & PAGE_MASK); - - im_free(addr); -} - -static int iounmap_subset_regions(unsigned long addr, unsigned long size) -{ - struct vm_struct *area; - - /* Check whether subsets of this region exist */ - area = im_get_area(addr, size, IM_REGION_SUPERSET); - if (area == NULL) - return 1; - - while (area) { - iounmap((void __iomem *) area->addr); - area = im_get_area(addr, size, - IM_REGION_SUPERSET); - } - - return 0; -} - -int iounmap_explicit(volatile void __iomem *start, unsigned long size) -{ - struct vm_struct *area; - unsigned long addr; - int rc; - - addr = (unsigned long __force) start & PAGE_MASK; - - /* Verify that the region either exists or is a subset of an existing - * region. In the latter case, split the parent region to create - * the exact region - */ - area = im_get_area(addr, size, - IM_REGION_EXISTS | IM_REGION_SUBSET); - if (area == NULL) { - /* Determine whether subset regions exist. If so, unmap */ - rc = iounmap_subset_regions(addr, size); - if (rc) { - printk(KERN_ERR - "%s() cannot unmap nonexistent range 0x%lx\n", - __FUNCTION__, addr); - return 1; - } - } else { - iounmap((void __iomem *) area->addr); - } - /* - * FIXME! 
This can't be right: - iounmap(area->addr); - * Maybe it should be "iounmap(area);" - */ - return 0; -} - -#endif - -EXPORT_SYMBOL(ioremap); -EXPORT_SYMBOL(__ioremap); -EXPORT_SYMBOL(iounmap); - -void free_initmem(void) -{ - unsigned long addr; - - addr = (unsigned long)__init_begin; - for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { - memset((void *)addr, 0xcc, PAGE_SIZE); - ClearPageReserved(virt_to_page(addr)); - set_page_count(virt_to_page(addr), 1); - free_page(addr); - totalram_pages++; - } - printk ("Freeing unused kernel memory: %luk freed\n", - ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10); -} - -#ifdef CONFIG_BLK_DEV_INITRD -void free_initrd_mem(unsigned long start, unsigned long end) -{ - if (start < end) - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - set_page_count(virt_to_page(start), 1); - free_page(start); - totalram_pages++; - } -} -#endif - -static DEFINE_SPINLOCK(mmu_context_lock); -static DEFINE_IDR(mmu_context_idr); - -int init_new_context(struct task_struct *tsk, struct mm_struct *mm) -{ - int index; - int err; - -again: - if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) - return -ENOMEM; - - spin_lock(&mmu_context_lock); - err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index); - spin_unlock(&mmu_context_lock); - - if (err == -EAGAIN) - goto again; - else if (err) - return err; - - if (index > MAX_CONTEXT) { - idr_remove(&mmu_context_idr, index); - return -ENOMEM; - } - - mm->context.id = index; - - return 0; -} - -void destroy_context(struct mm_struct *mm) -{ - spin_lock(&mmu_context_lock); - idr_remove(&mmu_context_idr, mm->context.id); - spin_unlock(&mmu_context_lock); - - mm->context.id = NO_CONTEXT; -} - -/* - * Do very early mm setup. - */ -void __init mm_init_ppc64(void) -{ -#ifndef CONFIG_PPC_ISERIES - unsigned long i; -#endif - - ppc64_boot_msg(0x100, "MM Init"); - - /* This is the story of the IO hole... please, keep seated, - * unfortunately, we are out of oxygen masks at the moment. - * So we need some rough way to tell where your big IO hole - * is. On pmac, it's between 2G and 4G, on POWER3, it's around - * that area as well, on POWER4 we don't have one, etc... - * We need that as a "hint" when sizing the TCE table on POWER3 - * So far, the simplest way that seem work well enough for us it - * to just assume that the first discontinuity in our physical - * RAM layout is the IO hole. 
That may not be correct in the future - * (and isn't on iSeries but then we don't care ;) - */ - -#ifndef CONFIG_PPC_ISERIES - for (i = 1; i < lmb.memory.cnt; i++) { - unsigned long base, prevbase, prevsize; - - prevbase = lmb.memory.region[i-1].base; - prevsize = lmb.memory.region[i-1].size; - base = lmb.memory.region[i].base; - if (base > (prevbase + prevsize)) { - io_hole_start = prevbase + prevsize; - io_hole_size = base - (prevbase + prevsize); - break; - } - } -#endif /* CONFIG_PPC_ISERIES */ - if (io_hole_start) - printk("IO Hole assumed to be %lx -> %lx\n", - io_hole_start, io_hole_start + io_hole_size - 1); - - ppc64_boot_msg(0x100, "MM Init Done"); -} - -/* - * This is called by /dev/mem to know if a given address has to - * be mapped non-cacheable or not - */ -int page_is_ram(unsigned long pfn) -{ - int i; - unsigned long paddr = (pfn << PAGE_SHIFT); - - for (i=0; i < lmb.memory.cnt; i++) { - unsigned long base; - - base = lmb.memory.region[i].base; - - if ((paddr >= base) && - (paddr < (base + lmb.memory.region[i].size))) { - return 1; - } - } - - return 0; -} -EXPORT_SYMBOL(page_is_ram); - -/* - * Initialize the bootmem system and give it all the memory we - * have available. - */ -#ifndef CONFIG_NEED_MULTIPLE_NODES -void __init do_init_bootmem(void) -{ - unsigned long i; - unsigned long start, bootmap_pages; - unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT; - int boot_mapsize; - - /* - * Find an area to use for the bootmem bitmap. Calculate the size of - * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE. - * Add 1 additional page in case the address isn't page-aligned. - */ - bootmap_pages = bootmem_bootmap_pages(total_pages); - - start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE); - BUG_ON(!start); - - boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); - - max_pfn = max_low_pfn; - - /* Add all physical memory to the bootmem map, mark each area - * present. - */ - for (i=0; i < lmb.memory.cnt; i++) - free_bootmem(lmb.memory.region[i].base, - lmb_size_bytes(&lmb.memory, i)); - - /* reserve the sections we're already using */ - for (i=0; i < lmb.reserved.cnt; i++) - reserve_bootmem(lmb.reserved.region[i].base, - lmb_size_bytes(&lmb.reserved, i)); - - for (i=0; i < lmb.memory.cnt; i++) - memory_present(0, lmb_start_pfn(&lmb.memory, i), - lmb_end_pfn(&lmb.memory, i)); -} - -/* - * paging_init() sets up the page tables - in fact we've already done this. - */ -void __init paging_init(void) -{ - unsigned long zones_size[MAX_NR_ZONES]; - unsigned long zholes_size[MAX_NR_ZONES]; - unsigned long total_ram = lmb_phys_mem_size(); - unsigned long top_of_ram = lmb_end_of_DRAM(); - - printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", - top_of_ram, total_ram); - printk(KERN_INFO "Memory hole size: %ldMB\n", - (top_of_ram - total_ram) >> 20); - /* - * All pages are DMA-able so we put them all in the DMA zone. - */ - memset(zones_size, 0, sizeof(zones_size)); - memset(zholes_size, 0, sizeof(zholes_size)); - - zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; - zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; - - free_area_init_node(0, NODE_DATA(0), zones_size, - __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); -} -#endif /* ! 
CONFIG_NEED_MULTIPLE_NODES */ - -static struct kcore_list kcore_vmem; - -static int __init setup_kcore(void) -{ - int i; - - for (i=0; i < lmb.memory.cnt; i++) { - unsigned long base, size; - struct kcore_list *kcore_mem; - - base = lmb.memory.region[i].base; - size = lmb.memory.region[i].size; - - /* GFP_ATOMIC to avoid might_sleep warnings during boot */ - kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC); - if (!kcore_mem) - panic("mem_init: kmalloc failed\n"); - - kclist_add(kcore_mem, __va(base), size); - } - - kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); - - return 0; -} -module_init(setup_kcore); - -void __init mem_init(void) -{ -#ifdef CONFIG_NEED_MULTIPLE_NODES - int nid; -#endif - pg_data_t *pgdat; - unsigned long i; - struct page *page; - unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; - - num_physpages = max_low_pfn; /* RAM is assumed contiguous */ - high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); - -#ifdef CONFIG_NEED_MULTIPLE_NODES - for_each_online_node(nid) { - if (NODE_DATA(nid)->node_spanned_pages != 0) { - printk("freeing bootmem node %x\n", nid); - totalram_pages += - free_all_bootmem_node(NODE_DATA(nid)); - } - } -#else - max_mapnr = num_physpages; - totalram_pages += free_all_bootmem(); -#endif - - for_each_pgdat(pgdat) { - for (i = 0; i < pgdat->node_spanned_pages; i++) { - page = pgdat_page_nr(pgdat, i); - if (PageReserved(page)) - reservedpages++; - } - } - - codesize = (unsigned long)&_etext - (unsigned long)&_stext; - initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin; - datasize = (unsigned long)&_edata - (unsigned long)&__init_end; - bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start; - - printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, " - "%luk reserved, %luk data, %luk bss, %luk init)\n", - (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), - num_physpages << (PAGE_SHIFT-10), - codesize >> 10, - reservedpages << (PAGE_SHIFT-10), - datasize >> 10, - bsssize >> 10, - initsize >> 10); - - mem_init_done = 1; - - /* Initialize the vDSO */ - vdso_init(); -} - -/* - * This is called when a page has been modified by the kernel. - * It just marks the page as not i-cache clean. We do the i-cache - * flush later when the page is given to a user process, if necessary. - */ -void flush_dcache_page(struct page *page) -{ - if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) - return; - /* avoid an atomic op if possible */ - if (test_bit(PG_arch_1, &page->flags)) - clear_bit(PG_arch_1, &page->flags); -} -EXPORT_SYMBOL(flush_dcache_page); - -void clear_user_page(void *page, unsigned long vaddr, struct page *pg) -{ - clear_page(page); - - if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) - return; - /* - * We shouldnt have to do this, but some versions of glibc - * require it (ld.so assumes zero filled pages are icache clean) - * - Anton - */ - - /* avoid an atomic op if possible */ - if (test_bit(PG_arch_1, &pg->flags)) - clear_bit(PG_arch_1, &pg->flags); -} -EXPORT_SYMBOL(clear_user_page); - -void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, - struct page *pg) -{ - copy_page(vto, vfrom); - - /* - * We should be able to use the following optimisation, however - * there are two problems. - * Firstly a bug in some versions of binutils meant PLT sections - * were not marked executable. - * Secondly the first word in the GOT section is blrl, used - * to establish the GOT address. Until recently the GOT was - * not marked executable. 
- * - Anton - */ -#if 0 - if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0)) - return; -#endif - - if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) - return; - - /* avoid an atomic op if possible */ - if (test_bit(PG_arch_1, &pg->flags)) - clear_bit(PG_arch_1, &pg->flags); -} - -void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, - unsigned long addr, int len) -{ - unsigned long maddr; - - maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK); - flush_icache_range(maddr, maddr + len); -} -EXPORT_SYMBOL(flush_icache_user_range); - -/* - * This is called at the end of handling a user page fault, when the - * fault has been handled by updating a PTE in the linux page tables. - * We use it to preload an HPTE into the hash table corresponding to - * the updated linux PTE. - * - * This must always be called with the mm->page_table_lock held - */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea, - pte_t pte) -{ - unsigned long vsid; - void *pgdir; - pte_t *ptep; - int local = 0; - cpumask_t tmp; - unsigned long flags; - - /* handle i-cache coherency */ - if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) && - !cpu_has_feature(CPU_FTR_NOEXECUTE)) { - unsigned long pfn = pte_pfn(pte); - if (pfn_valid(pfn)) { - struct page *page = pfn_to_page(pfn); - if (!PageReserved(page) - && !test_bit(PG_arch_1, &page->flags)) { - __flush_dcache_icache(page_address(page)); - set_bit(PG_arch_1, &page->flags); - } - } - } - - /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ - if (!pte_young(pte)) - return; - - pgdir = vma->vm_mm->pgd; - if (pgdir == NULL) - return; - - ptep = find_linux_pte(pgdir, ea); - if (!ptep) - return; - - vsid = get_vsid(vma->vm_mm->context.id, ea); - - local_irq_save(flags); - tmp = cpumask_of_cpu(smp_processor_id()); - if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp)) - local = 1; - - __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep, - 0x300, local); - local_irq_restore(flags); -} - -void __iomem * reserve_phb_iospace(unsigned long size) -{ - void __iomem *virt_addr; - - if (phbs_io_bot >= IMALLOC_BASE) - panic("reserve_phb_iospace(): phb io space overflow\n"); - - virt_addr = (void __iomem *) phbs_io_bot; - phbs_io_bot += size; - - return virt_addr; -} - -static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) -{ - memset(addr, 0, kmem_cache_size(cache)); -} - -static const int pgtable_cache_size[2] = { - PTE_TABLE_SIZE, PMD_TABLE_SIZE -}; -static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = { - "pgd_pte_cache", "pud_pmd_cache", -}; - -kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; - -void pgtable_cache_init(void) -{ - int i; - - BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]); - BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]); - BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]); - BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]); - - for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) { - int size = pgtable_cache_size[i]; - const char *name = pgtable_cache_name[i]; - - pgtable_cache[i] = kmem_cache_create(name, - size, size, - SLAB_HWCACHE_ALIGN - | SLAB_MUST_HWCACHE_ALIGN, - zero_ctor, - NULL); - if (! 
pgtable_cache[i]) - panic("pgtable_cache_init(): could not create %s!\n", - name); - } -} - -pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, - unsigned long size, pgprot_t vma_prot) -{ - if (ppc_md.phys_mem_access_prot) - return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot); - - if (!page_is_ram(addr >> PAGE_SHIFT)) - vma_prot = __pgprot(pgprot_val(vma_prot) - | _PAGE_GUARDED | _PAGE_NO_CACHE); - return vma_prot; -} -EXPORT_SYMBOL(phys_mem_access_prot); |