author    | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-08 11:50:19 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-08 11:50:19 -0700
commit    | df6d3916f3b7b7e2067567a256dd4f0c1ea854a2 (patch)
tree      | 0fdeab1ab5d566605fc99aeb5ea3f621f11e7608 /arch/powerpc/mm
parent    | 74add80cbd7fe246c893b93ee75ac59acdd01dd4 (diff)
parent    | 197686dfe0038fd190326d118b743ff65ad20c0e (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (77 commits)
[POWERPC] Abolish powerpc_flash_init()
[POWERPC] Early serial debug support for PPC44x
[POWERPC] Support for the Ebony 440GP reference board in arch/powerpc
[POWERPC] Add device tree for Ebony
[POWERPC] Add powerpc/platforms/44x, disable platforms/4xx for now
[POWERPC] MPIC U3/U4 MSI backend
[POWERPC] MPIC MSI allocator
[POWERPC] Enable MSI mappings for MPIC
[POWERPC] Tell Phyp we support MSI
[POWERPC] RTAS MSI implementation
[POWERPC] PowerPC MSI infrastructure
[POWERPC] Rip out the existing powerpc msi stubs
[POWERPC] Remove use of 4level-fixup.h for ppc32
[POWERPC] Add powerpc PCI-E reset API implementation
[POWERPC] Holly bootwrapper
[POWERPC] Holly DTS
[POWERPC] Holly defconfig
[POWERPC] Add support for 750CL Holly board
[POWERPC] Generalize tsi108 PCI setup
[POWERPC] Generalize tsi108 PHY types
...
Fixed conflict in include/asm-powerpc/kdebug.h manually
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/44x_mmu.c        | 82
-rw-r--r-- | arch/powerpc/mm/fault.c          | 42
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c | 84
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c  |  2
-rw-r--r-- | arch/powerpc/mm/mem.c            |  1
-rw-r--r-- | arch/powerpc/mm/mmu_decl.h       |  3
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c     | 28
-rw-r--r-- | arch/powerpc/mm/stab.c           |  2
8 files changed, 116 insertions(+), 128 deletions(-)
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 0a0a0487b334..ca4dcb07a939 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,73 +24,38 @@
  *
  */

-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/highmem.h>
-
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/uaccess.h>
-#include <asm/smp.h>
-#include <asm/bootx.h>
-#include <asm/machdep.h>
-#include <asm/setup.h>
+#include <asm/system.h>
+#include <asm/page.h>

 #include "mmu_decl.h"

-extern char etext[], _stext[];
-
 /* Used by the 44x TLB replacement exception handler.
  * Just needed it declared someplace.
  */
-unsigned int tlb_44x_index = 0;
-unsigned int tlb_44x_hwater = 62;
+unsigned int tlb_44x_index; /* = 0 */
+unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;

 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
  */
-static void __init
-ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
+static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
-	unsigned long attrib = 0;
-
-	__asm__ __volatile__("\
-	clrrwi	%2,%2,10\n\
-	ori	%2,%2,%4\n\
-	clrrwi	%1,%1,10\n\
-	li	%0,0\n\
-	ori	%0,%0,%5\n\
-	tlbwe	%2,%3,%6\n\
-	tlbwe	%1,%3,%7\n\
-	tlbwe	%0,%3,%8"
+	__asm__ __volatile__(
+		"tlbwe	%2,%3,%4\n"
+		"tlbwe	%1,%3,%5\n"
+		"tlbwe	%0,%3,%6\n"
	:
-	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
-	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
-	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	  "r" (phys),
+	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
+	  "r" (tlb_44x_hwater--), /* slot for this TLB entry */
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
 }

-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
 void __init MMU_init_hw(void)
 {
	flush_instruction_cache();
@@ -98,22 +63,13 @@ void __init MMU_init_hw(void)

 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned int pinned_tlbs = 1;
-	int i;
-
-	/* Determine number of entries necessary to cover lowmem */
-	pinned_tlbs = (unsigned int)
-		(_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
-
-	/* Write upper watermark to save location */
-	tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
+	unsigned long addr;

-	/* If necessary, set additional pinned TLBs */
-	if (pinned_tlbs > 1)
-		for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
-			unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
-			ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
-		}
+	/* Pin in enough TLBs to cover any lowmem not covered by the
+	 * initial 256M mapping established in head_44x.S */
+	for (addr = PPC_PIN_SIZE; addr < total_lowmem;
+	     addr += PPC_PIN_SIZE)
+		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);

	return total_lowmem;
 }
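An aside on the new pinning loop: its arithmetic is easy to check in isolation. Below is a minimal user-space sketch; the constants, the 768MB lowmem figure, and the printf standing in for the real three-tlbwe sequence are all illustrative assumptions, not kernel code.

```c
#include <stdio.h>

#define PPC_PIN_SIZE (1UL << 28)    /* 256MB, the size of one pinned entry */
#define PAGE_OFFSET  0xc0000000UL   /* typical ppc32 kernel virtual base */

static unsigned int tlb_44x_hwater = 62; /* illustrative: 64-entry TLB minus bolted entries */

/* Stand-in for ppc44x_pin_tlb(): the real function writes the entry
 * with three tlbwe instructions, taking slot tlb_44x_hwater-- */
static void pin_tlb(unsigned long virt, unsigned long phys)
{
	printf("pin slot %u: virt 0x%08lx -> phys 0x%08lx\n",
	       tlb_44x_hwater--, virt, phys);
}

int main(void)
{
	unsigned long total_lowmem = 768UL << 20; /* assume 768MB of lowmem */
	unsigned long addr;

	/* head_44x.S already maps the first 256MB, so start one step in */
	for (addr = PPC_PIN_SIZE; addr < total_lowmem; addr += PPC_PIN_SIZE)
		pin_tlb(addr + PAGE_OFFSET, addr);
	return 0;
}
```

Because the watermark decrements on each pin, the pinned entries stack down from the top of the TLB, leaving the slots below tlb_44x_hwater for the replacement handler.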
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bec0cce79a78..bfe901353142 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -39,37 +39,26 @@
 #include <asm/tlbflush.h>
 #include <asm/siginfo.h>

-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-
-int unregister_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 11))
+			ret = 1;
+		preempt_enable();
+	}

-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return ret;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	return NOTIFY_DONE;
+	return 0;
 }
 #endif
@@ -175,8 +164,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
	is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */

-	if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
-				11, SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
		return 0;

	if (trap == 0x300) {
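For context, the control flow of the new notify_page_fault() can be modelled in user space. Only the ordering below (kernel-mode check, preemption off, ask kprobes directly) comes from the change above; every function body is a hypothetical stub.

```c
#include <stdio.h>

struct pt_regs { unsigned long msr; };

/* Toy stand-ins for the kernel predicates used below */
static int user_mode(struct pt_regs *regs) { return regs->msr & 1; }
static int kprobe_running(void) { return 1; }
static int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	(void)regs; (void)trapnr;
	return 1; /* pretend a kprobe claimed the fault */
}
static void preempt_disable(void) { }
static void preempt_enable(void) { }

static int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() relies on smp_processor_id(), so preemption
	 * must be off while we ask it */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

int main(void)
{
	struct pt_regs regs = { .msr = 0 }; /* a kernel-mode fault */

	if (notify_page_fault(&regs))
		printf("fault consumed by kprobes, normal handling skipped\n");
	return 0;
}
```

The direct call replaces a notifier-chain traversal on the page-fault fast path, and on kernels without CONFIG_KPROBES the check compiles away entirely.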
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 79aedaf36f2b..7b7fe2d7b9dc 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -26,6 +26,7 @@
 #include <asm/tlb.h>
 #include <asm/cputable.h>
 #include <asm/udbg.h>
+#include <asm/kexec.h>

 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -340,31 +341,70 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
	local_irq_restore(flags);
 }

-/*
- * XXX This need fixing based on page size. It's only used by
- * native_hpte_clear() for now which needs fixing too so they
- * make a good pair...
- */
-static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
-{
-	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
-	unsigned long va;
+#define LP_SHIFT	12
+#define LP_BITS		8
+#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

-	va = avpn << 23;
+static void hpte_decode(hpte_t *hpte, unsigned long slot,
+			int *psize, unsigned long *va)
+{
+	unsigned long hpte_r = hpte->r;
+	unsigned long hpte_v = hpte->v;
+	unsigned long avpn;
+	int i, size, shift, penc, avpnm_bits;
+
+	if (!(hpte_v & HPTE_V_LARGE))
+		size = MMU_PAGE_4K;
+	else {
+		for (i = 0; i < LP_BITS; i++) {
+			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
+				break;
+		}
+		penc = LP_MASK(i+1) >> LP_SHIFT;
+		for (size = 0; size < MMU_PAGE_COUNT; size++) {

-	if (! (hpte_v & HPTE_V_LARGE)) {
-		unsigned long vpi, pteg;
+			/* 4K pages are not represented by LP */
+			if (size == MMU_PAGE_4K)
+				continue;

-		pteg = slot / HPTES_PER_GROUP;
-		if (hpte_v & HPTE_V_SECONDARY)
-			pteg = ~pteg;
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[size].shift)
+				continue;

-		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
+			if (penc == mmu_psize_defs[size].penc)
+				break;
+		}
+	}

-		va |= vpi << PAGE_SHIFT;
+	/*
+	 * FIXME, the code below works for 16M, 64K, and 4K pages as these
+	 * fall under the p<=23 rules for calculating the virtual address.
+	 * In the case of 16M pages, an extra bit is stolen from the AVPN
+	 * field to achieve the requisite 24 bits.
+	 *
+	 * Does not work for 16G pages or 1 TB segments.
+	 */
+	shift = mmu_psize_defs[size].shift;
+	if (mmu_psize_defs[size].avpnm)
+		avpnm_bits = __ilog2_u64(mmu_psize_defs[size].avpnm) + 1;
+	else
+		avpnm_bits = 0;
+	if (shift - avpnm_bits <= 23) {
+		avpn = HPTE_V_AVPN_VAL(hpte_v) << 23;
+
+		if (shift < 23) {
+			unsigned long vpi, pteg;
+
+			pteg = slot / HPTES_PER_GROUP;
+			if (hpte_v & HPTE_V_SECONDARY)
+				pteg = ~pteg;
+			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
+			avpn |= (vpi << mmu_psize_defs[size].shift);
+		}
	}

-	return va;
+	*va = avpn;
+	*psize = size;
 }

 /*
@@ -374,15 +414,14 @@ static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
  *
  * TODO: add batching support when enabled.  remember, no dynamic memory here,
  * athough there is the control page available...
- *
- * XXX FIXME: 4k only for now !
  */
 static void native_hpte_clear(void)
 {
	unsigned long slot, slots, flags;
	hpte_t *hptep = htab_address;
-	unsigned long hpte_v;
+	unsigned long hpte_v, va;
	unsigned long pteg_count;
+	int psize;

	pteg_count = htab_hash_mask + 1;

@@ -408,8 +447,9 @@ static void native_hpte_clear(void)
	 * already hold the native_tlbie_lock.
	 */
	if (hpte_v & HPTE_V_VALID) {
+		hpte_decode(hptep, slot, &psize, &va);
		hptep->v = 0;
-		__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
+		__tlbie(va, psize);
	}
 }
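The LP-field scan at the top of hpte_decode() is easiest to see in isolation: it looks for the longest all-ones run in the low bits of the LP field, and that run is the size encoding (penc) matched against mmu_psize_defs[].penc. A standalone sketch with a made-up hpte_r value (the real penc table is not modelled here):

```c
#include <stdio.h>

#define LP_SHIFT 12
#define LP_BITS  8
#define LP_MASK(i) ((0xFFUL >> (i)) << LP_SHIFT)

int main(void)
{
	/* hypothetical second HPTE word whose LP field is 0b00000111 */
	unsigned long hpte_r = 0x7UL << LP_SHIFT;
	unsigned long penc;
	int i;

	/* shrink the required all-ones mask until hpte_r satisfies it */
	for (i = 0; i < LP_BITS; i++) {
		if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
			break;
	}
	penc = LP_MASK(i+1) >> LP_SHIFT;

	printf("stopped at i=%d, penc=0x%lx\n", i, penc); /* i=4, penc=0x7 */
	return 0;
}
```

In the kernel, the resulting penc then selects an entry of mmu_psize_defs[], whose shift and avpnm fields drive the virtual-address reconstruction in the rest of hpte_decode().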
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 49618461defb..9b226fa7006f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -103,7 +103,7 @@ int mmu_ci_restrictions;
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static spinlock_t linear_map_hash_lock;
+static DEFINE_SPINLOCK(linear_map_hash_lock);
 #endif /* CONFIG_DEBUG_PAGEALLOC */

 /* There are definitions of page sizes arrays to be used when none
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c4bcd7546424..1a6e08f3298f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -80,7 +80,6 @@ int page_is_ram(unsigned long pfn)
	return 0;
 #endif
 }
-EXPORT_SYMBOL(page_is_ram);

 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 9c4538bb04b0..2558c34eedaa 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -40,7 +40,8 @@ extern int __map_without_bats;
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;

-extern PTE *Hash, *Hash_end;
+struct _PTE;
+extern struct _PTE *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;

 extern unsigned int num_tlbcam_entries;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bca560374927..d8232b7a08f7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -261,7 +261,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pgd_offset_k(va), va);
+	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
@@ -354,23 +354,27 @@ int get_pteptr(struct mm_struct *mm, unsigned long addr,
		pte_t **ptep, pmd_t **pmdp)
 {
	pgd_t	*pgd;
+	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
-		pmd = pmd_offset(pgd, addr & PAGE_MASK);
-		if (pmd_present(*pmd)) {
-			pte = pte_offset_map(pmd, addr & PAGE_MASK);
-			if (pte) {
-				retval = 1;
-				*ptep = pte;
-				if (pmdp)
-					*pmdp = pmd;
-				/* XXX caller needs to do pte_unmap, yuck */
-			}
-		}
+		pud = pud_offset(pgd, addr & PAGE_MASK);
+		if (pud && pud_present(*pud)) {
+			pmd = pmd_offset(pud, addr & PAGE_MASK);
+			if (pmd_present(*pmd)) {
+				pte = pte_offset_map(pmd, addr & PAGE_MASK);
+				if (pte) {
+					retval = 1;
+					*ptep = pte;
+					if (pmdp)
+						*pmdp = pmd;
+					/* XXX caller needs to do pte_unmap, yuck */
+				}
+			}
+		}
	}
	return(retval);
 }
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index eeeacab548e6..132c6bc66ce1 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -227,7 +227,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
  * the first (bolted) segment, so that do_stab_bolted won't get a
  * recursive segment miss on the segment table itself.
  */
-void stabs_alloc(void)
+void __init stabs_alloc(void)
 {
	int cpu;
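Finally, the get_pteptr() hunks above are the classic multi-level descent with a presence check per level, now with the pud step written out because ppc32 no longer uses 4level-fixup.h. A self-contained user-space model of that shape; the table geometry (2-bit indices, plain void* tables) and values are invented for illustration and stand in for the real pgd_t/pud_t/pmd_t types:

```c
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long pte_t;

#define TABLE_ENTRIES 4	/* tiny tables, illustration only */
#define LEVEL_BITS    2
#define PAGE_SHIFT    12
#define IDX(addr, lvl) \
	(((addr) >> (PAGE_SHIFT + (lvl) * LEVEL_BITS)) & (TABLE_ENTRIES - 1))

/* Mirror of get_pteptr()'s structure: descend one level at a time,
 * bailing out if any intermediate table is absent */
static pte_t *lookup_pte(void **pgd, unsigned long addr)
{
	void **pud, **pmd;
	pte_t *ptes;

	pud = pgd[IDX(addr, 3)];	/* pgd_offset() */
	if (!pud)			/* pud_present() check */
		return NULL;
	pmd = pud[IDX(addr, 2)];	/* pud_offset() */
	if (!pmd)			/* pmd_present() check */
		return NULL;
	ptes = pmd[IDX(addr, 1)];	/* pmd_offset() */
	if (!ptes)
		return NULL;
	return &ptes[IDX(addr, 0)];	/* pte_offset_map() */
}

int main(void)
{
	void **pgd = calloc(TABLE_ENTRIES, sizeof(void *));
	void **pud = calloc(TABLE_ENTRIES, sizeof(void *));
	void **pmd = calloc(TABLE_ENTRIES, sizeof(void *));
	pte_t *ptes = calloc(TABLE_ENTRIES, sizeof(pte_t));
	unsigned long addr = 0x5000;	/* arbitrary test address */
	pte_t *pte;

	/* wire up only the path for addr; every other slot stays NULL */
	pgd[IDX(addr, 3)] = pud;
	pud[IDX(addr, 2)] = pmd;
	pmd[IDX(addr, 1)] = ptes;
	ptes[IDX(addr, 0)] = 0xabcd;

	pte = lookup_pte(pgd, addr);
	printf("pte for 0x%lx: 0x%lx\n", addr, pte ? *pte : 0UL);
	return 0;
}
```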