Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--  arch/sparc64/mm/Makefile       |   10
-rw-r--r--  arch/sparc64/mm/extable.c      |   80
-rw-r--r--  arch/sparc64/mm/fault.c        |  527
-rw-r--r--  arch/sparc64/mm/generic.c      |  182
-rw-r--r--  arch/sparc64/mm/hugetlbpage.c  |  310
-rw-r--r--  arch/sparc64/mm/init.c         | 1769
-rw-r--r--  arch/sparc64/mm/tlb.c          |  151
-rw-r--r--  arch/sparc64/mm/ultra.S        |  583
8 files changed, 3612 insertions, 0 deletions
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
new file mode 100644
index 000000000000..cda87333a77b
--- /dev/null
+++ b/arch/sparc64/mm/Makefile
@@ -0,0 +1,10 @@
+# $Id: Makefile,v 1.8 2000/12/14 22:57:25 davem Exp $
+# Makefile for the linux Sparc64-specific parts of the memory manager.
+#
+
+EXTRA_AFLAGS := -ansi
+EXTRA_CFLAGS := -Werror
+
+obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
new file mode 100644
index 000000000000..ec334297ff4f
--- /dev/null
+++ b/arch/sparc64/mm/extable.c
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/sparc64/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish)
+{
+}
+
+/* Caller knows they are in a range if ret->fixup == 0 */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *start,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ const struct exception_table_entry *walk;
+
+ /* Single insn entries are encoded as:
+ * word 1: insn address
+ * word 2: fixup code address
+ *
+ * Range entries are encoded as:
+ * word 1: first insn address
+ * word 2: 0
+ * word 3: last insn address + 4 bytes
+ * word 4: fixup code address
+ *
+ * See asm/uaccess.h for more details.
+ */
+
+ /* 1. Try to find an exact match. */
+ for (walk = start; walk <= last; walk++) {
+ if (walk->fixup == 0) {
+ /* A range entry, skip both parts. */
+ walk++;
+ continue;
+ }
+
+ if (walk->insn == value)
+ return walk;
+ }
+
+ /* 2. Try to find a range match. */
+ for (walk = start; walk <= (last - 1); walk++) {
+ if (walk->fixup)
+ continue;
+
+ if (walk[0].insn <= value && walk[1].insn > value)
+ return walk;
+
+ walk++;
+ }
+
+ return NULL;
+}
+
+/* Special extable search, which handles ranges. Returns fixup */
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
+{
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(addr);
+ if (!entry)
+ return 0;
+
+ /* Inside range? Fix g2 and return correct fixup */
+ if (!entry->fixup) {
+ *g2 = (addr - entry->insn) / 4;
+ return (entry + 1)->fixup;
+ }
+
+ return entry->fixup;
+}
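
The lookup above relies on the entry layout spelled out in the header comment: a
single-instruction entry is {insn, fixup}, while a range entry occupies two
consecutive slots, {first insn, 0} followed by {last insn + 4, fixup}. A minimal
user-space sketch of that range-aware search, folded into a single pass for
brevity and using a simplified two-word entry with hypothetical sample addresses
(none of this is part of the patch), might look like:

#include <stdio.h>

struct extable_entry { unsigned long insn, fixup; };

/* Range entry {0x1000, 0} + {0x1010, 0x9000} covers insns 0x1000..0x100c;
 * single entry {0x2000, 0x9100} covers exactly one instruction.
 */
static const struct extable_entry table[] = {
	{ 0x1000, 0x0    },
	{ 0x1010, 0x9000 },
	{ 0x2000, 0x9100 },
};

static unsigned long lookup(unsigned long pc, unsigned long *g2)
{
	unsigned long n = sizeof(table) / sizeof(table[0]);
	unsigned long i;

	for (i = 0; i < n; i++) {
		if (table[i].fixup == 0) {
			/* Range entry: the second slot holds end + fixup. */
			if (pc >= table[i].insn && pc < table[i + 1].insn) {
				*g2 = (pc - table[i].insn) / 4;
				return table[i + 1].fixup;
			}
			i++;	/* skip the second half of the range */
		} else if (table[i].insn == pc) {
			return table[i].fixup;	/* exact match */
		}
	}
	return 0;
}

int main(void)
{
	unsigned long g2 = 0;

	printf("fixup=%#lx g2=%lu\n", lookup(0x1008, &g2), g2);	/* 0x9000, 2 */
	printf("fixup=%#lx\n", lookup(0x2000, &g2));		/* 0x9100 */
	return 0;
}
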
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
new file mode 100644
index 000000000000..3ffee7b51aed
--- /dev/null
+++ b/arch/sparc64/mm/fault.c
@@ -0,0 +1,527 @@
+/* $Id: fault.c,v 1.59 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/head.h>
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/asi.h>
+#include <asm/lsu.h>
+#include <asm/sections.h>
+#include <asm/kdebug.h>
+
+#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
+
+extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+/*
+ * To debug the kernel during syscall entry.
+ */
+void syscall_trace_entry(struct pt_regs *regs)
+{
+ printk("scall entry: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
+}
+
+/*
+ * To debug the kernel during syscall exit.
+ */
+void syscall_trace_exit(struct pt_regs *regs)
+{
+ printk("scall exit: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
+}
+
+/*
+ * To debug the kernel by catching accesses to certain virtual/physical addresses.
+ * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
+ * flags = VM_READ watches memory read accesses, flags = VM_WRITE watches memory write accesses.
+ * Caller passes in a 64-bit aligned addr, with mask set to the bytes that need to be
+ * watched. This is only useful on a single-cpu machine for now. After the watchpoint
+ * is detected, the process causing it will be killed, thus preventing an infinite loop.
+ */
+void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
+{
+ unsigned long lsubits;
+
+ __asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=r" (lsubits)
+ : "i" (ASI_LSU_CONTROL));
+ lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
+ LSU_CONTROL_PR | LSU_CONTROL_VR |
+ LSU_CONTROL_PW | LSU_CONTROL_VW);
+
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
+ "i" (ASI_DMMU));
+
+ lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
+ if (flags & VM_READ)
+ lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
+ if (flags & VM_WRITE)
+ lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (lsubits), "i" (ASI_LSU_CONTROL)
+ : "memory");
+}
+
+/* Nice, simple, prom library does all the sweating for us. ;) */
+unsigned long __init prom_probe_memory (void)
+{
+ register struct linux_mlist_p1275 *mlist;
+ register unsigned long bytes, base_paddr, tally;
+ register int i;
+
+ i = 0;
+ mlist = *prom_meminfo()->p1275_available;
+ bytes = tally = mlist->num_bytes;
+ base_paddr = mlist->start_adr;
+
+ sp_banks[0].base_addr = base_paddr;
+ sp_banks[0].num_bytes = bytes;
+
+ while (mlist->theres_more != (void *) 0) {
+ i++;
+ mlist = mlist->theres_more;
+ bytes = mlist->num_bytes;
+ tally += bytes;
+ if (i >= SPARC_PHYS_BANKS-1) {
+ printk ("The machine has more banks than "
+ "this kernel can support\n"
+ "Increase the SPARC_PHYS_BANKS "
+ "setting (currently %d)\n",
+ SPARC_PHYS_BANKS);
+ i = SPARC_PHYS_BANKS-1;
+ break;
+ }
+
+ sp_banks[i].base_addr = mlist->start_adr;
+ sp_banks[i].num_bytes = mlist->num_bytes;
+ }
+
+ i++;
+ sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
+ sp_banks[i].num_bytes = 0;
+
+	/* Now mask all bank sizes to a page boundary; it is all we can
+	 * use anyway.
+ */
+ for (i = 0; sp_banks[i].num_bytes != 0; i++)
+ sp_banks[i].num_bytes &= PAGE_MASK;
+
+ return tally;
+}
+
+static void unhandled_fault(unsigned long address, struct task_struct *tsk,
+ struct pt_regs *regs)
+{
+ if ((unsigned long) address < PAGE_SIZE) {
+ printk(KERN_ALERT "Unable to handle kernel NULL "
+ "pointer dereference\n");
+ } else {
+ printk(KERN_ALERT "Unable to handle kernel paging request "
+ "at virtual address %016lx\n", (unsigned long)address);
+ }
+ printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
+ (tsk->mm ?
+ CTX_HWBITS(tsk->mm->context) :
+ CTX_HWBITS(tsk->active_mm->context)));
+ printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
+ (tsk->mm ? (unsigned long) tsk->mm->pgd :
+ (unsigned long) tsk->active_mm->pgd));
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ 0, 0, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die_if_kernel("Oops", regs);
+}
+
+static void bad_kernel_pc(struct pt_regs *regs)
+{
+ unsigned long *ksp;
+
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ __asm__("mov %%sp, %0" : "=r" (ksp));
+ show_stack(current, ksp);
+ unhandled_fault(regs->tpc, current, regs);
+}
+
+/*
+ * We now make sure that mmap_sem is held in all paths that call
+ * this. Additionally, to prevent kswapd from ripping ptes from
+ * under us, disable interrupts around the time that we look at the
+ * pte; kswapd will have to wait to get its smp ipi response from
+ * us. This saves us having to take page_table_lock.
+ */
+static unsigned int get_user_insn(unsigned long tpc)
+{
+ pgd_t *pgdp = pgd_offset(current->mm, tpc);
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep, pte;
+ unsigned long pa;
+ u32 insn = 0;
+ unsigned long pstate;
+
+ if (pgd_none(*pgdp))
+ goto outret;
+ pudp = pud_offset(pgdp, tpc);
+ if (pud_none(*pudp))
+ goto outret;
+ pmdp = pmd_offset(pudp, tpc);
+ if (pmd_none(*pmdp))
+ goto outret;
+
+ /* This disables preemption for us as well. */
+ __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+ __asm__ __volatile__("wrpr %0, %1, %%pstate"
+ : : "r" (pstate), "i" (PSTATE_IE));
+ ptep = pte_offset_map(pmdp, tpc);
+ pte = *ptep;
+ if (!pte_present(pte))
+ goto out;
+
+ pa = (pte_val(pte) & _PAGE_PADDR);
+ pa += (tpc & ~PAGE_MASK);
+
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
+ __asm__ __volatile__("lduwa [%1] %2, %0"
+ : "=r" (insn)
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
+
+out:
+ pte_unmap(ptep);
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+outret:
+ return insn;
+}
+
+extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
+
+static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ unsigned int insn, int fault_code)
+{
+ siginfo_t info;
+
+ info.si_code = code;
+ info.si_signo = sig;
+ info.si_errno = 0;
+ if (fault_code & FAULT_CODE_ITLB)
+ info.si_addr = (void __user *) regs->tpc;
+ else
+ info.si_addr = (void __user *)
+ compute_effective_address(regs, insn, 0);
+ info.si_trapno = 0;
+ force_sig_info(sig, &info, current);
+}
+
+extern int handle_ldf_stq(u32, struct pt_regs *);
+extern int handle_ld_nf(u32, struct pt_regs *);
+
+static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
+{
+ if (!insn) {
+ if (!regs->tpc || (regs->tpc & 0x3))
+ return 0;
+ if (regs->tstate & TSTATE_PRIV) {
+ insn = *(unsigned int *) regs->tpc;
+ } else {
+ insn = get_user_insn(regs->tpc);
+ }
+ }
+ return insn;
+}
+
+static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
+ unsigned int insn, unsigned long address)
+{
+ unsigned long g2;
+ unsigned char asi = ASI_P;
+
+ if ((!insn) && (regs->tstate & TSTATE_PRIV))
+ goto cannot_handle;
+
+	/* If the user insn could not be read (thus insn is zero), that
+ * is fine. We will just gun down the process with a signal
+ * in that case.
+ */
+
+ if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
+ (insn & 0xc0800000) == 0xc0800000) {
+ if (insn & 0x2000)
+ asi = (regs->tstate >> 24);
+ else
+ asi = (insn >> 5);
+ if ((asi & 0xf2) == 0x82) {
+ if (insn & 0x1000000) {
+ handle_ldf_stq(insn, regs);
+ } else {
+ /* This was a non-faulting load. Just clear the
+ * destination register(s) and continue with the next
+ * instruction. -jj
+ */
+ handle_ld_nf(insn, regs);
+ }
+ return;
+ }
+ }
+
+ g2 = regs->u_regs[UREG_G2];
+
+ /* Is this in ex_table? */
+ if (regs->tstate & TSTATE_PRIV) {
+ unsigned long fixup;
+
+ if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
+ if (insn & 0x2000)
+ asi = (regs->tstate >> 24);
+ else
+ asi = (insn >> 5);
+ }
+
+ /* Look in asi.h: All _S asis have LS bit set */
+ if ((asi & 0x1) &&
+ (fixup = search_extables_range(regs->tpc, &g2))) {
+ regs->tpc = fixup;
+ regs->tnpc = regs->tpc + 4;
+ regs->u_regs[UREG_G2] = g2;
+ return;
+ }
+ } else {
+ /* The si_code was set to make clear whether
+ * this was a SEGV_MAPERR or SEGV_ACCERR fault.
+ */
+ do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+ return;
+ }
+
+cannot_handle:
+ unhandled_fault (address, current, regs);
+}
+
+asmlinkage void do_sparc64_fault(struct pt_regs *regs)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned int insn = 0;
+ int si_code, fault_code;
+ unsigned long address;
+
+ fault_code = get_thread_fault_code();
+
+ if (notify_die(DIE_PAGE_FAULT, "page_fault", regs,
+ fault_code, 0, SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ si_code = SEGV_MAPERR;
+ address = current_thread_info()->fault_address;
+
+ if ((fault_code & FAULT_CODE_ITLB) &&
+ (fault_code & FAULT_CODE_DTLB))
+ BUG();
+
+ if (regs->tstate & TSTATE_PRIV) {
+ unsigned long tpc = regs->tpc;
+
+ /* Sanity check the PC. */
+ if ((tpc >= KERNBASE && tpc < (unsigned long) _etext) ||
+ (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
+ /* Valid, no problems... */
+ } else {
+ bad_kernel_pc(regs);
+ return;
+ }
+ }
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto intr_or_no_mm;
+
+ if (test_thread_flag(TIF_32BIT)) {
+ if (!(regs->tstate & TSTATE_PRIV))
+ regs->tpc &= 0xffffffff;
+ address &= 0xffffffff;
+ }
+
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((regs->tstate & TSTATE_PRIV) &&
+ !search_exception_tables(regs->tpc)) {
+ insn = get_fault_insn(regs, insn);
+ goto handle_kernel_fault;
+ }
+ down_read(&mm->mmap_sem);
+ }
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+ * instruction to try and figure this out. It's an optimization
+ * so it's ok if we can't do this.
+ *
+ * Special hack, window spill/fill knows the exact fault type.
+ */
+ if (((fault_code &
+ (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
+ (vma->vm_flags & VM_WRITE) != 0) {
+ insn = get_fault_insn(regs, 0);
+ if (!insn)
+ goto continue_fault;
+ if ((insn & 0xc0200000) == 0xc0200000 &&
+ (insn & 0x1780000) != 0x1680000) {
+ /* Don't bother updating thread struct value,
+ * because update_mmu_cache only cares which tlb
+ * the access came from.
+ */
+ fault_code |= FAULT_CODE_WRITE;
+ }
+ }
+continue_fault:
+
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (!(fault_code & FAULT_CODE_WRITE)) {
+ /* Non-faulting loads shouldn't expand stack. */
+ insn = get_fault_insn(regs, insn);
+ if ((insn & 0xc0800000) == 0xc0800000) {
+ unsigned char asi;
+
+ if (insn & 0x2000)
+ asi = (regs->tstate >> 24);
+ else
+ asi = (insn >> 5);
+ if ((asi & 0xf2) == 0x82)
+ goto bad_area;
+ }
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ si_code = SEGV_ACCERR;
+
+ /* If we took a ITLB miss on a non-executable page, catch
+ * that here.
+ */
+ if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
+ BUG_ON(address != regs->tpc);
+ BUG_ON(regs->tstate & TSTATE_PRIV);
+ goto bad_area;
+ }
+
+ if (fault_code & FAULT_CODE_WRITE) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+
+ /* Spitfire has an icache which does not snoop
+ * processor stores. Later processors do...
+ */
+ if (tlb_type == spitfire &&
+ (vma->vm_flags & VM_EXEC) != 0 &&
+ vma->vm_file != NULL)
+ set_thread_fault_code(fault_code |
+ FAULT_CODE_BLKCOMMIT);
+ } else {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ goto do_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ default:
+ BUG();
+ }
+
+ up_read(&mm->mmap_sem);
+ goto fault_done;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ insn = get_fault_insn(regs, insn);
+ up_read(&mm->mmap_sem);
+
+handle_kernel_fault:
+ do_kernel_fault(regs, si_code, fault_code, insn, address);
+
+ goto fault_done;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ insn = get_fault_insn(regs, insn);
+ up_read(&mm->mmap_sem);
+ printk("VM: killing process %s\n", current->comm);
+ if (!(regs->tstate & TSTATE_PRIV))
+ do_exit(SIGKILL);
+ goto handle_kernel_fault;
+
+intr_or_no_mm:
+ insn = get_fault_insn(regs, 0);
+ goto handle_kernel_fault;
+
+do_sigbus:
+ insn = get_fault_insn(regs, insn);
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (regs->tstate & TSTATE_PRIV)
+ goto handle_kernel_fault;
+
+fault_done:
+ /* These values are no longer needed, clear them. */
+ set_thread_fault_code(0);
+ current_thread_info()->fault_address = 0;
+}
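
do_kernel_fault() and the stack-expansion check above decode the faulting
instruction the same way: an alternate-space load/store has
(insn & 0xc0800000) == 0xc0800000, the ASI comes from %tstate when the i-bit
(0x2000) is set and from the instruction's immediate ASI field otherwise, and
(asi & 0xf2) == 0x82 identifies the non-faulting ASIs. A standalone sketch of
just that decode, with a hypothetical sample encoding not taken from the patch,
could read:

#include <stdbool.h>
#include <stdio.h>

/* Returns true when insn looks like an alternate-space load/store that
 * used one of the non-faulting ASIs, mirroring the checks above.
 */
static bool is_nofault_access(unsigned int insn, unsigned long tstate)
{
	unsigned char asi;

	if ((insn & 0xc0800000) != 0xc0800000)
		return false;			/* not an alternate-space op */

	if (insn & 0x2000)			/* i-bit set: ASI from %tstate */
		asi = (unsigned char)(tstate >> 24);
	else					/* immediate ASI field */
		asi = (unsigned char)(insn >> 5);

	return (asi & 0xf2) == 0x82;		/* "no fault" ASIs */
}

int main(void)
{
	/* hypothetical encoding with immediate ASI 0x82 in bits <12:5> */
	unsigned int insn = 0xc0800000 | (0x82 << 5);

	printf("%d\n", is_nofault_access(insn, 0));	/* prints 1 */
	return 0;
}
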
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
new file mode 100644
index 000000000000..6b31f6117a95
--- /dev/null
+++ b/arch/sparc64/mm/generic.c
@@ -0,0 +1,182 @@
+/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
+ * generic.c: Generic Sparc mm routines that are not dependent upon
+ * MMU type but are Sparc specific.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+/* Remap IO memory, the same way as remap_pfn_range(), but use
+ * the obio memory space.
+ *
+ * They use a pgprot that sets PAGE_IO and does not check the
+ * mem_map table as this is independent of normal memory.
+ */
+static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
+ unsigned long address,
+ unsigned long size,
+ unsigned long offset, pgprot_t prot,
+ int space)
+{
+ unsigned long end;
+
+ /* clear hack bit that was used as a write_combine side-effect flag */
+ offset &= ~0x1UL;
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t entry;
+ unsigned long curend = address + PAGE_SIZE;
+
+ entry = mk_pte_io(offset, prot, space);
+ if (!(address & 0xffff)) {
+ if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
+ entry = mk_pte_io(offset,
+ __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
+ space);
+ curend = address + 0x400000;
+ offset += 0x400000;
+ } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
+ entry = mk_pte_io(offset,
+ __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
+ space);
+ curend = address + 0x80000;
+ offset += 0x80000;
+ } else if (!(offset & 0xfffe) && end >= address + 0x10000) {
+ entry = mk_pte_io(offset,
+ __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
+ space);
+ curend = address + 0x10000;
+ offset += 0x10000;
+ } else
+ offset += PAGE_SIZE;
+ } else
+ offset += PAGE_SIZE;
+
+ do {
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, address, pte, entry);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address < curend);
+ } while (address < end);
+}
+
+static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ offset -= address;
+ do {
+ pte_t * pte = pte_alloc_map(mm, pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
+ pte_unmap(pte);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PUD_MASK;
+ end = address + size;
+ if (end > PUD_SIZE)
+ end = PUD_SIZE;
+ offset -= address;
+ do {
+ pmd_t *pmd = pmd_alloc(mm, pud, address);
+		if (!pmd)
+ return -ENOMEM;
+ io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
+ address = (address + PUD_SIZE) & PUD_MASK;
+ pud++;
+ } while (address < end);
+ return 0;
+}
+
+int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = vma->vm_mm;
+
+ prot = __pgprot(pg_iobits);
+ offset -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+
+ spin_lock(&mm->page_table_lock);
+ while (from < end) {
+ pud_t *pud = pud_alloc(mm, dir, from);
+ error = -ENOMEM;
+ if (!pud)
+ break;
+ error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_range(vma, beg, end);
+ spin_unlock(&mm->page_table_lock);
+
+ return error;
+}
+
+int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+ struct mm_struct *mm = vma->vm_mm;
+ int space = GET_IOSPACE(pfn);
+ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+
+ prot = __pgprot(pg_iobits);
+ offset -= from;
+ dir = pgd_offset(mm, from);
+ flush_cache_range(vma, beg, end);
+
+ spin_lock(&mm->page_table_lock);
+ while (from < end) {
+		pud_t *pud = pud_alloc(mm, dir, from);
+ error = -ENOMEM;
+ if (!pud)
+ break;
+ error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_range(vma, beg, end);
+ spin_unlock(&mm->page_table_lock);
+
+ return error;
+}
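
io_remap_pte_range() above upgrades from the 8K base page to a larger I/O page
size (4MB, 512K or 64K) only when the virtual address, the physical offset and
the remaining length all allow it. A small sketch of just that size-selection
decision, written as an illustrative helper rather than kernel API, might be:

#include <stdio.h>

/* Return the I/O page size the loop above would pick for one step,
 * using the same alignment masks and length checks; 0x2000 is the
 * sparc64 8K base page size.
 */
static unsigned long pick_io_pagesize(unsigned long address,
				      unsigned long offset,
				      unsigned long end)
{
	if (address & 0xffff)
		return 0x2000;				/* 8K base pages */
	if (!(address & 0x3fffff) && !(offset & 0x3ffffe) &&
	    end >= address + 0x400000)
		return 0x400000;			/* 4MB */
	if (!(address & 0x7ffff) && !(offset & 0x7fffe) &&
	    end >= address + 0x80000)
		return 0x80000;				/* 512K */
	if (!(offset & 0xfffe) && end >= address + 0x10000)
		return 0x10000;				/* 64K */
	return 0x2000;
}

int main(void)
{
	/* hypothetical 4MB-aligned chunk: both address and offset at 0 */
	printf("%#lx\n", pick_io_pagesize(0x0, 0x0, 0x400000));
	return 0;
}
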
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
new file mode 100644
index 000000000000..5a1f831b2de1
--- /dev/null
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -0,0 +1,310 @@
+/*
+ * SPARC64 Huge TLB page support.
+ *
+ * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd) {
+ pud = pud_offset(pgd, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, addr);
+ }
+ }
+ return pte;
+}
+
+static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd) {
+ pud = pud_offset(pgd, addr);
+ if (pud) {
+ pmd = pmd_offset(pud, addr);
+ if (pmd)
+ pte = pte_offset_map(pmd, addr);
+ }
+ }
+ return pte;
+}
+
+#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
+
+static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr,
+ struct page *page, pte_t * page_table, int write_access)
+{
+ unsigned long i;
+ pte_t entry;
+
+ add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
+
+ if (write_access)
+ entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
+ vma->vm_page_prot)));
+ else
+ entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+ entry = pte_mkyoung(entry);
+ mk_pte_huge(entry);
+
+ for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+ set_pte_at(mm, addr, page_table, entry);
+ page_table++;
+ addr += PAGE_SIZE;
+
+ pte_val(entry) += PAGE_SIZE;
+ }
+}
+
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma)
+{
+ pte_t *src_pte, *dst_pte, entry;
+ struct page *ptepage;
+ unsigned long addr = vma->vm_start;
+ unsigned long end = vma->vm_end;
+ int i;
+
+ while (addr < end) {
+ dst_pte = huge_pte_alloc(dst, addr);
+ if (!dst_pte)
+ goto nomem;
+ src_pte = huge_pte_offset(src, addr);
+ BUG_ON(!src_pte || pte_none(*src_pte));
+ entry = *src_pte;
+ ptepage = pte_page(entry);
+ get_page(ptepage);
+ for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+ set_pte_at(dst, addr, dst_pte, entry);
+ pte_val(entry) += PAGE_SIZE;
+ dst_pte++;
+ addr += PAGE_SIZE;
+ }
+ add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
+ }
+ return 0;
+
+nomem:
+ return -ENOMEM;
+}
+
+int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct page **pages, struct vm_area_struct **vmas,
+ unsigned long *position, int *length, int i)
+{
+ unsigned long vaddr = *position;
+ int remainder = *length;
+
+ WARN_ON(!is_vm_hugetlb_page(vma));
+
+ while (vaddr < vma->vm_end && remainder) {
+ if (pages) {
+ pte_t *pte;
+ struct page *page;
+
+ pte = huge_pte_offset(mm, vaddr);
+
+ /* hugetlb should be locked, and hence, prefaulted */
+ BUG_ON(!pte || pte_none(*pte));
+
+ page = pte_page(*pte);
+
+ WARN_ON(!PageCompound(page));
+
+ get_page(page);
+ pages[i] = page;
+ }
+
+ if (vmas)
+ vmas[i] = vma;
+
+ vaddr += PAGE_SIZE;
+ --remainder;
+ ++i;
+ }
+
+ *length = remainder;
+ *position = vaddr;
+
+ return i;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm,
+ unsigned long address, int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+{
+ return NULL;
+}
+
+void unmap_hugepage_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long address;
+ pte_t *pte;
+ struct page *page;
+ int i;
+
+ BUG_ON(start & (HPAGE_SIZE - 1));
+ BUG_ON(end & (HPAGE_SIZE - 1));
+
+ for (address = start; address < end; address += HPAGE_SIZE) {
+ pte = huge_pte_offset(mm, address);
+ BUG_ON(!pte);
+ if (pte_none(*pte))
+ continue;
+ page = pte_page(*pte);
+ put_page(page);
+ for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+ pte_clear(mm, address+(i*PAGE_SIZE), pte);
+ pte++;
+ }
+ }
+ add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
+ flush_tlb_range(vma, start, end);
+}
+
+static void context_reload(void *__data)
+{
+ struct mm_struct *mm = __data;
+
+ if (mm == current->mm)
+ load_secondary_context(mm);
+}
+
+int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret = 0;
+
+ /* On UltraSPARC-III+ and later, configure the second half of
+ * the Data-TLB for huge pages.
+ */
+ if (tlb_type == cheetah_plus) {
+ unsigned long ctx;
+
+ spin_lock(&ctx_alloc_lock);
+ ctx = mm->context.sparc64_ctx_val;
+ ctx &= ~CTX_PGSZ_MASK;
+ ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+ ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+ if (ctx != mm->context.sparc64_ctx_val) {
+ /* When changing the page size fields, we
+ * must perform a context flush so that no
+ * stale entries match. This flush must
+ * occur with the original context register
+ * settings.
+ */
+ do_flush_tlb_mm(mm);
+
+ /* Reload the context register of all processors
+ * also executing in this address space.
+ */
+ mm->context.sparc64_ctx_val = ctx;
+ on_each_cpu(context_reload, mm, 0, 0);
+ }
+ spin_unlock(&ctx_alloc_lock);
+ }
+
+ BUG_ON(vma->vm_start & ~HPAGE_MASK);
+ BUG_ON(vma->vm_end & ~HPAGE_MASK);
+
+ spin_lock(&mm->page_table_lock);
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+ unsigned long idx;
+ pte_t *pte = huge_pte_alloc(mm, addr);
+ struct page *page;
+
+ if (!pte) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!pte_none(*pte))
+ continue;
+
+ idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+ + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+ page = find_get_page(mapping, idx);
+ if (!page) {
+ /* charge the fs quota first */
+ if (hugetlb_get_quota(mapping)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ page = alloc_huge_page();
+ if (!page) {
+ hugetlb_put_quota(mapping);
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
+			if (!ret) {
+ unlock_page(page);
+ } else {
+ hugetlb_put_quota(mapping);
+ free_huge_page(page);
+ goto out;
+ }
+ }
+ set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
+ }
+out:
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
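
Because the hardware huge page is built from base pages, set_huge_pte() and
copy_hugetlb_page_range() above write (1 << HUGETLB_PAGE_ORDER) consecutive
PTEs, each advanced by PAGE_SIZE, and hugetlb_prefault() derives the page-cache
index from the offset within the VMA plus vm_pgoff. A brief sketch of those two
calculations, assuming the sparc64 constants (8K base pages, 4MB huge pages),
might look like:

#include <stdio.h>

#define PAGE_SHIFT	13			/* 8K base pages (assumption) */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define HPAGE_SHIFT	22			/* 4MB huge pages (assumption) */
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/* Page-cache index computed the same way as in hugetlb_prefault(). */
static unsigned long hugepage_cache_index(unsigned long addr,
					  unsigned long vm_start,
					  unsigned long vm_pgoff)
{
	return ((addr - vm_start) >> HPAGE_SHIFT) +
	       (vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
}

int main(void)
{
	unsigned long pte_val = 0x10000000UL;	/* hypothetical base PA bits */
	unsigned long i;

	/* One base PTE per 8K page of the huge page, as set_huge_pte() does. */
	for (i = 0; i < (1UL << HUGETLB_PAGE_ORDER); i++) {
		if (i < 3)			/* print only the first few */
			printf("pte[%lu] -> %#lx\n", i, pte_val);
		pte_val += PAGE_SIZE;
	}

	printf("idx = %lu\n",
	       hugepage_cache_index(0x400000 + (1UL << HPAGE_SHIFT),
				    0x400000, 0));	/* prints idx = 1 */
	return 0;
}
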
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
new file mode 100644
index 000000000000..89022ccaa75b
--- /dev/null
+++ b/arch/sparc64/mm/init.c
@@ -0,0 +1,1769 @@
+/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/mm/init.c
+ *
+ * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/initrd.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/head.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/iommu.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/dma.h>
+#include <asm/starfire.h>
+#include <asm/tlb.h>
+#include <asm/spitfire.h>
+#include <asm/sections.h>
+
+extern void device_scan(void);
+
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+unsigned long *sparc64_valid_addr_bitmap;
+
+/* Ugly, but necessary... -DaveM */
+unsigned long phys_base;
+unsigned long kern_base;
+unsigned long kern_size;
+unsigned long pfn_base;
+
+/* This is even uglier. We have a problem where the kernel may not be
+ * located at phys_base. However, initial __alloc_bootmem() calls need to
+ * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
+ * those page mappings won't work. Things are ok after inherit_prom_mappings
+ * is called though. Dave says he'll clean this up some other time.
+ * -- BenC
+ */
+static unsigned long bootmap_base;
+
+/* get_new_mmu_context() uses "cache + 1". */
+DEFINE_SPINLOCK(ctx_alloc_lock);
+unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
+unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
+
+/* References to special section boundaries */
+extern char _start[], _end[];
+
+/* Initial ramdisk setup */
+extern unsigned long sparc_ramdisk_image64;
+extern unsigned int sparc_ramdisk_image;
+extern unsigned int sparc_ramdisk_size;
+
+struct page *mem_map_zero;
+
+int bigkernel = 0;
+
+/* XXX Tune this... */
+#define PGT_CACHE_LOW 25
+#define PGT_CACHE_HIGH 50
+
+void check_pgt_cache(void)
+{
+ preempt_disable();
+ if (pgtable_cache_size > PGT_CACHE_HIGH) {
+ do {
+ if (pgd_quicklist)
+ free_pgd_slow(get_pgd_fast());
+ if (pte_quicklist[0])
+ free_pte_slow(pte_alloc_one_fast(NULL, 0));
+ if (pte_quicklist[1])
+ free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+ } while (pgtable_cache_size > PGT_CACHE_LOW);
+ }
+ preempt_enable();
+}
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+atomic_t dcpage_flushes = ATOMIC_INIT(0);
+#ifdef CONFIG_SMP
+atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+#endif
+#endif
+
+__inline__ void flush_dcache_page_impl(struct page *page)
+{
+#ifdef CONFIG_DEBUG_DCFLUSH
+ atomic_inc(&dcpage_flushes);
+#endif
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ __flush_dcache_page(page_address(page),
+ ((tlb_type == spitfire) &&
+ page_mapping(page) != NULL));
+#else
+ if (page_mapping(page) != NULL &&
+ tlb_type == spitfire)
+ __flush_icache_page(__pa(page_address(page)));
+#endif
+}
+
+#define PG_dcache_dirty PG_arch_1
+
+#define dcache_dirty_cpu(page) \
+ (((page)->flags >> 24) & (NR_CPUS - 1UL))
+
+static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
+{
+ unsigned long mask = this_cpu;
+ unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
+ mask = (mask << 24) | (1UL << PG_dcache_dirty);
+ __asm__ __volatile__("1:\n\t"
+ "ldx [%2], %%g7\n\t"
+ "and %%g7, %1, %%g1\n\t"
+ "or %%g1, %0, %%g1\n\t"
+ "casx [%2], %%g7, %%g1\n\t"
+ "cmp %%g7, %%g1\n\t"
+ "bne,pn %%xcc, 1b\n\t"
+ " membar #StoreLoad | #StoreStore"
+ : /* no outputs */
+ : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
+ : "g1", "g7");
+}
+
+static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
+{
+ unsigned long mask = (1UL << PG_dcache_dirty);
+
+ __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
+ "1:\n\t"
+ "ldx [%2], %%g7\n\t"
+ "srlx %%g7, 24, %%g1\n\t"
+ "and %%g1, %3, %%g1\n\t"
+ "cmp %%g1, %0\n\t"
+ "bne,pn %%icc, 2f\n\t"
+ " andn %%g7, %1, %%g1\n\t"
+ "casx [%2], %%g7, %%g1\n\t"
+ "cmp %%g7, %%g1\n\t"
+ "bne,pn %%xcc, 1b\n\t"
+ " membar #StoreLoad | #StoreStore\n"
+ "2:"
+ : /* no outputs */
+ : "r" (cpu), "r" (mask), "r" (&page->flags),
+ "i" (NR_CPUS - 1UL)
+ : "g1", "g7");
+}
+
+extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ struct page *page;
+ unsigned long pfn;
+ unsigned long pg_flags;
+
+ pfn = pte_pfn(pte);
+ if (pfn_valid(pfn) &&
+ (page = pfn_to_page(pfn), page_mapping(page)) &&
+ ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
+ int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+ int this_cpu = get_cpu();
+
+ /* This is just to optimize away some function calls
+ * in the SMP case.
+ */
+ if (cpu == this_cpu)
+ flush_dcache_page_impl(page);
+ else
+ smp_flush_dcache_page_impl(page, cpu);
+
+ clear_dcache_dirty_cpu(page, cpu);
+
+ put_cpu();
+ }
+
+ if (get_thread_fault_code())
+ __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
+ address, pte, get_thread_fault_code());
+}
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ int dirty = test_bit(PG_dcache_dirty, &page->flags);
+ int dirty_cpu = dcache_dirty_cpu(page);
+ int this_cpu = get_cpu();
+
+ if (mapping && !mapping_mapped(mapping)) {
+ if (dirty) {
+ if (dirty_cpu == this_cpu)
+ goto out;
+ smp_flush_dcache_page_impl(page, dirty_cpu);
+ }
+ set_dcache_dirty(page, this_cpu);
+ } else {
+ /* We could delay the flush for the !page_mapping
+ * case too. But that case is for exec env/arg
+		 * pages and those are 99% certainly going to get
+ * faulted into the tlb (and thus flushed) anyways.
+ */
+ flush_dcache_page_impl(page);
+ }
+
+out:
+ put_cpu();
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ /* Cheetah has coherent I-cache. */
+ if (tlb_type == spitfire) {
+ unsigned long kaddr;
+
+ for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
+ __flush_icache_page(__get_phys(kaddr));
+ }
+}
+
+unsigned long page_to_pfn(struct page *page)
+{
+ return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+ return (mem_map + (pfn - pfn_base));
+}
+
+void show_mem(void)
+{
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n",
+ nr_swap_pages << (PAGE_SHIFT-10));
+ printk("%ld pages of RAM\n", num_physpages);
+ printk("%d free pages\n", nr_free_pages());
+ printk("%d pages in page table cache\n",pgtable_cache_size);
+}
+
+void mmu_info(struct seq_file *m)
+{
+ if (tlb_type == cheetah)
+ seq_printf(m, "MMU Type\t: Cheetah\n");
+ else if (tlb_type == cheetah_plus)
+ seq_printf(m, "MMU Type\t: Cheetah+\n");
+ else if (tlb_type == spitfire)
+ seq_printf(m, "MMU Type\t: Spitfire\n");
+ else
+ seq_printf(m, "MMU Type\t: ???\n");
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+ seq_printf(m, "DCPageFlushes\t: %d\n",
+ atomic_read(&dcpage_flushes));
+#ifdef CONFIG_SMP
+ seq_printf(m, "DCPageFlushesXC\t: %d\n",
+ atomic_read(&dcpage_flushes_xcall));
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_DEBUG_DCFLUSH */
+}
+
+struct linux_prom_translation {
+ unsigned long virt;
+ unsigned long size;
+ unsigned long data;
+};
+
+extern unsigned long prom_boot_page;
+extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
+extern int prom_get_mmu_ihandle(void);
+extern void register_prom_callbacks(void);
+
+/* Exported for SMP bootup purposes. */
+unsigned long kern_locked_tte_data;
+
+void __init early_pgtable_allocfail(char *type)
+{
+ prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
+ prom_halt();
+}
+
+#define BASE_PAGE_SIZE 8192
+static pmd_t *prompmd;
+
+/*
+ * Translate a PROM mapping we captured at boot time into a physical address.
+ * The second parameter is only set from prom_callback() invocations.
+ */
+unsigned long prom_virt_to_phys(unsigned long promva, int *error)
+{
+ pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
+ pte_t *ptep;
+ unsigned long base;
+
+ if (pmd_none(*pmdp)) {
+ if (error)
+ *error = 1;
+ return(0);
+ }
+ ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
+ if (!pte_present(*ptep)) {
+ if (error)
+ *error = 1;
+ return(0);
+ }
+ if (error) {
+ *error = 0;
+ return(pte_val(*ptep));
+ }
+ base = pte_val(*ptep) & _PAGE_PADDR;
+ return(base + (promva & (BASE_PAGE_SIZE - 1)));
+}
+
+static void inherit_prom_mappings(void)
+{
+ struct linux_prom_translation *trans;
+ unsigned long phys_page, tte_vaddr, tte_data;
+ void (*remap_func)(unsigned long, unsigned long, int);
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int node, n, i, tsz;
+ extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+
+ node = prom_finddevice("/virtual-memory");
+ n = prom_getproplen(node, "translations");
+ if (n == 0 || n == -1) {
+ prom_printf("Couldn't get translation property\n");
+ prom_halt();
+ }
+ n += 5 * sizeof(struct linux_prom_translation);
+ for (tsz = 1; tsz < n; tsz <<= 1)
+ /* empty */;
+ trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
+ if (trans == NULL) {
+ prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
+ prom_halt();
+ }
+ memset(trans, 0, tsz);
+
+ if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
+ prom_printf("Couldn't get translation property\n");
+ prom_halt();
+ }
+ n = n / sizeof(*trans);
+
+ /*
+ * The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
+ * ie obp range, are handled in entry.S and do not use the vpte scheme
+ * (see rant in inherit_locked_prom_mappings()).
+ */
+#define OBP_PMD_SIZE 2048
+ prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
+ if (prompmd == NULL)
+ early_pgtable_allocfail("pmd");
+ memset(prompmd, 0, OBP_PMD_SIZE);
+ for (i = 0; i < n; i++) {
+ unsigned long vaddr;
+
+ if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
+ for (vaddr = trans[i].virt;
+ ((vaddr < trans[i].virt + trans[i].size) &&
+ (vaddr < HI_OBP_ADDRESS));
+ vaddr += BASE_PAGE_SIZE) {
+ unsigned long val;
+
+ pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
+ if (pmd_none(*pmdp)) {
+ ptep = __alloc_bootmem(BASE_PAGE_SIZE,
+ BASE_PAGE_SIZE,
+ bootmap_base);
+ if (ptep == NULL)
+ early_pgtable_allocfail("pte");
+ memset(ptep, 0, BASE_PAGE_SIZE);
+ pmd_set(pmdp, ptep);
+ }
+ ptep = (pte_t *)__pmd_page(*pmdp) +
+ ((vaddr >> 13) & 0x3ff);
+
+ val = trans[i].data;
+
+ /* Clear diag TTE bits. */
+ if (tlb_type == spitfire)
+ val &= ~0x0003fe0000000000UL;
+
+ set_pte_at(&init_mm, vaddr,
+ ptep, __pte(val | _PAGE_MODIFIED));
+ trans[i].data += BASE_PAGE_SIZE;
+ }
+ }
+ }
+ phys_page = __pa(prompmd);
+ obp_iaddr_patch[0] |= (phys_page >> 10);
+ obp_iaddr_patch[1] |= (phys_page & 0x3ff);
+ flushi((long)&obp_iaddr_patch[0]);
+ obp_daddr_patch[0] |= (phys_page >> 10);
+ obp_daddr_patch[1] |= (phys_page & 0x3ff);
+ flushi((long)&obp_daddr_patch[0]);
+
+ /* Now fixup OBP's idea about where we really are mapped. */
+ prom_printf("Remapping the kernel... ");
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Using plain zero for the context value is
+ * correct here, we are not using the Linux trap
+ * tables yet so we should not use the special
+ * UltraSPARC-III+ page size encodings yet.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ switch (tlb_type) {
+ default:
+ case spitfire:
+ phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
+ break;
+
+ case cheetah:
+ case cheetah_plus:
+ phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
+ break;
+	}
+
+ phys_page &= _PAGE_PADDR;
+ phys_page += ((unsigned long)&prom_boot_page -
+ (unsigned long)KERNBASE);
+
+ if (tlb_type == spitfire) {
+ /* Lock this into i/d tlb entry 59 */
+ __asm__ __volatile__(
+ "stxa %%g0, [%2] %3\n\t"
+ "stxa %0, [%1] %4\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6\n\t"
+ "stxa %%g0, [%2] %5\n\t"
+ "stxa %0, [%1] %6\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6"
+ : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
+ _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
+ "r" (59 << 3), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
+ "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
+ : "memory");
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ /* Lock this into i/d tlb-0 entry 11 */
+ __asm__ __volatile__(
+ "stxa %%g0, [%2] %3\n\t"
+ "stxa %0, [%1] %4\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6\n\t"
+ "stxa %%g0, [%2] %5\n\t"
+ "stxa %0, [%1] %6\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6"
+ : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
+ _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
+ "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
+ "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
+ : "memory");
+ } else {
+ /* Implement me :-) */
+ BUG();
+ }
+
+ tte_vaddr = (unsigned long) KERNBASE;
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Using plain zero for the context value is
+ * correct here, we are not using the Linux trap
+ * tables yet so we should not use the special
+ * UltraSPARC-III+ page size encodings yet.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ if (tlb_type == spitfire)
+ tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
+ else
+ tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+
+ kern_locked_tte_data = tte_data;
+
+ remap_func = (void *) ((unsigned long) &prom_remap -
+ (unsigned long) &prom_boot_page);
+
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Using plain zero for the context value is
+ * correct here, we are not using the Linux trap
+ * tables yet so we should not use the special
+ * UltraSPARC-III+ page size encodings yet.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ remap_func((tlb_type == spitfire ?
+ (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
+ (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
+ (unsigned long) KERNBASE,
+ prom_get_mmu_ihandle());
+
+ if (bigkernel)
+ remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
+ (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
+
+ /* Flush out that temporary mapping. */
+ spitfire_flush_dtlb_nucleus_page(0x0);
+ spitfire_flush_itlb_nucleus_page(0x0);
+
+ /* Now lock us back into the TLBs via OBP. */
+ prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+ prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+ if (bigkernel) {
+ prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
+ tte_vaddr + 0x400000);
+ prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
+ tte_vaddr + 0x400000);
+ }
+
+ /* Re-read translations property. */
+ if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
+ prom_printf("Couldn't get translation property\n");
+ prom_halt();
+ }
+ n = n / sizeof(*trans);
+
+ for (i = 0; i < n; i++) {
+ unsigned long vaddr = trans[i].virt;
+ unsigned long size = trans[i].size;
+
+ if (vaddr < 0xf0000000UL) {
+ unsigned long avoid_start = (unsigned long) KERNBASE;
+ unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
+
+ if (bigkernel)
+ avoid_end += (4 * 1024 * 1024);
+ if (vaddr < avoid_start) {
+ unsigned long top = vaddr + size;
+
+ if (top > avoid_start)
+ top = avoid_start;
+ prom_unmap(top - vaddr, vaddr);
+ }
+ if ((vaddr + size) > avoid_end) {
+ unsigned long bottom = vaddr;
+
+ if (bottom < avoid_end)
+ bottom = avoid_end;
+ prom_unmap((vaddr + size) - bottom, bottom);
+ }
+ }
+ }
+
+ prom_printf("done.\n");
+
+ register_prom_callbacks();
+}
+
+/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
+ * upwards as reserved for use by the firmware (I wonder if this
+ * will be the same on Cheetah...). We use this virtual address
+ * range for the VPTE table mappings of the nucleus so we need
+ * to zap them when we enter the PROM. -DaveM
+ */
+static void __flush_nucleus_vptes(void)
+{
+ unsigned long prom_reserved_base = 0xfffffffc00000000UL;
+ int i;
+
+ /* Only DTLB must be checked for VPTE entries. */
+ if (tlb_type == spitfire) {
+ for (i = 0; i < 63; i++) {
+ unsigned long tag;
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no cheetah+
+ * page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ tag = spitfire_get_dtlb_tag(i);
+ if (((tag & ~(PAGE_MASK)) == 0) &&
+ ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ spitfire_put_dtlb_data(i, 0x0UL);
+ }
+ }
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ for (i = 0; i < 512; i++) {
+ unsigned long tag = cheetah_get_dtlb_tag(i, 2);
+
+ if ((tag & ~PAGE_MASK) == 0 &&
+ (tag & PAGE_MASK) >= prom_reserved_base) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ cheetah_put_dtlb_data(i, 0x0UL, 2);
+ }
+
+ if (tlb_type != cheetah_plus)
+ continue;
+
+ tag = cheetah_get_dtlb_tag(i, 3);
+
+ if ((tag & ~PAGE_MASK) == 0 &&
+ (tag & PAGE_MASK) >= prom_reserved_base) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ cheetah_put_dtlb_data(i, 0x0UL, 3);
+ }
+ }
+ } else {
+ /* Implement me :-) */
+ BUG();
+ }
+}
+
+static int prom_ditlb_set;
+struct prom_tlb_entry {
+ int tlb_ent;
+ unsigned long tlb_tag;
+ unsigned long tlb_data;
+};
+struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
+
+void prom_world(int enter)
+{
+ unsigned long pstate;
+ int i;
+
+ if (!enter)
+ set_fs((mm_segment_t) { get_thread_current_ds() });
+
+ if (!prom_ditlb_set)
+ return;
+
+ /* Make sure the following runs atomically. */
+ __asm__ __volatile__("flushw\n\t"
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+
+ if (enter) {
+ /* Kick out nucleus VPTEs. */
+ __flush_nucleus_vptes();
+
+ /* Install PROM world. */
+ for (i = 0; i < 16; i++) {
+ if (prom_dtlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU));
+ if (tlb_type == spitfire)
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+ cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ }
+ if (prom_itlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : : "r" (prom_itlb[i].tlb_tag),
+ "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
+ if (tlb_type == spitfire)
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+ cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ }
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ if (prom_dtlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ if (tlb_type == spitfire)
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+ else
+ cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+ }
+ if (prom_itlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : : "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
+ if (tlb_type == spitfire)
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+ else
+ cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+ }
+ }
+ }
+ __asm__ __volatile__("wrpr %0, 0, %%pstate"
+ : : "r" (pstate));
+}
+
+void inherit_locked_prom_mappings(int save_p)
+{
+ int i;
+ int dtlb_seen = 0;
+ int itlb_seen = 0;
+
+ /* Fucking losing PROM has more mappings in the TLB, but
+ * it (conveniently) fails to mention any of these in the
+ * translations property. The only ones that matter are
+ * the locked PROM tlb entries, so we impose the following
+	 * irrevocable rule on the PROM, it is allowed 8 locked
+ * entries in the ITLB and 8 in the DTLB.
+ *
+ * Supposedly the upper 16GB of the address space is
+ * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
+ * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface
+ * used between the client program and the firmware on sun5
+ * systems to coordinate mmu mappings is also COMPLETELY
+ * UNDOCUMENTED!!!!!! Thanks S(t)un!
+ */
+ if (save_p) {
+ for (i = 0; i < 16; i++) {
+ prom_itlb[i].tlb_ent = -1;
+ prom_dtlb[i].tlb_ent = -1;
+ }
+ }
+ if (tlb_type == spitfire) {
+ int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
+ for (i = 0; i < high; i++) {
+ unsigned long data;
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no cheetah+
+ * page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ data = spitfire_get_dtlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no
+ * cheetah+ page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ tag = spitfire_get_dtlb_tag(i);
+ if (save_p) {
+ prom_dtlb[dtlb_seen].tlb_ent = i;
+ prom_dtlb[dtlb_seen].tlb_tag = tag;
+ prom_dtlb[dtlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ spitfire_put_dtlb_data(i, 0x0UL);
+
+ dtlb_seen++;
+ if (dtlb_seen > 15)
+ break;
+ }
+ }
+
+ for (i = 0; i < high; i++) {
+ unsigned long data;
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no
+ * cheetah+ page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ data = spitfire_get_itlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no
+ * cheetah+ page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ tag = spitfire_get_itlb_tag(i);
+ if (save_p) {
+ prom_itlb[itlb_seen].tlb_ent = i;
+ prom_itlb[itlb_seen].tlb_tag = tag;
+ prom_itlb[itlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ spitfire_put_itlb_data(i, 0x0UL);
+
+ itlb_seen++;
+ if (itlb_seen > 15)
+ break;
+ }
+ }
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+
+ for (i = 0; i < high; i++) {
+ unsigned long data;
+
+ data = cheetah_get_ldtlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ tag = cheetah_get_ldtlb_tag(i);
+ if (save_p) {
+ prom_dtlb[dtlb_seen].tlb_ent = i;
+ prom_dtlb[dtlb_seen].tlb_tag = tag;
+ prom_dtlb[dtlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ cheetah_put_ldtlb_data(i, 0x0UL);
+
+ dtlb_seen++;
+ if (dtlb_seen > 15)
+ break;
+ }
+ }
+
+ for (i = 0; i < high; i++) {
+ unsigned long data;
+
+ data = cheetah_get_litlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ tag = cheetah_get_litlb_tag(i);
+ if (save_p) {
+ prom_itlb[itlb_seen].tlb_ent = i;
+ prom_itlb[itlb_seen].tlb_tag = tag;
+ prom_itlb[itlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ cheetah_put_litlb_data(i, 0x0UL);
+
+ itlb_seen++;
+ if (itlb_seen > 15)
+ break;
+ }
+ }
+ } else {
+ /* Implement me :-) */
+ BUG();
+ }
+ if (save_p)
+ prom_ditlb_set = 1;
+}
+
+/* Give PROM back his world, done during reboots... */
+void prom_reload_locked(void)
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ if (prom_dtlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU));
+ if (tlb_type == spitfire)
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+ cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ }
+
+ if (prom_itlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : : "r" (prom_itlb[i].tlb_tag),
+ "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
+ if (tlb_type == spitfire)
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ else
+ cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ }
+ }
+}
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+void __flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long va;
+
+ if (tlb_type == spitfire) {
+ int n = 0;
+
+ for (va = start; va < end; va += 32) {
+ spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+ if (++n >= 512)
+ break;
+ }
+ } else {
+ start = __pa(start);
+ end = __pa(end);
+ for (va = start; va < end; va += 32)
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (va),
+ "i" (ASI_DCACHE_INVALIDATE));
+ }
+}
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+/* If not locked, zap it. */
+void __flush_tlb_all(void)
+{
+ unsigned long pstate;
+ int i;
+
+ __asm__ __volatile__("flushw\n\t"
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+ if (tlb_type == spitfire) {
+ for (i = 0; i < 64; i++) {
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no
+ * cheetah+ page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ spitfire_put_dtlb_data(i, 0x0UL);
+ }
+
+ /* Spitfire Errata #32 workaround */
+ /* NOTE: Always runs on spitfire, so no
+ * cheetah+ page size encodings.
+ */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ spitfire_put_itlb_data(i, 0x0UL);
+ }
+ }
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ cheetah_flush_dtlb_all();
+ cheetah_flush_itlb_all();
+ }
+ __asm__ __volatile__("wrpr %0, 0, %%pstate"
+ : : "r" (pstate));
+}
+
+/* Caller does TLB context flushing on local CPU if necessary.
+ * The caller also ensures that CTX_VALID(mm->context) is false.
+ *
+ * We must be careful about boundary cases so that we never
+ * let the user have CTX 0 (nucleus) and never use a CTX
+ * version of zero (otherwise NO_CONTEXT would not be caught
+ * by the version mismatch tests in mmu_context.h).
+ */
+void get_new_mmu_context(struct mm_struct *mm)
+{
+ unsigned long ctx, new_ctx;
+ unsigned long orig_pgsz_bits;
+
+ spin_lock(&ctx_alloc_lock);
+ orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
+ ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
+ new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+ if (new_ctx >= (1 << CTX_NR_BITS)) {
+ new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
+ if (new_ctx >= ctx) {
+ int i;
+ new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
+ CTX_FIRST_VERSION;
+ if (new_ctx == 1)
+ new_ctx = CTX_FIRST_VERSION;
+
+ /* Don't call memset, for 16 entries that's just
+ * plain silly...
+ */
+ mmu_context_bmap[0] = 3;
+ mmu_context_bmap[1] = 0;
+ mmu_context_bmap[2] = 0;
+ mmu_context_bmap[3] = 0;
+ for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
+ mmu_context_bmap[i + 0] = 0;
+ mmu_context_bmap[i + 1] = 0;
+ mmu_context_bmap[i + 2] = 0;
+ mmu_context_bmap[i + 3] = 0;
+ }
+ goto out;
+ }
+ }
+ mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
+ new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
+out:
+ tlb_context_cache = new_ctx;
+ mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+ spin_unlock(&ctx_alloc_lock);
+}
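+
+/* Editor's sketch (not part of the original code): a context value is the
+ * version bits OR'd with a context number, so a stale mm whose version no
+ * longer matches tlb_context_cache is forced back through the allocator
+ * above.  A minimal illustration, assuming the CTX_* masks used here:
+ */
+#if 0
+static int example_ctx_is_stale(unsigned long mm_ctx, unsigned long cache)
+{
+	/* Only the version field is compared; the low CTX_NR_BITS
+	 * bits hold the per-version context number.
+	 */
+	return (mm_ctx & CTX_VERSION_MASK) != (cache & CTX_VERSION_MASK);
+}
+#endif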
+
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct pgt_quicklists;
+#endif
+
+/* OK, we have to color these pages. The page tables are accessed
+ * by non-Dcache enabled mappings in the VPTE area by the dtlb_backend.S
+ * code, as well as by PAGE_OFFSET range direct-mapped addresses by
+ * other parts of the kernel. By coloring, we make sure that the tlbmiss
+ * fast handlers do not get data from old/garbage dcache lines that
+ * correspond to an old/stale virtual address (user/kernel) that
+ * previously mapped the pagetable page while accessing vpte range
+ * addresses. The idea is that if the vpte color and PAGE_OFFSET range
+ * color are the same, then when the kernel initializes the pagetable
+ * using the latter address range, accesses through the former address
+ * range will see the newly initialized data rather than the garbage.
+ */
+#ifdef DCACHE_ALIASING_POSSIBLE
+#define DC_ALIAS_SHIFT 1
+#else
+#define DC_ALIAS_SHIFT 0
+#endif
+pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+ struct page *page;
+ unsigned long color;
+
+ {
+ pte_t *ptep = pte_alloc_one_fast(mm, address);
+
+ if (ptep)
+ return ptep;
+ }
+
+ color = VPTE_COLOR(address);
+ page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
+ if (page) {
+ unsigned long *to_free;
+ unsigned long paddr;
+ pte_t *pte;
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ set_page_count(page, 1);
+ ClearPageCompound(page);
+
+ set_page_count((page + 1), 1);
+ ClearPageCompound(page + 1);
+#endif
+ paddr = (unsigned long) page_address(page);
+ memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
+
+ if (!color) {
+ pte = (pte_t *) paddr;
+ to_free = (unsigned long *) (paddr + PAGE_SIZE);
+ } else {
+ pte = (pte_t *) (paddr + PAGE_SIZE);
+ to_free = (unsigned long *) paddr;
+ }
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ /* Now free the other one up, adjust cache size. */
+ preempt_disable();
+ *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
+ pte_quicklist[color ^ 0x1] = to_free;
+ pgtable_cache_size++;
+ preempt_enable();
+#endif
+
+ return pte;
+ }
+ return NULL;
+}
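+
+/* Editor's sketch (not part of the original code): the allocation above
+ * grabs an order-DC_ALIAS_SHIFT block and keeps the half whose PAGE_OFFSET
+ * color matches the VPTE color of 'address'; the other half is pushed onto
+ * the quicklist of the opposite color.  A minimal illustration:
+ */
+#if 0
+static pte_t *example_pick_colored_half(unsigned long paddr, unsigned long color)
+{
+	/* color 0 -> first page of the pair, color 1 -> second page */
+	return (pte_t *) (paddr + (color ? PAGE_SIZE : 0));
+}
+#endif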
+
+void sparc_ultra_dump_itlb(void)
+{
+ int slot;
+
+ if (tlb_type == spitfire) {
+ printk ("Contents of itlb: ");
+ for (slot = 0; slot < 14; slot++) printk (" ");
+ printk ("%2x:%016lx,%016lx\n",
+ 0,
+ spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
+ for (slot = 1; slot < 64; slot+=3) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
+ slot+1,
+ spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
+ slot+2,
+ spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
+ }
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ printk ("Contents of itlb0:\n");
+ for (slot = 0; slot < 16; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
+ slot+1,
+ cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
+ }
+ printk ("Contents of itlb2:\n");
+ for (slot = 0; slot < 128; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
+ slot+1,
+ cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
+ }
+ }
+}
+
+void sparc_ultra_dump_dtlb(void)
+{
+ int slot;
+
+ if (tlb_type == spitfire) {
+ printk ("Contents of dtlb: ");
+ for (slot = 0; slot < 14; slot++) printk (" ");
+ printk ("%2x:%016lx,%016lx\n", 0,
+ spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
+ for (slot = 1; slot < 64; slot+=3) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
+ slot+1,
+ spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
+ slot+2,
+ spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
+ }
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ printk ("Contents of dtlb0:\n");
+ for (slot = 0; slot < 16; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
+ slot+1,
+ cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
+ }
+ printk ("Contents of dtlb2:\n");
+ for (slot = 0; slot < 512; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
+ slot+1,
+ cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
+ }
+ if (tlb_type == cheetah_plus) {
+ printk ("Contents of dtlb3:\n");
+ for (slot = 0; slot < 512; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
+ slot+1,
+ cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
+ }
+ }
+ }
+}
+
+extern unsigned long cmdline_memory_size;
+
+unsigned long __init bootmem_init(unsigned long *pages_avail)
+{
+ unsigned long bootmap_size, start_pfn, end_pfn;
+ unsigned long end_of_phys_memory = 0UL;
+ unsigned long bootmap_pfn, bytes_avail, size;
+ int i;
+
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("bootmem_init: Scan sp_banks, ");
+#endif
+
+ bytes_avail = 0UL;
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ end_of_phys_memory = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ bytes_avail += sp_banks[i].num_bytes;
+ if (cmdline_memory_size) {
+ if (bytes_avail > cmdline_memory_size) {
+ unsigned long slack = bytes_avail - cmdline_memory_size;
+
+ bytes_avail -= slack;
+ end_of_phys_memory -= slack;
+
+ sp_banks[i].num_bytes -= slack;
+ if (sp_banks[i].num_bytes == 0) {
+ sp_banks[i].base_addr = 0xdeadbeef;
+ } else {
+ sp_banks[i+1].num_bytes = 0;
+ sp_banks[i+1].base_addr = 0xdeadbeef;
+ }
+ break;
+ }
+ }
+ }
+
+ *pages_avail = bytes_avail >> PAGE_SHIFT;
+
+ /* Start with page aligned address of last symbol in kernel
+ * image. The kernel is hard mapped below PAGE_OFFSET in a
+ * 4MB locked TLB translation.
+ */
+ start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;
+
+ bootmap_pfn = start_pfn;
+
+ end_pfn = end_of_phys_memory >> PAGE_SHIFT;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+	/* Now we have to check the initial ramdisk, so that the bootmap does not overwrite it */
+ if (sparc_ramdisk_image || sparc_ramdisk_image64) {
+ unsigned long ramdisk_image = sparc_ramdisk_image ?
+ sparc_ramdisk_image : sparc_ramdisk_image64;
+ if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
+ ramdisk_image -= KERNBASE;
+ initrd_start = ramdisk_image + phys_base;
+ initrd_end = initrd_start + sparc_ramdisk_size;
+ if (initrd_end > end_of_phys_memory) {
+ printk(KERN_CRIT "initrd extends beyond end of memory "
+ "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+ initrd_end, end_of_phys_memory);
+ initrd_start = 0;
+ }
+ if (initrd_start) {
+ if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
+ initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
+ bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
+ }
+ }
+#endif
+ /* Initialize the boot-time allocator. */
+ max_pfn = max_low_pfn = end_pfn;
+ min_low_pfn = pfn_base;
+
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
+ min_low_pfn, bootmap_pfn, max_low_pfn);
+#endif
+ bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
+
+ bootmap_base = bootmap_pfn << PAGE_SHIFT;
+
+ /* Now register the available physical memory with the
+ * allocator.
+ */
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
+ i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+#endif
+ free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ }
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ size = initrd_end - initrd_start;
+
+		/* Reserve the initrd image area. */
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
+ initrd_start, initrd_end);
+#endif
+ reserve_bootmem(initrd_start, size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ initrd_start += PAGE_OFFSET;
+ initrd_end += PAGE_OFFSET;
+ }
+#endif
+ /* Reserve the kernel text/data/bss. */
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
+#endif
+ reserve_bootmem(kern_base, kern_size);
+ *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
+
+ /* Reserve the bootmem map. We do not account for it
+ * in pages_avail because we will release that memory
+ * in free_all_bootmem.
+ */
+ size = bootmap_size;
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
+ (bootmap_pfn << PAGE_SHIFT), size);
+#endif
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
+ *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ return end_pfn;
+}
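+
+/* Editor's sketch (not part of the original code): the cmdline_memory_size
+ * handling above trims the last bank that pushes the running total past the
+ * "mem=" limit and terminates the bank list there.  A simplified version of
+ * that loop (ignoring the 0xdeadbeef poisoning of emptied banks):
+ */
+#if 0
+static void example_trim_banks(struct sparc_phys_banks *banks,
+			       unsigned long limit)
+{
+	unsigned long total = 0;
+	int i;
+
+	for (i = 0; banks[i].num_bytes != 0; i++) {
+		total += banks[i].num_bytes;
+		if (limit && total > limit) {
+			banks[i].num_bytes -= total - limit;
+			banks[i + 1].num_bytes = 0;	/* terminate the list */
+			break;
+		}
+	}
+}
+#endif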
+
+/* paging_init() sets up the page tables */
+
+extern void cheetah_ecache_flush_init(void);
+
+static unsigned long last_valid_pfn;
+
+void __init paging_init(void)
+{
+ extern pmd_t swapper_pmd_dir[1024];
+ extern unsigned int sparc64_vpte_patchme1[1];
+ extern unsigned int sparc64_vpte_patchme2[1];
+ unsigned long alias_base = kern_base + PAGE_OFFSET;
+ unsigned long second_alias_page = 0;
+ unsigned long pt, flags, end_pfn, pages_avail;
+ unsigned long shift = alias_base - ((unsigned long)KERNBASE);
+ unsigned long real_end;
+
+ set_bit(0, mmu_context_bmap);
+
+ real_end = (unsigned long)_end;
+ if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
+ bigkernel = 1;
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (sparc_ramdisk_image || sparc_ramdisk_image64)
+ real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
+#endif
+
+ /* We assume physical memory starts at some 4mb multiple,
+ * if this were not true we wouldn't boot up to this point
+ * anyways.
+ */
+ pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
+ pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
+ local_irq_save(flags);
+ if (tlb_type == spitfire) {
+ __asm__ __volatile__(
+ " stxa %1, [%0] %3\n"
+ " stxa %2, [%5] %4\n"
+ " membar #Sync\n"
+ " flush %%g6\n"
+ " nop\n"
+ " nop\n"
+ " nop\n"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
+ : "memory");
+ if (real_end >= KERNBASE + 0x340000) {
+ second_alias_page = alias_base + 0x400000;
+ __asm__ __volatile__(
+ " stxa %1, [%0] %3\n"
+ " stxa %2, [%5] %4\n"
+ " membar #Sync\n"
+ " flush %%g6\n"
+ " nop\n"
+ " nop\n"
+ " nop\n"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
+ : "memory");
+ }
+ } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ __asm__ __volatile__(
+ " stxa %1, [%0] %3\n"
+ " stxa %2, [%5] %4\n"
+ " membar #Sync\n"
+ " flush %%g6\n"
+ " nop\n"
+ " nop\n"
+ " nop\n"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
+ : "memory");
+ if (real_end >= KERNBASE + 0x340000) {
+ second_alias_page = alias_base + 0x400000;
+ __asm__ __volatile__(
+ " stxa %1, [%0] %3\n"
+ " stxa %2, [%5] %4\n"
+ " membar #Sync\n"
+ " flush %%g6\n"
+ " nop\n"
+ " nop\n"
+ " nop\n"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
+ : "memory");
+ }
+ }
+ local_irq_restore(flags);
+
+ /* Now set kernel pgd to upper alias so physical page computations
+ * work.
+ */
+ init_mm.pgd += ((shift) / (sizeof(pgd_t)));
+
+ memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+
+ /* Now can init the kernel/bad page tables. */
+ pud_set(pud_offset(&swapper_pg_dir[0], 0),
+ swapper_pmd_dir + (shift / sizeof(pgd_t)));
+
+ sparc64_vpte_patchme1[0] |=
+ (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
+ sparc64_vpte_patchme2[0] |=
+ (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
+ flushi((long)&sparc64_vpte_patchme1[0]);
+
+ /* Setup bootmem... */
+ pages_avail = 0;
+ last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+
+ /* Inherit non-locked OBP mappings. */
+ inherit_prom_mappings();
+
+ /* Ok, we can use our TLB miss and window trap handlers safely.
+ * We need to do a quick peek here to see if we are on StarFire
+ * or not, so setup_tba can setup the IRQ globals correctly (it
+ * needs to get the hard smp processor id correctly).
+ */
+ {
+ extern void setup_tba(int);
+ setup_tba(this_is_starfire);
+ }
+
+ inherit_locked_prom_mappings(1);
+
+ /* We only created DTLB mapping of this stuff. */
+ spitfire_flush_dtlb_nucleus_page(alias_base);
+ if (second_alias_page)
+ spitfire_flush_dtlb_nucleus_page(second_alias_page);
+
+ __flush_tlb_all();
+
+ {
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long zholes_size[MAX_NR_ZONES];
+ unsigned long npages;
+ int znum;
+
+ for (znum = 0; znum < MAX_NR_ZONES; znum++)
+ zones_size[znum] = zholes_size[znum] = 0;
+
+ npages = end_pfn - pfn_base;
+ zones_size[ZONE_DMA] = npages;
+ zholes_size[ZONE_DMA] = npages - pages_avail;
+
+ free_area_init_node(0, &contig_page_data, zones_size,
+ phys_base >> PAGE_SHIFT, zholes_size);
+ }
+
+ device_scan();
+}
+
+/* Ok, it seems that the prom can allocate some more memory chunks
+ * as a side effect of some prom calls we perform during the
+ * boot sequence. My most likely theory is that it is from the
+ * prom_set_traptable() call, and OBP is allocating a scratchpad
+ * for saving client program register state etc.
+ */
+static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
+{
+ int swapi = 0;
+ int i, mitr;
+ unsigned long tmpaddr, tmpsize;
+ unsigned long lowest;
+
+ for (i = 0; thislist[i].theres_more != 0; i++) {
+ lowest = thislist[i].start_adr;
+ for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
+ if (thislist[mitr].start_adr < lowest) {
+ lowest = thislist[mitr].start_adr;
+ swapi = mitr;
+ }
+ if (lowest == thislist[i].start_adr)
+ continue;
+ tmpaddr = thislist[swapi].start_adr;
+ tmpsize = thislist[swapi].num_bytes;
+ for (mitr = swapi; mitr > i; mitr--) {
+ thislist[mitr].start_adr = thislist[mitr-1].start_adr;
+ thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
+ }
+ thislist[i].start_adr = tmpaddr;
+ thislist[i].num_bytes = tmpsize;
+ }
+}
+
+void __init rescan_sp_banks(void)
+{
+ struct linux_prom64_registers memlist[64];
+ struct linux_mlist_p1275 avail[64], *mlist;
+ unsigned long bytes, base_paddr;
+ int num_regs, node = prom_finddevice("/memory");
+ int i;
+
+ num_regs = prom_getproperty(node, "available",
+ (char *) memlist, sizeof(memlist));
+ num_regs = (num_regs / sizeof(struct linux_prom64_registers));
+ for (i = 0; i < num_regs; i++) {
+ avail[i].start_adr = memlist[i].phys_addr;
+ avail[i].num_bytes = memlist[i].reg_size;
+ avail[i].theres_more = &avail[i + 1];
+ }
+ avail[i - 1].theres_more = NULL;
+ sort_memlist(avail);
+
+ mlist = &avail[0];
+ i = 0;
+ bytes = mlist->num_bytes;
+ base_paddr = mlist->start_adr;
+
+ sp_banks[0].base_addr = base_paddr;
+ sp_banks[0].num_bytes = bytes;
+
+ while (mlist->theres_more != NULL){
+ i++;
+ mlist = mlist->theres_more;
+ bytes = mlist->num_bytes;
+ if (i >= SPARC_PHYS_BANKS-1) {
+ printk ("The machine has more banks than "
+ "this kernel can support\n"
+ "Increase the SPARC_PHYS_BANKS "
+ "setting (currently %d)\n",
+ SPARC_PHYS_BANKS);
+ i = SPARC_PHYS_BANKS-1;
+ break;
+ }
+
+ sp_banks[i].base_addr = mlist->start_adr;
+ sp_banks[i].num_bytes = mlist->num_bytes;
+ }
+
+ i++;
+ sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
+ sp_banks[i].num_bytes = 0;
+
+ for (i = 0; sp_banks[i].num_bytes != 0; i++)
+ sp_banks[i].num_bytes &= PAGE_MASK;
+}
+
+static void __init taint_real_pages(void)
+{
+ struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
+ int i;
+
+ for (i = 0; i < SPARC_PHYS_BANKS; i++) {
+ saved_sp_banks[i].base_addr =
+ sp_banks[i].base_addr;
+ saved_sp_banks[i].num_bytes =
+ sp_banks[i].num_bytes;
+ }
+
+ rescan_sp_banks();
+
+ /* Find changes discovered in the sp_bank rescan and
+ * reserve the lost portions in the bootmem maps.
+ */
+ for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+ unsigned long old_start, old_end;
+
+ old_start = saved_sp_banks[i].base_addr;
+ old_end = old_start +
+ saved_sp_banks[i].num_bytes;
+ while (old_start < old_end) {
+ int n;
+
+ for (n = 0; sp_banks[n].num_bytes; n++) {
+ unsigned long new_start, new_end;
+
+ new_start = sp_banks[n].base_addr;
+ new_end = new_start + sp_banks[n].num_bytes;
+
+ if (new_start <= old_start &&
+ new_end >= (old_start + PAGE_SIZE)) {
+ set_bit (old_start >> 22,
+ sparc64_valid_addr_bitmap);
+ goto do_next_page;
+ }
+ }
+ reserve_bootmem(old_start, PAGE_SIZE);
+
+ do_next_page:
+ old_start += PAGE_SIZE;
+ }
+ }
+}
+
+void __init mem_init(void)
+{
+ unsigned long codepages, datapages, initpages;
+ unsigned long addr, last;
+ int i;
+
+ i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
+ i += 1;
+ sparc64_valid_addr_bitmap = (unsigned long *)
+ __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
+ if (sparc64_valid_addr_bitmap == NULL) {
+ prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
+ prom_halt();
+ }
+ memset(sparc64_valid_addr_bitmap, 0, i << 3);
+
+ addr = PAGE_OFFSET + kern_base;
+ last = PAGE_ALIGN(kern_size) + addr;
+ while (addr < last) {
+ set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+ addr += PAGE_SIZE;
+ }
+
+ taint_real_pages();
+
+ max_mapnr = last_valid_pfn - pfn_base;
+ high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+
+#ifdef CONFIG_DEBUG_BOOTMEM
+ prom_printf("mem_init: Calling free_all_bootmem().\n");
+#endif
+ totalram_pages = num_physpages = free_all_bootmem() - 1;
+
+ /*
+ * Set up the zero page, mark it reserved, so that page count
+ * is not manipulated when freeing the page from user ptes.
+ */
+ mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
+ if (mem_map_zero == NULL) {
+ prom_printf("paging_init: Cannot alloc zero page.\n");
+ prom_halt();
+ }
+ SetPageReserved(mem_map_zero);
+
+ codepages = (((unsigned long) _etext) - ((unsigned long) _start));
+ codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
+ datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
+ datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
+ initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
+ initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
+
+ printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ codepages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10),
+ PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
+
+ if (tlb_type == cheetah || tlb_type == cheetah_plus)
+ cheetah_ecache_flush_init();
+}
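+
+/* Editor's worked example (not part of the original code) for the
+ * sparc64_valid_addr_bitmap sizing in mem_init() above: one bit covers a
+ * 4MB (1 << 22) chunk of physical memory and each unsigned long holds
+ * 1 << 6 bits, so with PAGE_SHIFT == 13 and 4GB of RAM (last_valid_pfn of
+ * 0x80000) we get i = (0x80000 >> (9 + 6)) + 1 = 17 longs, i.e. 136 bytes.
+ */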
+
+void free_initmem (void)
+{
+ unsigned long addr, initend;
+
+ /*
+	 * The init section is aligned to 8k in vmlinux.lds. Page-align for page sizes larger than 8k.
+ */
+ addr = PAGE_ALIGN((unsigned long)(__init_begin));
+ initend = (unsigned long)(__init_end) & PAGE_MASK;
+ for (; addr < initend; addr += PAGE_SIZE) {
+ unsigned long page;
+ struct page *p;
+
+ page = (addr +
+ ((unsigned long) __va(kern_base)) -
+ ((unsigned long) KERNBASE));
+ memset((void *)addr, 0xcc, PAGE_SIZE);
+ p = virt_to_page(page);
+
+ ClearPageReserved(p);
+ set_page_count(p, 1);
+ __free_page(p);
+ num_physpages++;
+ totalram_pages++;
+ }
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ struct page *p = virt_to_page(start);
+
+ ClearPageReserved(p);
+ set_page_count(p, 1);
+ __free_page(p);
+ num_physpages++;
+ totalram_pages++;
+ }
+}
+#endif
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
new file mode 100644
index 000000000000..90ca99d0b89c
--- /dev/null
+++ b/arch/sparc64/mm/tlb.c
@@ -0,0 +1,151 @@
+/* arch/sparc64/mm/tlb.c
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* Heavily inspired by the ppc64 code. */
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
+ { NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
+
+void flush_tlb_pending(void)
+{
+ struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+
+ if (mp->tlb_nr) {
+ if (CTX_VALID(mp->mm->context)) {
+#ifdef CONFIG_SMP
+ smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
+ &mp->vaddrs[0]);
+#else
+ __flush_tlb_pending(CTX_HWBITS(mp->mm->context),
+ mp->tlb_nr, &mp->vaddrs[0]);
+#endif
+ }
+ mp->tlb_nr = 0;
+ }
+}
+
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+{
+ struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+ unsigned long nr;
+
+ vaddr &= PAGE_MASK;
+ if (pte_exec(orig))
+ vaddr |= 0x1UL;
+
+ if (pte_dirty(orig)) {
+ unsigned long paddr, pfn = pte_pfn(orig);
+ struct address_space *mapping;
+ struct page *page;
+
+ if (!pfn_valid(pfn))
+ goto no_cache_flush;
+
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ goto no_cache_flush;
+
+ /* A real file page? */
+ mapping = page_mapping(page);
+ if (!mapping)
+ goto no_cache_flush;
+
+ paddr = (unsigned long) page_address(page);
+ if ((paddr ^ vaddr) & (1 << 13))
+ flush_dcache_page_all(mm, page);
+ }
+
+no_cache_flush:
+
+ if (mp->tlb_frozen)
+ return;
+
+ nr = mp->tlb_nr;
+
+ if (unlikely(nr != 0 && mm != mp->mm)) {
+ flush_tlb_pending();
+ nr = 0;
+ }
+
+ if (nr == 0)
+ mp->mm = mm;
+
+ mp->vaddrs[nr] = vaddr;
+ mp->tlb_nr = ++nr;
+ if (nr >= TLB_BATCH_NR)
+ flush_tlb_pending();
+}
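+
+/* Editor's sketch (not part of the original code): the
+ * "(paddr ^ vaddr) & (1 << 13)" test above checks whether the kernel and
+ * user mappings of a dirty page index different halves of the D-cache,
+ * assuming a 16K virtually-indexed D-cache and 8K pages, so bit 13 is the
+ * only index bit above the page offset:
+ */
+#if 0
+static int example_dcache_alias(unsigned long kern_addr, unsigned long user_addr)
+{
+	return ((kern_addr ^ user_addr) & (1UL << 13)) != 0;
+}
+#endif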
+
+void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+ unsigned long nr = mp->tlb_nr;
+ long s = start, e = end, vpte_base;
+
+ if (mp->tlb_frozen)
+ return;
+
+ /* If start is greater than end, that is a real problem. */
+ BUG_ON(start > end);
+
+ /* However, straddling the VA space hole is quite normal. */
+ s &= PMD_MASK;
+ e = (e + PMD_SIZE - 1) & PMD_MASK;
+
+ vpte_base = (tlb_type == spitfire ?
+ VPTE_BASE_SPITFIRE :
+ VPTE_BASE_CHEETAH);
+
+ if (unlikely(nr != 0 && mm != mp->mm)) {
+ flush_tlb_pending();
+ nr = 0;
+ }
+
+ if (nr == 0)
+ mp->mm = mm;
+
+ start = vpte_base + (s >> (PAGE_SHIFT - 3));
+ end = vpte_base + (e >> (PAGE_SHIFT - 3));
+
+ /* If the request straddles the VA space hole, we
+ * need to swap start and end. The reason this
+ * occurs is that "vpte_base" is the center of
+ * the linear page table mapping area. Thus,
+ * high addresses with the sign bit set map to
+ * addresses below vpte_base and non-sign bit
+ * addresses map to addresses above vpte_base.
+ */
+ if (end < start) {
+ unsigned long tmp = start;
+
+ start = end;
+ end = tmp;
+ }
+
+ while (start < end) {
+ mp->vaddrs[nr] = start;
+ mp->tlb_nr = ++nr;
+ if (nr >= TLB_BATCH_NR) {
+ flush_tlb_pending();
+ nr = 0;
+ }
+ start += PAGE_SIZE;
+ }
+ if (nr)
+ flush_tlb_pending();
+}
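+
+/* Editor's sketch (not part of the original code): the conversion above from
+ * a virtual address to its VPTE address is roughly
+ * vpte_base + (va >> (PAGE_SHIFT - 3)), with va treated as signed so that
+ * addresses above the VA hole land below vpte_base; that is why start and
+ * end can come out swapped and are reordered before queueing:
+ */
+#if 0
+static unsigned long example_vpte_addr(unsigned long vpte_base, long va)
+{
+	/* Arithmetic shift of the signed va preserves the hole straddle. */
+	return vpte_base + (va >> (PAGE_SHIFT - 3));
+}
+#endif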
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
new file mode 100644
index 000000000000..7a0934321010
--- /dev/null
+++ b/arch/sparc64/mm/ultra.S
@@ -0,0 +1,583 @@
+/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
+ * ultra.S: Don't expand these all over the place...
+ *
+ * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <asm/asi.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/spitfire.h>
+#include <asm/mmu_context.h>
+#include <asm/pil.h>
+#include <asm/head.h>
+#include <asm/thread_info.h>
+#include <asm/cacheflush.h>
+
+ /* Basically, most of the Spitfire vs. Cheetah madness
+ * has to do with the fact that Cheetah does not support
+ * IMMU flushes out of the secondary context. Someone needs
+ * to throw a south lake birthday party for the folks
+ * in Microelectronics who refused to fix this shit.
+ */
+
+ /* This file is meant to be read efficiently by the CPU, not humans.
+	 * Try not to fuck this up for anybody...
+ */
+ .text
+ .align 32
+ .globl __flush_tlb_mm
+__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
+ ldxa [%o1] ASI_DMMU, %g2
+ cmp %g2, %o0
+ bne,pn %icc, __spitfire_flush_tlb_mm_slow
+ mov 0x50, %g3
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ retl
+ flush %g6
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .align 32
+ .globl __flush_tlb_pending
+__flush_tlb_pending:
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ rdpr %pstate, %g7
+ sllx %o1, 3, %o1
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, %pstate
+ mov SECONDARY_CONTEXT, %o4
+ ldxa [%o4] ASI_DMMU, %g2
+ stxa %o0, [%o4] ASI_DMMU
+1: sub %o1, (1 << 3), %o1
+ ldx [%o2 + %o1], %o3
+ andcc %o3, 1, %g0
+ andn %o3, 1, %o3
+ be,pn %icc, 2f
+ or %o3, 0x10, %o3
+ stxa %g0, [%o3] ASI_IMMU_DEMAP
+2: stxa %g0, [%o3] ASI_DMMU_DEMAP
+ membar #Sync
+ brnz,pt %o1, 1b
+ nop
+ stxa %g2, [%o4] ASI_DMMU
+ flush %g6
+ retl
+ wrpr %g7, 0x0, %pstate
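+
+	/* Editor's note (not part of the original code): bit 0 of each
+	 * vaddrs[] entry is set by tlb_batch_add() in tlb.c when the PTE
+	 * was executable, which is why the loop above demaps the I-TLB
+	 * only when that bit is set.
+	 */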
+
+ .align 32
+ .globl __flush_tlb_kernel_range
+__flush_tlb_kernel_range: /* %o0=start, %o1=end */
+ cmp %o0, %o1
+ be,pn %xcc, 2f
+ sethi %hi(PAGE_SIZE), %o4
+ sub %o1, %o0, %o3
+ sub %o3, %o4, %o3
+ or %o0, 0x20, %o0 ! Nucleus
+1: stxa %g0, [%o0 + %o3] ASI_DMMU_DEMAP
+ stxa %g0, [%o0 + %o3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %o3, 1b
+ sub %o3, %o4, %o3
+2: retl
+ flush %g6
+
+__spitfire_flush_tlb_mm_slow:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IE, %pstate
+ stxa %o0, [%o1] ASI_DMMU
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ flush %g6
+ stxa %g2, [%o1] ASI_DMMU
+ flush %g6
+ retl
+ wrpr %g1, 0, %pstate
+
+/*
+ * The following code flushes one page_size worth.
+ */
+#if (PAGE_SHIFT == 13)
+#define ITAG_MASK 0xfe
+#elif (PAGE_SHIFT == 16)
+#define ITAG_MASK 0x7fe
+#else
+#error unsupported PAGE_SIZE
+#endif
+ .align 32
+ .globl __flush_icache_page
+__flush_icache_page: /* %o0 = phys_page */
+ membar #StoreStore
+ srlx %o0, PAGE_SHIFT, %o0
+ sethi %uhi(PAGE_OFFSET), %g1
+ sllx %o0, PAGE_SHIFT, %o0
+ sethi %hi(PAGE_SIZE), %g2
+ sllx %g1, 32, %g1
+ add %o0, %g1, %o0
+1: subcc %g2, 32, %g2
+ bne,pt %icc, 1b
+ flush %o0 + %g2
+ retl
+ nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+
+#if (PAGE_SHIFT != 13)
+#error only page shift of 13 is supported by dcache flush
+#endif
+
+#define DTAG_MASK 0x3
+
+ .align 64
+ .globl __flush_dcache_page
+__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
+ sethi %uhi(PAGE_OFFSET), %g1
+ sllx %g1, 32, %g1
+ sub %o0, %g1, %o0
+ clr %o4
+ srlx %o0, 11, %o0
+ sethi %hi(1 << 14), %o2
+1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group
+ add %o4, (1 << 5), %o4 ! IEU0
+ ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group
+ add %o4, (1 << 5), %o4 ! IEU0
+ ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available
+ add %o4, (1 << 5), %o4 ! IEU0
+ andn %o3, DTAG_MASK, %o3 ! IEU1
+ ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group
+ add %o4, (1 << 5), %o4 ! IEU0
+ andn %g1, DTAG_MASK, %g1 ! IEU1
+ cmp %o0, %o3 ! IEU1 Group
+ be,a,pn %xcc, dflush1 ! CTI
+ sub %o4, (4 << 5), %o4 ! IEU0 (Group)
+ cmp %o0, %g1 ! IEU1 Group
+ andn %g2, DTAG_MASK, %g2 ! IEU0
+ be,a,pn %xcc, dflush2 ! CTI
+ sub %o4, (3 << 5), %o4 ! IEU0 (Group)
+ cmp %o0, %g2 ! IEU1 Group
+ andn %g3, DTAG_MASK, %g3 ! IEU0
+ be,a,pn %xcc, dflush3 ! CTI
+ sub %o4, (2 << 5), %o4 ! IEU0 (Group)
+ cmp %o0, %g3 ! IEU1 Group
+ be,a,pn %xcc, dflush4 ! CTI
+ sub %o4, (1 << 5), %o4 ! IEU0
+2: cmp %o4, %o2 ! IEU1 Group
+ bne,pt %xcc, 1b ! CTI
+ nop ! IEU0
+
+	/* The I-cache does not snoop local stores, so we had
+	 * better flush it too when necessary.
+ */
+ brnz,pt %o1, __flush_icache_page
+ sllx %o0, 11, %o0
+ retl
+ nop
+
+dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
+ add %o4, (1 << 5), %o4
+dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
+ add %o4, (1 << 5), %o4
+dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
+ add %o4, (1 << 5), %o4
+dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
+ add %o4, (1 << 5), %o4
+ membar #Sync
+ ba,pt %xcc, 2b
+ nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+ .align 32
+__prefill_dtlb:
+ rdpr %pstate, %g7
+ wrpr %g7, PSTATE_IE, %pstate
+ mov TLB_TAG_ACCESS, %g1
+ stxa %o5, [%g1] ASI_DMMU
+ stxa %o2, [%g0] ASI_DTLB_DATA_IN
+ flush %g6
+ retl
+ wrpr %g7, %pstate
+__prefill_itlb:
+ rdpr %pstate, %g7
+ wrpr %g7, PSTATE_IE, %pstate
+ mov TLB_TAG_ACCESS, %g1
+ stxa %o5, [%g1] ASI_IMMU
+ stxa %o2, [%g0] ASI_ITLB_DATA_IN
+ flush %g6
+ retl
+ wrpr %g7, %pstate
+
+ .globl __update_mmu_cache
+__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
+ srlx %o1, PAGE_SHIFT, %o1
+ andcc %o3, FAULT_CODE_DTLB, %g0
+ sllx %o1, PAGE_SHIFT, %o5
+ bne,pt %xcc, __prefill_dtlb
+ or %o5, %o0, %o5
+ ba,a,pt %xcc, __prefill_itlb
+
+ /* Cheetah specific versions, patched at boot time.
+ *
+	 * The writes to the PRIMARY_CONTEXT register in this file are
+	 * safe even on Cheetah+ and later wrt. the page size fields.
+	 * The nucleus page size fields do not matter because we make
+	 * no data references, and these instructions execute out of a
+	 * locked I-TLB entry sitting in the fully associative I-TLB.
+ * This sequence should also never trap.
+ */
+__cheetah_flush_tlb_mm: /* 15 insns */
+ rdpr %pstate, %g7
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, 0x0, %pstate
+ wrpr %g0, 1, %tl
+ mov PRIMARY_CONTEXT, %o2
+ mov 0x40, %g3
+ ldxa [%o2] ASI_DMMU, %g2
+ stxa %o0, [%o2] ASI_DMMU
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ stxa %g2, [%o2] ASI_DMMU
+ flush %g6
+ wrpr %g0, 0, %tl
+ retl
+ wrpr %g7, 0x0, %pstate
+
+__cheetah_flush_tlb_pending: /* 22 insns */
+ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+ rdpr %pstate, %g7
+ sllx %o1, 3, %o1
+ andn %g7, PSTATE_IE, %g2
+ wrpr %g2, 0x0, %pstate
+ wrpr %g0, 1, %tl
+ mov PRIMARY_CONTEXT, %o4
+ ldxa [%o4] ASI_DMMU, %g2
+ stxa %o0, [%o4] ASI_DMMU
+1: sub %o1, (1 << 3), %o1
+ ldx [%o2 + %o1], %o3
+ andcc %o3, 1, %g0
+ be,pn %icc, 2f
+ andn %o3, 1, %o3
+ stxa %g0, [%o3] ASI_IMMU_DEMAP
+2: stxa %g0, [%o3] ASI_DMMU_DEMAP
+ brnz,pt %o1, 1b
+ membar #Sync
+ stxa %g2, [%o4] ASI_DMMU
+ flush %g6
+ wrpr %g0, 0, %tl
+ retl
+ wrpr %g7, 0x0, %pstate
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+flush_dcpage_cheetah: /* 11 insns */
+ sethi %uhi(PAGE_OFFSET), %g1
+ sllx %g1, 32, %g1
+ sub %o0, %g1, %o0
+ sethi %hi(PAGE_SIZE), %o4
+1: subcc %o4, (1 << 5), %o4
+ stxa %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
+ membar #Sync
+ bne,pt %icc, 1b
+ nop
+ retl /* I-cache flush never needed on Cheetah, see callers. */
+ nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+cheetah_patch_one:
+1: lduw [%o1], %g1
+ stw %g1, [%o0]
+ flush %o0
+ subcc %o2, 1, %o2
+ add %o1, 4, %o1
+ bne,pt %icc, 1b
+ add %o0, 4, %o0
+ retl
+ nop
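+
+	/* Editor's note (not part of the original code): %o0 is the generic
+	 * routine being overwritten, %o1 the Cheetah replacement and %o2 the
+	 * instruction count, so the "15 insns" / "22 insns" / "11 insns"
+	 * annotations above must match the mov immediates passed below.
+	 */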
+
+ .globl cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+ save %sp, -128, %sp
+
+ sethi %hi(__flush_tlb_mm), %o0
+ or %o0, %lo(__flush_tlb_mm), %o0
+ sethi %hi(__cheetah_flush_tlb_mm), %o1
+ or %o1, %lo(__cheetah_flush_tlb_mm), %o1
+ call cheetah_patch_one
+ mov 15, %o2
+
+ sethi %hi(__flush_tlb_pending), %o0
+ or %o0, %lo(__flush_tlb_pending), %o0
+ sethi %hi(__cheetah_flush_tlb_pending), %o1
+ or %o1, %lo(__cheetah_flush_tlb_pending), %o1
+ call cheetah_patch_one
+ mov 22, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ sethi %hi(__flush_dcache_page), %o0
+ or %o0, %lo(__flush_dcache_page), %o0
+ sethi %hi(flush_dcpage_cheetah), %o1
+ or %o1, %lo(flush_dcpage_cheetah), %o1
+ call cheetah_patch_one
+ mov 11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+ ret
+ restore
+
+#ifdef CONFIG_SMP
+ /* These are all called by the slaves of a cross call, at
+ * trap level 1, with interrupts fully disabled.
+ *
+ * Register usage:
+ * %g5 mm->context (all tlb flushes)
+ * %g1 address arg 1 (tlb page and range flushes)
+ * %g7 address arg 2 (tlb range flush only)
+ *
+ * %g6 ivector table, don't touch
+ * %g2 scratch 1
+ * %g3 scratch 2
+ * %g4 scratch 3
+ *
+ * TODO: Make xcall TLB range flushes use the tricks above... -DaveM
+ */
+ .align 32
+ .globl xcall_flush_tlb_mm
+xcall_flush_tlb_mm:
+ mov PRIMARY_CONTEXT, %g2
+ mov 0x40, %g4
+ ldxa [%g2] ASI_DMMU, %g3
+ stxa %g5, [%g2] ASI_DMMU
+ stxa %g0, [%g4] ASI_DMMU_DEMAP
+ stxa %g0, [%g4] ASI_IMMU_DEMAP
+ stxa %g3, [%g2] ASI_DMMU
+ retry
+
+ .globl xcall_flush_tlb_pending
+xcall_flush_tlb_pending:
+ /* %g5=context, %g1=nr, %g7=vaddrs[] */
+ sllx %g1, 3, %g1
+ mov PRIMARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g2
+ stxa %g5, [%g4] ASI_DMMU
+1: sub %g1, (1 << 3), %g1
+ ldx [%g7 + %g1], %g5
+ andcc %g5, 0x1, %g0
+ be,pn %icc, 2f
+
+ andn %g5, 0x1, %g5
+ stxa %g0, [%g5] ASI_IMMU_DEMAP
+2: stxa %g0, [%g5] ASI_DMMU_DEMAP
+ membar #Sync
+ brnz,pt %g1, 1b
+ nop
+ stxa %g2, [%g4] ASI_DMMU
+ retry
+
+ .globl xcall_flush_tlb_kernel_range
+xcall_flush_tlb_kernel_range:
+ sethi %hi(PAGE_SIZE - 1), %g2
+ or %g2, %lo(PAGE_SIZE - 1), %g2
+ andn %g1, %g2, %g1
+ andn %g7, %g2, %g7
+ sub %g7, %g1, %g3
+ add %g2, 1, %g2
+ sub %g3, %g2, %g3
+ or %g1, 0x20, %g1 ! Nucleus
+1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g1 + %g3] ASI_IMMU_DEMAP
+ membar #Sync
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ retry
+ nop
+ nop
+
+ /* This runs in a very controlled environment, so we do
+ * not need to worry about BH races etc.
+ */
+ .globl xcall_sync_tick
+xcall_sync_tick:
+ rdpr %pstate, %g2
+ wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ sethi %hi(109f), %g7
+ b,pt %xcc, etrap_irq
+109: or %g7, %lo(109b), %g7
+ call smp_synchronize_tick_client
+ nop
+ clr %l6
+ b rtrap_xcall
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+
+	/* NOTE: This is SPECIAL!!  We do etrap/rtrap; however,
+	 * we choose to deal with the "BH's run with
+	 * %pil==15" problem (described in asm/pil.h)
+	 * by just invoking rtrap directly past where
+	 * BH's are checked for.
+ *
+ * We do it like this because we do not want %pil==15
+ * lockups to prevent regs being reported.
+ */
+ .globl xcall_report_regs
+xcall_report_regs:
+ rdpr %pstate, %g2
+ wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ sethi %hi(109f), %g7
+ b,pt %xcc, etrap_irq
+109: or %g7, %lo(109b), %g7
+ call __show_regs
+ add %sp, PTREGS_OFF, %o0
+ clr %l6
+ /* Has to be a non-v9 branch due to the large distance. */
+ b rtrap_xcall
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+ .align 32
+ .globl xcall_flush_dcache_page_cheetah
+xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
+ sethi %hi(PAGE_SIZE), %g3
+1: subcc %g3, (1 << 5), %g3
+ stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
+ membar #Sync
+ bne,pt %icc, 1b
+ nop
+ retry
+ nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+ .globl xcall_flush_dcache_page_spitfire
+xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
+ %g7 == kernel page virtual address
+ %g5 == (page->mapping != NULL) */
+#ifdef DCACHE_ALIASING_POSSIBLE
+	srlx	%g1, (13 - 2), %g1	! Form tag comparator
+ sethi %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
+ sub %g3, (1 << 5), %g3 ! D$ linesize == 32
+1: ldxa [%g3] ASI_DCACHE_TAG, %g2
+ andcc %g2, 0x3, %g0
+ be,pn %xcc, 2f
+ andn %g2, 0x3, %g2
+ cmp %g2, %g1
+
+ bne,pt %xcc, 2f
+ nop
+ stxa %g0, [%g3] ASI_DCACHE_TAG
+ membar #Sync
+2: cmp %g3, 0
+ bne,pt %xcc, 1b
+ sub %g3, (1 << 5), %g3
+
+ brz,pn %g5, 2f
+#endif /* DCACHE_ALIASING_POSSIBLE */
+ sethi %hi(PAGE_SIZE), %g3
+
+1: flush %g7
+ subcc %g3, (1 << 5), %g3
+ bne,pt %icc, 1b
+ add %g7, (1 << 5), %g7
+
+2: retry
+ nop
+ nop
+
+ .globl xcall_promstop
+xcall_promstop:
+ rdpr %pstate, %g2
+ wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ sethi %hi(109f), %g7
+ b,pt %xcc, etrap_irq
+109: or %g7, %lo(109b), %g7
+ flushw
+ call prom_stopself
+ nop
+ /* We should not return, just spin if we do... */
+1: b,a,pt %xcc, 1b
+ nop
+
+ .data
+
+errata32_hwbug:
+ .xword 0
+
+ .text
+
+ /* These two are not performance critical... */
+ .globl xcall_flush_tlb_all_spitfire
+xcall_flush_tlb_all_spitfire:
+ /* Spitfire Errata #32 workaround. */
+ sethi %hi(errata32_hwbug), %g4
+ stx %g0, [%g4 + %lo(errata32_hwbug)]
+
+ clr %g2
+ clr %g3
+1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4
+ and %g4, _PAGE_L, %g5
+ brnz,pn %g5, 2f
+ mov TLB_TAG_ACCESS, %g7
+
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+ stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ /* Spitfire Errata #32 workaround. */
+ sethi %hi(errata32_hwbug), %g4
+ stx %g0, [%g4 + %lo(errata32_hwbug)]
+
+2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4
+ and %g4, _PAGE_L, %g5
+ brnz,pn %g5, 2f
+ mov TLB_TAG_ACCESS, %g7
+
+ stxa %g0, [%g7] ASI_IMMU
+ membar #Sync
+ stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+
+ /* Spitfire Errata #32 workaround. */
+ sethi %hi(errata32_hwbug), %g4
+ stx %g0, [%g4 + %lo(errata32_hwbug)]
+
+2: add %g2, 1, %g2
+ cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT
+ ble,pt %icc, 1b
+ sll %g2, 3, %g3
+ flush %g6
+ retry
+
+ .globl xcall_flush_tlb_all_cheetah
+xcall_flush_tlb_all_cheetah:
+ mov 0x80, %g2
+ stxa %g0, [%g2] ASI_DMMU_DEMAP
+ stxa %g0, [%g2] ASI_IMMU_DEMAP
+ retry
+
+ /* These just get rescheduled to PIL vectors. */
+ .globl xcall_call_function
+xcall_call_function:
+ wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
+ retry
+
+ .globl xcall_receive_signal
+xcall_receive_signal:
+ wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
+ retry
+
+ .globl xcall_capture
+xcall_capture:
+ wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
+ retry
+
+#endif /* CONFIG_SMP */