author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/lib-64
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/mips/lib-64')
-rw-r--r--	arch/mips/lib-64/Makefile	|  25
-rw-r--r--	arch/mips/lib-64/csum_partial.S	| 242
-rw-r--r--	arch/mips/lib-64/dump_tlb.c	| 211
-rw-r--r--	arch/mips/lib-64/memset.S	| 142
-rw-r--r--	arch/mips/lib-64/watch.S	|  57
5 files changed, 677 insertions(+), 0 deletions(-)
diff --git a/arch/mips/lib-64/Makefile b/arch/mips/lib-64/Makefile
new file mode 100644
index 000000000000..fd6a2bafdfcf
--- /dev/null
+++ b/arch/mips/lib-64/Makefile
@@ -0,0 +1,25 @@
+#
+# Makefile for MIPS-specific library files..
+#
+
+lib-y += csum_partial.o memset.o watch.o
+
+obj-$(CONFIG_CPU_MIPS32) += dump_tlb.o
+obj-$(CONFIG_CPU_MIPS64) += dump_tlb.o
+obj-$(CONFIG_CPU_NEVADA) += dump_tlb.o
+obj-$(CONFIG_CPU_R10000) += dump_tlb.o
+obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
+obj-$(CONFIG_CPU_R4300) += dump_tlb.o
+obj-$(CONFIG_CPU_R4X00) += dump_tlb.o
+obj-$(CONFIG_CPU_R5000) += dump_tlb.o
+obj-$(CONFIG_CPU_R5432) += dump_tlb.o
+obj-$(CONFIG_CPU_R6000) +=
+obj-$(CONFIG_CPU_R8000) +=
+obj-$(CONFIG_CPU_RM7000) += dump_tlb.o
+obj-$(CONFIG_CPU_RM9000) += dump_tlb.o
+obj-$(CONFIG_CPU_SB1) += dump_tlb.o
+obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
+obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o
+obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o
+
+EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/lib-64/csum_partial.S b/arch/mips/lib-64/csum_partial.S
new file mode 100644
index 000000000000..25aba660cc9c
--- /dev/null
+++ b/arch/mips/lib-64/csum_partial.S
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Quick'n'dirty IP checksum ...
+ *
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#define ADDC(sum,reg) \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define sum v0
+
+ .text
+ .set noreorder
+
+/* unknown src alignment and < 8 bytes to go */
+small_csumcpy:
+ move a1, ta2
+
+ andi ta0, a1, 4
+ beqz ta0, 1f
+ andi ta0, a1, 2
+
+ /* Still a full word to go */
+ ulw ta1, (src)
+ daddiu src, 4
+ ADDC(sum, ta1)
+
+1: move ta1, zero
+ beqz ta0, 1f
+ andi ta0, a1, 1
+
+ /* Still a halfword to go */
+ ulhu ta1, (src)
+ daddiu src, 2
+
+1: beqz ta0, 1f
+ sll ta1, ta1, 16
+
+ lbu ta2, (src)
+ nop
+
+#ifdef __MIPSEB__
+ sll ta2, ta2, 8
+#endif
+ or ta1, ta2
+
+1: ADDC(sum, ta1)
+
+ /* fold checksum */
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz t3, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a2)
+ jr ra
+ .set noreorder
+
+/* ------------------------------------------------------------------------- */
+
+ .align 5
+LEAF(csum_partial)
+ move sum, zero
+ move t3, zero
+
+ sltiu t8, a1, 0x8
+ bnez t8, small_csumcpy /* < 8 bytes to copy */
+ move ta2, a1
+
+ beqz a1, out
+ andi t3, src, 0x1 /* odd buffer? */
+
+hword_align:
+ beqz t3, word_align
+ andi t8, src, 0x2
+
+ lbu ta0, (src)
+ dsubu a1, a1, 0x1
+#ifdef __MIPSEL__
+ sll ta0, ta0, 8
+#endif
+ ADDC(sum, ta0)
+ daddu src, src, 0x1
+ andi t8, src, 0x2
+
+word_align:
+ beqz t8, dword_align
+ sltiu t8, a1, 56
+
+ lhu ta0, (src)
+ dsubu a1, a1, 0x2
+ ADDC(sum, ta0)
+ sltiu t8, a1, 56
+ daddu src, src, 0x2
+
+dword_align:
+ bnez t8, do_end_words
+ move t8, a1
+
+ andi t8, src, 0x4
+ beqz t8, qword_align
+ andi t8, src, 0x8
+
+ lw ta0, 0x00(src)
+ dsubu a1, a1, 0x4
+ ADDC(sum, ta0)
+ daddu src, src, 0x4
+ andi t8, src, 0x8
+
+qword_align:
+ beqz t8, oword_align
+ andi t8, src, 0x10
+
+ lw ta0, 0x00(src)
+ lw ta1, 0x04(src)
+ dsubu a1, a1, 0x8
+ ADDC(sum, ta0)
+ ADDC(sum, ta1)
+ daddu src, src, 0x8
+ andi t8, src, 0x10
+
+oword_align:
+ beqz t8, begin_movement
+ dsrl t8, a1, 0x7
+
+ lw ta3, 0x08(src)
+ lw t0, 0x0c(src)
+ lw ta0, 0x00(src)
+ lw ta1, 0x04(src)
+ ADDC(sum, ta3)
+ ADDC(sum, t0)
+ ADDC(sum, ta0)
+ ADDC(sum, ta1)
+ dsubu a1, a1, 0x10
+ daddu src, src, 0x10
+ dsrl t8, a1, 0x7
+
+begin_movement:
+ beqz t8, 1f
+ andi ta2, a1, 0x40
+
+move_128bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
+ CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
+ CSUM_BIGCHUNK(src, 0x40, sum, ta0, ta1, ta3, t0)
+ CSUM_BIGCHUNK(src, 0x60, sum, ta0, ta1, ta3, t0)
+ dsubu t8, t8, 0x01
+ bnez t8, move_128bytes
+ daddu src, src, 0x80
+
+1:
+ beqz ta2, 1f
+ andi ta2, a1, 0x20
+
+move_64bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
+ CSUM_BIGCHUNK(src, 0x20, sum, ta0, ta1, ta3, t0)
+ daddu src, src, 0x40
+
+1:
+ beqz ta2, do_end_words
+ andi t8, a1, 0x1c
+
+move_32bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, ta0, ta1, ta3, t0)
+ andi t8, a1, 0x1c
+ daddu src, src, 0x20
+
+do_end_words:
+ beqz t8, maybe_end_cruft
+ dsrl t8, t8, 0x2
+
+end_words:
+ lw ta0, (src)
+ dsubu t8, t8, 0x1
+ ADDC(sum, ta0)
+ bnez t8, end_words
+ daddu src, src, 0x4
+
+maybe_end_cruft:
+ andi ta2, a1, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a1, ta2 /* XXX ??? */
+ beqz t2, out
+ move a1, ta2
+
+end_bytes:
+ lb ta0, (src)
+ dsubu a1, a1, 0x1
+ bnez a2, end_bytes
+ daddu src, src, 0x1
+
+out:
+ jr ra
+ move v0, sum
+ END(csum_partial)
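The ADDC macro above performs the end-around-carry addition that the Internet checksum requires: after each addu, sltu captures the carry out of bit 31 and adds it back into the sum, and the "fold checksum" block later collapses the 32-bit running sum into 16 bits. A rough C model of that arithmetic (an illustrative sketch only, not code from this commit; addc() and csum_words() are hypothetical names, and the unaligned-head and odd-byte handling of the real routine is omitted):

#include <stdint.h>
#include <stddef.h>

/* End-around-carry add: wrap the carry out of bit 31 back into bit 0. */
static uint32_t addc(uint32_t sum, uint32_t word)
{
	sum += word;
	if (sum < word)		/* unsigned overflow => a carry was dropped */
		sum += 1;
	return sum;
}

/* Checksum an aligned buffer of 32-bit words, like the word loops above. */
static uint16_t csum_words(const uint32_t *p, size_t nwords, uint32_t partial)
{
	uint32_t sum = partial;

	while (nwords--)
		sum = addc(sum, *p++);

	/* Fold to 16 bits; two passes suffice for any 32-bit sum. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)sum;
}

Because this one's-complement addition is commutative and associative, the assembly is free to consume the buffer in whatever chunk sizes are convenient (128-, 64- and 32-byte blocks, then words, halfwords and bytes) and still arrive at the same folded result.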
diff --git a/arch/mips/lib-64/dump_tlb.c b/arch/mips/lib-64/dump_tlb.c
new file mode 100644
index 000000000000..42f88e055b4c
--- /dev/null
+++ b/arch/mips/lib-64/dump_tlb.c
@@ -0,0 +1,211 @@
+/*
+ * Dump R4x00 TLB for debugging purposes.
+ *
+ * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
+ * Copyright (C) 1999 by Silicon Graphics, Inc.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cachectl.h>
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+static inline const char *msk2str(unsigned int mask)
+{
+ switch (mask) {
+ case PM_4K: return "4kb";
+ case PM_16K: return "16kb";
+ case PM_64K: return "64kb";
+ case PM_256K: return "256kb";
+#ifndef CONFIG_CPU_VR41XX
+ case PM_1M: return "1Mb";
+ case PM_4M: return "4Mb";
+ case PM_16M: return "16Mb";
+ case PM_64M: return "64Mb";
+ case PM_256M: return "256Mb";
+#endif
+ }
+
+ return "unknown";
+}
+
+#define BARRIER() \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ "nop;nop;nop;nop;nop;nop;nop\n\t" \
+ ".set\treorder");
+
+void dump_tlb(int first, int last)
+{
+ unsigned long s_entryhi, entryhi, entrylo0, entrylo1, asid;
+ unsigned int s_index, pagemask, c0, c1, i;
+
+ s_entryhi = read_c0_entryhi();
+ s_index = read_c0_index();
+ asid = s_entryhi & 0xff;
+
+ for (i = first; i <= last; i++) {
+ write_c0_index(i);
+ BARRIER();
+ tlb_read();
+ BARRIER();
+ pagemask = read_c0_pagemask();
+ entryhi = read_c0_entryhi();
+ entrylo0 = read_c0_entrylo0();
+ entrylo1 = read_c0_entrylo1();
+
+ /* Unused entries have a virtual address of CKSEG0. */
+ if ((entryhi & ~0x1ffffUL) != CKSEG0
+ && (entryhi & 0xff) == asid) {
+ /*
+ * Only print entries in use
+ */
+ printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
+
+ c0 = (entrylo0 >> 3) & 7;
+ c1 = (entrylo1 >> 3) & 7;
+
+ printk("va=%011lx asid=%02lx\n",
+ (entryhi & ~0x1fffUL),
+ entryhi & 0xff);
+ printk("\t[pa=%011lx c=%d d=%d v=%d g=%ld] ",
+ (entrylo0 << 6) & PAGE_MASK, c0,
+ (entrylo0 & 4) ? 1 : 0,
+ (entrylo0 & 2) ? 1 : 0,
+ (entrylo0 & 1));
+ printk("[pa=%011lx c=%d d=%d v=%d g=%ld]\n",
+ (entrylo1 << 6) & PAGE_MASK, c1,
+ (entrylo1 & 4) ? 1 : 0,
+ (entrylo1 & 2) ? 1 : 0,
+ (entrylo1 & 1));
+ }
+ }
+ printk("\n");
+
+ write_c0_entryhi(s_entryhi);
+ write_c0_index(s_index);
+}
+
+void dump_tlb_all(void)
+{
+ dump_tlb(0, current_cpu_data.tlbsize - 1);
+}
+
+void dump_tlb_wired(void)
+{
+ int wired;
+
+ wired = read_c0_wired();
+ printk("Wired: %d", wired);
+ dump_tlb(0, read_c0_wired());
+}
+
+void dump_tlb_addr(unsigned long addr)
+{
+ unsigned int flags, oldpid;
+ int index;
+
+ local_irq_save(flags);
+ oldpid = read_c0_entryhi() & 0xff;
+ BARRIER();
+ write_c0_entryhi((addr & PAGE_MASK) | oldpid);
+ BARRIER();
+ tlb_probe();
+ BARRIER();
+ index = read_c0_index();
+ write_c0_entryhi(oldpid);
+ local_irq_restore(flags);
+
+ if (index < 0) {
+ printk("No entry for address 0x%08lx in TLB\n", addr);
+ return;
+ }
+
+ printk("Entry %d maps address 0x%08lx\n", index, addr);
+ dump_tlb(index, index);
+}
+
+void dump_tlb_nonwired(void)
+{
+ dump_tlb(read_c0_wired(), current_cpu_data.tlbsize - 1);
+}
+
+void dump_list_process(struct task_struct *t, void *address)
+{
+ pgd_t *page_dir, *pgd;
+ pmd_t *pmd;
+ pte_t *pte, page;
+ unsigned long addr, val;
+
+ addr = (unsigned long) address;
+
+ printk("Addr == %08lx\n", addr);
+ printk("tasks->mm.pgd == %08lx\n", (unsigned long) t->mm->pgd);
+
+ page_dir = pgd_offset(t->mm, 0);
+ printk("page_dir == %016lx\n", (unsigned long) page_dir);
+
+ pgd = pgd_offset(t->mm, addr);
+ printk("pgd == %016lx\n", (unsigned long) pgd);
+
+ pmd = pmd_offset(pgd, addr);
+ printk("pmd == %016lx\n", (unsigned long) pmd);
+
+ pte = pte_offset(pmd, addr);
+ printk("pte == %016lx\n", (unsigned long) pte);
+
+ page = *pte;
+ printk("page == %08lx\n", pte_val(page));
+
+ val = pte_val(page);
+ if (val & _PAGE_PRESENT) printk("present ");
+ if (val & _PAGE_READ) printk("read ");
+ if (val & _PAGE_WRITE) printk("write ");
+ if (val & _PAGE_ACCESSED) printk("accessed ");
+ if (val & _PAGE_MODIFIED) printk("modified ");
+ if (val & _PAGE_R4KBUG) printk("r4kbug ");
+ if (val & _PAGE_GLOBAL) printk("global ");
+ if (val & _PAGE_VALID) printk("valid ");
+ printk("\n");
+}
+
+void dump_list_current(void *address)
+{
+ dump_list_process(current, address);
+}
+
+unsigned int vtop(void *address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned int addr, paddr;
+
+ addr = (unsigned long) address;
+ pgd = pgd_offset(current->mm, addr);
+ pmd = pmd_offset(pgd, addr);
+ pte = pte_offset(pmd, addr);
+ paddr = (CKSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK;
+ paddr |= (addr & ~PAGE_MASK);
+
+ return paddr;
+}
+
+void dump16(unsigned long *p)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ printk("*%08lx == %08lx, ", (unsigned long)p, *p);
+ p++;
+ printk("*%08lx == %08lx\n", (unsigned long)p, *p);
+ p++;
+ }
+}
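dump_tlb() prints only the entries that are in use and decodes the two EntryLo registers directly in the printk arguments: bits 5:3 hold the cache coherency attribute, bit 2 the dirty (writable) flag, bit 1 the valid flag, bit 0 the global flag, and the physical frame number sits above bit 6. A stand-alone sketch of that decoding, pulled out into a helper for clarity (hypothetical code, not part of this commit; it uses user-space printf instead of printk and assumes a 4 KiB page size for PAGE_MASK):

#include <stdio.h>

static void decode_entrylo(unsigned long lo)
{
	unsigned int c = (lo >> 3) & 7;		 /* cache coherency attribute */
	unsigned int d = (lo >> 2) & 1;		 /* dirty, i.e. writable */
	unsigned int v = (lo >> 1) & 1;		 /* valid */
	unsigned int g = lo & 1;		 /* global: ASID is ignored */
	unsigned long pa = (lo << 6) & ~0xfffUL; /* PFN -> physical address */

	printf("[pa=%011lx c=%u d=%u v=%u g=%u]\n", pa, c, d, v, g);
}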
diff --git a/arch/mips/lib-64/memset.S b/arch/mips/lib-64/memset.S
new file mode 100644
index 000000000000..242f1976cfaf
--- /dev/null
+++ b/arch/mips/lib-64/memset.S
@@ -0,0 +1,142 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+ .macro f_fill64 dst, offset, val, fixup
+ EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup)
+ EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup)
+ .endm
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+ .set noreorder
+ .align 5
+LEAF(memset)
+ beqz a1, 1f
+ move v0, a0 /* result */
+
+ andi a1, 0xff /* spread fillword */
+ dsll t1, a1, 8
+ or a1, t1
+ dsll t1, a1, 16
+ or a1, t1
+ dsll t1, a1, 32
+ or a1, t1
+1:
+
+FEXPORT(__bzero)
+ sltiu t0, a2, LONGSIZE /* very small region? */
+ bnez t0, small_memset
+ andi t0, a0, LONGMASK /* aligned? */
+
+ beqz t0, 1f
+ PTR_SUBU t0, LONGSIZE /* alignment in bytes */
+
+#ifdef __MIPSEB__
+ EX(sdl, a1, (a0), first_fixup) /* make dword aligned */
+#endif
+#ifdef __MIPSEL__
+ EX(sdr, a1, (a0), first_fixup) /* make dword aligned */
+#endif
+ PTR_SUBU a0, t0 /* long align ptr */
+ PTR_ADDU a2, t0 /* correct size */
+
+1: ori t1, a2, 0x3f /* # of full blocks */
+ xori t1, 0x3f
+ beqz t1, memset_partial /* no block to fill */
+ andi t0, a2, 0x38
+
+ PTR_ADDU t1, a0 /* end address */
+ .set reorder
+1: PTR_ADDIU a0, 64
+ f_fill64 a0, -64, a1, fwd_fixup
+ bne t1, a0, 1b
+ .set noreorder
+
+memset_partial:
+ PTR_LA t1, 2f /* where to start */
+ .set noat
+ dsrl AT, t0, 1
+ PTR_SUBU t1, AT
+ .set noat
+ jr t1
+ PTR_ADDU a0, t0 /* dest ptr */
+
+ .set push
+ .set noreorder
+ .set nomacro
+ f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */
+2: .set pop
+ andi a2, LONGMASK /* At most one long to go */
+
+ beqz a2, 1f
+ PTR_ADDU a0, a2 /* What's left */
+#ifdef __MIPSEB__
+ EX(sdr, a1, -1(a0), last_fixup)
+#endif
+#ifdef __MIPSEL__
+ EX(sdl, a1, -1(a0), last_fixup)
+#endif
+1: jr ra
+ move a2, zero
+
+small_memset:
+ beqz a2, 2f
+ PTR_ADDU t1, a0, a2
+
+1: PTR_ADDIU a0, 1 /* fill bytewise */
+ bne t1, a0, 1b
+ sb a1, -1(a0)
+
+2: jr ra /* done */
+ move a2, zero
+ END(memset)
+
+first_fixup:
+ jr ra
+ nop
+
+fwd_fixup:
+ PTR_L t0, TI_TASK($28)
+ LONG_L t0, THREAD_BUADDR(t0)
+ andi a2, 0x3f
+ LONG_ADDU a2, t1
+ jr ra
+ LONG_SUBU a2, t0
+
+partial_fixup:
+ PTR_L t0, TI_TASK($28)
+ LONG_L t0, THREAD_BUADDR(t0)
+ andi a2, LONGMASK
+ LONG_ADDU a2, t1
+ jr ra
+ LONG_SUBU a2, t0
+
+last_fixup:
+ jr ra
+ andi v1, a2, LONGMASK
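The routine above spreads the fill byte across a 64-bit register with three shift/or steps, uses sdl/sdr to square up an unaligned head and tail, fills 64-byte blocks through the unrolled f_fill64 macro, and finally jumps into the middle of one last f_fill64 expansion to store the remaining whole longs. A simplified C model of the overall strategy (an illustration only, not the kernel implementation; memset_model is a hypothetical name, and it aligns the head bytewise instead of with sdl/sdr):

#include <stdint.h>
#include <stddef.h>

static void *memset_model(void *s, int c, size_t n)
{
	unsigned char *p = s;
	uint64_t fill = (unsigned char)c;

	fill |= fill << 8;	/* spread the byte ...            */
	fill |= fill << 16;	/* ... across all eight bytes ... */
	fill |= fill << 32;	/* ... of the 64-bit fill word    */

	/* Head: align the pointer (the assembly does this with one sdl/sdr). */
	while (n && ((uintptr_t)p & 7)) {
		*p++ = (unsigned char)c;
		n--;
	}

	/* Body: store whole 64-bit words. */
	while (n >= 8) {
		*(uint64_t *)p = fill;
		p += 8;
		n -= 8;
	}

	/* Tail: remaining bytes. */
	while (n--)
		*p++ = (unsigned char)c;

	return s;
}

The fixup labels at the end of the file are the __ex_table handlers for the EX()-wrapped stores: when a store faults, they compute from the fault address how many bytes were left unwritten and hand that count back in a2.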
diff --git a/arch/mips/lib-64/watch.S b/arch/mips/lib-64/watch.S
new file mode 100644
index 000000000000..f91434013695
--- /dev/null
+++ b/arch/mips/lib-64/watch.S
@@ -0,0 +1,57 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Kernel debug stuff to use the Watch registers.
+ * Useful to find stack overflows, dangling pointers etc.
+ *
+ * Copyright (C) 1995, 1996, 1999, 2001 by Ralf Baechle
+ */
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+ .set noreorder
+/*
+ * Parameter: a0 - physical address to watch
+ * a1 - set bit #1 to trap on load references
+ * bit #0 to trap on store references
+ * Results : none
+ */
+ LEAF(__watch_set)
+ ori a0, 7
+ xori a0, 7
+ or a0, a1
+ mtc0 a0, CP0_WATCHLO
+ sd a0, watch_savelo
+ dsrl32 a0, a0, 0
+
+ jr ra
+ mtc0 zero, CP0_WATCHHI
+ END(__watch_set)
+
+/*
+ * Parameter: none
+ * Results : none
+ */
+ LEAF(__watch_clear)
+ jr ra
+ mtc0 zero, CP0_WATCHLO
+ END(__watch_clear)
+
+/*
+ * Parameter: none
+ * Results : none
+ */
+ LEAF(__watch_reenable)
+ ld t0, watch_savelo
+ jr ra
+ mtc0 t0, CP0_WATCHLO
+ END(__watch_reenable)
+
+/*
+ * Saved value of the c0_watchlo register for watch_reenable()
+ */
+ .local watch_savelo
+ .comm watch_savelo, 8, 8
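__watch_set() rounds the physical address down to a doubleword boundary (the ori/xori pair clears the low three bits), merges in the trap-control bits passed in a1, writes the result to CP0 WatchLo, saves it in watch_savelo for __watch_reenable(), and clears CP0 WatchHi. A hedged usage sketch from C (the prototypes and macro names below are assumptions for illustration only; this commit provides just the assembly entry points and no header):

extern void __watch_set(unsigned long paddr, unsigned int flags);
extern void __watch_clear(void);
extern void __watch_reenable(void);

#define WATCH_TRAP_ON_STORE	0x1	/* bit #0, per the comment above */
#define WATCH_TRAP_ON_LOAD	0x2	/* bit #1, per the comment above */

/* Arm the watch registers on a suspect doubleword, e.g. a stack guard. */
static void watch_stack_guard(unsigned long guard_paddr)
{
	__watch_set(guard_paddr, WATCH_TRAP_ON_LOAD | WATCH_TRAP_ON_STORE);
}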