Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/Makefile    4
-rw-r--r--   arch/mips/mm/c-r4k.c     4
-rw-r--r--   arch/mips/mm/init.c      8
-rw-r--r--   arch/mips/mm/pg-sb1.c    8
-rw-r--r--   arch/mips/mm/tlbex.c    30
5 files changed, 27 insertions, 27 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index f61e038b4440..b56a0abdc3d4 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -5,8 +5,8 @@
obj-y += cache.o extable.o fault.o init.o pgtable.o \
tlbex.o tlbex-fault.o
-obj-$(CONFIG_MIPS32) += ioremap.o pgtable-32.o
-obj-$(CONFIG_MIPS64) += pgtable-64.o
+obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
+obj-$(CONFIG_64BIT) += pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_CPU_MIPS32) += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a03ebb2cba67..20d40725e5bb 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -723,10 +723,10 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
".set push\n\t"
".set noat\n\t"
".set mips3\n\t"
-#ifdef CONFIG_MIPS32
+#ifdef CONFIG_32BIT
"la $at,1f\n\t"
#endif
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
"dla $at,1f\n\t"
#endif
"cache %0,($at)\n\t"
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9c9a271c8a3a..dc6830b10fab 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -96,7 +96,7 @@ static void __init kmap_init(void)
kmap_prot = PAGE_KERNEL;
}
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
static void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
@@ -125,7 +125,7 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
j = 0;
}
}
-#endif /* CONFIG_MIPS64 */
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_HIGHMEM */
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -258,7 +258,7 @@ void __init mem_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
/* Switch from KSEG0 to XKPHYS addresses */
start = (unsigned long)phys_to_virt(CPHYSADDR(start));
end = (unsigned long)phys_to_virt(CPHYSADDR(end));
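An illustration of what the two converted lines do for a single address (sketch only, the function name is invented): CPHYSADDR() masks off the KSEG0 segment bits to recover the physical address, and phys_to_virt() rebuilds a pointer in the kernel's native unmapped segment, which is XKPHYS under CONFIG_64BIT and KSEG0 under CONFIG_32BIT.

#include <linux/kernel.h>
#include <asm/addrspace.h>
#include <asm/io.h>

static void show_initrd_remap(unsigned long kseg0_addr)
{
	unsigned long phys = CPHYSADDR(kseg0_addr);	/* strip the segment bits */
	void *virt = phys_to_virt(phys);		/* native kernel mapping of the same page */

	printk(KERN_DEBUG "initrd: %lx -> phys %lx -> virt %p\n",
	       kseg0_addr, phys, virt);
}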
@@ -286,7 +286,7 @@ void free_initmem(void)
addr = (unsigned long) &__init_begin;
while (addr < (unsigned long) &__init_end) {
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
page = PAGE_OFFSET | CPHYSADDR(addr);
#else
page = addr;
diff --git a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c
index 59d131b5e536..b63e1ca350f5 100644
--- a/arch/mips/mm/pg-sb1.c
+++ b/arch/mips/mm/pg-sb1.c
@@ -114,7 +114,7 @@ static inline void copy_page_cpu(void *to, void *from)
" pref " SB1_PREF_STORE_STREAMED_HINT ", -64(%1)\n"
" pref " SB1_PREF_LOAD_STREAMED_HINT ", -32(%0)\n"
"1: pref " SB1_PREF_STORE_STREAMED_HINT ", -32(%1)\n"
-# ifdef CONFIG_MIPS64
+# ifdef CONFIG_64BIT
" ld $8, -128(%0) \n" /* Block copy a cacheline */
" ld $9, -120(%0) \n"
" ld $10, -112(%0) \n"
@@ -148,7 +148,7 @@ static inline void copy_page_cpu(void *to, void *from)
" daddiu %0, %0, -128 \n"
" daddiu %1, %1, -128 \n"
#endif
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
" ld $8, 0(%0) \n" /* Block copy a cacheline */
"1: ld $9, 8(%0) \n"
" ld $10, 16(%0) \n"
@@ -178,7 +178,7 @@ static inline void copy_page_cpu(void *to, void *from)
" daddiu %0, %0, 32 \n"
" daddiu %1, %1, 32 \n"
" bnel %0, %2, 1b \n"
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
" ld $8, 0(%0) \n"
#else
" lw $2, 0(%0) \n"
@@ -186,7 +186,7 @@ static inline void copy_page_cpu(void *to, void *from)
" .set pop \n"
: "+r" (src), "+r" (dst)
: "r" (end)
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
: "$8","$9","$10","$11","memory");
#else
: "$2","$3","$6","$7","$8","$9","$10","$11","memory");
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 87e229f4d3d5..592377fa694d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -448,7 +448,7 @@ L_LA(_r3000_write_probe_fail)
L_LA(_r3000_write_probe_ok)
/* convenience macros for instructions */
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
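With the renamed guard, the same i_LW/i_SW/i_SLL names expand to the doubleword micro-assembler ops (i_ld, i_sd, i_dsll) under CONFIG_64BIT and to the word-sized ones under CONFIG_32BIT, so the handler builders further down emit pointer-width code without duplication. A hedged usage sketch, with the helper name invented here and the call pattern copied from the existing builders:

/* Sketch only: load a pointer-sized PTE through the generic macro.
 * Expands to i_ld() on CONFIG_64BIT, i_lw() on CONFIG_32BIT. */
static __init void emit_load_pte(u32 **p, unsigned int pte, unsigned int ptr)
{
	i_LW(p, pte, 0, ptr);	/* pte = *(ptr + 0) */
}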
@@ -486,7 +486,7 @@ L_LA(_r3000_write_probe_ok)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
static __init int __attribute__((unused)) in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
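The body of in_compat_space_p() lies outside this hunk. The usual test, shown here as an assumption with a made-up name, is whether the address is just the sign extension of its low 32 bits, i.e. reachable with the short lui/addiu sequence that i_LA_mostly() emits for compat addresses:

/* Assumption/sketch, not the patch's actual body: an address is in the
 * 32-bit compat space when sign-extending its low 32 bits reproduces it. */
static inline int in_compat_space_sketch(long addr)
{
	return addr == (long)(int)addr;
}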
@@ -516,7 +516,7 @@ static __init int rel_lo(long val)
static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
-#if CONFIG_MIPS64
+#if CONFIG_64BIT
if (!in_compat_space_p(addr)) {
i_lui(buf, rs, rel_highest(addr));
if (rel_higher(addr))
@@ -682,7 +682,7 @@ static void il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
#define C0_EPC 14
#define C0_XCONTEXT 20
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
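GET_CONTEXT now reads CP0 XContext (register 20) under CONFIG_64BIT and CP0 Context under CONFIG_32BIT; both hold the PTE base together with the faulting page's VPN2, which is where the refill handlers start. A minimal usage sketch, mirroring how the builders in this file call it through the width-generic i_MFC0:

/* Sketch: emit one mfc0 (32-bit) or dmfc0 (64-bit) reading Context/XContext. */
static __init void emit_get_context_sketch(u32 **p)
{
	GET_CONTEXT(p, K1);	/* k1 <- PTE base | bad VPN2 */
}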
@@ -923,7 +923,7 @@ static __init void build_tlb_write_entry(u32 **p, struct label **l,
}
}
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
/*
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
@@ -1010,7 +1010,7 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
}
}
-#else /* !CONFIG_MIPS64 */
+#else /* !CONFIG_64BIT */
/*
* TMP and PTR are scratch.
@@ -1038,7 +1038,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
-#endif /* !CONFIG_MIPS64 */
+#endif /* !CONFIG_64BIT */
static __init void build_adjust_context(u32 **p, unsigned int ctx)
{
@@ -1159,7 +1159,7 @@ static void __init build_r4000_tlb_refill_handler(void)
/* No need for i_nop */
}
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
@@ -1171,7 +1171,7 @@ static void __init build_r4000_tlb_refill_handler(void)
l_leave(&l, p);
i_eret(&p); /* return from trap */
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
@@ -1182,7 +1182,7 @@ static void __init build_r4000_tlb_refill_handler(void)
* need three, with the second nop'ed and the third being
* unused.
*/
-#ifdef CONFIG_MIPS32
+#ifdef CONFIG_32BIT
if ((p - tlb_handler) > 64)
panic("TLB refill handler space exceeded");
#else
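One way to read the two limits, stated here as an interpretation rather than anything the patch documents: the R4000-style exception vectors place the TLB refill handler at offset 0x000, the XTLB refill handler at 0x080 and the cache-error handler at 0x100. A 32-bit kernel never takes XTLB refill exceptions, so its handler may spill into that slot:

	0x100 bytes / 4 bytes per instruction = 64 instructions

which lines up with the check in the CONFIG_32BIT branch above. A 64-bit kernel uses the XTLB slot itself, so the CONFIG_64BIT path in the next hunks keeps the vector-resident part within 32 instructions and folds anything longer into the unused slot, connected by the wrap-around branch mentioned in the comment.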
@@ -1195,12 +1195,12 @@ static void __init build_r4000_tlb_refill_handler(void)
/*
* Now fold the handler in the TLB refill handler space.
*/
-#ifdef CONFIG_MIPS32
+#ifdef CONFIG_32BIT
f = final_handler;
/* Simplest case, just copy the handler. */
copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
-#else /* CONFIG_MIPS64 */
+#else /* CONFIG_64BIT */
f = final_handler + 32;
if ((p - tlb_handler) <= 32) {
/* Just copy the handler. */
@@ -1235,7 +1235,7 @@ static void __init build_r4000_tlb_refill_handler(void)
copy_handler(relocs, labels, split, p, final_handler);
final_len = (f - (final_handler + 32)) + (p - split);
}
-#endif /* CONFIG_MIPS64 */
+#endif /* CONFIG_64BIT */
resolve_relocs(relocs, labels);
printk("Synthesized TLB refill handler (%u instructions).\n",
@@ -1605,7 +1605,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
struct reloc **r, unsigned int pte,
unsigned int ptr)
{
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
@@ -1636,7 +1636,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
l_leave(l, *p);
i_eret(p); /* return from trap */
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}