author    David Daney <david.daney@cavium.com>  2012-10-17 00:48:10 +0200
committer Ralf Baechle <ralf@linux-mips.org>    2012-12-12 16:48:47 +0100
commit    aa1762f49c81a14d0453e4f67f922e4f155510a3 (patch)
tree      ba9c7318105db788ededd51e57b568793be0d5fa
parent    c17a6554782ad531f4713b33fd6339ba67ef6391 (diff)
MIPS: Control huge tlb support via Kconfig symbol MIPS_HUGE_TLB_SUPPORT
We need huge TLBs for HUGETLB_PAGE, or the soon to follow TRANSPARENT_HUGEPAGE. Collect this information under a single Kconfig symbol.

Signed-off-by: David Daney <david.daney@cavium.com>
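For context, the practical effect of the new symbol is that huge-TLB code paths are compiled in whenever either HUGETLB_PAGE or TRANSPARENT_HUGEPAGE is enabled, rather than only for hugetlbfs. A minimal sketch of the guard pattern the patch switches the arch code to (the function name below is illustrative, not from the patch):

    /*
     * Illustrative only: guard huge-TLB handling on the combined
     * CONFIG_MIPS_HUGE_TLB_SUPPORT symbol, which is selected by either
     * CONFIG_HUGETLB_PAGE or CONFIG_TRANSPARENT_HUGEPAGE, instead of
     * testing CONFIG_HUGETLB_PAGE alone.
     */
    #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
    static void example_setup_huge_tlb(void)
    {
            /* huge-page TLB handler setup would go here */
    }
    #else
    static void example_setup_huge_tlb(void)
    {
            /* no huge-page support configured; nothing to do */
    }
    #endif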
-rw-r--r--  arch/mips/Kconfig                       3
-rw-r--r--  arch/mips/include/asm/mipsregs.h        2
-rw-r--r--  arch/mips/include/asm/page.h            6
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h    2
-rw-r--r--  arch/mips/include/asm/sparsemem.h       2
-rw-r--r--  arch/mips/mm/tlb-r4k.c                  2
-rw-r--r--  arch/mips/mm/tlbex.c                    26
7 files changed, 23 insertions, 20 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dba9390d37cf..397194a263ce 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1077,6 +1077,9 @@ config SYS_SUPPORTS_HUGETLBFS
depends on CPU_SUPPORTS_HUGEPAGES && 64BIT
default y
+config MIPS_HUGE_TLB_SUPPORT
+ def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
+
config IRQ_CPU
bool
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index eb742895dcbe..881b980c72d2 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -240,7 +240,7 @@
#define PM_HUGE_MASK PM_64M
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PM_HUGE_MASK PM_256M
-#elif defined(CONFIG_HUGETLB_PAGE)
+#elif defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#error Bad page size configuration for hugetlbfs!
#endif
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index c48a7f0bcf3c..31ab10f02bad 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -33,17 +33,17 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#else /* !CONFIG_HUGETLB_PAGE */
+#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
#define HPAGE_SIZE ({BUILD_BUG(); 0; })
#define HPAGE_MASK ({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; })
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifndef __ASSEMBLY__
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index f8b8fb358412..1e2642c360a8 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -128,7 +128,7 @@
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
#define _PAGE_FILE (_PAGE_MODIFIED)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 4461198361c9..65900dab3ad3 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,7 @@
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && defined(CONFIG_PAGE_SIZE_64KB)
# define SECTION_SIZE_BITS 29
#else
# define SECTION_SIZE_BITS 28
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 4b9b935a070e..936165d167e1 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -305,7 +305,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
pudp = pud_offset(pgdp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
if (pmd_huge(*pmdp)) {
unsigned long lo;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a36b4955d4ed..98b2b732005a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -158,7 +158,7 @@ enum label_id {
label_smp_pgtable_change,
label_r3000_write_probe_fail,
label_large_segbits_fault,
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
label_tlb_huge_update,
#endif
};
@@ -177,7 +177,7 @@ UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif
@@ -666,7 +666,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
}
}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static __cpuinit void build_restore_pagemask(u32 **p,
struct uasm_reloc **r,
@@ -792,7 +792,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifdef CONFIG_64BIT
/*
@@ -1237,7 +1237,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
/* Adjust the context during the load latency. */
build_adjust_context(p, tmp);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* The in the LWX case we don't want to do the load in the
@@ -1246,7 +1246,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
*/
if (use_lwx_insns())
uasm_i_nop(p);
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
/* build_update_entries */
@@ -1349,7 +1349,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif
@@ -1359,7 +1359,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
@@ -1404,7 +1404,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
uasm_copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
} else {
-#if defined(CONFIG_HUGETLB_PAGE)
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
const enum label_id ls = label_tlb_huge_update;
#else
const enum label_id ls = label_vmalloc;
@@ -1880,7 +1880,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* For huge tlb entries, pmd doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
@@ -1996,7 +1996,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
build_make_valid(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when build_r4000_tlbchange_handler_head
* spots a huge page.
@@ -2089,7 +2089,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
@@ -2137,7 +2137,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.