Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/Kconfig        |  2
-rw-r--r--  arch/sparc64/kernel/ktlb.S  | 16
-rw-r--r--  arch/sparc64/mm/init.c      | 52
3 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 33dabf588bdd..2f22fa90461a 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -240,10 +240,10 @@ config ARCH_SELECT_MEMORY_MODEL
 config ARCH_SPARSEMEM_ENABLE
         def_bool y
+        select SPARSEMEM_VMEMMAP_ENABLE

 config ARCH_SPARSEMEM_DEFAULT
         def_bool y
-        select SPARSEMEM_STATIC

 source "mm/Kconfig"
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index d4024ac0d619..964527d2ffa0 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -226,6 +226,15 @@ kvmap_dtlb_load:
         ba,pt   %xcc, sun4v_dtlb_load
         mov     %g5, %g3

+kvmap_vmemmap:
+        sub     %g4, %g5, %g5
+        srlx    %g5, 22, %g5
+        sethi   %hi(vmemmap_table), %g1
+        sllx    %g5, 3, %g5
+        or      %g1, %lo(vmemmap_table), %g1
+        ba,pt   %xcc, kvmap_dtlb_load
+        ldx     [%g1 + %g5], %g5
+
 kvmap_dtlb_nonlinear:
         /* Catch kernel NULL pointer derefs. */
         sethi   %hi(PAGE_SIZE), %g5
@@ -233,6 +242,13 @@ kvmap_dtlb_nonlinear:
         bleu,pn %xcc, kvmap_dtlb_longpath
         nop

+        /* Do not use the TSB for vmemmap. */
+        mov     (VMEMMAP_BASE >> 24), %g5
+        sllx    %g5, 24, %g5
+        cmp     %g4, %g5
+        bgeu,pn %xcc, kvmap_vmemmap
+        nop
+
         KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

 kvmap_dtlb_tsbmiss:
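
For reference, the new kvmap_vmemmap stub above boils down to the following C
sketch (illustrative only, not part of the patch; "vaddr" and the function name
stand in for the faulting address the handler receives in %g4):

        /* Rough C equivalent of the kvmap_vmemmap D-TLB miss stub. */
        static unsigned long vmemmap_tte_lookup(unsigned long vaddr)
        {
                unsigned long offset = vaddr - VMEMMAP_BASE;  /* sub  %g4, %g5, %g5 */
                unsigned long index  = offset >> 22;          /* srlx %g5, 22, %g5  */

                /* sllx %g5, 3 / ldx: index the 8-byte vmemmap_table entries */
                return vmemmap_table[index];
        }

The value loaded is the precomputed 4MB TTE for that chunk of the vmemmap
region, which kvmap_dtlb_load installs directly, so vmemmap addresses need
neither a kernel TSB lookup nor a page table walk.
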
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index f0ab9aab308f..69538d1aa0ad 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1645,6 +1645,58 @@ EXPORT_SYMBOL(_PAGE_E);
 unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);

+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+
+#define VMEMMAP_CHUNK_SHIFT     22
+#define VMEMMAP_CHUNK           (1UL << VMEMMAP_CHUNK_SHIFT)
+#define VMEMMAP_CHUNK_MASK      ~(VMEMMAP_CHUNK - 1UL)
+#define VMEMMAP_ALIGN(x)        (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+
+#define VMEMMAP_SIZE    ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+                          sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
+unsigned long vmemmap_table[VMEMMAP_SIZE];
+
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+{
+        unsigned long vstart = (unsigned long) start;
+        unsigned long vend = (unsigned long) (start + nr);
+        unsigned long phys_start = (vstart - VMEMMAP_BASE);
+        unsigned long phys_end = (vend - VMEMMAP_BASE);
+        unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+        unsigned long end = VMEMMAP_ALIGN(phys_end);
+        unsigned long pte_base;
+
+        pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+                    _PAGE_CP_4U | _PAGE_CV_4U |
+                    _PAGE_P_4U | _PAGE_W_4U);
+        if (tlb_type == hypervisor)
+                pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+                            _PAGE_CP_4V | _PAGE_CV_4V |
+                            _PAGE_P_4V | _PAGE_W_4V);
+
+        for (; addr < end; addr += VMEMMAP_CHUNK) {
+                unsigned long *vmem_pp =
+                        vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+                void *block;
+
+                if (!(*vmem_pp & _PAGE_VALID)) {
+                        block = vmemmap_alloc_block(1UL << 22, node);
+                        if (!block)
+                                return -ENOMEM;
+
+                        *vmem_pp = pte_base | __pa(block);
+
+                        printk(KERN_INFO "[%p-%p] page_structs=%lu "
+                               "node=%d entry=%lu/%lu\n", start, block, nr,
+                               node,
+                               addr >> VMEMMAP_CHUNK_SHIFT,
+                               VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+                }
+        }
+        return 0;
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 static void prot_init_common(unsigned long page_none,
                              unsigned long page_shared,
                              unsigned long page_copy,
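
As a usage note: vmemmap_populate() above is not invoked by this patch itself;
the generic SPARSEMEM_VMEMMAP code in mm/sparse-vmemmap.c calls it once per
memory section, roughly along these lines (simplified sketch of the generic
caller from this era, not code added by this patch):

        struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
        {
                struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

                /* Back the section's struct page array with 4MB mappings. */
                if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
                        return NULL;

                return map;
        }

Each call fills any still-empty vmemmap_table slots covering that section with
a 4MB TTE pointing at freshly allocated backing memory, so later D-TLB misses
on those struct pages are serviced by the kvmap_vmemmap stub without further
allocation.
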