Diffstat (limited to 'arch/powerpc/mm/tlb_nohash.c')
-rw-r--r--   arch/powerpc/mm/tlb_nohash.c | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d16100c9416a..2fbc680c2c71 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -93,6 +93,7 @@ static inline int mmu_get_tsize(int psize)
 
 int mmu_linear_psize;		/* Page size used for the linear mapping */
 int mmu_pte_psize;		/* Page size used for PTE pages */
+int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
 int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
 unsigned long linear_map_top;	/* Top of linear mapping */
 
@@ -356,10 +357,18 @@ static void __early_init_mmu(int boot_cpu)
 	unsigned int mas4;
 
 	/* XXX This will have to be decided at runtime, but right
-	 * now our boot and TLB miss code hard wires it
+	 * now our boot and TLB miss code hard wires it. Ideally
+	 * we should find out a suitable page size and patch the
+	 * TLB miss code (either that or use the PACA to store
+	 * the value we want)
 	 */
 	mmu_linear_psize = MMU_PAGE_1G;
 
+	/* XXX This should be decided at runtime based on supported
+	 * page sizes in the TLB, but for now let's assume 16M is
+	 * always there and a good fit (which it probably is)
+	 */
+	mmu_vmemmap_psize = MMU_PAGE_16M;
 	/* Check if HW tablewalk is present, and if yes, enable it by:
 	 *
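
Both XXX comments in the hunk above defer a runtime decision: the linear and vmemmap page sizes are hard-wired (1G and 16M) for now, with the intent of later picking them from whatever page sizes the TLB actually supports. The following is a minimal, self-contained C sketch of what such a runtime selection might look like. It is not part of this commit: the supported-size bitmask, the psize_shift table, and pick_vmemmap_psize() are invented for illustration and do not reflect the kernel's real tables or register layouts.

/*
 * Hypothetical sketch only -- not taken from the patch above.
 * It picks the largest available page size no bigger than the
 * preferred one, falling back to 4K, which is always present.
 */
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_1M, MMU_PAGE_16M,
       MMU_PAGE_1G, MMU_PAGE_COUNT };

/* page-size shift for each entry above (4K = 2^12 ... 1G = 2^30) */
static const int psize_shift[MMU_PAGE_COUNT] = { 12, 16, 20, 24, 30 };

/*
 * "supported" is an assumed bitmask with bit N set when MMU page
 * size N is available in the TLB; how that mask would be read from
 * hardware is outside the scope of this sketch.
 */
static int pick_vmemmap_psize(unsigned int supported, int preferred)
{
	int psize;

	for (psize = preferred; psize > MMU_PAGE_4K; psize--)
		if (supported & (1u << psize))
			return psize;
	return MMU_PAGE_4K;
}

int main(void)
{
	/* pretend the TLB advertises 4K, 64K and 16M pages */
	unsigned int supported = (1u << MMU_PAGE_4K) |
				 (1u << MMU_PAGE_64K) |
				 (1u << MMU_PAGE_16M);
	int vmemmap_psize = pick_vmemmap_psize(supported, MMU_PAGE_16M);

	printf("vmemmap page shift: %d\n", psize_shift[vmemmap_psize]);
	return 0;
}

With the example mask above the function returns MMU_PAGE_16M, matching the hard-wired choice in the patch; on hardware without 16M pages it would degrade gracefully instead of assuming 16M is present.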