Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c   18
-rw-r--r--   arch/powerpc/mm/init_64.c       24
-rw-r--r--   arch/powerpc/mm/mem.c           39
3 files changed, 19 insertions, 62 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fb42c4dd3217..f1c2d55b4377 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -113,7 +113,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
unsigned long address, unsigned int psize)
{
- pte_t *new = kmem_cache_alloc(huge_pgtable_cache(psize),
+ pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize),
GFP_KERNEL|__GFP_REPEAT);
if (! new)
@@ -730,25 +730,27 @@ static int __init hugepage_setup_sz(char *str)
}
__setup("hugepagesz=", hugepage_setup_sz);
-static void zero_ctor(struct kmem_cache *cache, void *addr)
-{
- memset(addr, 0, kmem_cache_size(cache));
-}
-
static int __init hugetlbpage_init(void)
{
unsigned int psize;
if (!cpu_has_feature(CPU_FTR_16M_PAGE))
return -ENODEV;
+
/* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
* and adjust PTE_NONCACHE_NUM if the number of supported huge page
* sizes changes.
*/
set_huge_psize(MMU_PAGE_16M);
- set_huge_psize(MMU_PAGE_64K);
set_huge_psize(MMU_PAGE_16G);
+ /* Temporarily disable support for 64K huge pages when 64K SPU local
+ * store support is enabled as the current implementation conflicts.
+ */
+#ifndef CONFIG_SPU_FS_64K_LS
+ set_huge_psize(MMU_PAGE_64K);
+#endif
+
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
if (mmu_huge_psizes[psize]) {
huge_pgtable_cache(psize) = kmem_cache_create(
@@ -756,7 +758,7 @@ static int __init hugetlbpage_init(void)
HUGEPTE_TABLE_SIZE(psize),
HUGEPTE_TABLE_SIZE(psize),
0,
- zero_ctor);
+ NULL);
if (!huge_pgtable_cache(psize))
panic("hugetlbpage_init(): could not create %s"\
"\n", HUGEPTE_CACHE_NAME(psize));
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a41bc5aa2043..4f7df85129d8 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -136,9 +136,14 @@ static int __init setup_kcore(void)
module_init(setup_kcore);
#endif
-static void zero_ctor(struct kmem_cache *cache, void *addr)
+static void pgd_ctor(void *addr)
{
- memset(addr, 0, kmem_cache_size(cache));
+ memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static void pmd_ctor(void *addr)
+{
+ memset(addr, 0, PMD_TABLE_SIZE);
}
static const unsigned int pgtable_cache_size[2] = {
@@ -163,19 +168,8 @@ struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
void pgtable_cache_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
- int size = pgtable_cache_size[i];
- const char *name = pgtable_cache_name[i];
-
- pr_debug("Allocating page table cache %s (#%d) "
- "for size: %08x...\n", name, i, size);
- pgtable_cache[i] = kmem_cache_create(name,
- size, size,
- SLAB_PANIC,
- zero_ctor);
- }
+ pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0], PGD_TABLE_SIZE, PGD_TABLE_SIZE, SLAB_PANIC, pgd_ctor);
+ pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1], PMD_TABLE_SIZE, PMD_TABLE_SIZE, SLAB_PANIC, pmd_ctor);
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
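
In init_64.c the shared zero_ctor(), which relied on the struct kmem_cache argument to call kmem_cache_size(), becomes two constructors with the table sizes hard-coded (PGD_TABLE_SIZE, PMD_TABLE_SIZE), matching a constructor prototype that only receives the object address, and pgtable_cache_init() is open-coded rather than looping over the size/name arrays. A hypothetical consumer of the two caches might look like the sketch below; the demo_* wrappers are illustrative and are not the kernel's real pgalloc helpers.

/*
 * Hypothetical consumer of the two caches set up above; the demo_*
 * wrappers are illustrative only.
 */
#include <linux/mm.h>
#include <linux/slab.h>

extern struct kmem_cache *pgtable_cache[];	/* [0] = PGD, [1] = PMD per the hunk above */

static pgd_t *demo_pgd_alloc(void)
{
	/* pgd_ctor() zeroed this object when its slab page was set up. */
	return kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
}

static void demo_pgd_free(pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache[0], pgd);
}

Because pgd_ctor() and pmd_ctor() zero objects as their slab pages are populated, a freshly constructed table comes back cleared without an explicit memset() in the caller.
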
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1ca2235f0965..702691cb9e82 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -186,45 +186,6 @@ walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
}
EXPORT_SYMBOL_GPL(walk_memory_resource);
-void show_mem(void)
-{
- unsigned long total = 0, reserved = 0;
- unsigned long shared = 0, cached = 0;
- unsigned long highmem = 0;
- struct page *page;
- pg_data_t *pgdat;
- unsigned long i;
-
- printk("Mem-info:\n");
- show_free_areas();
- for_each_online_pgdat(pgdat) {
- unsigned long flags;
- pgdat_resize_lock(pgdat, &flags);
- for (i = 0; i < pgdat->node_spanned_pages; i++) {
- if (!pfn_valid(pgdat->node_start_pfn + i))
- continue;
- page = pgdat_page_nr(pgdat, i);
- total++;
- if (PageHighMem(page))
- highmem++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page) - 1;
- }
- pgdat_resize_unlock(pgdat, &flags);
- }
- printk("%ld pages of RAM\n", total);
-#ifdef CONFIG_HIGHMEM
- printk("%ld pages of HIGHMEM\n", highmem);
-#endif
- printk("%ld reserved pages\n", reserved);
- printk("%ld pages shared\n", shared);
- printk("%ld pages swap cached\n", cached);
-}
-
/*
* Initialize the bootmem system and give it all the memory we
* have available. If we are using highmem, we only put the