Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_low_32.S   | 18
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c |  4
-rw-r--r--  arch/powerpc/mm/mem.c           |  9
-rw-r--r--  arch/powerpc/mm/mmap.c          |  1
-rw-r--r--  arch/powerpc/mm/pgtable_32.c    |  2
-rw-r--r--  arch/powerpc/mm/pgtable_64.c    |  4
6 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ddceefc06ecc..7f830a4888d6 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -30,7 +30,11 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_SMP
- .comm mmu_hash_lock,4
+ .section .bss
+ .align 2
+ .globl mmu_hash_lock
+mmu_hash_lock:
+ .space 4
#endif /* CONFIG_SMP */
/*
@@ -455,9 +459,15 @@ found_slot:
sync /* make sure pte updates get to memory */
blr
- .comm next_slot,4
- .comm primary_pteg_full,4
- .comm htab_hash_searches,4
+ .section .bss
+ .align 2
+next_slot:
+ .space 4
+primary_pteg_full:
+ .space 4
+htab_hash_searches:
+ .space 4
+ .previous
/*
* Flush the entry for a particular page from the hash table.
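The two hunks above replace ".comm" definitions with explicit, 4-byte-aligned reservations in .bss; mmu_hash_lock keeps its global binding via .globl (as .comm symbols are global by default), while the three counters become local labels. A rough C analogue of the distinction, illustrative only and not part of this diff:

    /* With -fcommon (the historical compiler default), an uninitialized
     * file-scope variable is emitted as a common symbol, much like
     * ".comm common_word,4". */
    int common_word;

    /* The explicit .bss form corresponds to a definite, aligned,
     * zero-initialized definition. */
    int bss_word __attribute__((aligned(4))) = 0;
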
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 028ba4ed03d2..4f2f4534a9d8 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -430,7 +430,7 @@ static void __init htab_finish_init(void)
make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC_HAS_HASH_64K */
make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
@@ -837,7 +837,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local);
else
-#endif /* CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC_HAS_HASH_64K */
__hash_page_4K(ea, access, vsid, ptep, trap, local);
local_irq_restore(flags);
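Both hash_utils_64.c hunks change only the comments on the closing #endif lines, so that each comment names the guard symbol used by its matching #ifdef, CONFIG_PPC_HAS_HASH_64K rather than CONFIG_PPC_64K_PAGES. A minimal illustration of the pairing being restored (nothing beyond the guard symbol itself is taken from this file):

    #ifdef CONFIG_PPC_HAS_HASH_64K
            /* 64K hash-page path */
    #endif /* CONFIG_PPC_HAS_HASH_64K */
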
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 246eeea40ece..0266a94d83b6 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -310,11 +310,12 @@ void __init paging_init(void)
#ifdef CONFIG_HIGHMEM
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
- pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
- (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
+ pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
+ (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
- kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
- (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
+ kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
+ (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
+ KMAP_FIX_BEGIN);
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
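The mem.c hunk inserts the missing pud level into the page-table walks that set up pkmap_page_table and kmap_pte, so the chain now goes pgd -> pud -> pmd -> pte. Written out as a small helper for clarity (a sketch only; walk_kernel_pte() is not a function in this diff, and the usual pgtable headers are assumed):

    #include <asm/pgtable.h>

    /* Sketch: the four-level walk that the updated calls above perform inline. */
    static pte_t *walk_kernel_pte(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);        /* kernel pgd entry for addr */
            pud_t *pud = pud_offset(pgd, addr);     /* the newly added level     */
            pmd_t *pmd = pmd_offset(pud, addr);

            return pte_offset_kernel(pmd, addr);    /* kernel-mapped pte pointer */
    }
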
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 972a8e884b9a..86010fc7d3b1 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -24,6 +24,7 @@
#include <linux/personality.h>
#include <linux/mm.h>
+#include <linux/sched.h>
/*
* Top of mmap area (just below the process stack).
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d8232b7a08f7..f6ae1a57d652 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -93,7 +93,7 @@ void pgd_free(pgd_t *pgd)
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
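The only change to pgtable_32.c is the __init_refok annotation on pte_alloc_one_kernel(), presumably because the early-boot path (taken while mem_init_done is still 0) references code that lives in an init section; __init_refok tells modpost that the reference is intentional, suppressing the section-mismatch warning. A minimal sketch of the pattern (alloc_pte_page() and early_page() are hypothetical names, not from this file):

    #include <linux/init.h>
    #include <linux/gfp.h>

    extern int mem_init_done;               /* set once the page allocator is up   */
    void * __init early_page(void);         /* hypothetical __init-time allocator  */

    /* __init_refok: this non-__init function may legitimately call __init code. */
    __init_refok void *alloc_pte_page(void)
    {
            if (!mem_init_done)
                    return early_page();    /* __init reference tolerated by modpost */

            return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
    }
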
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 1d443407423c..ad6e135bf212 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
+static DEFINE_SPINLOCK(phb_io_lock);
+
void __iomem * reserve_phb_iospace(unsigned long size)
{
void __iomem *virt_addr;
@@ -329,8 +331,10 @@ void __iomem * reserve_phb_iospace(unsigned long size)
if (phbs_io_bot >= IMALLOC_BASE)
panic("reserve_phb_iospace(): phb io space overflow\n");
+ spin_lock(&phb_io_lock);
virt_addr = (void __iomem *) phbs_io_bot;
phbs_io_bot += size;
+ spin_unlock(&phb_io_lock);
return virt_addr;
}
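
The pgtable_64.c hunks add a dedicated spinlock, phb_io_lock, and hold it across the two statements that read and advance phbs_io_bot, so concurrent callers of reserve_phb_iospace() can no longer be handed overlapping ranges. The same pattern in isolation (all names below are illustrative, not taken from the kernel):

    #include <linux/spinlock.h>

    #define BUMP_BASE 0xd0000000UL          /* hypothetical start of the reserved area */

    static DEFINE_SPINLOCK(bump_lock);
    static unsigned long bump_cursor = BUMP_BASE;

    /* Hand out 'size' bytes by advancing the cursor under the lock. */
    static unsigned long bump_reserve(unsigned long size)
    {
            unsigned long addr;

            spin_lock(&bump_lock);
            addr = bump_cursor;
            bump_cursor += size;
            spin_unlock(&bump_lock);

            return addr;
    }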