Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   | 27
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c |  3
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c     | 72
-rw-r--r--  arch/powerpc/mm/mem.c             |  4
4 files changed, 68 insertions(+), 38 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7a81e866e7b1..845231643987 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1180,13 +1180,25 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
pgdir = mm->pgd;
if (pgdir == NULL)
return;
+
+ /* Get VSID */
+ ssize = user_segment_size(ea);
+ vsid = get_vsid(mm->context.id, ea, ssize);
+ if (!vsid)
+ return;
+ /*
+ * Hash doesn't like irqs. Walking the linux page table with irqs
+ * disabled saves us from holding multiple locks.
+ */
+ local_irq_save(flags);
+
/*
* THP pages use update_mmu_cache_pmd. We don't do
* hash preload there. Hence we can ignore THP here.
*/
ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
if (!ptep)
- return;
+ goto out_exit;
WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
@@ -1197,18 +1209,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
* page size demotion here
*/
if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
- return;
+ goto out_exit;
#endif /* CONFIG_PPC_64K_PAGES */
- /* Get VSID */
- ssize = user_segment_size(ea);
- vsid = get_vsid(mm->context.id, ea, ssize);
- if (!vsid)
- return;
-
- /* Hash doesn't like irqs */
- local_irq_save(flags);
-
/* Is that local to this CPU ? */
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
local = 1;
@@ -1230,7 +1233,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
mm->context.user_psize,
mm->context.user_psize,
pte_val(*ptep));
-
+out_exit:
local_irq_restore(flags);
}
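
The net effect of these hunks: the VSID lookup moves in front of local_irq_save(), and every bail-out taken after interrupts are disabled becomes a goto out_exit, so local_irq_restore() always runs. Disabling irqs also pins the page tables, since they are freed via RCU (see the hugetlbpage.c comment below). A minimal userspace sketch of the single-exit idiom, with save_flags()/restore_flags() as hypothetical stand-ins for local_irq_save()/local_irq_restore():

#include <stdio.h>
#include <stdbool.h>

static unsigned long save_flags(void) { puts("irqs off"); return 1; }
static void restore_flags(unsigned long f) { (void)f; puts("irqs on"); }

static int do_preload(bool have_pte, bool cache_inhibited)
{
	int rc = -1;
	unsigned long flags = save_flags();

	if (!have_pte)
		goto out_exit;		/* was "return;": would leak the saved state */
	if (cache_inhibited)
		goto out_exit;

	rc = 0;				/* the hash insertion would happen here */
out_exit:
	restore_flags(flags);		/* single exit: flags restored on every path */
	return rc;
}

int main(void)
{
	do_preload(false, false);
	do_preload(true, true);
	do_preload(true, false);
	return 0;
}
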
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 3c22fa307b9b..34de9e0cdc34 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -37,6 +37,9 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
/* If PMD busy, retry the access */
if (unlikely(old_pmd & _PAGE_BUSY))
return 0;
+ /* If PMD is trans splitting retry the access */
+ if (unlikely(old_pmd & _PAGE_SPLITTING))
+ return 0;
/* If PMD permissions don't match, take page fault */
if (unlikely(access & ~old_pmd))
return 1;
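
Returning 0 from __hash_page_thp() makes the low-level fault handler retry the access, so the new check handles a PMD caught mid-split the same way the existing _PAGE_BUSY check handles a busy one: bail out and try again once the transient state clears. A sketch of that convention, using made-up flag values rather than the real powerpc PTE bits:

#include <stdint.h>
#include <stdio.h>

#define PAGE_BUSY      (1u << 0)	/* hypothetical _PAGE_BUSY stand-in */
#define PAGE_SPLITTING (1u << 1)	/* hypothetical _PAGE_SPLITTING stand-in */

static int hash_page_thp(uint32_t old_pmd, uint32_t access)
{
	if (old_pmd & PAGE_BUSY)
		return 0;		/* transient: retry the access */
	if (old_pmd & PAGE_SPLITTING)
		return 0;		/* split in progress: retry too */
	if (access & ~old_pmd)
		return 1;		/* permission mismatch: take a page fault */
	return 0;			/* would insert the HPTE here */
}

int main(void)
{
	printf("%d\n", hash_page_thp(PAGE_SPLITTING, 0));	/* 0: retry */
	printf("%d\n", hash_page_thp(0, 4));			/* 1: fault */
	return 0;
}
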
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8add58061003..e9e6882231da 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -925,12 +925,16 @@ void flush_dcache_icache_hugepage(struct page *page)
* (2) pointer to next table, as normal; bottom 6 bits == 0
* (3) leaf pte for huge page, bottom two bits != 00
* (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ *
+ * So long as we atomically load page table pointers we are safe against
+ * teardown, and we can follow the address down to the page and take a
+ * ref on it.
*/
+
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
+ pgd_t pgd, *pgdp;
+ pud_t pud, *pudp;
+ pmd_t pmd, *pmdp;
pte_t *ret_pte;
hugepd_t *hpdp = NULL;
unsigned pdshift = PGDIR_SHIFT;
@@ -938,34 +942,42 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
if (shift)
*shift = 0;
- pg = pgdir + pgd_index(ea);
-
+ pgdp = pgdir + pgd_index(ea);
+ pgd = ACCESS_ONCE(*pgdp);
/*
- * we should first check for none. That takes care of a
- * a parallel hugetlb or THP pagefault moving none entries
- * to respective types.
+ * Always operate on the local stack value. This makes sure the
+ * value doesn't get updated by a parallel THP split/collapse,
+ * page fault or page unmap. The returned pte_t * is still not
+ * stable, so the caller should recheck it for the above conditions.
*/
- if (pgd_none(*pg))
+ if (pgd_none(pgd))
return NULL;
- else if (pgd_huge(*pg)) {
- ret_pte = (pte_t *) pg;
+ else if (pgd_huge(pgd)) {
+ ret_pte = (pte_t *) pgdp;
goto out;
- } else if (is_hugepd(pg))
- hpdp = (hugepd_t *)pg;
+ } else if (is_hugepd(&pgd))
+ hpdp = (hugepd_t *)&pgd;
else {
+ /*
+ * Even if we end up with an unmap, the pgtable will not
+ * be freed, because we do an RCU free and we are called
+ * here with irqs disabled.
+ */
pdshift = PUD_SHIFT;
- pu = pud_offset(pg, ea);
+ pudp = pud_offset(&pgd, ea);
+ pud = ACCESS_ONCE(*pudp);
- if (pud_none(*pu))
+ if (pud_none(pud))
return NULL;
- else if (pud_huge(*pu)) {
- ret_pte = (pte_t *) pu;
+ else if (pud_huge(pud)) {
+ ret_pte = (pte_t *) pudp;
goto out;
- } else if (is_hugepd(pu))
- hpdp = (hugepd_t *)pu;
+ } else if (is_hugepd(&pud))
+ hpdp = (hugepd_t *)&pud;
else {
pdshift = PMD_SHIFT;
- pm = pmd_offset(pu, ea);
+ pmdp = pmd_offset(&pud, ea);
+ pmd = ACCESS_ONCE(*pmdp);
/*
* A hugepage collapse is captured by pmd_none, because
* it marks the pmd none and does a hpte invalidate.
@@ -975,16 +987,16 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
* hpte invalidate
*
*/
- if (pmd_none(*pm) || pmd_trans_splitting(*pm))
+ if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return NULL;
- if (pmd_huge(*pm) || pmd_large(*pm)) {
- ret_pte = (pte_t *) pm;
+ if (pmd_huge(pmd) || pmd_large(pmd)) {
+ ret_pte = (pte_t *) pmdp;
goto out;
- } else if (is_hugepd(pm))
- hpdp = (hugepd_t *)pm;
+ } else if (is_hugepd(&pmd))
+ hpdp = (hugepd_t *)&pmd;
else
- return pte_offset_kernel(pm, ea);
+ return pte_offset_kernel(&pmd, ea);
}
}
if (!hpdp)
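
The rewrite loads each level of the walk exactly once into a stack local (pgd, pud, pmd) with ACCESS_ONCE and runs every later test against that snapshot, so a parallel THP split/collapse or unmap cannot change the entry between, say, the pmd_none() check and the pmd_huge() check. A userspace sketch of the snapshot idiom, with a volatile cast in place of the kernel's ACCESS_ONCE and a hypothetical "huge" bit in place of the real encodings:

#include <stdint.h>
#include <stdio.h>

#define READ_ENTRY_ONCE(x) (*(volatile uint64_t *)&(x))

static uint64_t shared_entry;	/* imagine another CPU rewriting this */

static const char *classify(void)
{
	/* One load; every check below uses the local copy only. */
	uint64_t e = READ_ENTRY_ONCE(shared_entry);

	if (e == 0)
		return "none";
	if (e & 1)		/* hypothetical "huge" bit */
		return "huge leaf";
	return "pointer to next table";
}

int main(void)
{
	shared_entry = 0; printf("%s\n", classify());
	shared_entry = 1; printf("%s\n", classify());
	shared_entry = 8; printf("%s\n", classify());
	return 0;
}
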
@@ -1020,6 +1032,14 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
if ((pte_val(pte) & mask) != mask)
return 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /*
+ * check for splitting here
+ */
+ if (pmd_trans_splitting(pte_pmd(pte)))
+ return 0;
+#endif
+
/* hugepages are never "special" */
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
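
gup_hugepte() walks without taking locks, so like __hash_page_thp() it must notice a concurrent THP split; returning 0 makes the caller drop back to the slow gup path. A minimal sketch of the check ordering, with illustrative bit definitions, not the real powerpc ones:

#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT   (1u << 0)	/* hypothetical */
#define PTE_RW        (1u << 1)	/* hypothetical */
#define PTE_SPLITTING (1u << 2)	/* hypothetical _PAGE_SPLITTING stand-in */

static int gup_check(uint32_t pte, uint32_t mask)
{
	if ((pte & mask) != mask)
		return 0;		/* permissions don't match: slow path */
	if (pte & PTE_SPLITTING)
		return 0;		/* THP split in progress: slow path */
	return 1;			/* safe to take a reference */
}

int main(void)
{
	printf("%d\n", gup_check(PTE_PRESENT | PTE_RW, PTE_PRESENT | PTE_RW));
	printf("%d\n", gup_check(PTE_PRESENT | PTE_RW | PTE_SPLITTING,
				 PTE_PRESENT | PTE_RW));
	return 0;
}
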
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0988a26e0413..ccd49f9503a9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -508,6 +508,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
+ /*
+ * We don't need to worry about _PAGE_PRESENT here because we are
+ * called with either mm->page_table_lock or the pte's ptl held
+ */
unsigned long access = 0, trap;
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */