author     Ingo Molnar <mingo@kernel.org>  2012-04-13 09:57:10 +0200
committer  Ingo Molnar <mingo@kernel.org>  2012-04-13 09:57:10 +0200
commit     a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (patch)
tree       a2c186cb828e3713c2ec48a4d7191166fb798b3d /arch/tile/mm/pgtable.c
parent     659c36fcda403013a01b85da07cf2d9711e6d6c7 (diff)
parent     0034102808e0dbbf3a2394b82b1bb40b5778de9e (diff)
Merge tag 'v3.4-rc2' into perf/core
Merge Linux 3.4-rc2: we were on v3.3, update the base.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/tile/mm/pgtable.c')
-rw-r--r--  arch/tile/mm/pgtable.c  39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index de7d8e21e01d..2410aa899b3e 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -27,7 +27,6 @@
#include <linux/vmalloc.h>
#include <linux/smp.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -178,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
if (!pmd_huge_page(*pmd))
return;
- /*
- * Grab the pgd_lock, since we may need it to walk the pgd_list,
- * and since we need some kind of lock here to avoid races.
- */
- spin_lock_irqsave(&pgd_lock, flags);
+ spin_lock_irqsave(&init_mm.page_table_lock, flags);
if (!pmd_huge_page(*pmd)) {
/* Lost the race to convert the huge page. */
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
return;
}
@@ -195,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
#ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */
+ spin_lock(&pgd_lock);
list_for_each(pos, &pgd_list) {
pmd_t *copy_pmd;
pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -202,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
copy_pmd = pmd_offset(pud, addr);
__set_pmd(copy_pmd, *pmd);
}
+ spin_unlock(&pgd_lock);
#endif
/* Tell every cpu to notice the change. */
@@ -209,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
cpu_possible_mask, NULL, 0);
/* Hold the lock until the TLB flush is finished to avoid races. */
- spin_unlock_irqrestore(&pgd_lock, flags);
+ spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
/*
@@ -218,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
* against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
*/
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
@@ -470,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)
void set_pte(pte_t *ptep, pte_t pte)
{
- struct page *page = pfn_to_page(pte_pfn(pte));
-
- /* Update the home of a PTE if necessary */
- pte = pte_set_home(pte, page_home(page));
+ if (pte_present(pte) &&
+ (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+ /* The PTE actually references physical memory. */
+ unsigned long pfn = pte_pfn(pte);
+ if (pfn_valid(pfn)) {
+ /* Update the home of the PTE from the struct page. */
+ pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+ } else if (hv_pte_get_mode(pte) == 0) {
+ /* remap_pfn_range(), etc, must supply PTE mode. */
+ panic("set_pte(): out-of-range PFN and mode 0\n");
+ }
+ }
__set_pte(ptep, pte);
}
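
The locking change above narrows what pgd_lock protects: shatter_huge_page() now takes init_mm.page_table_lock with interrupts disabled as the outer lock, and takes pgd_lock only around the pgd_list walk. Below is a minimal user-space sketch of that nesting order, using pthread mutexes as hypothetical stand-ins for the two spinlocks; interrupt disabling cannot be modelled in user space, so it is only noted in comments, and the function/variable names are invented for illustration.

/*
 * Hedged sketch, not kernel code: a pthread mock of the lock nesting
 * established by this patch in shatter_huge_page().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER; /* outer: init_mm.page_table_lock */
static pthread_mutex_t pgd_lock        = PTHREAD_MUTEX_INITIALIZER; /* inner: pgd_lock                */

static void mock_shatter_huge_page(void)
{
	/* Outer lock: in the kernel this is spin_lock_irqsave(), because
	 * shatter_huge_page() may be called from interrupt context. */
	pthread_mutex_lock(&page_table_lock);

	/* ... re-check the huge page and rewrite the kernel PMD here ... */

	/* Inner lock: taken only around the pgd_list walk; interrupts are
	 * already disabled at this point, matching the comment added above
	 * DEFINE_SPINLOCK(pgd_lock). */
	pthread_mutex_lock(&pgd_lock);
	/* ... copy the new PMD into every pgd on pgd_list ... */
	pthread_mutex_unlock(&pgd_lock);

	/* ... issue the remote TLB flush while still holding the outer lock,
	 * mirroring "Hold the lock until the TLB flush is finished". */
	pthread_mutex_unlock(&page_table_lock);
}

int main(void)
{
	mock_shatter_huge_page();
	printf("lock ordering: page_table_lock -> pgd_lock\n");
	return 0;
}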
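
The second change lets set_pte() handle PTEs that do not reference ordinary RAM: MMIO mappings are left alone, valid PFNs get their home set from the struct page, and an out-of-range PFN with no explicit mode is treated as a caller bug. Below is a minimal, hypothetical user-space mock of that decision tree; mock_pte, mock_set_pte, the MODE_* constants, and the toy pfn_valid() are invented stand-ins for the tile/hypervisor primitives in the diff, kept only so the control flow compiles and can be traced on its own.

/* Hedged sketch, not kernel code: the new set_pte() decision tree. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define MODE_UNSET  0  /* stands in for an unfilled HV_PTE mode */
#define MODE_MMIO   1  /* stands in for HV_PTE_MODE_MMIO        */
#define MODE_CACHED 2  /* stands in for a normal cached mode    */

struct mock_pte {
	unsigned long pfn;  /* physical frame the PTE points at */
	int mode;           /* caching/MMIO mode bits           */
	bool present;       /* valid/present bit                */
};

static bool chip_has_mmio(void) { return true; }                 /* assume an MMIO-capable chip */
static bool pfn_valid(unsigned long pfn) { return pfn < 1024; }  /* toy memory map              */

/* Mirrors the new logic: only re-home PTEs that reference real RAM. */
static struct mock_pte mock_set_pte(struct mock_pte pte)
{
	if (pte.present &&
	    (!chip_has_mmio() || pte.mode != MODE_MMIO)) {
		/* The PTE claims to reference physical memory. */
		if (pfn_valid(pte.pfn)) {
			/* Real RAM: derive the home from the struct page (stubbed). */
			pte.mode = MODE_CACHED;
		} else if (pte.mode == MODE_UNSET) {
			/* Out-of-range PFN and no explicit mode: caller bug. */
			fprintf(stderr, "set_pte(): out-of-range PFN and mode 0\n");
			exit(1);
		}
		/* else: out-of-range PFN but an explicit mode was supplied; keep it. */
	}
	/* __set_pte() would write the value into the page table here. */
	return pte;
}

int main(void)
{
	struct mock_pte ram  = { .pfn = 10,   .mode = MODE_UNSET, .present = true };
	struct mock_pte mmio = { .pfn = 5000, .mode = MODE_MMIO,  .present = true };

	printf("RAM PTE mode after set_pte: %d\n",  mock_set_pte(ram).mode);
	printf("MMIO PTE mode after set_pte: %d\n", mock_set_pte(mmio).mode);
	return 0;
}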