author    Linus Torvalds <torvalds@linux-foundation.org>    2014-12-16 13:23:03 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-12-16 13:23:03 -0800
commit    eb64c3c6cdb8fa8a4d324eb71a9033b62e150918 (patch)
tree      035267874e94585ea25a5ad50f433c8470a5ec27 /arch/x86/mm/pageattr.c
parent    61de8e53640ceeda564a65170a46c1edc2b37e11 (diff)
parent    f1d04b23b2015b4c3c0a8419677179b133afea08 (diff)
Merge tag 'stable/for-linus-3.19-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull additional xen update from David Vrabel:
 "Xen: additional features for 3.19-rc0

  - Linear p2m for x86 PV guests which simplifies the p2m code, improves
    performance and will allow for > 512 GB PV guests in the future.

  A last-minute, configuration-specific issue was discovered with this
  change, which is why it was not included in my previous pull request.
  This has now been fixed and tested"

* tag 'stable/for-linus-3.19-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen: switch to post-init routines in xen mmu.c earlier
  Revert "swiotlb-xen: pass dev_addr to swiotlb_tbl_unmap_single"
  xen: annotate xen_set_identity_and_remap_chunk() with __init
  xen: introduce helper functions to do safe read and write accesses
  xen: Speed up set_phys_to_machine() by using read-only mappings
  xen: switch to linear virtual mapped sparse p2m list
  xen: Hide get_phys_to_machine() to be able to tune common path
  x86: Introduce function to get pmd entry pointer
  xen: Delay invalidating extra memory
  xen: Delay m2p_override initialization
  xen: Delay remapping memory of pv-domain
  xen: use common page allocation function in p2m.c
  xen: Make functions static
  xen: fix some style issues in p2m.c
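For context on the "switch to linear virtual mapped sparse p2m list" item: the physical-to-machine translation becomes a single read from a virtually mapped array instead of a walk through a multi-level tree. The fragment below is only a minimal sketch of that idea under assumed names; p2m_list, p2m_size and MFN_MISSING are illustrative and are not the identifiers used by the Xen code.

/*
 * Sketch only: with a linear, virtually mapped p2m list, pfn -> mfn
 * becomes one bounds check plus one array read.  All names here are
 * illustrative assumptions, not the kernel's actual identifiers.
 */
#define MFN_MISSING	(~0UL)		/* illustrative "no mapping" sentinel */

static unsigned long *p2m_list;		/* virtually mapped p2m array */
static unsigned long p2m_size;		/* number of entries in p2m_list */

static inline unsigned long sketch_pfn_to_mfn(unsigned long pfn)
{
	if (pfn >= p2m_size)
		return MFN_MISSING;
	return p2m_list[pfn];		/* single read, no tree walk */
}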
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--    arch/x86/mm/pageattr.c    20
1 file changed, 20 insertions, 0 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index dfaf2e0f5f8f..536ea2fb6e33 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -384,6 +384,26 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
 }
 
 /*
+ * Lookup the PMD entry for a virtual address. Return a pointer to the entry
+ * or NULL if not present.
+ */
+pmd_t *lookup_pmd_address(unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	pgd = pgd_offset_k(address);
+	if (pgd_none(*pgd))
+		return NULL;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
+		return NULL;
+
+	return pmd_offset(pud, address);
+}
+
+/*
  * This is necessary because __pa() does not work on some
  * kinds of memory, like vmalloc() or the alloc_remap()
  * areas on 32-bit NUMA systems. The percpu areas can
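As a usage illustration, a caller could use the new lookup_pmd_address() helper to inspect how a kernel virtual address is mapped. The function below is a hedged sketch and is not part of this commit; it assumes the standard x86 page-table helpers pmd_none() and pmd_large().

#include <linux/types.h>
#include <asm/pgtable.h>			/* pmd_none(), pmd_large() */

pmd_t *lookup_pmd_address(unsigned long address);	/* added by this commit */

/*
 * Illustrative caller, not part of this commit: report whether a kernel
 * virtual address is currently backed by a large (PSE) PMD mapping.
 */
static bool sketch_addr_uses_large_pmd(unsigned long address)
{
	pmd_t *pmd = lookup_pmd_address(address);

	if (!pmd || pmd_none(*pmd))
		return false;

	return pmd_large(*pmd) != 0;
}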