author | Stefan Agner <stefan.agner@toradex.com> | 2019-05-23 11:11:00 +0200
---|---|---
committer | Stefan Agner <stefan.agner@toradex.com> | 2019-05-23 11:11:00 +0200
commit | b794ea49ba3816c0d5cf05506964a8e69ce4efa3 (patch) |
tree | b0e884d810b1c68b5615995a974cb2707ae02224 /fs/dax.c |
parent | 2136d9515afb95a10fe9c2a2da1b54d31caa2e42 (diff) |
parent | d59f5a01fa438635ae098b2e170a18644df73c06 (diff) |
Merge tag 'v5.0.17' into toradex_5.0.y
This is the 5.0.17 stable release
Diffstat (limited to 'fs/dax.c')
-rw-r--r-- | fs/dax.c | 15
1 file changed, 15 insertions, 0 deletions
@@ -33,6 +33,7 @@
 #include <linux/sizes.h>
 #include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
+#include <asm/pgalloc.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
@@ -1409,7 +1410,9 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = mapping->host;
+	pgtable_t pgtable = NULL;
 	struct page *zero_page;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
@@ -1424,12 +1427,22 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
 			DAX_PMD | DAX_ZERO_PAGE, false);
 
+	if (arch_needs_pgtable_deposit()) {
+		pgtable = pte_alloc_one(vma->vm_mm);
+		if (!pgtable)
+			return VM_FAULT_OOM;
+	}
+
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
 	if (!pmd_none(*(vmf->pmd))) {
 		spin_unlock(ptl);
 		goto fallback;
 	}
 
+	if (pgtable) {
+		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+		mm_inc_nr_ptes(vma->vm_mm);
+	}
 	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
 	pmd_entry = pmd_mkhuge(pmd_entry);
 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
@@ -1438,6 +1451,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	return VM_FAULT_NOPAGE;
 
 fallback:
+	if (pgtable)
+		pte_free(vma->vm_mm, pgtable);
 	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
 	return VM_FAULT_FALLBACK;
 }
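For context: on architectures where arch_needs_pgtable_deposit() returns true (ppc64 being the usual example), a huge-page mapping must carry a deposited PTE page so that a later split of the PMD cannot fail on allocation. The change above pre-allocates that page with pte_alloc_one() before taking the PMD lock, deposits it only if the zero-page PMD is actually installed, and frees it on the fallback path. The sketch below is a minimal userspace model of that allocate-outside-the-lock, attach-or-free pattern; the names (struct slot, map_huge_zero) are invented for illustration and are not kernel APIs.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of the patch's pattern: pre-allocate outside the lock,
 * attach under the lock only if the slot is still empty, and free
 * the pre-allocation on every other exit path.  Illustrative only.
 */
struct slot {
	pthread_mutex_t lock;	/* stands in for pmd_lock() */
	void *deposited;	/* stands in for the deposited pgtable */
	int populated;		/* stands in for !pmd_none(*pmd) */
};

enum { FAULT_OK, FAULT_OOM, FAULT_FALLBACK };

static int map_huge_zero(struct slot *s, int need_deposit)
{
	void *pgtable = NULL;

	if (need_deposit) {
		pgtable = malloc(4096);		/* pte_alloc_one() analogue */
		if (!pgtable)
			return FAULT_OOM;	/* VM_FAULT_OOM analogue */
	}

	pthread_mutex_lock(&s->lock);
	if (s->populated) {			/* lost the race: fall back */
		pthread_mutex_unlock(&s->lock);
		free(pgtable);			/* pte_free() analogue */
		return FAULT_FALLBACK;
	}
	s->deposited = pgtable;	/* pgtable_trans_huge_deposit() analogue */
	s->populated = 1;	/* set_pmd_at() analogue */
	pthread_mutex_unlock(&s->lock);
	return FAULT_OK;
}

int main(void)
{
	struct slot s = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	printf("first fault:  %d\n", map_huge_zero(&s, 1));	/* FAULT_OK */
	printf("second fault: %d\n", map_huge_zero(&s, 1));	/* FAULT_FALLBACK */
	free(s.deposited);
	return 0;
}
```

The design point this models, preserved from the diff: the allocation happens before the lock is taken (pte_alloc_one() may sleep), so the locked section only does a cheap pointer hand-off, and each exit path (install, race fallback, OOM) accounts for the pre-allocated page exactly once.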