| author | Wei Yang <richard.weiyang@gmail.com> | 2025-10-08 09:54:53 +0000 |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-11-24 15:08:49 -0800 |
| commit | ac7756771a34f19c9a757eb86efe028e51f57b23 (patch) |
| tree | 371e951ccc70273a5c0ed42bf64af8749264ded6 |
| parent | 2a1351cd4176ee1809b0900d386919d03b7652f8 (diff) |
mm/khugepaged: unify pmd folio installation with map_anon_folio_pmd()
Currently we install a pmd folio with map_anon_folio_pmd() in
__do_huge_pmd_anonymous_page() and do_huge_zero_wp_pmd(), while
collapse_huge_page() open-codes the same steps, differing only in which
statistics it adjusts.

Unify pmd folio installation on map_anon_folio_pmd(), and split it into
map_anon_folio_pmd_pf() and map_anon_folio_pmd_nopf() for use in the
page fault and non-page-fault paths respectively.
No functional change is intended.
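For orientation, here is a condensed sketch of the resulting split,
paraphrased from the diff below with the shared body abbreviated; it is
not a verbatim copy of the kernel source:

```c
/* Shared installation path, now also callable from khugepaged collapse. */
void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
			     struct vm_area_struct *vma, unsigned long haddr)
{
	/*
	 * ... build the pmd entry, folio_add_new_anon_rmap(),
	 * folio_add_lru_vma(), set_pmd_at(), update_mmu_cache_pmd() ...
	 */
	deferred_split_folio(folio, false);
}

/* Page-fault wrapper: the shared work plus fault statistics. */
static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
				  struct vm_area_struct *vma, unsigned long haddr)
{
	map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	count_vm_event(THP_FAULT_ALLOC);
	/* ... mTHP and memcg THP_FAULT_ALLOC counters ... */
}
```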
[akpm@linux-foundation.org: remove unneeded map_anon_folio_pmd_nopf() stub, per Wei & David]
Link: https://lkml.kernel.org/r/20251008095453.18772-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Lance Yang <lance.yang@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
| -rw-r--r-- | include/linux/huge_mm.h | 2 |
| -rw-r--r-- | mm/huge_memory.c | 14 |
| -rw-r--r-- | mm/khugepaged.c | 9 |

3 files changed, 13 insertions(+), 12 deletions(-)
```diff
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ed99e6bd31ac..396d9e3d1d46 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -533,6 +533,8 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze);
 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
 			   pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+			     struct vm_area_struct *vma, unsigned long haddr);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a2a2fda2bff8..05bf419513ad 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1218,7 +1218,7 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
 	return folio;
 }
 
-static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
 		struct vm_area_struct *vma, unsigned long haddr)
 {
 	pmd_t entry;
@@ -1229,11 +1229,17 @@ static void map_anon_folio_pmd(struct folio *folio, pmd_t *pmd,
 	folio_add_lru_vma(folio, vma);
 	set_pmd_at(vma->vm_mm, haddr, pmd, entry);
 	update_mmu_cache_pmd(vma, haddr, pmd);
+	deferred_split_folio(folio, false);
+}
+
+static void map_anon_folio_pmd_pf(struct folio *folio, pmd_t *pmd,
+		struct vm_area_struct *vma, unsigned long haddr)
+{
+	map_anon_folio_pmd_nopf(folio, pmd, vma, haddr);
 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 	count_vm_event(THP_FAULT_ALLOC);
 	count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
-	deferred_split_folio(folio, false);
 }
 
 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
@@ -1272,7 +1278,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			return ret;
 		}
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
-		map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+		map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
 		mm_inc_nr_ptes(vma->vm_mm);
 		spin_unlock(vmf->ptl);
 	}
@@ -1944,7 +1950,7 @@ static vm_fault_t do_huge_zero_wp_pmd(struct vm_fault *vmf)
 	if (ret)
 		goto release;
 	(void)pmdp_huge_clear_flush(vma, haddr, vmf->pmd);
-	map_anon_folio_pmd(folio, vmf->pmd, vma, haddr);
+	map_anon_folio_pmd_pf(folio, vmf->pmd, vma, haddr);
 	goto unlock;
 release:
 	folio_put(folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 1b5c2e942df9..af1c162c9a94 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1226,17 +1226,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	__folio_mark_uptodate(folio);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = folio_mk_pmd(folio, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
-	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
-	set_pmd_at(mm, address, pmd, _pmd);
-	update_mmu_cache_pmd(vma, address, pmd);
-	deferred_split_folio(folio, false);
+	map_anon_folio_pmd_nopf(folio, pmd, vma, address);
 	spin_unlock(pmd_ptl);
 	folio = NULL;
 
```
