From fb4ddf2085115ed28dedc427d9491707b476bbfe Mon Sep 17 00:00:00 2001
From: "David Hildenbrand (Red Hat)"
Date: Mon, 19 Jan 2026 23:07:08 +0100
Subject: mm/memory: handle non-split locks correctly in zap_empty_pte_table()

While we handle pte_lockptr() == pmd_lockptr() correctly in
zap_pte_table_if_empty(), we don't handle it in zap_empty_pte_table(),
making the spin_trylock() always fail and forcing us onto the slow path.

So let's handle the scenario where pte_lockptr() == pmd_lockptr() better,
which can only happen if CONFIG_SPLIT_PTE_PTLOCKS is not set.

This is only relevant once we unlock CONFIG_PT_RECLAIM on architectures
that are not x86-64.

Link: https://lkml.kernel.org/r/20260119220708.3438514-3-david@kernel.org
Signed-off-by: David Hildenbrand (Red Hat)
Reviewed-by: Qi Zheng
Cc: Liam Howlett
Cc: Lorenzo Stoakes
Cc: Michal Hocko
Cc: Mike Rapoport
Cc: Suren Baghdasaryan
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---
 mm/memory.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index de22710bb217..15a6c09d54ee 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1830,16 +1830,18 @@ static bool pte_table_reclaim_possible(unsigned long start, unsigned long end,
 	return details && details->reclaim_pt && (end - start >= PMD_SIZE);
 }
 
-static bool zap_empty_pte_table(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval)
+static bool zap_empty_pte_table(struct mm_struct *mm, pmd_t *pmd,
+		spinlock_t *ptl, pmd_t *pmdval)
 {
 	spinlock_t *pml = pmd_lockptr(mm, pmd);
 
-	if (!spin_trylock(pml))
+	if (ptl != pml && !spin_trylock(pml))
 		return false;
 
 	*pmdval = pmdp_get(pmd);
 	pmd_clear(pmd);
-	spin_unlock(pml);
+	if (ptl != pml)
+		spin_unlock(pml);
 
 	return true;
 }
@@ -1931,7 +1933,7 @@ retry:
 	 * from being repopulated by another thread.
 	 */
 	if (can_reclaim_pt && direct_reclaim && addr == end)
-		direct_reclaim = zap_empty_pte_table(mm, pmd, &pmdval);
+		direct_reclaim = zap_empty_pte_table(mm, pmd, ptl, &pmdval);
 
 	add_mm_rss_vec(mm, rss);
 	lazy_mmu_mode_disable();
-- 
cgit v1.2.3
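
Context for the non-split case: with CONFIG_SPLIT_PTE_PTLOCKS disabled,
the PTE and PMD lock helpers both resolve to the same per-mm lock, so the
caller, which still holds ptl when it reaches this point, already owns the
lock that pmd_lockptr() returns. A simplified sketch of those helpers
(roughly what include/linux/mm.h provides for the non-split configuration;
shown here only for illustration, not part of the patch):

	/* !CONFIG_SPLIT_PTE_PTLOCKS: one per-mm lock protects all PTE tables */
	static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
	{
		return &mm->page_table_lock;
	}

	/* !CONFIG_SPLIT_PMD_PTLOCKS: the PMD level uses that same per-mm lock */
	static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
	{
		return &mm->page_table_lock;
	}

With ptl == pml already held, spin_trylock(pml) can never succeed, which
is why the patch compares ptl against pml before trying to take and later
release the PMD lock.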