diff options
| author | Andrew Morton <akpm@linux-foundation.org> | 2025-12-15 11:05:56 -0800 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2026-01-20 19:24:36 -0800 |
| commit | 7adc97bc93946e55fc6af30a03d296fb833a28df (patch) | |
| tree | 725b4a6fca649b463ace35cc820c6b8b6e42d035 | |
| parent | bf3480d7d0bce40d8687559fd6ff40c233a7052f (diff) | |
mm/vmscan.c:shrink_folio_list(): save a tabstop
We have some needlessly deep indentation in this huge function due to
if (expr1) {
if (expr2) {
...
}
}
Convert this to
if (expr1 && expr2) {
...
}
Also, reflow that big block comment to fit in 80 cols.
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Wei Xu <weixugc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
| -rw-r--r-- | mm/vmscan.c | 98 |
1 file changed, 49 insertions, 49 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index 6cf5ee94be7a..67234613fbff 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1276,58 +1276,58 @@ retry: * Try to allocate it some swap space here. * Lazyfree folio could be freed directly */ - if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { - if (!folio_test_swapcache(folio)) { - if (!(sc->gfp_mask & __GFP_IO)) - goto keep_locked; - if (folio_maybe_dma_pinned(folio)) - goto keep_locked; - if (folio_test_large(folio)) { - /* cannot split folio, skip it */ - if (folio_expected_ref_count(folio) != - folio_ref_count(folio) - 1) - goto activate_locked; - /* - * Split partially mapped folios right away. - * We can free the unmapped pages without IO. - */ - if (data_race(!list_empty(&folio->_deferred_list) && - folio_test_partially_mapped(folio)) && - split_folio_to_list(folio, folio_list)) - goto activate_locked; - } - if (folio_alloc_swap(folio)) { - int __maybe_unused order = folio_order(folio); - - if (!folio_test_large(folio)) - goto activate_locked_split; - /* Fallback to swap normal pages */ - if (split_folio_to_list(folio, folio_list)) - goto activate_locked; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (nr_pages >= HPAGE_PMD_NR) { - count_memcg_folio_events(folio, - THP_SWPOUT_FALLBACK, 1); - count_vm_event(THP_SWPOUT_FALLBACK); - } -#endif - count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); - if (folio_alloc_swap(folio)) - goto activate_locked_split; - } + if (folio_test_anon(folio) && folio_test_swapbacked(folio) && + !folio_test_swapcache(folio)) { + if (!(sc->gfp_mask & __GFP_IO)) + goto keep_locked; + if (folio_maybe_dma_pinned(folio)) + goto keep_locked; + if (folio_test_large(folio)) { + /* cannot split folio, skip it */ + if (folio_expected_ref_count(folio) != + folio_ref_count(folio) - 1) + goto activate_locked; /* - * Normally the folio will be dirtied in unmap because its - * pte should be dirty. A special case is MADV_FREE page. 
The - * page's pte could have dirty bit cleared but the folio's - * SwapBacked flag is still set because clearing the dirty bit - * and SwapBacked flag has no lock protected. For such folio, - * unmap will not set dirty bit for it, so folio reclaim will - * not write the folio out. This can cause data corruption when - * the folio is swapped in later. Always setting the dirty flag - * for the folio solves the problem. + * Split partially mapped folios right away. + * We can free the unmapped pages without IO. */ - folio_mark_dirty(folio); + if (data_race(!list_empty(&folio->_deferred_list) && + folio_test_partially_mapped(folio)) && + split_folio_to_list(folio, folio_list)) + goto activate_locked; + } + if (folio_alloc_swap(folio)) { + int __maybe_unused order = folio_order(folio); + + if (!folio_test_large(folio)) + goto activate_locked_split; + /* Fallback to swap normal pages */ + if (split_folio_to_list(folio, folio_list)) + goto activate_locked; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (nr_pages >= HPAGE_PMD_NR) { + count_memcg_folio_events(folio, + THP_SWPOUT_FALLBACK, 1); + count_vm_event(THP_SWPOUT_FALLBACK); + } +#endif + count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); + if (folio_alloc_swap(folio)) + goto activate_locked_split; } + /* + * Normally the folio will be dirtied in unmap because + * its pte should be dirty. A special case is MADV_FREE + * page. The page's pte could have dirty bit cleared but + * the folio's SwapBacked flag is still set because + * clearing the dirty bit and SwapBacked flag has no + * lock protected. For such folio, unmap will not set + * dirty bit for it, so folio reclaim will not write the + * folio out. This can cause data corruption when the + * folio is swapped in later. Always setting the dirty + * flag for the folio solves the problem. + */ + folio_mark_dirty(folio); } /* |
