| field | value | date |
|---|---|---|
| author | Kairui Song <kasong@tencent.com> | 2025-01-14 01:57:27 +0800 |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2025-01-25 20:22:36 -0800 |
| commit | 3494d184706ff5e7d28481de0c841b039caa38b1 (patch) | |
| tree | 846dfa2399c262ed047e4267823a6e92443096de /mm/swapfile.c | |
| parent | 9a0ddeb7988095a5c21994c37005a45b240039ef (diff) | |
mm, swap: use an enum to define all cluster flags and wrap flags changes
Currently, we only use flags to indicate which list a cluster is on. Using
one bit for each list type is wasteful: as the number of list types grows,
we would consume too many bits. Additionally, the current mixed usage of
'&' and '==' is confusing.
Make this cleaner by using an enum to define all possible cluster statuses;
only an off-list cluster will have the NONE (0) flag. Use a wrapper to
annotate and sanitize all flag settings and list movements.
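
The enum itself is defined outside mm/swapfile.c, so it does not appear in the diffstat-limited view below. As a rough sketch only (the exact definition is not shown here), its ordering can be inferred from the checks in the diff: CLUSTER_FLAG_NONE is 0 for off-list clusters, every value up to CLUSTER_FLAG_USABLE is an allocatable state, and CLUSTER_FLAG_MAX bounds the BUILD_BUG_ON width check in move_cluster():

```c
/* Sketch of the cluster status enum; inferred from the diff, not copied from the patch. */
enum swap_cluster_flags {
	CLUSTER_FLAG_NONE = 0,	/* off-list cluster, the only state with value 0 */
	CLUSTER_FLAG_FREE,
	CLUSTER_FLAG_NONFULL,
	CLUSTER_FLAG_FRAG,
	/* States up to here are allocatable, matching the checks in cluster_alloc_range() */
	CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG,
	CLUSTER_FLAG_FULL,
	CLUSTER_FLAG_DISCARD,
	CLUSTER_FLAG_MAX,
};
```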
Link: https://lkml.kernel.org/r/20250113175732.48099-9-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Suggested-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/swapfile.c')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | mm/swapfile.c | 76 |

1 file changed, 40 insertions(+), 36 deletions(-)
```diff
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3898576f947a..b754c9e16c3b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -403,7 +403,7 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 
 static inline bool cluster_is_free(struct swap_cluster_info *info)
 {
-	return info->flags & CLUSTER_FLAG_FREE;
+	return info->flags == CLUSTER_FLAG_FREE;
 }
 
 static inline unsigned int cluster_index(struct swap_info_struct *si,
@@ -434,6 +434,28 @@ static inline void unlock_cluster(struct swap_cluster_info *ci)
 	spin_unlock(&ci->lock);
 }
 
+static void move_cluster(struct swap_info_struct *si,
+			 struct swap_cluster_info *ci, struct list_head *list,
+			 enum swap_cluster_flags new_flags)
+{
+	VM_WARN_ON(ci->flags == new_flags);
+
+	BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
+
+	if (ci->flags == CLUSTER_FLAG_NONE) {
+		list_add_tail(&ci->list, list);
+	} else {
+		if (ci->flags == CLUSTER_FLAG_FRAG) {
+			VM_WARN_ON(!si->frag_cluster_nr[ci->order]);
+			si->frag_cluster_nr[ci->order]--;
+		}
+		list_move_tail(&ci->list, list);
+	}
+	ci->flags = new_flags;
+	if (new_flags == CLUSTER_FLAG_FRAG)
+		si->frag_cluster_nr[ci->order]++;
+}
+
 /* Add a cluster to discard list and schedule it to do discard */
 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 		struct swap_cluster_info *ci)
@@ -447,10 +469,8 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 	 */
 	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
-
-	VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
-	list_move_tail(&ci->list, &si->discard_clusters);
-	ci->flags = 0;
+	VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
+	move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
 	schedule_work(&si->discard_work);
 }
 
@@ -458,12 +478,7 @@ static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info
 {
 	lockdep_assert_held(&si->lock);
 	lockdep_assert_held(&ci->lock);
-
-	if (ci->flags)
-		list_move_tail(&ci->list, &si->free_clusters);
-	else
-		list_add_tail(&ci->list, &si->free_clusters);
-	ci->flags = CLUSTER_FLAG_FREE;
+	move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
 	ci->order = 0;
 }
 
@@ -479,6 +494,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
 	while (!list_empty(&si->discard_clusters)) {
 		ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
 		list_del(&ci->list);
+		/* Must clear flag when taking a cluster off-list */
+		ci->flags = CLUSTER_FLAG_NONE;
 		idx = cluster_index(si, ci);
 		spin_unlock(&si->lock);
 
@@ -519,9 +536,6 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *
 	lockdep_assert_held(&si->lock);
 	lockdep_assert_held(&ci->lock);
 
-	if (ci->flags & CLUSTER_FLAG_FRAG)
-		si->frag_cluster_nr[ci->order]--;
-
 	/*
 	 * If the swap is discardable, prepare discard the cluster
 	 * instead of free it immediately. The cluster will be freed
@@ -573,13 +587,9 @@ static void dec_cluster_info_page(struct swap_info_struct *si,
 		return;
 	}
 
-	if (!(ci->flags & CLUSTER_FLAG_NONFULL)) {
-		VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE);
-		if (ci->flags & CLUSTER_FLAG_FRAG)
-			si->frag_cluster_nr[ci->order]--;
-		list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]);
-		ci->flags = CLUSTER_FLAG_NONFULL;
-	}
+	if (ci->flags != CLUSTER_FLAG_NONFULL)
+		move_cluster(si, ci, &si->nonfull_clusters[ci->order],
+			     CLUSTER_FLAG_NONFULL);
 }
 
 static bool cluster_reclaim_range(struct swap_info_struct *si,
@@ -663,11 +673,13 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
 	if (!(si->flags & SWP_WRITEOK))
 		return false;
 
+	VM_BUG_ON(ci->flags == CLUSTER_FLAG_NONE);
+	VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE);
+
 	if (cluster_is_free(ci)) {
-		if (nr_pages < SWAPFILE_CLUSTER) {
-			list_move_tail(&ci->list, &si->nonfull_clusters[order]);
-			ci->flags = CLUSTER_FLAG_NONFULL;
-		}
+		if (nr_pages < SWAPFILE_CLUSTER)
+			move_cluster(si, ci, &si->nonfull_clusters[order],
+				     CLUSTER_FLAG_NONFULL);
 		ci->order = order;
 	}
 
@@ -675,14 +687,8 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
 	swap_range_alloc(si, nr_pages);
 	ci->count += nr_pages;
 
-	if (ci->count == SWAPFILE_CLUSTER) {
-		VM_BUG_ON(!(ci->flags &
-			  (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG)));
-		if (ci->flags & CLUSTER_FLAG_FRAG)
-			si->frag_cluster_nr[ci->order]--;
-		list_move_tail(&ci->list, &si->full_clusters);
-		ci->flags = CLUSTER_FLAG_FULL;
-	}
+	if (ci->count == SWAPFILE_CLUSTER)
+		move_cluster(si, ci, &si->full_clusters, CLUSTER_FLAG_FULL);
 
 	return true;
 }
@@ -821,9 +827,7 @@ new_cluster:
 	while (!list_empty(&si->nonfull_clusters[order])) {
 		ci = list_first_entry(&si->nonfull_clusters[order],
 				      struct swap_cluster_info, list);
-		list_move_tail(&ci->list, &si->frag_clusters[order]);
-		ci->flags = CLUSTER_FLAG_FRAG;
-		si->frag_cluster_nr[order]++;
+		move_cluster(si, ci, &si->frag_clusters[order], CLUSTER_FLAG_FRAG);
 		offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci),
 						 &found, order, usage);
 		frags++;
```
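
For illustration only, here is a minimal sketch (not part of the patch; the helper name is invented) of the off-list convention move_cluster() relies on: a cluster removed from its list must have its flag reset to CLUSTER_FLAG_NONE before it is re-listed, so move_cluster() knows to list_add_tail() it rather than list_move_tail() it, exactly as swap_do_scheduled_discard() and __free_cluster() do above.

```c
/*
 * Hypothetical helper, for illustration only: isolate a cluster, work on it
 * off-list, then hand it back to the free list through move_cluster().
 */
static void example_isolate_then_free(struct swap_info_struct *si,
				      struct swap_cluster_info *ci)
{
	/* Take the cluster off whatever list it is currently on. */
	list_del(&ci->list);
	/* Must clear the flag while the cluster is off-list. */
	ci->flags = CLUSTER_FLAG_NONE;

	/* ... do whatever requires the cluster to be off-list ... */

	/* NONE tells move_cluster() to list_add_tail() instead of list_move_tail(). */
	move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
}
```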
