path: root/mm/huge_memory.c
author	Wei Yang <richard.weiyang@gmail.com>	2025-10-10 14:11:42 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2025-11-24 15:08:49 -0800
commit	d87f4a8f19668cdc5b8afd0d751e9d9c6a1b7595 (patch)
tree	50ce9e273eb320ac240556ac975e6c885eb80721 /mm/huge_memory.c
parent	ac7756771a34f19c9a757eb86efe028e51f57b23 (diff)
mm/huge_memory: only get folio_order() once during __folio_split()
Before a folio is split, its order stays the same, so it is only necessary
to call folio_order() once. Also rename "order" to "old_order" to make
clear it refers to the original folio order.

Link: https://lkml.kernel.org/r/20251010141142.1349-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: Lance Yang <lance.yang@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Barry Song <baohua@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
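The change is a straightforward hoist of an invariant: until the split
actually happens, folio_order(folio) cannot change, so the value read once
at the top of __folio_split() is valid at every later use. A condensed
before/after sketch of the pattern (simplified from the hunks below; error
handling and intervening code elided):

	/* Before: folio_order(folio) re-evaluated at each use, same result */
	if (new_order >= folio_order(folio))
		return -EINVAL;
	...
	xas_split_alloc(&xas, folio, folio_order(folio), gfp);

	/*
	 * After: read once; the name "old_order" also documents that the
	 * pre-split order is meant, which is what the post-split accounting
	 * (remap_page(), count_mthp_stat()) needs.
	 */
	int old_order = folio_order(folio);

	if (new_order >= old_order)
		return -EINVAL;
	...
	xas_split_alloc(&xas, folio, old_order, gfp);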
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 05bf419513ad..30d6afc79016 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3682,7 +3682,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
 	struct anon_vma *anon_vma = NULL;
-	int order = folio_order(folio);
+	int old_order = folio_order(folio);
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
 	int remap_flags = 0;
@@ -3706,7 +3706,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (!is_anon && !folio->mapping)
 		return -EBUSY;
 
-	if (new_order >= folio_order(folio))
+	if (new_order >= old_order)
 		return -EINVAL;
 
 	if (uniform_split && !uniform_split_supported(folio, new_order, true))
@@ -3764,7 +3764,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 
 		if (uniform_split) {
 			xas_set_order(&xas, folio->index, new_order);
-			xas_split_alloc(&xas, folio, folio_order(folio), gfp);
+			xas_split_alloc(&xas, folio, old_order, gfp);
 			if (xas_error(&xas)) {
 				ret = xas_error(&xas);
 				goto out;
@@ -3820,13 +3820,13 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		struct lruvec *lruvec;
 		int expected_refs;
 
-		if (folio_order(folio) > 1 &&
+		if (old_order > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
 			if (folio_test_partially_mapped(folio)) {
 				folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
+				mod_mthp_stat(old_order,
+					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
 			}
 			/*
 			 * Reinitialize page_deferred_list after removing the
@@ -3954,7 +3954,7 @@ fail:
 	if (!ret && is_anon && !folio_is_device_private(folio))
 		remap_flags = RMP_USE_SHARED_ZEROPAGE;
 
-	remap_page(folio, 1 << order, remap_flags);
+	remap_page(folio, 1 << old_order, remap_flags);
 
 	/*
 	 * Unlock all after-split folios except the one containing
@@ -3985,9 +3985,9 @@ out_unlock:
 		i_mmap_unlock_read(mapping);
 out:
 	xas_destroy(&xas);
-	if (order == HPAGE_PMD_ORDER)
+	if (old_order == HPAGE_PMD_ORDER)
 		count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
-	count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
+	count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
 	return ret;
 }