summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-03-24 09:12:45 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-03-24 09:12:45 -0700
commite3c33bc767b5512dbfec643a02abf58ce608f3b2 (patch)
tree5a3d90a42b887c035dab5e59a9c2f35bc08ce109
parent26a01984ddc1e67025fd150e845ab61d5271d6b7 (diff)
parent84481e705ab07ed46e56587fe846af194acacafe (diff)
Merge tag 'mm-hotfixes-stable-2026-03-23-17-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM fixes from Andrew Morton:
 "6 hotfixes. 2 are cc:stable. All are for MM. All are singletons -
  please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-03-23-17-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/damon/stat: monitor all System RAM resources
  mm/zswap: add missing kunmap_local()
  mailmap: update email address for Muhammad Usama Anjum
  zram: do not slot_free() written-back slots
  mm/damon/core: avoid use of half-online-committed context
  mm/rmap: clear vma->anon_vma on error
-rw-r--r--.mailmap1
-rw-r--r--drivers/block/zram/zram_drv.c39
-rw-r--r--include/linux/damon.h6
-rw-r--r--mm/damon/core.c8
-rw-r--r--mm/damon/stat.c53
-rw-r--r--mm/rmap.c7
-rw-r--r--mm/zswap.c8
7 files changed, 93 insertions, 29 deletions
diff --git a/.mailmap b/.mailmap
index 40b4db2b2d60..7d14504daf24 100644
--- a/.mailmap
+++ b/.mailmap
@@ -587,6 +587,7 @@ Morten Welinder <terra@gnome.org>
Morten Welinder <welinder@anemone.rentec.com>
Morten Welinder <welinder@darter.rentec.com>
Morten Welinder <welinder@troll.com>
+Muhammad Usama Anjum <usama.anjum@arm.com> <usama.anjum@collabora.com>
Mukesh Ojha <quic_mojha@quicinc.com> <mojha@codeaurora.org>
Muna Sinada <quic_msinada@quicinc.com> <msinada@codeaurora.org>
Murali Nalajala <quic_mnalajal@quicinc.com> <mnalajal@codeaurora.org>
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a324ede6206d..af679375b193 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -917,9 +917,8 @@ static void zram_account_writeback_submit(struct zram *zram)
static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
{
- u32 size, index = req->pps->index;
- int err, prio;
- bool huge;
+ u32 index = req->pps->index;
+ int err;
err = blk_status_to_errno(req->bio.bi_status);
if (err) {
@@ -946,28 +945,13 @@ static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
goto out;
}
- if (zram->compressed_wb) {
- /*
- * ZRAM_WB slots get freed, we need to preserve data required
- * for read decompression.
- */
- size = get_slot_size(zram, index);
- prio = get_slot_comp_priority(zram, index);
- huge = test_slot_flag(zram, index, ZRAM_HUGE);
- }
-
- slot_free(zram, index);
- set_slot_flag(zram, index, ZRAM_WB);
+ clear_slot_flag(zram, index, ZRAM_IDLE);
+ if (test_slot_flag(zram, index, ZRAM_HUGE))
+ atomic64_dec(&zram->stats.huge_pages);
+ atomic64_sub(get_slot_size(zram, index), &zram->stats.compr_data_size);
+ zs_free(zram->mem_pool, get_slot_handle(zram, index));
set_slot_handle(zram, index, req->blk_idx);
-
- if (zram->compressed_wb) {
- if (huge)
- set_slot_flag(zram, index, ZRAM_HUGE);
- set_slot_size(zram, index, size);
- set_slot_comp_priority(zram, index, prio);
- }
-
- atomic64_inc(&zram->stats.pages_stored);
+ set_slot_flag(zram, index, ZRAM_WB);
out:
slot_unlock(zram, index);
@@ -2010,8 +1994,13 @@ static void slot_free(struct zram *zram, u32 index)
set_slot_comp_priority(zram, index, 0);
if (test_slot_flag(zram, index, ZRAM_HUGE)) {
+ /*
+ * Writeback completion decrements ->huge_pages but keeps
+ * ZRAM_HUGE flag for deferred decompression path.
+ */
+ if (!test_slot_flag(zram, index, ZRAM_WB))
+ atomic64_dec(&zram->stats.huge_pages);
clear_slot_flag(zram, index, ZRAM_HUGE);
- atomic64_dec(&zram->stats.huge_pages);
}
if (test_slot_flag(zram, index, ZRAM_WB)) {
diff --git a/include/linux/damon.h b/include/linux/damon.h
index a4fea23da857..be3d198043ff 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -810,6 +810,12 @@ struct damon_ctx {
struct damos_walk_control *walk_control;
struct mutex walk_control_lock;
+ /*
+	 * indicate if this may be corrupted. Currently this is set only for
+ * damon_commit_ctx() failure.
+ */
+ bool maybe_corrupted;
+
/* Working thread of the given DAMON context */
struct task_struct *kdamond;
/* Protects @kdamond field access */
diff --git a/mm/damon/core.c b/mm/damon/core.c
index c1d1091d307e..3e1890d64d06 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1252,6 +1252,7 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
int err;
+ dst->maybe_corrupted = true;
if (!is_power_of_2(src->min_region_sz))
return -EINVAL;
@@ -1277,6 +1278,7 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
dst->addr_unit = src->addr_unit;
dst->min_region_sz = src->min_region_sz;
+ dst->maybe_corrupted = false;
return 0;
}
@@ -2678,6 +2680,8 @@ static void kdamond_call(struct damon_ctx *ctx, bool cancel)
complete(&control->completion);
else if (control->canceled && control->dealloc_on_cancel)
kfree(control);
+ if (!cancel && ctx->maybe_corrupted)
+ break;
}
mutex_lock(&ctx->call_controls_lock);
@@ -2707,6 +2711,8 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
kdamond_usleep(min_wait_time);
kdamond_call(ctx, false);
+ if (ctx->maybe_corrupted)
+ return -EINVAL;
damos_walk_cancel(ctx);
}
return -EBUSY;
@@ -2790,6 +2796,8 @@ static int kdamond_fn(void *data)
* kdamond_merge_regions() if possible, to reduce overhead
*/
kdamond_call(ctx, false);
+ if (ctx->maybe_corrupted)
+ break;
if (!list_empty(&ctx->schemes))
kdamond_apply_schemes(ctx);
else
diff --git a/mm/damon/stat.c b/mm/damon/stat.c
index 25fb44ccf99d..cf2c5a541eee 100644
--- a/mm/damon/stat.c
+++ b/mm/damon/stat.c
@@ -145,12 +145,59 @@ static int damon_stat_damon_call_fn(void *data)
return 0;
}
+struct damon_stat_system_ram_range_walk_arg {
+ bool walked;
+ struct resource res;
+};
+
+static int damon_stat_system_ram_walk_fn(struct resource *res, void *arg)
+{
+ struct damon_stat_system_ram_range_walk_arg *a = arg;
+
+ if (!a->walked) {
+ a->walked = true;
+ a->res.start = res->start;
+ }
+ a->res.end = res->end;
+ return 0;
+}
+
+static unsigned long damon_stat_res_to_core_addr(resource_size_t ra,
+ unsigned long addr_unit)
+{
+ /*
+ * Use div_u64() for avoiding linking errors related with __udivdi3,
+	 * __aeabi_uldivmod, or similar problems. This should also improve
+	 * performance (read the div_u64() comment for details).
+ */
+ if (sizeof(ra) == 8 && sizeof(addr_unit) == 4)
+ return div_u64(ra, addr_unit);
+ return ra / addr_unit;
+}
+
+static int damon_stat_set_monitoring_region(struct damon_target *t,
+ unsigned long addr_unit, unsigned long min_region_sz)
+{
+ struct damon_addr_range addr_range;
+ struct damon_stat_system_ram_range_walk_arg arg = {};
+
+ walk_system_ram_res(0, -1, &arg, damon_stat_system_ram_walk_fn);
+ if (!arg.walked)
+ return -EINVAL;
+ addr_range.start = damon_stat_res_to_core_addr(
+ arg.res.start, addr_unit);
+ addr_range.end = damon_stat_res_to_core_addr(
+ arg.res.end + 1, addr_unit);
+ if (addr_range.end <= addr_range.start)
+ return -EINVAL;
+ return damon_set_regions(t, &addr_range, 1, min_region_sz);
+}
+
static struct damon_ctx *damon_stat_build_ctx(void)
{
struct damon_ctx *ctx;
struct damon_attrs attrs;
struct damon_target *target;
- unsigned long start = 0, end = 0;
ctx = damon_new_ctx();
if (!ctx)
@@ -180,8 +227,8 @@ static struct damon_ctx *damon_stat_build_ctx(void)
if (!target)
goto free_out;
damon_add_target(ctx, target);
- if (damon_set_region_biggest_system_ram_default(target, &start, &end,
- ctx->min_region_sz))
+ if (damon_stat_set_monitoring_region(target, ctx->addr_unit,
+ ctx->min_region_sz))
goto free_out;
return ctx;
free_out:
diff --git a/mm/rmap.c b/mm/rmap.c
index 391337282e3f..8f08090d7eb9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -457,6 +457,13 @@ static void cleanup_partial_anon_vmas(struct vm_area_struct *vma)
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
+
+ /*
+ * The anon_vma assigned to this VMA is no longer valid, as we were not
+ * able to correctly clone AVC state. Avoid inconsistent anon_vma tree
+ * state by resetting.
+ */
+ vma->anon_vma = NULL;
}
/**
diff --git a/mm/zswap.c b/mm/zswap.c
index e6ec3295bdb0..16b2ef7223e1 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -942,9 +942,15 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
/* zswap entries of length PAGE_SIZE are not compressed. */
if (entry->length == PAGE_SIZE) {
+ void *dst;
+
WARN_ON_ONCE(input->length != PAGE_SIZE);
- memcpy_from_sglist(kmap_local_folio(folio, 0), input, 0, PAGE_SIZE);
+
+ dst = kmap_local_folio(folio, 0);
+ memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
dlen = PAGE_SIZE;
+ kunmap_local(dst);
+ flush_dcache_folio(folio);
} else {
sg_init_table(&output, 1);
sg_set_folio(&output, folio, PAGE_SIZE, 0);