summary refs log tree commit diff
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-07 10:24:44 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-07 10:24:44 -0700
commit66d64899eae85dc9b96c5433933787cdcd9b21e4 (patch)
treecd6bef5fee53f8b925f815b0b4852d6cf4baf00c
parentbfe62a454542cfad3379f6ef5680b125f41e20f4 (diff)
parent7bc5da4842bed3252d26e742213741a4d0ac1b14 (diff)
Merge tag 'mm-hotfixes-stable-2026-04-06-15-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc fixes from Andrew Morton:
 "Eight hotfixes. All are cc:stable and seven are for MM. All are
  singletons - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-04-06-15-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  ocfs2: fix out-of-bounds write in ocfs2_write_end_inline
  mm/damon/stat: deallocate damon_call() failure leaking damon_ctx
  mm/vma: fix memory leak in __mmap_region()
  mm/memory_hotplug: maintain N_NORMAL_MEMORY during hotplug
  mm/damon/sysfs: dealloc repeat_call_control if damon_call() fails
  mm: reinstate unconditional writeback start in balance_dirty_pages()
  liveupdate: propagate file deserialization failures
  mm: filemap: fix nr_pages calculation overflow in filemap_map_pages()
-rw-r--r--fs/ocfs2/inode.c10
-rw-r--r--kernel/liveupdate/luo_session.c9
-rw-r--r--mm/damon/stat.c7
-rw-r--r--mm/damon/sysfs.c3
-rw-r--r--mm/filemap.c11
-rw-r--r--mm/memory_hotplug.c20
-rw-r--r--mm/page-writeback.c21
-rw-r--r--mm/vma.c7
8 files changed, 82 insertions, 6 deletions
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 03a51662ea8e..a2ccd8011706 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1505,6 +1505,16 @@ int ocfs2_validate_inode_block(struct super_block *sb,
goto bail;
}
+ if (le16_to_cpu(data->id_count) >
+ ocfs2_max_inline_data_with_xattr(sb, di)) {
+ rc = ocfs2_error(sb,
+ "Invalid dinode #%llu: inline data id_count %u exceeds max %d\n",
+ (unsigned long long)bh->b_blocknr,
+ le16_to_cpu(data->id_count),
+ ocfs2_max_inline_data_with_xattr(sb, di));
+ goto bail;
+ }
+
if (le64_to_cpu(di->i_size) > le16_to_cpu(data->id_count)) {
rc = ocfs2_error(sb,
"Invalid dinode #%llu: inline data i_size %llu exceeds id_count %u\n",
diff --git a/kernel/liveupdate/luo_session.c b/kernel/liveupdate/luo_session.c
index 783677295640..25ae704d7787 100644
--- a/kernel/liveupdate/luo_session.c
+++ b/kernel/liveupdate/luo_session.c
@@ -558,8 +558,13 @@ int luo_session_deserialize(void)
}
scoped_guard(mutex, &session->mutex) {
- luo_file_deserialize(&session->file_set,
- &sh->ser[i].file_set_ser);
+ err = luo_file_deserialize(&session->file_set,
+ &sh->ser[i].file_set_ser);
+ }
+ if (err) {
+ pr_warn("Failed to deserialize files for session [%s] %pe\n",
+ session->name, ERR_PTR(err));
+ return err;
}
}
diff --git a/mm/damon/stat.c b/mm/damon/stat.c
index cf2c5a541eee..60351a719460 100644
--- a/mm/damon/stat.c
+++ b/mm/damon/stat.c
@@ -245,6 +245,12 @@ static int damon_stat_start(void)
{
int err;
+ if (damon_stat_context) {
+ if (damon_is_running(damon_stat_context))
+ return -EAGAIN;
+ damon_destroy_ctx(damon_stat_context);
+ }
+
damon_stat_context = damon_stat_build_ctx();
if (!damon_stat_context)
return -ENOMEM;
@@ -261,6 +267,7 @@ static void damon_stat_stop(void)
{
damon_stop(&damon_stat_context, 1);
damon_destroy_ctx(damon_stat_context);
+ damon_stat_context = NULL;
}
static int damon_stat_enabled_store(
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 6a44a2f3d8fc..eefa959aa30a 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1670,7 +1670,8 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
repeat_call_control->data = kdamond;
repeat_call_control->repeat = true;
repeat_call_control->dealloc_on_cancel = true;
- damon_call(ctx, repeat_call_control);
+ if (damon_call(ctx, repeat_call_control))
+ kfree(repeat_call_control);
return err;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 406cef06b684..3c1e785542dd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3883,14 +3883,19 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
unsigned int nr_pages = 0, folio_type;
unsigned short mmap_miss = 0, mmap_miss_saved;
+ /*
+ * Recalculate end_pgoff based on file_end before calling
+ * next_uptodate_folio() to avoid races with concurrent
+ * truncation.
+ */
+ file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
+ end_pgoff = min(end_pgoff, file_end);
+
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
if (!folio)
goto out;
- file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
- end_pgoff = min(end_pgoff, file_end);
-
/*
* Do not allow to map with PMD across i_size to preserve
* SIGBUS semantics.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index bc805029da51..05a47953ef21 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1209,6 +1209,13 @@ int online_pages(unsigned long pfn, unsigned long nr_pages,
if (node_arg.nid >= 0)
node_set_state(nid, N_MEMORY);
+ /*
+ * Check whether we are adding normal memory to the node for the first
+ * time.
+ */
+ if (!node_state(nid, N_NORMAL_MEMORY) && zone_idx(zone) <= ZONE_NORMAL)
+ node_set_state(nid, N_NORMAL_MEMORY);
+
if (need_zonelists_rebuild)
build_all_zonelists(NULL);
@@ -1908,6 +1915,8 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
unsigned long flags;
char *reason;
int ret;
+ unsigned long normal_pages = 0;
+ enum zone_type zt;
/*
* {on,off}lining is constrained to full memory sections (or more
@@ -2056,6 +2065,17 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
init_per_zone_wmark_min();
/*
+ * Check whether this operation removes the last normal memory from
+ * the node. We do this before clearing N_MEMORY to avoid the possible
+ * transient "!N_MEMORY && N_NORMAL_MEMORY" state.
+ */
+ if (zone_idx(zone) <= ZONE_NORMAL) {
+ for (zt = 0; zt <= ZONE_NORMAL; zt++)
+ normal_pages += pgdat->node_zones[zt].present_pages;
+ if (!normal_pages)
+ node_clear_state(node, N_NORMAL_MEMORY);
+ }
+ /*
* Make sure to mark the node as memory-less before rebuilding the zone
* list. Otherwise this node would still appear in the fallback lists.
*/
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 601a5e048d12..c1a4b32af1a7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1858,6 +1858,27 @@ free_running:
break;
}
+ /*
+ * Unconditionally start background writeback if it's not
+ * already in progress. We need to do this because the global
+ * dirty threshold check above (nr_dirty > gdtc->bg_thresh)
+ * doesn't account for these cases:
+ *
+ * a) strictlimit BDIs: throttling is calculated using per-wb
+ * thresholds. The per-wb threshold can be exceeded even when
+ * nr_dirty < gdtc->bg_thresh
+ *
+ * b) memcg-based throttling: memcg uses its own dirty count and
+ * thresholds and can trigger throttling even when global
+ * nr_dirty < gdtc->bg_thresh
+ *
+ * Writeback needs to be started else the writer stalls in the
+ * throttle loop waiting for dirty pages to be written back
+ * while no writeback is running.
+ */
+ if (unlikely(!writeback_in_progress(wb)))
+ wb_start_background_writeback(wb);
+
mem_cgroup_flush_foreign(wb);
/*
diff --git a/mm/vma.c b/mm/vma.c
index be64f781a3aa..c8df5f561ad7 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -2781,6 +2781,13 @@ unacct_error:
if (map.charged)
vm_unacct_memory(map.charged);
abort_munmap:
+ /*
+ * This indicates that .mmap_prepare has set a new file, differing from
+ * desc->vm_file. But since we're aborting the operation, only the
+ * original file will be cleaned up. Ensure we clean up both.
+ */
+ if (map.file_doesnt_need_get)
+ fput(map.file);
vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
return error;
}