From b0101ccb5b4641885f30fecc352ef891ed06e083 Mon Sep 17 00:00:00 2001
From: Liang Jie
Date: Tue, 16 Dec 2025 17:39:55 +0800
Subject: sched_ext: fix uninitialized ret on alloc_percpu() failure

Smatch reported:

  kernel/sched/ext.c:5332 scx_alloc_and_add_sched()
  warn: passing zero to 'ERR_PTR'

In scx_alloc_and_add_sched(), the alloc_percpu() failure path jumps to
err_free_gdsqs without initializing @ret. That can lead to returning
ERR_PTR(0), which violates the ERR_PTR() convention and confuses
callers.

Set @ret to -ENOMEM before jumping to the error path when
alloc_percpu() fails.

Reported-by: kernel test robot
Closes: https://lore.kernel.org/r/202512141601.yAXDAeA9-lkp@intel.com/
Reported-by: Dan Carpenter
Fixes: c201ea1578d3 ("sched_ext: Move event_stats_cpu into scx_sched")
Signed-off-by: Liang Jie
Reviewed-by: Emil Tsalapatis
Reviewed-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 94164f2dec6d..7a53d1cf8e82 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4783,8 +4783,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 	}
 
 	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
-	if (!sch->pcpu)
+	if (!sch->pcpu) {
+		ret = -ENOMEM;
 		goto err_free_gdsqs;
+	}
 
 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
 	if (IS_ERR(sch->helper)) {
--
cgit v1.2.3

From aa7d3a56a20f07978d9f401e13637a6479b13bd0 Mon Sep 17 00:00:00 2001
From: Chen Ridong
Date: Thu, 18 Dec 2025 01:59:50 +0000
Subject: cpuset: fix warning when disabling remote partition

A warning was triggered as follows:

  WARNING: kernel/cgroup/cpuset.c:1651 at remote_partition_disable+0xf7/0x110
  RIP: 0010:remote_partition_disable+0xf7/0x110
  RSP: 0018:ffffc90001947d88 EFLAGS: 00000206
  RAX: 0000000000007fff RBX: ffff888103b6e000 RCX: 0000000000006f40
  RDX: 0000000000006f00 RSI: ffffc90001947da8 RDI: ffff888103b6e000
  RBP: ffff888103b6e000 R08: 0000000000000000 R09: 0000000000000000
  R10: 0000000000000001 R11: ffff88810b2e2728 R12: ffffc90001947da8
  R13: 0000000000000000 R14: ffffc90001947da8 R15: ffff8881081f1c00
  CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 00007f55c8bbe0b2 CR3: 000000010b14c000 CR4: 00000000000006f0
  Call Trace:
   update_prstate+0x2d3/0x580
   cpuset_partition_write+0x94/0xf0
   kernfs_fop_write_iter+0x147/0x200
   vfs_write+0x35d/0x500
   ksys_write+0x66/0xe0
   do_syscall_64+0x6b/0x390
   entry_SYSCALL_64_after_hwframe+0x4b/0x53
  RIP: 0033:0x7f55c8cd4887

Reproduction steps (on a 16-CPU machine):

  # cd /sys/fs/cgroup/
  # mkdir A1
  # echo +cpuset > A1/cgroup.subtree_control
  # echo "0-14" > A1/cpuset.cpus.exclusive
  # mkdir A1/A2
  # echo "0-14" > A1/A2/cpuset.cpus.exclusive
  # echo "root" > A1/A2/cpuset.cpus.partition
  # echo 0 > /sys/devices/system/cpu/cpu15/online
  # echo member > A1/A2/cpuset.cpus.partition

When CPU 15 is offlined, subpartitions_cpus gets cleared because no
CPUs remain available for the top_cpuset, forcing partitions to share
CPUs with the top_cpuset. In this scenario, disabling the remote
partition triggers a warning stating that effective_xcpus is not a
subset of subpartitions_cpus. Partitions should be invalidated in this
case to inform users that the partition is now invalid (CPUs are
shared with the top_cpuset).

To fix this issue:

1. Only emit the warning if subpartitions_cpus is not empty and
   effective_xcpus is not a subset of subpartitions_cpus.
2. During the CPU hotplug process, invalidate partitions if
   subpartitions_cpus is empty.

Fixes: f62a5d39368e ("cgroup/cpuset: Remove remote_partition_check() & make update_cpumasks_hier() handle remote partition")
Signed-off-by: Chen Ridong
Reviewed-by: Waiman Long
Signed-off-by: Tejun Heo
---
 kernel/cgroup/cpuset.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6e6eb09b8db6..3e8cc34d8d50 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1668,7 +1668,14 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 {
 	WARN_ON_ONCE(!is_remote_partition(cs));
-	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
+	/*
+	 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
+	 * which should clear subpartitions_cpus. We should not emit a warning for this
+	 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
+	 * may already be cleared when disabling the partition.
+	 */
+	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
+		     !cpumask_empty(subpartitions_cpus));
 
 	spin_lock_irq(&callback_lock);
 	cs->remote_partition = false;
@@ -3976,8 +3983,9 @@ retry:
 	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
 		compute_partition_effective_cpumask(cs, &new_cpus);
 
-	if (remote && cpumask_empty(&new_cpus) &&
-	    partition_is_populated(cs, NULL)) {
+	if (remote && (cpumask_empty(subpartitions_cpus) ||
+		       (cpumask_empty(&new_cpus) &&
+			partition_is_populated(cs, NULL)))) {
 		cs->prs_err = PERR_HOTPLUG;
 		remote_partition_disable(cs, tmp);
 		compute_effective_cpumask(&new_cpus, cs, parent);
@@ -3990,9 +3998,12 @@ retry:
 	 * 1) empty effective cpus but not valid empty partition.
 	 * 2) parent is invalid or doesn't grant any cpus to child
 	 *    partitions.
+	 * 3) subpartitions_cpus is empty.
 	 */
-	if (is_local_partition(cs) && (!is_partition_valid(parent) ||
-				       tasks_nocpu_error(parent, cs, &new_cpus)))
+	if (is_local_partition(cs) &&
+	    (!is_partition_valid(parent) ||
+	     tasks_nocpu_error(parent, cs, &new_cpus) ||
+	     cpumask_empty(subpartitions_cpus)))
 		partcmd = partcmd_invalidate;
 	/*
 	 * On the other hand, an invalid partition root may be transitioned
--
cgit v1.2.3

From 12494e5e2aea17dac54c0356e53e40a31c2a31e4 Mon Sep 17 00:00:00 2001
From: Zqiang
Date: Fri, 19 Dec 2025 17:34:04 +0800
Subject: sched_ext: Fix some comments in ext.c

This commit updates balance_scx() in the comments to balance_one().

Signed-off-by: Zqiang
Reviewed-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7a53d1cf8e82..5ebf8a740847 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1577,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	 *
 	 * @p may go through multiple stopping <-> running transitions between
 	 * here and put_prev_task_scx() if task attribute changes occur while
-	 * balance_scx() leaves @rq unlocked. However, they don't contain any
+	 * balance_one() leaves @rq unlocked. However, they don't contain any
 	 * information meaningful to the BPF scheduler and can be suppressed by
 	 * skipping the callbacks if the task is !QUEUED.
 	 */
@@ -2372,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	 * preempted, and it regaining control of the CPU.
 	 *
 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
-	 * next time that balance_scx() is invoked.
+	 * next time that balance_one() is invoked.
 	 */
 	if (!rq->scx.cpu_released) {
 		if (SCX_HAS_OP(sch, cpu_release)) {
@@ -2478,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
 	}
 
 	/*
-	 * If balance_scx() is telling us to keep running @prev, replenish slice
+	 * If balance_one() is telling us to keep running @prev, replenish slice
 	 * if necessary and keep running @prev. Otherwise, pop the first one
 	 * from the local DSQ.
 	 */
@@ -4025,7 +4025,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
 *
 * - ops.dispatch() is ignored.
 *
- * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
 *   can't be trusted. Whenever a tick triggers, the running task is rotated to
 *   the tail of the queue with core_sched_at touched.
 *
@@ -6069,7 +6069,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
 	/*
 	 * A successfully consumed task can be dequeued before it starts
 	 * running while the CPU is trying to migrate other dispatched
-	 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+	 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
 	 * local DSQ.
 	 */
 	dspc->nr_tasks++;
--
cgit v1.2.3

From ccaeeb585c7c2a0ac67ee1af9acb4d1411dc409e Mon Sep 17 00:00:00 2001
From: Zqiang
Date: Mon, 22 Dec 2025 19:53:17 +0800
Subject: sched_ext: Use the resched_cpu() to replace resched_curr() in the bypass_lb_node()

On PREEMPT_RT kernels, scx_bypass_lb_timerfn() runs in the preemptible
per-CPU ktimer kthread context, which means the following scenario can
occur (on the x86 platform):

  cpu1                                      cpu2

  ktimer kthread:
  ->scx_bypass_lb_timerfn
    ->bypass_lb_node
      ->for_each_cpu(cpu, resched_mask)
  preempted by                              preempted by
  migration/1:                              migration/2:
  multi_cpu_stop()                          multi_cpu_stop()
                                            ->take_cpu_down()
                                              ->__cpu_disable()
                                                ->set cpu1 offline
  ->rq1 = cpu_rq(cpu1)
  ->resched_curr(rq1)
    ->smp_send_reschedule(cpu1)
      ->native_smp_send_reschedule(cpu1)
        ->if (unlikely(cpu_is_offline(cpu))) {
              WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
              return;
          }

This commit therefore uses resched_cpu() to replace resched_curr() in
bypass_lb_node(), avoiding sending an IPI to an offline CPU.

Signed-off-by: Zqiang
Reviewed-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 5ebf8a740847..8f6d8d7f895c 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3956,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
 			    nr_donor_target, nr_target);
 	}
 
-	for_each_cpu(cpu, resched_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		raw_spin_rq_lock_irq(rq);
-		resched_curr(rq);
-		raw_spin_rq_unlock_irq(rq);
-	}
+	for_each_cpu(cpu, resched_mask)
+		resched_cpu(cpu);
 
 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
--
cgit v1.2.3

From fe55ea85939efcbf0e6baa234f0d70acb79e7b58 Mon Sep 17 00:00:00 2001
From: Pingfan Liu
Date: Tue, 16 Dec 2025 09:48:51 +0800
Subject: kernel/kexec: change the prototype of kimage_map_segment()

The kexec segment index will be required to extract the corresponding
information for that segment in kimage_map_segment().
Additionally, kexec_segment already holds the kexec relocation
destination address and size. Therefore, the prototype of
kimage_map_segment() can be changed.

Link: https://lkml.kernel.org/r/20251216014852.8737-1-piliu@redhat.com
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation")
Signed-off-by: Pingfan Liu
Acked-by: Baoquan He
Cc: Mimi Zohar
Cc: Roberto Sassu
Cc: Alexander Graf
Cc: Steven Chen
Cc:
Signed-off-by: Andrew Morton
---
 kernel/kexec_core.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 0f92acdd354d..1a79c5b18d8f 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -953,17 +953,20 @@ int kimage_load_segment(struct kimage *image, int idx)
 	return result;
 }
 
-void *kimage_map_segment(struct kimage *image,
-			 unsigned long addr, unsigned long size)
+void *kimage_map_segment(struct kimage *image, int idx)
 {
+	unsigned long addr, size, eaddr;
 	unsigned long src_page_addr, dest_page_addr = 0;
-	unsigned long eaddr = addr + size;
 	kimage_entry_t *ptr, entry;
 	struct page **src_pages;
 	unsigned int npages;
 	void *vaddr = NULL;
 	int i;
 
+	addr = image->segment[idx].mem;
+	size = image->segment[idx].memsz;
+	eaddr = addr + size;
+
 	/*
	 * Collect the source pages and map them in a contiguous VA range.
	 */
--
cgit v1.2.3

From a3785ae5d334bb71d47a593d54c686a03fb9d136 Mon Sep 17 00:00:00 2001
From: Pingfan Liu
Date: Tue, 16 Dec 2025 09:48:52 +0800
Subject: kernel/kexec: fix IMA when allocation happens in CMA area

*** Bug description ***

When I tested kexec with the latest kernel, I ran into the following
warning:

  [   40.712410] ------------[ cut here ]------------
  [   40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198
  [...]
  [   40.816047] Call trace:
  [   40.818498]  kimage_map_segment+0x144/0x198 (P)
  [   40.823221]  ima_kexec_post_load+0x58/0xc0
  [   40.827246]  __do_sys_kexec_file_load+0x29c/0x368
  [...]
  [   40.855423] ---[ end trace 0000000000000000 ]---

*** How to reproduce ***

This bug is only triggered when the kexec target address is allocated
in the CMA area. If no CMA area is reserved in the kernel, use the
"cma=" option in the kernel command line to reserve one.

*** Root cause ***

The commit 07d24902977e ("kexec: enable CMA based contiguous
allocation") allocates the kexec target address directly in the CMA
area to avoid copying during the jump. In this case, there is no
IND_SOURCE for the kexec segment. But the current implementation of
kimage_map_segment() assumes that IND_SOURCE pages exist and maps them
into a contiguous virtual address range with vmap().

*** Solution ***

If the IMA segment is allocated in the CMA area, use its
page_address() directly.
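As an illustration of the resulting API, a caller would pair the two
helpers roughly as follows. The function below is a hypothetical
example sketched from the prototypes in this series, not the in-tree
IMA call site:

  /*
   * Hypothetical caller: map kexec segment @idx, access it, unmap it.
   * For a CMA-backed segment, kimage_map_segment() returns the linear
   * page_address() mapping; kimage_unmap_segment() then leaves that
   * pointer alone because is_vmalloc_addr() is false for it.
   */
  static int example_access_segment(struct kimage *image, int idx)
  {
          void *buf = kimage_map_segment(image, idx);

          if (!buf)
                  return -ENOMEM;

          /* ... read/update image->segment[idx].memsz bytes at buf ... */

          kimage_unmap_segment(buf);
          return 0;
  }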
Link: https://lkml.kernel.org/r/20251216014852.8737-2-piliu@redhat.com
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation")
Signed-off-by: Pingfan Liu
Acked-by: Baoquan He
Cc: Alexander Graf
Cc: Steven Chen
Cc: Mimi Zohar
Cc: Roberto Sassu
Cc:
Signed-off-by: Andrew Morton
---
 kernel/kexec_core.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 1a79c5b18d8f..95c585c6ddc3 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -960,13 +960,17 @@ void *kimage_map_segment(struct kimage *image, int idx)
 	kimage_entry_t *ptr, entry;
 	struct page **src_pages;
 	unsigned int npages;
+	struct page *cma;
 	void *vaddr = NULL;
 	int i;
 
+	cma = image->segment_cma[idx];
+	if (cma)
+		return page_address(cma);
+
 	addr = image->segment[idx].mem;
 	size = image->segment[idx].memsz;
 	eaddr = addr + size;
-
 	/*
	 * Collect the source pages and map them in a contiguous VA range.
	 */
@@ -1007,7 +1011,8 @@ void *kimage_map_segment(struct kimage *image, int idx)
 
 void kimage_unmap_segment(void *segment_buffer)
 {
-	vunmap(segment_buffer);
+	if (is_vmalloc_addr(segment_buffer))
+		vunmap(segment_buffer);
 }
 
 struct kexec_load_limit {
--
cgit v1.2.3

From 684d3b2670a26313bbb99de6d66f384ac0e31c9b Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Fri, 26 Dec 2025 14:50:57 +0100
Subject: PM: sleep: Fix suspend_test() at the TEST_CORE level

Commit a10ad1b10402 ("PM: suspend: Make pm_test delay interruptible by
wakeup events") replaced mdelay() in suspend_test() with msleep(),
which does not work at the TEST_CORE test level that calls
suspend_test() while running on one CPU with interrupts off.

Address this by making suspend_test() check if the test level is
suitable for using msleep() and use mdelay() otherwise.

Fixes: a10ad1b10402 ("PM: suspend: Make pm_test delay interruptible by wakeup events")
Reported-by: Sebastian Reichel
Closes: https://lore.kernel.org/linux-pm/aUsAk0k1N9hw8IkY@venus/
Signed-off-by: Rafael J. Wysocki
Tested-by: Sebastian Reichel
Link: https://patch.msgid.link/6251576.lOV4Wx5bFT@rafael.j.wysocki
---
 kernel/power/suspend.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2da4482bb6eb..57c44268698f 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -349,9 +349,12 @@ static int suspend_test(int level)
 	if (pm_test_level == level) {
 		pr_info("suspend debug: Waiting for %d second(s).\n",
 			pm_test_delay);
-		for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++)
-			msleep(1000);
-
+		for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++) {
+			if (level > TEST_CORE)
+				msleep(1000);
+			else
+				mdelay(1000);
+		}
 		return 1;
 	}
 #endif /* !CONFIG_PM_DEBUG */
--
cgit v1.2.3

From ff5860f5088e9076ebcccf05a6ca709d5935cfa9 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Sat, 20 Dec 2025 14:14:41 +0100
Subject: perf: Ensure swevent hrtimer is properly destroyed

With the change to hrtimer_try_to_cancel() in
perf_swevent_cancel_hrtimer(), it appears possible for the hrtimer to
still be active by the time the event gets freed.

Make sure the event does a full hrtimer_cancel() on the free path by
installing a perf_event::destroy handler.
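For background, the distinction that makes the destroy handler
necessary can be sketched outside the perf code; the container
structure below is illustrative, not a perf type:

  #include <linux/hrtimer.h>
  #include <linux/slab.h>

  struct example {                        /* illustrative container */
          struct hrtimer timer;
  };

  static void example_destroy(struct example *e)
  {
          /*
           * hrtimer_try_to_cancel() returns -1 without waiting when the
           * timer callback is currently running, so the object could
           * still be in use. hrtimer_cancel() waits until the callback
           * has finished, which is what a free path needs.
           */
          hrtimer_cancel(&e->timer);      /* waits out a running callback */
          kfree(e);                       /* only now is freeing safe */
  }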
Fixes: eb3182ef0405 ("perf/core: Fix system hang caused by cpu-clock usage")
Reported-by: CyberUnicorns
Tested-by: CyberUnicorns
Debugged-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/events/core.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'kernel')

diff --git a/kernel/events/core.c b/kernel/events/core.c
index dad0d3d2e85f..e3d8338fd51c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11906,6 +11906,11 @@ static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 	}
 }
 
+static void perf_swevent_destroy_hrtimer(struct perf_event *event)
+{
+	hrtimer_cancel(&event->hw.hrtimer);
+}
+
 static void perf_swevent_init_hrtimer(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
@@ -11914,6 +11919,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 		return;
 
 	hrtimer_setup(&hwc->hrtimer, perf_swevent_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+	event->destroy = perf_swevent_destroy_hrtimer;
 
 	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
--
cgit v1.2.3

From 7966cf0ebe32c981bfa3db252cb5fc3bb1bf2e77 Mon Sep 17 00:00:00 2001
From: Malaya Kumar Rout
Date: Tue, 30 Dec 2025 17:26:13 +0530
Subject: PM: hibernate: Fix crash when freeing invalid crypto compressor

When crypto_alloc_acomp() fails, it returns an ERR_PTR value, not
NULL. The cleanup code in save_compressed_image() and
load_compressed_image() unconditionally calls crypto_free_acomp()
without checking for ERR_PTR, which causes crypto_acomp_tfm() to
dereference an invalid pointer and crash the kernel.

This can be triggered when the compression algorithm is unavailable
(e.g., CONFIG_CRYPTO_LZO not enabled).

Fix by adding IS_ERR_OR_NULL() checks before calling
crypto_free_acomp() and acomp_request_free(), similar to the existing
kthread_stop() check.

Fixes: b03d542c3c95 ("PM: hibernate: Use crypto_acomp interface")
Signed-off-by: Malaya Kumar Rout
Cc: 6.15+ # 6.15+
[ rjw: Added 2 empty code lines ]
Link: https://patch.msgid.link/20251230115613.64080-1-mrout@redhat.com
Signed-off-by: Rafael J. Wysocki
---
 kernel/power/swap.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'kernel')

diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 33a186373bef..8050e5182835 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -902,8 +902,11 @@ out_clean:
 	for (thr = 0; thr < nr_threads; thr++) {
 		if (data[thr].thr)
 			kthread_stop(data[thr].thr);
-		acomp_request_free(data[thr].cr);
-		crypto_free_acomp(data[thr].cc);
+		if (data[thr].cr)
+			acomp_request_free(data[thr].cr);
+
+		if (!IS_ERR_OR_NULL(data[thr].cc))
+			crypto_free_acomp(data[thr].cc);
 	}
 	vfree(data);
 }
@@ -1499,8 +1502,11 @@ out_clean:
 	for (thr = 0; thr < nr_threads; thr++) {
 		if (data[thr].thr)
 			kthread_stop(data[thr].thr);
-		acomp_request_free(data[thr].cr);
-		crypto_free_acomp(data[thr].cc);
+		if (data[thr].cr)
+			acomp_request_free(data[thr].cr);
+
+		if (!IS_ERR_OR_NULL(data[thr].cc))
+			crypto_free_acomp(data[thr].cc);
 	}
 	vfree(data);
 }
--
cgit v1.2.3

From 7cc3fe8e754eb1b7d9876c8ae2ee77dd2fb47b6d Mon Sep 17 00:00:00 2001
From: Julia Lawall
Date: Fri, 26 Dec 2025 12:05:31 +0100
Subject: tracing: Drop unneeded assignment to soft_mode

soft_mode is not read in the enable case, so drop the assignment. Also
drop the comment text that refers to the assignment and realign the
comment.
McKenney" Cc: Gabriele Paoloni Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Link: https://patch.msgid.link/20251226110531.4129794-1-Julia.Lawall@inria.fr Signed-off-by: Julia Lawall Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_events.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 76067529db61..137b4d9bb116 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -826,16 +826,15 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, * When soft_disable is set and enable is set, we want to * register the tracepoint for the event, but leave the event * as is. That means, if the event was already enabled, we do - * nothing (but set soft_mode). If the event is disabled, we - * set SOFT_DISABLED before enabling the event tracepoint, so - * it still seems to be disabled. + * nothing. If the event is disabled, we set SOFT_DISABLED + * before enabling the event tracepoint, so it still seems + * to be disabled. */ if (!soft_disable) clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags); else { if (atomic_inc_return(&file->sm_ref) > 1) break; - soft_mode = true; /* Enable use of trace_buffered_event */ trace_buffered_event_enable(); } -- cgit v1.2.3 From 6435ffd6c7fcba330dfa91c58dc30aed2df3d0bf Mon Sep 17 00:00:00 2001 From: Wupeng Ma Date: Sun, 28 Dec 2025 14:50:07 +0800 Subject: ring-buffer: Avoid softlockup in ring_buffer_resize() during memory free When user resize all trace ring buffer through file 'buffer_size_kb', then in ring_buffer_resize(), kernel allocates buffer pages for each cpu in a loop. If the kernel preemption model is PREEMPT_NONE and there are many cpus and there are many buffer pages to be freed, it may not give up cpu for a long time and finally cause a softlockup. To avoid it, call cond_resched() after each cpu buffer free as Commit f6bd2c92488c ("ring-buffer: Avoid softlockup in ring_buffer_resize()") does. 
Detailed call trace as follows:

  rcu: INFO: rcu_sched self-detected stall on CPU
  rcu: 24-....: (14837 ticks this GP) idle=521c/1/0x4000000000000000 softirq=230597/230597 fqs=5329
  rcu: (t=15004 jiffies g=26003221 q=211022 ncpus=96)
  CPU: 24 UID: 0 PID: 11253 Comm: bash Kdump: loaded Tainted: G EL 6.18.2+ #278 NONE
  pc : arch_local_irq_restore+0x8/0x20
  arch_local_irq_restore+0x8/0x20 (P)
  free_frozen_page_commit+0x28c/0x3b0
  __free_frozen_pages+0x1c0/0x678
  ___free_pages+0xc0/0xe0
  free_pages+0x3c/0x50
  ring_buffer_resize.part.0+0x6a8/0x880
  ring_buffer_resize+0x3c/0x58
  __tracing_resize_ring_buffer.part.0+0x34/0xd8
  tracing_resize_ring_buffer+0x8c/0xd0
  tracing_entries_write+0x74/0xd8
  vfs_write+0xcc/0x288
  ksys_write+0x74/0x118
  __arm64_sys_write+0x24/0x38

Cc:
Link: https://patch.msgid.link/20251228065008.2396573-1-mawupeng1@huawei.com
Signed-off-by: Wupeng Ma
Acked-by: Masami Hiramatsu (Google)
Signed-off-by: Steven Rostedt (Google)
---
 kernel/trace/ring_buffer.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel')

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 41c9f5d079be..630221b00838 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3137,6 +3137,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 				 list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
+
+			cond_resched();
 		}
 	}
  out_err_unlock:
--
cgit v1.2.3

From 5f1ef0dfcb5b7f4a91a9b0e0ba533efd9f7e2cdb Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Mon, 5 Jan 2026 20:31:41 -0500
Subject: tracing: Add recursion protection in kernel stack trace recording

A bug was reported about an infinite recursion caused by tracing the
rcu events with the kernel stack trace trigger enabled. The stack
trace code called back into RCU, which then called the stack trace
again.

Expand the ftrace recursion protection to add a set of bits to protect
events from recursion. Each bit represents the context that the event
is in (normal, softirq, interrupt and NMI). Have the stack trace code
use the interrupt context to protect against recursion.

Note, the bug showed an issue in both the RCU code as well as the
tracing stacktrace code. This only handles the tracing stack trace
side of the bug. The RCU fix will be handled separately.

Link: https://lore.kernel.org/all/20260102122807.7025fc87@gandalf.local.home/
Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu
Cc: Mathieu Desnoyers
Cc: Joel Fernandes
Cc: "Paul E. McKenney"
McKenney" Cc: Boqun Feng Link: https://patch.msgid.link/20260105203141.515cd49f@gandalf.local.home Reported-by: Yao Kai Tested-by: Yao Kai Fixes: 5f5fa7ea89dc ("rcu: Don't use negative nesting depth in __rcu_read_unlock()") Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 6f2148df14d9..aef9058537d5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3012,6 +3012,11 @@ static void __ftrace_trace_stack(struct trace_array *tr, struct ftrace_stack *fstack; struct stack_entry *entry; int stackidx; + int bit; + + bit = trace_test_and_set_recursion(_THIS_IP_, _RET_IP_, TRACE_EVENT_START); + if (bit < 0) + return; /* * Add one, for this function and the call to save_stack_trace() @@ -3080,6 +3085,7 @@ static void __ftrace_trace_stack(struct trace_array *tr, /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); + trace_clear_recursion(bit); } static inline void ftrace_trace_stack(struct trace_array *tr, -- cgit v1.2.3 From 1e2ed4bfd50ace3c4272cfab7e9aa90956fb7ae0 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 6 Jan 2026 23:10:54 +0000 Subject: trace: ftrace_dump_on_oops[] is not exported, make it static The ftrace_dump_on_oops string is not used outside of trace.c so make it static to avoid the export warning from sparse: kernel/trace/trace.c:141:6: warning: symbol 'ftrace_dump_on_oops' was not declared. Should it be static? Fixes: dd293df6395a2 ("tracing: Move trace sysctls into trace.c") Link: https://patch.msgid.link/20260106231054.84270-1-ben.dooks@codethink.co.uk Signed-off-by: Ben Dooks Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index aef9058537d5..baec63134ab6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -138,7 +138,7 @@ cpumask_var_t __read_mostly tracing_buffer_mask; * by commas. */ /* Set to string format zero to disable by default */ -char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0"; +static char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0"; /* When set, tracing will stop when a WARN*() is hit */ static int __disable_trace_on_warning; -- cgit v1.2.3 From 2bdf777410dc6e022d1081885ff34673b5dfee99 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 23 Dec 2025 13:51:13 -0800 Subject: sched/mm_cid: Prevent NULL mm dereference in sched_mm_cid_after_execve() sched_mm_cid_after_execve() is called in bprm_execve()'s cleanup path even when exec_binprm() fails. For the init task's first execve(), this causes a problem: 1. current->mm is NULL (kernel threads don't have an mm) 2. sched_mm_cid_before_execve() exits early because mm is NULL 3. exec_binprm() fails (e.g., ENOENT for missing script interpreter) 4. sched_mm_cid_after_execve() is called with mm still NULL 5. sched_mm_cid_fork() is called unconditionally, triggering WARN_ON This is easily reproduced by booting with an init that is a shell script (#!/bin/sh) where the interpreter doesn't exist in the initramfs. Fix this by checking if t->mm is NULL before calling sched_mm_cid_fork(), matching the behavior of sched_mm_cid_before_execve() which already handles this case via sched_mm_cid_exit()'s early return. 
Fixes: b0c3d51b54f8 ("sched/mmcid: Provide precomputed maximal value")
Signed-off-by: Cong Wang
Signed-off-by: Thomas Gleixner
Reviewed-by: Mathieu Desnoyers
Acked-by: Will Deacon
Link: https://patch.msgid.link/20251223215113.639686-1-xiyou.wangcong@gmail.com
---
 kernel/sched/core.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 41ba0be16911..60afadb6eede 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10694,10 +10694,11 @@ void sched_mm_cid_before_execve(struct task_struct *t)
 	sched_mm_cid_exit(t);
 }
 
-/* Reactivate MM CID after successful execve() */
+/* Reactivate MM CID after execve() */
 void sched_mm_cid_after_execve(struct task_struct *t)
 {
-	sched_mm_cid_fork(t);
+	if (t->mm)
+		sched_mm_cid_fork(t);
 }
 
 static void mm_cid_work_fn(struct work_struct *work)
--
cgit v1.2.3

From 2e4b28c48f88ce9e263957b1d944cf5349952f88 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sun, 11 Jan 2026 16:53:48 +0100
Subject: treewide: Update email address

In a vain attempt to consolidate the email zoo, switch everything to
the kernel.org account.

Signed-off-by: Thomas Gleixner
Signed-off-by: Linus Torvalds
---
 kernel/events/callchain.c    | 2 +-
 kernel/events/core.c         | 2 +-
 kernel/events/ring_buffer.c  | 2 +-
 kernel/irq/debugfs.c         | 2 +-
 kernel/irq/matrix.c          | 2 +-
 kernel/sched/fair.c          | 2 +-
 kernel/sched/pelt.c          | 2 +-
 kernel/time/clockevents.c    | 2 +-
 kernel/time/hrtimer.c        | 2 +-
 kernel/time/tick-broadcast.c | 2 +-
 kernel/time/tick-common.c    | 2 +-
 kernel/time/tick-oneshot.c   | 2 +-
 kernel/time/tick-sched.c     | 2 +-
 13 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'kernel')

diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9c7e00725d6..1f6589578703 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -2,7 +2,7 @@
 /*
 * Performance events callchain code, extracted from core.c:
 *
- * Copyright (C) 2008 Thomas Gleixner
+ * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dad0d3d2e85f..f5e9d30e4fa9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2,7 +2,7 @@
 /*
 * Performance events core code:
 *
- * Copyright (C) 2008 Thomas Gleixner
+ * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp.
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 20a905023736..3e7de2661417 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -2,7 +2,7 @@
 /*
 * Performance events ring-buffer code:
 *
- * Copyright (C) 2008 Thomas Gleixner
+ * Copyright (C) 2008 Linutronix GmbH, Thomas Gleixner
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp.
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 3527defd2890..5c5ebaee35f2 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright 2017 Thomas Gleixner
+// Copyright 2017 Linutronix GmbH, Thomas Gleixner
 
 #include
 #include
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 8f222d1cccec..a50f2305a8dc 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2017 Thomas Gleixner
+// Copyright (C) 2017 Linutronix GmbH, Thomas Gleixner
 
 #include
 #include
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da46c3164537..e71302282671 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -15,7 +15,7 @@
 * Author: Srivatsa Vaddagiri
 *
 * Scaled math optimizations by Thomas Gleixner
- * Copyright (C) 2007, Thomas Gleixner
+ * Copyright (C) 2007, Linutronix GmbH, Thomas Gleixner
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fa83bbaf4f3e..897790889ba3 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -15,7 +15,7 @@
 * Author: Srivatsa Vaddagiri
 *
 * Scaled math optimizations by Thomas Gleixner
- * Copyright (C) 2007, Thomas Gleixner
+ * Copyright (C) 2007, Linutronix GmbH, Thomas Gleixner
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a59bc75ab7c5..eaae1ce9f060 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -2,7 +2,7 @@
 /*
 * This file contains functions which manage clock event devices.
 *
- * Copyright(C) 2005-2006, Thomas Gleixner
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index f8ea8c8fc895..bdb30cc5e873 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright(C) 2005-2006, Thomas Gleixner
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 0207868c8b4d..f63c65881364 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -3,7 +3,7 @@
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
- * Copyright(C) 2005-2006, Thomas Gleixner
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 7e33d3f2e889..d305d8521896 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -3,7 +3,7 @@
 * This file contains the base functions to manage periodic tick
 * related events.
 *
- * Copyright(C) 2005-2006, Thomas Gleixner
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index ffee943d796d..7472597f3225 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -3,7 +3,7 @@
 * This file contains functions which manage high resolution tick
 * related events.
 *
- * Copyright(C) 2005-2006, Thomas Gleixner
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 8ddf74e705d3..2f8a7923fa27 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright(C) 2005-2006, Thomas Gleixner
+ * Copyright(C) 2005-2006, Linutronix GmbH, Thomas Gleixner
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
 *
--
cgit v1.2.3