From 0128c850513a7f5199397a79da9c39558a5558cb Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Thu, 4 Sep 2025 07:57:16 +0200 Subject: sched_ext: Exit early on hotplug events during attach There is no need to complete the entire scx initialization if a scheduler is failing to be attached due to a hotplug event. Exit early to avoid unnecessary work and simplify the attach flow. Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index c645d47124e7..9b2ab67951e6 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -4513,7 +4513,7 @@ err_free_sch: return ERR_PTR(ret); } -static void check_hotplug_seq(struct scx_sched *sch, +static int check_hotplug_seq(struct scx_sched *sch, const struct sched_ext_ops *ops) { unsigned long long global_hotplug_seq; @@ -4530,8 +4530,11 @@ static void check_hotplug_seq(struct scx_sched *sch, SCX_ECODE_ACT_RESTART | SCX_ECODE_RSN_HOTPLUG, "expected hotplug seq %llu did not match actual %llu", ops->hotplug_seq, global_hotplug_seq); + return -EBUSY; } } + + return 0; } static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) @@ -4633,7 +4636,11 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) if (((void (**)(void))ops)[i]) set_bit(i, sch->has_op); - check_hotplug_seq(sch, ops); + ret = check_hotplug_seq(sch, ops); + if (ret) { + cpus_read_unlock(); + goto err_disable; + } scx_idle_update_selcpu_topology(ops); cpus_read_unlock(); -- cgit v1.2.3 From 3035addfaf285f32d1f513d181ebcb85924ae3c3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 6 Oct 2025 15:51:45 -1000 Subject: sched_ext: Add scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() With the planned hierarchical scheduler support, sub-schedulers will need to be verified for authority before being allowed to modify task->scx.slice and task->scx.dsq_vtime. Add scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() which will perform the necessary permission checks. Root schedulers can still directly write to these fields, so this doesn't affect existing schedulers. Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 9b2ab67951e6..2ae6aab04150 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5936,6 +5936,34 @@ static const struct btf_kfunc_id_set scx_kfunc_set_unlocked = { __bpf_kfunc_start_defs(); +/** + * scx_bpf_task_set_slice - Set task's time slice + * @p: task of interest + * @slice: time slice to set in nsecs + * + * Set @p's time slice to @slice. Returns %true on success, %false if the + * calling scheduler doesn't have authority over @p. + */ +__bpf_kfunc bool scx_bpf_task_set_slice(struct task_struct *p, u64 slice) +{ + p->scx.slice = slice; + return true; +} + +/** + * scx_bpf_task_set_dsq_vtime - Set task's virtual time for DSQ ordering + * @p: task of interest + * @vtime: virtual time to set + * + * Set @p's virtual time to @vtime. Returns %true on success, %false if the + * calling scheduler doesn't have authority over @p. 
+ */ +__bpf_kfunc bool scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime) +{ + p->scx.dsq_vtime = vtime; + return true; +} + static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags) { struct rq *this_rq; @@ -6741,6 +6769,8 @@ __bpf_kfunc void scx_bpf_events(struct scx_event_stats *events, __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_any) +BTF_ID_FLAGS(func, scx_bpf_task_set_slice, KF_RCU); +BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU); BTF_ID_FLAGS(func, scx_bpf_kick_cpu) BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) -- cgit v1.2.3 From c0d630ba347c7671210e1bab3c79defea19844e9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 6 Oct 2025 15:51:46 -1000 Subject: sched_ext: Wrap kfunc args in struct to prepare for aux__prog scx_bpf_dsq_insert_vtime() and scx_bpf_select_cpu_and() currently have 5 parameters. An upcoming change will add aux__prog parameter which will exceed BPF's 5 argument limit. Prepare by adding new kfuncs __scx_bpf_dsq_insert_vtime() and __scx_bpf_select_cpu_and() that take args structs. The existing kfuncs are kept as compatibility wrappers. BPF programs use inline wrappers that detect kernel API version via bpf_core_type_exists() and use the new struct-based kfuncs when available, falling back to compat kfuncs otherwise. This allows BPF programs to work with both old and new kernels. Reviewed-by: Emil Tsalapatis Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 82 ++++++++++++++++++++++++++++++++++++------------- kernel/sched/ext_idle.c | 43 ++++++++++++++++++++++---- 2 files changed, 98 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 2ae6aab04150..950113d8c8f3 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5448,54 +5448,94 @@ __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice scx_dsq_insert_commit(sch, p, dsq_id, enq_flags); } +static void scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, + u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) +{ + if (!scx_dsq_insert_preamble(sch, p, enq_flags)) + return; + + if (slice) + p->scx.slice = slice; + else + p->scx.slice = p->scx.slice ?: 1; + + p->scx.dsq_vtime = vtime; + + scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); +} + +struct scx_bpf_dsq_insert_vtime_args { + /* @p can't be packed together as KF_RCU is not transitive */ + u64 dsq_id; + u64 slice; + u64 vtime; + u64 enq_flags; +}; + /** - * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ + * __scx_bpf_dsq_insert_vtime - Arg-wrapped vtime DSQ insertion * @p: task_struct to insert - * @dsq_id: DSQ to insert into - * @slice: duration @p can run for in nsecs, 0 to keep the current value - * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ - * @enq_flags: SCX_ENQ_* + * @args: struct containing the rest of the arguments + * @args->dsq_id: DSQ to insert into + * @args->slice: duration @p can run for in nsecs, 0 to keep the current value + * @args->vtime: @p's ordering inside the vtime-sorted queue of the target DSQ + * @args->enq_flags: SCX_ENQ_* * - * Insert @p into the vtime priority queue of the DSQ identified by @dsq_id. - * Tasks queued into the priority queue are ordered by @vtime. All other aspects - * are identical to scx_bpf_dsq_insert(). + * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument + * limit. 
BPF programs should use scx_bpf_dsq_insert_vtime() which is provided + * as an inline wrapper in common.bpf.h. * - * @vtime ordering is according to time_before64() which considers wrapping. A - * numerically larger vtime may indicate an earlier position in the ordering and - * vice-versa. + * Insert @p into the vtime priority queue of the DSQ identified by + * @args->dsq_id. Tasks queued into the priority queue are ordered by + * @args->vtime. All other aspects are identical to scx_bpf_dsq_insert(). + * + * @args->vtime ordering is according to time_before64() which considers + * wrapping. A numerically larger vtime may indicate an earlier position in the + * ordering and vice-versa. * * A DSQ can only be used as a FIFO or priority queue at any given time and this * function must not be called on a DSQ which already has one or more FIFO tasks * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and * SCX_DSQ_GLOBAL) cannot be used as priority queues. */ -__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, - u64 slice, u64 vtime, u64 enq_flags) +__bpf_kfunc void +__scx_bpf_dsq_insert_vtime(struct task_struct *p, + struct scx_bpf_dsq_insert_vtime_args *args) { struct scx_sched *sch; guard(rcu)(); + sch = rcu_dereference(scx_root); if (unlikely(!sch)) return; - if (!scx_dsq_insert_preamble(sch, p, enq_flags)) - return; + scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice, args->vtime, + args->enq_flags); +} - if (slice) - p->scx.slice = slice; - else - p->scx.slice = p->scx.slice ?: 1; +/* + * COMPAT: Will be removed in v6.23. + */ +__bpf_kfunc void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, + u64 slice, u64 vtime, u64 enq_flags) +{ + struct scx_sched *sch; - p->scx.dsq_vtime = vtime; + guard(rcu)(); - scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); + sch = rcu_dereference(scx_root); + if (unlikely(!sch)) + return; + + scx_dsq_insert_vtime(sch, p, dsq_id, slice, vtime, enq_flags); } __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU) +BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c index d2434c954848..3d9d404d5cd2 100644 --- a/kernel/sched/ext_idle.c +++ b/kernel/sched/ext_idle.c @@ -995,26 +995,56 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, return prev_cpu; } +struct scx_bpf_select_cpu_and_args { + /* @p and @cpus_allowed can't be packed together as KF_RCU is not transitive */ + s32 prev_cpu; + u64 wake_flags; + u64 flags; +}; + /** - * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p, - * prioritizing those in @cpus_allowed + * __scx_bpf_select_cpu_and - Arg-wrapped CPU selection with cpumask * @p: task_struct to select a CPU for - * @prev_cpu: CPU @p was on previously - * @wake_flags: %SCX_WAKE_* flags * @cpus_allowed: cpumask of allowed CPUs - * @flags: %SCX_PICK_IDLE* flags + * @args: struct containing the rest of the arguments + * @args->prev_cpu: CPU @p was on previously + * @args->wake_flags: %SCX_WAKE_* flags + * @args->flags: %SCX_PICK_IDLE* flags + * + * Wrapper kfunc that takes arguments via struct to work around BPF's 5 argument + * limit. BPF programs should use scx_bpf_select_cpu_and() which is provided + * as an inline wrapper in common.bpf.h. 
* * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked * context such as a BPF test_run() call, as long as built-in CPU selection * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE * is set. * - * @p, @prev_cpu and @wake_flags match ops.select_cpu(). + * @p, @args->prev_cpu and @args->wake_flags match ops.select_cpu(). * * Returns the selected idle CPU, which will be automatically awakened upon * returning from ops.select_cpu() and can be used for direct dispatch, or * a negative value if no idle CPU is available. */ +__bpf_kfunc s32 +__scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed, + struct scx_bpf_select_cpu_and_args *args) +{ + struct scx_sched *sch; + + guard(rcu)(); + + sch = rcu_dereference(scx_root); + if (unlikely(!sch)) + return -ENODEV; + + return select_cpu_from_kfunc(sch, p, args->prev_cpu, args->wake_flags, + cpus_allowed, args->flags); +} + +/* + * COMPAT: Will be removed in v6.22. + */ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags, const struct cpumask *cpus_allowed, u64 flags) { @@ -1383,6 +1413,7 @@ BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU) +BTF_ID_FLAGS(func, __scx_bpf_select_cpu_and, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_idle) -- cgit v1.2.3 From cded46d971597ecfe505ba92a54253c0f5e1f2e4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 6 Oct 2025 15:51:47 -1000 Subject: sched_ext: Make scx_bpf_dsq_insert*() return bool In preparation for hierarchical schedulers, change scx_bpf_dsq_insert() and scx_bpf_dsq_insert_vtime() to return bool instead of void. With sub-schedulers, there will be no reliable way to guarantee a task is still owned by the sub-scheduler at insertion time (e.g., the task may have been migrated to another scheduler). The bool return value will enable sub-schedulers to detect and gracefully handle insertion failures. For the root scheduler, insertion failures will continue to trigger scheduler abort via scx_error(), so existing code doesn't need to check the return value. Backward compatibility is maintained through compat wrappers. Also update scx_bpf_dsq_move() documentation to clarify that it can return false for sub-schedulers when @dsq_id points to a disallowed local DSQ. Reviewed-by: Changwoo Min Reviewed-by: Emil Tsalapatis Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 45 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 34 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 950113d8c8f3..4b1467d3541a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5426,8 +5426,12 @@ __bpf_kfunc_start_defs(); * exhaustion. If zero, the current residual slice is maintained. If * %SCX_SLICE_INF, @p never expires and the BPF scheduler must kick the CPU with * scx_bpf_kick_cpu() to trigger scheduling. + * + * Returns %true on successful insertion, %false on failure. On the root + * scheduler, %false return triggers scheduler abort and the caller doesn't need + * to check the return value. 
*/ -__bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, +__bpf_kfunc bool scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) { struct scx_sched *sch; @@ -5435,10 +5439,10 @@ __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice guard(rcu)(); sch = rcu_dereference(scx_root); if (unlikely(!sch)) - return; + return false; if (!scx_dsq_insert_preamble(sch, p, enq_flags)) - return; + return false; if (slice) p->scx.slice = slice; @@ -5446,13 +5450,24 @@ __bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice p->scx.slice = p->scx.slice ?: 1; scx_dsq_insert_commit(sch, p, dsq_id, enq_flags); + + return true; +} + +/* + * COMPAT: Will be removed in v6.23. + */ +__bpf_kfunc void scx_bpf_dsq_insert___compat(struct task_struct *p, u64 dsq_id, + u64 slice, u64 enq_flags) +{ + scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags); } -static void scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, +static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) { if (!scx_dsq_insert_preamble(sch, p, enq_flags)) - return; + return false; if (slice) p->scx.slice = slice; @@ -5462,6 +5477,8 @@ static void scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, p->scx.dsq_vtime = vtime; scx_dsq_insert_commit(sch, p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ); + + return true; } struct scx_bpf_dsq_insert_vtime_args { @@ -5497,8 +5514,12 @@ struct scx_bpf_dsq_insert_vtime_args { * function must not be called on a DSQ which already has one or more FIFO tasks * queued and vice-versa. Also, the built-in DSQs (SCX_DSQ_LOCAL and * SCX_DSQ_GLOBAL) cannot be used as priority queues. + * + * Returns %true on successful insertion, %false on failure. On the root + * scheduler, %false return triggers scheduler abort and the caller doesn't need + * to check the return value. */ -__bpf_kfunc void +__bpf_kfunc bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) { @@ -5508,10 +5529,10 @@ __scx_bpf_dsq_insert_vtime(struct task_struct *p, sch = rcu_dereference(scx_root); if (unlikely(!sch)) - return; + return false; - scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice, args->vtime, - args->enq_flags); + return scx_dsq_insert_vtime(sch, p, args->dsq_id, args->slice, + args->vtime, args->enq_flags); } /* @@ -5535,6 +5556,7 @@ __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dsq_insert___compat, KF_RCU) BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) @@ -5789,8 +5811,9 @@ __bpf_kfunc void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, * Can be called from ops.dispatch() or any BPF context which doesn't hold a rq * lock (e.g. BPF timers or SYSCALL programs). * - * Returns %true if @p has been consumed, %false if @p had already been consumed - * or dequeued. + * Returns %true if @p has been consumed, %false if @p had already been + * consumed, dequeued, or, for sub-scheds, @dsq_id points to a disallowed local + * DSQ. 
*/ __bpf_kfunc bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, -- cgit v1.2.3 From 347ed2d566dabb06c7970fff01129c4f59995ed6 Mon Sep 17 00:00:00 2001 From: zhidao su Date: Sat, 11 Oct 2025 15:16:51 +0800 Subject: sched/ext: Implement cgroup_set_idle() callback Implement the missing cgroup_set_idle() callback that was marked as a TODO. This allows BPF schedulers to be notified when a cgroup's idle state changes, enabling them to adjust their scheduling behavior accordingly. The implementation follows the same pattern as other cgroup callbacks like cgroup_set_weight() and cgroup_set_bandwidth(). It checks if the BPF scheduler has implemented the callback and invokes it with the appropriate parameters. Fixes a spelling error in the cgroup_set_bandwidth() documentation. tj: s/scx_cgroup_rwsem/scx_cgroup_ops_rwsem/ to fix build breakage. Signed-off-by: zhidao su Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 16 +++++++++++++++- kernel/sched/ext_internal.h | 13 ++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 4b1467d3541a..430749ce46ab 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3066,6 +3066,7 @@ void scx_tg_init(struct task_group *tg) tg->scx.weight = CGROUP_WEIGHT_DFL; tg->scx.bw_period_us = default_bw_period_us(); tg->scx.bw_quota_us = RUNTIME_INF; + tg->scx.idle = false; } int scx_tg_online(struct task_group *tg) @@ -3214,7 +3215,18 @@ void scx_group_set_weight(struct task_group *tg, unsigned long weight) void scx_group_set_idle(struct task_group *tg, bool idle) { - /* TODO: Implement ops->cgroup_set_idle() */ + struct scx_sched *sch = scx_root; + + percpu_down_read(&scx_cgroup_ops_rwsem); + + if (scx_cgroup_enabled && SCX_HAS_OP(sch, cgroup_set_idle)) + SCX_CALL_OP(sch, SCX_KF_UNLOCKED, cgroup_set_idle, NULL, + tg_cgrp(tg), idle); + + /* Update the task group's idle state */ + tg->scx.idle = idle; + + percpu_up_read(&scx_cgroup_ops_rwsem); } void scx_group_set_bandwidth(struct task_group *tg, @@ -5017,6 +5029,7 @@ static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *fro static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {} static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {} static void sched_ext_ops__cgroup_set_bandwidth(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us) {} +static void sched_ext_ops__cgroup_set_idle(struct cgroup *cgrp, bool idle) {} #endif static void sched_ext_ops__cpu_online(s32 cpu) {} static void sched_ext_ops__cpu_offline(s32 cpu) {} @@ -5055,6 +5068,7 @@ static struct sched_ext_ops __bpf_ops_sched_ext_ops = { .cgroup_cancel_move = sched_ext_ops__cgroup_cancel_move, .cgroup_set_weight = sched_ext_ops__cgroup_set_weight, .cgroup_set_bandwidth = sched_ext_ops__cgroup_set_bandwidth, + .cgroup_set_idle = sched_ext_ops__cgroup_set_idle, #endif .cpu_online = sched_ext_ops__cpu_online, .cpu_offline = sched_ext_ops__cpu_offline, diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h index b3617abed510..7d00a0a2456e 100644 --- a/kernel/sched/ext_internal.h +++ b/kernel/sched/ext_internal.h @@ -697,12 +697,23 @@ struct sched_ext_ops { * 2_500_000. @cgrp is entitled to 2.5 CPUs. @burst_us can be * interpreted in the same fashion and specifies how much @cgrp can * burst temporarily. 
The specific control mechanism and thus the - * interpretation of @period_us and burstiness is upto to the BPF + * interpretation of @period_us and burstiness is up to the BPF * scheduler. */ void (*cgroup_set_bandwidth)(struct cgroup *cgrp, u64 period_us, u64 quota_us, u64 burst_us); + /** + * @cgroup_set_idle: A cgroup's idle state is being changed + * @cgrp: cgroup whose idle state is being updated + * @idle: whether the cgroup is entering or exiting idle state + * + * Update @cgrp's idle state to @idle. This callback is invoked when + * a cgroup transitions between idle and non-idle states, allowing the + * BPF scheduler to adjust its behavior accordingly. + */ + void (*cgroup_set_idle)(struct cgroup *cgrp, bool idle); + #endif /* CONFIG_EXT_GROUP_SCHED */ /* -- cgit v1.2.3 From 44f5c8ec5b9ad8ed4ade08d727f803b2bb07f1c3 Mon Sep 17 00:00:00 2001 From: Ryan Newton Date: Wed, 15 Oct 2025 11:50:35 -0400 Subject: sched_ext: Add lockless peek operation for DSQs The builtin DSQ queue data structures are meant to be used by a wide range of different sched_ext schedulers with different demands on these data structures. They might be per-cpu with low-contention, or high-contention shared queues. Unfortunately, DSQs have a coarse-grained lock around the whole data structure. Without going all the way to a lock-free, more scalable implementation, a small step we can take to reduce lock contention is to allow a lockless, small-fixed-cost peek at the head of the queue. This change allows certain custom SCX schedulers to cheaply peek at queues, e.g. during load balancing, before locking them. But it represents a few extra memory operations to update the pointer each time the DSQ is modified, including a memory barrier on ARM so the write appears correctly ordered. This commit adds a first_task pointer field which is updated atomically when the DSQ is modified, and allows any thread to peek at the head of the queue without holding the lock. 
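As an illustration of the intended usage (a minimal sketch, not part of this patch): a BPF scheduler could use the peek as a cheap filter while load balancing, before taking any locks. worth_migrating() is a hypothetical predicate, the usual scx common BPF headers are assumed, and since the kfunc is RCU-protected and may return NULL, the result must be NULL-checked and treated as a point-in-time hint only.

  /* Cheap, lock-free filter before doing any locked consume work. */
  static bool dsq_worth_pulling(u64 dsq_id)
  {
          struct task_struct *p;

          /* Point-in-time snapshot; may change before the DSQ is locked. */
          p = scx_bpf_dsq_peek(dsq_id);
          if (!p)
                  return false;                   /* empty, or internal error */

          return worth_migrating(p);              /* hypothetical predicate */
  }

Used this way, the peek only prunes uninteresting queues; the locked consume path still has to re-check whatever it finds.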
Signed-off-by: Ryan Newton Reviewed-by: Andrea Righi Reviewed-by: Christian Loehle Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 56 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 430749ce46ab..f9c0888ef279 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -965,8 +965,11 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, container_of(rbp, struct task_struct, scx.dsq_priq); list_add(&p->scx.dsq_list.node, &prev->scx.dsq_list.node); + /* first task unchanged - no update needed */ } else { list_add(&p->scx.dsq_list.node, &dsq->list); + /* not builtin and new task is at head - use fastpath */ + rcu_assign_pointer(dsq->first_task, p); } } else { /* a FIFO DSQ shouldn't be using PRIQ enqueuing */ @@ -974,10 +977,19 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, scx_error(sch, "DSQ ID 0x%016llx already had PRIQ-enqueued tasks", dsq->id); - if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) + if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) { list_add(&p->scx.dsq_list.node, &dsq->list); - else + /* new task inserted at head - use fastpath */ + if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN)) + rcu_assign_pointer(dsq->first_task, p); + } else { + bool was_empty; + + was_empty = list_empty(&dsq->list); list_add_tail(&p->scx.dsq_list.node, &dsq->list); + if (was_empty && !(dsq->id & SCX_DSQ_FLAG_BUILTIN)) + rcu_assign_pointer(dsq->first_task, p); + } } /* seq records the order tasks are queued, used by BPF DSQ iterator */ @@ -1032,6 +1044,13 @@ static void task_unlink_from_dsq(struct task_struct *p, p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; } + if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) { + struct task_struct *first_task; + + first_task = nldsq_next_task(dsq, NULL, false); + rcu_assign_pointer(dsq->first_task, first_task); + } + list_del_init(&p->scx.dsq_list.node); dsq_mod_nr(dsq, -1); } @@ -6292,6 +6311,40 @@ __bpf_kfunc void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) kit->dsq = NULL; } +/** + * scx_bpf_dsq_peek - Lockless peek at the first element. + * @dsq_id: DSQ to examine. + * + * Read the first element in the DSQ. This is semantically equivalent to using + * the DSQ iterator, but is lockfree. Of course, like any lockless operation, + * this provides only a point-in-time snapshot, and the contents may change + * by the time any subsequent locking operation reads the queue. + * + * Returns the pointer, or NULL indicates an empty queue OR internal error. 
+ */ +__bpf_kfunc struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) +{ + struct scx_sched *sch; + struct scx_dispatch_q *dsq; + + sch = rcu_dereference(scx_root); + if (unlikely(!sch)) + return NULL; + + if (unlikely(dsq_id & SCX_DSQ_FLAG_BUILTIN)) { + scx_error(sch, "peek disallowed on builtin DSQ 0x%llx", dsq_id); + return NULL; + } + + dsq = find_user_dsq(sch, dsq_id); + if (unlikely(!dsq)) { + scx_error(sch, "peek on non-existent DSQ 0x%llx", dsq_id); + return NULL; + } + + return rcu_dereference(dsq->first_task); +} + __bpf_kfunc_end_defs(); static s32 __bstr_format(struct scx_sched *sch, u64 *data_buf, char *line_buf, @@ -6851,6 +6904,7 @@ BTF_ID_FLAGS(func, scx_bpf_task_set_dsq_vtime, KF_RCU); BTF_ID_FLAGS(func, scx_bpf_kick_cpu) BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) BTF_ID_FLAGS(func, scx_bpf_destroy_dsq) +BTF_ID_FLAGS(func, scx_bpf_dsq_peek, KF_RCU_PROTECTED | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_scx_dsq_new, KF_ITER_NEW | KF_RCU_PROTECTED) BTF_ID_FLAGS(func, bpf_iter_scx_dsq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY) -- cgit v1.2.3 From 67fa319f5fff523a17993eafe990ad11aa5d6be9 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 18 Oct 2025 16:10:14 +0200 Subject: sched_ext: Allow forcibly picking an scx task Refactor pick_task_scx() adding a new argument to forcibly pick a SCHED_EXT task, ignoring any higher-priority sched class activity. This refactoring prepares the code for future scenarios, e.g., allowing the ext dl_server to force a SCHED_EXT task selection. No functional changes. Suggested-by: Tejun Heo Signed-off-by: Andrea Righi Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index adff739b396c..35bc37c7ee19 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2351,7 +2351,8 @@ static struct task_struct *first_local_task(struct rq *rq) struct task_struct, scx.dsq_list.node); } -static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) +static struct task_struct * +do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx) { struct task_struct *prev = rq->curr; bool keep_prev, kick_idle = false; @@ -2365,7 +2366,15 @@ static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) maybe_queue_balance_callback(rq); - if (rq_modified_above(rq, &ext_sched_class)) + /* + * If any higher-priority sched class enqueued a runnable task on + * this rq during balance_one(), abort and return RETRY_TASK, so + * that the scheduler loop can restart. + * + * If @force_scx is true, always try to pick a SCHED_EXT task, + * regardless of any higher-priority sched classes activity. 
+ */ + if (!force_scx && rq_modified_above(rq, &ext_sched_class)) return RETRY_TASK; keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; @@ -2408,6 +2417,11 @@ static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) return p; } +static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf) +{ + return do_pick_task_scx(rq, rf, false); +} + #ifdef CONFIG_SCHED_CORE /** * scx_prio_less - Task ordering for core-sched -- cgit v1.2.3 From 2dbbdeda77a61b39dc4a34dfce873907cfea2c4b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 21 Oct 2025 10:39:24 -1000 Subject: sched_ext: Fix scx_bpf_dsq_insert() backward binary compatibility cded46d97159 ("sched_ext: Make scx_bpf_dsq_insert*() return bool") introduced a new bool-returning scx_bpf_dsq_insert() and renamed the old void-returning version to scx_bpf_dsq_insert___compat, with the expectation that libbpf would match old binaries to the ___compat variant, maintaining backward binary compatibility. However, while libbpf ignores ___suffix on the BPF side when matching symbols, it doesn't do so for kernel-side symbols. Old binaries compiled with the original scx_bpf_dsq_insert() could no longer resolve the symbol. Fix by reversing the naming: Keep scx_bpf_dsq_insert() as the old void-returning interface and add ___v2 to the new bool-returning version. This allows old binaries to continue working while new code can use the ___v2 variant. Once libbpf is updated to ignore kernel-side ___SUFFIX, the ___v2 suffix can be dropped when the compat interface is removed. v2: Use ___v2 instead of ___new. Fixes: cded46d97159 ("sched_ext: Make scx_bpf_dsq_insert*() return bool") Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 35bc37c7ee19..dc86ce0be32a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5421,8 +5421,8 @@ __bpf_kfunc_start_defs(); * scheduler, %false return triggers scheduler abort and the caller doesn't need * to check the return value. */ -__bpf_kfunc bool scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, - u64 enq_flags) +__bpf_kfunc bool scx_bpf_dsq_insert___v2(struct task_struct *p, u64 dsq_id, + u64 slice, u64 enq_flags) { struct scx_sched *sch; @@ -5445,12 +5445,12 @@ __bpf_kfunc bool scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice } /* - * COMPAT: Will be removed in v6.23. + * COMPAT: Will be removed in v6.23 along with the ___v2 suffix. 
*/ -__bpf_kfunc void scx_bpf_dsq_insert___compat(struct task_struct *p, u64 dsq_id, +__bpf_kfunc void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) { - scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags); + scx_bpf_dsq_insert___v2(p, dsq_id, slice, enq_flags); } static bool scx_dsq_insert_vtime(struct scx_sched *sch, struct task_struct *p, @@ -5546,7 +5546,7 @@ __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch) BTF_ID_FLAGS(func, scx_bpf_dsq_insert, KF_RCU) -BTF_ID_FLAGS(func, scx_bpf_dsq_insert___compat, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dsq_insert___v2, KF_RCU) BTF_ID_FLAGS(func, __scx_bpf_dsq_insert_vtime, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_insert_vtime, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_enqueue_dispatch) -- cgit v1.2.3 From a9c1fbbd6dadbaa38c157a07d5d11005460b86b9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 22 Oct 2025 10:56:27 -1000 Subject: sched_ext: Don't kick CPUs running higher classes When a sched_ext scheduler tries to kick a CPU, the CPU may be running a higher class task. sched_ext has no control over such CPUs. A sched_ext scheduler couldn't have expected to get access to the CPU after kicking it anyway. Skip kicking when the target CPU is running a higher class. Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index dc86ce0be32a..12e00f77a848 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5122,18 +5122,23 @@ static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) { struct rq *rq = cpu_rq(cpu); struct scx_rq *this_scx = &this_rq->scx; + const struct sched_class *cur_class; bool should_wait = false; unsigned long flags; raw_spin_rq_lock_irqsave(rq, flags); + cur_class = rq->curr->sched_class; /* * During CPU hotplug, a CPU may depend on kicking itself to make - * forward progress. Allow kicking self regardless of online state. + * forward progress. Allow kicking self regardless of online state. If + * @cpu is running a higher class task, we have no control over @cpu. + * Skip kicking. */ - if (cpu_online(cpu) || cpu == cpu_of(this_rq)) { + if ((cpu_online(cpu) || cpu == cpu_of(this_rq)) && + !sched_class_above(cur_class, &ext_sched_class)) { if (cpumask_test_cpu(cpu, this_scx->cpus_to_preempt)) { - if (rq->curr->sched_class == &ext_sched_class) + if (cur_class == &ext_sched_class) rq->curr->scx.slice = 0; cpumask_clear_cpu(cpu, this_scx->cpus_to_preempt); } -- cgit v1.2.3 From a379fa1e2cae15d7422b4eead83a6366f2f445cb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 22 Oct 2025 10:56:28 -1000 Subject: sched_ext: Fix SCX_KICK_WAIT to work reliably SCX_KICK_WAIT is used to synchronously wait for the target CPU to complete a reschedule and can be used to implement operations like core scheduling. This used to be implemented by scx_next_task_picked() incrementing pnt_seq, which was always called when a CPU picks the next task to run, allowing SCX_KICK_WAIT to reliably wait for the target CPU to enter the scheduler and pick the next task. However, commit b999e365c298 ("sched_ext: Replace scx_next_task_picked() with switch_class()") replaced scx_next_task_picked() with the switch_class() callback, which is only called when switching between sched classes. This broke SCX_KICK_WAIT because pnt_seq would no longer be reliably incremented unless the previous task was SCX and the next task was not. 
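For context, a minimal sketch of the kind of user this guarantee serves (illustrative only; sibling_of() is a hypothetical topology helper): a core-scheduling style BPF scheduler kicks the SMT sibling and relies on SCX_KICK_WAIT so that the kfunc does not return until the sibling has entered the scheduling path and switched away from its current SCX task.

  static void kick_sibling_and_wait(s32 cpu)
  {
          s32 sib = sibling_of(cpu);              /* hypothetical helper */

          if (sib >= 0)
                  scx_bpf_kick_cpu(sib, SCX_KICK_PREEMPT | SCX_KICK_WAIT);
  }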
This fix leverages commit 4c95380701f5 ("sched/ext: Fold balance_scx() into pick_task_scx()") which refactored the pick path making put_prev_task_scx() the natural place to track task switches for SCX_KICK_WAIT. The fix moves pnt_seq increment to put_prev_task_scx() and also increments it in pick_task_scx() to handle cases where the same task is re-selected, whether by BPF scheduler decision or slice refill. The semantics: If the current task on the target CPU is SCX, SCX_KICK_WAIT waits until the CPU enters the scheduling path. This provides sufficient guarantee for use cases like core scheduling while keeping the operation self-contained within SCX. v2: - Also increment pnt_seq in pick_task_scx() to handle same-task re-selection (Andrea Righi). - Use smp_cond_load_acquire() for the busy-wait loop for better architecture optimization (Peter Zijlstra). Reported-by: Wen-Fang Liu Link: http://lkml.kernel.org/r/228ebd9e6ed3437996dffe15735a9caa@honor.com Cc: Peter Zijlstra Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 46 +++++++++++++++++++++++++-------------------- kernel/sched/ext_internal.h | 6 ++++-- 2 files changed, 30 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 12e00f77a848..1999f6e67022 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2260,12 +2260,6 @@ static void switch_class(struct rq *rq, struct task_struct *next) struct scx_sched *sch = scx_root; const struct sched_class *next_class = next->sched_class; - /* - * Pairs with the smp_load_acquire() issued by a CPU in - * kick_cpus_irq_workfn() who is waiting for this CPU to perform a - * resched. - */ - smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); if (!(sch->ops.flags & SCX_OPS_HAS_CPU_PREEMPT)) return; @@ -2305,6 +2299,10 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p, struct task_struct *next) { struct scx_sched *sch = scx_root; + + /* see kick_cpus_irq_workfn() */ + smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); + update_curr_scx(rq); /* see dequeue_task_scx() on why we skip when !QUEUED */ @@ -2358,6 +2356,9 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx) bool keep_prev, kick_idle = false; struct task_struct *p; + /* see kick_cpus_irq_workfn() */ + smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); + rq_modified_clear(rq); rq_unpin_lock(rq, rf); @@ -5144,8 +5145,12 @@ static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) } if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { - pseqs[cpu] = rq->scx.pnt_seq; - should_wait = true; + if (cur_class == &ext_sched_class) { + pseqs[cpu] = rq->scx.pnt_seq; + should_wait = true; + } else { + cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); + } } resched_curr(rq); @@ -5206,18 +5211,19 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) for_each_cpu(cpu, this_scx->cpus_to_wait) { unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq; - if (cpu != cpu_of(this_rq)) { - /* - * Pairs with smp_store_release() issued by this CPU in - * switch_class() on the resched path. - * - * We busy-wait here to guarantee that no other task can - * be scheduled on our core before the target CPU has - * entered the resched path. - */ - while (smp_load_acquire(wait_pnt_seq) == pseqs[cpu]) - cpu_relax(); - } + /* + * Busy-wait until the task running at the time of kicking is no + * longer running. This can be used to implement e.g. core + * scheduling. 
+ * + * smp_cond_load_acquire() pairs with store_releases in + * pick_task_scx() and put_prev_task_scx(). The former breaks + * the wait if SCX's scheduling path is entered even if the same + * task is picked subsequently. The latter is necessary to break + * the wait when $cpu is taken by a higher sched class. + */ + if (cpu != cpu_of(this_rq)) + smp_cond_load_acquire(wait_pnt_seq, VAL != pseqs[cpu]); cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); } diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h index 7d00a0a2456e..fb161fc35328 100644 --- a/kernel/sched/ext_internal.h +++ b/kernel/sched/ext_internal.h @@ -997,8 +997,10 @@ enum scx_kick_flags { SCX_KICK_PREEMPT = 1LLU << 1, /* - * Wait for the CPU to be rescheduled. The scx_bpf_kick_cpu() call will - * return after the target CPU finishes picking the next task. + * The scx_bpf_kick_cpu() call will return after the current SCX task of + * the target CPU switches out. This can be used to implement e.g. core + * scheduling. This has no effect if the current task on the target CPU + * is not on SCX. */ SCX_KICK_WAIT = 1LLU << 2, }; -- cgit v1.2.3 From 987e00035c0e68bc0d44dec811cc9c26310b5bff Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 22 Oct 2025 10:56:29 -1000 Subject: sched_ext: Rename pnt_seq to kick_sync The pnt_seq field and related infrastructure were originally named for "pick next task sequence", reflecting their original implementation in scx_next_task_picked(). However, the sequence counter is now incremented in both put_prev_task_scx() and pick_task_scx() and its purpose is to synchronize kick operations via SCX_KICK_WAIT, not specifically to track pick_next_task events. Rename to better reflect the actual semantics: - pnt_seq -> kick_sync - scx_kick_pseqs -> scx_kick_syncs - pseqs variables -> ksyncs - Update comments to refer to "kick_sync sequence" instead of "pick_task sequence" This is a pure renaming with no functional changes. Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 80 ++++++++++++++++++++++++++-------------------------- kernel/sched/sched.h | 2 +- 2 files changed, 41 insertions(+), 41 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 1999f6e67022..04079ae4246e 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -68,18 +68,18 @@ static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES; static struct delayed_work scx_watchdog_work; /* - * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of pick_task sequence + * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of kick_sync sequence * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated * lazily when enabling and freed when disabling to avoid waste when sched_ext * isn't active. */ -struct scx_kick_pseqs { +struct scx_kick_syncs { struct rcu_head rcu; - unsigned long seqs[]; + unsigned long syncs[]; }; -static DEFINE_PER_CPU(struct scx_kick_pseqs __rcu *, scx_kick_pseqs); +static DEFINE_PER_CPU(struct scx_kick_syncs __rcu *, scx_kick_syncs); /* * Direct dispatch marker. 
@@ -2301,7 +2301,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p, struct scx_sched *sch = scx_root; /* see kick_cpus_irq_workfn() */ - smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); + smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); update_curr_scx(rq); @@ -2357,7 +2357,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx) struct task_struct *p; /* see kick_cpus_irq_workfn() */ - smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); + smp_store_release(&rq->scx.kick_sync, rq->scx.kick_sync + 1); rq_modified_clear(rq); @@ -3883,24 +3883,24 @@ static const char *scx_exit_reason(enum scx_exit_kind kind) } } -static void free_kick_pseqs_rcu(struct rcu_head *rcu) +static void free_kick_syncs_rcu(struct rcu_head *rcu) { - struct scx_kick_pseqs *pseqs = container_of(rcu, struct scx_kick_pseqs, rcu); + struct scx_kick_syncs *ksyncs = container_of(rcu, struct scx_kick_syncs, rcu); - kvfree(pseqs); + kvfree(ksyncs); } -static void free_kick_pseqs(void) +static void free_kick_syncs(void) { int cpu; for_each_possible_cpu(cpu) { - struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); - struct scx_kick_pseqs *to_free; + struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); + struct scx_kick_syncs *to_free; - to_free = rcu_replace_pointer(*pseqs, NULL, true); + to_free = rcu_replace_pointer(*ksyncs, NULL, true); if (to_free) - call_rcu(&to_free->rcu, free_kick_pseqs_rcu); + call_rcu(&to_free->rcu, free_kick_syncs_rcu); } } @@ -4038,7 +4038,7 @@ static void scx_disable_workfn(struct kthread_work *work) free_percpu(scx_dsp_ctx); scx_dsp_ctx = NULL; scx_dsp_max_batch = 0; - free_kick_pseqs(); + free_kick_syncs(); mutex_unlock(&scx_enable_mutex); @@ -4287,10 +4287,10 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len) seq_buf_init(&ns, buf, avail); dump_newline(&ns); - dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu pnt_seq=%lu", + dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu", cpu, rq->scx.nr_running, rq->scx.flags, rq->scx.cpu_released, rq->scx.ops_qseq, - rq->scx.pnt_seq); + rq->scx.kick_sync); dump_line(&ns, " curr=%s[%d] class=%ps", rq->curr->comm, rq->curr->pid, rq->curr->sched_class); @@ -4401,7 +4401,7 @@ static void scx_vexit(struct scx_sched *sch, irq_work_queue(&sch->error_irq_work); } -static int alloc_kick_pseqs(void) +static int alloc_kick_syncs(void) { int cpu; @@ -4410,19 +4410,19 @@ static int alloc_kick_pseqs(void) * can exceed percpu allocator limits on large machines. 
*/ for_each_possible_cpu(cpu) { - struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); - struct scx_kick_pseqs *new_pseqs; + struct scx_kick_syncs **ksyncs = per_cpu_ptr(&scx_kick_syncs, cpu); + struct scx_kick_syncs *new_ksyncs; - WARN_ON_ONCE(rcu_access_pointer(*pseqs)); + WARN_ON_ONCE(rcu_access_pointer(*ksyncs)); - new_pseqs = kvzalloc_node(struct_size(new_pseqs, seqs, nr_cpu_ids), - GFP_KERNEL, cpu_to_node(cpu)); - if (!new_pseqs) { - free_kick_pseqs(); + new_ksyncs = kvzalloc_node(struct_size(new_ksyncs, syncs, nr_cpu_ids), + GFP_KERNEL, cpu_to_node(cpu)); + if (!new_ksyncs) { + free_kick_syncs(); return -ENOMEM; } - rcu_assign_pointer(*pseqs, new_pseqs); + rcu_assign_pointer(*ksyncs, new_ksyncs); } return 0; @@ -4578,14 +4578,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) goto err_unlock; } - ret = alloc_kick_pseqs(); + ret = alloc_kick_syncs(); if (ret) goto err_unlock; sch = scx_alloc_and_add_sched(ops); if (IS_ERR(sch)) { ret = PTR_ERR(sch); - goto err_free_pseqs; + goto err_free_ksyncs; } /* @@ -4788,8 +4788,8 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) return 0; -err_free_pseqs: - free_kick_pseqs(); +err_free_ksyncs: + free_kick_syncs(); err_unlock: mutex_unlock(&scx_enable_mutex); return ret; @@ -5119,7 +5119,7 @@ static bool can_skip_idle_kick(struct rq *rq) return !is_idle_task(rq->curr) && !(rq->scx.flags & SCX_RQ_IN_BALANCE); } -static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) +static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *ksyncs) { struct rq *rq = cpu_rq(cpu); struct scx_rq *this_scx = &this_rq->scx; @@ -5146,7 +5146,7 @@ static bool kick_one_cpu(s32 cpu, struct rq *this_rq, unsigned long *pseqs) if (cpumask_test_cpu(cpu, this_scx->cpus_to_wait)) { if (cur_class == &ext_sched_class) { - pseqs[cpu] = rq->scx.pnt_seq; + ksyncs[cpu] = rq->scx.kick_sync; should_wait = true; } else { cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); @@ -5182,20 +5182,20 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) { struct rq *this_rq = this_rq(); struct scx_rq *this_scx = &this_rq->scx; - struct scx_kick_pseqs __rcu *pseqs_pcpu = __this_cpu_read(scx_kick_pseqs); + struct scx_kick_syncs __rcu *ksyncs_pcpu = __this_cpu_read(scx_kick_syncs); bool should_wait = false; - unsigned long *pseqs; + unsigned long *ksyncs; s32 cpu; - if (unlikely(!pseqs_pcpu)) { - pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_pseqs"); + if (unlikely(!ksyncs_pcpu)) { + pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_syncs"); return; } - pseqs = rcu_dereference_bh(pseqs_pcpu)->seqs; + ksyncs = rcu_dereference_bh(ksyncs_pcpu)->syncs; for_each_cpu(cpu, this_scx->cpus_to_kick) { - should_wait |= kick_one_cpu(cpu, this_rq, pseqs); + should_wait |= kick_one_cpu(cpu, this_rq, ksyncs); cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); cpumask_clear_cpu(cpu, this_scx->cpus_to_kick_if_idle); } @@ -5209,7 +5209,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) return; for_each_cpu(cpu, this_scx->cpus_to_wait) { - unsigned long *wait_pnt_seq = &cpu_rq(cpu)->scx.pnt_seq; + unsigned long *wait_kick_sync = &cpu_rq(cpu)->scx.kick_sync; /* * Busy-wait until the task running at the time of kicking is no @@ -5223,7 +5223,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) * the wait when $cpu is taken by a higher sched class. 
*/ if (cpu != cpu_of(this_rq)) - smp_cond_load_acquire(wait_pnt_seq, VAL != pseqs[cpu]); + smp_cond_load_acquire(wait_kick_sync, VAL != ksyncs[cpu]); cpumask_clear_cpu(cpu, this_scx->cpus_to_wait); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 63ffb3eafd05..909e94794f8a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -803,7 +803,7 @@ struct scx_rq { cpumask_var_t cpus_to_kick_if_idle; cpumask_var_t cpus_to_preempt; cpumask_var_t cpus_to_wait; - unsigned long pnt_seq; + unsigned long kick_sync; struct balance_callback deferred_bal_cb; struct irq_work deferred_irq_work; struct irq_work kick_cpus_irq_work; -- cgit v1.2.3 From e67708823d7362cfbb557008563cce7e7c9c5128 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 21 Sep 2025 15:32:40 -1000 Subject: sched_ext: Use rhashtable_lookup() instead of rhashtable_lookup_fast() The find_user_dsq() function is called from contexts that are already under RCU read lock protection. Switch from rhashtable_lookup_fast() to rhashtable_lookup() to avoid redundant RCU locking. Requires: bee8a520eb84 ("rhashtable: Use rcu_dereference_all and rcu_dereference_all_check") Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 04079ae4246e..bde49e47f1e4 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -200,7 +200,7 @@ static struct scx_dispatch_q *find_global_dsq(struct scx_sched *sch, static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id) { - return rhashtable_lookup_fast(&sch->dsq_hash, &dsq_id, dsq_hash_params); + return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params); } /* -- cgit v1.2.3 From 71d7847cad4475f1f795c7737e08b604b448ca70 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 25 Oct 2025 00:01:02 +0200 Subject: sched_ext: Fix scx_bpf_dsq_peek() with FIFO DSQs When removing a task from a FIFO DSQ, we must delete it from the list before updating dsq->first_task, otherwise the following lookup will just re-read the same task, leaving first_task pointing to removed entry. This issue only affects DSQs operating in FIFO mode, as priority DSQs correctly update the rbtree before re-evaluating the new first task. Remove the item from the list before refreshing the first task to guarantee the correct behavior in FIFO DSQs. 
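A short worked example (illustrative; a FIFO DSQ holding A followed by B, removing A):

  /*
   * Refresh first, then unlink (old, buggy order):
   *   nldsq_next_task(dsq, NULL, false) -> A     A is still linked
   *   first_task = A                             stale, A is being removed
   *   list_del_init(&A->scx.dsq_list.node)
   *
   * Unlink first, then refresh (fixed order):
   *   list_del_init(&A->scx.dsq_list.node)
   *   nldsq_next_task(dsq, NULL, false) -> B
   *   first_task = B                             correct new head
   */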
Fixes: 44f5c8ec5b9ad ("sched_ext: Add lockless peek operation for DSQs") Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index bde49e47f1e4..1cfe4b43d31b 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1044,15 +1044,15 @@ static void task_unlink_from_dsq(struct task_struct *p, p->scx.dsq_flags &= ~SCX_TASK_DSQ_ON_PRIQ; } + list_del_init(&p->scx.dsq_list.node); + dsq_mod_nr(dsq, -1); + if (!(dsq->id & SCX_DSQ_FLAG_BUILTIN) && dsq->first_task == p) { struct task_struct *first_task; first_task = nldsq_next_task(dsq, NULL, false); rcu_assign_pointer(dsq->first_task, first_task); } - - list_del_init(&p->scx.dsq_list.node); - dsq_mod_nr(dsq, -1); } static void dispatch_dequeue(struct rq *rq, struct task_struct *p) -- cgit v1.2.3 From b7d4b28db7da89de1d03316c4e67b5a47390fcd3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 28 Oct 2025 11:38:34 -1000 Subject: sched_ext: Use SCX_TASK_READY test instead of tryget_task_struct() during class switch ddf7233fcab6 ("sched/ext: Fix invalid task state transitions on class switch") added tryget_task_struct() test during scx_enable()'s class switching loop. The reason for the addition was to avoid enabling tasks which skipped prep in the previous loop due to being dead. While tryget_task_struct() does work for this purpose as tasks that fail tryget always will fail it, it's a bit roundabout. A more direct way is testing whether the task is in READY state. Switch to testing SCX_TASK_READY directly. Cc: Andrea Righi Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 1cfe4b43d31b..603de983a509 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -4753,7 +4753,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) const struct sched_class *new_class = __setscheduler_class(p->policy, p->prio); - if (!tryget_task_struct(p)) + if (scx_get_task_state(p) != SCX_TASK_READY) continue; if (old_class != new_class) @@ -4763,8 +4763,6 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) p->scx.slice = SCX_SLICE_DFL; p->sched_class = new_class; } - - put_task_struct(p); } scx_task_iter_stop(&sti); percpu_up_write(&scx_fork_rwsem); -- cgit v1.2.3 From 180b4ac3420c4b4e03c75b7077beb854af9282da Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 Oct 2025 14:18:47 -1000 Subject: sched_ext: Split schedule_deferred() into locked and unlocked variants schedule_deferred() currently requires the rq lock to be held so that it can use scheduler hooks for efficiency when available. However, there are cases where deferred actions need to be scheduled from contexts that don't hold the rq lock. Split into schedule_deferred() which can be called from any context and just queues irq_work, and schedule_deferred_locked() which requires the rq lock and can optimize by using scheduler hooks when available. Update the existing call site to use the _locked variant. 
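To make the resulting contract explicit, a minimal sketch (queue_deferred_work() is a hypothetical caller, not a call site in this series):

  static void queue_deferred_work(struct rq *rq, bool rq_locked)
  {
          if (rq_locked)
                  schedule_deferred_locked(rq);   /* may use scheduler hooks */
          else
                  schedule_deferred(rq);          /* plain irq_work, any context */
  }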
Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 75e9f8352f8c..93ee196841d8 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -775,11 +775,27 @@ static void deferred_irq_workfn(struct irq_work *irq_work) * schedule_deferred - Schedule execution of deferred actions on an rq * @rq: target rq * - * Schedule execution of deferred actions on @rq. Must be called with @rq - * locked. Deferred actions are executed with @rq locked but unpinned, and thus - * can unlock @rq to e.g. migrate tasks to other rqs. + * Schedule execution of deferred actions on @rq. Deferred actions are executed + * with @rq locked but unpinned, and thus can unlock @rq to e.g. migrate tasks + * to other rqs. */ static void schedule_deferred(struct rq *rq) +{ + /* + * Queue an irq work. They are executed on IRQ re-enable which may take + * a bit longer than the scheduler hook in schedule_deferred_locked(). + */ + irq_work_queue(&rq->scx.deferred_irq_work); +} + +/** + * schedule_deferred_locked - Schedule execution of deferred actions on an rq + * @rq: target rq + * + * Schedule execution of deferred actions on @rq. Equivalent to + * schedule_deferred() but requires @rq to be locked and can be more efficient. + */ +static void schedule_deferred_locked(struct rq *rq) { lockdep_assert_rq_held(rq); @@ -812,12 +828,11 @@ static void schedule_deferred(struct rq *rq) } /* - * No scheduler hooks available. Queue an irq work. They are executed on - * IRQ re-enable which may take a bit longer than the scheduler hooks. - * The above WAKEUP and BALANCE paths should cover most of the cases and - * the time to IRQ re-enable shouldn't be long. + * No scheduler hooks available. Use the generic irq_work path. The + * above WAKEUP and BALANCE paths should cover most of the cases and the + * time to IRQ re-enable shouldn't be long. */ - irq_work_queue(&rq->scx.deferred_irq_work); + schedule_deferred(rq); } /** @@ -1211,7 +1226,7 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p, WARN_ON_ONCE(p->scx.dsq || !list_empty(&p->scx.dsq_list.node)); list_add_tail(&p->scx.dsq_list.node, &rq->scx.ddsp_deferred_locals); - schedule_deferred(rq); + schedule_deferred_locked(rq); return; } -- cgit v1.2.3 From 8803e6a7fb687795ab4326f3e96e9f666605d883 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 24 Oct 2025 14:18:48 -1000 Subject: sched_ext: Factor out reenq_local() from scx_bpf_reenqueue_local() Factor out the core re-enqueue logic from scx_bpf_reenqueue_local() into a new reenq_local() helper function. scx_bpf_reenqueue_local() now handles the BPF kfunc checks and calls reenq_local() to perform the actual work. This is a prep patch to allow reenq_local() to be called from other contexts. 
Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 50 +++++++++++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 93ee196841d8..d13ce92c3f01 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5879,32 +5879,12 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { .set = &scx_kfunc_ids_dispatch, }; -__bpf_kfunc_start_defs(); - -/** - * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ - * - * Iterate over all of the tasks currently enqueued on the local DSQ of the - * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of - * processed tasks. Can only be called from ops.cpu_release(). - */ -__bpf_kfunc u32 scx_bpf_reenqueue_local(void) +static u32 reenq_local(struct rq *rq) { - struct scx_sched *sch; LIST_HEAD(tasks); u32 nr_enqueued = 0; - struct rq *rq; struct task_struct *p, *n; - guard(rcu)(); - sch = rcu_dereference(scx_root); - if (unlikely(!sch)) - return 0; - - if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) - return 0; - - rq = cpu_rq(smp_processor_id()); lockdep_assert_rq_held(rq); /* @@ -5941,6 +5921,34 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void) return nr_enqueued; } +__bpf_kfunc_start_defs(); + +/** + * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ + * + * Iterate over all of the tasks currently enqueued on the local DSQ of the + * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of + * processed tasks. Can only be called from ops.cpu_release(). + */ +__bpf_kfunc u32 scx_bpf_reenqueue_local(void) +{ + struct scx_sched *sch; + struct rq *rq; + + guard(rcu)(); + sch = rcu_dereference(scx_root); + if (unlikely(!sch)) + return 0; + + if (!scx_kf_allowed(sch, SCX_KF_CPU_RELEASE)) + return 0; + + rq = cpu_rq(smp_processor_id()); + lockdep_assert_rq_held(rq); + + return reenq_local(rq); +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_cpu_release) -- cgit v1.2.3 From a3f5d48222532484c1e85ef27cc6893803e4cd17 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 27 Oct 2025 08:19:40 -1000 Subject: sched_ext: Allow scx_bpf_reenqueue_local() to be called from anywhere The ops.cpu_acquire/release() callbacks miss events under multiple conditions. There are two distinct task dispatch gaps that can cause cpu_released flag desynchronization: 1. balance-to-pick_task gap: This is what was originally reported. balance_scx() can enqueue a task, but during consume_remote_task() when the rq lock is released, a higher priority task can be enqueued and ultimately picked while cpu_released remains false. This gap is closeable via RETRY_TASK handling. 2. ttwu-to-pick_task gap: ttwu() can directly dispatch a task to a CPU's local DSQ. By the time the sched path runs on the target CPU, higher class tasks may already be queued. In such cases, nothing on sched_ext side will be invoked, and the only solution would be a hook invoked regardless of sched class, which isn't desirable. Rather than adding invasive core hooks, BPF schedulers can use generic BPF mechanisms like tracepoints. From SCX scheduler's perspective, this is congruent with other mechanisms it already uses and doesn't add further friction. The main use case for cpu_release() was calling scx_bpf_reenqueue_local() when a CPU gets preempted by a higher priority scheduling class. However, the old scx_bpf_reenqueue_local() could only be called from cpu_release() context. 
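As a rough sketch of the tracepoint-based approach (illustrative, not the actual scx_qmap change; next_is_higher_class() is a hypothetical predicate, the usual vmlinux.h / scx common BPF headers are assumed, and the scheduler's compat shim is assumed to resolve scx_bpf_reenqueue_local() to the variant the running kernel provides):

  SEC("tp_btf/sched_switch")
  int BPF_PROG(on_sched_switch, bool preempt, struct task_struct *prev,
               struct task_struct *next)
  {
          /*
           * The CPU is being taken over by a higher sched class: hand any
           * tasks parked on its local DSQ back to the BPF scheduler.
           */
          if (next_is_higher_class(next))         /* hypothetical */
                  scx_bpf_reenqueue_local();

          return 0;
  }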
Add a new version of scx_bpf_reenqueue_local() that can be called from any context by deferring the actual re-enqueue operation. This eliminates the need for cpu_acquire/release() ops entirely. Schedulers can now use standard BPF mechanisms like the sched_switch tracepoint to detect and handle CPU preemption. Update scx_qmap to demonstrate the new approach using sched_switch instead of cpu_release, with compat support for older kernels. Mark cpu_acquire/release() as deprecated. The old scx_bpf_reenqueue_local() variant will be removed in v6.23. Reported-by: Wen-Fang Liu Link: https://lore.kernel.org/all/8d64c74118c6440f81bcf5a4ac6b9f00@honor.com/ Cc: Peter Zijlstra Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 31 +++++++++++++++++++++++++++++++ kernel/sched/sched.h | 1 + 2 files changed, 32 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index d13ce92c3f01..d1ef5bda95ae 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -147,6 +147,7 @@ static struct kset *scx_kset; #include static void process_ddsp_deferred_locals(struct rq *rq); +static u32 reenq_local(struct rq *rq); static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags); static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, s64 exit_code, const char *fmt, va_list args); @@ -755,6 +756,11 @@ static int ops_sanitize_err(struct scx_sched *sch, const char *ops_name, s32 err static void run_deferred(struct rq *rq) { process_ddsp_deferred_locals(rq); + + if (local_read(&rq->scx.reenq_local_deferred)) { + local_set(&rq->scx.reenq_local_deferred, 0); + reenq_local(rq); + } } static void deferred_bal_cb_workfn(struct rq *rq) @@ -4569,6 +4575,9 @@ static int validate_ops(struct scx_sched *sch, const struct sched_ext_ops *ops) if (ops->flags & SCX_OPS_HAS_CGROUP_WEIGHT) pr_warn("SCX_OPS_HAS_CGROUP_WEIGHT is deprecated and a noop\n"); + if (ops->cpu_acquire || ops->cpu_release) + pr_warn("ops->cpu_acquire/release() are deprecated, use sched_switch TP instead\n"); + return 0; } @@ -5929,6 +5938,9 @@ __bpf_kfunc_start_defs(); * Iterate over all of the tasks currently enqueued on the local DSQ of the * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of * processed tasks. Can only be called from ops.cpu_release(). + * + * COMPAT: Will be removed in v6.23 along with the ___v2 suffix on the void + * returning variant that can be called from anywhere. */ __bpf_kfunc u32 scx_bpf_reenqueue_local(void) { @@ -6487,6 +6499,24 @@ __bpf_kfunc void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, ops_dump_flush(); } +/** + * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ + * + * Iterate over all of the tasks currently enqueued on the local DSQ of the + * caller's CPU, and re-enqueue them in the BPF scheduler. Can be called from + * anywhere. 
+ */ +__bpf_kfunc void scx_bpf_reenqueue_local___v2(void) +{ + struct rq *rq; + + guard(preempt)(); + + rq = this_rq(); + local_set(&rq->scx.reenq_local_deferred, 1); + schedule_deferred(rq); +} + /** * scx_bpf_cpuperf_cap - Query the maximum relative capacity of a CPU * @cpu: CPU of interest @@ -6900,6 +6930,7 @@ BTF_ID_FLAGS(func, bpf_iter_scx_dsq_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, scx_bpf_dump_bstr, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, scx_bpf_reenqueue_local___v2) BTF_ID_FLAGS(func, scx_bpf_cpuperf_cap) BTF_ID_FLAGS(func, scx_bpf_cpuperf_cur) BTF_ID_FLAGS(func, scx_bpf_cpuperf_set) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 909e94794f8a..27aae2a298f8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -804,6 +804,7 @@ struct scx_rq { cpumask_var_t cpus_to_preempt; cpumask_var_t cpus_to_wait; unsigned long kick_sync; + local_t reenq_local_deferred; struct balance_callback deferred_bal_cb; struct irq_work deferred_irq_work; struct irq_work kick_cpus_irq_work; -- cgit v1.2.3 From 7900aa699c34401cf5d0c701d9ef72880ddc1a83 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Nov 2025 10:25:13 -1000 Subject: sched_ext: Fix cgroup exit ordering by moving sched_ext_free() to finish_task_switch() sched_ext_free() was called from __put_task_struct() when the last reference to the task is dropped, which could be long after the task has finished running. This causes cgroup-related problems: - ops.init_task() can be called on a cgroup which didn't get ops.cgroup_init()'d during scheduler load, because the cgroup might be destroyed/unlinked while the zombie or dead task is still lingering on the scx_tasks list. - ops.cgroup_exit() could be called before ops.exit_task() is called on all member tasks, leading to incorrect exit ordering. Fix by moving it to finish_task_switch() to be called right after the final context switch away from the dying task, matching when sched_class->task_dead() is called. Rename it to sched_ext_dead() to match the new calling context. By calling sched_ext_dead() before cgroup_task_dead(), we ensure that: - Tasks visible on scx_tasks list have valid cgroups during scheduler load, as cgroup_mutex prevents cgroup destruction while the task is still linked. - All member tasks have ops.exit_task() called and are removed from scx_tasks before the cgroup can be destroyed and trigger ops.cgroup_exit(). This fix is made possible by the cgroup_task_dead() split in the previous patch. This also makes more sense resource-wise as there's no point in keeping scheduler side resources around for dead tasks. 
Reported-by: Dan Schatzberg Cc: Peter Zijlstra Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/fork.c | 1 - kernel/sched/core.c | 6 ++++++ kernel/sched/ext.c | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 960c39c9c264..5ae37909a813 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -736,7 +736,6 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(tsk == current); unwind_task_free(tsk); - sched_ext_free(tsk); io_uring_free(tsk); cgroup_task_free(tsk); task_numa_free(tsk, true); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0324457622d7..3f4653106216 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5151,6 +5151,12 @@ static struct rq *finish_task_switch(struct task_struct *prev) if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); + /* + * sched_ext_dead() must come before cgroup_task_dead() to + * prevent cgroups from being removed while its member tasks are + * visible to SCX schedulers. + */ + sched_ext_dead(prev); cgroup_task_dead(prev); /* Task is done with its stack. */ diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index d1ef5bda95ae..2811e4f42a37 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2966,7 +2966,7 @@ void scx_cancel_fork(struct task_struct *p) percpu_up_read(&scx_fork_rwsem); } -void sched_ext_free(struct task_struct *p) +void sched_ext_dead(struct task_struct *p) { unsigned long flags; -- cgit v1.2.3 From 023af03caed8c1c7b863b912b661bb76a8c13c24 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 4 Nov 2025 11:40:22 -1000 Subject: sched_ext: Move __SCX_DSQ_ITER_ALL_FLAGS BUILD_BUG_ON to the right place The BUILD_BUG_ON() which checks that __SCX_DSQ_ITER_ALL_FLAGS doesn't overlap with the private lnode bits was in scx_task_iter_start() which has nothing to do with DSQ iteration. Move it to bpf_iter_scx_dsq_new() where it belongs. No functional changes. Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 2811e4f42a37..5af015891b15 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -474,9 +474,6 @@ struct scx_task_iter { */ static void scx_task_iter_start(struct scx_task_iter *iter) { - BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & - ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); - spin_lock_irq(&scx_tasks_lock); iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; @@ -6218,6 +6215,8 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, sizeof(struct bpf_iter_scx_dsq)); BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) != __alignof__(struct bpf_iter_scx_dsq)); + BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS & + ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1)); /* * next() and destroy() will be called regardless of the return value. -- cgit v1.2.3 From d723f36e014d296d668b577c0c69cc37be75e6d2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 4 Nov 2025 11:42:55 -1000 Subject: sched_ext: Minor cleanups to scx_task_iter - Use memset() in scx_task_iter_start() instead of zeroing fields individually. - In scx_task_iter_next(), move __scx_task_iter_maybe_relock() after the batch check which is simpler. - Update comment to reflect that tasks are removed from scx_tasks when dead (commit 7900aa699c34 ("sched_ext: Fix cgroup exit ordering by moving sched_ext_free() to finish_task_switch()")). No functional changes. 
Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 5af015891b15..652a364e9e4c 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -470,16 +470,16 @@ struct scx_task_iter { * RCU read lock or obtaining a reference count. * * All tasks which existed when the iteration started are guaranteed to be - * visited as long as they still exist. + * visited as long as they are not dead. */ static void scx_task_iter_start(struct scx_task_iter *iter) { + memset(iter, 0, sizeof(*iter)); + spin_lock_irq(&scx_tasks_lock); iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR }; list_add(&iter->cursor.tasks_node, &scx_tasks); - iter->locked_task = NULL; - iter->cnt = 0; iter->list_locked = true; } @@ -545,14 +545,13 @@ static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter) struct list_head *cursor = &iter->cursor.tasks_node; struct sched_ext_entity *pos; - __scx_task_iter_maybe_relock(iter); - if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) { scx_task_iter_unlock(iter); cond_resched(); - __scx_task_iter_maybe_relock(iter); } + __scx_task_iter_maybe_relock(iter); + list_for_each_entry(pos, cursor, tasks_node) { if (&pos->tasks_node == &scx_tasks) return NULL; -- cgit v1.2.3 From 5a629ecbcdffacfb04fc4eb0098c0688f38c8b9d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 5 Nov 2025 12:03:08 -1000 Subject: sched_ext: Mark racy bitfields to prevent adding fields that can't tolerate races The warned bitfields in struct scx_sched are updated racily from concurrent CPUs causing RMW races, which is fine for these boolean warning flags. Add a comment marking this area to prevent future fields that can't tolerate racy updates from being added here. Signed-off-by: Tejun Heo --- kernel/sched/ext_internal.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h index fb161fc35328..dd6f25fb6159 100644 --- a/kernel/sched/ext_internal.h +++ b/kernel/sched/ext_internal.h @@ -895,6 +895,10 @@ struct scx_sched { struct scx_dispatch_q **global_dsqs; struct scx_sched_pcpu __percpu *pcpu; + /* + * Updates to the following warned bitfields can race causing RMW issues + * but it doesn't really matter. + */ bool warned_zero_slice:1; bool warned_deprecated_rq:1; -- cgit v1.2.3 From bfd3749d489ec0df27ed94ee3dfd9475fea27bf9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:04 -1000 Subject: sched_ext: Use shorter slice in bypass mode There have been reported cases of bypass mode not making forward progress fast enough. The 20ms default slice is unnecessarily long for bypass mode where the primary goal is ensuring all tasks can make forward progress. Introduce SCX_SLICE_BYPASS set to 5ms and make the scheduler automatically switch to it when entering bypass mode. Also make the bypass slice value tunable through the slice_bypass_us module parameter (adjustable between 100us and 100ms) to make it easier to test whether slice durations are a factor in problem cases. v3: Use READ_ONCE/WRITE_ONCE for scx_slice_dfl access (Dan). v2: Removed slice_dfl_us module parameter. Fixed typos (Andrea). 
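Since the knob is exposed as a module parameter (see the /sys/module/sched_ext/parameters comment in the diff below), a small userspace sketch of adjusting it during testing follows. This is a hypothetical test program, not part of the patch; it requires root as the parameter mode is 0600, and per the parameter description the value is applied the next time bypass mode is entered on scheduler [un]load.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* parameter added by this patch; mode is 0600, so root only */
	const char *path = "/sys/module/sched_ext/parameters/slice_bypass_us";
	const char *val = "1000";	/* request a 1ms bypass slice */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* writes outside the 100us to 100ms range are rejected */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}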
Reviewed-by: Emil Tsalapatis Reviewed-by: Andrea Righi Cc: Dan Schatzberg Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 652a364e9e4c..1a9b28dd0961 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -143,6 +143,32 @@ static struct scx_dump_data scx_dump_data = { /* /sys/kernel/sched_ext interface */ static struct kset *scx_kset; +/* + * Parameters that can be adjusted through /sys/module/sched_ext/parameters. + * There usually is no reason to modify these as normal scheduler operation + * shouldn't be affected by them. The knobs are primarily for debugging. + */ +static u64 scx_slice_dfl = SCX_SLICE_DFL; +static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC; + +static int set_slice_us(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, 100, 100 * USEC_PER_MSEC); +} + +static const struct kernel_param_ops slice_us_param_ops = { + .set = set_slice_us, + .get = param_get_uint, +}; + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "sched_ext." + +module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600); +MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)"); + +#undef MODULE_PARAM_PREFIX + #define CREATE_TRACE_POINTS #include @@ -919,7 +945,7 @@ static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta) static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p) { - p->scx.slice = SCX_SLICE_DFL; + p->scx.slice = READ_ONCE(scx_slice_dfl); __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1); } @@ -2896,7 +2922,7 @@ void init_scx_entity(struct sched_ext_entity *scx) INIT_LIST_HEAD(&scx->runnable_node); scx->runnable_at = jiffies; scx->ddsp_dsq_id = SCX_DSQ_INVALID; - scx->slice = SCX_SLICE_DFL; + scx->slice = READ_ONCE(scx_slice_dfl); } void scx_pre_fork(struct task_struct *p) @@ -3774,6 +3800,7 @@ static void scx_bypass(bool bypass) WARN_ON_ONCE(scx_bypass_depth <= 0); if (scx_bypass_depth != 1) goto unlock; + WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC); bypass_timestamp = ktime_get_ns(); if (sch) scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); @@ -3782,6 +3809,7 @@ static void scx_bypass(bool bypass) WARN_ON_ONCE(scx_bypass_depth < 0); if (scx_bypass_depth != 0) goto unlock; + WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL); if (sch) scx_add_event(sch, SCX_EV_BYPASS_DURATION, ktime_get_ns() - bypass_timestamp); @@ -4780,7 +4808,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) queue_flags |= DEQUEUE_CLASS; scoped_guard (sched_change, p, queue_flags) { - p->scx.slice = SCX_SLICE_DFL; + p->scx.slice = READ_ONCE(scx_slice_dfl); p->sched_class = new_class; } } -- cgit v1.2.3 From 3546119f18647d7ddbba579737d8a222b430cb1c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:05 -1000 Subject: sched_ext: Refactor do_enqueue_task() local and global DSQ paths The local and global DSQ enqueue paths in do_enqueue_task() share the same slice refill logic. Factor out the common code into a shared enqueue label. This makes adding new enqueue cases easier. No functional changes. 
Reviewed-by: Andrea Righi Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 1a9b28dd0961..43083602c15e 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1279,6 +1279,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, { struct scx_sched *sch = scx_root; struct task_struct **ddsp_taskp; + struct scx_dispatch_q *dsq; unsigned long qseq; WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); @@ -1346,8 +1347,17 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, direct: direct_dispatch(sch, p, enq_flags); return; - +local_norefill: + dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); + return; local: + dsq = &rq->scx.local_dsq; + goto enqueue; +global: + dsq = find_global_dsq(sch, p); + goto enqueue; + +enqueue: /* * For task-ordering, slice refill must be treated as implying the end * of the current slice. Otherwise, the longer @p stays on the CPU, the @@ -1355,14 +1365,7 @@ local: */ touch_core_sched(rq, p); refill_task_slice_dfl(sch, p); -local_norefill: - dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags); - return; - -global: - touch_core_sched(rq, p); /* see the comment in local: */ - refill_task_slice_dfl(sch, p); - dispatch_enqueue(sch, find_global_dsq(sch, p), p, enq_flags); + dispatch_enqueue(sch, dsq, p, enq_flags); } static bool task_runnable(const struct task_struct *p) -- cgit v1.2.3 From 61debc251c1c9150c7bdfd5c028bc2d078e17d22 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:06 -1000 Subject: sched_ext: Use per-CPU DSQs instead of per-node global DSQs in bypass mode Bypass mode routes tasks through fallback dispatch queues. Originally a single global DSQ, b7b3b2dbae73 ("sched_ext: Split the global DSQ per NUMA node") changed this to per-node DSQs to resolve NUMA-related livelocks. Dan Schatzberg found per-node DSQs can still livelock when many threads are pinned to different small CPU subsets: each CPU must scan many incompatible tasks to find runnable ones, causing severe contention with high CPU counts. Switch to per-CPU bypass DSQs. Each task queues on its current CPU. Default idle CPU selection and direct dispatch handle most cases well. This introduces a failure mode when tasks concentrate on one CPU in over-saturated systems. If the BPF scheduler severely skews placement before triggering bypass, that CPU's queue may be too long to drain, causing RCU stalls. A load balancer in a future patch will address this. The bypass DSQ is separate from local DSQ to enable load balancing: local DSQs use rq locks, preventing efficient scanning and transfer across CPUs, especially problematic when systems are already contended. v2: Clarified why bypass DSQ is separate from local DSQ (Andrea Righi). 
Reported-by: Dan Schatzberg Reviewed-by: Dan Schatzberg Reviewed-by: Andrea Righi Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 16 +++++++++++++--- kernel/sched/sched.h | 1 + 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 43083602c15e..747391a3f6e3 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1298,7 +1298,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, if (scx_rq_bypassing(rq)) { __scx_add_event(sch, SCX_EV_BYPASS_DISPATCH, 1); - goto global; + goto bypass; } if (p->scx.ddsp_dsq_id != SCX_DSQ_INVALID) @@ -1356,6 +1356,9 @@ local: global: dsq = find_global_dsq(sch, p); goto enqueue; +bypass: + dsq = &task_rq(p)->scx.bypass_dsq; + goto enqueue; enqueue: /* @@ -2154,8 +2157,14 @@ static int balance_one(struct rq *rq, struct task_struct *prev) if (consume_global_dsq(sch, rq)) goto has_tasks; - if (unlikely(!SCX_HAS_OP(sch, dispatch)) || - scx_rq_bypassing(rq) || !scx_rq_online(rq)) + if (scx_rq_bypassing(rq)) { + if (consume_dispatch_q(sch, rq, &rq->scx.bypass_dsq)) + goto has_tasks; + else + goto no_tasks; + } + + if (unlikely(!SCX_HAS_OP(sch, dispatch)) || !scx_rq_online(rq)) goto no_tasks; dspc->rq = rq; @@ -5371,6 +5380,7 @@ void __init init_sched_ext_class(void) int n = cpu_to_node(cpu); init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); + init_dsq(&rq->scx.bypass_dsq, SCX_DSQ_BYPASS); INIT_LIST_HEAD(&rq->scx.runnable_list); INIT_LIST_HEAD(&rq->scx.ddsp_deferred_locals); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 27aae2a298f8..5991133a4849 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -808,6 +808,7 @@ struct scx_rq { struct balance_callback deferred_bal_cb; struct irq_work deferred_irq_work; struct irq_work kick_cpus_irq_work; + struct scx_dispatch_q bypass_dsq; }; #endif /* CONFIG_SCHED_CLASS_EXT */ -- cgit v1.2.3 From a69040ed57f50156e5452474d25c79b9e62075d0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:07 -1000 Subject: sched_ext: Simplify breather mechanism with scx_aborting flag The breather mechanism was introduced in 62dcbab8b0ef ("sched_ext: Avoid live-locking bypass mode switching") and e32c260195e6 ("sched_ext: Enable the ops breather and eject BPF scheduler on softlockup") to prevent live-locks by injecting delays when CPUs are trapped in dispatch paths. Currently, it uses scx_breather_depth (atomic_t) and scx_in_softlockup (unsigned long) with separate increment/decrement and cleanup operations. The breather is only activated when aborting, so tie it directly to the exit mechanism. Replace both variables with scx_aborting flag set when exit is claimed and cleared after bypass is enabled. Introduce scx_claim_exit() to consolidate exit_kind claiming and breather enablement. This eliminates scx_clear_softlockup() and simplifies scx_softlockup() and scx_bypass(). The breather mechanism will be replaced by a different abort mechanism in a future patch. This simplification prepares for that change. 
Reviewed-by: Dan Schatzberg Reviewed-by: Emil Tsalapatis Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 54 +++++++++++++++++++++++++----------------------------- 1 file changed, 25 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 747391a3f6e3..5da699cacde1 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -33,9 +33,8 @@ static DEFINE_MUTEX(scx_enable_mutex); DEFINE_STATIC_KEY_FALSE(__scx_enabled); DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED); -static unsigned long scx_in_softlockup; -static atomic_t scx_breather_depth = ATOMIC_INIT(0); static int scx_bypass_depth; +static bool scx_aborting; static bool scx_init_task_enabled; static bool scx_switching_all; DEFINE_STATIC_KEY_FALSE(__scx_switched_all); @@ -1831,7 +1830,7 @@ static void scx_breather(struct rq *rq) lockdep_assert_rq_held(rq); - if (likely(!atomic_read(&scx_breather_depth))) + if (likely(!READ_ONCE(scx_aborting))) return; raw_spin_rq_unlock(rq); @@ -1840,9 +1839,9 @@ static void scx_breather(struct rq *rq) do { int cnt = 1024; - while (atomic_read(&scx_breather_depth) && --cnt) + while (READ_ONCE(scx_aborting) && --cnt) cpu_relax(); - } while (atomic_read(&scx_breather_depth) && + } while (READ_ONCE(scx_aborting) && time_before64(ktime_get_ns(), until)); raw_spin_rq_lock(rq); @@ -3741,30 +3740,14 @@ void scx_softlockup(u32 dur_s) goto out_unlock; } - /* allow only one instance, cleared at the end of scx_bypass() */ - if (test_and_set_bit(0, &scx_in_softlockup)) - goto out_unlock; - printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n", smp_processor_id(), dur_s, scx_root->ops.name); - /* - * Some CPUs may be trapped in the dispatch paths. Enable breather - * immediately; otherwise, we might even be able to get to scx_bypass(). - */ - atomic_inc(&scx_breather_depth); - scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s); out_unlock: rcu_read_unlock(); } -static void scx_clear_softlockup(void) -{ - if (test_and_clear_bit(0, &scx_in_softlockup)) - atomic_dec(&scx_breather_depth); -} - /** * scx_bypass - [Un]bypass scx_ops and guarantee forward progress * @bypass: true for bypass, false for unbypass @@ -3827,8 +3810,6 @@ static void scx_bypass(bool bypass) ktime_get_ns() - bypass_timestamp); } - atomic_inc(&scx_breather_depth); - /* * No task property is changing. We just need to make sure all currently * queued tasks are re-queued according to the new scx_rq_bypassing() @@ -3884,10 +3865,8 @@ static void scx_bypass(bool bypass) raw_spin_rq_unlock(rq); } - atomic_dec(&scx_breather_depth); unlock: raw_spin_unlock_irqrestore(&bypass_lock, flags); - scx_clear_softlockup(); } static void free_exit_info(struct scx_exit_info *ei) @@ -3982,6 +3961,7 @@ static void scx_disable_workfn(struct kthread_work *work) /* guarantee forward progress by bypassing scx_ops */ scx_bypass(true); + WRITE_ONCE(scx_aborting, false); switch (scx_set_enable_state(SCX_DISABLING)) { case SCX_DISABLING: @@ -4104,9 +4084,24 @@ done: scx_bypass(false); } -static void scx_disable(enum scx_exit_kind kind) +static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind) { int none = SCX_EXIT_NONE; + + if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) + return false; + + /* + * Some CPUs may be trapped in the dispatch paths. Enable breather + * immediately; otherwise, we might not even be able to get to + * scx_bypass(). 
+ */ + WRITE_ONCE(scx_aborting, true); + return true; +} + +static void scx_disable(enum scx_exit_kind kind) +{ struct scx_sched *sch; if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE)) @@ -4115,7 +4110,7 @@ static void scx_disable(enum scx_exit_kind kind) rcu_read_lock(); sch = rcu_dereference(scx_root); if (sch) { - atomic_try_cmpxchg(&sch->exit_kind, &none, kind); + scx_claim_exit(sch, kind); kthread_queue_work(sch->helper, &sch->disable_work); } rcu_read_unlock(); @@ -4436,9 +4431,8 @@ static void scx_vexit(struct scx_sched *sch, const char *fmt, va_list args) { struct scx_exit_info *ei = sch->exit_info; - int none = SCX_EXIT_NONE; - if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind)) + if (!scx_claim_exit(sch, kind)) return; ei->exit_code = exit_code; @@ -4654,6 +4648,8 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) */ WARN_ON_ONCE(scx_set_enable_state(SCX_ENABLING) != SCX_DISABLED); WARN_ON_ONCE(scx_root); + if (WARN_ON_ONCE(READ_ONCE(scx_aborting))) + WRITE_ONCE(scx_aborting, false); atomic_long_set(&scx_nr_rejected, 0); -- cgit v1.2.3 From 5ebec443fb96ac305baaae51efe998194f3c87c4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:08 -1000 Subject: sched_ext: Exit dispatch and move operations immediately when aborting 62dcbab8b0ef ("sched_ext: Avoid live-locking bypass mode switching") introduced the breather mechanism to inject delays during bypass mode switching. It maintains operation semantics unchanged while reducing lock contention to avoid live-locks on large NUMA systems. However, the breather only activates when exiting the scheduler, so there's no need to maintain operation semantics. Simplify by exiting dispatch and move operations immediately when scx_aborting is set. In consume_dispatch_q(), break out of the task iteration loop. In scx_dsq_move(), return early before acquiring locks. This also fixes cases the breather mechanism cannot handle. When a large system has many runnable threads affinitized to different CPU subsets and the BPF scheduler places them all into a single DSQ, many CPUs can scan the DSQ concurrently for tasks they can run. This can cause DSQ and RQ locks to be held for extended periods, leading to various failure modes. The breather cannot solve this because once in the consume loop, there's no exit. The new mechanism fixes this by exiting the loop immediately. The bypass DSQ is exempted to ensure the bypass mechanism itself can make progress. v2: Use READ_ONCE() when reading scx_aborting (Andrea Righi). Reported-by: Dan Schatzberg Reviewed-by: Dan Schatzberg Cc: Andrea Righi Cc: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 62 ++++++++++++++++-------------------------------------- 1 file changed, 18 insertions(+), 44 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 5da699cacde1..56a6d453543a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1818,48 +1818,11 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch, return dst_rq; } -/* - * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly - * banging on the same DSQ on a large NUMA system to the point where switching - * to the bypass mode can take a long time. Inject artificial delays while the - * bypass mode is switching to guarantee timely completion. 
- */ -static void scx_breather(struct rq *rq) -{ - u64 until; - - lockdep_assert_rq_held(rq); - - if (likely(!READ_ONCE(scx_aborting))) - return; - - raw_spin_rq_unlock(rq); - - until = ktime_get_ns() + NSEC_PER_MSEC; - - do { - int cnt = 1024; - while (READ_ONCE(scx_aborting) && --cnt) - cpu_relax(); - } while (READ_ONCE(scx_aborting) && - time_before64(ktime_get_ns(), until)); - - raw_spin_rq_lock(rq); -} - static bool consume_dispatch_q(struct scx_sched *sch, struct rq *rq, struct scx_dispatch_q *dsq) { struct task_struct *p; retry: - /* - * This retry loop can repeatedly race against scx_bypass() dequeueing - * tasks from @dsq trying to put the system into the bypass mode. On - * some multi-socket machines (e.g. 2x Intel 8480c), this can live-lock - * the machine into soft lockups. Give a breather. - */ - scx_breather(rq); - /* * The caller can't expect to successfully consume a task if the task's * addition to @dsq isn't guaranteed to be visible somehow. Test @@ -1873,6 +1836,17 @@ retry: nldsq_for_each_task(p, dsq) { struct rq *task_rq = task_rq(p); + /* + * This loop can lead to multiple lockup scenarios, e.g. the BPF + * scheduler can put an enormous number of affinitized tasks into + * a contended DSQ, or the outer retry loop can repeatedly race + * against scx_bypass() dequeueing tasks from @dsq trying to put + * the system into the bypass mode. This can easily live-lock the + * machine. If aborting, exit from all non-bypass DSQs. + */ + if (unlikely(READ_ONCE(scx_aborting)) && dsq->id != SCX_DSQ_BYPASS) + break; + if (rq == task_rq) { task_unlink_from_dsq(p, dsq); move_local_task_to_local_dsq(p, 0, dsq, rq); @@ -5636,6 +5610,13 @@ static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit, !scx_kf_allowed(sch, SCX_KF_DISPATCH)) return false; + /* + * If the BPF scheduler keeps calling this function repeatedly, it can + * cause similar live-lock conditions as consume_dispatch_q(). + */ + if (unlikely(READ_ONCE(scx_aborting))) + return false; + /* * Can be called from either ops.dispatch() locking this_rq() or any * context where no rq lock is held. If latter, lock @p's task_rq which @@ -5656,13 +5637,6 @@ static bool scx_dsq_move(struct bpf_iter_scx_dsq_kern *kit, raw_spin_rq_lock(src_rq); } - /* - * If the BPF scheduler keeps calling this function repeatedly, it can - * cause similar live-lock conditions as consume_dispatch_q(). Insert a - * breather if necessary. - */ - scx_breather(src_rq); - locked_rq = src_rq; raw_spin_lock(&src_dsq->lock); -- cgit v1.2.3 From f2fe382e1f9e568e6702f4ee416de587e9d6b671 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:09 -1000 Subject: sched_ext: Make scx_exit() and scx_vexit() return bool Make scx_exit() and scx_vexit() return bool indicating whether the calling thread successfully claimed the exit. This will be used by the abort mechanism added in a later patch. 
Reviewed-by: Dan Schatzberg Reviewed-by: Andrea Righi Cc: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 56a6d453543a..e6e69a7fe105 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -174,18 +174,21 @@ MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un] static void process_ddsp_deferred_locals(struct rq *rq); static u32 reenq_local(struct rq *rq); static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags); -static void scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, +static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, s64 exit_code, const char *fmt, va_list args); -static __printf(4, 5) void scx_exit(struct scx_sched *sch, +static __printf(4, 5) bool scx_exit(struct scx_sched *sch, enum scx_exit_kind kind, s64 exit_code, const char *fmt, ...) { va_list args; + bool ret; va_start(args, fmt); - scx_vexit(sch, kind, exit_code, fmt, args); + ret = scx_vexit(sch, kind, exit_code, fmt, args); va_end(args); + + return ret; } #define scx_error(sch, fmt, args...) scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args) @@ -4400,14 +4403,14 @@ static void scx_error_irq_workfn(struct irq_work *irq_work) kthread_queue_work(sch->helper, &sch->disable_work); } -static void scx_vexit(struct scx_sched *sch, +static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, s64 exit_code, const char *fmt, va_list args) { struct scx_exit_info *ei = sch->exit_info; if (!scx_claim_exit(sch, kind)) - return; + return false; ei->exit_code = exit_code; #ifdef CONFIG_STACKTRACE @@ -4424,6 +4427,7 @@ static void scx_vexit(struct scx_sched *sch, ei->reason = scx_exit_reason(ei->kind); irq_work_queue(&sch->error_irq_work); + return true; } static int alloc_kick_syncs(void) -- cgit v1.2.3 From 4ba54a6cbd4ddde805b769b4be04623a2ce1668c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:10 -1000 Subject: sched_ext: Refactor lockup handlers into handle_lockup() scx_rcu_cpu_stall() and scx_softlockup() share the same pattern: check if the scheduler is enabled under RCU read lock and trigger an error if so. Extract the common pattern into handle_lockup() helper. Add scx_verror() macro and use guard(rcu)(). This simplifies both handlers, reduces code duplication, and prepares for hardlockup handling. Reviewed-by: Dan Schatzberg Reviewed-by: Andrea Righi Cc: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 65 +++++++++++++++++++++--------------------------------- 1 file changed, 25 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index e6e69a7fe105..ad5d3902e99d 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -192,6 +192,7 @@ static __printf(4, 5) bool scx_exit(struct scx_sched *sch, } #define scx_error(sch, fmt, args...) 
scx_exit((sch), SCX_EXIT_ERROR, 0, fmt, ##args) +#define scx_verror(sch, fmt, args) scx_vexit((sch), SCX_EXIT_ERROR, 0, fmt, args) #define SCX_HAS_OP(sch, op) test_bit(SCX_OP_IDX(op), (sch)->has_op) @@ -3654,39 +3655,40 @@ bool scx_allow_ttwu_queue(const struct task_struct *p) return false; } -/** - * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler - * - * While there are various reasons why RCU CPU stalls can occur on a system - * that may not be caused by the current BPF scheduler, try kicking out the - * current scheduler in an attempt to recover the system to a good state before - * issuing panics. - */ -bool scx_rcu_cpu_stall(void) +static __printf(1, 2) bool handle_lockup(const char *fmt, ...) { struct scx_sched *sch; + va_list args; - rcu_read_lock(); + guard(rcu)(); sch = rcu_dereference(scx_root); - if (unlikely(!sch)) { - rcu_read_unlock(); + if (unlikely(!sch)) return false; - } switch (scx_enable_state()) { case SCX_ENABLING: case SCX_ENABLED: - break; + va_start(args, fmt); + scx_verror(sch, fmt, args); + va_end(args); + return true; default: - rcu_read_unlock(); return false; } +} - scx_error(sch, "RCU CPU stall detected!"); - rcu_read_unlock(); - - return true; +/** + * scx_rcu_cpu_stall - sched_ext RCU CPU stall handler + * + * While there are various reasons why RCU CPU stalls can occur on a system + * that may not be caused by the current BPF scheduler, try kicking out the + * current scheduler in an attempt to recover the system to a good state before + * issuing panics. + */ +bool scx_rcu_cpu_stall(void) +{ + return handle_lockup("RCU CPU stall detected!"); } /** @@ -3701,28 +3703,11 @@ bool scx_rcu_cpu_stall(void) */ void scx_softlockup(u32 dur_s) { - struct scx_sched *sch; - - rcu_read_lock(); - - sch = rcu_dereference(scx_root); - if (unlikely(!sch)) - goto out_unlock; - - switch (scx_enable_state()) { - case SCX_ENABLING: - case SCX_ENABLED: - break; - default: - goto out_unlock; - } - - printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU%d stuck for %us, disabling \"%s\"\n", - smp_processor_id(), dur_s, scx_root->ops.name); + if (!handle_lockup("soft lockup - CPU %d stuck for %us", smp_processor_id(), dur_s)) + return; - scx_error(sch, "soft lockup - CPU#%d stuck for %us", smp_processor_id(), dur_s); -out_unlock: - rcu_read_unlock(); + printk_deferred(KERN_ERR "sched_ext: Soft lockup - CPU %d stuck for %us, disabling BPF scheduler\n", + smp_processor_id(), dur_s); } /** -- cgit v1.2.3 From 7ed8df0d15022fcc092e7c7f0bd82359476cff3c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:11 -1000 Subject: sched_ext: Make handle_lockup() propagate scx_verror() result handle_lockup() currently calls scx_verror() but ignores its return value, always returning true when the scheduler is enabled. Make it capture and return the result from scx_verror(). This prepares for hardlockup handling. Cc: Dan Schatzberg Cc: Emil Tsalapatis Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ad5d3902e99d..85bb052459ec 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3659,6 +3659,7 @@ static __printf(1, 2) bool handle_lockup(const char *fmt, ...) { struct scx_sched *sch; va_list args; + bool ret; guard(rcu)(); @@ -3670,9 +3671,9 @@ static __printf(1, 2) bool handle_lockup(const char *fmt, ...) 
case SCX_ENABLING: case SCX_ENABLED: va_start(args, fmt); - scx_verror(sch, fmt, args); + ret = scx_verror(sch, fmt, args); va_end(args); - return true; + return ret; default: return false; } -- cgit v1.2.3 From 582f700e1bdc5978f41e3d8d65d3e16e34e9be8a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:12 -1000 Subject: sched_ext: Hook up hardlockup detector A poorly behaving BPF scheduler can trigger hard lockup. For example, on a large system with many tasks pinned to different subsets of CPUs, if the BPF scheduler puts all tasks in a single DSQ and lets all CPUs at it, the DSQ lock can be contended to the point where hardlockup triggers. Unfortunately, hardlockup can be the first signal out of such situations, thus requiring hardlockup handling. Hook scx_hardlockup() into the hardlockup detector to try kicking out the current scheduler in an attempt to recover the system to a good state. The handling strategy can delay watchdog taking its own action by one polling period; however, given that the only remediation for hardlockup is crash, this is likely an acceptable trade-off. v2: Add missing dummy scx_hardlockup() definition for !CONFIG_SCHED_CLASS_EXT (kernel test bot). Reported-by: Dan Schatzberg Cc: Emil Tsalapatis Cc: Douglas Anderson Cc: Andrew Morton Reviewed-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 18 ++++++++++++++++++ kernel/watchdog.c | 9 +++++++++ 2 files changed, 27 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 85bb052459ec..b5c87a03f112 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3711,6 +3711,24 @@ void scx_softlockup(u32 dur_s) smp_processor_id(), dur_s); } +/** + * scx_hardlockup - sched_ext hardlockup handler + * + * A poorly behaving BPF scheduler can trigger hard lockup by e.g. putting + * numerous affinitized tasks in a single queue and directing all CPUs at it. + * Try kicking out the current scheduler in an attempt to recover the system to + * a good state before taking more drastic actions. + */ +bool scx_hardlockup(void) +{ + if (!handle_lockup("hard lockup - CPU %d", smp_processor_id())) + return false; + + printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n", + smp_processor_id()); + return true; +} + /** * scx_bypass - [Un]bypass scx_ops and guarantee forward progress * @bypass: true for bypass, false for unbypass diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5b62d1002783..8dfac4a8f587 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -196,6 +196,15 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) #ifdef CONFIG_SYSFS ++hardlockup_count; #endif + /* + * A poorly behaving BPF scheduler can trigger hard lockup by + * e.g. putting numerous affinitized tasks in a single queue and + * directing all CPUs at it. The following call can return true + * only once when sched_ext is enabled and will immediately + * abort the BPF scheduler and print out a warning message. + */ + if (scx_hardlockup()) + return; /* Only print hardlockups once. */ if (per_cpu(watchdog_hardlockup_warned, cpu)) -- cgit v1.2.3 From d2974cc79f7139cc851b84ad4f77805e93c40fe1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:14 -1000 Subject: sched_ext: Factor out scx_dsq_list_node cursor initialization into INIT_DSQ_LIST_CURSOR Factor out scx_dsq_list_node cursor initialization into INIT_DSQ_LIST_CURSOR macro in preparation for additional users. 
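The macro itself lives in ext_internal.h and is not part of this diff. Inferred from the open-coded initialization being replaced below, it presumably expands to something along these lines (a sketch, not the verbatim definition):

/* sketch only -- inferred from the open-coded init removed below */
#define INIT_DSQ_LIST_CURSOR(cursor, __flags, __priv)			\
	((struct scx_dsq_list_node){					\
		.node	= LIST_HEAD_INIT((cursor).node),		\
		.flags	= SCX_DSQ_LNODE_ITER_CURSOR | (__flags),	\
		.priv	= (__priv),					\
	})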
Reviewed-by: Emil Tsalapatis Cc: Dan Schatzberg Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index b5c87a03f112..56946aceeb28 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -6253,9 +6253,8 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, if (!kit->dsq) return -ENOENT; - INIT_LIST_HEAD(&kit->cursor.node); - kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags; - kit->cursor.priv = READ_ONCE(kit->dsq->seq); + kit->cursor = INIT_DSQ_LIST_CURSOR(kit->cursor, flags, + READ_ONCE(kit->dsq->seq)); return 0; } -- cgit v1.2.3 From d18b96ce12becf3f3aa3556ba722c2de61aca94e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:15 -1000 Subject: sched_ext: Factor out abbreviated dispatch dequeue into dispatch_dequeue_locked() move_task_between_dsqs() contains open-coded abbreviated dequeue logic when moving tasks between non-local DSQs. Factor this out into dispatch_dequeue_locked() which can be used when both the task's rq and dsq locks are already held. Add lockdep assertions to both dispatch_dequeue() and the new helper to verify locking requirements. This prepares for the load balancer which will need the same abbreviated dequeue pattern. Cc: Andrea Righi Cc: Dan Schatzberg Cc: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 56946aceeb28..10d8532f8d9b 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1106,6 +1106,8 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p) struct scx_dispatch_q *dsq = p->scx.dsq; bool is_local = dsq == &rq->scx.local_dsq; + lockdep_assert_rq_held(rq); + if (!dsq) { /* * If !dsq && on-list, @p is on @rq's ddsp_deferred_locals. @@ -1152,6 +1154,20 @@ static void dispatch_dequeue(struct rq *rq, struct task_struct *p) raw_spin_unlock(&dsq->lock); } +/* + * Abbreviated version of dispatch_dequeue() that can be used when both @p's rq + * and dsq are locked. + */ +static void dispatch_dequeue_locked(struct task_struct *p, + struct scx_dispatch_q *dsq) +{ + lockdep_assert_rq_held(task_rq(p)); + lockdep_assert_held(&dsq->lock); + + task_unlink_from_dsq(p, dsq); + p->scx.dsq = NULL; +} + static struct scx_dispatch_q *find_dsq_for_dispatch(struct scx_sched *sch, struct rq *rq, u64 dsq_id, struct task_struct *p) @@ -1812,8 +1828,7 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch, * @p is going from a non-local DSQ to a non-local DSQ. As * $src_dsq is already locked, do an abbreviated dequeue. */ - task_unlink_from_dsq(p, src_dsq); - p->scx.dsq = NULL; + dispatch_dequeue_locked(p, src_dsq); raw_spin_unlock(&src_dsq->lock); dispatch_enqueue(sch, dst_dsq, p, enq_flags); -- cgit v1.2.3 From 95d1df610cdc7497510cc710435a5c8c4e3db606 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Nov 2025 09:18:16 -1000 Subject: sched_ext: Implement load balancer for bypass mode In bypass mode, tasks are queued on per-CPU bypass DSQs. While this works well in most cases, there is a failure mode where a BPF scheduler can skew task placement severely before triggering bypass in highly over-saturated systems. 
If most tasks end up concentrated on a few CPUs, those CPUs can accumulate queues that are too long to drain in a reasonable time, leading to RCU stalls and hung tasks. Implement a simple timer-based load balancer that redistributes tasks across CPUs within each NUMA node. The balancer runs periodically (default 500ms, tunable via bypass_lb_intv_us module parameter) and moves tasks from overloaded CPUs to underloaded ones. When moving tasks between bypass DSQs, the load balancer holds nested DSQ locks to avoid dropping and reacquiring the donor DSQ lock on each iteration, as donor DSQs can be very long and highly contended. Add the SCX_ENQ_NESTED flag and use raw_spin_lock_nested() in dispatch_enqueue() to support this. The load balancer timer function reads scx_bypass_depth locklessly to check whether bypass mode is active. Use WRITE_ONCE() when updating scx_bypass_depth to pair with the READ_ONCE() in the timer function. This has been tested on a 192 CPU dual socket AMD EPYC machine with ~20k runnable tasks running scx_cpu0. As scx_cpu0 queues all tasks to CPU0, almost all tasks end up on CPU0 creating severe imbalance. Without the load balancer, disabling the scheduler can lead to RCU stalls and hung tasks, taking a very long time to complete. With the load balancer, disable completes in about a second. The load balancing operation can be monitored using the sched_ext_bypass_lb tracepoint and disabled by setting bypass_lb_intv_us to 0. v2: Lock both rq and DSQ in bypass_lb_cpu() and use dispatch_dequeue_locked() to prevent races with dispatch_dequeue() (Andrea Righi). Cc: Andrea Righi Cc: Dan Schatzberg Cc: Emil Tsalapatis Reviewed_by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 239 +++++++++++++++++++++++++++++++++++++++++++- kernel/sched/ext_internal.h | 6 ++ 2 files changed, 242 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 10d8532f8d9b..c900667b25b8 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -34,6 +34,8 @@ DEFINE_STATIC_KEY_FALSE(__scx_enabled); DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); static atomic_t scx_enable_state_var = ATOMIC_INIT(SCX_DISABLED); static int scx_bypass_depth; +static cpumask_var_t scx_bypass_lb_donee_cpumask; +static cpumask_var_t scx_bypass_lb_resched_cpumask; static bool scx_aborting; static bool scx_init_task_enabled; static bool scx_switching_all; @@ -149,6 +151,7 @@ static struct kset *scx_kset; */ static u64 scx_slice_dfl = SCX_SLICE_DFL; static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC; +static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US; static int set_slice_us(const char *val, const struct kernel_param *kp) { @@ -160,11 +163,23 @@ static const struct kernel_param_ops slice_us_param_ops = { .get = param_get_uint, }; +static int set_bypass_lb_intv_us(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, 0, 10 * USEC_PER_SEC); +} + +static const struct kernel_param_ops bypass_lb_intv_us_param_ops = { + .set = set_bypass_lb_intv_us, + .get = param_get_uint, +}; + #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "sched_ext." 
module_param_cb(slice_bypass_us, &slice_us_param_ops, &scx_slice_bypass_us, 0600); MODULE_PARM_DESC(slice_bypass_us, "bypass slice in microseconds, applied on [un]load (100us to 100ms)"); +module_param_cb(bypass_lb_intv_us, &bypass_lb_intv_us_param_ops, &scx_bypass_lb_intv_us, 0600); +MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microseconds (0 (disable) to 10s)"); #undef MODULE_PARAM_PREFIX @@ -962,7 +977,9 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq, !RB_EMPTY_NODE(&p->scx.dsq_priq)); if (!is_local) { - raw_spin_lock(&dsq->lock); + raw_spin_lock_nested(&dsq->lock, + (enq_flags & SCX_ENQ_NESTED) ? SINGLE_DEPTH_NESTING : 0); + if (unlikely(dsq->id == SCX_DSQ_INVALID)) { scx_error(sch, "attempting to dispatch to a destroyed dsq"); /* fall back to the global dsq */ @@ -3744,6 +3761,207 @@ bool scx_hardlockup(void) return true; } +static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq, + struct cpumask *donee_mask, struct cpumask *resched_mask, + u32 nr_donor_target, u32 nr_donee_target) +{ + struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq; + struct task_struct *p, *n; + struct scx_dsq_list_node cursor = INIT_DSQ_LIST_CURSOR(cursor, 0, 0); + s32 delta = READ_ONCE(donor_dsq->nr) - nr_donor_target; + u32 nr_balanced = 0, min_delta_us; + + /* + * All we want to guarantee is reasonable forward progress. No reason to + * fine tune. Assuming every task on @donor_dsq runs their full slice, + * consider offloading iff the total queued duration is over the + * threshold. + */ + min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV; + if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us)) + return 0; + + raw_spin_rq_lock_irq(rq); + raw_spin_lock(&donor_dsq->lock); + list_add(&cursor.node, &donor_dsq->list); +resume: + n = container_of(&cursor, struct task_struct, scx.dsq_list); + n = nldsq_next_task(donor_dsq, n, false); + + while ((p = n)) { + struct rq *donee_rq; + struct scx_dispatch_q *donee_dsq; + int donee; + + n = nldsq_next_task(donor_dsq, n, false); + + if (donor_dsq->nr <= nr_donor_target) + break; + + if (cpumask_empty(donee_mask)) + break; + + donee = cpumask_any_and_distribute(donee_mask, p->cpus_ptr); + if (donee >= nr_cpu_ids) + continue; + + donee_rq = cpu_rq(donee); + donee_dsq = &donee_rq->scx.bypass_dsq; + + /* + * $p's rq is not locked but $p's DSQ lock protects its + * scheduling properties making this test safe. + */ + if (!task_can_run_on_remote_rq(sch, p, donee_rq, false)) + continue; + + /* + * Moving $p from one non-local DSQ to another. The source rq + * and DSQ are already locked. Do an abbreviated dequeue and + * then perform enqueue without unlocking $donor_dsq. + * + * We don't want to drop and reacquire the lock on each + * iteration as @donor_dsq can be very long and potentially + * highly contended. Donee DSQs are less likely to be contended. + * The nested locking is safe as only this LB moves tasks + * between bypass DSQs. + */ + dispatch_dequeue_locked(p, donor_dsq); + dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED); + + /* + * $donee might have been idle and need to be woken up. No need + * to be clever. Kick every CPU that receives tasks. 
+ */ + cpumask_set_cpu(donee, resched_mask); + + if (READ_ONCE(donee_dsq->nr) >= nr_donee_target) + cpumask_clear_cpu(donee, donee_mask); + + nr_balanced++; + if (!(nr_balanced % SCX_BYPASS_LB_BATCH) && n) { + list_move_tail(&cursor.node, &n->scx.dsq_list.node); + raw_spin_unlock(&donor_dsq->lock); + raw_spin_rq_unlock_irq(rq); + cpu_relax(); + raw_spin_rq_lock_irq(rq); + raw_spin_lock(&donor_dsq->lock); + goto resume; + } + } + + list_del_init(&cursor.node); + raw_spin_unlock(&donor_dsq->lock); + raw_spin_rq_unlock_irq(rq); + + return nr_balanced; +} + +static void bypass_lb_node(struct scx_sched *sch, int node) +{ + const struct cpumask *node_mask = cpumask_of_node(node); + struct cpumask *donee_mask = scx_bypass_lb_donee_cpumask; + struct cpumask *resched_mask = scx_bypass_lb_resched_cpumask; + u32 nr_tasks = 0, nr_cpus = 0, nr_balanced = 0; + u32 nr_target, nr_donor_target; + u32 before_min = U32_MAX, before_max = 0; + u32 after_min = U32_MAX, after_max = 0; + int cpu; + + /* count the target tasks and CPUs */ + for_each_cpu_and(cpu, cpu_online_mask, node_mask) { + u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); + + nr_tasks += nr; + nr_cpus++; + + before_min = min(nr, before_min); + before_max = max(nr, before_max); + } + + if (!nr_cpus) + return; + + /* + * We don't want CPUs to have more than $nr_donor_target tasks and + * balancing to fill donee CPUs upto $nr_target. Once targets are + * calculated, find the donee CPUs. + */ + nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus); + nr_donor_target = DIV_ROUND_UP(nr_target * SCX_BYPASS_LB_DONOR_PCT, 100); + + cpumask_clear(donee_mask); + for_each_cpu_and(cpu, cpu_online_mask, node_mask) { + if (READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr) < nr_target) + cpumask_set_cpu(cpu, donee_mask); + } + + /* iterate !donee CPUs and see if they should be offloaded */ + cpumask_clear(resched_mask); + for_each_cpu_and(cpu, cpu_online_mask, node_mask) { + struct rq *rq = cpu_rq(cpu); + struct scx_dispatch_q *donor_dsq = &rq->scx.bypass_dsq; + + if (cpumask_empty(donee_mask)) + break; + if (cpumask_test_cpu(cpu, donee_mask)) + continue; + if (READ_ONCE(donor_dsq->nr) <= nr_donor_target) + continue; + + nr_balanced += bypass_lb_cpu(sch, rq, donee_mask, resched_mask, + nr_donor_target, nr_target); + } + + for_each_cpu(cpu, resched_mask) { + struct rq *rq = cpu_rq(cpu); + + raw_spin_rq_lock_irq(rq); + resched_curr(rq); + raw_spin_rq_unlock_irq(rq); + } + + for_each_cpu_and(cpu, cpu_online_mask, node_mask) { + u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr); + + after_min = min(nr, after_min); + after_max = max(nr, after_max); + + } + + trace_sched_ext_bypass_lb(node, nr_cpus, nr_tasks, nr_balanced, + before_min, before_max, after_min, after_max); +} + +/* + * In bypass mode, all tasks are put on the per-CPU bypass DSQs. If the machine + * is over-saturated and the BPF scheduler skewed tasks into few CPUs, some + * bypass DSQs can be overloaded. If there are enough tasks to saturate other + * lightly loaded CPUs, such imbalance can lead to very high execution latency + * on the overloaded CPUs and thus to hung tasks and RCU stalls. To avoid such + * outcomes, a simple load balancing mechanism is implemented by the following + * timer which runs periodically while bypass mode is in effect. 
+ */ +static void scx_bypass_lb_timerfn(struct timer_list *timer) +{ + struct scx_sched *sch; + int node; + u32 intv_us; + + sch = rcu_dereference_all(scx_root); + if (unlikely(!sch) || !READ_ONCE(scx_bypass_depth)) + return; + + for_each_node_with_cpus(node) + bypass_lb_node(sch, node); + + intv_us = READ_ONCE(scx_bypass_lb_intv_us); + if (intv_us) + mod_timer(timer, jiffies + usecs_to_jiffies(intv_us)); +} + +static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn); + /** * scx_bypass - [Un]bypass scx_ops and guarantee forward progress * @bypass: true for bypass, false for unbypass @@ -3787,7 +4005,9 @@ static void scx_bypass(bool bypass) sch = rcu_dereference_bh(scx_root); if (bypass) { - scx_bypass_depth++; + u32 intv_us; + + WRITE_ONCE(scx_bypass_depth, scx_bypass_depth + 1); WARN_ON_ONCE(scx_bypass_depth <= 0); if (scx_bypass_depth != 1) goto unlock; @@ -3795,8 +4015,15 @@ static void scx_bypass(bool bypass) bypass_timestamp = ktime_get_ns(); if (sch) scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1); + + intv_us = READ_ONCE(scx_bypass_lb_intv_us); + if (intv_us && !timer_pending(&scx_bypass_lb_timer)) { + scx_bypass_lb_timer.expires = + jiffies + usecs_to_jiffies(intv_us); + add_timer_global(&scx_bypass_lb_timer); + } } else { - scx_bypass_depth--; + WRITE_ONCE(scx_bypass_depth, scx_bypass_depth - 1); WARN_ON_ONCE(scx_bypass_depth < 0); if (scx_bypass_depth != 0) goto unlock; @@ -7052,6 +7279,12 @@ static int __init scx_init(void) return ret; } + if (!alloc_cpumask_var(&scx_bypass_lb_donee_cpumask, GFP_KERNEL) || + !alloc_cpumask_var(&scx_bypass_lb_resched_cpumask, GFP_KERNEL)) { + pr_err("sched_ext: Failed to allocate cpumasks\n"); + return -ENOMEM; + } + return 0; } __initcall(scx_init); diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h index dd6f25fb6159..386c677e4c9a 100644 --- a/kernel/sched/ext_internal.h +++ b/kernel/sched/ext_internal.h @@ -23,6 +23,11 @@ enum scx_consts { * scx_tasks_lock to avoid causing e.g. CSD and RCU stalls. */ SCX_TASK_ITER_BATCH = 32, + + SCX_BYPASS_LB_DFL_INTV_US = 500 * USEC_PER_MSEC, + SCX_BYPASS_LB_DONOR_PCT = 125, + SCX_BYPASS_LB_MIN_DELTA_DIV = 4, + SCX_BYPASS_LB_BATCH = 256, }; enum scx_exit_kind { @@ -963,6 +968,7 @@ enum scx_enq_flags { SCX_ENQ_CLEAR_OPSS = 1LLU << 56, SCX_ENQ_DSQ_PRIQ = 1LLU << 57, + SCX_ENQ_NESTED = 1LLU << 58, }; enum scx_deq_flags { -- cgit v1.2.3 From 67932f691895294a95861571b0ca69a38e0a4894 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Wed, 12 Nov 2025 18:29:45 +0100 Subject: sched_ext: Update comments replacing breather with aborting mechanism Commit 5ebec443fb96a ("sched_ext: Exit dispatch and move operations immediately when aborting") replaced the breather mechanism with the scx_aborting flag. Update comments removing references to the breather mechanism to avoid confusion. Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index c900667b25b8..8a3b8f64a06b 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3732,7 +3732,7 @@ bool scx_rcu_cpu_stall(void) * live-lock the system by making many CPUs target the same DSQ to the point * where soft-lockup detection triggers. This function is called from * soft-lockup watchdog when the triggering point is close and tries to unjam - * the system by enabling the breather and aborting the BPF scheduler. + * the system and aborting the BPF scheduler. 
*/ void scx_softlockup(u32 dur_s) { @@ -4315,9 +4315,9 @@ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind) return false; /* - * Some CPUs may be trapped in the dispatch paths. Enable breather - * immediately; otherwise, we might not even be able to get to - * scx_bypass(). + * Some CPUs may be trapped in the dispatch paths. Set the aborting + * flag to break potential live-lock scenarios, ensuring we can + * successfully reach scx_bypass(). */ WRITE_ONCE(scx_aborting, true); return true; -- cgit v1.2.3 From 1dcb98bbb7538d4b9015d47c934acdf5ea86045c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Nov 2025 15:33:41 -1000 Subject: sched_ext: Pass locked CPU parameter to scx_hardlockup() and add docs With the buddy lockup detector, smp_processor_id() returns the detecting CPU, not the locked CPU, making scx_hardlockup()'s printouts confusing. Pass the locked CPU number from watchdog_hardlockup_check() as a parameter instead. Also add kerneldoc comments to handle_lockup(), scx_hardlockup(), and scx_rcu_cpu_stall() documenting their return value semantics. Suggested-by: Doug Anderson Reviewed-by: Douglas Anderson Acked-by: Andrea Righi Reviewed-by: Emil Tsalapatis Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 25 ++++++++++++++++++++++--- kernel/watchdog.c | 2 +- 2 files changed, 23 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 8a3b8f64a06b..918573f3f088 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3687,6 +3687,17 @@ bool scx_allow_ttwu_queue(const struct task_struct *p) return false; } +/** + * handle_lockup - sched_ext common lockup handler + * @fmt: format string + * + * Called on system stall or lockup condition and initiates abort of sched_ext + * if enabled, which may resolve the reported lockup. + * + * Returns %true if sched_ext is enabled and abort was initiated, which may + * resolve the lockup. %false if sched_ext is not enabled or abort was already + * initiated by someone else. + */ static __printf(1, 2) bool handle_lockup(const char *fmt, ...) { struct scx_sched *sch; @@ -3718,6 +3729,10 @@ static __printf(1, 2) bool handle_lockup(const char *fmt, ...) * that may not be caused by the current BPF scheduler, try kicking out the * current scheduler in an attempt to recover the system to a good state before * issuing panics. + * + * Returns %true if sched_ext is enabled and abort was initiated, which may + * resolve the reported RCU stall. %false if sched_ext is not enabled or someone + * else already initiated abort. */ bool scx_rcu_cpu_stall(void) { @@ -3750,14 +3765,18 @@ void scx_softlockup(u32 dur_s) * numerous affinitized tasks in a single queue and directing all CPUs at it. * Try kicking out the current scheduler in an attempt to recover the system to * a good state before taking more drastic actions. + * + * Returns %true if sched_ext is enabled and abort was initiated, which may + * resolve the reported hardlockdup. %false if sched_ext is not enabled or + * someone else already initiated abort. 
 */
-bool scx_hardlockup(void)
+bool scx_hardlockup(int cpu)
 {
-        if (!handle_lockup("hard lockup - CPU %d", smp_processor_id()))
+        if (!handle_lockup("hard lockup - CPU %d", cpu))
                 return false;
 
         printk_deferred(KERN_ERR "sched_ext: Hard lockup - CPU %d, disabling BPF scheduler\n",
-                        smp_processor_id());
+                        cpu);
 
         return true;
 }
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 8dfac4a8f587..873020a2a581 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -203,7 +203,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
          * only once when sched_ext is enabled and will immediately
          * abort the BPF scheduler and print out a warning message.
          */
-        if (scx_hardlockup())
+        if (scx_hardlockup(cpu))
                 return;
 
         /* Only print hardlockups once. */
-- 
cgit v1.2.3


From 348d3c587ac6fa5aaa24ab5c71b7b5635c47e765 Mon Sep 17 00:00:00 2001
From: Zqiang
Date: Mon, 17 Nov 2025 20:53:11 +0800
Subject: sched_ext: Use kvfree_rcu() to release per-cpu ksyncs object

The free_kick_syncs_rcu() RCU callback only invokes kvfree() to release
the per-CPU ksyncs object, so free_kick_syncs() can use kvfree_rcu()
instead of call_rcu() to release it.

Signed-off-by: Zqiang
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 918573f3f088..dca9ca0c1854 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4161,13 +4161,6 @@ static const char *scx_exit_reason(enum scx_exit_kind kind)
         }
 }
 
-static void free_kick_syncs_rcu(struct rcu_head *rcu)
-{
-        struct scx_kick_syncs *ksyncs = container_of(rcu, struct scx_kick_syncs, rcu);
-
-        kvfree(ksyncs);
-}
-
 static void free_kick_syncs(void)
 {
         int cpu;
@@ -4178,7 +4171,7 @@ static void free_kick_syncs(void)
                 to_free = rcu_replace_pointer(*ksyncs, NULL, true);
 
                 if (to_free)
-                        call_rcu(&to_free->rcu, free_kick_syncs_rcu);
+                        kvfree_rcu(to_free, rcu);
         }
 }
 
-- 
cgit v1.2.3


From 1dd6c84f1c544e552848a8968599220bd464e338 Mon Sep 17 00:00:00 2001
From: Zqiang
Date: Mon, 1 Dec 2025 19:25:40 +0800
Subject: sched_ext: Fix incorrect sched_class settings for per-cpu migration tasks

When loading a BPF scheduler, the tasks in the scx_tasks list are
traversed and __setscheduler_class() is invoked to pick each task's new
sched_class. However, this also incorrectly sets the per-cpu migration
tasks' ->sched_class to rt_sched_class, and even after unloading, the
per-cpu migration tasks' ->sched_class remains rt_sched_class.

The log for this issue is as follows:

./scx_rustland --stats 1
[  199.245639][  T630] sched_ext: "rustland" does not implement cgroup cpu.weight
[  199.269213][  T630] sched_ext: BPF scheduler "rustland" enabled
04:25:09 [INFO] RustLand scheduler attached

bpftrace -e 'iter:task /strcontains(ctx->task->comm, "migration")/ { printf("%s:%d->%pS\n", ctx->task->comm, ctx->task->pid, ctx->task->sched_class); }'
Attaching 1 probe...
migration/0:24->rt_sched_class+0x0/0xe0
migration/1:27->rt_sched_class+0x0/0xe0
migration/2:33->rt_sched_class+0x0/0xe0
migration/3:39->rt_sched_class+0x0/0xe0
migration/4:45->rt_sched_class+0x0/0xe0
migration/5:52->rt_sched_class+0x0/0xe0
migration/6:58->rt_sched_class+0x0/0xe0
migration/7:64->rt_sched_class+0x0/0xe0

sched_ext: BPF scheduler "rustland" disabled (unregistered from user space)
EXIT: unregistered from user space
04:25:21 [INFO] Unregister RustLand scheduler

bpftrace -e 'iter:task /strcontains(ctx->task->comm, "migration")/ { printf("%s:%d->%pS\n", ctx->task->comm, ctx->task->pid, ctx->task->sched_class); }'
Attaching 1 probe...
migration/0:24->rt_sched_class+0x0/0xe0
migration/1:27->rt_sched_class+0x0/0xe0
migration/2:33->rt_sched_class+0x0/0xe0
migration/3:39->rt_sched_class+0x0/0xe0
migration/4:45->rt_sched_class+0x0/0xe0
migration/5:52->rt_sched_class+0x0/0xe0
migration/6:58->rt_sched_class+0x0/0xe0
migration/7:64->rt_sched_class+0x0/0xe0

Therefore, introduce a new scx_setscheduler_class() helper that checks
for stop_sched_class and use it in place of __setscheduler_class().

Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
Cc: stable@vger.kernel.org # v6.12+
Signed-off-by: Zqiang
Reviewed-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index dca9ca0c1854..b563b8c3fd24 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -248,6 +248,14 @@ static struct scx_dispatch_q *find_user_dsq(struct scx_sched *sch, u64 dsq_id)
         return rhashtable_lookup(&sch->dsq_hash, &dsq_id, dsq_hash_params);
 }
 
+static const struct sched_class *scx_setscheduler_class(struct task_struct *p)
+{
+        if (p->sched_class == &stop_sched_class)
+                return &stop_sched_class;
+
+        return __setscheduler_class(p->policy, p->prio);
+}
+
 /*
  * scx_kf_mask enforcement. Some kfuncs can only be called from specific SCX
  * ops. When invoking SCX ops, SCX_CALL_OP[_RET]() should be used to indicate
@@ -4241,8 +4249,7 @@ static void scx_disable_workfn(struct kthread_work *work)
         while ((p = scx_task_iter_next_locked(&sti))) {
                 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
                 const struct sched_class *old_class = p->sched_class;
-                const struct sched_class *new_class =
-                        __setscheduler_class(p->policy, p->prio);
+                const struct sched_class *new_class = scx_setscheduler_class(p);
 
                 update_rq_clock(task_rq(p));
 
@@ -5042,8 +5049,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
         while ((p = scx_task_iter_next_locked(&sti))) {
                 unsigned int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
                 const struct sched_class *old_class = p->sched_class;
-                const struct sched_class *new_class =
-                        __setscheduler_class(p->policy, p->prio);
+                const struct sched_class *new_class = scx_setscheduler_class(p);
 
                 if (scx_get_task_state(p) != SCX_TASK_READY)
                         continue;
-- 
cgit v1.2.3
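
Note: the donor/donee thresholds used by bypass_lb_node() earlier in this log
are easier to see with concrete numbers. The following is a minimal,
standalone C sketch of the same arithmetic (an even per-CPU share rounded up,
with donors defined as CPUs above 125% of that share); the function and macro
names and the example figures are illustrative only and are not part of any
patch above.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DONOR_PCT               125     /* mirrors SCX_BYPASS_LB_DONOR_PCT */

static void bypass_lb_targets(unsigned int nr_tasks, unsigned int nr_cpus)
{
        /* donees are filled up to the even share of bypass tasks per CPU */
        unsigned int nr_target = DIV_ROUND_UP(nr_tasks, nr_cpus);
        /* only CPUs above 125% of that share donate tasks */
        unsigned int nr_donor_target = DIV_ROUND_UP(nr_target * DONOR_PCT, 100);

        printf("%u tasks / %u CPUs -> donee fill target %u, donor threshold %u\n",
               nr_tasks, nr_cpus, nr_target, nr_donor_target);
}

int main(void)
{
        /* e.g. 1000 runnable tasks skewed onto an 8-CPU node */
        bypass_lb_targets(1000, 8);     /* prints targets 125 and 157 */
        return 0;
}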
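
Note: bypass_lb_cpu() above relies on a common kernel pattern for walking a
long, lock-protected list without hogging the lock: a cursor node stays linked
in the list, the locks are dropped every SCX_BYPASS_LB_BATCH tasks, and the
walk resumes from the cursor afterwards. The sketch below shows only the bare
pattern with a plain spinlock and a made-up item type; the kernel code applies
the same idea to the DSQ task list under the rq and DSQ locks.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/processor.h>    /* cpu_relax() */

struct item {
        struct list_head node;
};

static void walk_with_cursor(struct list_head *head, spinlock_t *lock,
                             unsigned int batch)
{
        /* the cursor is a list node that never represents a real item */
        struct item cursor = { .node = LIST_HEAD_INIT(cursor.node) };
        unsigned int count = 0;

        spin_lock(lock);
        list_add(&cursor.node, head);

        while (!list_is_last(&cursor.node, head)) {
                struct item *pos = list_next_entry(&cursor, node);

                /* ... inspect or migrate @pos here ... */

                /* move the cursor past @pos so the walk can resume there */
                list_move(&cursor.node, &pos->node);

                if (!(++count % batch)) {
                        /* let other lock waiters in before continuing */
                        spin_unlock(lock);
                        cpu_relax();
                        spin_lock(lock);
                }
        }

        list_del_init(&cursor.node);
        spin_unlock(lock);
}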
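
Note: the kvfree_rcu() conversion above follows a general kernel rule of
thumb: when an RCU callback does nothing but kvfree() the enclosing object,
the callback plus call_rcu() pair can be replaced with the two-argument
kvfree_rcu(ptr, rcu_head_field). A generic before/after sketch (the struct and
function names are made up and are not from the patch):

#include <linux/container_of.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int data;
        struct rcu_head rcu;
};

/* before: an explicit callback whose only job is freeing the object */
static void foo_free_rcu(struct rcu_head *rcu)
{
        kvfree(container_of(rcu, struct foo, rcu));
}

static void foo_release_old(struct foo *old)
{
        call_rcu(&old->rcu, foo_free_rcu);
}

/* after: let the RCU core kvfree() the object once a grace period elapses */
static void foo_release_new(struct foo *old)
{
        kvfree_rcu(old, rcu);
}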