| author | Peter Zijlstra <peterz@infradead.org> | 2026-01-23 16:49:09 +0100 |
|---|---|---|
| committer | Peter Zijlstra <peterz@infradead.org> | 2026-02-23 11:19:18 +0100 |
| commit | bcd74b2ffdd0a2233adbf26b65c62fc69a809c8e | |
| tree | f57ae03099055b5afd9f513e83beb770eb890d40 | |
| parent | b3d99f43c72b56cf7a104a364e7fb34b0702828b | |
sched/fair: Only set slice protection at pick time
We should not (re)set slice protection in the sched_change pattern
which calls put_prev_task() / set_next_task().
Fixes: 63304558ba5d ("sched/eevdf: Curb wakeup-preemption")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Tested-by: Shubhang Kaushik <shubhang@os.amperecomputing.com>
Link: https://patch.msgid.link/20260219080624.561421378%40infradead.org
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 11 |
1 file changed, 6 insertions, 5 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56dddd4fd208..f2b46c33a8c5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5445,7 +5445,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 }
 
 static void
-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool first)
 {
 	clear_buddies(cfs_rq, se);
 
@@ -5460,7 +5460,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		__dequeue_entity(cfs_rq, se);
 		update_load_avg(cfs_rq, se, UPDATE_TG);
 
-		set_protect_slice(cfs_rq, se);
+		if (first)
+			set_protect_slice(cfs_rq, se);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -8978,13 +8979,13 @@ again:
 			pse = parent_entity(pse);
 		}
 		if (se_depth >= pse_depth) {
-			set_next_entity(cfs_rq_of(se), se);
+			set_next_entity(cfs_rq_of(se), se, true);
 			se = parent_entity(se);
 		}
 	}
 
 	put_prev_entity(cfs_rq, pse);
-	set_next_entity(cfs_rq, se);
+	set_next_entity(cfs_rq, se, true);
 
 	__set_next_task_fair(rq, p, true);
 }
@@ -13598,7 +13599,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		set_next_entity(cfs_rq, se);
+		set_next_entity(cfs_rq, se, first);
 		/* ensure bandwidth has been allocated on our new cfs_rq */
 		account_cfs_rq_runtime(cfs_rq, 0);
 	}
```
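As a rough illustration of the behaviour the commit message describes, here is a minimal, self-contained sketch in plain userspace C. It is not the kernel implementation: `struct entity`, `set_protect_slice()`'s body, and the `main()` driver are hypothetical stand-ins. The point it mirrors is that the new `first` argument lets `set_next_entity()` arm slice protection only on a genuine pick, while a put_prev_task()/set_next_task() replay (the sched_change pattern) leaves the protection state untouched.

```c
/*
 * Sketch only: models the "arm slice protection only on a real pick" idea.
 * Not kernel code; names below are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct entity {
	bool protect_slice;	/* wakeup-preemption protection armed? */
};

/* stand-in for set_protect_slice(): arm protection for this entity */
static void set_protect_slice(struct entity *se)
{
	se->protect_slice = true;
}

/* mirrors the new signature: only arm protection when actually picked */
static void set_next_entity(struct entity *se, bool first)
{
	if (first)
		set_protect_slice(se);
}

int main(void)
{
	struct entity se = { .protect_slice = false };

	/* genuine pick: protection is (re)armed */
	set_next_entity(&se, true);
	printf("after pick:   protect_slice=%d\n", se.protect_slice);

	/* sched_change-style replay: existing state is left alone */
	se.protect_slice = false;
	set_next_entity(&se, false);
	printf("after change: protect_slice=%d\n", se.protect_slice);

	return 0;
}
```

In the patch itself this corresponds to threading `first` from set_next_task_fair() down into set_next_entity(), while the pick paths pass `true` explicitly.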
