author		Vincent Guittot <vincent.guittot@linaro.org>	2025-07-08 18:56:29 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2025-07-09 13:40:23 +0200
commit		3a0baa8e6c570c252999cb651398a88f8f990b4a (patch)
tree		1a1fb81e8540423c400d51799da8c4678ff5389d
parent		052c3d87c82ea4ee83232b747512847b4e8c9976 (diff)
sched/fair: Fix entity's lag with run to parity
When an entity is enqueued without preempting current, we must ensure
that the slice protection is updated to take into account the slice
duration of the newly enqueued task so that its lag will not exceed
its slice (+ tick).

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250708165630.1948751-6-vincent.guittot@linaro.org
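For readers unfamiliar with the mechanism, here is a minimal user-space
sketch of the clamp the patch introduces. The typedefs, the NICE_0_LOAD
value, and the simplified helper signatures are assumptions for
illustration only; the fair.c originals operate on struct sched_entity
and scale via __calc_delta() internally.

typedef unsigned long long u64;
typedef long long s64;

#define NICE_0_LOAD 1024ULL	/* assumed weight of a nice-0 task */

/* Scale a wall-clock delta into virtual time for an entity of weight w. */
static u64 calc_delta_fair(u64 delta, u64 w)
{
	return delta * NICE_0_LOAD / w;
}

/* Earlier of two virtual times, safe across counter wraparound. */
static u64 min_vruntime(u64 a, u64 b)
{
	return ((s64)(a - b) < 0) ? a : b;
}

/*
 * On enqueue without preemption, pull the running entity's protected
 * deadline in so the newcomer waits at most its own slice (+ tick):
 * the protection window can shrink, never grow.
 */
static void update_protect_slice(u64 *vprot, u64 vruntime,
				 u64 min_slice, u64 w)
{
	*vprot = min_vruntime(*vprot,
			      vruntime + calc_delta_fair(min_slice, w));
}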
-rw-r--r--	kernel/sched/fair.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45e057fc2354..1660960d64af 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -889,13 +889,13 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
  * When run to parity is disabled, we give a minimum quantum to the running
  * entity to ensure progress.
  */
-static inline void set_protect_slice(struct sched_entity *se)
+static inline void set_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	u64 slice = normalized_sysctl_sched_base_slice;
 	u64 vprot = se->deadline;
 
 	if (sched_feat(RUN_TO_PARITY))
-		slice = cfs_rq_min_slice(cfs_rq_of(se));
+		slice = cfs_rq_min_slice(cfs_rq);
 
 	slice = min(slice, se->slice);
 	if (slice != se->slice)
@@ -904,6 +904,13 @@ static inline void set_protect_slice(struct sched_entity *se)
 	se->vprot = vprot;
 }
 
+static inline void update_protect_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	u64 slice = cfs_rq_min_slice(cfs_rq);
+
+	se->vprot = min_vruntime(se->vprot, se->vruntime + calc_delta_fair(slice, se));
+}
+
 static inline bool protect_slice(struct sched_entity *se)
 {
 	return ((s64)(se->vprot - se->vruntime) > 0);
@@ -5467,7 +5474,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		__dequeue_entity(cfs_rq, se);
 		update_load_avg(cfs_rq, se, UPDATE_TG);
 
-		set_protect_slice(se);
+		set_protect_slice(cfs_rq, se);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -8720,6 +8727,9 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	if (__pick_eevdf(cfs_rq, !do_preempt_short) == pse)
 		goto preempt;
 
+	if (sched_feat(RUN_TO_PARITY) && do_preempt_short)
+		update_protect_slice(cfs_rq, se);
+
 	return;
 
 preempt:
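As a quick sanity check of the resulting behaviour, the following is a
standalone, runnable user-space demonstration (all numbers are made up
for illustration): a wakeup whose slice is shorter than the remaining
protection window pulls vprot in, while a longer slice leaves it
untouched, so the enqueued task's lag stays bounded by its own slice.

#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

/* Earlier of two virtual times, safe across counter wraparound. */
static u64 min_vruntime(u64 a, u64 b)
{
	return ((s64)(a - b) < 0) ? a : b;
}

int main(void)
{
	u64 vruntime = 1000;		/* current's virtual runtime */
	u64 vprot = vruntime + 300;	/* protection from a 300-unit slice */

	/* Wakeup of a task with a shorter (200-unit) slice: shrink. */
	vprot = min_vruntime(vprot, vruntime + 200);
	printf("after short wakeup: vprot = %llu\n", vprot);	/* 1200 */

	/* Wakeup of a task with a longer (500-unit) slice: unchanged. */
	vprot = min_vruntime(vprot, vruntime + 500);
	printf("after long wakeup:  vprot = %llu\n", vprot);	/* 1200 */

	return 0;
}

Note that the update is one-directional: because min_vruntime() always
picks the earlier virtual time, a later enqueue can only tighten
current's protection, which is why the non-preempting path in
check_preempt_wakeup_fair() can safely call update_protect_slice()
whenever run-to-parity protection is in effect.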