diff options
-rw-r--r-- kernel/sched/fair.c  | 16 ++++++++--------
-rw-r--r-- kernel/sched/sched.h |  2 +-
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f79951facff4..65b1065f9b21 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -608,7 +608,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) * * v0 := cfs_rq->zero_vruntime * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime - * \Sum w_i := cfs_rq->avg_load + * \Sum w_i := cfs_rq->sum_weight * * Since zero_vruntime closely tracks the per-task service, these * deltas: (v_i - v), will be in the order of the maximal (virtual) lag @@ -625,7 +625,7 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) s64 key = entity_key(cfs_rq, se); cfs_rq->avg_vruntime += key * weight; - cfs_rq->avg_load += weight; + cfs_rq->sum_weight += weight; } static void @@ -635,16 +635,16 @@ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) s64 key = entity_key(cfs_rq, se); cfs_rq->avg_vruntime -= key * weight; - cfs_rq->avg_load -= weight; + cfs_rq->sum_weight -= weight; } static inline void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) { /* - * v' = v + d ==> avg_vruntime' = avg_runtime - d*avg_load + * v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight */ - cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta; + cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta; } /* @@ -655,7 +655,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; s64 avg = cfs_rq->avg_vruntime; - long load = cfs_rq->avg_load; + long load = cfs_rq->sum_weight; if (curr && curr->on_rq) { unsigned long weight = scale_load_down(curr->load.weight); @@ -723,7 +723,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime) { struct sched_entity *curr = cfs_rq->curr; s64 avg = cfs_rq->avg_vruntime; - long load = cfs_rq->avg_load; + long load = cfs_rq->sum_weight; if (curr && curr->on_rq) { unsigned long weight = scale_load_down(curr->load.weight); @@ -5131,7 +5131,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) * * 
vl_i = (W + w_i)*vl'_i / W */ - load = cfs_rq->avg_load; + load = cfs_rq->sum_weight; if (curr && curr->on_rq) load += scale_load_down(curr->load.weight); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 82522c98c903..3334aa535423 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -679,7 +679,7 @@ struct cfs_rq { unsigned int h_nr_idle; /* SCHED_IDLE */ s64 avg_vruntime; - u64 avg_load; + u64 sum_weight; u64 zero_vruntime; #ifdef CONFIG_SCHED_CORE |
