Diffstat (limited to 'kernel/sched/sched.h')
 kernel/sched/sched.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d79de755c1c2..a441990fe808 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -651,7 +651,8 @@ struct balance_callback {
 struct cfs_rq {
 	struct load_weight	load;
 	unsigned int		nr_running;
-	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		h_nr_queued;       /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		h_nr_runnable;     /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
 	unsigned int		h_nr_delayed;
@@ -907,7 +908,7 @@ static inline void se_update_runnable(struct sched_entity *se)
 	if (!entity_is_task(se)) {
 		struct cfs_rq *cfs_rq = se->my_q;
 
-		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+		se->runnable_weight = cfs_rq->h_nr_queued - cfs_rq->h_nr_delayed;
 	}
 }
 
@@ -1155,7 +1156,7 @@ struct rq {
 	 * one CPU and if it got migrated afterwards it may decrease
 	 * it on another CPU. Always updated under the runqueue lock:
 	 */
-	unsigned int		nr_uninterruptible;
+	unsigned long		nr_uninterruptible;
 
 	struct task_struct __rcu	*curr;
 	struct sched_dl_entity		*dl_server;