Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  55
1 file changed, 31 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d2a119c7ad9..efd3bfc7e347 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -84,6 +84,10 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
*/
#define RUNTIME_INF ((u64)~0ULL)
+static inline int idle_policy(int policy)
+{
+ return policy == SCHED_IDLE;
+}
static inline int fair_policy(int policy)
{
return policy == SCHED_NORMAL || policy == SCHED_BATCH;
@@ -98,6 +102,11 @@ static inline int dl_policy(int policy)
{
return policy == SCHED_DEADLINE;
}
+static inline bool valid_policy(int policy)
+{
+ return idle_policy(policy) || fair_policy(policy) ||
+ rt_policy(policy) || dl_policy(policy);
+}
static inline int task_has_rt_policy(struct task_struct *p)
{
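The new idle_policy()/valid_policy() helpers let callers replace open-coded policy checks with a single predicate. A minimal sketch of how a caller might validate a user-supplied policy value, assuming the usual -EINVAL convention; the call site below is illustrative and not part of this diff:

	/* Hypothetical validation step in a setscheduler-style path. */
	if (!valid_policy(policy))
		return -EINVAL;	/* rejects anything that is not IDLE, NORMAL/BATCH, RT or DEADLINE */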
@@ -109,11 +118,6 @@ static inline int task_has_dl_policy(struct task_struct *p)
return dl_policy(p->policy);
}
-static inline bool dl_time_before(u64 a, u64 b)
-{
- return (s64)(a - b) < 0;
-}
-
/*
* Tells if entity @a should preempt entity @b.
*/
@@ -1003,17 +1007,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
-#ifdef CONFIG_NUMA_BALANCING
-#define sched_feat_numa(x) sched_feat(x)
-#ifdef CONFIG_SCHED_DEBUG
-#define numabalancing_enabled sched_feat_numa(NUMA)
-#else
-extern bool numabalancing_enabled;
-#endif /* CONFIG_SCHED_DEBUG */
-#else
-#define sched_feat_numa(x) (0)
-#define numabalancing_enabled (0)
-#endif /* CONFIG_NUMA_BALANCING */
+extern struct static_key_false sched_numa_balancing;
static inline u64 global_rt_period(void)
{
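sched_numa_balancing replaces the sched_feat_numa()/numabalancing_enabled macros with a static branch, so the disabled case costs a patched-out jump rather than a memory load. A hedged sketch of how a hot-path check might consume it, using the standard static_branch_likely() accessor from <linux/jump_label.h>; the surrounding function name is hypothetical:

	static void task_tick_numa_example(struct rq *rq, struct task_struct *curr)
	{
		/* Branch is patched out entirely while NUMA balancing is disabled. */
		if (!static_branch_likely(&sched_numa_balancing))
			return;

		/* ... NUMA scanning / placement work would go here ... */
	}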
@@ -1157,16 +1151,18 @@ static const u32 prio_to_wmult[40] = {
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
-#define ENQUEUE_WAKEUP 1
-#define ENQUEUE_HEAD 2
+#define ENQUEUE_WAKEUP 0x01
+#define ENQUEUE_HEAD 0x02
#ifdef CONFIG_SMP
-#define ENQUEUE_WAKING 4 /* sched_class::task_waking was called */
+#define ENQUEUE_WAKING 0x04 /* sched_class::task_waking was called */
#else
-#define ENQUEUE_WAKING 0
+#define ENQUEUE_WAKING 0x00
#endif
-#define ENQUEUE_REPLENISH 8
+#define ENQUEUE_REPLENISH 0x08
+#define ENQUEUE_RESTORE 0x10
-#define DEQUEUE_SLEEP 1
+#define DEQUEUE_SLEEP 0x01
+#define DEQUEUE_SAVE 0x02
#define RETRY_TASK ((void *)-1UL)
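The enqueue/dequeue flags move to hex and gain a DEQUEUE_SAVE/ENQUEUE_RESTORE pair, meant for callers that briefly take a task off its runqueue while changing its parameters and then put it back, without that round trip looking like a real sleep and wakeup. A rough sketch of the pattern, assuming dequeue_task()/enqueue_task() take an rq, a task and a flags word as in kernel/sched/core.c; the surrounding code is illustrative only:

	/* Change attributes of a queued task without a spurious sleep/wakeup. */
	queued = task_on_rq_queued(p);
	if (queued)
		dequeue_task(rq, p, DEQUEUE_SAVE);	/* leaving the rq only temporarily */

	/* ... update policy / priority here ... */

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);	/* not a wakeup, just putting it back */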
@@ -1194,7 +1190,7 @@ struct sched_class {
#ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
- void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
+ void (*migrate_task_rq)(struct task_struct *p);
void (*task_waking) (struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
@@ -1227,7 +1223,7 @@ struct sched_class {
void (*update_curr) (struct rq *rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*task_move_group) (struct task_struct *p, int on_rq);
+ void (*task_move_group) (struct task_struct *p);
#endif
};
@@ -1405,6 +1401,17 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
}
#endif
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+ if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+ return sd->smt_gain / sd->span_weight;
+
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
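The default arch_scale_cpu_capacity() spreads the SMT gain across hardware siblings, so each thread of a core reports less than full capacity. A worked example under assumed-typical values (SCHED_CAPACITY_SCALE of 1024 and an smt_gain of 1178 are common kernel defaults, but are not fixed by this diff):

	/* Illustrative numbers: an SMT-2 sibling domain with common defaults. */
	unsigned long scale    = SCHED_CAPACITY_SCALE;	/* 1024 */
	unsigned long smt_gain = 1178;			/* assumed default for SMT domains */
	unsigned int  siblings = 2;			/* sd->span_weight */

	/*
	 * Each hardware thread reports roughly 1178 / 2 = 589 capacity units,
	 * i.e. two siblings together are worth ~1.15x of one full core, not 2x.
	 */
	unsigned long per_thread = smt_gain / siblings;

	/* Outside an SMT domain the helper simply returns SCHED_CAPACITY_SCALE (1024). */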