Diffstat (limited to 'include/linux/sched')
-rw-r--r--  include/linux/sched/deadline.h  |  27
-rw-r--r--  include/linux/sched/ext.h       | 109
-rw-r--r--  include/linux/sched/signal.h    |   3
-rw-r--r--  include/linux/sched/topology.h  |  26
4 files changed, 112 insertions(+), 53 deletions(-)
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index c40115d4e34d..1198138cb839 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -37,4 +37,31 @@ extern void dl_clear_root_domain_cpu(int cpu);
extern u64 dl_cookie;
extern bool dl_bw_visited(int cpu, u64 cookie);
+static inline bool dl_server(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_server;
+}
+
+static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
+{
+ BUG_ON(dl_server(dl_se));
+ return container_of(dl_se, struct task_struct, dl);
+}
+
+/*
+ * Regarding the deadline: a task with an implicit deadline has a relative
+ * deadline == relative period, while a task with a constrained deadline has
+ * a relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, some restrictions apply
+ * only to tasks that do not have an implicit deadline; see
+ * update_dl_entity() for details.
+ *
+ * dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+ return dl_se->dl_deadline == dl_se->dl_period;
+}
+
#endif /* _LINUX_SCHED_DEADLINE_H */
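
For illustration, a minimal caller sketch of the new helpers. The function below is hypothetical and not part of this patch; it only assumes the sched_dl_entity fields this hunk already relies on and the usual <linux/sched/deadline.h> / <linux/printk.h> context:

static void example_dl_entity_check(struct sched_dl_entity *dl_se)
{
	/* dl_task_of() is only valid for task entities, never for dl servers */
	if (dl_server(dl_se))
		return;

	/* constrained-deadline tasks get extra handling in update_dl_entity() */
	if (!dl_is_implicit(dl_se))
		pr_debug("constrained-deadline task pid=%d\n",
			 dl_task_of(dl_se)->pid);
}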
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index bcb962d5ee7d..1a3af2ea2a79 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -62,6 +62,16 @@ enum scx_dsq_id_flags {
SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
};
+struct scx_deferred_reenq_user {
+ struct list_head node;
+ u64 flags;
+};
+
+struct scx_dsq_pcpu {
+ struct scx_dispatch_q *dsq;
+ struct scx_deferred_reenq_user deferred_reenq_user;
+};
+
/*
* A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
* queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
@@ -78,30 +88,58 @@ struct scx_dispatch_q {
u64 id;
struct rhash_head hash_node;
struct llist_node free_node;
+ struct scx_sched *sched;
+ struct scx_dsq_pcpu __percpu *pcpu;
struct rcu_head rcu;
};
-/* scx_entity.flags */
+/* sched_ext_entity.flags */
enum scx_ent_flags {
SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_IN_CUSTODY = 1 << 1, /* in custody, needs ops.dequeue() when leaving */
SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+ SCX_TASK_SUB_INIT = 1 << 4, /* task being initialized for a sub sched */
+ SCX_TASK_IMMED = 1 << 5, /* task is on local DSQ with %SCX_ENQ_IMMED */
- SCX_TASK_STATE_SHIFT = 8, /* bit 8 and 9 are used to carry scx_task_state */
+ /*
+ * Bits 8 and 9 are used to carry task state:
+ *
+ * NONE ops.init_task() not called yet
+ * INIT ops.init_task() succeeded, but task can be cancelled
+ * READY fully initialized, but not in sched_ext
+ * ENABLED fully initialized and in sched_ext
+ */
+ SCX_TASK_STATE_SHIFT = 8, /* bits 8 and 9 are used to carry task state */
SCX_TASK_STATE_BITS = 2,
SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
- SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
-};
+ SCX_TASK_NONE = 0 << SCX_TASK_STATE_SHIFT,
+ SCX_TASK_INIT = 1 << SCX_TASK_STATE_SHIFT,
+ SCX_TASK_READY = 2 << SCX_TASK_STATE_SHIFT,
+ SCX_TASK_ENABLED = 3 << SCX_TASK_STATE_SHIFT,
-/* scx_entity.flags & SCX_TASK_STATE_MASK */
-enum scx_task_state {
- SCX_TASK_NONE, /* ops.init_task() not called yet */
- SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
- SCX_TASK_READY, /* fully initialized, but not in sched_ext */
- SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+ /*
+	 * Bits 12 and 13 are used to carry the reenqueue reason. In addition to
+	 * the %SCX_ENQ_REENQ flag, ops.enqueue() can also test against
+	 * %SCX_TASK_REENQ_NONE to distinguish reenqueues.
+ *
+ * NONE not being reenqueued
+ * KFUNC reenqueued by scx_bpf_dsq_reenq() and friends
+ * IMMED reenqueued due to failed ENQ_IMMED
+ * PREEMPTED preempted while running
+ */
+ SCX_TASK_REENQ_REASON_SHIFT = 12,
+ SCX_TASK_REENQ_REASON_BITS = 2,
+ SCX_TASK_REENQ_REASON_MASK = ((1 << SCX_TASK_REENQ_REASON_BITS) - 1) << SCX_TASK_REENQ_REASON_SHIFT,
+
+ SCX_TASK_REENQ_NONE = 0 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_KFUNC = 1 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_IMMED = 2 << SCX_TASK_REENQ_REASON_SHIFT,
+ SCX_TASK_REENQ_PREEMPTED = 3 << SCX_TASK_REENQ_REASON_SHIFT,
- SCX_TASK_NR_STATES,
+ /* iteration cursor, not a task */
+ SCX_TASK_CURSOR = 1 << 31,
};
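
For reference, a hedged sketch of how the packed state and reenqueue-reason fields above could be decoded from p->scx.flags. The helper names are hypothetical; the in-tree accessors live in kernel/sched/ext.c:

/*
 * Both value sets are pre-shifted into position, so a masked value compares
 * directly against SCX_TASK_{NONE,INIT,READY,ENABLED} and SCX_TASK_REENQ_*.
 */
static inline u32 example_scx_task_state(const struct task_struct *p)
{
	return p->scx.flags & SCX_TASK_STATE_MASK;
}

static inline u32 example_scx_reenq_reason(const struct task_struct *p)
{
	return p->scx.flags & SCX_TASK_REENQ_REASON_MASK;
}

e.g. example_scx_task_state(p) == SCX_TASK_ENABLED would test that ops.init_task() has completed and the task is currently scheduled by sched_ext.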
/* scx_entity.dsq_flags */
@@ -109,33 +147,6 @@ enum scx_ent_dsq_flags {
SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
};
-/*
- * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
- * everywhere and the following bits track which kfunc sets are currently
- * allowed for %current. This simple per-task tracking works because SCX ops
- * nest in a limited way. BPF will likely implement a way to allow and disallow
- * kfuncs depending on the calling context which will replace this manual
- * mechanism. See scx_kf_allow().
- */
-enum scx_kf_mask {
- SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
- /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
- SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
- /*
- * ops.dispatch() may release rq lock temporarily and thus ENQUEUE and
- * SELECT_CPU may be nested inside. ops.dequeue (in REST) may also be
- * nested inside DISPATCH.
- */
- SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
- SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
- SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
- SCX_KF_REST = 1 << 4, /* other rq-locked operations */
-
- __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
- SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
- __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
-};
-
enum scx_dsq_lnode_flags {
SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
@@ -149,19 +160,31 @@ struct scx_dsq_list_node {
u32 priv; /* can be used by iter cursor */
};
-#define INIT_DSQ_LIST_CURSOR(__node, __flags, __priv) \
+#define INIT_DSQ_LIST_CURSOR(__cursor, __dsq, __flags) \
(struct scx_dsq_list_node) { \
- .node = LIST_HEAD_INIT((__node).node), \
+ .node = LIST_HEAD_INIT((__cursor).node), \
.flags = SCX_DSQ_LNODE_ITER_CURSOR | (__flags), \
- .priv = (__priv), \
+ .priv = READ_ONCE((__dsq)->seq), \
}
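
A hypothetical call site for the reworked macro, assuming a struct scx_dispatch_q *dsq is in scope; the 0 flags argument is illustrative:

	struct scx_dsq_list_node cursor;

	/*
	 * The cursor snapshots dsq->seq into .priv so iteration can detect
	 * tasks queued after the cursor was created.
	 */
	cursor = INIT_DSQ_LIST_CURSOR(cursor, dsq, 0);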
+struct scx_sched;
+
/*
* The following is embedded in task_struct and contains all fields necessary
* for a task to be scheduled by SCX.
*/
struct sched_ext_entity {
+#ifdef CONFIG_CGROUPS
+ /*
+ * Associated scx_sched. Updated either during fork or while holding
+ * both p->pi_lock and rq lock.
+ */
+ struct scx_sched __rcu *sched;
+#endif
struct scx_dispatch_q *dsq;
+ atomic_long_t ops_state;
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
struct scx_dsq_list_node dsq_list; /* dispatch order */
struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
u32 dsq_seq;
@@ -171,9 +194,7 @@ struct sched_ext_entity {
s32 sticky_cpu;
s32 holding_cpu;
s32 selected_cpu;
- u32 kf_mask; /* see scx_kf_mask above */
struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
- atomic_long_t ops_state;
struct list_head runnable_node; /* rq->scx.runnable_list */
unsigned long runnable_at;
@@ -181,8 +202,6 @@ struct sched_ext_entity {
#ifdef CONFIG_SCHED_CORE
u64 core_sched_at; /* see scx_prio_less() */
#endif
- u64 ddsp_dsq_id;
- u64 ddsp_enq_flags;
/* BPF scheduler modifiable fields */
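
Since the new p->scx.sched pointer above is __rcu annotated and only exists under CONFIG_CGROUPS, a read-side access would look roughly like this (helper name hypothetical, sketch only):

static struct scx_sched *example_task_scx_sched(struct task_struct *p)
{
	/*
	 * Writers update p->scx.sched with both p->pi_lock and the rq lock
	 * held; lockless readers must be inside an RCU read-side section.
	 */
	return rcu_dereference(p->scx.sched);
}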
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index a22248aebcf9..584ae88b435e 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -132,6 +132,7 @@ struct signal_struct {
*/
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;
+ unsigned int autoreap:1;
#ifdef CONFIG_POSIX_TIMERS
@@ -739,7 +740,7 @@ static inline int thread_group_empty(struct task_struct *p)
extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
unsigned long *flags)
- __acquires(&task->sighand->siglock);
+ __cond_acquires(nonnull, &task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
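
The __cond_acquires(nonnull, ...) annotation matches the existing calling convention: the siglock is held only when a non-NULL sighand is returned. A rough caller sketch:

	unsigned long flags;
	struct sighand_struct *sighand;

	sighand = lock_task_sighand(task, &flags);
	if (sighand) {
		/* task->sighand->siglock is held iff sighand != NULL */
		unlock_task_sighand(task, &flags);
	}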
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 45c0022b91ce..36553e14866d 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -95,6 +95,7 @@ struct sched_domain {
unsigned int newidle_call;
unsigned int newidle_success;
unsigned int newidle_ratio;
+ u64 newidle_stamp;
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
@@ -141,18 +142,30 @@ struct sched_domain {
unsigned int span_weight;
/*
- * Span of all CPUs in this domain.
+	 * See sched_domain_span() for why flex arrays are broken.
*
- * NOTE: this field is variable length. (Allocated dynamically
- * by attaching extra space to the end of the structure,
- * depending on how many CPUs the kernel has booted up with)
- */
unsigned long span[];
+ */
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
- return to_cpumask(sd->span);
+	/*
+	 * Turns out that C flexible arrays are fundamentally broken: it is
+	 * allowed for offsetof(*sd, span) < sizeof(*sd), which means that a
+	 * structure initialization *sd = { ... }, which writes every byte
+	 * inside sizeof(*sd), will overwrite the start of the flexible
+	 * array.
+	 *
+	 * Luckily, the way we allocate sched_domain is:
+	 *
+	 *	sizeof(*sd) + cpumask_size()
+	 *
+	 * This means we have sufficient space for the whole flex array
+	 * *outside* of sizeof(*sd). So use that, and avoid using sd->span.
+	 */
+ unsigned long *bitmap = (void *)sd + sizeof(*sd);
+ return to_cpumask(bitmap);
}
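
The helper relies on the allocation layout the comment describes; a hedged sketch of that layout (illustrative only, the real allocation sites are in kernel/sched/topology.c):

	/*
	 * The cpumask storage lives entirely past sizeof(*sd), so
	 * *sd = (struct sched_domain){ ... } cannot clobber it.
	 */
	struct sched_domain *sd = kzalloc(sizeof(*sd) + cpumask_size(),
					  GFP_KERNEL);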
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
@@ -171,7 +184,6 @@ typedef int (*sched_domain_flags_f)(void);
struct sd_data {
struct sched_domain *__percpu *sd;
- struct sched_domain_shared *__percpu *sds;
struct sched_group *__percpu *sg;
struct sched_group_capacity *__percpu *sgc;
};