path: root/include/linux/rseq.h
author    Thomas Gleixner <tglx@linutronix.de>    2025-10-27 09:44:33 +0100
committer Ingo Molnar <mingo@kernel.org>          2025-11-04 08:30:50 +0100
commit    faba9d250eaec7afa248bba71531a08ccc497aab (patch)
tree      2c178787b061b036b397b6a02379499964595288 /include/linux/rseq.h
parent    566d8015f7eef11d82cd63dc4e1f620fcfc2a394 (diff)
rseq: Introduce struct rseq_data
In preparation for a major rewrite of this code, provide a data structure for rseq management. Put all the rseq related data into it (except for the debug part), which makes it possible to simplify fork/execve by using memset() and memcpy() instead of adding new fields to initialize over and over.

Create a storage struct for event management as well and put the sched_switch event and an indicator for RSEQ on a task into it as a start. That uses a union, which allows masking and clearing the whole lot efficiently.

The indicators are explicitly not a bit field. Bit fields generate abysmal code. The boolean members are defined as u8, as that actually guarantees that they fit. There seem to be strange architecture ABIs which need more than 8 bits for a boolean.

The has_rseq member is redundant vs. task::rseq, but it turns out that boolean operations and quick checks on the union generate better code than fiddling with separate entities and data types.

This struct will be extended over time to carry more information.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084306.527086690@linutronix.de
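For context, a minimal sketch of the layout this change implies. The actual struct rseq_data / struct rseq_event definitions live in the accompanying header change, which is not part of this file's diff; the field names (usrptr) and widths below are illustrative assumptions, not the committed definition:

    /* Sketch only: event indicators overlaid by a single word for bulk clear/test. */
    struct rseq_event {
            union {
                    u64             all;            /* clear/test everything at once */
                    struct {
                            union {
                                    u32     events;         /* all pending-event indicators */
                                    struct {
                                            u8      sched_switch;   /* task was scheduled out */
                                    };
                            };
                            u8              has_rseq;       /* task has a registered rseq area */
                    };
            };
    };

    /* Sketch only: everything fork/execve needs to copy or memset() in one go. */
    struct rseq_data {
            struct rseq __user      *usrptr;        /* hypothetical name for the user space area */
            u32                     len;
            u32                     sig;
            struct rseq_event       event;
    };

With a layout along these lines, current->rseq.event.has_rseq replaces the old pointer check, the whole event set can be cleared with a single store, and rseq_reset()/rseq_fork() below reduce to a memset() or a struct assignment.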
Diffstat (limited to 'include/linux/rseq.h')
-rw-r--r--    include/linux/rseq.h    48
1 file changed, 22 insertions, 26 deletions
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index c6267f70c746..ab91b1e6bb4a 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -9,22 +9,22 @@ void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
static inline void rseq_handle_notify_resume(struct pt_regs *regs)
{
- if (current->rseq)
+ if (current->rseq.event.has_rseq)
__rseq_handle_notify_resume(NULL, regs);
}
static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs)
{
- if (current->rseq) {
- current->rseq_event_pending = true;
+ if (current->rseq.event.has_rseq) {
+ current->rseq.event.sched_switch = true;
__rseq_handle_notify_resume(ksig, regs);
}
}
static inline void rseq_sched_switch_event(struct task_struct *t)
{
- if (t->rseq) {
- t->rseq_event_pending = true;
+ if (t->rseq.event.has_rseq) {
+ t->rseq.event.sched_switch = true;
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
}
@@ -32,8 +32,9 @@ static inline void rseq_sched_switch_event(struct task_struct *t)
static __always_inline void rseq_exit_to_user_mode(void)
{
if (IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
- if (WARN_ON_ONCE(current->rseq && current->rseq_event_pending))
- current->rseq_event_pending = false;
+ if (WARN_ON_ONCE(current->rseq.event.has_rseq &&
+ current->rseq.event.events))
+ current->rseq.event.events = 0;
}
}
@@ -49,35 +50,30 @@ static __always_inline void rseq_exit_to_user_mode(void)
*/
static inline void rseq_virt_userspace_exit(void)
{
- if (current->rseq_event_pending)
+ if (current->rseq.event.sched_switch)
set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
}
+static inline void rseq_reset(struct task_struct *t)
+{
+ memset(&t->rseq, 0, sizeof(t->rseq));
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ rseq_reset(t);
+}
+
/*
* If parent process has a registered restartable sequences area, the
* child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_pending = false;
- } else {
+ if (clone_flags & CLONE_VM)
+ rseq_reset(t);
+ else
t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_pending = current->rseq_event_pending;
- }
-}
-
-static inline void rseq_execve(struct task_struct *t)
-{
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_pending = false;
}
#else /* CONFIG_RSEQ */