summaryrefslogtreecommitdiff
path: root/kernel/rseq.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2025-10-27 09:45:12 +0100
committerIngo Molnar <mingo@kernel.org>2025-11-04 08:33:54 +0100
commite2d4f42271155045a49b89530f2c06ad8e9f1a1e (patch)
tree7bbcc777437960b76a44fdf45c1b42e9d0dd473f /kernel/rseq.c
parent9f6ffd4cebda86841700775de3213f22bb0ea22d (diff)
rseq: Rework the TIF_NOTIFY handler
Replace the whole logic with a new implementation, which is shared with signal delivery and the upcoming exit fast path. Contrary to the original implementation, this ignores invocations from KVM/IO-uring, which invoke resume_user_mode_work() with the @regs argument set to NULL. The original implementation updated the CPU/Node/MM CID fields, but that was just a side effect, which was addressing the problem that this invocation cleared TIF_NOTIFY_RESUME, which in turn could cause an update on return to user space to be lost. This problem has been addressed differently, so that it's no longer required to do that update before entering the guest. That might be considered a user visible change, when the host's thread TLS memory is mapped into the guest, but as this was never intentionally supported, this abuse of kernel internal implementation details is not considered an ABI break. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Link: https://patch.msgid.link/20251027084307.517640811@linutronix.de
Diffstat (limited to 'kernel/rseq.c')
-rw-r--r--kernel/rseq.c76
1 file changed, 33 insertions, 43 deletions
diff --git a/kernel/rseq.c b/kernel/rseq.c
index 13faadc737ad..148fb2103023 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -82,12 +82,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>
-#ifdef CONFIG_MEMBARRIER
-# define RSEQ_EVENT_GUARD irq
-#else
-# define RSEQ_EVENT_GUARD preempt
-#endif
-
DEFINE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
static inline void rseq_control_debug(bool on)
@@ -239,38 +233,15 @@ efault:
return false;
}
-/*
- * This resume handler must always be executed between any of:
- * - preemption,
- * - signal delivery,
- * and return to user-space.
- *
- * This is how we can ensure that the entire rseq critical section
- * will issue the commit instruction only if executed atomically with
- * respect to other threads scheduled on the same CPU, and with respect
- * to signal handlers.
- */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+static void rseq_slowpath_update_usr(struct pt_regs *regs)
{
+ /* Preserve rseq state and user_irq state for exit to user */
+ const struct rseq_event evt_mask = { .has_rseq = true, .user_irq = true, };
struct task_struct *t = current;
struct rseq_ids ids;
u32 node_id;
bool event;
- /*
- * If invoked from hypervisors before entering the guest via
- * resume_user_mode_work(), then @regs is a NULL pointer.
- *
- * resume_user_mode_work() clears TIF_NOTIFY_RESUME and re-raises
- * it before returning from the ioctl() to user space when
- * rseq_event.sched_switch is set.
- *
- * So it's safe to ignore here instead of pointlessly updating it
- * in the vcpu_run() loop.
- */
- if (!regs)
- return;
-
if (unlikely(t->flags & PF_EXITING))
return;
@@ -294,26 +265,45 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
* with the result handed in to allow the detection of
* inconsistencies.
*/
- scoped_guard(RSEQ_EVENT_GUARD) {
+ scoped_guard(irq) {
event = t->rseq.event.sched_switch;
- t->rseq.event.sched_switch = false;
+ t->rseq.event.all &= evt_mask.all;
ids.cpu_id = task_cpu(t);
ids.mm_cid = task_mm_cid(t);
}
- if (!IS_ENABLED(CONFIG_DEBUG_RSEQ) && !event)
+ if (!event)
return;
- if (!rseq_handle_cs(t, regs))
- goto error;
-
node_id = cpu_to_node(ids.cpu_id);
- if (!rseq_set_ids(t, &ids, node_id))
- goto error;
- return;
-error:
- force_sig(SIGSEGV);
+ if (unlikely(!rseq_update_usr(t, regs, &ids, node_id))) {
+ /*
+ * Clear the errors just in case this might survive magically, but
+ * leave the rest intact.
+ */
+ t->rseq.event.error = 0;
+ force_sig(SIGSEGV);
+ }
+}
+
+void __rseq_handle_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * If invoked from hypervisors before entering the guest via
+ * resume_user_mode_work(), then @regs is a NULL pointer.
+ *
+ * resume_user_mode_work() clears TIF_NOTIFY_RESUME and re-raises
+ * it before returning from the ioctl() to user space when
+ * rseq_event.sched_switch is set.
+ *
+ * So it's safe to ignore here instead of pointlessly updating it
+ * in the vcpu_run() loop.
+ */
+ if (!regs)
+ return;
+
+ rseq_slowpath_update_usr(regs);
}
void __rseq_signal_deliver(int sig, struct pt_regs *regs)