summary refs log tree commit diff
path: root/arch
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2026-04-07 14:16:49 +0100
committerCatalin Marinas <catalin.marinas@arm.com>2026-04-08 17:40:04 +0100
commitae654112eac05f316ef31587fc55e4d7160d0086 (patch)
tree72ce397aac1628c54c6b0ff0e3d1695f420d792f /arch
parenta07b7b214240e1bf3de7067f2f43d88aa8e50c28 (diff)
arm64: entry: Use split preemption logic
The generic irqentry code now provides irqentry_exit_to_kernel_mode_preempt() and irqentry_exit_to_kernel_mode_after_preempt(), which can be used where architectures have different state requirements for involuntary preemption and exception return, as is the case on arm64. Use the new functions on arm64, aligning our exit to kernel mode logic with the style of our exit to user mode logic. This removes the need for the recently-added bodge in arch_irqentry_exit_need_resched(), and allows preemption to occur when returning from any exception taken from kernel mode, which is nicer for RT. In an ideal world, we'd remove arch_irqentry_exit_need_resched(), and fold the conditionality directly into the architecture-specific entry code. That way all the logic necessary to avoid preempting from a pseudo-NMI could be constrained specifically to the EL1 IRQ/FIQ paths, avoiding redundant work for other exceptions, and making the flow a bit clearer. At present it looks like that would require a larger refactoring (e.g. for the PREEMPT_DYNAMIC logic), and so I've left that as-is for now. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Jinjie Ruan <ruanjinjie@huawei.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@kernel.org> Cc: Vladimir Murzin <vladimir.murzin@arm.com> Cc: Will Deacon <will@kernel.org> Reviewed-by: Jinjie Ruan <ruanjinjie@huawei.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm64/include/asm/entry-common.h21
-rw-r--r--arch/arm64/kernel/entry-common.c12
2 files changed, 12 insertions, 21 deletions
diff --git a/arch/arm64/include/asm/entry-common.h b/arch/arm64/include/asm/entry-common.h
index 20f0a7c7bde1..cab8cd78f693 100644
--- a/arch/arm64/include/asm/entry-common.h
+++ b/arch/arm64/include/asm/entry-common.h
@@ -29,19 +29,14 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
static inline bool arch_irqentry_exit_need_resched(void)
{
- if (system_uses_irq_prio_masking()) {
- /*
- * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
- * priority masking is used the GIC irqchip driver will clear DAIF.IF
- * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
- * DAIF we must have handled an NMI, so skip preemption.
- */
- if (read_sysreg(daif))
- return false;
- } else {
- if (read_sysreg(daif) & (PSR_D_BIT | PSR_A_BIT))
- return false;
- }
+ /*
+ * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+ * priority masking is used the GIC irqchip driver will clear DAIF.IF
+ * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+ * DAIF we must have handled an NMI, so skip preemption.
+ */
+ if (system_uses_irq_prio_masking() && read_sysreg(daif))
+ return false;
/*
* Preempting a task from an IRQ means we leave copies of PSTATE
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 16a65987a6a9..f42ce7b5c67f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -54,8 +54,11 @@ static noinstr irqentry_state_t arm64_enter_from_kernel_mode(struct pt_regs *reg
static void noinstr arm64_exit_to_kernel_mode(struct pt_regs *regs,
irqentry_state_t state)
{
+ local_irq_disable();
+ irqentry_exit_to_kernel_mode_preempt(regs, state);
+ local_daif_mask();
mte_check_tfsr_exit();
- irqentry_exit_to_kernel_mode(regs, state);
+ irqentry_exit_to_kernel_mode_after_preempt(regs, state);
}
/*
@@ -301,7 +304,6 @@ static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_mem_abort(far, esr, regs);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}
@@ -313,7 +315,6 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_sp_pc_abort(far, esr, regs);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}
@@ -324,7 +325,6 @@ static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_undef(regs, esr);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}
@@ -335,7 +335,6 @@ static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_bti(regs, esr);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}
@@ -346,7 +345,6 @@ static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_gcs(regs, esr);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}
@@ -357,7 +355,6 @@ static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_mops(regs, esr);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}
@@ -423,7 +420,6 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
state = arm64_enter_from_kernel_mode(regs);
local_daif_inherit(regs);
do_el1_fpac(regs, esr);
- local_daif_mask();
arm64_exit_to_kernel_mode(regs, state);
}