summaryrefslogtreecommitdiff
path: root/arch/x86
diff options
context:
space:
mode:
authorH. Peter Anvin <hpa@zytor.com>2025-12-16 13:26:04 -0800
committerDave Hansen <dave.hansen@linux.intel.com>2026-01-13 16:37:58 -0800
commit36d83c249e0395a915144eceeb528ddc19b1fbe6 (patch)
tree4581bf7d2a7228f88a21b75ae8b3eec246c31327 /arch/x86
parentf49ecf5e110ab0ed255ddea5e321689faf4e50e6 (diff)
x86/entry/vdso32: When using int $0x80, use it directly
When neither sysenter32 nor syscall32 is available (on either FRED-capable 64-bit hardware or old 32-bit hardware), there is no reason to do a bunch of stack shuffling in __kernel_vsyscall. Unfortunately, just overwriting the initial "push" instructions will mess up the CFI annotations, so suffer the 3-byte NOP if not applicable. Similarly, inline the int $0x80 when doing inline system calls in the vdso instead of calling __kernel_vsyscall. Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Link: https://patch.msgid.link/20251216212606.1325678-11-hpa@zytor.com
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/entry/vdso/vdso32/system_call.S18
-rw-r--r--arch/x86/include/asm/vdso/sys_call.h4
2 files changed, 17 insertions, 5 deletions
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 7b1c0f16e511..9157cf9c5749 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -14,6 +14,18 @@
ALIGN
__kernel_vsyscall:
CFI_STARTPROC
+
+ /*
+ * If using int $0x80, there is no reason to muck about with the
+ * stack here. Unfortunately just overwriting the push instructions
+ * would mess up the CFI annotations, but it is only a 3-byte
+ * NOP in that case. This could be avoided by patching the
+ * vdso symbol table (not the code) and entry point, but that
+ * would require a fair bit of tooling work or by simply compiling
+ * two different vDSO images, but that doesn't seem worth it.
+ */
+ ALTERNATIVE "int $0x80; ret", "", X86_FEATURE_SYSFAST32
+
/*
* Reshuffle regs so that any of the entry instructions
* will preserve enough state.
@@ -52,11 +64,9 @@ __kernel_vsyscall:
#define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter"
#define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall"
- /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
- ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
- SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
+ ALTERNATIVE SYSENTER_SEQUENCE, SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
- /* Enter using int $0x80 */
+ /* Re-enter using int $0x80 */
int $0x80
SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
diff --git a/arch/x86/include/asm/vdso/sys_call.h b/arch/x86/include/asm/vdso/sys_call.h
index dcfd17c6dd57..5806b1cd6aef 100644
--- a/arch/x86/include/asm/vdso/sys_call.h
+++ b/arch/x86/include/asm/vdso/sys_call.h
@@ -20,7 +20,9 @@
# define __sys_reg4 "r10"
# define __sys_reg5 "r8"
#else
-# define __sys_instr "call __kernel_vsyscall"
+# define __sys_instr ALTERNATIVE("ds;ds;ds;int $0x80", \
+ "call __kernel_vsyscall", \
+ X86_FEATURE_SYSFAST32)
# define __sys_clobber "memory"
# define __sys_nr(x,y) __NR_ ## x ## y
# define __sys_reg1 "ebx"