summaryrefslogtreecommitdiff
path: root/arch/x86/include/asm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-02-10 19:34:26 -0800
committerLinus Torvalds <torvalds@linux-foundation.org>2026-02-10 19:34:26 -0800
commit6f7e6393d1ce636bb7ec77a7fe7b77458fddf701 (patch)
tree12b97016aeaf6d8e75db39705bf92b0250872788 /arch/x86/include/asm
parentca8f421ea0d3f1d39f773e14f68f93c978e470ef (diff)
parentce9b1c10c3f1c723c3cc7b63aa8331fdb6c57a04 (diff)
Merge tag 'x86_entry_for_7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 entry code updates from Dave Hansen: "This is entirely composed of a set of long overdue VDSO cleanups. They make the VDSO build much more logical and zap quite a bit of old cruft. It also results in a coveted net-code-removal diffstat" * tag 'x86_entry_for_7.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/entry/vdso: Add vdso2c to .gitignore x86/entry/vdso32: Omit '.cfi_offset eflags' for LLVM < 16 MAINTAINERS: Adjust vdso file entry in INTEL SGX x86/entry/vdso/selftest: Update location of vgetrandom-chacha.S x86/entry/vdso: Fix filtering of vdso compiler flags x86/entry/vdso: Update the object paths for "make vdso_install" x86/entry/vdso32: When using int $0x80, use it directly x86/cpufeature: Replace X86_FEATURE_SYSENTER32 with X86_FEATURE_SYSFAST32 x86/vdso: Abstract out vdso system call internals x86/entry/vdso: Include GNU_PROPERTY and GNU_STACK PHDRs x86/entry/vdso32: Remove open-coded DWARF in sigreturn.S x86/entry/vdso32: Remove SYSCALL_ENTER_KERNEL macro in sigreturn.S x86/entry/vdso32: Don't rely on int80_landing_pad for adjusting ip x86/entry/vdso: Refactor the vdso build x86/entry/vdso: Move vdso2c to arch/x86/tools x86/entry/vdso: Rename vdso_image_* to vdso*_image
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/dwarf2.h1
-rw-r--r--arch/x86/include/asm/elf.h2
-rw-r--r--arch/x86/include/asm/vdso.h6
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h108
-rw-r--r--arch/x86/include/asm/vdso/sys_call.h105
6 files changed, 119 insertions, 105 deletions
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c3b53beb1300..63b0f9aa9b3e 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -84,7 +84,7 @@
#define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_SYSFAST32 ( 3*32+15) /* sysenter/syscall in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 302e11b15da8..09c9684d3ad6 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -20,6 +20,7 @@
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#define CFI_ESCAPE .cfi_escape
+#define CFI_SIGNAL_FRAME .cfi_signal_frame
#ifndef BUILD_VDSO
/*
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 6c8fdc96be7e..2ba5f166e58f 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -361,7 +361,7 @@ else if (IS_ENABLED(CONFIG_IA32_EMULATION)) \
#define VDSO_ENTRY \
((unsigned long)current->mm->context.vdso + \
- vdso_image_32.sym___kernel_vsyscall)
+ vdso32_image.sym___kernel_vsyscall)
struct linux_binprm;
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index b7253ef3205a..e8afbe9faa5b 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -27,9 +27,9 @@ struct vdso_image {
long sym_vdso32_rt_sigreturn_landing_pad;
};
-extern const struct vdso_image vdso_image_64;
-extern const struct vdso_image vdso_image_x32;
-extern const struct vdso_image vdso_image_32;
+extern const struct vdso_image vdso64_image;
+extern const struct vdso_image vdsox32_image;
+extern const struct vdso_image vdso32_image;
extern int __init init_vdso_image(const struct vdso_image *image);
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 73b2e7ee8f0f..3cf214cc4a75 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -18,6 +18,7 @@
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <clocksource/hyperv_timer.h>
+#include <asm/vdso/sys_call.h>
#define VDSO_HAS_TIME 1
@@ -53,130 +54,37 @@ extern struct ms_hyperv_tsc_page hvclock_page
__attribute__((visibility("hidden")));
#endif
-#ifndef BUILD_VDSO32
-
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
- long ret;
-
- asm ("syscall" : "=a" (ret), "=m" (*_ts) :
- "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) :
- "rcx", "r11");
-
- return ret;
+ return VDSO_SYSCALL2(clock_gettime,64,_clkid,_ts);
}
static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
{
- long ret;
-
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory");
-
- return ret;
+ return VDSO_SYSCALL2(gettimeofday,,_tv,_tz);
}
static __always_inline
long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
- long ret;
-
- asm ("syscall" : "=a" (ret), "=m" (*_ts) :
- "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) :
- "rcx", "r11");
-
- return ret;
+ return VDSO_SYSCALL2(clock_getres,_time64,_clkid,_ts);
}
-#else
-
-static __always_inline
-long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
-{
- long ret;
-
- asm (
- "mov %%ebx, %%edx \n"
- "mov %[clock], %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret), "=m" (*_ts)
- : "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts)
- : "edx");
-
- return ret;
-}
+#ifndef CONFIG_X86_64
static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
- long ret;
-
- asm (
- "mov %%ebx, %%edx \n"
- "mov %[clock], %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret), "=m" (*_ts)
- : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
- : "edx");
-
- return ret;
-}
-
-static __always_inline
-long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
- struct timezone *_tz)
-{
- long ret;
-
- asm(
- "mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz)
- : "memory", "edx");
-
- return ret;
+ return VDSO_SYSCALL2(clock_gettime,,_clkid,_ts);
}
static __always_inline long
-clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
-{
- long ret;
-
- asm (
- "mov %%ebx, %%edx \n"
- "mov %[clock], %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret), "=m" (*_ts)
- : "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts)
- : "edx");
-
- return ret;
-}
-
-static __always_inline
-long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
- long ret;
-
- asm (
- "mov %%ebx, %%edx \n"
- "mov %[clock], %%ebx \n"
- "call __kernel_vsyscall \n"
- "mov %%edx, %%ebx \n"
- : "=a" (ret), "=m" (*_ts)
- : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
- : "edx");
-
- return ret;
+ return VDSO_SYSCALL2(clock_getres,,_clkid,_ts);
}
#endif
diff --git a/arch/x86/include/asm/vdso/sys_call.h b/arch/x86/include/asm/vdso/sys_call.h
new file mode 100644
index 000000000000..5806b1cd6aef
--- /dev/null
+++ b/arch/x86/include/asm/vdso/sys_call.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Macros for issuing an inline system call from the vDSO.
+ */
+
+#ifndef X86_ASM_VDSO_SYS_CALL_H
+#define X86_ASM_VDSO_SYS_CALL_H
+
+#include <linux/compiler.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative.h>
+
+#ifdef CONFIG_X86_64
+# define __sys_instr "syscall"
+# define __sys_clobber "rcx", "r11", "memory"
+# define __sys_nr(x,y) __NR_ ## x
+# define __sys_reg1 "rdi"
+# define __sys_reg2 "rsi"
+# define __sys_reg3 "rdx"
+# define __sys_reg4 "r10"
+# define __sys_reg5 "r8"
+#else
+# define __sys_instr ALTERNATIVE("ds;ds;ds;int $0x80", \
+ "call __kernel_vsyscall", \
+ X86_FEATURE_SYSFAST32)
+# define __sys_clobber "memory"
+# define __sys_nr(x,y) __NR_ ## x ## y
+# define __sys_reg1 "ebx"
+# define __sys_reg2 "ecx"
+# define __sys_reg3 "edx"
+# define __sys_reg4 "esi"
+# define __sys_reg5 "edi"
+#endif
+
+/*
+ * Example usage:
+ *
+ * result = VDSO_SYSCALL3(foo,64,x,y,z);
+ *
+ * ... calls foo(x,y,z) on 64 bits, and foo64(x,y,z) on 32 bits.
+ *
+ * VDSO_SYSCALL6() is currently missing, because it would require
+ * special handling for %ebp on 32 bits when the vdso is compiled with
+ * frame pointers enabled (the default on 32 bits.) Add it as a special
+ * case when and if it becomes necessary.
+ */
+#define _VDSO_SYSCALL(name,suf32,...) \
+ ({ \
+ long _sys_num_ret = __sys_nr(name,suf32); \
+ asm_inline volatile( \
+ __sys_instr \
+ : "+a" (_sys_num_ret) \
+ : __VA_ARGS__ \
+ : __sys_clobber); \
+ _sys_num_ret; \
+ })
+
+#define VDSO_SYSCALL0(name,suf32) \
+ _VDSO_SYSCALL(name,suf32)
+#define VDSO_SYSCALL1(name,suf32,a1) \
+ ({ \
+ register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \
+ _VDSO_SYSCALL(name,suf32, \
+ "r" (_sys_arg1)); \
+ })
+#define VDSO_SYSCALL2(name,suf32,a1,a2) \
+ ({ \
+ register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \
+ register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \
+ _VDSO_SYSCALL(name,suf32, \
+ "r" (_sys_arg1), "r" (_sys_arg2)); \
+ })
+#define VDSO_SYSCALL3(name,suf32,a1,a2,a3) \
+ ({ \
+ register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \
+ register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \
+ register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \
+ _VDSO_SYSCALL(name,suf32, \
+ "r" (_sys_arg1), "r" (_sys_arg2), \
+ "r" (_sys_arg3)); \
+ })
+#define VDSO_SYSCALL4(name,suf32,a1,a2,a3,a4) \
+ ({ \
+ register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \
+ register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \
+ register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \
+ register long _sys_arg4 asm(__sys_reg4) = (long)(a4); \
+ _VDSO_SYSCALL(name,suf32, \
+ "r" (_sys_arg1), "r" (_sys_arg2), \
+ "r" (_sys_arg3), "r" (_sys_arg4)); \
+ })
+#define VDSO_SYSCALL5(name,suf32,a1,a2,a3,a4,a5) \
+ ({ \
+ register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \
+ register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \
+ register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \
+ register long _sys_arg4 asm(__sys_reg4) = (long)(a4); \
+ register long _sys_arg5 asm(__sys_reg5) = (long)(a5); \
+ _VDSO_SYSCALL(name,suf32, \
+ "r" (_sys_arg1), "r" (_sys_arg2), \
+ "r" (_sys_arg3), "r" (_sys_arg4), \
+ "r" (_sys_arg5)); \
+ })
+
+#endif /* X86_ASM_VDSO_SYS_CALL_H */