From 93d73005bff4f600696ce30e366e742c3373b13d Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:25:55 -0800 Subject: x86/entry/vdso: Rename vdso_image_* to vdso*_image The vdso .so files are named vdso*.so. These structures are binary images and descriptions of these files, so it is more consistent for them to have a naming that more directly mirrors the filenames. It is also very slightly more compact (by one character...) and simplifies the Makefile just a little bit. Signed-off-by: H. Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-2-hpa@zytor.com --- arch/x86/entry/syscall_32.c | 2 +- arch/x86/entry/vdso/.gitignore | 11 ++++------- arch/x86/entry/vdso/Makefile | 8 ++++---- arch/x86/entry/vdso/vma.c | 10 +++++----- arch/x86/include/asm/elf.h | 2 +- arch/x86/include/asm/vdso.h | 6 +++--- arch/x86/kernel/process_64.c | 6 +++--- arch/x86/kernel/signal_32.c | 4 ++-- 8 files changed, 23 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index a67a644d0cfe..8e829575e12f 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -319,7 +319,7 @@ __visible noinstr bool do_fast_syscall_32(struct pt_regs *regs) * convention. Adjust regs so it looks like we entered using int80. */ unsigned long landing_pad = (unsigned long)current->mm->context.vdso + - vdso_image_32.sym_int80_landing_pad; + vdso32_image.sym_int80_landing_pad; /* * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward diff --git a/arch/x86/entry/vdso/.gitignore b/arch/x86/entry/vdso/.gitignore index 37a6129d597b..eb60859dbcbf 100644 --- a/arch/x86/entry/vdso/.gitignore +++ b/arch/x86/entry/vdso/.gitignore @@ -1,8 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -vdso.lds -vdsox32.lds -vdso32-syscall-syms.lds -vdso32-sysenter-syms.lds -vdso32-int80-syms.lds -vdso-image-*.c -vdso2c +*.lds +*.so +*.so.dbg +vdso*-image.c diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index f247f5f5cb44..7f833026d5b2 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -16,9 +16,9 @@ vobjs-$(CONFIG_X86_SGX) += vsgx.o obj-y += vma.o extable.o # vDSO images to build: -obj-$(CONFIG_X86_64) += vdso-image-64.o -obj-$(CONFIG_X86_X32_ABI) += vdso-image-x32.o -obj-$(CONFIG_COMPAT_32) += vdso-image-32.o vdso32-setup.o +obj-$(CONFIG_X86_64) += vdso64-image.o +obj-$(CONFIG_X86_X32_ABI) += vdsox32-image.o +obj-$(CONFIG_COMPAT_32) += vdso32-image.o vdso32-setup.o vobjs := $(addprefix $(obj)/, $(vobjs-y)) vobjs32 := $(addprefix $(obj)/, $(vobjs32-y)) @@ -44,7 +44,7 @@ hostprogs += vdso2c quiet_cmd_vdso2c = VDSO2C $@ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@ -$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE +$(obj)/vdso%-image.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE $(call if_changed,vdso2c) # diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index afe105b2f907..8f98c2d7c7a9 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -65,7 +65,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, static void vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) { - if (in_ia32_syscall() && image == &vdso_image_32) { + if (in_ia32_syscall() && image == &vdso32_image) { struct pt_regs *regs = current_pt_regs(); unsigned long vdso_land = image->sym_int80_landing_pad; unsigned long old_land_addr = vdso_land + @@ -230,7 
+230,7 @@ static int load_vdso32(void) if (vdso32_enabled != 1) /* Other values all mean "disabled" */ return 0; - return map_vdso(&vdso_image_32, 0); + return map_vdso(&vdso32_image, 0); } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) @@ -239,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) if (!vdso64_enabled) return 0; - return map_vdso(&vdso_image_64, 0); + return map_vdso(&vdso64_image, 0); } return load_vdso32(); @@ -252,7 +252,7 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm, if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) { if (!vdso64_enabled) return 0; - return map_vdso(&vdso_image_x32, 0); + return map_vdso(&vdsox32_image, 0); } if (IS_ENABLED(CONFIG_IA32_EMULATION)) @@ -267,7 +267,7 @@ bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) const struct vdso_image *image = current->mm->context.vdso_image; unsigned long vdso = (unsigned long) current->mm->context.vdso; - if (in_ia32_syscall() && image == &vdso_image_32) { + if (in_ia32_syscall() && image == &vdso32_image) { if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad || regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad) return true; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 6c8fdc96be7e..2ba5f166e58f 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -361,7 +361,7 @@ else if (IS_ENABLED(CONFIG_IA32_EMULATION)) \ #define VDSO_ENTRY \ ((unsigned long)current->mm->context.vdso + \ - vdso_image_32.sym___kernel_vsyscall) + vdso32_image.sym___kernel_vsyscall) struct linux_binprm; diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index b7253ef3205a..e8afbe9faa5b 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h @@ -27,9 +27,9 @@ struct vdso_image { long sym_vdso32_rt_sigreturn_landing_pad; }; -extern const struct vdso_image vdso_image_64; -extern const struct vdso_image vdso_image_x32; -extern const struct vdso_image vdso_image_32; +extern const struct vdso_image vdso64_image; +extern const struct vdso_image vdsox32_image; +extern const struct vdso_image vdso32_image; extern int __init init_vdso_image(const struct vdso_image *image); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 432c0a004c60..08e72f429870 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -941,14 +941,14 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) #ifdef CONFIG_CHECKPOINT_RESTORE # ifdef CONFIG_X86_X32_ABI case ARCH_MAP_VDSO_X32: - return prctl_map_vdso(&vdso_image_x32, arg2); + return prctl_map_vdso(&vdsox32_image, arg2); # endif # ifdef CONFIG_IA32_EMULATION case ARCH_MAP_VDSO_32: - return prctl_map_vdso(&vdso_image_32, arg2); + return prctl_map_vdso(&vdso32_image, arg2); # endif case ARCH_MAP_VDSO_64: - return prctl_map_vdso(&vdso_image_64, arg2); + return prctl_map_vdso(&vdso64_image, arg2); #endif #ifdef CONFIG_ADDRESS_MASKING case ARCH_GET_UNTAG_MASK: diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 42bbc42bd350..e55cf19e68fe 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -282,7 +282,7 @@ int ia32_setup_frame(struct ksignal *ksig, struct pt_regs *regs) /* Return stub is in 32bit vsyscall page */ if (current->mm->context.vdso) restorer = current->mm->context.vdso + - vdso_image_32.sym___kernel_sigreturn; + vdso32_image.sym___kernel_sigreturn; else restorer = &frame->retcode; } @@ -368,7 
+368,7 @@ int ia32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) restorer = ksig->ka.sa.sa_restorer; else restorer = current->mm->context.vdso + - vdso_image_32.sym___kernel_rt_sigreturn; + vdso32_image.sym___kernel_rt_sigreturn; unsafe_put_user(ptr_to_compat(restorer), &frame->pretcode, Efault); /* -- cgit v1.2.3 From a76108d05ee13cddb72b620752a80b2c3e87aee1 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:25:56 -0800 Subject: x86/entry/vdso: Move vdso2c to arch/x86/tools It is generally better to build tools in arch/x86/tools to keep host cflags proliferation down, and to reduce makefile sequencing issues. Move the vdso build tool vdso2c into arch/x86/tools in preparation for refactoring the vdso makefiles. Signed-off-by: H. Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-3-hpa@zytor.com --- arch/x86/Makefile | 2 +- arch/x86/entry/vdso/Makefile | 7 +- arch/x86/entry/vdso/vdso2c.c | 233 ------------------------------------------- arch/x86/entry/vdso/vdso2c.h | 208 -------------------------------------- arch/x86/tools/Makefile | 15 ++- arch/x86/tools/vdso2c.c | 233 +++++++++++++++++++++++++++++++++++++++++++ arch/x86/tools/vdso2c.h | 208 ++++++++++++++++++++++++++++++++++++++ 7 files changed, 455 insertions(+), 451 deletions(-) delete mode 100644 arch/x86/entry/vdso/vdso2c.c delete mode 100644 arch/x86/entry/vdso/vdso2c.h create mode 100644 arch/x86/tools/vdso2c.c create mode 100644 arch/x86/tools/vdso2c.h (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 1d403a3612ea..9ab7522ced18 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -252,7 +252,7 @@ endif archscripts: scripts_basic - $(Q)$(MAKE) $(build)=arch/x86/tools relocs + $(Q)$(MAKE) $(build)=arch/x86/tools relocs vdso2c ### # Syscall table generation diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 7f833026d5b2..3d9b09f00c70 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -38,13 +38,12 @@ VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 \ $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE $(call if_changed,vdso_and_check) -HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/$(SUBARCH)/include/uapi -hostprogs += vdso2c +VDSO2C = $(objtree)/arch/x86/tools/vdso2c quiet_cmd_vdso2c = VDSO2C $@ - cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@ + cmd_vdso2c = $(VDSO2C) $< $(<:%.dbg=%) $@ -$(obj)/vdso%-image.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE +$(obj)/vdso%-image.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(VDSO2C) FORCE $(call if_changed,vdso2c) # diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c deleted file mode 100644 index f84e8f8fa5fe..000000000000 --- a/arch/x86/entry/vdso/vdso2c.c +++ /dev/null @@ -1,233 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * vdso2c - A vdso image preparation tool - * Copyright (c) 2014 Andy Lutomirski and others - * - * vdso2c requires stripped and unstripped input. It would be trivial - * to fully strip the input in here, but, for reasons described below, - * we need to write a section table. Doing this is more or less - * equivalent to dropping all non-allocatable sections, but it's - * easier to let objcopy handle that instead of doing it ourselves. - * If we ever need to do something fancier than what objcopy provides, - * it would be straightforward to add here. 
- * - * We're keep a section table for a few reasons: - * - * The Go runtime had a couple of bugs: it would read the section - * table to try to figure out how many dynamic symbols there were (it - * shouldn't have looked at the section table at all) and, if there - * were no SHT_SYNDYM section table entry, it would use an - * uninitialized value for the number of symbols. An empty DYNSYM - * table would work, but I see no reason not to write a valid one (and - * keep full performance for old Go programs). This hack is only - * needed on x86_64. - * - * The bug was introduced on 2012-08-31 by: - * https://code.google.com/p/go/source/detail?r=56ea40aac72b - * and was fixed on 2014-06-13 by: - * https://code.google.com/p/go/source/detail?r=fc1cd5e12595 - * - * Binutils has issues debugging the vDSO: it reads the section table to - * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which - * would break build-id if we removed the section table. Binutils - * also requires that shstrndx != 0. See: - * https://sourceware.org/bugzilla/show_bug.cgi?id=17064 - * - * elfutils might not look for PT_NOTE if there is a section table at - * all. I don't know whether this matters for any practical purpose. - * - * For simplicity, rather than hacking up a partial section table, we - * just write a mostly complete one. We omit non-dynamic symbols, - * though, since they're rather large. - * - * Once binutils gets fixed, we might be able to drop this for all but - * the 64-bit vdso, since build-id only works in kernel RPMs, and - * systems that update to new enough kernel RPMs will likely update - * binutils in sync. build-id has never worked for home-built kernel - * RPMs without manual symlinking, and I suspect that no one ever does - * that. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include -#include -#include - -const char *outfilename; - -struct vdso_sym { - const char *name; - bool export; -}; - -struct vdso_sym required_syms[] = { - {"VDSO32_NOTE_MASK", true}, - {"__kernel_vsyscall", true}, - {"__kernel_sigreturn", true}, - {"__kernel_rt_sigreturn", true}, - {"int80_landing_pad", true}, - {"vdso32_rt_sigreturn_landing_pad", true}, - {"vdso32_sigreturn_landing_pad", true}, -}; - -__attribute__((format(printf, 1, 2))) __attribute__((noreturn)) -static void fail(const char *format, ...) 
-{ - va_list ap; - va_start(ap, format); - fprintf(stderr, "Error: "); - vfprintf(stderr, format, ap); - if (outfilename) - unlink(outfilename); - exit(1); - va_end(ap); -} - -/* - * Evil macros for little-endian reads and writes - */ -#define GLE(x, bits, ifnot) \ - __builtin_choose_expr( \ - (sizeof(*(x)) == bits/8), \ - (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot) - -extern void bad_get_le(void); -#define LAST_GLE(x) \ - __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le()) - -#define GET_LE(x) \ - GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x)))) - -#define PLE(x, val, bits, ifnot) \ - __builtin_choose_expr( \ - (sizeof(*(x)) == bits/8), \ - put_unaligned_le##bits((val), (x)), ifnot) - -extern void bad_put_le(void); -#define LAST_PLE(x, val) \ - __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le()) - -#define PUT_LE(x, val) \ - PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val)))) - - -#define NSYMS ARRAY_SIZE(required_syms) - -#define BITSFUNC3(name, bits, suffix) name##bits##suffix -#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix) -#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, ) - -#define INT_BITS BITSFUNC2(int, ELF_BITS, _t) - -#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x -#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x) -#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x) - -#define ELF_BITS 64 -#include "vdso2c.h" -#undef ELF_BITS - -#define ELF_BITS 32 -#include "vdso2c.h" -#undef ELF_BITS - -static void go(void *raw_addr, size_t raw_len, - void *stripped_addr, size_t stripped_len, - FILE *outfile, const char *name) -{ - Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr; - - if (hdr->e_ident[EI_CLASS] == ELFCLASS64) { - go64(raw_addr, raw_len, stripped_addr, stripped_len, - outfile, name); - } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) { - go32(raw_addr, raw_len, stripped_addr, stripped_len, - outfile, name); - } else { - fail("unknown ELF class\n"); - } -} - -static void map_input(const char *name, void **addr, size_t *len, int prot) -{ - off_t tmp_len; - - int fd = open(name, O_RDONLY); - if (fd == -1) - err(1, "open(%s)", name); - - tmp_len = lseek(fd, 0, SEEK_END); - if (tmp_len == (off_t)-1) - err(1, "lseek"); - *len = (size_t)tmp_len; - - *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0); - if (*addr == MAP_FAILED) - err(1, "mmap"); - - close(fd); -} - -int main(int argc, char **argv) -{ - size_t raw_len, stripped_len; - void *raw_addr, *stripped_addr; - FILE *outfile; - char *name, *tmp; - int namelen; - - if (argc != 4) { - printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n"); - return 1; - } - - /* - * Figure out the struct name. If we're writing to a .so file, - * generate raw output instead. 
- */ - name = strdup(argv[3]); - namelen = strlen(name); - if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) { - name = NULL; - } else { - tmp = strrchr(name, '/'); - if (tmp) - name = tmp + 1; - tmp = strchr(name, '.'); - if (tmp) - *tmp = '\0'; - for (tmp = name; *tmp; tmp++) - if (*tmp == '-') - *tmp = '_'; - } - - map_input(argv[1], &raw_addr, &raw_len, PROT_READ); - map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ); - - outfilename = argv[3]; - outfile = fopen(outfilename, "w"); - if (!outfile) - err(1, "fopen(%s)", outfilename); - - go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name); - - munmap(raw_addr, raw_len); - munmap(stripped_addr, stripped_len); - fclose(outfile); - - return 0; -} diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h deleted file mode 100644 index 78ed1c1f28b9..000000000000 --- a/arch/x86/entry/vdso/vdso2c.h +++ /dev/null @@ -1,208 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This file is included twice from vdso2c.c. It generates code for 32-bit - * and 64-bit vDSOs. We need both for 64-bit builds, since 32-bit vDSOs - * are built for 32-bit userspace. - */ - -static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len) -{ - size_t i; - - for (i = 0; i < len; i++) { - if (i % 10 == 0) - fprintf(outfile, "\n\t"); - fprintf(outfile, "0x%02X, ", (int)(data)[i]); - } -} - - -/* - * Extract a section from the input data into a standalone blob. Used to - * capture kernel-only data that needs to persist indefinitely, e.g. the - * exception fixup tables, but only in the kernel, i.e. the section can - * be stripped from the final vDSO image. - */ -static void BITSFUNC(extract)(const unsigned char *data, size_t data_len, - FILE *outfile, ELF(Shdr) *sec, const char *name) -{ - unsigned long offset; - size_t len; - - offset = (unsigned long)GET_LE(&sec->sh_offset); - len = (size_t)GET_LE(&sec->sh_size); - - if (offset + len > data_len) - fail("section to extract overruns input data"); - - fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len); - BITSFUNC(copy)(outfile, data + offset, len); - fprintf(outfile, "\n};\n\n"); -} - -static void BITSFUNC(go)(void *raw_addr, size_t raw_len, - void *stripped_addr, size_t stripped_len, - FILE *outfile, const char *image_name) -{ - int found_load = 0; - unsigned long load_size = -1; /* Work around bogus warning */ - unsigned long mapping_size; - ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; - unsigned long i, syms_nr; - ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, - *alt_sec = NULL, *extable_sec = NULL; - ELF(Dyn) *dyn = 0, *dyn_end = 0; - const char *secstrings; - INT_BITS syms[NSYMS] = {}; - - ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff)); - - if (GET_LE(&hdr->e_type) != ET_DYN) - fail("input is not a shared object\n"); - - /* Walk the segment table. 
*/ - for (i = 0; i < GET_LE(&hdr->e_phnum); i++) { - if (GET_LE(&pt[i].p_type) == PT_LOAD) { - if (found_load) - fail("multiple PT_LOAD segs\n"); - - if (GET_LE(&pt[i].p_offset) != 0 || - GET_LE(&pt[i].p_vaddr) != 0) - fail("PT_LOAD in wrong place\n"); - - if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz)) - fail("cannot handle memsz != filesz\n"); - - load_size = GET_LE(&pt[i].p_memsz); - found_load = 1; - } else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) { - dyn = raw_addr + GET_LE(&pt[i].p_offset); - dyn_end = raw_addr + GET_LE(&pt[i].p_offset) + - GET_LE(&pt[i].p_memsz); - } - } - if (!found_load) - fail("no PT_LOAD seg\n"); - - if (stripped_len < load_size) - fail("stripped input is too short\n"); - - if (!dyn) - fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n"); - - /* Walk the dynamic table */ - for (i = 0; dyn + i < dyn_end && - GET_LE(&dyn[i].d_tag) != DT_NULL; i++) { - typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag); - if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA || - tag == DT_RELENT || tag == DT_TEXTREL) - fail("vdso image contains dynamic relocations\n"); - } - - /* Walk the section table */ - secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) + - GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx); - secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset); - for (i = 0; i < GET_LE(&hdr->e_shnum); i++) { - ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) + - GET_LE(&hdr->e_shentsize) * i; - if (GET_LE(&sh->sh_type) == SHT_SYMTAB) - symtab_hdr = sh; - - if (!strcmp(secstrings + GET_LE(&sh->sh_name), - ".altinstructions")) - alt_sec = sh; - if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table")) - extable_sec = sh; - } - - if (!symtab_hdr) - fail("no symbol table\n"); - - strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) + - GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link); - - syms_nr = GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize); - /* Walk the symbol table */ - for (i = 0; i < syms_nr; i++) { - unsigned int k; - ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) + - GET_LE(&symtab_hdr->sh_entsize) * i; - const char *sym_name = raw_addr + - GET_LE(&strtab_hdr->sh_offset) + - GET_LE(&sym->st_name); - - for (k = 0; k < NSYMS; k++) { - if (!strcmp(sym_name, required_syms[k].name)) { - if (syms[k]) { - fail("duplicate symbol %s\n", - required_syms[k].name); - } - - /* - * Careful: we use negative addresses, but - * st_value is unsigned, so we rely - * on syms[k] being a signed type of the - * correct width. 
- */ - syms[k] = GET_LE(&sym->st_value); - } - } - } - - if (!image_name) { - fwrite(stripped_addr, stripped_len, 1, outfile); - return; - } - - mapping_size = (stripped_len + 4095) / 4096 * 4096; - - fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n"); - fprintf(outfile, "#include \n"); - fprintf(outfile, "#include \n"); - fprintf(outfile, "#include \n"); - fprintf(outfile, "#include \n"); - fprintf(outfile, "\n"); - fprintf(outfile, - "static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {", - mapping_size); - for (i = 0; i < stripped_len; i++) { - if (i % 10 == 0) - fprintf(outfile, "\n\t"); - fprintf(outfile, "0x%02X, ", - (int)((unsigned char *)stripped_addr)[i]); - } - fprintf(outfile, "\n};\n\n"); - if (extable_sec) - BITSFUNC(extract)(raw_addr, raw_len, outfile, - extable_sec, "extable"); - - fprintf(outfile, "const struct vdso_image %s = {\n", image_name); - fprintf(outfile, "\t.data = raw_data,\n"); - fprintf(outfile, "\t.size = %lu,\n", mapping_size); - if (alt_sec) { - fprintf(outfile, "\t.alt = %lu,\n", - (unsigned long)GET_LE(&alt_sec->sh_offset)); - fprintf(outfile, "\t.alt_len = %lu,\n", - (unsigned long)GET_LE(&alt_sec->sh_size)); - } - if (extable_sec) { - fprintf(outfile, "\t.extable_base = %lu,\n", - (unsigned long)GET_LE(&extable_sec->sh_offset)); - fprintf(outfile, "\t.extable_len = %lu,\n", - (unsigned long)GET_LE(&extable_sec->sh_size)); - fprintf(outfile, "\t.extable = extable,\n"); - } - - for (i = 0; i < NSYMS; i++) { - if (required_syms[i].export && syms[i]) - fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n", - required_syms[i].name, (int64_t)syms[i]); - } - fprintf(outfile, "};\n\n"); - fprintf(outfile, "static __init int init_%s(void) {\n", image_name); - fprintf(outfile, "\treturn init_vdso_image(&%s);\n", image_name); - fprintf(outfile, "};\n"); - fprintf(outfile, "subsys_initcall(init_%s);\n", image_name); - -} diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 7278e2545c35..39a183fffd04 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -38,9 +38,14 @@ $(obj)/insn_decoder_test.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tool $(obj)/insn_sanity.o: $(srctree)/tools/arch/x86/lib/insn.c $(srctree)/tools/arch/x86/lib/inat.c $(srctree)/tools/arch/x86/include/asm/inat_types.h $(srctree)/tools/arch/x86/include/asm/inat.h $(srctree)/tools/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c -HOST_EXTRACFLAGS += -I$(srctree)/tools/include -hostprogs += relocs -relocs-objs := relocs_32.o relocs_64.o relocs_common.o -PHONY += relocs -relocs: $(obj)/relocs +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi \ + -I$(srctree)/arch/$(SUBARCH)/include/uapi + +hostprogs += relocs vdso2c +relocs-objs := relocs_32.o relocs_64.o relocs_common.o + +always-y := $(hostprogs) + +PHONY += $(hostprogs) +$(hostprogs): %: $(obj)/% @: diff --git a/arch/x86/tools/vdso2c.c b/arch/x86/tools/vdso2c.c new file mode 100644 index 000000000000..f84e8f8fa5fe --- /dev/null +++ b/arch/x86/tools/vdso2c.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * vdso2c - A vdso image preparation tool + * Copyright (c) 2014 Andy Lutomirski and others + * + * vdso2c requires stripped and unstripped input. It would be trivial + * to fully strip the input in here, but, for reasons described below, + * we need to write a section table. 
Doing this is more or less + * equivalent to dropping all non-allocatable sections, but it's + * easier to let objcopy handle that instead of doing it ourselves. + * If we ever need to do something fancier than what objcopy provides, + * it would be straightforward to add here. + * + * We're keep a section table for a few reasons: + * + * The Go runtime had a couple of bugs: it would read the section + * table to try to figure out how many dynamic symbols there were (it + * shouldn't have looked at the section table at all) and, if there + * were no SHT_SYNDYM section table entry, it would use an + * uninitialized value for the number of symbols. An empty DYNSYM + * table would work, but I see no reason not to write a valid one (and + * keep full performance for old Go programs). This hack is only + * needed on x86_64. + * + * The bug was introduced on 2012-08-31 by: + * https://code.google.com/p/go/source/detail?r=56ea40aac72b + * and was fixed on 2014-06-13 by: + * https://code.google.com/p/go/source/detail?r=fc1cd5e12595 + * + * Binutils has issues debugging the vDSO: it reads the section table to + * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which + * would break build-id if we removed the section table. Binutils + * also requires that shstrndx != 0. See: + * https://sourceware.org/bugzilla/show_bug.cgi?id=17064 + * + * elfutils might not look for PT_NOTE if there is a section table at + * all. I don't know whether this matters for any practical purpose. + * + * For simplicity, rather than hacking up a partial section table, we + * just write a mostly complete one. We omit non-dynamic symbols, + * though, since they're rather large. + * + * Once binutils gets fixed, we might be able to drop this for all but + * the 64-bit vdso, since build-id only works in kernel RPMs, and + * systems that update to new enough kernel RPMs will likely update + * binutils in sync. build-id has never worked for home-built kernel + * RPMs without manual symlinking, and I suspect that no one ever does + * that. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + +const char *outfilename; + +struct vdso_sym { + const char *name; + bool export; +}; + +struct vdso_sym required_syms[] = { + {"VDSO32_NOTE_MASK", true}, + {"__kernel_vsyscall", true}, + {"__kernel_sigreturn", true}, + {"__kernel_rt_sigreturn", true}, + {"int80_landing_pad", true}, + {"vdso32_rt_sigreturn_landing_pad", true}, + {"vdso32_sigreturn_landing_pad", true}, +}; + +__attribute__((format(printf, 1, 2))) __attribute__((noreturn)) +static void fail(const char *format, ...) 
+{ + va_list ap; + va_start(ap, format); + fprintf(stderr, "Error: "); + vfprintf(stderr, format, ap); + if (outfilename) + unlink(outfilename); + exit(1); + va_end(ap); +} + +/* + * Evil macros for little-endian reads and writes + */ +#define GLE(x, bits, ifnot) \ + __builtin_choose_expr( \ + (sizeof(*(x)) == bits/8), \ + (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot) + +extern void bad_get_le(void); +#define LAST_GLE(x) \ + __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le()) + +#define GET_LE(x) \ + GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x)))) + +#define PLE(x, val, bits, ifnot) \ + __builtin_choose_expr( \ + (sizeof(*(x)) == bits/8), \ + put_unaligned_le##bits((val), (x)), ifnot) + +extern void bad_put_le(void); +#define LAST_PLE(x, val) \ + __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le()) + +#define PUT_LE(x, val) \ + PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val)))) + + +#define NSYMS ARRAY_SIZE(required_syms) + +#define BITSFUNC3(name, bits, suffix) name##bits##suffix +#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix) +#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, ) + +#define INT_BITS BITSFUNC2(int, ELF_BITS, _t) + +#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x +#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x) +#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x) + +#define ELF_BITS 64 +#include "vdso2c.h" +#undef ELF_BITS + +#define ELF_BITS 32 +#include "vdso2c.h" +#undef ELF_BITS + +static void go(void *raw_addr, size_t raw_len, + void *stripped_addr, size_t stripped_len, + FILE *outfile, const char *name) +{ + Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr; + + if (hdr->e_ident[EI_CLASS] == ELFCLASS64) { + go64(raw_addr, raw_len, stripped_addr, stripped_len, + outfile, name); + } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) { + go32(raw_addr, raw_len, stripped_addr, stripped_len, + outfile, name); + } else { + fail("unknown ELF class\n"); + } +} + +static void map_input(const char *name, void **addr, size_t *len, int prot) +{ + off_t tmp_len; + + int fd = open(name, O_RDONLY); + if (fd == -1) + err(1, "open(%s)", name); + + tmp_len = lseek(fd, 0, SEEK_END); + if (tmp_len == (off_t)-1) + err(1, "lseek"); + *len = (size_t)tmp_len; + + *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0); + if (*addr == MAP_FAILED) + err(1, "mmap"); + + close(fd); +} + +int main(int argc, char **argv) +{ + size_t raw_len, stripped_len; + void *raw_addr, *stripped_addr; + FILE *outfile; + char *name, *tmp; + int namelen; + + if (argc != 4) { + printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n"); + return 1; + } + + /* + * Figure out the struct name. If we're writing to a .so file, + * generate raw output instead. 
+ */ + name = strdup(argv[3]); + namelen = strlen(name); + if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) { + name = NULL; + } else { + tmp = strrchr(name, '/'); + if (tmp) + name = tmp + 1; + tmp = strchr(name, '.'); + if (tmp) + *tmp = '\0'; + for (tmp = name; *tmp; tmp++) + if (*tmp == '-') + *tmp = '_'; + } + + map_input(argv[1], &raw_addr, &raw_len, PROT_READ); + map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ); + + outfilename = argv[3]; + outfile = fopen(outfilename, "w"); + if (!outfile) + err(1, "fopen(%s)", outfilename); + + go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name); + + munmap(raw_addr, raw_len); + munmap(stripped_addr, stripped_len); + fclose(outfile); + + return 0; +} diff --git a/arch/x86/tools/vdso2c.h b/arch/x86/tools/vdso2c.h new file mode 100644 index 000000000000..78ed1c1f28b9 --- /dev/null +++ b/arch/x86/tools/vdso2c.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is included twice from vdso2c.c. It generates code for 32-bit + * and 64-bit vDSOs. We need both for 64-bit builds, since 32-bit vDSOs + * are built for 32-bit userspace. + */ + +static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i % 10 == 0) + fprintf(outfile, "\n\t"); + fprintf(outfile, "0x%02X, ", (int)(data)[i]); + } +} + + +/* + * Extract a section from the input data into a standalone blob. Used to + * capture kernel-only data that needs to persist indefinitely, e.g. the + * exception fixup tables, but only in the kernel, i.e. the section can + * be stripped from the final vDSO image. + */ +static void BITSFUNC(extract)(const unsigned char *data, size_t data_len, + FILE *outfile, ELF(Shdr) *sec, const char *name) +{ + unsigned long offset; + size_t len; + + offset = (unsigned long)GET_LE(&sec->sh_offset); + len = (size_t)GET_LE(&sec->sh_size); + + if (offset + len > data_len) + fail("section to extract overruns input data"); + + fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len); + BITSFUNC(copy)(outfile, data + offset, len); + fprintf(outfile, "\n};\n\n"); +} + +static void BITSFUNC(go)(void *raw_addr, size_t raw_len, + void *stripped_addr, size_t stripped_len, + FILE *outfile, const char *image_name) +{ + int found_load = 0; + unsigned long load_size = -1; /* Work around bogus warning */ + unsigned long mapping_size; + ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; + unsigned long i, syms_nr; + ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, + *alt_sec = NULL, *extable_sec = NULL; + ELF(Dyn) *dyn = 0, *dyn_end = 0; + const char *secstrings; + INT_BITS syms[NSYMS] = {}; + + ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff)); + + if (GET_LE(&hdr->e_type) != ET_DYN) + fail("input is not a shared object\n"); + + /* Walk the segment table. 
*/ + for (i = 0; i < GET_LE(&hdr->e_phnum); i++) { + if (GET_LE(&pt[i].p_type) == PT_LOAD) { + if (found_load) + fail("multiple PT_LOAD segs\n"); + + if (GET_LE(&pt[i].p_offset) != 0 || + GET_LE(&pt[i].p_vaddr) != 0) + fail("PT_LOAD in wrong place\n"); + + if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz)) + fail("cannot handle memsz != filesz\n"); + + load_size = GET_LE(&pt[i].p_memsz); + found_load = 1; + } else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) { + dyn = raw_addr + GET_LE(&pt[i].p_offset); + dyn_end = raw_addr + GET_LE(&pt[i].p_offset) + + GET_LE(&pt[i].p_memsz); + } + } + if (!found_load) + fail("no PT_LOAD seg\n"); + + if (stripped_len < load_size) + fail("stripped input is too short\n"); + + if (!dyn) + fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n"); + + /* Walk the dynamic table */ + for (i = 0; dyn + i < dyn_end && + GET_LE(&dyn[i].d_tag) != DT_NULL; i++) { + typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag); + if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA || + tag == DT_RELENT || tag == DT_TEXTREL) + fail("vdso image contains dynamic relocations\n"); + } + + /* Walk the section table */ + secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) + + GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx); + secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset); + for (i = 0; i < GET_LE(&hdr->e_shnum); i++) { + ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) + + GET_LE(&hdr->e_shentsize) * i; + if (GET_LE(&sh->sh_type) == SHT_SYMTAB) + symtab_hdr = sh; + + if (!strcmp(secstrings + GET_LE(&sh->sh_name), + ".altinstructions")) + alt_sec = sh; + if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table")) + extable_sec = sh; + } + + if (!symtab_hdr) + fail("no symbol table\n"); + + strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) + + GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link); + + syms_nr = GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize); + /* Walk the symbol table */ + for (i = 0; i < syms_nr; i++) { + unsigned int k; + ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) + + GET_LE(&symtab_hdr->sh_entsize) * i; + const char *sym_name = raw_addr + + GET_LE(&strtab_hdr->sh_offset) + + GET_LE(&sym->st_name); + + for (k = 0; k < NSYMS; k++) { + if (!strcmp(sym_name, required_syms[k].name)) { + if (syms[k]) { + fail("duplicate symbol %s\n", + required_syms[k].name); + } + + /* + * Careful: we use negative addresses, but + * st_value is unsigned, so we rely + * on syms[k] being a signed type of the + * correct width. 
+			 */
+			syms[k] = GET_LE(&sym->st_value);
+			}
+		}
+	}
+
+	if (!image_name) {
+		fwrite(stripped_addr, stripped_len, 1, outfile);
+		return;
+	}
+
+	mapping_size = (stripped_len + 4095) / 4096 * 4096;
+
+	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+	fprintf(outfile, "#include \n");
+	fprintf(outfile, "#include \n");
+	fprintf(outfile, "#include \n");
+	fprintf(outfile, "#include \n");
+	fprintf(outfile, "\n");
+	fprintf(outfile,
+		"static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
+		mapping_size);
+	for (i = 0; i < stripped_len; i++) {
+		if (i % 10 == 0)
+			fprintf(outfile, "\n\t");
+		fprintf(outfile, "0x%02X, ",
+			(int)((unsigned char *)stripped_addr)[i]);
+	}
+	fprintf(outfile, "\n};\n\n");
+	if (extable_sec)
+		BITSFUNC(extract)(raw_addr, raw_len, outfile,
+				  extable_sec, "extable");
+
+	fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
+	fprintf(outfile, "\t.data = raw_data,\n");
+	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+	if (alt_sec) {
+		fprintf(outfile, "\t.alt = %lu,\n",
+			(unsigned long)GET_LE(&alt_sec->sh_offset));
+		fprintf(outfile, "\t.alt_len = %lu,\n",
+			(unsigned long)GET_LE(&alt_sec->sh_size));
+	}
+	if (extable_sec) {
+		fprintf(outfile, "\t.extable_base = %lu,\n",
+			(unsigned long)GET_LE(&extable_sec->sh_offset));
+		fprintf(outfile, "\t.extable_len = %lu,\n",
+			(unsigned long)GET_LE(&extable_sec->sh_size));
+		fprintf(outfile, "\t.extable = extable,\n");
+	}
+
+	for (i = 0; i < NSYMS; i++) {
+		if (required_syms[i].export && syms[i])
+			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+				required_syms[i].name, (int64_t)syms[i]);
+	}
+	fprintf(outfile, "};\n\n");
+	fprintf(outfile, "static __init int init_%s(void) {\n", image_name);
+	fprintf(outfile, "\treturn init_vdso_image(&%s);\n", image_name);
+	fprintf(outfile, "};\n");
+	fprintf(outfile, "subsys_initcall(init_%s);\n", image_name);
+
+}
--
cgit v1.2.3


From 693c819fedcdcabfda7488e2d5e355a84c2fd1b0 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Tue, 16 Dec 2025 13:25:57 -0800
Subject: x86/entry/vdso: Refactor the vdso build

- Separate out the vdso sources into common, vdso32, and vdso64
  directories.

- Build the 32- and 64-bit vdsos in their respective subdirectories;
  this greatly simplifies the build flags handling.

- Unify the mangling of Makefile flags between the 32- and 64-bit vdso
  code as much as possible; all common rules are put in
  arch/x86/entry/vdso/common/Makefile.include. The remainder is very
  simple for 32 bits; the 64-bit one is only slightly more complicated
  because it contains the x32 generation rule.

- Define __DISABLE_EXPORTS when building the vdso. This need seems to
  have been masked by a different ordering of compile flags before.

- Change CONFIG_X86_64 to BUILD_VDSO32_64 in vdso32/system_call.S, to
  make it compatible with including fake_32bit_build.h.

- The -fcf-protection= option was "leaking" from the kernel build, for
  reasons that were not clear to me. Furthermore, several distributions
  ship with it set to a default value other than "-fcf-protection=none".
  Make it match the configuration options for *user space*.

Note that this patch may seem large, but the vast majority of it is
simply code movement.

Signed-off-by: H. Peter Anvin (Intel)
Signed-off-by: Dave Hansen
Link: https://patch.msgid.link/20251216212606.1325678-4-hpa@zytor.com
---
 arch/x86/entry/vdso/Makefile                   | 161 +---------------------
 arch/x86/entry/vdso/common/Makefile.include    |  89 +++++++++++++
 arch/x86/entry/vdso/common/note.S              |  18 +++
 arch/x86/entry/vdso/common/vclock_gettime.c    |  77 +++++++++++
 arch/x86/entry/vdso/common/vdso-layout.lds.S   | 101 ++++++++++++++
 arch/x86/entry/vdso/common/vgetcpu.c           |  22 +++
 arch/x86/entry/vdso/vclock_gettime.c           |  77 -----------
 arch/x86/entry/vdso/vdso-layout.lds.S          | 101 --------------
 arch/x86/entry/vdso/vdso-note.S                |  15 ---
 arch/x86/entry/vdso/vdso.lds.S                 |  37 -----
 arch/x86/entry/vdso/vdso32/Makefile            |  24 ++++
 arch/x86/entry/vdso/vdso32/note.S              |  19 +--
 arch/x86/entry/vdso/vdso32/system_call.S       |   2 +-
 arch/x86/entry/vdso/vdso32/vclock_gettime.c    |   5 +-
 arch/x86/entry/vdso/vdso32/vdso32.lds.S        |   2 +-
 arch/x86/entry/vdso/vdso32/vgetcpu.c           |   4 +-
 arch/x86/entry/vdso/vdso64/Makefile            |  46 +++++++
 arch/x86/entry/vdso/vdso64/note.S              |   1 +
 arch/x86/entry/vdso/vdso64/vclock_gettime.c    |   1 +
 arch/x86/entry/vdso/vdso64/vdso64.lds.S        |  37 +++++
 arch/x86/entry/vdso/vdso64/vdsox32.lds.S       |  27 ++++
 arch/x86/entry/vdso/vdso64/vgetcpu.c           |   1 +
 arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S | 178 +++++++++++++++++++++++++
 arch/x86/entry/vdso/vdso64/vgetrandom.c        |  15 +++
 arch/x86/entry/vdso/vdso64/vsgx.S              | 150 +++++++++++++++
 arch/x86/entry/vdso/vdsox32.lds.S              |  27 ----
 arch/x86/entry/vdso/vgetcpu.c                  |  22 ---
 arch/x86/entry/vdso/vgetrandom-chacha.S        | 178 -------------------------
 arch/x86/entry/vdso/vgetrandom.c               |  15 ---
 arch/x86/entry/vdso/vsgx.S                     | 150 ---------------------
 30 files changed, 798 insertions(+), 804 deletions(-)
 create mode 100644 arch/x86/entry/vdso/common/Makefile.include
 create mode 100644 arch/x86/entry/vdso/common/note.S
 create mode 100644 arch/x86/entry/vdso/common/vclock_gettime.c
 create mode 100644 arch/x86/entry/vdso/common/vdso-layout.lds.S
 create mode 100644 arch/x86/entry/vdso/common/vgetcpu.c
 delete mode 100644 arch/x86/entry/vdso/vclock_gettime.c
 delete mode 100644 arch/x86/entry/vdso/vdso-layout.lds.S
 delete mode 100644 arch/x86/entry/vdso/vdso-note.S
 delete mode 100644 arch/x86/entry/vdso/vdso.lds.S
 create mode 100644 arch/x86/entry/vdso/vdso32/Makefile
 create mode 100644 arch/x86/entry/vdso/vdso64/Makefile
 create mode 100644 arch/x86/entry/vdso/vdso64/note.S
 create mode 100644 arch/x86/entry/vdso/vdso64/vclock_gettime.c
 create mode 100644 arch/x86/entry/vdso/vdso64/vdso64.lds.S
 create mode 100644 arch/x86/entry/vdso/vdso64/vdsox32.lds.S
 create mode 100644 arch/x86/entry/vdso/vdso64/vgetcpu.c
 create mode 100644 arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S
 create mode 100644 arch/x86/entry/vdso/vdso64/vgetrandom.c
 create mode 100644 arch/x86/entry/vdso/vdso64/vsgx.S
 delete mode 100644 arch/x86/entry/vdso/vdsox32.lds.S
 delete mode 100644 arch/x86/entry/vdso/vgetcpu.c
 delete mode 100644 arch/x86/entry/vdso/vgetrandom-chacha.S
 delete mode 100644 arch/x86/entry/vdso/vgetrandom.c
 delete mode 100644 arch/x86/entry/vdso/vsgx.S
(limited to 'arch')

diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 3d9b09f00c70..987b43fd4cd3 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -3,159 +3,10 @@
 # Building vDSO images for x86.
# -# Include the generic Makefile to check the built vDSO: -include $(srctree)/lib/vdso/Makefile.include +# Regular kernel objects +obj-y := vma.o extable.o +obj-$(CONFIG_COMPAT_32) += vdso32-setup.o -# Files to link into the vDSO: -vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vgetrandom.o vgetrandom-chacha.o -vobjs32-y := vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o -vobjs32-y += vdso32/vclock_gettime.o vdso32/vgetcpu.o -vobjs-$(CONFIG_X86_SGX) += vsgx.o - -# Files to link into the kernel: -obj-y += vma.o extable.o - -# vDSO images to build: -obj-$(CONFIG_X86_64) += vdso64-image.o -obj-$(CONFIG_X86_X32_ABI) += vdsox32-image.o -obj-$(CONFIG_COMPAT_32) += vdso32-image.o vdso32-setup.o - -vobjs := $(addprefix $(obj)/, $(vobjs-y)) -vobjs32 := $(addprefix $(obj)/, $(vobjs32-y)) - -$(obj)/vdso.o: $(obj)/vdso.so - -targets += vdso.lds $(vobjs-y) -targets += vdso32/vdso32.lds $(vobjs32-y) - -targets += $(foreach x, 64 x32 32, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg) - -CPPFLAGS_vdso.lds += -P -C - -VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 \ - -z max-page-size=4096 - -$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE - $(call if_changed,vdso_and_check) - -VDSO2C = $(objtree)/arch/x86/tools/vdso2c - -quiet_cmd_vdso2c = VDSO2C $@ - cmd_vdso2c = $(VDSO2C) $< $(<:%.dbg=%) $@ - -$(obj)/vdso%-image.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(VDSO2C) FORCE - $(call if_changed,vdso2c) - -# -# Don't omit frame pointers for ease of userspace debugging, but do -# optimize sibling calls. -# -CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ - $(filter -g%,$(KBUILD_CFLAGS)) -fno-stack-protector \ - -fno-omit-frame-pointer -foptimize-sibling-calls \ - -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO - -ifdef CONFIG_MITIGATION_RETPOLINE -ifneq ($(RETPOLINE_VDSO_CFLAGS),) - CFL += $(RETPOLINE_VDSO_CFLAGS) -endif -endif - -$(vobjs): KBUILD_CFLAGS := $(filter-out $(PADDING_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) $(RANDSTRUCT_CFLAGS) $(KSTACK_ERASE_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) -$(vobjs): KBUILD_AFLAGS += -DBUILD_VDSO - -# -# vDSO code runs in userspace and -pg doesn't help with profiling anyway. -# -CFLAGS_REMOVE_vclock_gettime.o = -pg -CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg -CFLAGS_REMOVE_vgetcpu.o = -pg -CFLAGS_REMOVE_vdso32/vgetcpu.o = -pg -CFLAGS_REMOVE_vsgx.o = -pg -CFLAGS_REMOVE_vgetrandom.o = -pg - -# -# X32 processes use x32 vDSO to access 64bit kernel data. -# -# Build x32 vDSO image: -# 1. Compile x32 vDSO as 64bit. -# 2. Convert object files to x32. -# 3. Build x32 VDSO image with x32 objects, which contains 64bit codes -# so that it can reach 64bit address space with 64bit pointers. -# - -CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds) -VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \ - -z max-page-size=4096 - -# x32-rebranded versions -vobjx32s-y := $(vobjs-y:.o=-x32.o) - -# same thing, but in the output directory -vobjx32s := $(addprefix $(obj)/, $(vobjx32s-y)) - -# Convert 64bit object file to x32 for x32 vDSO. 
-quiet_cmd_x32 = X32 $@ - cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@ - -$(obj)/%-x32.o: $(obj)/%.o FORCE - $(call if_changed,x32) - -targets += vdsox32.lds $(vobjx32s-y) - -$(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table -$(obj)/%.so: $(obj)/%.so.dbg FORCE - $(call if_changed,objcopy) - -$(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE - $(call if_changed,vdso_and_check) - -CPPFLAGS_vdso32/vdso32.lds = $(CPPFLAGS_vdso.lds) -VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1 - -KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO -$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32) -$(obj)/vdso32.so.dbg: asflags-$(CONFIG_X86_64) += -m32 - -KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) -KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(RANDSTRUCT_CFLAGS),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(KSTACK_ERASE_CFLAGS),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(CC_FLAGS_CFI),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 := $(filter-out $(PADDING_CFLAGS),$(KBUILD_CFLAGS_32)) -KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic -KBUILD_CFLAGS_32 += -fno-stack-protector -KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) -KBUILD_CFLAGS_32 += -fno-omit-frame-pointer -KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS_32 += -DBUILD_VDSO - -ifdef CONFIG_MITIGATION_RETPOLINE -ifneq ($(RETPOLINE_VDSO_CFLAGS),) - KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS) -endif -endif - -$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) - -$(obj)/vdso32.so.dbg: $(obj)/vdso32/vdso32.lds $(vobjs32) FORCE - $(call if_changed,vdso_and_check) - -# -# The DSO images are built using a special linker script. -# -quiet_cmd_vdso = VDSO $@ - cmd_vdso = $(LD) -o $@ \ - $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ - -T $(filter %.lds,$^) $(filter %.o,$^) - -VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 --no-undefined \ - $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack - -quiet_cmd_vdso_and_check = VDSO $@ - cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check) +# vDSO directories +obj-$(CONFIG_X86_64) += vdso64/ +obj-$(CONFIG_COMPAT_32) += vdso32/ diff --git a/arch/x86/entry/vdso/common/Makefile.include b/arch/x86/entry/vdso/common/Makefile.include new file mode 100644 index 000000000000..3514b4a6869b --- /dev/null +++ b/arch/x86/entry/vdso/common/Makefile.include @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Building vDSO images for x86. +# + +# Include the generic Makefile to check the built vDSO: +include $(srctree)/lib/vdso/Makefile.include + +obj-y += $(foreach x,$(vdsos-y),vdso$(x)-image.o) + +targets += $(foreach x,$(vdsos-y),vdso$(x)-image.c vdso$(x).so vdso$(x).so.dbg vdso$(x).lds) +targets += $(vobjs-y) + +# vobjs-y with $(obj)/ prepended +vobjs := $(addprefix $(obj)/,$(vobjs-y)) + +# Options for vdso*.lds +CPPFLAGS_VDSO_LDS := -P -C -I$(src)/.. 
+$(obj)/%.lds : KBUILD_CPPFLAGS += $(CPPFLAGS_VDSO_LDS) + +# +# Options from KBUILD_[AC]FLAGS that should *NOT* be kept +# +flags-remove-y += \ + -D__KERNEL__ -mcmodel=kernel -mregparm=3 \ + -fno-pic -fno-PIC -fno-pie fno-PIE \ + -mfentry -pg \ + $(RANDSTRUCT_CFLAGS) $(GCC_PLUGIN_CFLAGS) $(KSTACK_ERASE_CFLAGS) \ + $(RETPOLINE_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \ + $(PADDING_CFLAGS) + +# +# Don't omit frame pointers for ease of userspace debugging, but do +# optimize sibling calls. +# +flags-y += -D__DISABLE_EXPORTS +flags-y += -DDISABLE_BRANCH_PROFILING +flags-y += -DBUILD_VDSO +flags-y += -I$(src)/.. -I$(srctree) +flags-y += -O2 -fpic +flags-y += -fno-stack-protector +flags-y += -fno-omit-frame-pointer +flags-y += -foptimize-sibling-calls +flags-y += -fasynchronous-unwind-tables + +# Reset cf protections enabled by compiler default +flags-y += $(call cc-option, -fcf-protection=none) +flags-$(X86_USER_SHADOW_STACK) += $(call cc-option, -fcf-protection=return) +# When user space IBT is supported, enable this. +# flags-$(CONFIG_USER_IBT) += $(call cc-option, -fcf-protection=branch) + +flags-$(CONFIG_MITIGATION_RETPOLINE) += $(RETPOLINE_VDSO_CFLAGS) + +# These need to be conditional on $(vobjs) as they do not apply to +# the output vdso*-image.o files which are standard kernel objects. +$(vobjs) : KBUILD_AFLAGS := \ + $(filter-out $(flags-remove-y),$(KBUILD_AFLAGS)) $(flags-y) +$(vobjs) : KBUILD_CFLAGS := \ + $(filter-out $(flags-remove-y),$(KBUILD_CFLAGS)) $(flags-y) + +# +# The VDSO images are built using a special linker script. +# +VDSO_LDFLAGS := -shared --hash-style=both --build-id=sha1 --no-undefined \ + $(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack + +quiet_cmd_vdso = VDSO $@ + cmd_vdso = $(LD) -o $@ \ + $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$*) \ + -T $(filter %.lds,$^) $(filter %.o,$^) +quiet_cmd_vdso_and_check = VDSO $@ + cmd_vdso_and_check = $(cmd_vdso); $(cmd_vdso_check) + +$(obj)/vdso%.so.dbg: $(obj)/vdso%.lds FORCE + $(call if_changed,vdso_and_check) + +$(obj)/%.so: OBJCOPYFLAGS := -S --remove-section __ex_table +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +VDSO2C = $(objtree)/arch/x86/tools/vdso2c + +quiet_cmd_vdso2c = VDSO2C $@ + cmd_vdso2c = $(VDSO2C) $< $(<:%.dbg=%) $@ + +$(obj)/%-image.c: $(obj)/%.so.dbg $(obj)/%.so $(VDSO2C) FORCE + $(call if_changed,vdso2c) + +$(obj)/%-image.o: $(obj)/%-image.c diff --git a/arch/x86/entry/vdso/common/note.S b/arch/x86/entry/vdso/common/note.S new file mode 100644 index 000000000000..2cbd39939dc6 --- /dev/null +++ b/arch/x86/entry/vdso/common/note.S @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. + */ + +#include +#include +#include + +/* Ideally this would use UTS_NAME, but using a quoted string here + doesn't work. Remember to change this when changing the + kernel's name. */ +ELFNOTE_START(Linux, 0, "a") + .long LINUX_VERSION_CODE +ELFNOTE_END + +BUILD_SALT diff --git a/arch/x86/entry/vdso/common/vclock_gettime.c b/arch/x86/entry/vdso/common/vclock_gettime.c new file mode 100644 index 000000000000..0debc194bd78 --- /dev/null +++ b/arch/x86/entry/vdso/common/vclock_gettime.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Fast user context implementation of clock_gettime, gettimeofday, and time. + * + * Copyright 2006 Andi Kleen, SUSE Labs. 
+ * Copyright 2019 ARM Limited + * + * 32 Bit compat layer by Stefani Seibold + * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany + */ +#include +#include +#include +#include + +#include "../../../../lib/vdso/gettimeofday.c" + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + return __cvdso_gettimeofday(tv, tz); +} + +int gettimeofday(struct __kernel_old_timeval *, struct timezone *) + __attribute__((weak, alias("__vdso_gettimeofday"))); + +__kernel_old_time_t __vdso_time(__kernel_old_time_t *t) +{ + return __cvdso_time(t); +} + +__kernel_old_time_t time(__kernel_old_time_t *t) __attribute__((weak, alias("__vdso_time"))); + + +#if defined(CONFIG_X86_64) && !defined(BUILD_VDSO32_64) +/* both 64-bit and x32 use these */ +int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int clock_gettime(clockid_t, struct __kernel_timespec *) + __attribute__((weak, alias("__vdso_clock_gettime"))); + +int __vdso_clock_getres(clockid_t clock, + struct __kernel_timespec *res) +{ + return __cvdso_clock_getres(clock, res); +} +int clock_getres(clockid_t, struct __kernel_timespec *) + __attribute__((weak, alias("__vdso_clock_getres"))); + +#else +/* i386 only */ +int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) +{ + return __cvdso_clock_gettime32(clock, ts); +} + +int clock_gettime(clockid_t, struct old_timespec32 *) + __attribute__((weak, alias("__vdso_clock_gettime"))); + +int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) +{ + return __cvdso_clock_gettime(clock, ts); +} + +int clock_gettime64(clockid_t, struct __kernel_timespec *) + __attribute__((weak, alias("__vdso_clock_gettime64"))); + +int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res) +{ + return __cvdso_clock_getres_time32(clock, res); +} + +int clock_getres(clockid_t, struct old_timespec32 *) + __attribute__((weak, alias("__vdso_clock_getres"))); +#endif diff --git a/arch/x86/entry/vdso/common/vdso-layout.lds.S b/arch/x86/entry/vdso/common/vdso-layout.lds.S new file mode 100644 index 000000000000..ec1ac191a057 --- /dev/null +++ b/arch/x86/entry/vdso/common/vdso-layout.lds.S @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include + +/* + * Linker script for vDSO. This is an ELF shared object prelinked to + * its virtual address, and with only one read-only segment. + * This script controls its layout. + */ + +SECTIONS +{ + /* + * User/kernel shared data is before the vDSO. This may be a little + * uglier than putting it after the vDSO, but it avoids issues with + * non-allocatable things that dangle past the end of the PT_LOAD + * segment. + */ + + VDSO_VVAR_SYMS + + vclock_pages = VDSO_VCLOCK_PAGES_START(vdso_u_data); + pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE; + hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE; + + . = SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { + *(.rodata*) + *(.data*) + *(.sdata*) + *(.got.plt) *(.got) + *(.gnu.linkonce.d.*) + *(.bss*) + *(.dynbss*) + *(.gnu.linkonce.b.*) + } :text + + /* + * Discard .note.gnu.property sections which are unused and have + * different alignment requirement from vDSO note sections. 
+ */ + /DISCARD/ : { + *(.note.gnu.property) + } + .note : { *(.note.*) } :text :note + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + + /* + * Text is well-separated from actual data: there's plenty of + * stuff that isn't used at runtime in between. + */ + + .text : { + *(.text*) + } :text =0x90909090, + + + + .altinstructions : { *(.altinstructions) } :text + .altinstr_replacement : { *(.altinstr_replacement) } :text + + __ex_table : { *(__ex_table) } :text + + /DISCARD/ : { + *(.discard) + *(.discard.*) + *(__bug_table) + } +} + +/* + * Very old versions of ld do not recognize this name token; use the constant. + */ +#define PT_GNU_EH_FRAME 0x6474e550 + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} diff --git a/arch/x86/entry/vdso/common/vgetcpu.c b/arch/x86/entry/vdso/common/vgetcpu.c new file mode 100644 index 000000000000..e4640306b2e3 --- /dev/null +++ b/arch/x86/entry/vdso/common/vgetcpu.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2006 Andi Kleen, SUSE Labs. + * + * Fast user context implementation of getcpu() + */ + +#include +#include +#include +#include + +notrace long +__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) +{ + vdso_read_cpunode(cpu, node); + + return 0; +} + +long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) + __attribute__((weak, alias("__vdso_getcpu"))); diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c deleted file mode 100644 index 0debc194bd78..000000000000 --- a/arch/x86/entry/vdso/vclock_gettime.c +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Fast user context implementation of clock_gettime, gettimeofday, and time. - * - * Copyright 2006 Andi Kleen, SUSE Labs. - * Copyright 2019 ARM Limited - * - * 32 Bit compat layer by Stefani Seibold - * sponsored by Rohde & Schwarz GmbH & Co. 
KG Munich/Germany - */ -#include -#include -#include -#include - -#include "../../../../lib/vdso/gettimeofday.c" - -int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) -{ - return __cvdso_gettimeofday(tv, tz); -} - -int gettimeofday(struct __kernel_old_timeval *, struct timezone *) - __attribute__((weak, alias("__vdso_gettimeofday"))); - -__kernel_old_time_t __vdso_time(__kernel_old_time_t *t) -{ - return __cvdso_time(t); -} - -__kernel_old_time_t time(__kernel_old_time_t *t) __attribute__((weak, alias("__vdso_time"))); - - -#if defined(CONFIG_X86_64) && !defined(BUILD_VDSO32_64) -/* both 64-bit and x32 use these */ -int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) -{ - return __cvdso_clock_gettime(clock, ts); -} - -int clock_gettime(clockid_t, struct __kernel_timespec *) - __attribute__((weak, alias("__vdso_clock_gettime"))); - -int __vdso_clock_getres(clockid_t clock, - struct __kernel_timespec *res) -{ - return __cvdso_clock_getres(clock, res); -} -int clock_getres(clockid_t, struct __kernel_timespec *) - __attribute__((weak, alias("__vdso_clock_getres"))); - -#else -/* i386 only */ -int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) -{ - return __cvdso_clock_gettime32(clock, ts); -} - -int clock_gettime(clockid_t, struct old_timespec32 *) - __attribute__((weak, alias("__vdso_clock_gettime"))); - -int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) -{ - return __cvdso_clock_gettime(clock, ts); -} - -int clock_gettime64(clockid_t, struct __kernel_timespec *) - __attribute__((weak, alias("__vdso_clock_gettime64"))); - -int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res) -{ - return __cvdso_clock_getres_time32(clock, res); -} - -int clock_getres(clockid_t, struct old_timespec32 *) - __attribute__((weak, alias("__vdso_clock_getres"))); -#endif diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S deleted file mode 100644 index ec1ac191a057..000000000000 --- a/arch/x86/entry/vdso/vdso-layout.lds.S +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#include -#include -#include - -/* - * Linker script for vDSO. This is an ELF shared object prelinked to - * its virtual address, and with only one read-only segment. - * This script controls its layout. - */ - -SECTIONS -{ - /* - * User/kernel shared data is before the vDSO. This may be a little - * uglier than putting it after the vDSO, but it avoids issues with - * non-allocatable things that dangle past the end of the PT_LOAD - * segment. - */ - - VDSO_VVAR_SYMS - - vclock_pages = VDSO_VCLOCK_PAGES_START(vdso_u_data); - pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE; - hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE; - - . = SIZEOF_HEADERS; - - .hash : { *(.hash) } :text - .gnu.hash : { *(.gnu.hash) } - .dynsym : { *(.dynsym) } - .dynstr : { *(.dynstr) } - .gnu.version : { *(.gnu.version) } - .gnu.version_d : { *(.gnu.version_d) } - .gnu.version_r : { *(.gnu.version_r) } - - .dynamic : { *(.dynamic) } :text :dynamic - - .rodata : { - *(.rodata*) - *(.data*) - *(.sdata*) - *(.got.plt) *(.got) - *(.gnu.linkonce.d.*) - *(.bss*) - *(.dynbss*) - *(.gnu.linkonce.b.*) - } :text - - /* - * Discard .note.gnu.property sections which are unused and have - * different alignment requirement from vDSO note sections. 
- */ - /DISCARD/ : { - *(.note.gnu.property) - } - .note : { *(.note.*) } :text :note - - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr - .eh_frame : { KEEP (*(.eh_frame)) } :text - - - /* - * Text is well-separated from actual data: there's plenty of - * stuff that isn't used at runtime in between. - */ - - .text : { - *(.text*) - } :text =0x90909090, - - - - .altinstructions : { *(.altinstructions) } :text - .altinstr_replacement : { *(.altinstr_replacement) } :text - - __ex_table : { *(__ex_table) } :text - - /DISCARD/ : { - *(.discard) - *(.discard.*) - *(__bug_table) - } -} - -/* - * Very old versions of ld do not recognize this name token; use the constant. - */ -#define PT_GNU_EH_FRAME 0x6474e550 - -/* - * We must supply the ELF program headers explicitly to get just one - * PT_LOAD segment, and set the flags explicitly to make segments read-only. - */ -PHDRS -{ - text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ - dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ - note PT_NOTE FLAGS(4); /* PF_R */ - eh_frame_hdr PT_GNU_EH_FRAME; -} diff --git a/arch/x86/entry/vdso/vdso-note.S b/arch/x86/entry/vdso/vdso-note.S deleted file mode 100644 index 79423170118f..000000000000 --- a/arch/x86/entry/vdso/vdso-note.S +++ /dev/null @@ -1,15 +0,0 @@ -/* - * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. - * Here we can supply some information useful to userland. - */ - -#include -#include -#include -#include - -ELFNOTE_START(Linux, 0, "a") - .long LINUX_VERSION_CODE -ELFNOTE_END - -BUILD_SALT diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S deleted file mode 100644 index 0bab5f4af6d1..000000000000 --- a/arch/x86/entry/vdso/vdso.lds.S +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Linker script for 64-bit vDSO. - * We #include the file to define the layout details. - * - * This file defines the version script giving the user-exported symbols in - * the DSO. - */ - -#define BUILD_VDSO64 - -#include "vdso-layout.lds.S" - -/* - * This controls what userland symbols we export from the vDSO. - */ -VERSION { - LINUX_2.6 { - global: - clock_gettime; - __vdso_clock_gettime; - gettimeofday; - __vdso_gettimeofday; - getcpu; - __vdso_getcpu; - time; - __vdso_time; - clock_getres; - __vdso_clock_getres; -#ifdef CONFIG_X86_SGX - __vdso_sgx_enter_enclave; -#endif - getrandom; - __vdso_getrandom; - local: *; - }; -} diff --git a/arch/x86/entry/vdso/vdso32/Makefile b/arch/x86/entry/vdso/vdso32/Makefile new file mode 100644 index 000000000000..add6afb484ba --- /dev/null +++ b/arch/x86/entry/vdso/vdso32/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 32-bit vDSO images for x86. +# + +# The vDSOs built in this directory +vdsos-y := 32 + +# Files to link into the vDSO: +vobjs-y := note.o vclock_gettime.o vgetcpu.o +vobjs-y += system_call.o sigreturn.o + +# Compilation flags +flags-y := -DBUILD_VDSO32 -m32 -mregparm=0 +flags-$(CONFIG_X86_64) += -include $(src)/fake_32bit_build.h +flags-remove-y := -m64 + +# The location of this include matters! 
+include $(src)/../common/Makefile.include + +# Linker options for the vdso +VDSO_LDFLAGS_32 := -m elf_i386 -soname linux-gate.so.1 + +$(obj)/vdso32.so.dbg: $(vobjs) diff --git a/arch/x86/entry/vdso/vdso32/note.S b/arch/x86/entry/vdso/vdso32/note.S index 2cbd39939dc6..62d8aa51ce99 100644 --- a/arch/x86/entry/vdso/vdso32/note.S +++ b/arch/x86/entry/vdso/vdso32/note.S @@ -1,18 +1 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. - * Here we can supply some information useful to userland. - */ - -#include -#include -#include - -/* Ideally this would use UTS_NAME, but using a quoted string here - doesn't work. Remember to change this when changing the - kernel's name. */ -ELFNOTE_START(Linux, 0, "a") - .long LINUX_VERSION_CODE -ELFNOTE_END - -BUILD_SALT +#include "common/note.S" diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index d33c6513fd2c..2a15634bbe75 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -52,7 +52,7 @@ __kernel_vsyscall: #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" -#ifdef CONFIG_X86_64 +#ifdef BUILD_VDSO32_64 /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */ ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \ SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32 diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c index 86981decfea8..1481f0021b9f 100644 --- a/arch/x86/entry/vdso/vdso32/vclock_gettime.c +++ b/arch/x86/entry/vdso/vdso32/vclock_gettime.c @@ -1,4 +1 @@ -// SPDX-License-Identifier: GPL-2.0 -#define BUILD_VDSO32 -#include "fake_32bit_build.h" -#include "../vclock_gettime.c" +#include "common/vclock_gettime.c" diff --git a/arch/x86/entry/vdso/vdso32/vdso32.lds.S b/arch/x86/entry/vdso/vdso32/vdso32.lds.S index 8a3be07006bb..8a853543fc0d 100644 --- a/arch/x86/entry/vdso/vdso32/vdso32.lds.S +++ b/arch/x86/entry/vdso/vdso32/vdso32.lds.S @@ -11,7 +11,7 @@ #define BUILD_VDSO32 -#include "../vdso-layout.lds.S" +#include "common/vdso-layout.lds.S" /* The ELF entry point can be used to set the AT_SYSINFO value. */ ENTRY(__kernel_vsyscall); diff --git a/arch/x86/entry/vdso/vdso32/vgetcpu.c b/arch/x86/entry/vdso/vdso32/vgetcpu.c index 3a9791f5e998..00cc8325a020 100644 --- a/arch/x86/entry/vdso/vdso32/vgetcpu.c +++ b/arch/x86/entry/vdso/vdso32/vgetcpu.c @@ -1,3 +1 @@ -// SPDX-License-Identifier: GPL-2.0 -#include "fake_32bit_build.h" -#include "../vgetcpu.c" +#include "common/vgetcpu.c" diff --git a/arch/x86/entry/vdso/vdso64/Makefile b/arch/x86/entry/vdso/vdso64/Makefile new file mode 100644 index 000000000000..bfffaf1aeecc --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/Makefile @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# 64-bit vDSO images for x86. +# + +# The vDSOs built in this directory +vdsos-y := 64 +vdsos-$(CONFIG_X86_X32_ABI) += x32 + +# Files to link into the vDSO: +vobjs-y := note.o vclock_gettime.o vgetcpu.o +vobjs-y += vgetrandom.o vgetrandom-chacha.o +vobjs-$(CONFIG_X86_SGX) += vsgx.o + +# Compilation flags +flags-y := -DBUILD_VDSO64 -m64 -mcmodel=small + +# The location of this include matters! +include $(src)/../common/Makefile.include + +# +# X32 processes use x32 vDSO to access 64bit kernel data. +# +# Build x32 vDSO image: +# 1. Compile x32 vDSO as 64bit. +# 2. Convert object files to x32. +# 3. 
Build x32 VDSO image with x32 objects, which contains 64bit codes +# so that it can reach 64bit address space with 64bit pointers. +# + +# Convert 64bit object file to x32 for x32 vDSO. +quiet_cmd_x32 = X32 $@ + cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@ + +$(obj)/%-x32.o: $(obj)/%.o FORCE + $(call if_changed,x32) + +vobjsx32 = $(patsubst %.o,%-x32.o,$(vobjs)) +targets += $(patsubst %.o,%-x32.o,$(vobjs-y)) + +# Linker options for the vdso +VDSO_LDFLAGS_64 := -m elf_x86_64 -soname linux-vdso.so.1 -z max-page-size=4096 +VDSO_LDFLAGS_x32 := $(subst elf_x86_64,elf32_x86_64,$(VDSO_LDFLAGS_64)) + +$(obj)/vdso64.so.dbg: $(vobjs) +$(obj)/vdsox32.so.dbg: $(vobjsx32) diff --git a/arch/x86/entry/vdso/vdso64/note.S b/arch/x86/entry/vdso/vdso64/note.S new file mode 100644 index 000000000000..62d8aa51ce99 --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/note.S @@ -0,0 +1 @@ +#include "common/note.S" diff --git a/arch/x86/entry/vdso/vdso64/vclock_gettime.c b/arch/x86/entry/vdso/vdso64/vclock_gettime.c new file mode 100644 index 000000000000..1481f0021b9f --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vclock_gettime.c @@ -0,0 +1 @@ +#include "common/vclock_gettime.c" diff --git a/arch/x86/entry/vdso/vdso64/vdso64.lds.S b/arch/x86/entry/vdso/vdso64/vdso64.lds.S new file mode 100644 index 000000000000..5ce3f2b6373a --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vdso64.lds.S @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linker script for 64-bit vDSO. + * We #include the file to define the layout details. + * + * This file defines the version script giving the user-exported symbols in + * the DSO. + */ + +#define BUILD_VDSO64 + +#include "common/vdso-layout.lds.S" + +/* + * This controls what userland symbols we export from the vDSO. + */ +VERSION { + LINUX_2.6 { + global: + clock_gettime; + __vdso_clock_gettime; + gettimeofday; + __vdso_gettimeofday; + getcpu; + __vdso_getcpu; + time; + __vdso_time; + clock_getres; + __vdso_clock_getres; +#ifdef CONFIG_X86_SGX + __vdso_sgx_enter_enclave; +#endif + getrandom; + __vdso_getrandom; + local: *; + }; +} diff --git a/arch/x86/entry/vdso/vdso64/vdsox32.lds.S b/arch/x86/entry/vdso/vdso64/vdsox32.lds.S new file mode 100644 index 000000000000..3dbd20c8dacc --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vdsox32.lds.S @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linker script for x32 vDSO. + * We #include the file to define the layout details. + * + * This file defines the version script giving the user-exported symbols in + * the DSO. + */ + +#define BUILD_VDSOX32 + +#include "common/vdso-layout.lds.S" + +/* + * This controls what userland symbols we export from the vDSO. + */ +VERSION { + LINUX_2.6 { + global: + __vdso_clock_gettime; + __vdso_gettimeofday; + __vdso_getcpu; + __vdso_time; + __vdso_clock_getres; + local: *; + }; +} diff --git a/arch/x86/entry/vdso/vdso64/vgetcpu.c b/arch/x86/entry/vdso/vdso64/vgetcpu.c new file mode 100644 index 000000000000..00cc8325a020 --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vgetcpu.c @@ -0,0 +1 @@ +#include "common/vgetcpu.c" diff --git a/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S b/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S new file mode 100644 index 000000000000..bcba5639b8ee --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vgetrandom-chacha.S @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022-2024 Jason A. Donenfeld . All Rights Reserved. 
+ */ + +#include +#include + +.section .rodata, "a" +.align 16 +CONSTANTS: .octa 0x6b20657479622d323320646e61707865 +.text + +/* + * Very basic SSE2 implementation of ChaCha20. Produces a given positive number + * of blocks of output with a nonce of 0, taking an input key and 8-byte + * counter. Importantly does not spill to the stack. Its arguments are: + * + * rdi: output bytes + * rsi: 32-byte key input + * rdx: 8-byte counter input/output + * rcx: number of 64-byte blocks to write to output + */ +SYM_FUNC_START(__arch_chacha20_blocks_nostack) + +.set output, %rdi +.set key, %rsi +.set counter, %rdx +.set nblocks, %rcx +.set i, %al +/* xmm registers are *not* callee-save. */ +.set temp, %xmm0 +.set state0, %xmm1 +.set state1, %xmm2 +.set state2, %xmm3 +.set state3, %xmm4 +.set copy0, %xmm5 +.set copy1, %xmm6 +.set copy2, %xmm7 +.set copy3, %xmm8 +.set one, %xmm9 + + /* copy0 = "expand 32-byte k" */ + movaps CONSTANTS(%rip),copy0 + /* copy1,copy2 = key */ + movups 0x00(key),copy1 + movups 0x10(key),copy2 + /* copy3 = counter || zero nonce */ + movq 0x00(counter),copy3 + /* one = 1 || 0 */ + movq $1,%rax + movq %rax,one + +.Lblock: + /* state0,state1,state2,state3 = copy0,copy1,copy2,copy3 */ + movdqa copy0,state0 + movdqa copy1,state1 + movdqa copy2,state2 + movdqa copy3,state3 + + movb $10,i +.Lpermute: + /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */ + paddd state1,state0 + pxor state0,state3 + movdqa state3,temp + pslld $16,temp + psrld $16,state3 + por temp,state3 + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */ + paddd state3,state2 + pxor state2,state1 + movdqa state1,temp + pslld $12,temp + psrld $20,state1 + por temp,state1 + + /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */ + paddd state1,state0 + pxor state0,state3 + movdqa state3,temp + pslld $8,temp + psrld $24,state3 + por temp,state3 + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */ + paddd state3,state2 + pxor state2,state1 + movdqa state1,temp + pslld $7,temp + psrld $25,state1 + por temp,state1 + + /* state1[0,1,2,3] = state1[1,2,3,0] */ + pshufd $0x39,state1,state1 + /* state2[0,1,2,3] = state2[2,3,0,1] */ + pshufd $0x4e,state2,state2 + /* state3[0,1,2,3] = state3[3,0,1,2] */ + pshufd $0x93,state3,state3 + + /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */ + paddd state1,state0 + pxor state0,state3 + movdqa state3,temp + pslld $16,temp + psrld $16,state3 + por temp,state3 + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */ + paddd state3,state2 + pxor state2,state1 + movdqa state1,temp + pslld $12,temp + psrld $20,state1 + por temp,state1 + + /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */ + paddd state1,state0 + pxor state0,state3 + movdqa state3,temp + pslld $8,temp + psrld $24,state3 + por temp,state3 + + /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */ + paddd state3,state2 + pxor state2,state1 + movdqa state1,temp + pslld $7,temp + psrld $25,state1 + por temp,state1 + + /* state1[0,1,2,3] = state1[3,0,1,2] */ + pshufd $0x93,state1,state1 + /* state2[0,1,2,3] = state2[2,3,0,1] */ + pshufd $0x4e,state2,state2 + /* state3[0,1,2,3] = state3[1,2,3,0] */ + pshufd $0x39,state3,state3 + + decb i + jnz .Lpermute + + /* output0 = state0 + copy0 */ + paddd copy0,state0 + movups state0,0x00(output) + /* output1 = state1 + copy1 */ + paddd copy1,state1 + movups state1,0x10(output) + /* output2 = state2 + copy2 */ + paddd copy2,state2 + movups state2,0x20(output) + /* output3 = state3 + copy3 */ + paddd copy3,state3 + movups 
state3,0x30(output) + + /* ++copy3.counter */ + paddq one,copy3 + + /* output += 64, --nblocks */ + addq $64,output + decq nblocks + jnz .Lblock + + /* counter = copy3.counter */ + movq copy3,0x00(counter) + + /* Zero out the potentially sensitive regs, in case nothing uses these again. */ + pxor state0,state0 + pxor state1,state1 + pxor state2,state2 + pxor state3,state3 + pxor copy1,copy1 + pxor copy2,copy2 + pxor temp,temp + + ret +SYM_FUNC_END(__arch_chacha20_blocks_nostack) diff --git a/arch/x86/entry/vdso/vdso64/vgetrandom.c b/arch/x86/entry/vdso/vdso64/vgetrandom.c new file mode 100644 index 000000000000..6a95d36b12d9 --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vgetrandom.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022-2024 Jason A. Donenfeld . All Rights Reserved. + */ +#include + +#include "lib/vdso/getrandom.c" + +ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) +{ + return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); +} + +ssize_t getrandom(void *, size_t, unsigned int, void *, size_t) + __attribute__((weak, alias("__vdso_getrandom"))); diff --git a/arch/x86/entry/vdso/vdso64/vsgx.S b/arch/x86/entry/vdso/vdso64/vsgx.S new file mode 100644 index 000000000000..37a3d4c02366 --- /dev/null +++ b/arch/x86/entry/vdso/vdso64/vsgx.S @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include +#include +#include + +#include "extable.h" + +/* Relative to %rbp. */ +#define SGX_ENCLAVE_OFFSET_OF_RUN 16 + +/* The offsets relative to struct sgx_enclave_run. */ +#define SGX_ENCLAVE_RUN_TCS 0 +#define SGX_ENCLAVE_RUN_LEAF 8 +#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12 +#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14 +#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16 +#define SGX_ENCLAVE_RUN_USER_HANDLER 24 +#define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used */ +#define SGX_ENCLAVE_RUN_RESERVED_START 40 +#define SGX_ENCLAVE_RUN_RESERVED_END 256 + +.code64 +.section .text, "ax" + +SYM_FUNC_START(__vdso_sgx_enter_enclave) + /* Prolog */ + .cfi_startproc + push %rbp + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rbp, 0 + mov %rsp, %rbp + .cfi_def_cfa_register %rbp + push %rbx + .cfi_rel_offset %rbx, -8 + + mov %ecx, %eax +.Lenter_enclave: + /* EENTER <= function <= ERESUME */ + cmp $EENTER, %eax + jb .Linvalid_input + cmp $ERESUME, %eax + ja .Linvalid_input + + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx + + /* Validate that the reserved area contains only zeros. */ + mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx +1: + cmpq $0, (%rcx, %rbx) + jne .Linvalid_input + add $8, %rbx + cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx + jne 1b + + /* Load TCS and AEP */ + mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx + lea .Lasync_exit_pointer(%rip), %rcx + + /* Single ENCLU serving as both EENTER and AEP (ERESUME) */ +.Lasync_exit_pointer: +.Lenclu_eenter_eresume: + enclu + + /* EEXIT jumps here unless the enclave is doing something fancy. */ + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx + + /* Set exit_reason. */ + movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx) + + /* Invoke userspace's exit handler if one was provided. */ +.Lhandle_exit: + cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx) + jne .Linvoke_userspace_handler + + /* Success, in the sense that ENCLU was attempted. */ + xor %eax, %eax + +.Lout: + pop %rbx + leave + .cfi_def_cfa %rsp, 8 + RET + + /* The out-of-line code runs with the pre-leave stack frame. 
*/ + .cfi_def_cfa %rbp, 16 + +.Linvalid_input: + mov $(-EINVAL), %eax + jmp .Lout + +.Lhandle_exception: + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx + + /* Set the exception info. */ + mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx) + mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx) + mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx) + mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx) + jmp .Lhandle_exit + +.Linvoke_userspace_handler: + /* Pass the untrusted RSP (at exit) to the callback via %rcx. */ + mov %rsp, %rcx + + /* Save struct sgx_enclave_exception %rbx is about to be clobbered. */ + mov %rbx, %rax + + /* Save the untrusted RSP offset in %rbx (non-volatile register). */ + mov %rsp, %rbx + and $0xf, %rbx + + /* + * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned + * _after_ pushing the parameters on the stack, hence the bonus push. + */ + and $-0x10, %rsp + push %rax + + /* Push struct sgx_enclave_exception as a param to the callback. */ + push %rax + + /* Clear RFLAGS.DF per x86_64 ABI */ + cld + + /* + * Load the callback pointer to %rax and lfence for LVI (load value + * injection) protection before making the call. + */ + mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax + lfence + call *%rax + + /* Undo the post-exit %rsp adjustment. */ + lea 0x10(%rsp, %rbx), %rsp + + /* + * If the return from callback is zero or negative, return immediately, + * else re-execute ENCLU with the positive return value interpreted as + * the requested ENCLU function. + */ + cmp $0, %eax + jle .Lout + jmp .Lenter_enclave + + .cfi_endproc + +_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception) + +SYM_FUNC_END(__vdso_sgx_enter_enclave) diff --git a/arch/x86/entry/vdso/vdsox32.lds.S b/arch/x86/entry/vdso/vdsox32.lds.S deleted file mode 100644 index 16a8050a4fb6..000000000000 --- a/arch/x86/entry/vdso/vdsox32.lds.S +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Linker script for x32 vDSO. - * We #include the file to define the layout details. - * - * This file defines the version script giving the user-exported symbols in - * the DSO. - */ - -#define BUILD_VDSOX32 - -#include "vdso-layout.lds.S" - -/* - * This controls what userland symbols we export from the vDSO. - */ -VERSION { - LINUX_2.6 { - global: - __vdso_clock_gettime; - __vdso_gettimeofday; - __vdso_getcpu; - __vdso_time; - __vdso_clock_getres; - local: *; - }; -} diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c deleted file mode 100644 index e4640306b2e3..000000000000 --- a/arch/x86/entry/vdso/vgetcpu.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright 2006 Andi Kleen, SUSE Labs. - * - * Fast user context implementation of getcpu() - */ - -#include -#include -#include -#include - -notrace long -__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) -{ - vdso_read_cpunode(cpu, node); - - return 0; -} - -long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) - __attribute__((weak, alias("__vdso_getcpu"))); diff --git a/arch/x86/entry/vdso/vgetrandom-chacha.S b/arch/x86/entry/vdso/vgetrandom-chacha.S deleted file mode 100644 index bcba5639b8ee..000000000000 --- a/arch/x86/entry/vdso/vgetrandom-chacha.S +++ /dev/null @@ -1,178 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2022-2024 Jason A. Donenfeld . All Rights Reserved. 
- */ - -#include -#include - -.section .rodata, "a" -.align 16 -CONSTANTS: .octa 0x6b20657479622d323320646e61707865 -.text - -/* - * Very basic SSE2 implementation of ChaCha20. Produces a given positive number - * of blocks of output with a nonce of 0, taking an input key and 8-byte - * counter. Importantly does not spill to the stack. Its arguments are: - * - * rdi: output bytes - * rsi: 32-byte key input - * rdx: 8-byte counter input/output - * rcx: number of 64-byte blocks to write to output - */ -SYM_FUNC_START(__arch_chacha20_blocks_nostack) - -.set output, %rdi -.set key, %rsi -.set counter, %rdx -.set nblocks, %rcx -.set i, %al -/* xmm registers are *not* callee-save. */ -.set temp, %xmm0 -.set state0, %xmm1 -.set state1, %xmm2 -.set state2, %xmm3 -.set state3, %xmm4 -.set copy0, %xmm5 -.set copy1, %xmm6 -.set copy2, %xmm7 -.set copy3, %xmm8 -.set one, %xmm9 - - /* copy0 = "expand 32-byte k" */ - movaps CONSTANTS(%rip),copy0 - /* copy1,copy2 = key */ - movups 0x00(key),copy1 - movups 0x10(key),copy2 - /* copy3 = counter || zero nonce */ - movq 0x00(counter),copy3 - /* one = 1 || 0 */ - movq $1,%rax - movq %rax,one - -.Lblock: - /* state0,state1,state2,state3 = copy0,copy1,copy2,copy3 */ - movdqa copy0,state0 - movdqa copy1,state1 - movdqa copy2,state2 - movdqa copy3,state3 - - movb $10,i -.Lpermute: - /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */ - paddd state1,state0 - pxor state0,state3 - movdqa state3,temp - pslld $16,temp - psrld $16,state3 - por temp,state3 - - /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */ - paddd state3,state2 - pxor state2,state1 - movdqa state1,temp - pslld $12,temp - psrld $20,state1 - por temp,state1 - - /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */ - paddd state1,state0 - pxor state0,state3 - movdqa state3,temp - pslld $8,temp - psrld $24,state3 - por temp,state3 - - /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */ - paddd state3,state2 - pxor state2,state1 - movdqa state1,temp - pslld $7,temp - psrld $25,state1 - por temp,state1 - - /* state1[0,1,2,3] = state1[1,2,3,0] */ - pshufd $0x39,state1,state1 - /* state2[0,1,2,3] = state2[2,3,0,1] */ - pshufd $0x4e,state2,state2 - /* state3[0,1,2,3] = state3[3,0,1,2] */ - pshufd $0x93,state3,state3 - - /* state0 += state1, state3 = rotl32(state3 ^ state0, 16) */ - paddd state1,state0 - pxor state0,state3 - movdqa state3,temp - pslld $16,temp - psrld $16,state3 - por temp,state3 - - /* state2 += state3, state1 = rotl32(state1 ^ state2, 12) */ - paddd state3,state2 - pxor state2,state1 - movdqa state1,temp - pslld $12,temp - psrld $20,state1 - por temp,state1 - - /* state0 += state1, state3 = rotl32(state3 ^ state0, 8) */ - paddd state1,state0 - pxor state0,state3 - movdqa state3,temp - pslld $8,temp - psrld $24,state3 - por temp,state3 - - /* state2 += state3, state1 = rotl32(state1 ^ state2, 7) */ - paddd state3,state2 - pxor state2,state1 - movdqa state1,temp - pslld $7,temp - psrld $25,state1 - por temp,state1 - - /* state1[0,1,2,3] = state1[3,0,1,2] */ - pshufd $0x93,state1,state1 - /* state2[0,1,2,3] = state2[2,3,0,1] */ - pshufd $0x4e,state2,state2 - /* state3[0,1,2,3] = state3[1,2,3,0] */ - pshufd $0x39,state3,state3 - - decb i - jnz .Lpermute - - /* output0 = state0 + copy0 */ - paddd copy0,state0 - movups state0,0x00(output) - /* output1 = state1 + copy1 */ - paddd copy1,state1 - movups state1,0x10(output) - /* output2 = state2 + copy2 */ - paddd copy2,state2 - movups state2,0x20(output) - /* output3 = state3 + copy3 */ - paddd copy3,state3 - movups 
state3,0x30(output) - - /* ++copy3.counter */ - paddq one,copy3 - - /* output += 64, --nblocks */ - addq $64,output - decq nblocks - jnz .Lblock - - /* counter = copy3.counter */ - movq copy3,0x00(counter) - - /* Zero out the potentially sensitive regs, in case nothing uses these again. */ - pxor state0,state0 - pxor state1,state1 - pxor state2,state2 - pxor state3,state3 - pxor copy1,copy1 - pxor copy2,copy2 - pxor temp,temp - - ret -SYM_FUNC_END(__arch_chacha20_blocks_nostack) diff --git a/arch/x86/entry/vdso/vgetrandom.c b/arch/x86/entry/vdso/vgetrandom.c deleted file mode 100644 index 430862b8977c..000000000000 --- a/arch/x86/entry/vdso/vgetrandom.c +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2022-2024 Jason A. Donenfeld . All Rights Reserved. - */ -#include - -#include "../../../../lib/vdso/getrandom.c" - -ssize_t __vdso_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len) -{ - return __cvdso_getrandom(buffer, len, flags, opaque_state, opaque_len); -} - -ssize_t getrandom(void *, size_t, unsigned int, void *, size_t) - __attribute__((weak, alias("__vdso_getrandom"))); diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S deleted file mode 100644 index 37a3d4c02366..000000000000 --- a/arch/x86/entry/vdso/vsgx.S +++ /dev/null @@ -1,150 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#include -#include -#include - -#include "extable.h" - -/* Relative to %rbp. */ -#define SGX_ENCLAVE_OFFSET_OF_RUN 16 - -/* The offsets relative to struct sgx_enclave_run. */ -#define SGX_ENCLAVE_RUN_TCS 0 -#define SGX_ENCLAVE_RUN_LEAF 8 -#define SGX_ENCLAVE_RUN_EXCEPTION_VECTOR 12 -#define SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE 14 -#define SGX_ENCLAVE_RUN_EXCEPTION_ADDR 16 -#define SGX_ENCLAVE_RUN_USER_HANDLER 24 -#define SGX_ENCLAVE_RUN_USER_DATA 32 /* not used */ -#define SGX_ENCLAVE_RUN_RESERVED_START 40 -#define SGX_ENCLAVE_RUN_RESERVED_END 256 - -.code64 -.section .text, "ax" - -SYM_FUNC_START(__vdso_sgx_enter_enclave) - /* Prolog */ - .cfi_startproc - push %rbp - .cfi_adjust_cfa_offset 8 - .cfi_rel_offset %rbp, 0 - mov %rsp, %rbp - .cfi_def_cfa_register %rbp - push %rbx - .cfi_rel_offset %rbx, -8 - - mov %ecx, %eax -.Lenter_enclave: - /* EENTER <= function <= ERESUME */ - cmp $EENTER, %eax - jb .Linvalid_input - cmp $ERESUME, %eax - ja .Linvalid_input - - mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx - - /* Validate that the reserved area contains only zeros. */ - mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx -1: - cmpq $0, (%rcx, %rbx) - jne .Linvalid_input - add $8, %rbx - cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx - jne 1b - - /* Load TCS and AEP */ - mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx - lea .Lasync_exit_pointer(%rip), %rcx - - /* Single ENCLU serving as both EENTER and AEP (ERESUME) */ -.Lasync_exit_pointer: -.Lenclu_eenter_eresume: - enclu - - /* EEXIT jumps here unless the enclave is doing something fancy. */ - mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx - - /* Set exit_reason. */ - movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx) - - /* Invoke userspace's exit handler if one was provided. */ -.Lhandle_exit: - cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx) - jne .Linvoke_userspace_handler - - /* Success, in the sense that ENCLU was attempted. */ - xor %eax, %eax - -.Lout: - pop %rbx - leave - .cfi_def_cfa %rsp, 8 - RET - - /* The out-of-line code runs with the pre-leave stack frame. 
*/ - .cfi_def_cfa %rbp, 16 - -.Linvalid_input: - mov $(-EINVAL), %eax - jmp .Lout - -.Lhandle_exception: - mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx - - /* Set the exception info. */ - mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx) - mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx) - mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx) - mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx) - jmp .Lhandle_exit - -.Linvoke_userspace_handler: - /* Pass the untrusted RSP (at exit) to the callback via %rcx. */ - mov %rsp, %rcx - - /* Save struct sgx_enclave_exception %rbx is about to be clobbered. */ - mov %rbx, %rax - - /* Save the untrusted RSP offset in %rbx (non-volatile register). */ - mov %rsp, %rbx - and $0xf, %rbx - - /* - * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned - * _after_ pushing the parameters on the stack, hence the bonus push. - */ - and $-0x10, %rsp - push %rax - - /* Push struct sgx_enclave_exception as a param to the callback. */ - push %rax - - /* Clear RFLAGS.DF per x86_64 ABI */ - cld - - /* - * Load the callback pointer to %rax and lfence for LVI (load value - * injection) protection before making the call. - */ - mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax - lfence - call *%rax - - /* Undo the post-exit %rsp adjustment. */ - lea 0x10(%rsp, %rbx), %rsp - - /* - * If the return from callback is zero or negative, return immediately, - * else re-execute ENCLU with the positive return value interpreted as - * the requested ENCLU function. - */ - cmp $0, %eax - jle .Lout - jmp .Lenter_enclave - - .cfi_endproc - -_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception) - -SYM_FUNC_END(__vdso_sgx_enter_enclave) -- cgit v1.2.3 From 6e150b71019f386a021004fafea9ef7189bc6aea Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:25:58 -0800 Subject: x86/entry/vdso32: Don't rely on int80_landing_pad for adjusting ip There is no fundamental reason to use the int80_landing_pad symbol to adjust ip when moving the vdso. If ip falls within the vdso, and the vdso is moved, we should change the ip accordingly, regardless of mode or location within the vdso. This *currently* can only happen on 32 bits, but there isn't any reason not to do so generically. Note that if this is ever possible from a vdso-internal call, then the user space stack will also need to be adjusted (as well as the shadow stack, if enabled.) Fortunately this is not currently the case. At the moment, we don't even consider other threads when moving the vdso. The assumption is that it is only used by process freeze/thaw for migration, where this is not an issue.
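For illustration only, not part of the patch: the new fixup amounts to the following C sketch, where fixup_ip() and its parameter names are invented for the example. The unsigned subtraction is what lets a single compare also reject an ip below the old base, since the offset then wraps to a huge value:

        #include <stdbool.h>
        #include <stdint.h>

        /* Move ip from the old vdso mapping to the new one, if it was inside. */
        static bool fixup_ip(uintptr_t *ip, uintptr_t old_base,
                             uintptr_t new_base, uintptr_t image_size)
        {
                uintptr_t offset = *ip - old_base; /* wraps if *ip < old_base */

                if (offset >= image_size)
                        return false;   /* ip was not inside the old vdso */

                *ip = new_base + offset;
                return true;
        }

Signed-off-by: H.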
Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-5-hpa@zytor.com --- arch/x86/entry/vdso/vma.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 8f98c2d7c7a9..e7fd7517370f 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -65,16 +65,12 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, static void vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) { - if (in_ia32_syscall() && image == &vdso32_image) { - struct pt_regs *regs = current_pt_regs(); - unsigned long vdso_land = image->sym_int80_landing_pad; - unsigned long old_land_addr = vdso_land + - (unsigned long)current->mm->context.vdso; - - /* Fixing userspace landing - look at do_fast_syscall_32 */ - if (regs->ip == old_land_addr) - regs->ip = new_vma->vm_start + vdso_land; - } + struct pt_regs *regs = current_pt_regs(); + unsigned long ipoffset = regs->ip - + (unsigned long)current->mm->context.vdso; + + if (ipoffset < image->size) + regs->ip = new_vma->vm_start + ipoffset; } static int vdso_mremap(const struct vm_special_mapping *sm, -- cgit v1.2.3 From 98d3e996513ad00b7824ea3bece506fc645547dd Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:25:59 -0800 Subject: x86/entry/vdso32: Remove SYSCALL_ENTER_KERNEL macro in sigreturn.S A macro SYSCALL_ENTER_KERNEL was defined in sigreturn.S, with the ability to override it. The override capability, however, is not used anywhere, and the macro name is potentially confusing because it seems to imply that sysenter/syscall could be used here, which is NOT true: the sigreturn system calls MUST use int $0x80. Signed-off-by: H. Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-6-hpa@zytor.com --- arch/x86/entry/vdso/vdso32/sigreturn.S | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/entry/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S index 1bd068f72d4c..965900c6763b 100644 --- a/arch/x86/entry/vdso/vdso32/sigreturn.S +++ b/arch/x86/entry/vdso/vdso32/sigreturn.S @@ -3,10 +3,6 @@ #include #include -#ifndef SYSCALL_ENTER_KERNEL -#define SYSCALL_ENTER_KERNEL int $0x80 -#endif - .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function @@ -16,7 +12,7 @@ __kernel_sigreturn: .LSTART_sigreturn: popl %eax /* XXX does this mean it needs unwind info? */ movl $__NR_sigreturn, %eax - SYSCALL_ENTER_KERNEL + int $0x80 .LEND_sigreturn: SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL) nop @@ -28,7 +24,7 @@ SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL) __kernel_rt_sigreturn: .LSTART_rt_sigreturn: movl $__NR_rt_sigreturn, %eax - SYSCALL_ENTER_KERNEL + int $0x80 .LEND_rt_sigreturn: SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL) nop -- cgit v1.2.3 From 884961618ee51307cc63ab620a0bdd710fa0b0af Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:26:00 -0800 Subject: x86/entry/vdso32: Remove open-coded DWARF in sigreturn.S The vdso32 sigreturn.S contains open-coded DWARF bytecode, which includes a hack for gdb to not try to step back to a previous call instruction when backtracing from a signal handler.
Neither of those is necessary anymore: the backtracing issue is handled by ".cfi_startproc simple" and ".cfi_signal_frame", both of which have been supported for a very long time now, which allows the remaining frame to be built using regular .cfi annotations. Add a few more register offsets to the signal frame just for good measure. Replace the nop on fallthrough of the system call (which should never, ever happen) with a ud2a trap. Signed-off-by: H. Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-7-hpa@zytor.com --- arch/x86/entry/vdso/vdso32/sigreturn.S | 146 ++++++++------------------------- arch/x86/include/asm/dwarf2.h | 1 + arch/x86/kernel/asm-offsets.c | 6 ++ 3 files changed, 39 insertions(+), 114 deletions(-) (limited to 'arch') diff --git a/arch/x86/entry/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S index 965900c6763b..25b0ac4b4bfe 100644 --- a/arch/x86/entry/vdso/vdso32/sigreturn.S +++ b/arch/x86/entry/vdso/vdso32/sigreturn.S @@ -1,136 +1,54 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include #include +#include #include +.macro STARTPROC_SIGNAL_FRAME sc + CFI_STARTPROC simple + CFI_SIGNAL_FRAME + /* -4 as pretcode has already been popped */ + CFI_DEF_CFA esp, \sc - 4 + CFI_OFFSET eip, IA32_SIGCONTEXT_ip + CFI_OFFSET eax, IA32_SIGCONTEXT_ax + CFI_OFFSET ebx, IA32_SIGCONTEXT_bx + CFI_OFFSET ecx, IA32_SIGCONTEXT_cx + CFI_OFFSET edx, IA32_SIGCONTEXT_dx + CFI_OFFSET esp, IA32_SIGCONTEXT_sp + CFI_OFFSET ebp, IA32_SIGCONTEXT_bp + CFI_OFFSET esi, IA32_SIGCONTEXT_si + CFI_OFFSET edi, IA32_SIGCONTEXT_di + CFI_OFFSET es, IA32_SIGCONTEXT_es + CFI_OFFSET cs, IA32_SIGCONTEXT_cs + CFI_OFFSET ss, IA32_SIGCONTEXT_ss + CFI_OFFSET ds, IA32_SIGCONTEXT_ds + CFI_OFFSET eflags, IA32_SIGCONTEXT_flags +.endm + .text .globl __kernel_sigreturn .type __kernel_sigreturn,@function - nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */ ALIGN __kernel_sigreturn: -.LSTART_sigreturn: - popl %eax /* XXX does this mean it needs unwind info? */ + STARTPROC_SIGNAL_FRAME IA32_SIGFRAME_sigcontext + popl %eax + CFI_ADJUST_CFA_OFFSET -4 movl $__NR_sigreturn, %eax int $0x80 -.LEND_sigreturn: SYM_INNER_LABEL(vdso32_sigreturn_landing_pad, SYM_L_GLOBAL) - nop - .size __kernel_sigreturn,.-.LSTART_sigreturn + ud2a + CFI_ENDPROC + .size __kernel_sigreturn,.-__kernel_sigreturn .globl __kernel_rt_sigreturn .type __kernel_rt_sigreturn,@function ALIGN __kernel_rt_sigreturn: -.LSTART_rt_sigreturn: + STARTPROC_SIGNAL_FRAME IA32_RT_SIGFRAME_sigcontext movl $__NR_rt_sigreturn, %eax int $0x80 -.LEND_rt_sigreturn: SYM_INNER_LABEL(vdso32_rt_sigreturn_landing_pad, SYM_L_GLOBAL) - nop - .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn - .previous - - .section .eh_frame,"a",@progbits -.LSTARTFRAMEDLSI1: - .long .LENDCIEDLSI1-.LSTARTCIEDLSI1 -.LSTARTCIEDLSI1: - .long 0 /* CIE ID */ - .byte 1 /* Version number */ - .string "zRS" /* NUL-terminated augmentation string */ - .uleb128 1 /* Code alignment factor */ - .sleb128 -4 /* Data alignment factor */ - .byte 8 /* Return address register column */ - .uleb128 1 /* Augmentation value length */ - .byte 0x1b /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */ - .byte 0 /* DW_CFA_nop */ - .align 4 -.LENDCIEDLSI1: - .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */ -.LSTARTFDEDLSI1: - .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */ - /* HACK: The dwarf2 unwind routines will subtract 1 from the - return address to get an address in the middle of the - presumed call instruction.
Since we didn't get here via - a call, we need to include the nop before the real start - to make up for it. */ - .long .LSTART_sigreturn-1-. /* PC-relative start address */ - .long .LEND_sigreturn-.LSTART_sigreturn+1 - .uleb128 0 /* Augmentation */ - /* What follows are the instructions for the table generation. - We record the locations of each register saved. This is - complicated by the fact that the "CFA" is always assumed to - be the value of the stack pointer in the caller. This means - that we must define the CFA of this body of code to be the - saved value of the stack pointer in the sigcontext. Which - also means that there is no fixed relation to the other - saved registers, which means that we must use DW_CFA_expression - to compute their addresses. It also means that when we - adjust the stack with the popl, we have to do it all over again. */ - -#define do_cfa_expr(offset) \ - .byte 0x0f; /* DW_CFA_def_cfa_expression */ \ - .uleb128 1f-0f; /* length */ \ -0: .byte 0x74; /* DW_OP_breg4 */ \ - .sleb128 offset; /* offset */ \ - .byte 0x06; /* DW_OP_deref */ \ -1: - -#define do_expr(regno, offset) \ - .byte 0x10; /* DW_CFA_expression */ \ - .uleb128 regno; /* regno */ \ - .uleb128 1f-0f; /* length */ \ -0: .byte 0x74; /* DW_OP_breg4 */ \ - .sleb128 offset; /* offset */ \ -1: - - do_cfa_expr(IA32_SIGCONTEXT_sp+4) - do_expr(0, IA32_SIGCONTEXT_ax+4) - do_expr(1, IA32_SIGCONTEXT_cx+4) - do_expr(2, IA32_SIGCONTEXT_dx+4) - do_expr(3, IA32_SIGCONTEXT_bx+4) - do_expr(5, IA32_SIGCONTEXT_bp+4) - do_expr(6, IA32_SIGCONTEXT_si+4) - do_expr(7, IA32_SIGCONTEXT_di+4) - do_expr(8, IA32_SIGCONTEXT_ip+4) - - .byte 0x42 /* DW_CFA_advance_loc 2 -- nop; popl eax. */ - - do_cfa_expr(IA32_SIGCONTEXT_sp) - do_expr(0, IA32_SIGCONTEXT_ax) - do_expr(1, IA32_SIGCONTEXT_cx) - do_expr(2, IA32_SIGCONTEXT_dx) - do_expr(3, IA32_SIGCONTEXT_bx) - do_expr(5, IA32_SIGCONTEXT_bp) - do_expr(6, IA32_SIGCONTEXT_si) - do_expr(7, IA32_SIGCONTEXT_di) - do_expr(8, IA32_SIGCONTEXT_ip) - - .align 4 -.LENDFDEDLSI1: - - .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */ -.LSTARTFDEDLSI2: - .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */ - /* HACK: See above wrt unwind library assumptions. */ - .long .LSTART_rt_sigreturn-1-. /* PC-relative start address */ - .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1 - .uleb128 0 /* Augmentation */ - /* What follows are the instructions for the table generation. - We record the locations of each register saved. This is - slightly less complicated than the above, since we don't - modify the stack pointer in the process. 
*/ - - do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp) - do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax) - do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx) - do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx) - do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx) - do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp) - do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si) - do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di) - do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip) - - .align 4 -.LENDFDEDLSI2: + ud2a + CFI_ENDPROC + .size __kernel_rt_sigreturn,.-__kernel_rt_sigreturn .previous diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 302e11b15da8..09c9684d3ad6 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -20,6 +20,7 @@ #define CFI_RESTORE_STATE .cfi_restore_state #define CFI_UNDEFINED .cfi_undefined #define CFI_ESCAPE .cfi_escape +#define CFI_SIGNAL_FRAME .cfi_signal_frame #ifndef BUILD_VDSO /* diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 25fcde525c68..081816888f7a 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -63,8 +63,14 @@ static void __used common(void) OFFSET(IA32_SIGCONTEXT_bp, sigcontext_32, bp); OFFSET(IA32_SIGCONTEXT_sp, sigcontext_32, sp); OFFSET(IA32_SIGCONTEXT_ip, sigcontext_32, ip); + OFFSET(IA32_SIGCONTEXT_es, sigcontext_32, es); + OFFSET(IA32_SIGCONTEXT_cs, sigcontext_32, cs); + OFFSET(IA32_SIGCONTEXT_ss, sigcontext_32, ss); + OFFSET(IA32_SIGCONTEXT_ds, sigcontext_32, ds); + OFFSET(IA32_SIGCONTEXT_flags, sigcontext_32, flags); BLANK(); + OFFSET(IA32_SIGFRAME_sigcontext, sigframe_ia32, sc); OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext); #endif -- cgit v1.2.3 From 8717b02b8c030dc0c4b55781b59e88def0a1a92f Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:26:01 -0800 Subject: x86/entry/vdso: Include GNU_PROPERTY and GNU_STACK PHDRs Currently the vdso doesn't include .note.gnu.property or a GNU noexec stack annotation (the -z noexecstack in the linker script is ineffective because we specify PHDRs explicitly.) The motivation is that the dynamic linker does not currently check these. However, this is a weak excuse: the vdso*.so are also supposed to be usable as link libraries, and there is no reason why the dynamic linker might not want or need to check these in the future, so add them back in -- it is trivial enough. Use symbolic constants for the PHDR permission flags. [ v4: drop unrelated formatting changes ]
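For illustration only, not part of the patch: a consumer of these program headers might inspect PT_GNU_STACK roughly as in the following C sketch, assuming a glibc-style <elf.h>; wants_exec_stack() is an invented name. The historical default of an executable stack when the annotation is absent is exactly why it is worth emitting:

        #include <elf.h>
        #include <stdbool.h>
        #include <stddef.h>

        /* An absent PT_GNU_STACK has traditionally implied an executable stack. */
        static bool wants_exec_stack(const Elf64_Phdr *phdr, size_t phnum)
        {
                for (size_t i = 0; i < phnum; i++) {
                        if (phdr[i].p_type == PT_GNU_STACK)
                                return (phdr[i].p_flags & PF_X) != 0;
                }
                return true;
        }

Signed-off-by: H. Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-8-hpa@zytor.com --- arch/x86/entry/vdso/common/vdso-layout.lds.S | 38 +++++++++++++++++----------- 1 file changed, 23 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/entry/vdso/common/vdso-layout.lds.S b/arch/x86/entry/vdso/common/vdso-layout.lds.S index ec1ac191a057..a1e30be3e83d 100644 --- a/arch/x86/entry/vdso/common/vdso-layout.lds.S +++ b/arch/x86/entry/vdso/common/vdso-layout.lds.S @@ -47,18 +47,18 @@ SECTIONS *(.gnu.linkonce.b.*) } :text - /* - * Discard .note.gnu.property sections which are unused and have - * different alignment requirement from vDSO note sections.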
- */ - /DISCARD/ : { + .note.gnu.property : { *(.note.gnu.property) - } - .note : { *(.note.*) } :text :note - - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr - .eh_frame : { KEEP (*(.eh_frame)) } :text + } :text :note :gnu_property + .note : { + *(.note*) + } :text :note + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { + KEEP (*(.eh_frame)) + *(.eh_frame.*) + } :text /* * Text is well-separated from actual data: there's plenty of @@ -87,15 +87,23 @@ SECTIONS * Very old versions of ld do not recognize this name token; use the constant. */ #define PT_GNU_EH_FRAME 0x6474e550 +#define PT_GNU_STACK 0x6474e551 +#define PT_GNU_PROPERTY 0x6474e553 /* * We must supply the ELF program headers explicitly to get just one * PT_LOAD segment, and set the flags explicitly to make segments read-only. - */ +*/ +#define PF_R FLAGS(4) +#define PF_RW FLAGS(6) +#define PF_RX FLAGS(5) + PHDRS { - text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ - dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ - note PT_NOTE FLAGS(4); /* PF_R */ - eh_frame_hdr PT_GNU_EH_FRAME; + text PT_LOAD PF_RX FILEHDR PHDRS; + dynamic PT_DYNAMIC PF_R; + note PT_NOTE PF_R; + eh_frame_hdr PT_GNU_EH_FRAME PF_R; + gnu_stack PT_GNU_STACK PF_RW; + gnu_property PT_GNU_PROPERTY PF_R; } -- cgit v1.2.3 From a0636d4c3ad0da0cd6069eb6fef5d2b7d3449378 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:26:02 -0800 Subject: x86/vdso: Abstract out vdso system call internals Abstract out the calling of true system calls from the vdso into macros. It has been a very long time since gcc did not allow %ebx or %ebp in inline asm in 32-bit PIC mode; remove the corresponding hacks. Remove the use of memory output constraints in gettimeofday.h in favor of "memory" clobbers. The resulting code is identical for the current use cases, as the system call is usually a terminal fallback anyway, and it merely complicates the macroization. This patch adds only a handful more lines of code than it removes, and in fact could be made substantially smaller by removing the macros for the argument counts that aren't currently used, however, it seems better to be general from the start. [ v3: remove stray comment from prototyping; remove VDSO_SYSCALL6() since it would require special handling on 32 bits and is currently unused. (Uros Bizjak) Indent nested preprocessor directives. ]
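For illustration only, not part of the patch: hand-expanding VDSO_SYSCALL2(clock_gettime,64,...) on a 64-bit build gives roughly the following C sketch. The function name is invented; the 32-bit suffix is dropped because __sys_nr() ignores it on CONFIG_X86_64, and clockid_t and struct __kernel_timespec are assumed from the kernel headers:

        #include <asm/unistd.h>
        #include <linux/types.h>

        /* Sketch of the 64-bit expansion: nr in, return value out, via %rax. */
        static long clock_gettime_fallback_expanded(clockid_t clkid,
                                                    struct __kernel_timespec *ts)
        {
                register long arg1 asm("rdi") = (long)clkid;
                register long arg2 asm("rsi") = (long)ts;
                long ret = __NR_clock_gettime;

                asm volatile("syscall"
                             : "+a" (ret)
                             : "r" (arg1), "r" (arg2)
                             : "rcx", "r11", "memory");
                return ret;
        }

Signed-off-by: H.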
Peter Anvin (Intel) Signed-off-by: Dave Hansen Acked-by: Uros Bizjak Link: https://patch.msgid.link/20251216212606.1325678-9-hpa@zytor.com --- arch/x86/include/asm/vdso/gettimeofday.h | 108 +++---------------------------- arch/x86/include/asm/vdso/sys_call.h | 103 +++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 100 deletions(-) create mode 100644 arch/x86/include/asm/vdso/sys_call.h (limited to 'arch') diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h index 73b2e7ee8f0f..3cf214cc4a75 100644 --- a/arch/x86/include/asm/vdso/gettimeofday.h +++ b/arch/x86/include/asm/vdso/gettimeofday.h @@ -18,6 +18,7 @@ #include #include #include +#include #define VDSO_HAS_TIME 1 @@ -53,130 +54,37 @@ extern struct ms_hyperv_tsc_page hvclock_page __attribute__((visibility("hidden"))); #endif -#ifndef BUILD_VDSO32 - static __always_inline long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) { - long ret; - - asm ("syscall" : "=a" (ret), "=m" (*_ts) : - "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) : - "rcx", "r11"); - - return ret; + return VDSO_SYSCALL2(clock_gettime,64,_clkid,_ts); } static __always_inline long gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz) { - long ret; - - asm("syscall" : "=a" (ret) : - "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory"); - - return ret; + return VDSO_SYSCALL2(gettimeofday,,_tv,_tz); } static __always_inline long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) { - long ret; - - asm ("syscall" : "=a" (ret), "=m" (*_ts) : - "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) : - "rcx", "r11"); - - return ret; + return VDSO_SYSCALL2(clock_getres,_time64,_clkid,_ts); } -#else - -static __always_inline -long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) -{ - long ret; - - asm ( - "mov %%ebx, %%edx \n" - "mov %[clock], %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" (ret), "=m" (*_ts) - : "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts) - : "edx"); - - return ret; -} +#ifndef CONFIG_X86_64 static __always_inline long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) { - long ret; - - asm ( - "mov %%ebx, %%edx \n" - "mov %[clock], %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" (ret), "=m" (*_ts) - : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts) - : "edx"); - - return ret; -} - -static __always_inline -long gettimeofday_fallback(struct __kernel_old_timeval *_tv, - struct timezone *_tz) -{ - long ret; - - asm( - "mov %%ebx, %%edx \n" - "mov %2, %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" (ret) - : "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz) - : "memory", "edx"); - - return ret; + return VDSO_SYSCALL2(clock_gettime,,_clkid,_ts); } static __always_inline long -clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) -{ - long ret; - - asm ( - "mov %%ebx, %%edx \n" - "mov %[clock], %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" (ret), "=m" (*_ts) - : "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts) - : "edx"); - - return ret; -} - -static __always_inline -long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) +clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) { - long ret; - - asm ( - "mov %%ebx, %%edx \n" - "mov %[clock], %%ebx \n" - "call __kernel_vsyscall \n" - "mov %%edx, %%ebx \n" - : "=a" 
(ret), "=m" (*_ts) - : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts) - : "edx"); - - return ret; + return VDSO_SYSCALL2(clock_getres,,_clkid,_ts); } #endif diff --git a/arch/x86/include/asm/vdso/sys_call.h b/arch/x86/include/asm/vdso/sys_call.h new file mode 100644 index 000000000000..dcfd17c6dd57 --- /dev/null +++ b/arch/x86/include/asm/vdso/sys_call.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Macros for issuing an inline system call from the vDSO. + */ + +#ifndef X86_ASM_VDSO_SYS_CALL_H +#define X86_ASM_VDSO_SYS_CALL_H + +#include +#include +#include + +#ifdef CONFIG_X86_64 +# define __sys_instr "syscall" +# define __sys_clobber "rcx", "r11", "memory" +# define __sys_nr(x,y) __NR_ ## x +# define __sys_reg1 "rdi" +# define __sys_reg2 "rsi" +# define __sys_reg3 "rdx" +# define __sys_reg4 "r10" +# define __sys_reg5 "r8" +#else +# define __sys_instr "call __kernel_vsyscall" +# define __sys_clobber "memory" +# define __sys_nr(x,y) __NR_ ## x ## y +# define __sys_reg1 "ebx" +# define __sys_reg2 "ecx" +# define __sys_reg3 "edx" +# define __sys_reg4 "esi" +# define __sys_reg5 "edi" +#endif + +/* + * Example usage: + * + * result = VDSO_SYSCALL3(foo,64,x,y,z); + * + * ... calls foo(x,y,z) on 64 bits, and foo64(x,y,z) on 32 bits. + * + * VDSO_SYSCALL6() is currently missing, because it would require + * special handling for %ebp on 32 bits when the vdso is compiled with + * frame pointers enabled (the default on 32 bits.) Add it as a special + * case when and if it becomes necessary. + */ +#define _VDSO_SYSCALL(name,suf32,...) \ + ({ \ + long _sys_num_ret = __sys_nr(name,suf32); \ + asm_inline volatile( \ + __sys_instr \ + : "+a" (_sys_num_ret) \ + : __VA_ARGS__ \ + : __sys_clobber); \ + _sys_num_ret; \ + }) + +#define VDSO_SYSCALL0(name,suf32) \ + _VDSO_SYSCALL(name,suf32) +#define VDSO_SYSCALL1(name,suf32,a1) \ + ({ \ + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ + _VDSO_SYSCALL(name,suf32, \ + "r" (_sys_arg1)); \ + }) +#define VDSO_SYSCALL2(name,suf32,a1,a2) \ + ({ \ + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ + _VDSO_SYSCALL(name,suf32, \ + "r" (_sys_arg1), "r" (_sys_arg2)); \ + }) +#define VDSO_SYSCALL3(name,suf32,a1,a2,a3) \ + ({ \ + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ + register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \ + _VDSO_SYSCALL(name,suf32, \ + "r" (_sys_arg1), "r" (_sys_arg2), \ + "r" (_sys_arg3)); \ + }) +#define VDSO_SYSCALL4(name,suf32,a1,a2,a3,a4) \ + ({ \ + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ + register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \ + register long _sys_arg4 asm(__sys_reg4) = (long)(a4); \ + _VDSO_SYSCALL(name,suf32, \ + "r" (_sys_arg1), "r" (_sys_arg2), \ + "r" (_sys_arg3), "r" (_sys_arg4)); \ + }) +#define VDSO_SYSCALL5(name,suf32,a1,a2,a3,a4,a5) \ + ({ \ + register long _sys_arg1 asm(__sys_reg1) = (long)(a1); \ + register long _sys_arg2 asm(__sys_reg2) = (long)(a2); \ + register long _sys_arg3 asm(__sys_reg3) = (long)(a3); \ + register long _sys_arg4 asm(__sys_reg4) = (long)(a4); \ + register long _sys_arg5 asm(__sys_reg5) = (long)(a5); \ + _VDSO_SYSCALL(name,suf32, \ + "r" (_sys_arg1), "r" (_sys_arg2), \ + "r" (_sys_arg3), "r" (_sys_arg4), \ + "r" (_sys_arg5)); \ + }) + +#endif /* X86_VDSO_SYS_CALL_H */ -- cgit v1.2.3 From f49ecf5e110ab0ed255ddea5e321689faf4e50e6 Mon Sep 17 00:00:00 
2001 From: "H. Peter Anvin" Date: Tue, 16 Dec 2025 13:26:03 -0800 Subject: x86/cpufeature: Replace X86_FEATURE_SYSENTER32 with X86_FEATURE_SYSFAST32 In most cases, the use of "fast 32-bit system call" depends either on X86_FEATURE_SEP or X86_FEATURE_SYSENTER32 || X86_FEATURE_SYSCALL32. However, nearly all the logic for both is identical. Define X86_FEATURE_SYSFAST32 which indicates that *either* SYSENTER32 or SYSCALL32 should be used, for either 32- or 64-bit kernels. This defaults to SYSENTER; use SYSCALL if the SYSCALL32 bit is also set. As this removes ALL existing uses of X86_FEATURE_SYSENTER32, which is a kernel-only synthetic feature bit, simply remove it and replace it with X86_FEATURE_SYSFAST32. This leaves an unused alternative for a true 32-bit kernel, but that should really not matter in any way. The clearing of X86_FEATURE_SYSCALL32 can be removed once the patches for automatically clearing disabled features has been merged. Signed-off-by: H. Peter Anvin (Intel) Signed-off-by: Dave Hansen Link: https://patch.msgid.link/20251216212606.1325678-10-hpa@zytor.com --- arch/x86/Kconfig.cpufeatures | 8 ++++++++ arch/x86/entry/vdso/vdso32/system_call.S | 8 ++------ arch/x86/include/asm/cpufeatures.h | 2 +- arch/x86/kernel/cpu/centaur.c | 3 --- arch/x86/kernel/cpu/common.c | 8 ++++++++ arch/x86/kernel/cpu/intel.c | 4 +--- arch/x86/kernel/cpu/zhaoxin.c | 4 +--- arch/x86/kernel/fred.c | 2 +- arch/x86/xen/setup.c | 28 ++++++++++++++++++---------- arch/x86/xen/smp_pv.c | 5 ++--- arch/x86/xen/xen-ops.h | 1 - 11 files changed, 42 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig.cpufeatures b/arch/x86/Kconfig.cpufeatures index 733d5aff2456..423ac795baa7 100644 --- a/arch/x86/Kconfig.cpufeatures +++ b/arch/x86/Kconfig.cpufeatures @@ -56,6 +56,10 @@ config X86_REQUIRED_FEATURE_MOVBE def_bool y depends on MATOM +config X86_REQUIRED_FEATURE_SYSFAST32 + def_bool y + depends on X86_64 && !X86_FRED + config X86_REQUIRED_FEATURE_CPUID def_bool y depends on X86_64 @@ -120,6 +124,10 @@ config X86_DISABLED_FEATURE_CENTAUR_MCR def_bool y depends on X86_64 +config X86_DISABLED_FEATURE_SYSCALL32 + def_bool y + depends on !X86_64 + config X86_DISABLED_FEATURE_PCID def_bool y depends on !X86_64 diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index 2a15634bbe75..7b1c0f16e511 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -52,13 +52,9 @@ __kernel_vsyscall: #define SYSENTER_SEQUENCE "movl %esp, %ebp; sysenter" #define SYSCALL_SEQUENCE "movl %ecx, %ebp; syscall" -#ifdef BUILD_VDSO32_64 /* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. 
 	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
-	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
-		      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
-#else
-	ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
-#endif
+	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
+		      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
 
 	/* Enter using int $0x80 */
 	int	$0x80
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c3b53beb1300..63b0f9aa9b3e 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -84,7 +84,7 @@
 #define X86_FEATURE_PEBS		( 3*32+12) /* "pebs" Precise-Event Based Sampling */
 #define X86_FEATURE_BTS			( 3*32+13) /* "bts" Branch Trace Store */
 #define X86_FEATURE_SYSCALL32		( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32		( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_SYSFAST32		( 3*32+15) /* sysenter/syscall in IA32 userspace */
 #define X86_FEATURE_REP_GOOD		( 3*32+16) /* "rep_good" REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2		( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
 #define X86_FEATURE_CLEAR_CPU_BUF	( 3*32+18) /* Clear CPU buffers using VERW */
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index a3b55db35c96..9833f837141c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -102,9 +102,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 	    (c->x86 >= 7))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e7ab22fce3b5..1c3261cae40c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1068,6 +1068,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	init_scattered_cpuid_features(c);
 	init_speculation_control(c);
 
+	if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
+		set_cpu_cap(c, X86_FEATURE_SYSFAST32);
+
 	/*
 	 * Clear/Set all flags overridden by options, after probe.
 	 * This needs to happen each time we re-probe, which may happen
@@ -1813,6 +1816,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	 * that it can't be enabled in 32-bit mode.
 	 */
 	setup_clear_cpu_cap(X86_FEATURE_PCID);
+
+	/*
+	 * Never use SYSCALL on a 32-bit kernel
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 #endif
 
 	/*
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 98ae4c37c93e..646ff33c4651 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -236,9 +236,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 		clear_cpu_cap(c, X86_FEATURE_PSE);
 	}
 
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#else
+#ifndef CONFIG_X86_64
 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 		c->x86_cache_alignment = 128;
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
index 89b1c8a70fe8..031379b7d4fa 100644
--- a/arch/x86/kernel/cpu/zhaoxin.c
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -59,9 +59,7 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c)
 {
 	if (c->x86 >= 0x6)
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-#ifdef CONFIG_X86_64
-	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
-#endif
+
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c
index 816187da3a47..e736b19e18de 100644
--- a/arch/x86/kernel/fred.c
+++ b/arch/x86/kernel/fred.c
@@ -68,7 +68,7 @@ void cpu_init_fred_exceptions(void)
 	idt_invalidate();
 
 	/* Use int $0x80 for 32-bit system calls in FRED mode */
-	setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
+	setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
 	setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 }
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3823e52aef52..ac8021c3a997 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -990,13 +990,6 @@ static int register_callback(unsigned type, const void *func)
 	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
 }
 
-void xen_enable_sysenter(void)
-{
-	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
-	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
-		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
-}
-
 void xen_enable_syscall(void)
 {
 	int ret;
@@ -1008,11 +1001,27 @@ void xen_enable_syscall(void)
 	   mechanism for syscalls. */
 	}
 
-	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
-	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
+	if (!cpu_feature_enabled(X86_FEATURE_SYSFAST32))
+		return;
+
+	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32)) {
+		/* Use SYSCALL32 */
+		ret = register_callback(CALLBACKTYPE_syscall32,
+					xen_entry_SYSCALL_compat);
+	} else {
+		/* Use SYSENTER32 */
+		ret = register_callback(CALLBACKTYPE_sysenter,
+					xen_entry_SYSENTER_compat);
+	}
+
+	if (ret) {
 		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
+		setup_clear_cpu_cap(X86_FEATURE_SYSFAST32);
+	}
 }
+
 static void __init xen_pvmmu_arch_setup(void)
 {
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
@@ -1022,7 +1031,6 @@ static void __init xen_pvmmu_arch_setup(void)
 	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
 		BUG();
 
-	xen_enable_sysenter();
 	xen_enable_syscall();
 }
 
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 9bb8ff8bff30..c40f326f0c3a 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -65,10 +65,9 @@ static void cpu_bringup(void)
 	touch_softlockup_watchdog();
 
 	/* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
-	if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
-		xen_enable_sysenter();
+	if (!xen_feature(XENFEAT_supervisor_mode_kernel))
 		xen_enable_syscall();
-	}
+
 	cpu = smp_processor_id();
 	identify_secondary_cpu(cpu);
 	set_cpu_sibling_map(cpu);
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 090349baec09..f6c331b20fad 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -60,7 +60,6 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size);
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_banner(void);
-void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
-- cgit v1.2.3
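Taken together, SYSFAST32 and SYSCALL32 now encode a small decision tree for the 32-bit entry path. The C sketch below is purely illustrative; the real selection is done by ALTERNATIVE patching and, under Xen, by callback registration, and the helper name is invented:

	#include <stdbool.h>

	/* Illustrative restatement of the post-patch 32-bit entry selection. */
	static const char *vsyscall32_entry(bool sysfast32, bool syscall32)
	{
		if (!sysfast32)
			return "int $0x80";	/* no fast path: FRED, old hardware, Xen failure */
		if (syscall32)
			return "syscall";	/* AMD-style fast path */
		return "sysenter";		/* default fast path */
	}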
From 36d83c249e0395a915144eceeb528ddc19b1fbe6 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Tue, 16 Dec 2025 13:26:04 -0800
Subject: x86/entry/vdso32: When using int $0x80, use it directly

When neither sysenter32 nor syscall32 is available (on either
FRED-capable 64-bit hardware or old 32-bit hardware), there is no
reason to do a bunch of stack shuffling in __kernel_vsyscall.
Unfortunately, just overwriting the initial "push" instructions will
mess up the CFI annotations, so suffer the 3-byte NOP if it is not
applicable.

Similarly, inline the int $0x80 when doing inline system calls in the
vdso instead of calling __kernel_vsyscall.

Signed-off-by: H. Peter Anvin (Intel)
Signed-off-by: Dave Hansen
Link: https://patch.msgid.link/20251216212606.1325678-11-hpa@zytor.com
---
 arch/x86/entry/vdso/vdso32/system_call.S | 18 ++++++++++++++----
 arch/x86/include/asm/vdso/sys_call.h     |  4 +++-
 2 files changed, 17 insertions(+), 5 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 7b1c0f16e511..9157cf9c5749 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -14,6 +14,18 @@
 	ALIGN
 __kernel_vsyscall:
 	CFI_STARTPROC
+
+	/*
+	 * If using int $0x80, there is no reason to muck about with the
+	 * stack here. Unfortunately, just overwriting the push instructions
+	 * would mess up the CFI annotations, but it is only a 3-byte
+	 * NOP in that case. This could be avoided by patching the
+	 * vdso symbol table (not the code) and entry point, which would
+	 * take a fair bit of tooling work, or by simply compiling two
+	 * different vDSO images, but neither seems worth it.
+	 */
+	ALTERNATIVE "int $0x80; ret", "", X86_FEATURE_SYSFAST32
+
 	/*
 	 * Reshuffle regs so that any of the entry instructions
 	 * will preserve enough state.
@@ -52,11 +64,9 @@ __kernel_vsyscall:
 #define SYSENTER_SEQUENCE	"movl %esp, %ebp; sysenter"
 #define SYSCALL_SEQUENCE	"movl %ecx, %ebp; syscall"
 
-	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
-	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSFAST32, \
-		      SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
+	ALTERNATIVE SYSENTER_SEQUENCE, SYSCALL_SEQUENCE, X86_FEATURE_SYSCALL32
 
-	/* Enter using int $0x80 */
+	/* Re-enter using int $0x80 */
 	int	$0x80
 SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
 
diff --git a/arch/x86/include/asm/vdso/sys_call.h b/arch/x86/include/asm/vdso/sys_call.h
index dcfd17c6dd57..5806b1cd6aef 100644
--- a/arch/x86/include/asm/vdso/sys_call.h
+++ b/arch/x86/include/asm/vdso/sys_call.h
@@ -20,7 +20,9 @@
 # define __sys_reg4	"r10"
 # define __sys_reg5	"r8"
 #else
-# define __sys_instr	"call __kernel_vsyscall"
+# define __sys_instr	ALTERNATIVE("ds;ds;ds;int $0x80",	\
+				    "call __kernel_vsyscall",	\
+				    X86_FEATURE_SYSFAST32)
 # define __sys_clobber	"memory"
 # define __sys_nr(x,y)	__NR_ ## x ## y
 # define __sys_reg1	"ebx"
-- cgit v1.2.3
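The "ds;ds;ds;int $0x80" string merits a note: the three DS segment-override prefixes, which are harmless on this instruction, pad the two-byte int $0x80 to the five bytes of the call rel32 it replaces, so the alternative site needs no NOP fill and the int $0x80 path never executes filler. A sketch of the two encodings (the byte values are architectural facts; the array names are invented for illustration):

	/* The two 5-byte sequences the ALTERNATIVE() above selects between. */
	static const unsigned char int80_padded[5] = {
		0x3e, 0x3e, 0x3e,	/* DS prefixes, ignored by int */
		0xcd, 0x80,		/* int $0x80 */
	};
	static const unsigned char call_vsyscall[5] = {
		0xe8, 0, 0, 0, 0,	/* call rel32; displacement fixed at link time */
	};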
From b3683f3ba079940f91f4a26004250559f170eda9 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Fri, 16 Jan 2026 12:40:54 -0800
Subject: x86/entry/vdso: Update the object paths for "make vdso_install"

The location of the vdso binary files in the object tree has changed;
update "make vdso_install" to match.

Closes: https://lore.kernel.org/16ea64d1-2a9b-46f9-9fcc-42958f599eb6@leemhuis.info
Fixes: 693c819fedcd ("x86/entry/vdso: Refactor the vdso build")
Reported-by: Thorsten Leemhuis
Signed-off-by: H. Peter Anvin (Intel)
Signed-off-by: Dave Hansen
Link: https://patch.msgid.link/20260116204057.386268-2-hpa@zytor.com
---
 arch/x86/Makefile | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 9ab7522ced18..5f881460a8b5 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -318,9 +318,9 @@ PHONY += install
 install:
 	$(call cmd,install)
 
-vdso-install-$(CONFIG_X86_64)		+= arch/x86/entry/vdso/vdso64.so.dbg
-vdso-install-$(CONFIG_X86_X32_ABI)	+= arch/x86/entry/vdso/vdsox32.so.dbg
-vdso-install-$(CONFIG_COMPAT_32)	+= arch/x86/entry/vdso/vdso32.so.dbg
+vdso-install-$(CONFIG_X86_64)		+= arch/x86/entry/vdso/vdso64/vdso64.so.dbg
+vdso-install-$(CONFIG_X86_X32_ABI)	+= arch/x86/entry/vdso/vdso64/vdsox32.so.dbg
+vdso-install-$(CONFIG_COMPAT_32)	+= arch/x86/entry/vdso/vdso32/vdso32.so.dbg
 
 archprepare: checkbin
 checkbin:
-- cgit v1.2.3

From a48acbaf99d239e60a09a9e2b7d0f7e9feb62769 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Fri, 16 Jan 2026 12:40:55 -0800
Subject: x86/entry/vdso: Fix filtering of vdso compiler flags

This fixes two typos in the filtering of compiler flags for the vdso,
discovered by Chris Mason using an AI script:

1. "-fno-PIE" was written as "fno-PIE".
2. "GCC_PLUGINS_CFLAGS" was written as "GCC_PLUGIN_CFLAGS".

To the best of my knowledge, neither actually had any real impact on
the build at this time, but they are genuine bugs which could break
things at any point in the future.

Chris's script also found that "CONFIG_X86_USER_SHADOW_STACK" was
missing "CONFIG_", but it needs a different fix.

[ dhansen: remove CONFIG_X86_USER_SHADOW_STACK munging,
	   add mention in changelog. ]

Closes: https://lore.kernel.org/20260116035807.2307742-1-clm@meta.com
Fixes: 693c819fedcd ("x86/entry/vdso: Refactor the vdso build")
Reported-by: Chris Mason
Signed-off-by: H. Peter Anvin (Intel)
Signed-off-by: Dave Hansen
Link: https://patch.msgid.link/20260116204057.386268-3-hpa@zytor.com
---
 arch/x86/entry/vdso/common/Makefile.include | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/entry/vdso/common/Makefile.include b/arch/x86/entry/vdso/common/Makefile.include
index 3514b4a6869b..687b3d89b40d 100644
--- a/arch/x86/entry/vdso/common/Makefile.include
+++ b/arch/x86/entry/vdso/common/Makefile.include
@@ -23,9 +23,9 @@ $(obj)/%.lds : KBUILD_CPPFLAGS += $(CPPFLAGS_VDSO_LDS)
 #
 flags-remove-y += \
 	-D__KERNEL__ -mcmodel=kernel -mregparm=3 \
-	-fno-pic -fno-PIC -fno-pie fno-PIE \
+	-fno-pic -fno-PIC -fno-pie -fno-PIE \
 	-mfentry -pg \
-	$(RANDSTRUCT_CFLAGS) $(GCC_PLUGIN_CFLAGS) $(KSTACK_ERASE_CFLAGS) \
+	$(RANDSTRUCT_CFLAGS) $(GCC_PLUGINS_CFLAGS) $(KSTACK_ERASE_CFLAGS) \
 	$(RETPOLINE_CFLAGS) $(CC_FLAGS_LTO) $(CC_FLAGS_CFI) \
 	$(PADDING_CFLAGS)
-- cgit v1.2.3

From 3e30278e0c71808e156cac8da5895d636ce819d5 Mon Sep 17 00:00:00 2001
From: Nathan Chancellor
Date: Fri, 23 Jan 2026 16:20:28 -0700
Subject: x86/entry/vdso32: Omit '.cfi_offset eflags' for LLVM < 16

After commit:

  884961618ee5 ("x86/entry/vdso32: Remove open-coded DWARF in sigreturn.S")

building arch/x86/entry/vdso/vdso32/sigreturn.S with LLVM 15 fails with:

  :18:20: error: invalid register name
  .cfi_offset eflags, 64
	      ^
  arch/x86/entry/vdso/vdso32/sigreturn.S:33:2: note: while in macro instantiation
  STARTPROC_SIGNAL_FRAME 8
  ^

Support for eflags as an argument to .cfi_offset was added in the
LLVM 16 development cycle:

  https://github.com/llvm/llvm-project/commit/67bd3c58c0c7389e39c5a2f4d3b1a30459ccf5b7 [1]

Only add this .cfi_offset directive if it is supported by the
assembler, to clear up the error.

[ mingo: Tidied up the changelog and the comment a bit ]

Fixes: 884961618ee5 ("x86/entry/vdso32: Remove open-coded DWARF in sigreturn.S")
Signed-off-by: Nathan Chancellor
Signed-off-by: Ingo Molnar
Acked-by: H. Peter Anvin (Intel)
Link: https://patch.msgid.link/20260123-x86-vdso32-wa-llvm-15-cfi-offset-eflags-v1-1-0f412e3516a4@kernel.org
---
 arch/x86/entry/vdso/vdso32/sigreturn.S | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch')

diff --git a/arch/x86/entry/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S
index 25b0ac4b4bfe..b433353bc8e3 100644
--- a/arch/x86/entry/vdso/vdso32/sigreturn.S
+++ b/arch/x86/entry/vdso/vdso32/sigreturn.S
@@ -22,7 +22,17 @@
 	CFI_OFFSET cs, IA32_SIGCONTEXT_cs
 	CFI_OFFSET ss, IA32_SIGCONTEXT_ss
 	CFI_OFFSET ds, IA32_SIGCONTEXT_ds
+/*
+ * .cfi_offset eflags requires LLVM 16 or newer:
+ *
+ * https://github.com/llvm/llvm-project/commit/67bd3c58c0c7389e39c5a2f4d3b1a30459ccf5b7
+ *
+ * Check for 16.0.1 to ensure the support is present, as 16.0.0 may be a
+ * prerelease version.
+ */
+#if defined(CONFIG_AS_IS_GNU) || (defined(CONFIG_AS_IS_LLVM) && CONFIG_AS_VERSION >= 160001)
 	CFI_OFFSET eflags, IA32_SIGCONTEXT_flags
+#endif
 .endm
 
 .text
-- cgit v1.2.3
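Note that CONFIG_AS_VERSION encodes the assembler version as major * 10000 + minor * 100 + patchlevel, which is why 16.0.1 appears as 160001 above. The same guard generalizes to any directive that only newer assemblers accept; a hedged sketch follows, where the wrapper macro name is invented while CONFIG_AS_IS_GNU, CONFIG_AS_IS_LLVM, and CONFIG_AS_VERSION are the real kbuild symbols:

	/*
	 * Illustrative wrapper: emit a CFI directive only when the assembler
	 * is known to support it; otherwise omit it and accept slightly
	 * degraded unwind information.
	 */
	#if defined(CONFIG_AS_IS_GNU) || \
	    (defined(CONFIG_AS_IS_LLVM) && CONFIG_AS_VERSION >= 160001)
	# define CFI_OFFSET_EFLAGS(offset)	CFI_OFFSET eflags, offset
	#else
	# define CFI_OFFSET_EFLAGS(offset)	/* unsupported: omit */
	#endif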
From ce9b1c10c3f1c723c3cc7b63aa8331fdb6c57a04 Mon Sep 17 00:00:00 2001
From: "Borislav Petkov (AMD)"
Date: Tue, 27 Jan 2026 23:09:13 +0100
Subject: x86/entry/vdso: Add vdso2c to .gitignore

Commit a76108d05ee1 ("x86/entry/vdso: Move vdso2c to arch/x86/tools")
moved vdso2c to arch/x86/tools/, and commit 93d73005bff4
("x86/entry/vdso: Rename vdso_image_* to vdso*_image") renamed the .so
files but also dropped vdso2c from arch/x86/entry/vdso/.gitignore. It
should have been moved to arch/x86/tools/.gitignore instead. Do that.

Signed-off-by: Borislav Petkov (AMD)
Link: https://patch.msgid.link/20260127221633.GAaXk5QcG8ILa1VWYR@fat_crate.local
---
 arch/x86/tools/.gitignore | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/x86/tools/.gitignore b/arch/x86/tools/.gitignore
index d36dc7cf9115..51d5c22b38d7 100644
--- a/arch/x86/tools/.gitignore
+++ b/arch/x86/tools/.gitignore
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 relocs
+vdso2c
-- cgit v1.2.3