Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                   |  1
-rw-r--r--  arch/x86/boot/compressed/aslr.c    |  9
-rw-r--r--  arch/x86/vdso/Makefile             | 40
-rw-r--r--  arch/x86/vdso/vdso-fakesections.c  | 32
-rw-r--r--  arch/x86/vdso/vdso2c.c             | 19
-rw-r--r--  arch/x86/vdso/vdso2c.h             | 23
-rw-r--r--  arch/x86/xen/enlighten.c           |  5
-rw-r--r--  arch/x86/xen/setup.c               | 60
-rw-r--r--  arch/x86/xen/xen-ops.h             |  1
9 files changed, 132 insertions(+), 58 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fcefdda5136d..a8f749ef0fdc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1672,7 +1672,6 @@ config RELOCATABLE
 config RANDOMIZE_BASE
 	bool "Randomize the address of the kernel image"
 	depends on RELOCATABLE
-	depends on !HIBERNATION
 	default n
 	---help---
 	   Randomizes the physical and virtual address at which the
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 4dbf967da50d..fc6091abedb7 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -289,10 +289,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
 	unsigned long choice = (unsigned long)output;
 	unsigned long random;
 
+#ifdef CONFIG_HIBERNATION
+	if (!cmdline_find_option_bool("kaslr")) {
+		debug_putstr("KASLR disabled by default...\n");
+		goto out;
+	}
+#else
 	if (cmdline_find_option_bool("nokaslr")) {
-		debug_putstr("KASLR disabled...\n");
+		debug_putstr("KASLR disabled by cmdline...\n");
 		goto out;
 	}
+#endif
 
 	/* Record the various known unsafe memory ranges. */
 	mem_avoid_init((unsigned long)input, input_size,
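The aslr.c hunk inverts the KASLR default when hibernation support is compiled in: with CONFIG_HIBERNATION, randomization stays off unless "kaslr" is passed on the command line; without it, randomization stays on unless "nokaslr" is passed. A minimal stand-alone sketch of that decision table; cmdline_find_option_bool() here is a hand-rolled whole-word matcher standing in for the boot stub's helper, not the kernel's code:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the boot stub's cmdline_find_option_bool(): match a whole
 * space-delimited word, so searching for "kaslr" does not hit "nokaslr" */
static bool cmdline_find_option_bool(const char *cmdline, const char *opt)
{
	size_t len = strlen(opt);
	const char *p = cmdline;

	while ((p = strstr(p, opt)) != NULL) {
		if ((p == cmdline || p[-1] == ' ') &&
		    (p[len] == '\0' || p[len] == ' '))
			return true;
		p += len;
	}
	return false;
}

/* the policy choose_kernel_location() implements after this patch */
static bool kaslr_enabled(const char *cmdline, bool config_hibernation)
{
	if (config_hibernation)		/* opt-in: KASLR breaks resume */
		return cmdline_find_option_bool(cmdline, "kaslr");
	return !cmdline_find_option_bool(cmdline, "nokaslr");
}

int main(void)
{
	printf("%d\n", kaslr_enabled("root=/dev/sda1", true));		/* 0 */
	printf("%d\n", kaslr_enabled("root=/dev/sda1 kaslr", true));	/* 1 */
	printf("%d\n", kaslr_enabled("root=/dev/sda1", false));	/* 1 */
	printf("%d\n", kaslr_enabled("nokaslr root=/dev/sda1", false));/* 0 */
	return 0;
}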
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 9769df094035..3c0809a0631f 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -9,18 +9,9 @@ VDSOX32-$(CONFIG_X86_X32_ABI)	:= y
 VDSO32-$(CONFIG_X86_32)		:= y
 VDSO32-$(CONFIG_COMPAT)		:= y
 
-vdso-install-$(VDSO64-y)	+= vdso.so
-vdso-install-$(VDSOX32-y)	+= vdsox32.so
-vdso-install-$(VDSO32-y)	+= $(vdso32-images)
-
-
 # files to link into the vdso
-vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
-
-vobjs-$(VDSOX32-y) += $(vobjx32s-compat)
-
-# Filter out x32 objects.
-vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y))
+vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
+vobjs-nox32 := vdso-fakesections.o
 
 # files to link into kernel
 obj-y += vma.o
@@ -34,7 +25,7 @@ vdso_img-$(VDSO32-y)	+= 32-sysenter
 
 obj-$(VDSO32-y)		+= vdso32-setup.o
 
-vobjs := $(foreach F,$(vobj64s),$(obj)/$F)
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
 
 $(obj)/vdso.o: $(obj)/vdso.so
 
@@ -104,7 +95,13 @@ VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
 			   -Wl,-z,max-page-size=4096 \
 			   -Wl,-z,common-page-size=4096
 
-vobjx32s-y := $(vobj64s:.o=-x32.o)
+# 64-bit objects to re-brand as x32
+vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+
+# x32-rebranded versions
+vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o)
+
+# same thing, but in the output directory
 vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
 
 # Convert 64bit object file to x32 for x32 vDSO.
@@ -176,15 +173,20 @@ VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
 GCOV_PROFILE := n
 
 #
-# Install the unstripped copy of vdso*.so listed in $(vdso-install-y).
+# Install the unstripped copies of vdso*.so.
 #
-quiet_cmd_vdso_install = INSTALL $@
-      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE
+quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+      cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
+
+vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+
+$(MODLIB)/vdso: FORCE
 	@mkdir -p $(MODLIB)/vdso
+
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
 	$(call cmd,vdso_install)
 
-PHONY += vdso_install $(vdso-install-y)
-vdso_install: $(vdso-install-y)
+PHONY += vdso_install $(vdso_img_insttargets)
+vdso_install: $(vdso_img_insttargets) FORCE
 
 clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80*
diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c
new file mode 100644
index 000000000000..cb8a8d72c24b
--- /dev/null
+++ b/arch/x86/vdso/vdso-fakesections.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014 Andy Lutomirski
+ * Subject to the GNU Public License, v.2
+ *
+ * Hack to keep broken Go programs working.
+ *
+ * The Go runtime had a couple of bugs: it would read the section table to try
+ * to figure out how many dynamic symbols there were (it shouldn't have looked
+ * at the section table at all) and, if there were no SHT_DYNSYM section table
+ * entry, it would use an uninitialized value for the number of symbols.  As a
+ * workaround, we supply a minimal section table.  vdso2c will adjust the
+ * in-memory image so that "vdso_fake_sections" becomes the section table.
+ *
+ * The bug was introduced by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
+ * and is being addressed in the Go runtime in this issue:
+ * https://code.google.com/p/go/issues/detail?id=8197
+ */
+
+#ifndef __x86_64__
+#error This hack is specific to the 64-bit vDSO
+#endif
+
+#include <linux/elf.h>
+
+extern const __visible struct elf64_shdr vdso_fake_sections[];
+const __visible struct elf64_shdr vdso_fake_sections[] = {
+	{
+		.sh_type = SHT_DYNSYM,
+		.sh_entsize = sizeof(Elf64_Sym),
+	}
+};
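For context on why a single fake SHT_DYNSYM entry is enough: a loader in the style the comment above describes walks the section table and derives the dynamic-symbol count as sh_size / sh_entsize. The sketch below is a hypothetical reader (count_dynsyms is an illustrative name, not Go's actual code) fed a made-up minimal image; with the fake one-entry table, the walk finds SHT_DYNSYM and computes a deterministic 0 instead of using an uninitialized value:

#include <elf.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical reader: derive the number of dynamic symbols from the
 * section table, as the buggy runtime described above effectively did */
static size_t count_dynsyms(const uint8_t *image)
{
	const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)image;
	const Elf64_Shdr *shdr = (const Elf64_Shdr *)(image + ehdr->e_shoff);
	uint16_t i;

	for (i = 0; i < ehdr->e_shnum; i++)
		if (shdr[i].sh_type == SHT_DYNSYM && shdr[i].sh_entsize)
			return shdr[i].sh_size / shdr[i].sh_entsize;
	return 0;	/* no SHT_DYNSYM entry found */
}

int main(void)
{
	/* made-up minimal image: an ELF header followed directly by a
	 * one-entry section table, mimicking what vdso2c produces */
	static struct {
		Elf64_Ehdr ehdr;
		Elf64_Shdr shdr;
	} image;

	image.ehdr.e_shoff = sizeof(Elf64_Ehdr);
	image.ehdr.e_shnum = 1;
	image.shdr.sh_type = SHT_DYNSYM;
	image.shdr.sh_entsize = sizeof(Elf64_Sym);

	/* sh_size is 0, so the reader computes 0 rather than garbage */
	printf("%zu dynamic symbols\n",
	       count_dynsyms((const uint8_t *)&image));
	return 0;
}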
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 450ac6eaf613..7a6bf50f9165 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -54,7 +54,7 @@ static void fail(const char *format, ...)
 }
 
 /*
- * Evil macros to do a little-endian read.
+ * Evil macros for little-endian reads and writes
  */
 #define GLE(x, bits, ifnot)					\
 	__builtin_choose_expr(					\
@@ -62,11 +62,24 @@ static void fail(const char *format, ...)
 		(__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
 
 extern void bad_get_le(void);
-#define LAST_LE(x)						\
+#define LAST_GLE(x)						\
 	__builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le())
 
 #define GET_LE(x)						\
-	GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_LE(x))))
+	GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x))))
+
+#define PLE(x, val, bits, ifnot)				\
+	__builtin_choose_expr(					\
+		(sizeof(*(x)) == bits/8),			\
+		put_unaligned_le##bits((val), (x)), ifnot)
+
+extern void bad_put_le(void);
+#define LAST_PLE(x, val)					\
+	__builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le())
+
+#define PUT_LE(x, val)						\
+	PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
+
 
 #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index 8a074637a576..c6eefaf389b9 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -18,6 +18,8 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	const char *secstrings;
 	uint64_t syms[NSYMS] = {};
 
+	uint64_t fake_sections_value = 0, fake_sections_size = 0;
+
 	Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
 
 	/* Walk the segment table. */
@@ -84,6 +86,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 			GET_LE(&symtab_hdr->sh_entsize) * i;
 		const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
 			GET_LE(&sym->st_name);
+
 		for (k = 0; k < NSYMS; k++) {
 			if (!strcmp(name, required_syms[k])) {
 				if (syms[k]) {
@@ -93,6 +96,13 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 				syms[k] = GET_LE(&sym->st_value);
 			}
 		}
+
+		if (!strcmp(name, "vdso_fake_sections")) {
+			if (fake_sections_value)
+				fail("duplicate vdso_fake_sections\n");
+			fake_sections_value = GET_LE(&sym->st_value);
+			fake_sections_size = GET_LE(&sym->st_size);
+		}
 	}
 
 	/* Validate mapping addresses. */
@@ -112,11 +122,14 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	if (syms[sym_end_mapping] % 4096)
 		fail("end_mapping must be a multiple of 4096\n");
 
-	/* Remove sections. */
-	hdr->e_shoff = 0;
-	hdr->e_shentsize = 0;
-	hdr->e_shnum = 0;
-	hdr->e_shstrndx = htole16(SHN_UNDEF);
+	/* Remove sections or use fakes */
+	if (fake_sections_size % sizeof(Elf_Shdr))
+		fail("vdso_fake_sections size is not a multiple of %ld\n",
+		     (long)sizeof(Elf_Shdr));
+	PUT_LE(&hdr->e_shoff, fake_sections_value);
+	PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
+	PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
+	PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
 
 	if (!name) {
 		fwrite(addr, load_size, 1, outfile);
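The GET_LE/PUT_LE pair dispatches on sizeof(*(x)) at compile time, so one spelling covers 16-, 32- and 64-bit ELF header fields, and an unmatched width degrades into a link error against a deliberately undefined function. A trimmed, self-contained rendition of the same pattern covering only two widths (the helpers are hand-rolled stand-ins for the kernel's get/put_unaligned_le*(), and bad_le is an illustrative name); GCC/Clang only, since __builtin_choose_expr is a compiler extension:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t get_unaligned_le16(const void *p)
{
	uint8_t b[2];
	memcpy(b, p, 2);
	return (uint16_t)(b[0] | b[1] << 8);
}
static uint32_t get_unaligned_le32(const void *p)
{
	uint8_t b[4];
	memcpy(b, p, 4);
	return b[0] | b[1] << 8 | (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}
static void put_unaligned_le16(uint16_t v, void *p)
{
	uint8_t b[2] = { v & 0xff, v >> 8 };
	memcpy(p, b, 2);
}
static void put_unaligned_le32(uint32_t v, void *p)
{
	uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };
	memcpy(p, b, 4);
}

extern void bad_le(void);	/* never emitted: some branch always matches */

#define GLE(x, bits, ifnot)					\
	__builtin_choose_expr(sizeof(*(x)) == bits/8,		\
		(__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
#define GET_LE(x)	GLE(x, 32, GLE(x, 16, bad_le()))

#define PLE(x, val, bits, ifnot)				\
	__builtin_choose_expr(sizeof(*(x)) == bits/8,		\
		put_unaligned_le##bits((val), (x)), ifnot)
#define PUT_LE(x, val)	PLE(x, val, 32, PLE(x, val, 16, bad_le()))

int main(void)
{
	uint16_t half;
	uint32_t word;

	PUT_LE(&half, 0x1234);		/* resolves to put_unaligned_le16() */
	PUT_LE(&word, 0xdeadbeef);	/* resolves to put_unaligned_le32() */
	printf("0x%x 0x%x\n", GET_LE(&half), GET_LE(&word));
	return 0;
}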
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f17b29210ac4..ffb101e45731 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1537,7 +1537,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	if (!xen_pvh_domain())
 		pv_cpu_ops = xen_cpu_ops;
 
-	x86_init.resources.memory_setup = xen_memory_setup;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
+	else
+		x86_init.resources.memory_setup = xen_memory_setup;
 	x86_init.oem.arch_setup = xen_arch_setup;
 	x86_init.oem.banner = xen_banner;
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 821a11ada590..2e555163c2fe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -27,7 +27,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-#include "mmu.h"
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -82,9 +81,6 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 
 	memblock_reserve(start, size);
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	xen_max_p2m_pfn = PFN_DOWN(start + size);
 	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);
@@ -107,7 +103,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 		.domid        = DOMID_SELF
 	};
 	unsigned long len = 0;
-	int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
 	unsigned long pfn;
 	int ret;
 
@@ -121,7 +116,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 				continue;
 			frame = mfn;
 		} else {
-			if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
+			if (mfn != INVALID_P2M_ENTRY)
 				continue;
 			frame = pfn;
 		}
@@ -159,13 +154,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 static unsigned long __init xen_release_chunk(unsigned long start,
 					      unsigned long end)
 {
-	/*
-	 * Xen already ballooned out the E820 non RAM regions for us
-	 * and set them up properly in EPT.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return end - start;
-
 	return xen_do_chunk(start, end, true);
 }
 
@@ -234,13 +222,7 @@ static void __init xen_set_identity_and_release_chunk(
 	 * (except for the ISA region which must be 1:1 mapped) to
 	 * release the refcounts (in Xen) on the original frames.
 	 */
-
-	/*
-	 * PVH E820 matches the hypervisor's P2M which means we need to
-	 * account for the proper values of *release and *identity.
-	 */
-	for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
-	     pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
 		pte_t pte = __pte_ma(0);
 
 		if (pfn < PFN_UP(ISA_END_ADDRESS))
@@ -518,6 +500,35 @@ char * __init xen_memory_setup(void)
 }
 
 /*
+ * Machine specific memory setup for auto-translated guests.
+ */
+char * __init xen_auto_xlated_memory_setup(void)
+{
+	static struct e820entry map[E820MAX] __initdata;
+
+	struct xen_memory_map memmap;
+	int i;
+	int rc;
+
+	memmap.nr_entries = E820MAX;
+	set_xen_guest_handle(memmap.buffer, map);
+
+	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+	if (rc < 0)
+		panic("No memory map (%d)\n", rc);
+
+	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
+
+	for (i = 0; i < memmap.nr_entries; i++)
+		e820_add_region(map[i].addr, map[i].size, map[i].type);
+
+	memblock_reserve(__pa(xen_start_info->mfn_list),
+			 xen_start_info->pt_base - xen_start_info->mfn_list);
+
+	return "Xen";
+}
+
+/*
  * Set the bit indicating "nosegneg" library variants should be used.
  * We only need to bother in pure 32-bit mode; compat 32-bit processes
  * can have un-truncated segments, so wrapping around is allowed.
@@ -590,13 +601,7 @@ void xen_enable_syscall(void)
 	}
 #endif /* CONFIG_X86_64 */
 }
-void xen_enable_nmi(void)
-{
-#ifdef CONFIG_X86_64
-	if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
-		BUG();
-#endif
-}
+
 void __init xen_pvmmu_arch_setup(void)
 {
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -611,7 +616,6 @@ void __init xen_pvmmu_arch_setup(void)
 
 	xen_enable_sysenter();
 	xen_enable_syscall();
-	xen_enable_nmi();
 }
 
 /* This function is not called for HVM domains */
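xen_auto_xlated_memory_setup() above simply trusts the E820-style map the hypervisor returns for XENMEM_memory_map and feeds every entry to e820_add_region(). A user-space sketch of consuming such a map (struct e820entry mirrors the kernel's addr/size/type layout; the two-entry sample map is made up):

#include <stdint.h>
#include <stdio.h>

#define E820_RAM	1	/* usable RAM, per the E820 type codes */

struct e820entry {
	uint64_t addr;	/* start of memory segment */
	uint64_t size;	/* size of memory segment */
	uint32_t type;	/* type of memory segment */
} __attribute__((packed));

int main(void)
{
	/* hypothetical two-entry map for a small guest */
	struct e820entry map[] = {
		{ 0x0000000000000000ULL, 0x000000000009fc00ULL, E820_RAM },
		{ 0x0000000000100000ULL, 0x000000003ff00000ULL, E820_RAM },
	};
	uint64_t ram = 0;
	unsigned int i;

	/* total the usable RAM, as a memory-setup consumer would */
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].type == E820_RAM)
			ram += map[i].size;

	printf("usable RAM: %llu MiB\n", (unsigned long long)(ram >> 20));
	return 0;
}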
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c834d4b231f0..97d87659f779 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -36,6 +36,7 @@ void xen_mm_unpin_all(void);
 void xen_set_pat(u64);
 
 char * __init xen_memory_setup(void);
+char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);