diff options
author | Andy Lutomirski <luto@kernel.org> | 2015-10-05 17:48:02 -0700 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-10-09 09:41:07 +0200 |
commit | e62a254a1f93fcc7299497a5c7231639400b8c3c (patch) | |
tree | c06f2ea51c67c3052886ea14f2418900a2b0478a /arch/x86/entry | |
parent | 8242c6c84a644e5f0f721e4ae2bd542f640c89f9 (diff) |
x86/entry/64/compat: Disable SYSENTER and SYSCALL32 entries
We've disabled the vDSO helpers to call them, so turn off the
entries entirely (temporarily) in preparation for cleaning them
up.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/8d6e84bf651519289dc532dcc230adfabbd2a3eb.1444091584.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/entry')
-rw-r--r-- | arch/x86/entry/entry_64_compat.S | 13 |
1 file changed, 13 insertions, 0 deletions
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bc678f0c3c91..06a8966415f9 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -103,6 +103,14 @@ ENTRY(entry_SYSENTER_compat)
 	jnz	sysenter_fix_flags
 sysenter_flags_fixed:
 
+	/* Temporary: SYSENTER is disabled. */
+#ifdef CONFIG_CONTEXT_TRACKING
+	call	enter_from_user_mode
+#endif
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	movl	$11, %edi
+	call	do_exit
+
 	/*
 	 * Re-enable interrupts.  IRQ tracing already thinks that IRQs are
 	 * on (since we treat user mode as having IRQs on), and the
@@ -324,6 +332,11 @@ ENTRY(entry_SYSCALL_compat)
 	 * it is too small to ever cause noticeable irq latency.
 	 */
 	SWAPGS_UNSAFE_STACK
+
+	/* Temporary: SYSCALL32 is disabled. */
+	movl	$-ENOSYS, %eax
+	USERGS_SYSRET32
+
 	movl	%esp, %r8d
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 	ENABLE_INTERRUPTS(CLBR_NONE)