diff options
author:    Denys Vlasenko <dvlasenk@redhat.com>  2015-04-07 22:43:42 +0200
committer: Ingo Molnar <mingo@kernel.org>        2015-04-09 10:31:25 +0200
commit:    66ad4efa51805964521db03d8aa827a8dd9058b9 (patch)
tree:      b03e84458cf58a73d73698110f2662ab6ccf9e32 /arch/x86/kernel
parent:    a30b0085f54efae11f6256df4e4a16af7eefc1c4 (diff)
x86/asm/entry/64: Simplify jumps in ret_from_fork
Replace
test
jz 1f
jmp label
1:
with
test
jnz label
Run-tested.
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/1428439424-7258-6-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/entry_64.S | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e8ddd5196ce7..a35e5e4435ef 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -608,18 +608,18 @@ ENTRY(ret_from_fork)
 	RESTORE_EXTRA_REGS

 	testl	$3,CS(%rsp)			# from kernel_thread?
-	jz	1f

 	/*
 	 * By the time we get here, we have no idea whether our pt_regs,
 	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
 	 * the slow path, or one of the ia32entry paths.
-	 * Use int_ret_from_sys_call to return, since it can safely handle
+	 * Use IRET code path to return, since it can safely handle
 	 * all of the above.
 	 */
-	jmp	int_ret_from_sys_call
+	jnz	int_ret_from_sys_call

-1:
+	/* We came from kernel_thread */
+	/* nb: we depend on RESTORE_EXTRA_REGS above */
 	movq	%rbp, %rdi
 	call	*%rbx
 	movl	$0, RAX(%rsp)