x86/entry/64: Merge the fast and slow SYSRET paths
author: Andy Lutomirski <luto@kernel.org>
Thu, 2 Nov 2017 07:59:04 +0000 (00:59 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Dec 2017 13:26:17 +0000 (14:26 +0100)
commit a512210643da8082cb44181dba8b18e752bd68f0 upstream.

They did almost the same thing.  Remove a bunch of pointless
instructions (mostly hidden in macros) and reduce cognitive load by
merging them.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1204e20233fcab9130a1ba80b3b1879b5db3fc1f.1509609304.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/entry/entry_64.S

index 74dd2ec1e4e9fb2a664987d28d2208beca06aa56..e6c75f6ac7e082d6474f6374f5f13288fcc7a73c 100644 (file)
@@ -221,10 +221,9 @@ entry_SYSCALL_64_fastpath:
        TRACE_IRQS_ON           /* user mode is traced as IRQs on */
        movq    RIP(%rsp), %rcx
        movq    EFLAGS(%rsp), %r11
-       RESTORE_C_REGS_EXCEPT_RCX_R11
-       movq    RSP(%rsp), %rsp
+       addq    $6*8, %rsp      /* skip extra regs -- they were preserved */
        UNWIND_HINT_EMPTY
-       USERGS_SYSRET64
+       jmp     .Lpop_c_regs_except_rcx_r11_and_sysret
 
 1:
        /*
@@ -318,6 +317,7 @@ syscall_return_via_sysret:
        /* rcx and r11 are already restored (see code above) */
        UNWIND_HINT_EMPTY
        POP_EXTRA_REGS
+.Lpop_c_regs_except_rcx_r11_and_sysret:
        popq    %rsi    /* skip r11 */
        popq    %r10
        popq    %r9