From: Dominik Brodowski
Date: Sun, 11 Feb 2018 10:49:44 +0000 (+0100)
Subject: x86/entry/64: Interleave XOR register clearing with PUSH instructions
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=47d9c905ae7af1c16b58a41be627a74027f5b601;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

x86/entry/64: Interleave XOR register clearing with PUSH instructions

commit f7bafa2b05ef25eda1d9179fd930b0330cf2b7d1 upstream.

Same as is done for syscalls, interleave XOR with PUSH instructions
for exceptions/interrupts, in order to minimize the cost of the
additional instructions required for register clearing.

Signed-off-by: Dominik Brodowski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: dan.j.williams@intel.com
Link: http://lkml.kernel.org/r/20180211104949.12992-4-linux@dominikbrodowski.net
Signed-off-by: Ingo Molnar
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3bda31736a7b..a05cbb81268d 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -101,44 +101,42 @@ For 32-bit we have the following conventions - kernel is built with
 	addq	$-(15*8), %rsp
 	.endm
 
-	.macro SAVE_REGS offset=0
+	.macro SAVE_AND_CLEAR_REGS offset=0
+	/*
+	 * Save registers and sanitize registers of values that a
+	 * speculation attack might otherwise want to exploit. The
+	 * lower registers are likely clobbered well before they
+	 * could be put to use in a speculative execution gadget.
+	 * Interleave XOR with PUSH for better uop scheduling:
+	 */
 	movq %rdi, 14*8+\offset(%rsp)
 	movq %rsi, 13*8+\offset(%rsp)
 	movq %rdx, 12*8+\offset(%rsp)
 	movq %rcx, 11*8+\offset(%rsp)
 	movq %rax, 10*8+\offset(%rsp)
 	movq %r8, 9*8+\offset(%rsp)
+	xorq %r8, %r8				/* nospec r8 */
 	movq %r9, 8*8+\offset(%rsp)
+	xorq %r9, %r9				/* nospec r9 */
 	movq %r10, 7*8+\offset(%rsp)
+	xorq %r10, %r10				/* nospec r10 */
 	movq %r11, 6*8+\offset(%rsp)
+	xorq %r11, %r11				/* nospec r11 */
 	movq %rbx, 5*8+\offset(%rsp)
+	xorl %ebx, %ebx				/* nospec rbx */
 	movq %rbp, 4*8+\offset(%rsp)
+	xorl %ebp, %ebp				/* nospec rbp */
 	movq %r12, 3*8+\offset(%rsp)
+	xorq %r12, %r12				/* nospec r12 */
 	movq %r13, 2*8+\offset(%rsp)
+	xorq %r13, %r13				/* nospec r13 */
 	movq %r14, 1*8+\offset(%rsp)
+	xorq %r14, %r14				/* nospec r14 */
 	movq %r15, 0*8+\offset(%rsp)
+	xorq %r15, %r15				/* nospec r15 */
 	UNWIND_HINT_REGS offset=\offset
 	.endm
 
-	/*
-	 * Sanitize registers of values that a speculation attack
-	 * might otherwise want to exploit. The lower registers are
-	 * likely clobbered well before they could be put to use in
-	 * a speculative execution gadget:
-	 */
-	.macro CLEAR_REGS_NOSPEC
-	xorl %ebp, %ebp
-	xorl %ebx, %ebx
-	xorq %r8, %r8
-	xorq %r9, %r9
-	xorq %r10, %r10
-	xorq %r11, %r11
-	xorq %r12, %r12
-	xorq %r13, %r13
-	xorq %r14, %r14
-	xorq %r15, %r15
-	.endm
-
 	.macro POP_REGS pop_rdi=1 skip_r11rcx=0
 	popq %r15
 	popq %r14
@@ -177,7 +175,7 @@ For 32-bit we have the following conventions - kernel is built with
  * is just setting the LSB, which makes it an invalid stack address and is also
  * a signal to the unwinder that it's a pt_regs pointer in disguise.
  *
- * NOTE: This macro must be used *after* SAVE_REGS because it corrupts
+ * NOTE: This macro must be used *after* SAVE_AND_CLEAR_REGS because it corrupts
  * the original rbp.
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f4d72b81d9c3..3a48fa6b6553 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -561,8 +561,7 @@ END(irq_entries_start)
 1:
 
 	ALLOC_PT_GPREGS_ON_STACK
-	SAVE_REGS
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS
 	ENCODE_FRAME_POINTER
 
 	testb	$3, CS(%rsp)
@@ -1108,8 +1107,7 @@ ENTRY(xen_failsafe_callback)
 	UNWIND_HINT_IRET_REGS
 	pushq	$-1 /* orig_ax = -1 => not a system call */
 	ALLOC_PT_GPREGS_ON_STACK
-	SAVE_REGS
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS
 	ENCODE_FRAME_POINTER
 	jmp	error_exit
 END(xen_failsafe_callback)
@@ -1153,8 +1151,7 @@ idtentry machine_check		do_mce			has_error_code=0	paranoid=1
 ENTRY(paranoid_entry)
 	UNWIND_HINT_FUNC
 	cld
-	SAVE_REGS 8
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS 8
 	ENCODE_FRAME_POINTER 8
 	movl	$1, %ebx
 	movl	$MSR_GS_BASE, %ecx
@@ -1205,8 +1202,7 @@ END(paranoid_exit)
 ENTRY(error_entry)
 	UNWIND_HINT_FUNC
 	cld
-	SAVE_REGS 8
-	CLEAR_REGS_NOSPEC
+	SAVE_AND_CLEAR_REGS 8
 	ENCODE_FRAME_POINTER 8
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
@@ -1393,18 +1389,34 @@ ENTRY(nmi)
 	pushq	(%rdx)		/* pt_regs->dx */
 	pushq	%rcx		/* pt_regs->cx */
 	pushq	%rax		/* pt_regs->ax */
+	/*
+	 * Sanitize registers of values that a speculation attack
+	 * might otherwise want to exploit. The lower registers are
+	 * likely clobbered well before they could be put to use in
+	 * a speculative execution gadget. Interleave XOR with PUSH
+	 * for better uop scheduling:
+	 */
 	pushq	%r8		/* pt_regs->r8 */
+	xorq	%r8, %r8	/* nospec r8 */
 	pushq	%r9		/* pt_regs->r9 */
+	xorq	%r9, %r9	/* nospec r9 */
 	pushq	%r10		/* pt_regs->r10 */
+	xorq	%r10, %r10	/* nospec r10 */
 	pushq	%r11		/* pt_regs->r11 */
+	xorq	%r11, %r11	/* nospec r11*/
 	pushq	%rbx		/* pt_regs->rbx */
+	xorl	%ebx, %ebx	/* nospec rbx*/
 	pushq	%rbp		/* pt_regs->rbp */
+	xorl	%ebp, %ebp	/* nospec rbp*/
 	pushq	%r12		/* pt_regs->r12 */
+	xorq	%r12, %r12	/* nospec r12*/
 	pushq	%r13		/* pt_regs->r13 */
+	xorq	%r13, %r13	/* nospec r13*/
 	pushq	%r14		/* pt_regs->r14 */
+	xorq	%r14, %r14	/* nospec r14*/
 	pushq	%r15		/* pt_regs->r15 */
+	xorq	%r15, %r15	/* nospec r15*/
 	UNWIND_HINT_REGS
-	CLEAR_REGS_NOSPEC
 	ENCODE_FRAME_POINTER
 
 	/*
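
For illustration, a minimal standalone sketch of the pattern this patch
applies throughout (hypothetical toy code, not part of the commit; the file
and label names are invented). Each PUSH that spills a register is
immediately followed by the XOR that zeroes it, so the zeroing uops can be
scheduled alongside the stores instead of running as a serial block
afterwards:

	/* toy_entry.S -- hypothetical example; assembles with "gcc -c toy_entry.S" */
	.text
	.globl	toy_entry
toy_entry:
	pushq	%r8			/* spill caller's r8 to the stack */
	xorq	%r8, %r8		/* nospec r8: clear right after the spill */
	pushq	%r9			/* spill caller's r9 */
	xorq	%r9, %r9		/* nospec r9 */
	/* ... handler work would run here with sanitized registers ... */
	addq	$2*8, %rsp		/* drop the two spilled slots */
	ret

XOR of a register with itself both destroys any attacker-controlled value
and is recognized as a zeroing idiom by modern x86 CPUs, so interleaving it
with the PUSHes keeps the added cost of register clearing low.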