/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers are
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
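
/*
 * For orientation: the frame these macros build corresponds to
 * struct pt_regs. A sketch of the layout (from memory of
 * arch/x86/include/asm/ptrace.h; check that header for the
 * authoritative definition):
 *
 *	struct pt_regs {
 *		unsigned long r15, r14, r13, r12, bp, bx; // SAVE_REST half
 *		unsigned long r11, r10, r9, r8;           // SAVE_ARGS half...
 *		unsigned long ax, cx, dx, si, di;
 *		unsigned long orig_ax;                    // syscall nr / error code
 *		unsigned long ip, cs, flags, sp, ss;      // hardware frame
 *	};
 *
 * A "partial stack frame" fills di..r11 (SAVE_ARGS) plus the hardware
 * frame; a "full stack frame" additionally fills bx..r15 (SAVE_REST).
 */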

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/rcu.h>
#include <asm/smap.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE	   0x40000000

        .code64
        .section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CC_USING_FENTRY
# define function_hook	__fentry__
#else
# define function_hook	mcount
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(function_hook)
        retq
END(function_hook)

/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
        MCOUNT_SAVE_FRAME \skip

        /* Load the ftrace_ops into the 3rd parameter */
        leaq function_trace_op, %rdx

        /* Load ip into the first parameter */
        movq RIP(%rsp), %rdi
        subq $MCOUNT_INSN_SIZE, %rdi
        /* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
        movq SS+16(%rsp), %rsi
#else
        movq 8(%rbp), %rsi
#endif
.endm

ENTRY(ftrace_caller)
        /* Check if tracing was disabled (quick check) */
        cmpl $0, function_trace_stop
        jne  ftrace_stub

        ftrace_caller_setup
        /* regs go into 4th parameter (but make it NULL) */
        movq $0, %rcx

GLOBAL(ftrace_call)
        call ftrace_stub

        MCOUNT_RESTORE_FRAME
ftrace_return:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
        jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
        retq
END(ftrace_caller)
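
/*
 * For reference, the arguments marshalled above map onto the C callback
 * type registered through <linux/ftrace.h>. A minimal sketch of such a
 * handler (hypothetical module code, not part of this file; regs is only
 * valid for FTRACE_OPS_FL_SAVE_REGS users serviced by ftrace_regs_caller
 * below):
 *
 *	static void my_tracer(unsigned long ip, unsigned long parent_ip,
 *			      struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// ip: traced function; parent_ip: its caller. Keep this cheap.
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_tracer,
 *	};
 *
 * followed by register_ftrace_function(&my_ops) from module init.
 */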

ENTRY(ftrace_regs_caller)
        /* Save the current flags before any compare (in SS location) */
        pushfq

        /* Check if tracing was disabled (quick check) */
        cmpl $0, function_trace_stop
        jne  ftrace_restore_flags

        /* skip=8 to skip flags saved in SS */
        ftrace_caller_setup 8

        /* Save the rest of pt_regs */
        movq %r15, R15(%rsp)
        movq %r14, R14(%rsp)
        movq %r13, R13(%rsp)
        movq %r12, R12(%rsp)
        movq %r11, R11(%rsp)
        movq %r10, R10(%rsp)
        movq %rbp, RBP(%rsp)
        movq %rbx, RBX(%rsp)
        /* Copy saved flags */
        movq SS(%rsp), %rcx
        movq %rcx, EFLAGS(%rsp)
        /* Kernel segments */
        movq $__KERNEL_DS, %rcx
        movq %rcx, SS(%rsp)
        movq $__KERNEL_CS, %rcx
        movq %rcx, CS(%rsp)
        /* Stack - skipping return address */
        leaq SS+16(%rsp), %rcx
        movq %rcx, RSP(%rsp)

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
        call ftrace_stub

        /* Copy flags back to SS, to restore them */
        movq EFLAGS(%rsp), %rax
        movq %rax, SS(%rsp)

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, SS+8(%rsp)

        /* restore the rest of pt_regs */
        movq R15(%rsp), %r15
        movq R14(%rsp), %r14
        movq R13(%rsp), %r13
        movq R12(%rsp), %r12
        movq R10(%rsp), %r10
        movq RBP(%rsp), %rbp
        movq RBX(%rsp), %rbx

        /* skip=8 to skip flags saved in SS */
        MCOUNT_RESTORE_FRAME 8

        /* Restore flags */
        popfq

        jmp ftrace_return
ftrace_restore_flags:
        popfq
        jmp  ftrace_stub

END(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
        cmpl $0, function_trace_stop
        jne  ftrace_stub

        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpq $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
        retq

trace:
        MCOUNT_SAVE_FRAME

        movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
        movq SS+16(%rsp), %rsi
#else
        movq 8(%rbp), %rsi
#endif
        subq $MCOUNT_INSN_SIZE, %rdi

        call *ftrace_trace_function

        MCOUNT_RESTORE_FRAME

        jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        MCOUNT_SAVE_FRAME

#ifdef CC_USING_FENTRY
        leaq SS+16(%rsp), %rdi
        movq $0, %rdx	/* No framepointers needed */
#else
        leaq 8(%rbp), %rdi
        movq (%rbp), %rdx
#endif
        movq RIP(%rsp), %rsi
        subq $MCOUNT_INSN_SIZE, %rsi

        call prepare_ftrace_return

        MCOUNT_RESTORE_FRAME

        retq
END(ftrace_graph_caller)

GLOBAL(return_to_handler)
        subq $24, %rsp

        /* Save the return values */
        movq %rax, (%rsp)
        movq %rdx, 8(%rsp)
        movq %rbp, %rdi

        call ftrace_return_to_handler

        movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
        addq $24, %rsp
        jmp *%rdi
#endif


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
        swapgs
        sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
        jnc  1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During that
 * window, if lockdep is enabled, it might jump back into the debug
 * handler outside the IST-protected part of the update
 * (via TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
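/*
 * That patching sequence in C-like pseudocode (hypothetical helper names;
 * the real implementation lives in arch/x86/kernel/ftrace.c):
 *
 *	for_each_site(s) poke_int3(s);       // 1st byte -> breakpoint
 *	sync_cores();                        // every CPU now sees the int3
 *	for_each_site(s) patch_tail(s);      // rewrite the remaining bytes
 *	sync_cores();
 *	for_each_site(s) replace_int3(s);    // int3 -> final first byte
 *	sync_cores();
 *
 * A CPU that hits one of the temporary int3s lands on the debug (IST)
 * stack, which is why the TRACE_IRQS_*_DEBUG variants below must not
 * let the stack pointer be reset to the top of that stack.
 */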
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
        call debug_stack_set_zero
        TRACE_IRQS_OFF
        call debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
        call debug_stack_set_zero
        TRACE_IRQS_ON
        call debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
        bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
        jnc  1f
        TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp offset=0
        movq PER_CPU_VAR(old_rsp),\tmp
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
        movq $-1,RCX+\offset(%rsp)
        movq R11+\offset(%rsp),\tmp	/* get eflags */
        movq \tmp,EFLAGS+\offset(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp offset=0
        movq RSP+\offset(%rsp),\tmp
        movq \tmp,PER_CPU_VAR(old_rsp)
        movq EFLAGS+\offset(%rsp),\tmp
        movq \tmp,R11+\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq_cfi $__KERNEL_DS /* ss */
        /*CFI_REL_OFFSET ss,0*/
        pushq_cfi %rax /* rsp */
        CFI_REL_OFFSET rsp,0
        pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
        /*CFI_REL_OFFSET rflags,0*/
        pushq_cfi $__KERNEL_CS /* cs */
        /*CFI_REL_OFFSET cs,0*/
        pushq_cfi \child_rip /* rip */
        CFI_REL_OFFSET rip,0
        pushq_cfi %rax /* orig rax */
.endm

.macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET	-(6*8)
.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
.macro EMPTY_FRAME start=1 offset=0
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,8+\offset
        .else
        CFI_DEF_CFA_OFFSET 8+\offset
        .endif
.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
.macro INTR_FRAME start=1 offset=0
        EMPTY_FRAME \start, SS+8+\offset-RIP
        /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
        CFI_REL_OFFSET rsp, RSP+\offset-RIP
        /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
        /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
        CFI_REL_OFFSET rip, RIP+\offset-RIP
.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
.macro XCPT_FRAME start=1 offset=0
        INTR_FRAME \start, RIP+\offset-ORIG_RAX
        /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
.endm

/*
 * frame that enables calling into C.
 */
.macro PARTIAL_FRAME start=1 offset=0
        XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
        CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
        CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
        CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
        CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
        CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
        CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
        CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
.macro DEFAULT_FRAME start=1 offset=0
        PARTIAL_FRAME \start, R11+\offset-R15
        CFI_REL_OFFSET rbx, RBX+\offset
        CFI_REL_OFFSET rbp, RBP+\offset
        CFI_REL_OFFSET r12, R12+\offset
        CFI_REL_OFFSET r13, R13+\offset
        CFI_REL_OFFSET r14, R14+\offset
        CFI_REL_OFFSET r15, R15+\offset
.endm

/* save partial stack frame */
.macro SAVE_ARGS_IRQ
        cld
        /* start from rbp in pt_regs and jump over */
        movq_cfi rdi, (RDI-RBP)
        movq_cfi rsi, (RSI-RBP)
        movq_cfi rdx, (RDX-RBP)
        movq_cfi rcx, (RCX-RBP)
        movq_cfi rax, (RAX-RBP)
        movq_cfi r8, (R8-RBP)
        movq_cfi r9, (R9-RBP)
        movq_cfi r10, (R10-RBP)
        movq_cfi r11, (R11-RBP)

        /* Save rbp so that we can unwind from get_irq_regs() */
        movq_cfi rbp, 0

        /* Save previous stack value */
        movq %rsp, %rsi

        leaq -RBP(%rsp),%rdi	/* arg1 for handler */
        testl $3, CS-RBP(%rsi)
        je 1f
        SWAPGS
        /*
         * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count, it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
1:      incl PER_CPU_VAR(irq_count)
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        CFI_DEF_CFA_REGISTER	rsi

        /* Store previous stack value */
        pushq %rsi
        CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
                        0x77 /* DW_OP_breg7 */, 0, \
                        0x06 /* DW_OP_deref */, \
                        0x08 /* DW_OP_const1u */, SS+8-RBP, \
                        0x22 /* DW_OP_plus */
        /* We entered an interrupt context - irqs are off: */
        TRACE_IRQS_OFF
.endm

ENTRY(save_rest)
        PARTIAL_FRAME 1 (REST_SKIP+8)
        movq 5*8+16(%rsp), %r11	/* save return address */
        movq_cfi rbx, RBX+16
        movq_cfi rbp, RBP+16
        movq_cfi r12, R12+16
        movq_cfi r13, R13+16
        movq_cfi r14, R14+16
        movq_cfi r15, R15+16
        movq %r11, 8(%rsp)	/* return address */
        FIXUP_TOP_OF_STACK %r11, 16
        ret
        CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
        .pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f	/* negative -> in kernel */
        SWAPGS
        xorl %ebx,%ebx
1:      ret
        CFI_ENDPROC
END(save_paranoid)
        .popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
        DEFAULT_FRAME

        LOCK ; btr $TIF_FORK,TI_flags(%r8)

        pushq_cfi $0x0002
        popfq_cfi			# reset kernel eflags

        call schedule_tail		# rdi: 'prev' task parameter

        GET_THREAD_INFO(%rcx)

        RESTORE_REST

        testl $3, CS-ARGOFFSET(%rsp)	# from kernel_thread?
        jz 1f

        testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
        jnz int_ret_from_sys_call

        RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
        jmp ret_from_sys_call		# go to the SYSRET fastpath

1:
        subq $REST_SKIP, %rsp	# leave space for volatiles
        CFI_ADJUST_CFA_OFFSET	REST_SKIP
        movq %rbp, %rdi
        call *%rbx
        movl $0, RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer. However, it does mask the flags register for us, so
 * CLD and CLAC are not needed.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

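/*
 * The same convention seen from user space: a minimal sketch (not part
 * of this file) of calling write(2) directly, listing rcx/r11 as
 * clobbers because SYSCALL/SYSRET use them, as described above:
 *
 *	long my_write(int fd, const void *buf, unsigned long len)
 *	{
 *		long ret;
 *		asm volatile("syscall"
 *			     : "=a" (ret)
 *			     : "a" (1), "D" (fd), "S" (buf), "d" (len)
 *			     : "rcx", "r11", "memory");
 *		return ret;	// 1 == __NR_write; a 4th arg would go in r10
 *	}
 */
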
ENTRY(system_call)
        CFI_STARTPROC	simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER	rip,rcx
        /*CFI_REGISTER	rflags,r11*/
        SWAPGS_UNSAFE_STACK
        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
GLOBAL(system_call_after_swapgs)

        movq %rsp,PER_CPU_VAR(old_rsp)
        movq PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_ARGS 8,0
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
        cmpq $__NR_syscall_max,%rax
#else
        andl $__SYSCALL_MASK,%eax
        cmpl $__NR_syscall_max,%eax
#endif
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)	# XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi:	flagmask */
sysret_check:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
        andl %edi,%edx
        jnz sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER	rip,rcx
        RESTORE_ARGS 1,-ARG_SKIP,0
        /*CFI_REGISTER	rflags,r11*/
        movq PER_CPU_VAR(old_rsp), %rsp
        USERGS_SYSRET64

        CFI_RESTORE_STATE
        /* Handle reschedules */
        /* edx:	work, edi: workmask */
sysret_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        SCHEDULE_USER
        popq_cfi %rdi
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
        bt $TIF_SYSCALL_AUDIT,%edx
        jc sysret_audit
#endif
        /*
         * We have a signal, or exit tracing or single-step.
         * These all wind up with the iret return path anyway,
         * so just join that path right now.
         */
        FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
        jmp int_check_syscall_exit_work

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
        /*
         * Fast path for syscall audit without full syscall trace.
         * We just call __audit_syscall_entry() directly, and then
         * jump back to the normal fast path.
         */
auditsys:
        movq %r10,%r9			/* 6th arg: 4th syscall arg */
        movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
        movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
        movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
        movq %rax,%rsi			/* 2nd arg: syscall number */
        movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
        call __audit_syscall_entry
        LOAD_ARGS 0		/* reload call-clobbered registers */
        jmp system_call_fastpath

        /*
         * Return fast path for syscall audit. Call __audit_syscall_exit()
         * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
         * masked off.
         */
sysret_audit:
        movq RAX-ARGOFFSET(%rsp),%rsi	/* second arg, syscall return value */
        cmpq $-MAX_ERRNO,%rsi		/* is it < -MAX_ERRNO? */
        setbe %al			/* 1 if so, 0 if not */
        movzbl %al,%edi			/* zero-extend that into %edi */
        call __audit_syscall_exit
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

        /* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz auditsys
#endif
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %rax because syscall_trace_enter() returned
         * the value it wants us to use in the table lookup.
         */
        LOAD_ARGS ARGOFFSET, 1
        RESTORE_REST
#if __SYSCALL_MASK == ~0
        cmpq $__NR_syscall_max,%rax
#else
        andl $__SYSCALL_MASK,%eax
        cmpl $__NR_syscall_max,%eax
#endif
        ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx	/* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi:	mask to check */
GLOBAL(int_with_check)
        LOCKDEP_SYS_EXIT_IRQ
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,TI_status(%rcx)
        jmp retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx:	work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        SCHEDULE_USER
        popq_cfi %rdi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
int_check_syscall_exit_work:
        SAVE_REST
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
        pushq_cfi %rdi
        leaq 8(%rsp),%rdi	# &ptregs -> arg1
        call syscall_trace_leave
        popq_cfi %rdi
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest

int_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f
        movq %rsp,%rdi		# &ptregs -> arg1
        xorl %esi,%esi		# oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_WORK_MASK,%edi
int_restore_rest:
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
        .macro PTREGSCALL label,func,arg
ENTRY(\label)
        PARTIAL_FRAME 1 8		/* offset 8: return address */
        subq $REST_SKIP, %rsp
        CFI_ADJUST_CFA_OFFSET REST_SKIP
        call save_rest
        DEFAULT_FRAME 0 8		/* offset 8: return address */
        leaq 8(%rsp), \arg	/* pt_regs pointer */
        call \func
        jmp ptregscall_common
        CFI_ENDPROC
END(\label)
        .endm

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
        DEFAULT_FRAME 1 8	/* offset 8: return address */
        RESTORE_TOP_OF_STACK %r11, 8
        movq_cfi_restore R15+8, r15
        movq_cfi_restore R14+8, r14
        movq_cfi_restore R13+8, r13
        movq_cfi_restore R12+8, r12
        movq_cfi_restore RBP+8, rbp
        movq_cfi_restore RBX+8, rbx
        ret $REST_SKIP		/* pop extended registers */
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
        PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx

ENTRY(stub_x32_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys32_x32_rt_sigreturn
        movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call compat_sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_x32_execve)

#endif

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
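/*
 * Rough size check: each stub below is a 2-byte "pushq $imm8" plus, for
 * all but the last stub of a chunk, a 2-byte short jmp, so 6*4 + 2 = 26
 * bytes, leaving room for the shared "jmp common_interrupt" inside the
 * 32-byte chunk. Pushing ~vector+0x80 (rather than the raw vector) is
 * what keeps the operand in signed-byte range for every vector up to 255.
 */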
        .section .init.rodata,"a"
ENTRY(interrupt)
        .section .entry.text
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
        INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -8
      .endif
1:      pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
      .endif
      .previous
        .quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:      jmp common_interrupt
.endr
        CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
        .macro interrupt func
        /* reserve pt_regs for scratch regs and rbp */
        subq $ORIG_RAX-RBP, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
        SAVE_ARGS_IRQ
        call \func
        .endm

/*
 * Interrupt entry/exit should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"
        /*
         * The interrupt stubs push (~vector+0x80) onto the stack and
         * then jump to common_interrupt.
         */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        XCPT_FRAME
        ASM_CLAC
        addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
        /* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        decl PER_CPU_VAR(irq_count)

        /* Restore saved previous stack */
        popq %rsi
        CFI_DEF_CFA rsi,SS+8-RBP	/* reg/off reset after def_cfa_expr */
        leaq ARGOFFSET-RBP(%rsi), %rsp
        CFI_DEF_CFA_REGISTER	rsp
        CFI_ADJUST_CFA_OFFSET	RBP-ARGOFFSET

exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful

retint_swapgs:		/* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_IRETQ
        SWAPGS
        jmp restore_args

retint_restore_args:	/* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 1,8,1

irq_return:
        INTERRUPT_RETURN
        _ASM_EXTABLE(irq_return, bad_iret)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iretq
        _ASM_EXTABLE(native_iret, bad_iret)
#endif

        .section .fixup,"ax"
bad_iret:
        /*
         * The iret traps when the %cs or %ss being restored is bogus.
         * We've lost the original trap vector and error code.
         * #GPF is the most likely one to get for an invalid selector.
         * So pretend we completed the iret and took the #GPF in user mode.
         *
         * We are now running with the kernel GS after exception recovery.
         * But error_entry expects us to have user GS to match the user %cs,
         * so swap back.
         */
        pushq $0

        SWAPGS
        jmp general_protection

        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        SCHEDULE_USER
        popq_cfi %rdi
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi		# oldset
        movq %rsp,%rdi		# &pt_regs
        call do_notify_resume
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,TI_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,TI_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)
/*
 * End of kprobes section
 */
        .popsection

/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        pushq_cfi $~(\num)
.Lcommon_\sym:
        interrupt \do_sym
        jmp ret_from_intr
        CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
        irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt REBOOT_VECTOR \
        reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
        uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
        x86_platform_ipi smp_x86_platform_ipi

apicinterrupt THRESHOLD_APIC_VECTOR \
        threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
        thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
        call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
        call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
        reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
        error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
        spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
        irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi		/* pt_regs pointer */
        xorl %esi,%esi		/* no error code */
        call \do_sym
        jmp error_exit		/* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi		/* pt_regs pointer */
        xorl %esi,%esi		/* no error code */
        call \do_sym
        jmp paranoid_exit	/* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF_DEBUG
        movq %rsp,%rdi		/* pt_regs pointer */
        xorl %esi,%esi		/* no error code */
        subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
        call \do_sym
        addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
        jmp paranoid_exit	/* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro errorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi			/* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi	/* get error code */
        movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
        call \do_sym
        jmp error_exit			/* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

        /* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        DEFAULT_FRAME 0
        TRACE_IRQS_OFF
        movq %rsp,%rdi			/* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi	/* get error code */
        movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
        call \do_sym
        jmp paranoid_exit		/* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error


        /* Reload gs selector with exception handling */
        /* edi:	new selector */
ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushfq_cfi
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
gs_change:
        movl %edi,%gs
2:      mfence		/* workaround */
        SWAPGS
        popfq_cfi
        ret
        CFI_ENDPROC
END(native_load_gs_index)

        _ASM_EXTABLE(gs_change,bad_gs)
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        SWAPGS			/* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        pushq_cfi %rbp
        CFI_REL_OFFSET rbp,0
        mov %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl PER_CPU_VAR(irq_count)
        cmove PER_CPU_VAR(irq_stack_ptr),%rsp
        push %rbp		# backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_RESTORE		rbp
        CFI_DEF_CFA_REGISTER	rsp
        CFI_ADJUST_CFA_OFFSET	-8
        decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
END(call_softirq)

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
        CFI_STARTPROC
/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
        movq %rdi, %rsp		# we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
11:     incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        pushq %rbp		# backlink for old unwinder
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
        decl PER_CPU_VAR(irq_count)
        jmp error_exit
        CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
        INTR_FRAME 1 (6*8)
        /*CFI_REL_OFFSET gs,GS*/
        /*CFI_REL_OFFSET fs,FS*/
        /*CFI_REL_OFFSET es,ES*/
        /*CFI_REL_OFFSET ds,DS*/
        CFI_REL_OFFSET r11,8
        CFI_REL_OFFSET rcx,0
        movw %ds,%cx
        cmpw %cx,0x10(%rsp)
        CFI_REMEMBER_STATE
        jne 1f
        movw %es,%cx
        cmpw %cx,0x18(%rsp)
        jne 1f
        movw %fs,%cx
        cmpw %cx,0x20(%rsp)
        jne 1f
        movw %gs,%cx
        cmpw %cx,0x28(%rsp)
        jne 1f
        /* All segments match their saved values => Category 2 (Bad IRET). */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $0	/* RIP */
        pushq_cfi %r11
        pushq_cfi %rcx
        jmp general_protection
        CFI_RESTORE_STATE
1:      /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        jmp error_exit
        CFI_ENDPROC
END(xen_failsafe_callback)

apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
        xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

/*
 * Some functions should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
#ifdef CONFIG_XEN
zeroentry xen_debug do_debug
zeroentry xen_int3 do_int3
errorentry xen_stack_segment do_stack_segment
#endif
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_KVM_GUEST
errorentry async_page_fault do_async_page_fault
#endif
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check *machine_check_vector(%rip)
#endif

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */

        /* ebx:	no swapgs flag */
ENTRY(paranoid_exit)
        DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF_DEBUG
        testl %ebx,%ebx			/* swapgs needed? */
        jnz paranoid_restore
        testl $3,CS(%rsp)
        jnz paranoid_userspace
paranoid_swapgs:
        TRACE_IRQS_IRETQ 0
        SWAPGS_UNSAFE_STACK
        RESTORE_ALL 8
        jmp irq_return
paranoid_restore:
        TRACE_IRQS_IRETQ_DEBUG 0
        RESTORE_ALL 8
        jmp irq_return
paranoid_userspace:
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs
        movq %rsp,%rdi			/* &pt_regs */
        call sync_regs
        movq %rax,%rsp			/* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule
        movl %ebx,%edx			/* arg3: thread flags */
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi			/* arg2: oldset */
        movq %rsp,%rdi			/* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
paranoid_schedule:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        SCHEDULE_USER
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
        CFI_ENDPROC
END(paranoid_exit)

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs" flag in %ebx.
 */
ENTRY(error_entry)
        XCPT_FRAME
        CFI_ADJUST_CFA_OFFSET 15*8
        /* oldrax contains error code */
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        xorl %ebx,%ebx
        testl $3,CS+8(%rsp)
        je error_kernelspace
error_swapgs:
        SWAPGS
error_sti:
        TRACE_IRQS_OFF
        ret

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
        incl %ebx
        leaq irq_return(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%eax	/* zero extend */
        cmpq %rax,RIP+8(%rsp)
        je bstep_iret
        cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
        jmp error_sti

bstep_iret:
        /* Fix truncated RIP */
        movq %rcx,RIP+8(%rsp)
        jmp error_swapgs
        CFI_ENDPROC
END(error_entry)


/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
        DEFAULT_FRAME
        movl %ebx,%eax
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        jmp retint_swapgs
        CFI_ENDPROC
END(error_exit)

/*
 * Test if a given stack is an NMI stack or not.
 */
        .macro test_in_nmi reg stack nmi_ret normal_ret
        cmpq %\reg, \stack
        ja \normal_ret
        subq $EXCEPTION_STKSZ, %\reg
        cmpq %\reg, \stack
        jb \normal_ret
        jmp \nmi_ret
        .endm
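
/*
 * C equivalent of the check above (a sketch; "top" is the high end of
 * the NMI stack, EXCEPTION_STKSZ its size):
 *
 *	static inline bool stack_is_nmi(unsigned long stack, unsigned long top)
 *	{
 *		return stack <= top && stack >= top - EXCEPTION_STKSZ;
 *	}
 */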

        /* runs on exception stack */
ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        /*
         * We allow breakpoints in NMIs. If a breakpoint occurs, then
         * the iretq it performs will take us out of NMI context.
         * This means that we can have nested NMIs where the next
         * NMI is using the top of the stack of the previous NMI. We
         * can't let it execute because the nested NMI will corrupt the
         * stack of the previous NMI. NMI handlers are not re-entrant
         * anyway.
         *
         * To handle this case we do the following:
         * Check a special location on the stack that contains
         * a variable that is set when NMIs are executing.
         * The interrupted task's stack is also checked to see if it
         * is an NMI stack.
         * If the variable is not set and the stack is not the NMI
         * stack then:
         * o Set the special variable on the stack
         * o Copy the interrupt frame into a "saved" location on the stack
         * o Copy the interrupt frame into a "copy" location on the stack
         * o Continue processing the NMI
         * If the variable is set or the previous stack is the NMI stack:
         * o Modify the "copy" location to jump to repeat_nmi
         * o Return back to the first NMI
         *
         * Now on exit of the first NMI, we first clear the stack variable.
         * The NMI stack will tell any nested NMIs at that point that it is
         * nested. Then we pop the stack normally with iret, and if there was
         * a nested NMI that updated the copy interrupt stack frame, a
         * jump will be made to the repeat_nmi code that will handle the second
         * NMI.
         */
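
        /*
         * The decision just described, in C-like pseudocode (hypothetical
         * names, mirroring the checks performed below):
         *
         *	if (!nmi_executing_var && !stack_is_nmi(prev_rsp, nmi_stack_top)) {
         *		nmi_executing_var = 1;
         *		copy_iret_frame(saved);	// fix-up reference copy
         *		copy_iret_frame(copy);	// the frame we iret through
         *		handle_nmi();
         *	} else {
         *		retarget_copy_frame(repeat_nmi); // make 1st NMI re-run
         *		return;			// back to the first NMI
         *	}
         */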

        /* Use %rdx as our temp variable throughout */
        pushq_cfi %rdx
        CFI_REL_OFFSET rdx, 0

        /*
         * If %cs was not the kernel segment, then the NMI triggered in user
         * space, which means it is definitely not nested.
         */
        cmpl $__KERNEL_CS, 16(%rsp)
        jne first_nmi

        /*
         * Check the special variable on the stack to see if NMIs are
         * executing.
         */
        cmpl $1, -8(%rsp)
        je nested_nmi

        /*
         * Now test if the previous stack was an NMI stack.
         * We need the double check. We check the NMI stack to satisfy the
         * race when the first NMI clears the variable before returning.
         * We check the variable because the first NMI could be in a
         * breakpoint routine using a breakpoint stack.
         */
        lea 6*8(%rsp), %rdx
        test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
        CFI_REMEMBER_STATE

nested_nmi:
        /*
         * Do nothing if we interrupted the fixup in repeat_nmi.
         * It's about to repeat the NMI handler, so we are fine
         * with ignoring this one.
         */
        movq $repeat_nmi, %rdx
        cmpq 8(%rsp), %rdx
        ja 1f
        movq $end_repeat_nmi, %rdx
        cmpq 8(%rsp), %rdx
        ja nested_nmi_out

1:
        /* Set up the interrupted NMI's stack to jump to repeat_nmi */
        leaq -6*8(%rsp), %rdx
        movq %rdx, %rsp
        CFI_ADJUST_CFA_OFFSET 6*8
        pushq_cfi $__KERNEL_DS
        pushq_cfi %rdx
        pushfq_cfi
        pushq_cfi $__KERNEL_CS
        pushq_cfi $repeat_nmi

        /* Put stack back */
        addq $(11*8), %rsp
        CFI_ADJUST_CFA_OFFSET -11*8

nested_nmi_out:
        popq_cfi %rdx
        CFI_RESTORE rdx

        /* No need to check faults here */
        INTERRUPT_RETURN

        CFI_RESTORE_STATE
first_nmi:
        /*
         * Because nested NMIs will use the pushed location that we
         * stored in rdx, we must keep that space available.
         * Here's what our stack frame will look like:
         * +-------------------------+
         * | original SS             |
         * | original Return RSP     |
         * | original RFLAGS         |
         * | original CS             |
         * | original RIP            |
         * +-------------------------+
         * | temp storage for rdx    |
         * +-------------------------+
         * | NMI executing variable  |
         * +-------------------------+
         * | Saved SS                |
         * | Saved Return RSP        |
         * | Saved RFLAGS            |
         * | Saved CS                |
         * | Saved RIP               |
         * +-------------------------+
         * | copied SS               |
         * | copied Return RSP       |
         * | copied RFLAGS           |
         * | copied CS               |
         * | copied RIP              |
         * +-------------------------+
         * | pt_regs                 |
         * +-------------------------+
         *
         * The saved stack frame is used to fix up the copied stack frame
         * that a nested NMI may change to make the interrupted NMI iret jump
         * to repeat_nmi. The original stack frame and the temp storage
         * are also used by nested NMIs and can not be trusted on exit.
         */
        /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
        movq (%rsp), %rdx
        CFI_RESTORE rdx

        /* Set the NMI executing variable on the stack. */
        pushq_cfi $1

        /* Copy the stack frame to the Saved frame */
        .rept 5
        pushq_cfi 6*8(%rsp)
        .endr
        CFI_DEF_CFA_OFFSET SS+8-RIP

        /* Everything up to here is safe from nested NMIs */

        /*
         * If there was a nested NMI, the first NMI's iret will return
         * here. But NMIs are still enabled and we can take another
         * nested NMI. The nested NMI checks the interrupted RIP to see
         * if it is between repeat_nmi and end_repeat_nmi, and if so
         * it will just return, as we are about to repeat an NMI anyway.
         * This makes it safe to copy to the stack frame that a nested
         * NMI will update.
         */
repeat_nmi:
        /*
         * Update the stack variable to say we are still in NMI (the update
         * is benign for the non-repeat case, where 1 was pushed just above
         * to this very stack slot).
         */
        movq $1, 5*8(%rsp)

        /* Make another copy, this one may be modified by nested NMIs */
        .rept 5
        pushq_cfi 4*8(%rsp)
        .endr
        CFI_DEF_CFA_OFFSET SS+8-RIP
end_repeat_nmi:

        /*
         * Everything below this point can be preempted by a nested
         * NMI if the first NMI took an exception and reset our iret stack
         * so that we repeat another NMI.
         */
        pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        /*
         * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
         * as we should not be calling schedule in NMI context,
         * even with normal interrupts enabled. An NMI should not be
         * setting NEED_RESCHED or anything that normal interrupts and
         * exceptions might do.
         */
        call save_paranoid
        DEFAULT_FRAME 0

        /*
         * Save off the CR2 register. If we take a page fault in the NMI then
         * it could corrupt the CR2 value. If the NMI preempts a page fault
         * handler before it was able to read the CR2 register, and then the
         * NMI itself takes a page fault, the page fault that was preempted
         * will read the information from the NMI page fault and not the
         * original fault. Save it off and restore it if it changes.
         * Use the r12 callee-saved register.
         */
        movq %cr2, %r12

        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
        call do_nmi

        /* Did the NMI take a page fault? Restore cr2 if it did */
        movq %cr2, %rcx
        cmpq %rcx, %r12
        je 1f
        movq %r12, %cr2
1:

        testl %ebx,%ebx			/* swapgs needed? */
        jnz nmi_restore
nmi_swapgs:
        SWAPGS_UNSAFE_STACK
nmi_restore:
        RESTORE_ALL 8
        /* Clear the NMI executing stack variable */
        movq $0, 10*8(%rsp)
        jmp irq_return
        CFI_ENDPROC
END(nmi)

ENTRY(ignore_sysret)
        CFI_STARTPROC
        mov $-ENOSYS,%eax
        sysret
        CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
        .popsection