/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame, this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *	at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *	backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *	There are unfortunately lots of special cases where some registers
 *	are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *	Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *	frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>

	.code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

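	/*
	 * A minimal sketch (illustrative, not normative) of the top-of-stack
	 * slots the macro below fills in, and where each value comes from:
	 *
	 *	SS(%rsp)     <- $__USER_DS	SYSCALL never saved ss
	 *	RSP(%rsp)    <- %gs:pda_oldrsp	user rsp stashed at entry
	 *	CS(%rsp)     <- $__USER_CS	SYSCALL never saved cs
	 *	EFLAGS(%rsp) <- R11(%rsp)	SYSCALL put rflags in r11
	 *	RCX(%rsp)    <- $-1		rcx is clobbered by SYSCALL
	 */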
	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	$__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 4
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -4
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

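/*
 * Illustrative only (not part of the original file): a user-space
 * write(2) using this ABI would look roughly like
 *
 *	movq $1,%rax		# __NR_write
 *	movq $1,%rdi		# arg0: fd
 *	leaq buf(%rip),%rsi	# arg1: buffer
 *	movq $count,%rdx	# arg2: length
 *	syscall			# rcx <- return rip, r11 <- rflags
 *
 * SYSCALL itself clobbers rcx and r11, which is why arg3 travels in r10
 * and is moved back into rcx before the C handler is called.
 */
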
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
		TI_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl  $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count,
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl  TI_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

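/*
 * Illustrative sketch (assumed caller, not from this file): on native
 * hardware the C-level load_gs_index() wrapper resolves to the routine
 * above, e.g. in the context-switch path:
 *
 *	load_gs_index(next->gsindex);
 *
 * The __ex_table entry lets a faulting "movl %edi,%gs" with a bad
 * selector recover through bad_gs, which loads a null selector instead.
 */
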
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
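/*
 * Illustrative sketch (assumed caller, not from this file): spawning a
 * kernel thread that runs my_thread_fn(data), where my_thread_fn and
 * data are hypothetical:
 *
 *	pid = kernel_thread(my_thread_fn, data,
 *			    CLONE_FS | CLONE_FILES | SIGCHLD);
 */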
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning, this avoids the need
	 * of hacks for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
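/*
 * Illustrative sketch (assumed caller, not from this file): in-kernel
 * exec of an init-style helper, with hypothetical argv/envp arrays:
 *
 *	static char *argv[] = { "/sbin/init", NULL };
 *	static char *envp[] = { "HOME=/", "TERM=linux", NULL };
 *	kernel_execve("/sbin/init", argv, envp);
 */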
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)