/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

        .code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt   $9,EFLAGS-\offset(%rsp)    /* interrupts off? */
        jnc  1f
        TRACE_IRQS_ON
1:
#endif
.endm
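/*
 * Added note (for clarity, not in the original): bit 9 of the saved
 * EFLAGS is IF, the interrupt enable flag; the bt/jnc pair above skips
 * TRACE_IRQS_ON when the frame being returned to had interrupts
 * disabled.
 */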

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

        /* %rsp:at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp
        movq %gs:pda_oldrsp,\tmp
        movq \tmp,RSP(%rsp)
        movq $__USER_DS,SS(%rsp)
        movq $__USER_CS,CS(%rsp)
        movq $-1,RCX(%rsp)
        movq R11(%rsp),\tmp     /* get eflags */
        movq \tmp,EFLAGS(%rsp)
        .endm

        .macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq RSP-\offset(%rsp),\tmp
        movq \tmp,%gs:pda_oldrsp
        movq EFLAGS-\offset(%rsp),\tmp
        movq \tmp,R11-\offset(%rsp)
        .endm
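
        /*
         * Illustrative pairing (added note, not in the original): callers
         * such as ptregscall_common below bracket a C call with
         *      FIXUP_TOP_OF_STACK %r11
         *      call *%rax              # C function taking struct pt_regs
         *      RESTORE_TOP_OF_STACK %r11
         * so the C code always sees a fully defined top of stack.
         */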

        .macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq %rax              /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        pushq %rax              /* rsp */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rsp,0
        pushq $(1<<9)           /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS      /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip        /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip,0
        pushq %rax              /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8
        .endm

        .macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET -(6*8)
        .endm


        .macro CFI_DEFAULT_STACK start=1
        .if \start
        CFI_STARTPROC simple
        CFI_DEF_CFA rsp,SS+8
        .else
        CFI_DEF_CFA_OFFSET SS+8
        .endif
        CFI_REL_OFFSET r15,R15
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET r9,R9
        CFI_REL_OFFSET r8,R8
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET rcx,RCX
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rsi,RSI
        CFI_REL_OFFSET rdi,RDI
        CFI_REL_OFFSET rip,RIP
        /*CFI_REL_OFFSET cs,CS*/
        /*CFI_REL_OFFSET rflags,EFLAGS*/
        CFI_REL_OFFSET rsp,RSP
        /*CFI_REL_OFFSET ss,SS*/
        .endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        call schedule_tail
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
        jnz rff_trace
rff_action:
        RESTORE_REST
        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je int_ret_from_sys_call
        testl $_TIF_IA32,threadinfo_flags(%rcx)
        jnz int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call
rff_trace:
        movq %rsp,%rdi
        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
        jmp rff_action
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frame, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

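/*
 * Illustrative example (added note, not in the original): a user-space
 * write(1, buf, 6) arrives here with rax = __NR_write, rdi = 1,
 * rsi = buf and rdx = 6; the SYSCALL instruction itself has put the
 * return RIP in rcx and the saved rflags in r11, which is why arg3
 * travels in r10 and is moved into rcx (the C ABI register) only just
 * before the indirect call through sys_call_table.
 */
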
ENTRY(system_call)
        CFI_STARTPROC simple
        CFI_DEF_CFA rsp,PDA_STACKOFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        swapgs
        movq %rsp,%gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        sti
        SAVE_ARGS 8,1
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
        CFI_REMEMBER_STATE
        jnz tracesys
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
        .globl ret_from_sys_call
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz sysret_careful
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq %gs:pda_oldrsp,%rsp
        swapgs
        sysretq

        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        sti
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz 1f

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_NEED_RESCHED,%edi
        /* Use IRET because user could have changed frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

        /* Do syscall tracing */
tracesys:
        CFI_RESTORE_STATE
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp)
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        LOAD_ARGS ARGOFFSET     /* reload args from stack in case ptrace changed it */
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        ja 1f
        movq %r10,%rcx          /* fixup for C */
        call *sys_call_table(,%rax,8)
1:      movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(system_call)

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
        CFI_STARTPROC simple
        CFI_DEF_CFA rsp,SS+8-ARGOFFSET
        /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
        CFI_REL_OFFSET rsp,RSP-ARGOFFSET
        /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
        /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        CFI_REL_OFFSET rdx,RDX-ARGOFFSET
        CFI_REL_OFFSET rcx,RCX-ARGOFFSET
        CFI_REL_OFFSET rax,RAX-ARGOFFSET
        CFI_REL_OFFSET rdi,RDI-ARGOFFSET
        CFI_REL_OFFSET rsi,RSI-ARGOFFSET
        CFI_REL_OFFSET r8,R8-ARGOFFSET
        CFI_REL_OFFSET r9,R9-ARGOFFSET
        CFI_REL_OFFSET r10,R10-ARGOFFSET
        CFI_REL_OFFSET r11,R11-ARGOFFSET
        cli
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,threadinfo_status(%rcx)
        jmp retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        sti
        SAVE_REST
        /* Check for syscall exit trace */
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
        cli
        TRACE_IRQS_OFF
        jmp int_restore_rest

int_signal:
        testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
        jz 1f
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */

        .macro PTREGSCALL label,func,arg
        .globl \label
\label:
        leaq \func(%rip),%rax
        leaq -ARGOFFSET+8(%rsp),\arg    /* 8 for return address */
        jmp ptregscall_common
END(\label)
        .endm

        CFI_STARTPROC

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi

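/*
 * Illustrative expansion (added note, not in the original):
 * "PTREGSCALL stub_clone, sys_clone, %r8" generates roughly
 *      stub_clone:
 *              leaq sys_clone(%rip),%rax
 *              leaq -ARGOFFSET+8(%rsp),%r8     # struct pt_regs * argument
 *              jmp  ptregscall_common
 * i.e. each stub loads its C handler into rax and a pointer to the
 * register frame into that handler's pt_regs argument register, then
 * shares the save/call/restore path below.
 */
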
ENTRY(ptregscall_common)
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        movq %r11, %r15
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        call *%rax
        RESTORE_TOP_OF_STACK %r11
        movq %r15, %r11
        CFI_REGISTER rip, r11
        RESTORE_REST
        pushq %r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
        ret
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
        .macro _frame ref
        CFI_STARTPROC simple
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
        .endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
        .macro interrupt func
        cld
        SAVE_ARGS
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        pushq %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp, 0
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        testl $3,CS(%rdi)
        je 1f
        swapgs
1:      incl %gs:pda_irqcount   # RED-PEN should check preempt count
        cmoveq %gs:pda_irqstackptr,%rsp
        push %rbp               # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
        TRACE_IRQS_OFF
        call \func
        .endm
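
/*
 * Added note (assumption, not in the original): pda_irqcount starts at
 * -1, so the incl above yields zero, and cmoveq therefore switches to
 * the per-CPU interrupt stack, only on the outermost interrupt; nested
 * interrupts keep running on the stack they arrived on.
 */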

ENTRY(common_interrupt)
        XCPT_FRAME
        interrupt do_IRQ
        /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
        cli
        TRACE_IRQS_OFF
        decl %gs:pda_irqcount
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful
retint_swapgs:
        /*
         * The iretq could re-enable interrupts:
         */
        cli
        TRACE_IRQS_IRETQ
        swapgs
        jmp restore_args

retint_restore_args:
        cli
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0
iret_label:
        iretq

        .section __ex_table,"a"
        .quad iret_label,bad_iret
        .previous
        .section .fixup,"ax"
        /* force a signal here? this matches i386 behaviour */
        /* running with kernel gs */
bad_iret:
        movq $11,%rdi   /* SIGSEGV */
        TRACE_IRQS_ON
        sti
        jmp do_exit
        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        sti
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        movl $_TIF_NEED_RESCHED,%edi
        GET_THREAD_INFO(%rcx)
        jmp retint_check

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,threadinfo_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
        .macro apicinterrupt num,func
        INTR_FRAME
        pushq $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \func
        jmp ret_from_intr
        CFI_ENDPROC
        .endm
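
/*
 * Added note (assumption, not in the original): the vector is pushed
 * bitwise-complemented, so the orig_rax slot holds a negative value
 * that cannot be mistaken for a system call number; the C handler
 * recovers the vector by complementing it again.
 */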

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

        .macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
        .endm

        INVALIDATE_ENTRY 0
        INVALIDATE_ENTRY 1
        INVALIDATE_ENTRY 2
        INVALIDATE_ENTRY 3
        INVALIDATE_ENTRY 4
        INVALIDATE_ENTRY 5
        INVALIDATE_ENTRY 6
        INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
        .macro zeroentry sym
        INTR_FRAME
        pushq $0        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
        .endm

        .macro errorentry sym
        XCPT_FRAME
        pushq %rax
        CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
        .endm

        /* error code is on the stack already */
        /* handle NMI-like exceptions that can happen everywhere */
        .macro paranoidentry sym, ist=0, irqtrace=1
        SAVE_ALL
        cld
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f
        swapgs
        xorl %ebx,%ebx
1:
        .if \ist
        movq %gs:pda_data_offset, %rbp
        .endif
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        .if \ist
        subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        call \sym
        .if \ist
        addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        cli
        .if \irqtrace
        TRACE_IRQS_OFF
        .endif
        .endm
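
/*
 * Added note (for clarity, not in the original): after rdmsr of
 * MSR_GS_BASE, %edx holds the upper 32 bits of the GS base; a negative
 * value means a kernel address, i.e. swapgs has already happened, so
 * ebx stays 1 ("no swapgs needed on exit"). Otherwise we swapgs here
 * and clear ebx so the exit path knows to swap back.
 */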

        /*
         * "Paranoid" exit path from exception stack.
         * Paranoid because this is used by NMIs and cannot take
         * any kernel state for granted.
         * We don't do kernel preemption checks here, because only
         * NMI should be common and it does not enable IRQs and
         * cannot get reschedule ticks.
         *
         * "trace" is 0 for the NMI handler only, because irq-tracing
         * is fundamentally NMI-unsafe. (we cannot change the soft and
         * hard flags at once, atomically)
         */
        .macro paranoidexit trace=1
        /* ebx: no swapgs flag */
paranoid_exit\trace:
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz paranoid_restore\trace
        testl $3,CS(%rsp)
        jnz paranoid_userspace\trace
paranoid_swapgs\trace:
        .if \trace
        TRACE_IRQS_IRETQ 0
        .endif
        swapgs
paranoid_restore\trace:
        RESTORE_ALL 8
        iretq
paranoid_userspace\trace:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs\trace
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule\trace
        movl %ebx,%edx                  /* arg3: thread flags */
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
paranoid_schedule\trace:
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        call schedule
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
        CFI_ENDPROC
        .endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET (14*8)
        movq %rsi,13*8(%rsp)
        CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        movq %rdx,12*8(%rsp)
        CFI_REL_OFFSET rdx,RDX
        movq %rcx,11*8(%rsp)
        CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET rax,RAX
        movq %r8, 9*8(%rsp)
        CFI_REL_OFFSET r8,R8
        movq %r9, 8*8(%rsp)
        CFI_REL_OFFSET r9,R9
        movq %r10,7*8(%rsp)
        CFI_REL_OFFSET r10,R10
        movq %r11,6*8(%rsp)
        CFI_REL_OFFSET r11,R11
        movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET rbx,RBX
        movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET rbp,RBP
        movq %r12,3*8(%rsp)
        CFI_REL_OFFSET r12,R12
        movq %r13,2*8(%rsp)
        CFI_REL_OFFSET r13,R13
        movq %r14,1*8(%rsp)
        CFI_REL_OFFSET r14,R14
        movq %r15,(%rsp)
        CFI_REL_OFFSET r15,R15
        xorl %ebx,%ebx
        testl $3,CS(%rsp)
        je error_kernelspace
error_swapgs:
        swapgs
error_sti:
        movq %rdi,RDI(%rsp)
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
        movl %ebx,%eax
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        movl threadinfo_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        /*
         * The iret might restore flags:
         */
        TRACE_IRQS_IRETQ
        swapgs
        RESTORE_ARGS 0,8,0
        jmp iret_label
        CFI_ENDPROC

error_kernelspace:
        incl %ebx
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq iret_label(%rip),%rbp
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        movl %ebp,%ebp  /* zero extend */
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        cmpq $gs_change,RIP(%rsp)
        je error_swapgs
        jmp error_sti
KPROBE_END(error_entry)

        /* Reload gs selector with exception handling */
        /* edi: new selector */
ENTRY(load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        cli
        swapgs
gs_change:
        movl %edi,%gs
2:      mfence          /* workaround */
        swapgs
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
ENDPROC(load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        swapgs          /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous
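
/*
 * Added note (not in the original): the __ex_table entry pairs the
 * address of the potentially faulting "movl %edi,%gs" at gs_change
 * with the fixup at bad_gs; if loading the selector faults, the fault
 * handler resumes at bad_gs, which loads a null selector instead and
 * rejoins the mfence/swapgs/popf tail via "jmp 2b".
 */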

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq %rdx,%rdi
        orq kernel_thread_flags(%rip),%rdi
        movq $-1, %rsi
        movq %rsp, %rdx

        xorl %r8d,%r8d
        xorl %r9d,%r9d

        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
        xorl %edi,%edi

        /*
         * It isn't worth checking for a reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not to reschedule the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_thread)
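
/*
 * Illustrative usage from C (hypothetical caller, not in the original):
 *      long pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 * my_thread_fn is an int (*fn)(void *) that runs in the child; see
 * child_rip below for how fn and arg are handed over after do_fork.
 */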

child_rip:
        pushq $0                # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq %rdi, %rax
        movq %rsi, %rdi
        call *%rax
        # exit
        xorl %edi, %edi
        call do_exit
        CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(execve)

KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
KPROBE_ENTRY(debug)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
KPROBE_END(debug)

        /* runs on exception stack */
KPROBE_ENTRY(nmi)
        INTR_FRAME
        pushq $-1
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
        paranoidexit 0
#else
        jmp paranoid_exit1
        CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK
        jmp paranoid_exit1
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        zeroentry do_overflow
END(overflow)

ENTRY(bounds)
        zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
        zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
        zeroentry do_reserved
END(reserved)

        /* runs on exception stack */
ENTRY(double_fault)
        XCPT_FRAME
        paranoidentry do_double_fault
        jmp paranoid_exit1
        CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        XCPT_FRAME
        paranoidentry do_stack_segment
        jmp paranoid_exit1
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
        zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp paranoid_exit1
        CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        mov %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
        push %rbp               # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl %gs:pda_irqcount
        ret
        CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
        CFI_STARTPROC
        movq %r15, R15(%rdi)
        movq %r14, R14(%rdi)
        xchgq %rsi, %rdx
        movq %r13, R13(%rdi)
        movq %r12, R12(%rdi)
        xorl %eax, %eax
        movq %rbp, RBP(%rdi)
        movq %rbx, RBX(%rdi)
        movq (%rsp), %rcx
        movq %rax, R11(%rdi)
        movq %rax, R10(%rdi)
        movq %rax, R9(%rdi)
        movq %rax, R8(%rdi)
        movq %rax, RAX(%rdi)
        movq %rax, RCX(%rdi)
        movq %rax, RDX(%rdi)
        movq %rax, RSI(%rdi)
        movq %rax, RDI(%rdi)
        movq %rax, ORIG_RAX(%rdi)
        movq %rcx, RIP(%rdi)
        leaq 8(%rsp), %rcx
        movq $__KERNEL_CS, CS(%rdi)
        movq %rax, EFLAGS(%rdi)
        movq %rcx, RSP(%rdi)
        movq $__KERNEL_DS, SS(%rdi)
        jmpq *%rdx
        CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif