/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)

	.code64
	.section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CC_USING_FENTRY
# define function_hook	__fentry__
#else
# define function_hook	mcount
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(function_hook)
	retq
END(function_hook)

/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
	MCOUNT_SAVE_FRAME \skip

	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* Load ip into the first parameter */
	movq RIP(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi
	/* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
	movq SS+16(%rsp), %rsi
#else
	movq 8(%rbp), %rsi
#endif
.endm
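/*
 * Orientation (a hedged note, not from this file): the loads above line
 * up with the C-side ftrace callback convention on x86-64, roughly
 * func(ip in %rdi, parent_ip in %rsi, ftrace_ops in %rdx, pt_regs in
 * %rcx) -- the callers below fill %rcx with either NULL or a pointer to
 * the saved register frame.
 */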
ENTRY(ftrace_caller)
	/* Check if tracing was disabled (quick check) */
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	ftrace_caller_setup
	/* regs go into 4th parameter (but make it NULL) */
	movq $0, %rcx

GLOBAL(ftrace_call)
	call ftrace_stub

	MCOUNT_RESTORE_FRAME
ftrace_return:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub
#endif

/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
	retq
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	/* Save the current flags before compare (in SS location) */
	pushfq

	/* Check if tracing was disabled (quick check) */
	cmpl $0, function_trace_stop
	jne  ftrace_restore_flags

	/* skip=8 to skip flags saved in SS */
	ftrace_caller_setup 8

	/* Save the rest of pt_regs */
	movq %r15, R15(%rsp)
	movq %r14, R14(%rsp)
	movq %r13, R13(%rsp)
	movq %r12, R12(%rsp)
	movq %r11, R11(%rsp)
	movq %r10, R10(%rsp)
	movq %rbp, RBP(%rsp)
	movq %rbx, RBX(%rsp)
	/* Copy saved flags */
	movq SS(%rsp), %rcx
	movq %rcx, EFLAGS(%rsp)
	/* Kernel segments */
	movq $__KERNEL_DS, %rcx
	movq %rcx, SS(%rsp)
	movq $__KERNEL_CS, %rcx
	movq %rcx, CS(%rsp)
	/* Stack - skipping return address */
	leaq SS+16(%rsp), %rcx
	movq %rcx, RSP(%rsp)

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	/* Copy flags back to SS, to restore them */
	movq EFLAGS(%rsp), %rax
	movq %rax, SS(%rsp)

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, SS+8(%rsp)

	/* restore the rest of pt_regs */
	movq R15(%rsp), %r15
	movq R14(%rsp), %r14
	movq R13(%rsp), %r13
	movq R12(%rsp), %r12
	movq R10(%rsp), %r10
	movq RBP(%rsp), %rbp
	movq RBX(%rsp), %rbx

	/* skip=8 to skip flags saved in SS */
	MCOUNT_RESTORE_FRAME 8

	/* Restore flags */
	popfq

	jmp ftrace_return
ftrace_restore_flags:
	popfq
	jmp  ftrace_stub

END(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	MCOUNT_SAVE_FRAME

	movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
	movq SS+16(%rsp), %rsi
#else
	movq 8(%rbp), %rsi
#endif
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	MCOUNT_RESTORE_FRAME

	jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	MCOUNT_SAVE_FRAME

#ifdef CC_USING_FENTRY
	leaq SS+16(%rsp), %rdi
	movq $0, %rdx	/* No framepointers needed */
#else
	leaq 8(%rbp), %rdi
	movq (%rbp), %rdx
#endif
	movq RIP(%rsp), %rsi
	subq $MCOUNT_INSN_SIZE, %rsi

	call	prepare_ftrace_return

	MCOUNT_RESTORE_FRAME

	retq
END(ftrace_graph_caller)

GLOBAL(return_to_handler)
	subq  $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $24, %rsp
	jmp *%rdi
#endif


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
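/*
 * Note added for clarity (a certain fact about the ISA): bit 9 of EFLAGS
 * is IF, the interrupt-enable flag, i.e. X86_EFLAGS_IF == (1 << 9), so
 * the "bt $9" above asks whether the interrupted context had interrupts
 * enabled before telling lockdep that they are about to be on again.
 */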
/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call debug_stack_set_zero
	TRACE_IRQS_OFF
	call debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call debug_stack_set_zero
	TRACE_IRQS_ON
	call debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq PER_CPU_VAR(old_rsp),\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,PER_CPU_VAR(old_rsp)
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm
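/*
 * Usage sketch (hedged; it mirrors the stubs further down): a path that
 * hands the partial SYSCALL frame to a pt_regs-taking C function brackets
 * the call like this, so the top-of-stack words are defined on entry and
 * the SYSRET state (RCX/R11) is resynced afterwards:
 *
 *	FIXUP_TOP_OF_STACK %r11
 *	call sys_something		# hypothetical pt_regs-taking syscall
 *	RESTORE_TOP_OF_STACK %r11
 */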

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq_cfi $__KERNEL_DS /* ss */
	/*CFI_REL_OFFSET ss,0*/
	pushq_cfi %rax /* rsp */
	CFI_REL_OFFSET rsp,0
	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
	/*CFI_REL_OFFSET rflags,0*/
	pushq_cfi $__KERNEL_CS /* cs */
	/*CFI_REL_OFFSET cs,0*/
	pushq_cfi \child_rip /* rip */
	CFI_REL_OFFSET rip,0
	pushq_cfi %rax /* orig rax */
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
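/*
 * Resulting layout, as a sketch derived directly from the pushes above
 * (highest address first): SS=__KERNEL_DS, RSP=0, EFLAGS=IF|FIXED,
 * CS=__KERNEL_CS, RIP=\child_rip, ORIG_RAX=0 -- six 8-byte words, which
 * is exactly what UNFAKE_STACK_FRAME's "addq $8*6, %rsp" discards.
 */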

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/* save partial stack frame */
	.macro SAVE_ARGS_IRQ
	cld
	/* start from rbp in pt_regs and jump over */
	movq_cfi rdi, (RDI-RBP)
	movq_cfi rsi, (RSI-RBP)
	movq_cfi rdx, (RDX-RBP)
	movq_cfi rcx, (RCX-RBP)
	movq_cfi rax, (RAX-RBP)
	movq_cfi  r8,  (R8-RBP)
	movq_cfi  r9,  (R9-RBP)
	movq_cfi r10, (R10-RBP)
	movq_cfi r11, (R11-RBP)

	/* Save rbp so that we can unwind from get_irq_regs() */
	movq_cfi rbp, 0

	/* Save previous stack value */
	movq %rsp, %rsi

	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
	testl $3, CS-RBP(%rsi)
	je 1f
	SWAPGS
	/*
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl PER_CPU_VAR(irq_count)
	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
	CFI_DEF_CFA_REGISTER	rsi

	/* Store previous stack value */
	pushq %rsi
	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
			0x77 /* DW_OP_breg7 */, 0, \
			0x06 /* DW_OP_deref */, \
			0x08 /* DW_OP_const1u */, SS+8-RBP, \
			0x22 /* DW_OP_plus */
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF
	.endm

ENTRY(save_rest)
	PARTIAL_FRAME 1 (REST_SKIP+8)
	movq 5*8+16(%rsp), %r11	/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)	/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
	.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f	/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)
	.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	DEFAULT_FRAME

	LOCK ; btr $TIF_FORK,TI_flags(%r8)

	pushq_cfi $0x0002
	popfq_cfi				# reset kernel eflags

	call schedule_tail			# rdi: 'prev' task parameter

	GET_THREAD_INFO(%rcx)

	RESTORE_REST

	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
	jz   1f

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the ia32entry paths.
	 * Use int_ret_from_sys_call to return, since it can safely handle
	 * all of the above.
	 */
	jmp  int_ret_from_sys_call

1:
	subq $REST_SKIP, %rsp	# leave space for volatiles
	CFI_ADJUST_CFA_OFFSET	REST_SKIP
	movq %rbp, %rdi
	call *%rbx
	movl $0, RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer. However, it does mask the flags register for us, so
 * CLD and CLAC are not needed.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't
 *	have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * IRET deals with non-canonical addresses better; SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
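/*
 * Hedged illustration of the table above (the syscall name is made up):
 * for a user call syscall(__NR_foo, a, b, c, d), userspace loads a->%rdi,
 * b->%rsi, c->%rdx and d->%r10; the "movq %r10,%rcx" before the dispatch
 * below converts that into the C ABI, whose 4th argument lives in %rcx
 * (which the SYSCALL instruction itself clobbers with the return RIP).
 */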

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(system_call_after_swapgs)

	movq	%rsp,PER_CPU_VAR(old_rsp)
	movq	PER_CPU_VAR(kernel_stack),%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,0
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq $__NR_syscall_max,%rax
#else
	andl $__SYSCALL_MASK,%eax
	cmpl $__NR_syscall_max,%eax
#endif
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	 rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 1,-ARG_SKIP,0
	/*CFI_REGISTER	rflags,r11*/
	movq	PER_CPU_VAR(old_rsp), %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/*
	 * We have a signal, or exit tracing or single-step.
	 * These all wind up with the iret return path anyway,
	 * so just join that path right now.
	 */
	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
	jmp int_check_syscall_exit_work

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call __audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call __audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call __audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq RAX-ARGOFFSET(%rsp),%rsi	/* second arg, syscall return value */
	cmpq $-MAX_ERRNO,%rsi	/* is it < -MAX_ERRNO? */
	setbe %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	call __audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
#if __SYSCALL_MASK == ~0
	cmpq $__NR_syscall_max,%rax
#else
	andl $__SYSCALL_MASK,%eax
	cmpl $__NR_syscall_max,%eax
#endif
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
int_check_syscall_exit_work:
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq_cfi %rdi
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq_cfi %rdi
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

	.macro FORK_LIKE func
ENTRY(stub_\func)
	CFI_STARTPROC
	popq	%r11			/* save return address */
	PARTIAL_FRAME 0
	SAVE_REST
	pushq	%r11			/* put it back on stack */
	FIXUP_TOP_OF_STACK %r11, 8
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	call sys_\func
	RESTORE_TOP_OF_STACK %r11, 8
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(stub_\func)
	.endm

	.macro FIXED_FRAME label,func
ENTRY(\label)
	CFI_STARTPROC
	PARTIAL_FRAME 0 8		/* offset 8: return address */
	FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
	call \func
	RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
	ret
	CFI_ENDPROC
END(\label)
	.endm

	FORK_LIKE  clone
	FORK_LIKE  fork
	FORK_LIKE  vfork
	FIXED_FRAME stub_iopl, sys_iopl
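/*
 * Expansion sketch (hedged; a mechanical application of the macro above,
 * CFI annotations omitted): "FORK_LIKE clone" emits a stub_clone that
 * saves the callee-saved registers, repairs the top of stack, and
 * dispatches to sys_clone:
 *
 *	ENTRY(stub_clone)
 *		popq	%r11
 *		SAVE_REST
 *		pushq	%r11
 *		FIXUP_TOP_OF_STACK %r11, 8
 *		call	sys_clone
 *		RESTORE_TOP_OF_STACK %r11, 8
 *		ret	$REST_SKIP
 *	END(stub_clone)
 */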

ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys32_x32_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call compat_sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_execve)

#endif

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
	.section .init.rodata,"a"
ENTRY(interrupt)
	.section .entry.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous
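/*
 * Why ~vector+0x80 (a note added here; the arithmetic follows from the
 * comments above and at common_interrupt below): pushing the bitwise NOT
 * of the vector, biased by 0x80, keeps every pushed value in signed-byte
 * range, so each stub's push encodes compactly; common_interrupt's
 * "addq $-0x80,(%rsp)" then shifts the saved value into [-256,-1], and
 * the C side recovers the vector by complementing it again.
 */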

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	/* reserve pt_regs for scratch regs and rbp */
	subq $ORIG_RAX-RBP, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
	SAVE_ARGS_IRQ
	call \func
	.endm

/*
 * Interrupt entry/exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	ASM_CLAC
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq %rsi
	CFI_DEF_CFA rsi,SS+8-RBP	/* reg/off reset after def_cfa_expr */
	leaq ARGOFFSET-RBP(%rsi), %rsp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	RBP-ARGOFFSET

exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 1,8,1

irq_return:
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb $4,(SS-RIP)(%rsp)
	jnz native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	pushq_cfi %rax
	pushq_cfi %rdi
	SWAPGS
	movq PER_CPU_VAR(espfix_waddr),%rdi
	movq %rax,(0*8)(%rdi)	/* RAX */
	movq (2*8)(%rsp),%rax	/* RIP */
	movq %rax,(1*8)(%rdi)
	movq (3*8)(%rsp),%rax	/* CS */
	movq %rax,(2*8)(%rdi)
	movq (4*8)(%rsp),%rax	/* RFLAGS */
	movq %rax,(3*8)(%rdi)
	movq (6*8)(%rsp),%rax	/* SS */
	movq %rax,(5*8)(%rdi)
	movq (5*8)(%rsp),%rax	/* RSP */
	movq %rax,(4*8)(%rdi)
	andl $0xffff0000,%eax
	popq_cfi %rdi
	orq PER_CPU_VAR(espfix_stack),%rax
	SWAPGS
	movq %rax,%rsp
	popq_cfi %rax
	jmp native_irq_return_iret
#endif
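/*
 * Added note (a certain fact about segment selectors, not stated in
 * this file): the "testb $4" above checks bit 2 of the saved SS
 * selector, the TI bit, which is set exactly when the selector
 * references the LDT -- the only case in which the espfix64 fixup for
 * truncated 16-bit stack pointers is needed.
 */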

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	 threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif
	CFI_ENDPROC
END(common_interrupt)

/*
 * End of kprobes section
 */
	.popsection

/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
	INTR_FRAME
	ASM_CLAC
	pushq_cfi $~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp ret_from_intr
	CFI_ENDPROC
END(\sym)
.endm
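/*
 * Expansion sketch (hedged; a mechanical application of the macro above,
 * CFI annotations omitted): "apicinterrupt LOCAL_TIMER_VECTOR
 * apic_timer_interrupt smp_apic_timer_interrupt" generates roughly:
 *
 *	ENTRY(apic_timer_interrupt)
 *		ASM_CLAC
 *		pushq	$~(LOCAL_TIMER_VECTOR)
 *		interrupt smp_apic_timer_interrupt
 *		jmp	ret_from_intr
 *	END(apic_timer_interrupt)
 */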

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt REBOOT_VECTOR \
	reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
	uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
	x86_platform_ipi smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt POSTED_INTR_VECTOR \
	kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
#endif

apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
	irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
	INTR_FRAME
	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call save_paranoid
	TRACE_IRQS_OFF_DEBUG
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
	call \do_sym
	addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro errorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
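/*
 * Hedged note on the C side of these macros: the argument setup above
 * matches exception handlers declared roughly as
 *
 *	dotraplinkage void do_sym(struct pt_regs *regs, long error_code);
 *
 * with %rdi carrying the pt_regs pointer and %rsi the error code
 * (forced to zero by the zeroentry variants).
 */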

	/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $ORIG_RAX-R15, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
	call save_paranoid
	DEFAULT_FRAME 0
	TRACE_IRQS_OFF
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error


	/* Reload gs selector with exception handling */
	/* edi:  new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushfq_cfi
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popfq_cfi
	ret
	CFI_ENDPROC
END(native_load_gs_index)

	_ASM_EXTABLE(gs_change,bad_gs)
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	pushq_cfi %rbp
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl PER_CPU_VAR(irq_count)
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_RESTORE		rbp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl PER_CPU_VAR(irq_count)
	ret
	CFI_ENDPROC
END(call_softirq)
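/*
 * A hedged note on the incl/cmove pair above (and in SAVE_ARGS_IRQ):
 * the per-cpu irq_count is assumed to start below zero, so the increment
 * yields zero -- and the conditional move onto the irq stack fires --
 * only on the outermost entry; nested entries keep the stack they are
 * already on, which is what the "already on an interrupt stack" comment
 * earlier in this file relies on.
 */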
75154f40 | 1381 | |
3d75e1b8 | 1382 | #ifdef CONFIG_XEN |
322648d1 | 1383 | zeroentry xen_hypervisor_callback xen_do_hypervisor_callback |
3d75e1b8 JF |
1384 | |
1385 | /* | |
9f1e87ea CG |
1386 | * A note on the "critical region" in our callback handler. |
1387 | * We want to avoid stacking callback handlers due to events occurring | |
1388 | * during handling of the last event. To do this, we keep events disabled | |
1389 | * until we've done all processing. HOWEVER, we must enable events before | |
1390 | * popping the stack frame (can't be done atomically) and so it would still | |
1391 | * be possible to get enough handler activations to overflow the stack. | |
1392 | * Although unlikely, bugs of that kind are hard to track down, so we'd | |
1393 | * like to avoid the possibility. | |
1394 | * So, on entry to the handler we detect whether we interrupted an | |
1395 | * existing activation in its critical region -- if so, we pop the current | |
1396 | * activation and restart the handler using the previous one. | |
1397 | */ | |
3d75e1b8 JF |
1398 | ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) |
1399 | CFI_STARTPROC | |
9f1e87ea CG |
1400 | /* |
1401 | * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will | |
1402 | * see the correct pointer to the pt_regs | |
1403 | */ | |
3d75e1b8 JF |
1404 | movq %rdi, %rsp # we don't return, adjust the stack frame |
1405 | CFI_ENDPROC | |
dcd072e2 | 1406 | DEFAULT_FRAME |
56895530 | 1407 | 11: incl PER_CPU_VAR(irq_count) |
3d75e1b8 JF |
1408 | movq %rsp,%rbp |
1409 | CFI_DEF_CFA_REGISTER rbp | |
26f80bd6 | 1410 | cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
3d75e1b8 JF |
1411 | pushq %rbp # backlink for old unwinder |
1412 | call xen_evtchn_do_upcall | |
1413 | popq %rsp | |
1414 | CFI_DEF_CFA_REGISTER rsp | |
56895530 | 1415 | decl PER_CPU_VAR(irq_count) |
3d75e1b8 JF |
1416 | jmp error_exit |
1417 | CFI_ENDPROC | |
371c394a | 1418 | END(xen_do_hypervisor_callback) |
3d75e1b8 JF |
1419 | |
1420 | /* | |
9f1e87ea CG |
1421 | * Hypervisor uses this for application faults while it executes. |
1422 | * We get here for two reasons: | |
1423 | * 1. Fault while reloading DS, ES, FS or GS | |
1424 | * 2. Fault while executing IRET | |
1425 | * Category 1 we do not need to fix up as Xen has already reloaded all segment | |
1426 | * registers that could be reloaded and zeroed the others. | |
1427 | * Category 2 we fix up by killing the current process. We cannot use the | |
1428 | * normal Linux return path in this case because if we use the IRET hypercall | |
1429 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
1430 | * We distinguish between categories by comparing each saved segment register | |
1431 | * with its current contents: any discrepancy means we in category 1. | |
1432 | */ | |
3d75e1b8 | 1433 | ENTRY(xen_failsafe_callback) |
dcd072e2 AH |
1434 | INTR_FRAME 1 (6*8) |
1435 | /*CFI_REL_OFFSET gs,GS*/ | |
1436 | /*CFI_REL_OFFSET fs,FS*/ | |
1437 | /*CFI_REL_OFFSET es,ES*/ | |
1438 | /*CFI_REL_OFFSET ds,DS*/ | |
1439 | CFI_REL_OFFSET r11,8 | |
1440 | CFI_REL_OFFSET rcx,0 | |
3d75e1b8 JF |
1441 | movw %ds,%cx |
1442 | cmpw %cx,0x10(%rsp) | |
1443 | CFI_REMEMBER_STATE | |
1444 | jne 1f | |
1445 | movw %es,%cx | |
1446 | cmpw %cx,0x18(%rsp) | |
1447 | jne 1f | |
1448 | movw %fs,%cx | |
1449 | cmpw %cx,0x20(%rsp) | |
1450 | jne 1f | |
1451 | movw %gs,%cx | |
1452 | cmpw %cx,0x28(%rsp) | |
1453 | jne 1f | |
1454 | /* All segments match their saved values => Category 2 (Bad IRET). */ | |
1455 | movq (%rsp),%rcx | |
1456 | CFI_RESTORE rcx | |
1457 | movq 8(%rsp),%r11 | |
1458 | CFI_RESTORE r11 | |
1459 | addq $0x30,%rsp | |
1460 | CFI_ADJUST_CFA_OFFSET -0x30 | |
14ae22ba IM |
1461 | pushq_cfi $0 /* RIP */ |
1462 | pushq_cfi %r11 | |
1463 | pushq_cfi %rcx | |
4a5c3e77 | 1464 | jmp general_protection |
3d75e1b8 JF |
1465 | CFI_RESTORE_STATE |
1466 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ | |
1467 | movq (%rsp),%rcx | |
1468 | CFI_RESTORE rcx | |
1469 | movq 8(%rsp),%r11 | |
1470 | CFI_RESTORE r11 | |
1471 | addq $0x30,%rsp | |
1472 | CFI_ADJUST_CFA_OFFSET -0x30 | |
a349e23d | 1473 | pushq_cfi $-1 /* orig_ax = -1 => not a system call */ |
3d75e1b8 JF |
1474 | SAVE_ALL |
1475 | jmp error_exit | |
1476 | CFI_ENDPROC | |
3d75e1b8 JF |
1477 | END(xen_failsafe_callback) |
1478 | ||
bc2b0331 | 1479 | apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ |
38e20b07 SY |
1480 | xen_hvm_callback_vector xen_evtchn_do_upcall |
1481 | ||
3d75e1b8 | 1482 | #endif /* CONFIG_XEN */ |
ddeb8f21 | 1483 | |
bc2b0331 S |
1484 | #if IS_ENABLED(CONFIG_HYPERV) |
1485 | apicinterrupt HYPERVISOR_CALLBACK_VECTOR \ | |
1486 | hyperv_callback_vector hyperv_vector_handler | |
1487 | #endif /* CONFIG_HYPERV */ | |
1488 | ||
ddeb8f21 AH |
1489 | /* |
1490 | * Some functions should be protected against kprobes | |
1491 | */ | |
1492 | .pushsection .kprobes.text, "ax" | |
1493 | ||
1494 | paranoidzeroentry_ist debug do_debug DEBUG_STACK | |
1495 | paranoidzeroentry_ist int3 do_int3 DEBUG_STACK | |
fd5683d0 | 1496 | errorentry stack_segment do_stack_segment |
6cac5a92 JF |
1497 | #ifdef CONFIG_XEN |
1498 | zeroentry xen_debug do_debug | |
1499 | zeroentry xen_int3 do_int3 | |
1500 | errorentry xen_stack_segment do_stack_segment | |
1501 | #endif | |
ddeb8f21 AH |
1502 | errorentry general_protection do_general_protection |
1503 | errorentry page_fault do_page_fault | |
631bc487 GN |
1504 | #ifdef CONFIG_KVM_GUEST |
1505 | errorentry async_page_fault do_async_page_fault | |
1506 | #endif | |
ddeb8f21 | 1507 | #ifdef CONFIG_X86_MCE |
5d727926 | 1508 | paranoidzeroentry machine_check *machine_check_vector(%rip) |
ddeb8f21 AH |
1509 | #endif |
1510 | ||
1511 | /* | |
9f1e87ea CG |
1512 | * "Paranoid" exit path from exception stack. |
1513 | * Paranoid because this is used by NMIs and cannot take | |
ddeb8f21 AH |
1514 | * any kernel state for granted. |
1515 | * We don't do kernel preemption checks here, because the only
1516 | * common caller is the NMI handler, which does not enable IRQs
1517 | * and cannot get reschedule ticks.
1518 | * | |
1519 | * "trace" is 0 for the NMI handler only, because irq-tracing | |
1520 | * is fundamentally NMI-unsafe (we cannot change the soft and
1521 | * hard IRQ flags atomically).
1522 | */ | |
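/*
 * The exit decision below, as C-flavored pseudocode (a sketch; %ebx is
 * the "no swapgs" flag established by the paranoid entry code):
 *
 *	if (ebx)			// GS was already the kernel's
 *		goto paranoid_restore;	// restore registers, iretq
 *	if (!(regs->cs & 3))		// returning to kernel mode
 *		goto paranoid_swapgs;	// swapgs back, restore, iretq
 *	while (ti->flags & _TIF_WORK_MASK)
 *		handle_work();		// SCHEDULE_USER or do_notify_resume()
 *	goto paranoid_swapgs;
 */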
1523 | ||
1524 | /* ebx: no swapgs flag */ | |
1525 | ENTRY(paranoid_exit) | |
1f130a78 | 1526 | DEFAULT_FRAME |
ddeb8f21 | 1527 | DISABLE_INTERRUPTS(CLBR_NONE) |
5963e317 | 1528 | TRACE_IRQS_OFF_DEBUG |
ddeb8f21 AH |
1529 | testl %ebx,%ebx /* swapgs needed? */ |
1530 | jnz paranoid_restore | |
1531 | testl $3,CS(%rsp) | |
1532 | jnz paranoid_userspace | |
1533 | paranoid_swapgs: | |
1534 | TRACE_IRQS_IRETQ 0 | |
1535 | SWAPGS_UNSAFE_STACK | |
0300e7f1 SR |
1536 | RESTORE_ALL 8 |
1537 | jmp irq_return | |
ddeb8f21 | 1538 | paranoid_restore: |
5963e317 | 1539 | TRACE_IRQS_IRETQ_DEBUG 0 |
ddeb8f21 AH |
1540 | RESTORE_ALL 8 |
1541 | jmp irq_return | |
1542 | paranoid_userspace: | |
1543 | GET_THREAD_INFO(%rcx) | |
1544 | movl TI_flags(%rcx),%ebx | |
1545 | andl $_TIF_WORK_MASK,%ebx | |
1546 | jz paranoid_swapgs | |
1547 | movq %rsp,%rdi /* &pt_regs */ | |
1548 | call sync_regs | |
1549 | movq %rax,%rsp /* switch stack for scheduling */ | |
1550 | testl $_TIF_NEED_RESCHED,%ebx | |
1551 | jnz paranoid_schedule | |
1552 | movl %ebx,%edx /* arg3: thread flags */ | |
1553 | TRACE_IRQS_ON | |
1554 | ENABLE_INTERRUPTS(CLBR_NONE) | |
1555 | xorl %esi,%esi /* arg2: oldset */ | |
1556 | movq %rsp,%rdi /* arg1: &pt_regs */ | |
1557 | call do_notify_resume | |
1558 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1559 | TRACE_IRQS_OFF | |
1560 | jmp paranoid_userspace | |
1561 | paranoid_schedule: | |
1562 | TRACE_IRQS_ON | |
1563 | ENABLE_INTERRUPTS(CLBR_ANY) | |
0430499c | 1564 | SCHEDULE_USER |
ddeb8f21 AH |
1565 | DISABLE_INTERRUPTS(CLBR_ANY) |
1566 | TRACE_IRQS_OFF | |
1567 | jmp paranoid_userspace | |
1568 | CFI_ENDPROC | |
1569 | END(paranoid_exit) | |
1570 | ||
1571 | /* | |
1572 | * Exception entry point. This expects an error code/orig_rax on the stack. | |
1573 | * Returns the "no swapgs" flag in %ebx.
1574 | */ | |
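/*
 * What error_entry establishes, sketched in C (hedged pseudocode, not a
 * real interface; came_from_user_mode() stands for "CS on the stack has
 * RPL != 0"):
 *
 *	save_all_gp_registers();	// the movq_cfi sequence below
 *	if (came_from_user_mode()) {
 *		swapgs();		// switch to the kernel GS base
 *		return 0;		// %ebx == 0: exit must swapgs back
 *	}
 *	return 1;			// %ebx == 1: no swapgs on exit
 *	// error_kernelspace below refines the kernel-mode case for the
 *	// few spots that fault while still running with the user GS.
 */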
1575 | ENTRY(error_entry) | |
1576 | XCPT_FRAME | |
1577 | CFI_ADJUST_CFA_OFFSET 15*8 | |
1578 | /* oldrax contains error code */ | |
1579 | cld | |
1580 | movq_cfi rdi, RDI+8 | |
1581 | movq_cfi rsi, RSI+8 | |
1582 | movq_cfi rdx, RDX+8 | |
1583 | movq_cfi rcx, RCX+8 | |
1584 | movq_cfi rax, RAX+8 | |
1585 | movq_cfi r8, R8+8 | |
1586 | movq_cfi r9, R9+8 | |
1587 | movq_cfi r10, R10+8 | |
1588 | movq_cfi r11, R11+8 | |
1589 | movq_cfi rbx, RBX+8 | |
1590 | movq_cfi rbp, RBP+8 | |
1591 | movq_cfi r12, R12+8 | |
1592 | movq_cfi r13, R13+8 | |
1593 | movq_cfi r14, R14+8 | |
1594 | movq_cfi r15, R15+8 | |
1595 | xorl %ebx,%ebx | |
1596 | testl $3,CS+8(%rsp) | |
1597 | je error_kernelspace | |
1598 | error_swapgs: | |
1599 | SWAPGS | |
1600 | error_sti: | |
1601 | TRACE_IRQS_OFF | |
1602 | ret | |
ddeb8f21 AH |
1603 | |
1604 | /* | |
1605 | * There are two places in the kernel that can potentially fault with | |
69c5d32d AL |
1606 | * usergs. Handle them here. B-stepping K8s sometimes report a
1607 | * truncated RIP for IRET exceptions returning to compat mode. Check | |
1608 | * for these here too. | |
ddeb8f21 AH |
1609 | */ |
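/*
 * The checks below, approximately in C (a sketch; saved_rip stands for
 * the RIP+8(%rsp) slot):
 *
 *	if (saved_rip == (u64)&native_irq_return_iret)
 *		goto error_bad_iret;		// the IRET itself faulted
 *	if (saved_rip == (u32)(u64)&native_irq_return_iret) {
 *		saved_rip = (u64)&native_irq_return_iret; // un-truncate RIP
 *		goto error_bad_iret;		// then treat as a bad IRET
 *	}
 *	if (saved_rip == (u64)&gs_change)
 *		goto error_swapgs;		// fault while reloading GS
 *	goto error_sti;				// ordinary kernel fault
 */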
1610 | error_kernelspace: | |
1611 | incl %ebx | |
e8248801 | 1612 | leaq native_irq_return_iret(%rip),%rcx |
ddeb8f21 | 1613 | cmpq %rcx,RIP+8(%rsp) |
69c5d32d | 1614 | je error_bad_iret |
ae24ffe5 BG |
1615 | movl %ecx,%eax /* zero extend */ |
1616 | cmpq %rax,RIP+8(%rsp) | |
1617 | je bstep_iret | |
ddeb8f21 | 1618 | cmpq $gs_change,RIP+8(%rsp) |
9f1e87ea | 1619 | je error_swapgs |
ddeb8f21 | 1620 | jmp error_sti |
ae24ffe5 BG |
1621 | |
1622 | bstep_iret: | |
1623 | /* Fix truncated RIP */ | |
1624 | movq %rcx,RIP+8(%rsp) | |
69c5d32d AL |
1625 | /* fall through */ |
1626 | ||
1627 | error_bad_iret: | |
1628 | SWAPGS | |
1629 | mov %rsp,%rdi | |
1630 | call fixup_bad_iret | |
1631 | mov %rax,%rsp | |
1632 | decl %ebx /* Return to usergs */ | |
1633 | jmp error_sti | |
e6b04b6b | 1634 | CFI_ENDPROC |
ddeb8f21 AH |
1635 | END(error_entry) |
1636 | ||
1637 | ||
1638 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ | |
1639 | ENTRY(error_exit) | |
1640 | DEFAULT_FRAME | |
1641 | movl %ebx,%eax | |
1642 | RESTORE_REST | |
1643 | DISABLE_INTERRUPTS(CLBR_NONE) | |
1644 | TRACE_IRQS_OFF | |
1645 | GET_THREAD_INFO(%rcx) | |
1646 | testl %eax,%eax | |
1647 | jne retint_kernel | |
1648 | LOCKDEP_SYS_EXIT_IRQ | |
1649 | movl TI_flags(%rcx),%edx | |
1650 | movl $_TIF_WORK_MASK,%edi | |
1651 | andl %edi,%edx | |
1652 | jnz retint_careful | |
1653 | jmp retint_swapgs | |
1654 | CFI_ENDPROC | |
1655 | END(error_exit) | |
1656 | ||
3f3c8b8c SR |
1657 | /* |
1658 | * Test if a given stack is an NMI stack or not. | |
1659 | */ | |
1660 | .macro test_in_nmi reg stack nmi_ret normal_ret | |
1661 | cmpq %\reg, \stack | |
1662 | ja \normal_ret | |
1663 | subq $EXCEPTION_STKSZ, %\reg | |
1664 | cmpq %\reg, \stack | |
1665 | jb \normal_ret | |
1666 | jmp \nmi_ret | |
1667 | .endm | |
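/*
 * C equivalent of test_in_nmi (a sketch): given the top of the NMI
 * exception stack in "reg", decide whether "stack" points into it.
 *
 *	static int test_in_nmi(u64 reg, u64 stack)
 *	{
 *		if (stack > reg)			// above the top
 *			return 0;			// -> normal_ret
 *		if (stack < reg - EXCEPTION_STKSZ)	// below the bottom
 *			return 0;			// -> normal_ret
 *		return 1;				// -> nmi_ret
 *	}
 */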
ddeb8f21 AH |
1668 | |
1669 | /* runs on exception stack */ | |
1670 | ENTRY(nmi) | |
1671 | INTR_FRAME | |
1672 | PARAVIRT_ADJUST_EXCEPTION_FRAME | |
3f3c8b8c SR |
1673 | /* |
1674 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | |
1675 | * the iretq it performs will take us out of NMI context. | |
1676 | * This means that we can have nested NMIs where the next | |
1677 | * NMI is using the top of the stack of the previous NMI. We | |
1678 | * can't let it execute because the nested NMI will corrupt the | |
1679 | * stack of the previous NMI. NMI handlers are not re-entrant | |
1680 | * anyway. | |
1681 | * | |
1682 | * To handle this case we do the following: | |
1683 | * Check a special location on the stack that contains
1684 | * a variable that is set when NMIs are executing. | |
1685 | * The interrupted task's stack is also checked to see if it | |
1686 | * is an NMI stack. | |
1687 | * If the variable is not set and the stack is not the NMI | |
1688 | * stack then: | |
1689 | * o Set the special variable on the stack | |
1690 | * o Copy the interrupt frame into a "saved" location on the stack | |
1691 | * o Copy the interrupt frame into a "copy" location on the stack | |
1692 | * o Continue processing the NMI | |
1693 | * If the variable is set or the previous stack is the NMI stack: | |
1694 | * o Modify the "copy" location to jump to repeat_nmi
1695 | * o Return to the first NMI
1696 | * | |
1697 | * Now on exit of the first NMI, we first clear the stack variable;
1698 | * the NMI-stack check will still tell any nested NMI arriving at
1699 | * that point that it is nested. Then we pop the stack normally with
1700 | * iret, and if a nested NMI updated the "copy" interrupt stack frame, a
1701 | * jump will be made to the repeat_nmi code that will handle the second | |
1702 | * NMI. | |
1703 | */ | |
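/*
 * The same algorithm as C-flavored pseudocode (a sketch; the variables
 * name the stack slots described above):
 *
 *	if (!nmi_executing && !on_nmi_stack(prev_rsp)) {
 *		nmi_executing = 1;
 *		saved_frame = hw_iret_frame;	// master copy, kept pristine
 *		copy_frame  = hw_iret_frame;	// working copy, iretq pops it
 *		// ... run the NMI handler ...
 *	} else {
 *		copy_frame.rip = repeat_nmi;	// first NMI will re-run itself
 *		return;				// back to the interrupted NMI
 *	}
 */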
1704 | ||
1705 | /* Use %rdx as our temp variable throughout */
1706 | pushq_cfi %rdx | |
62610913 | 1707 | CFI_REL_OFFSET rdx, 0 |
3f3c8b8c | 1708 | |
45d5a168 SR |
1709 | /* |
1710 | * If %cs was not the kernel segment, then the NMI triggered in user | |
1711 | * space, which means it is definitely not nested. | |
1712 | */ | |
a38449ef | 1713 | cmpl $__KERNEL_CS, 16(%rsp) |
45d5a168 SR |
1714 | jne first_nmi |
1715 | ||
3f3c8b8c SR |
1716 | /* |
1717 | * Check the special variable on the stack to see if NMIs are | |
1718 | * executing. | |
1719 | */ | |
a38449ef | 1720 | cmpl $1, -8(%rsp) |
3f3c8b8c SR |
1721 | je nested_nmi |
1722 | ||
1723 | /* | |
1724 | * Now test if the previous stack was an NMI stack. | |
1725 | * We need both checks: the NMI-stack check covers the race where
1726 | * the first NMI clears the variable just before returning, and the
1727 | * variable covers the case where the first NMI is running a
1728 | * breakpoint routine on a breakpoint stack.
1729 | */ | |
1730 | lea 6*8(%rsp), %rdx | |
1731 | test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi | |
62610913 | 1732 | CFI_REMEMBER_STATE |
3f3c8b8c SR |
1733 | |
1734 | nested_nmi: | |
1735 | /* | |
1736 | * Do nothing if we interrupted the fixup in repeat_nmi. | |
1737 | * It's about to repeat the NMI handler, so we are fine | |
1738 | * with ignoring this one. | |
1739 | */ | |
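/*
 * The two compares below implement, in C terms (a sketch; saved_rip
 * stands for the interrupted RIP at 8(%rsp)):
 *
 *	if (saved_rip >= (u64)repeat_nmi && saved_rip < (u64)end_repeat_nmi)
 *		goto nested_nmi_out;	// about to repeat anyway
 *	// otherwise fall through and redirect the first NMI to repeat_nmi
 */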
1740 | movq $repeat_nmi, %rdx | |
1741 | cmpq 8(%rsp), %rdx | |
1742 | ja 1f | |
1743 | movq $end_repeat_nmi, %rdx | |
1744 | cmpq 8(%rsp), %rdx | |
1745 | ja nested_nmi_out | |
1746 | ||
1747 | 1: | |
1748 | /* Set up the interrupted NMI's stack to jump to repeat_nmi */
28696f43 | 1749 | leaq -1*8(%rsp), %rdx |
3f3c8b8c | 1750 | movq %rdx, %rsp |
28696f43 SQ |
1751 | CFI_ADJUST_CFA_OFFSET 1*8 |
1752 | leaq -10*8(%rsp), %rdx | |
3f3c8b8c SR |
1753 | pushq_cfi $__KERNEL_DS |
1754 | pushq_cfi %rdx | |
1755 | pushfq_cfi | |
1756 | pushq_cfi $__KERNEL_CS | |
1757 | pushq_cfi $repeat_nmi | |
1758 | ||
1759 | /* Put stack back */ | |
28696f43 SQ |
1760 | addq $(6*8), %rsp |
1761 | CFI_ADJUST_CFA_OFFSET -6*8 | |
3f3c8b8c SR |
1762 | |
1763 | nested_nmi_out: | |
1764 | popq_cfi %rdx | |
62610913 | 1765 | CFI_RESTORE rdx |
3f3c8b8c SR |
1766 | |
1767 | /* No need to check faults here */ | |
1768 | INTERRUPT_RETURN | |
1769 | ||
62610913 | 1770 | CFI_RESTORE_STATE |
3f3c8b8c SR |
1771 | first_nmi: |
1772 | /* | |
1773 | * Because nested NMIs will use the pushed location that we | |
1774 | * stored in rdx, we must keep that space available. | |
1775 | * Here's what our stack frame will look like: | |
1776 | * +-------------------------+ | |
1777 | * | original SS | | |
1778 | * | original Return RSP | | |
1779 | * | original RFLAGS | | |
1780 | * | original CS | | |
1781 | * | original RIP | | |
1782 | * +-------------------------+ | |
1783 | * | temp storage for rdx | | |
1784 | * +-------------------------+ | |
1785 | * | NMI executing variable | | |
1786 | * +-------------------------+ | |
3f3c8b8c SR |
1787 | * | copied SS | |
1788 | * | copied Return RSP | | |
1789 | * | copied RFLAGS | | |
1790 | * | copied CS | | |
1791 | * | copied RIP | | |
1792 | * +-------------------------+ | |
28696f43 SQ |
1793 | * | Saved SS | |
1794 | * | Saved Return RSP | | |
1795 | * | Saved RFLAGS | | |
1796 | * | Saved CS | | |
1797 | * | Saved RIP | | |
1798 | * +-------------------------+ | |
3f3c8b8c SR |
1799 | * | pt_regs | |
1800 | * +-------------------------+ | |
1801 | * | |
79fb4ad6 SR |
1802 | * The saved stack frame is used to fix up the copied stack frame |
1803 | * that a nested NMI may change to make the interrupted NMI's iret
1804 | * jump to repeat_nmi. The original stack frame and the temp storage
3f3c8b8c SR |
1805 | * are also used by nested NMIs and cannot be trusted on exit.
1806 | */ | |
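/*
 * Viewed as data, from lowest to highest address, the layout above is
 * roughly (a hypothetical struct for illustration; the "Saved" frame
 * also serves as the RIP..SS part of the eventual pt_regs):
 *
 *	struct nmi_stack {
 *		u64	gpregs[16];	// r15..rax plus orig_ax (pt_regs)
 *		u64	saved[5];	// "Saved" RIP..SS master copy
 *		u64	copy[5];	// "copied" RIP..SS that iretq pops
 *		u64	nmi_executing;	// the "NMI executing" variable
 *		u64	rdx_temp;	// temp storage for rdx
 *		u64	original[5];	// RIP..SS pushed by the hardware
 *	};
 */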
79fb4ad6 | 1807 | /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ |
62610913 JB |
1808 | movq (%rsp), %rdx |
1809 | CFI_RESTORE rdx | |
1810 | ||
3f3c8b8c SR |
1811 | /* Set the NMI executing variable on the stack. */ |
1812 | pushq_cfi $1 | |
1813 | ||
28696f43 SQ |
1814 | /* |
1815 | * Leave room for the "copied" frame | |
1816 | */ | |
1817 | subq $(5*8), %rsp | |
444723dc | 1818 | CFI_ADJUST_CFA_OFFSET 5*8 |
28696f43 | 1819 | |
3f3c8b8c SR |
1820 | /* Copy the stack frame to the Saved frame */ |
1821 | .rept 5 | |
28696f43 | 1822 | pushq_cfi 11*8(%rsp) |
3f3c8b8c | 1823 | .endr |
62610913 JB |
1824 | CFI_DEF_CFA_OFFSET SS+8-RIP |
1825 | ||
79fb4ad6 SR |
1826 | /* Everything up to here is safe from nested NMIs */ |
1827 | ||
62610913 JB |
1828 | /* |
1829 | * If there was a nested NMI, the first NMI's iret will return | |
1830 | * here. But NMIs are still enabled and we can take another | |
1831 | * nested NMI. The nested NMI checks the interrupted RIP to see | |
1832 | * if it is between repeat_nmi and end_repeat_nmi, and if so | |
1833 | * it will just return, as we are about to repeat an NMI anyway. | |
1834 | * This makes it safe to copy to the stack frame that a nested | |
1835 | * NMI will update. | |
1836 | */ | |
1837 | repeat_nmi: | |
1838 | /* | |
1839 | * Update the stack variable to say we are still in NMI (the update | |
1840 | * is benign for the non-repeat case, where 1 was pushed just above | |
1841 | * to this very stack slot). | |
1842 | */ | |
28696f43 | 1843 | movq $1, 10*8(%rsp) |
3f3c8b8c SR |
1844 | |
1845 | /* Make another copy, this one may be modified by nested NMIs */ | |
28696f43 SQ |
1846 | addq $(10*8), %rsp |
1847 | CFI_ADJUST_CFA_OFFSET -10*8 | |
3f3c8b8c | 1848 | .rept 5 |
28696f43 | 1849 | pushq_cfi -6*8(%rsp) |
3f3c8b8c | 1850 | .endr |
28696f43 | 1851 | subq $(5*8), %rsp |
62610913 JB |
1852 | CFI_DEF_CFA_OFFSET SS+8-RIP |
1853 | end_repeat_nmi: | |
3f3c8b8c SR |
1854 | |
1855 | /* | |
1856 | * Everything below this point can be preempted by a nested | |
79fb4ad6 SR |
1857 | * NMI if the first NMI took an exception and reset our iret stack |
1858 | * so that we repeat another NMI. | |
3f3c8b8c | 1859 | */ |
1fd466ef | 1860 | pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
b1cccb1b JB |
1861 | subq $ORIG_RAX-R15, %rsp |
1862 | CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 | |
1fd466ef SR |
1863 | /* |
1864 | * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit | |
1865 | * as we should not be calling schedule in NMI context, even with
1866 | * normal interrupts enabled. An NMI should not be setting
1867 | * NEED_RESCHED or anything else that normal interrupts and
1868 | * exceptions might do.
1869 | */ | |
ddeb8f21 AH |
1870 | call save_paranoid |
1871 | DEFAULT_FRAME 0 | |
7fbb98c5 SR |
1872 | |
1873 | /* | |
1874 | * Save off the CR2 register. If we take a page fault in the NMI then | |
1875 | * it could corrupt the CR2 value. If the NMI preempts a page fault | |
1876 | * handler before it was able to read the CR2 register, and then the | |
1877 | * NMI itself takes a page fault, the page fault that was preempted | |
1878 | * will read the information from the NMI page fault and not the | |
1879 | * origin fault. Save it off and restore it if it changes. | |
1880 | * Use the r12 callee-saved register. | |
1881 | */ | |
1882 | movq %cr2, %r12 | |
1883 | ||
ddeb8f21 AH |
1884 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
1885 | movq %rsp,%rdi | |
1886 | movq $-1,%rsi | |
1887 | call do_nmi | |
7fbb98c5 SR |
1888 | |
1889 | /* Did the NMI take a page fault? Restore cr2 if it did */ | |
1890 | movq %cr2, %rcx | |
1891 | cmpq %rcx, %r12 | |
1892 | je 1f | |
1893 | movq %r12, %cr2 | |
1894 | 1: | |
1895 | ||
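/*
 * The CR2 dance above, in C terms (a sketch):
 *
 *	unsigned long saved_cr2 = read_cr2();	// movq %cr2, %r12
 *	do_nmi(regs, -1);
 *	if (read_cr2() != saved_cr2)		// NMI path touched CR2
 *		write_cr2(saved_cr2);		// restore the preempted value
 */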
ddeb8f21 AH |
1896 | testl %ebx,%ebx /* swapgs needed? */ |
1897 | jnz nmi_restore | |
ddeb8f21 AH |
1898 | nmi_swapgs: |
1899 | SWAPGS_UNSAFE_STACK | |
1900 | nmi_restore: | |
444723dc JB |
1901 | /* Pop the extra iret frame at once */ |
1902 | RESTORE_ALL 6*8 | |
28696f43 | 1903 | |
3f3c8b8c | 1904 | /* Clear the NMI executing stack variable */ |
28696f43 | 1905 | movq $0, 5*8(%rsp) |
ddeb8f21 | 1906 | jmp irq_return |
9f1e87ea | 1907 | CFI_ENDPROC |
ddeb8f21 AH |
1908 | END(nmi) |
1909 | ||
1910 | ENTRY(ignore_sysret) | |
1911 | CFI_STARTPROC | |
1912 | mov $-ENOSYS,%eax | |
1913 | sysret | |
1914 | CFI_ENDPROC | |
1915 | END(ignore_sysret) | |
1916 | ||
1917 | /* | |
1918 | * End of kprobes section | |
1919 | */ | |
1920 | .popsection |