/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
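// These values index the handler name table used by bad_mode() when one of
// the "invalid" vectors below is taken.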
#define BAD_SYNC 0
#define BAD_IRQ 1
#define BAD_FIQ 2
#define BAD_ERROR 3

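/*
 * kernel_entry/kernel_exit build and unwind a struct pt_regs frame on the
 * kernel stack. The push/pop mnemonics below are helper macros (see
 * asm/assembler.h) that expand to stp/ldp with writeback on sp, so the
 * sequence of pushes lays out x0-x29 at the S_X* offsets generated by
 * asm-offsets.
 */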
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

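/*
 * With \ret != 0 (the fast syscall return path) x0 is not reloaded from the
 * frame, so the syscall return value placed there by the handler survives;
 * only x1 is restored before sp is stepped over the x0/x1 slots.
 */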
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
	.endm
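// The kernel stack is 8K (1 << 13 bytes) with thread_info at its base, so
// clearing the low 13 bits of the current sp yields the thread_info pointer
// of the running task.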

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
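// handle_arch_irq (defined at the end of this file) is a function pointer
// installed at boot by the interrupt controller driver; the handler is
// called with a pointer to the saved pt_regs in x0.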
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
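/*
 * The base of the table must be 2KB aligned (.align 11), as required by
 * VBAR_EL1, and the ventry macro pads each entry to its architectural
 * 128-byte slot. The four groups cover, in order: current EL using SP_EL0
 * (EL1t), current EL using SP_EL1 (EL1h), lower EL in AArch64 state and
 * lower EL in AArch32 state.
 */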

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
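	// the debug exception classes have the highest EC values, so anything
	// >= BREAKPT_EL1 goes to el1_dbg, which then checks bit 0 of the EC
	// to reject the lower-EL variants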
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]
	cmp	x0, x23
	b.eq	1f
	mov	x1, #0
	str	x1, [x1]			// BUG
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
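/*
 * The stores and loads below follow the layout of struct cpu_context in
 * thread_struct (x19-x28, fp, sp, pc), located at THREAD_CPU_CONTEXT within
 * the task_struct.
 */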
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule
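	// fall through to ret_to_user below, which re-checks the work flags
	// with interrupts disabled before finally returning to userspace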

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
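/*
 * copy_thread() sets up x19/x20 in the child's cpu_context: for a kernel
 * thread x19 holds the thread function and x20 its argument, while for a
 * user task x19 is zero and we head straight back to userspace.
 */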
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
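/*
 * AArch64 syscall convention: the syscall number arrives in w8 and the
 * handler address is fetched from sys_call_table + scno * 8. The AArch32
 * (compat) path enters at el0_svc_naked with stbl, scno and sc_nr already
 * set up by el0_svc_compat above.
 */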
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
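	// the tracer may have modified the syscall arguments while the task
	// was stopped, so reload them from the saved pt_regs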
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

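/*
 * Storage for the IRQ dispatch pointer read by the irq_handler macro above;
 * it remains zero until the interrupt controller driver installs its
 * handler during boot.
 */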
ENTRY(handle_arch_irq)
	.quad	0