/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:  Catalin Marinas <catalin.marinas@arm.com>
 *           Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC   0
#define BAD_IRQ    1
#define BAD_FIQ    2
#define BAD_ERROR  3
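
/*
 * Note: these values are passed as the "reason" argument to bad_mode()
 * (see the inv_entry macro below); bad_mode() uses them to pick a
 * human-readable name for the unhandled vector.
 */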

        .macro kernel_entry, el, regsize = 64
        sub sp, sp, #S_FRAME_SIZE - S_LR        // room for LR, SP, SPSR, ELR
        .if \regsize == 32
        mov w0, w0                              // zero upper 32 bits of x0
        .endif
        push x28, x29
        push x26, x27
        push x24, x25
        push x22, x23
        push x20, x21
        push x18, x19
        push x16, x17
        push x14, x15
        push x12, x13
        push x10, x11
        push x8, x9
        push x6, x7
        push x4, x5
        push x2, x3
        push x0, x1
        .if \el == 0
        mrs x21, sp_el0
        .else
        add x21, sp, #S_FRAME_SIZE
        .endif
        mrs x22, elr_el1
        mrs x23, spsr_el1
        stp lr, x21, [sp, #S_LR]
        stp x22, x23, [sp, #S_PC]

        /*
         * Set syscallno to -1 by default (overridden later if real syscall).
         */
        .if \el == 0
        mvn x21, xzr
        str x21, [sp, #S_SYSCALLNO]
        .endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm
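
/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack: the
 * push sequence stores x0-x29 at the bottom of the frame, and the two
 * stp instructions fill in the lr/sp and elr/spsr (PC/PSTATE) slots at
 * S_LR and S_PC.  The S_* offsets are generated by asm-offsets.c.
 */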

#ifdef CONFIG_MTK_COMPAT
        .macro kernel_entry_compat
        sub sp, sp, #S_FRAME_SIZE - S_X16       // room for LR, SP, SPSR, ELR
        mov w0, w0                              // zero upper 32 bits of x0

        stp x14, x15, [sp, #-16]!
        stp x12, x13, [sp, #-16]!
        stp x10, x11, [sp, #-16]!
        stp x8, x9, [sp, #-16]!
        stp x6, x7, [sp, #-16]!
        stp x4, x5, [sp, #-16]!
        stp x2, x3, [sp, #-16]!
        stp x0, x1, [sp, #-16]!

        mrs x21, sp_el0
        mrs x22, elr_el1
        mrs x23, spsr_el1
        stp lr, x21, [sp, #S_LR]
        stp x22, x23, [sp, #S_PC]

        /*
         * Set syscallno to -1 by default (overridden later if real syscall).
         */
        mvn x21, xzr
        str x21, [sp, #S_SYSCALLNO]

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm
#endif
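
/*
 * MTK compat variant of kernel_entry: a 32-bit EL0 task only uses the
 * low registers (AArch32 r0-r14 map onto x0-x14), so the frame is only
 * reserved down to S_X16 and the stp sequence saves x0-x15.  The
 * x16-x29 slots of the pt_regs frame are left unwritten;
 * kernel_exit_compat skips them on the way out.
 */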

        .macro kernel_exit, el, ret = 0
        ldp x21, x22, [sp, #S_PC]               // load ELR, SPSR
        .if \el == 0
        ldr x23, [sp, #S_SP]                    // load return stack pointer
        .endif
        .if \ret
        ldr x1, [sp, #S_X1]                     // preserve x0 (syscall return)
        add sp, sp, S_X2
        .else
        pop x0, x1
        .endif
        pop x2, x3                              // load the rest of the registers
        pop x4, x5
        pop x6, x7
        pop x8, x9
        msr elr_el1, x21                        // set up the return data
        msr spsr_el1, x22
        .if \el == 0
        msr sp_el0, x23
        .endif
        pop x10, x11
        pop x12, x13
        pop x14, x15
        pop x16, x17
        pop x18, x19
        pop x20, x21
        pop x22, x23
        pop x24, x25
        pop x26, x27
        pop x28, x29
        ldr lr, [sp], #S_FRAME_SIZE - S_LR      // load LR and restore SP
        eret                                    // return to kernel
        .endm
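
/*
 * When kernel_exit is invoked with ret = 1 (the syscall fast path), x0
 * still holds the syscall return value, so only x1 is reloaded and the
 * stack pointer is simply advanced past the x0/x1 slots (S_X2) instead
 * of popping them.
 */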

#ifdef CONFIG_MTK_COMPAT
        .macro kernel_exit_compat, ret = 0
        ldp x21, x22, [sp, #S_PC]               // load ELR, SPSR
        ldr x23, [sp, #S_SP]                    // load return stack pointer
        .if \ret
        ldr x1, [sp, #S_X1]                     // preserve x0 (syscall return)
        add sp, sp, S_X2
        .else
        ldp x0, x1, [sp], #16
        .endif
        ldp x2, x3, [sp], #16                   // load the rest of the registers
        ldp x4, x5, [sp], #16
        ldp x6, x7, [sp], #16
        ldp x8, x9, [sp], #16
        msr elr_el1, x21                        // set up the return data
        msr spsr_el1, x22
        msr sp_el0, x23
        ldp x10, x11, [sp], #16
        ldp x12, x13, [sp], #16
        ldp x14, x15, [sp], #16
        tbnz x22, #4, 1f                        // returning to AArch32?

        ldp x16, x17, [sp], #16
        ldp x18, x19, [sp], #16
        ldp x20, x21, [sp], #16
        ldp x22, x23, [sp], #16
        ldp x24, x25, [sp], #16
        ldp x26, x27, [sp], #16
        ldp x28, x29, [sp], #16
        ldr lr, [sp], #S_FRAME_SIZE - S_LR      // load LR and restore SP
        eret                                    // return to user
        // not reached

1:      add sp, sp, #S_X29-S_X15
        ldr lr, [sp], #S_FRAME_SIZE - S_LR      // load LR and restore SP
        eret                                    // return to user
        .endm
#endif
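
/*
 * Bit 4 of the saved SPSR (PSTATE.M[4]) is set when the interrupted
 * context was in AArch32 state.  For a 32-bit task only x0-x15 were
 * saved by kernel_entry_compat, so the x16-x29 slots are skipped with a
 * single sp adjustment instead of being reloaded.
 */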

        .macro get_thread_info, rd
        mov \rd, sp
        and \rd, \rd, #~(THREAD_SIZE - 1)       // top of stack
        .endm
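
/*
 * thread_info sits at the lowest address of the THREAD_SIZE-aligned
 * kernel stack, so masking off the low bits of the current sp yields a
 * pointer to it.
 */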

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr   .req x25        // number of system calls
scno    .req x26        // syscall number
stbl    .req x27        // syscall table pointer
tsk     .req x28        // current thread_info

/*
 * Interrupt handling.
 */
        .macro irq_handler
        ldr x1, handle_arch_irq
        mov x0, sp
        blr x1
        .endm
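
/*
 * handle_arch_irq (the .quad defined at the end of this file) holds a
 * pointer to the root interrupt handler, installed at boot by the
 * interrupt controller driver; it is called with x0 pointing at the
 * saved pt_regs.
 */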

        .text

/*
 * Exception vectors.
 */

        .align 11
ENTRY(vectors)
        ventry el1_sync_invalid                 // Synchronous EL1t
        ventry el1_irq_invalid                  // IRQ EL1t
        ventry el1_fiq_invalid                  // FIQ EL1t
        ventry el1_error_invalid                // Error EL1t

        ventry el1_sync                         // Synchronous EL1h
        ventry el1_irq                          // IRQ EL1h
        ventry el1_fiq_invalid                  // FIQ EL1h
        ventry el1_error_invalid                // Error EL1h

        ventry el0_sync                         // Synchronous 64-bit EL0
        ventry el0_irq                          // IRQ 64-bit EL0
        ventry el0_fiq_invalid                  // FIQ 64-bit EL0
        ventry el0_error_invalid                // Error 64-bit EL0

#ifdef CONFIG_COMPAT
        ventry el0_sync_compat                  // Synchronous 32-bit EL0
        ventry el0_irq_compat                   // IRQ 32-bit EL0
        ventry el0_fiq_invalid_compat           // FIQ 32-bit EL0
        ventry el0_error_invalid_compat         // Error 32-bit EL0
#else
        ventry el0_sync_invalid                 // Synchronous 32-bit EL0
        ventry el0_irq_invalid                  // IRQ 32-bit EL0
        ventry el0_fiq_invalid                  // FIQ 32-bit EL0
        ventry el0_error_invalid                // Error 32-bit EL0
#endif
END(vectors)
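
/*
 * VBAR_EL1 is programmed to point at this table.  The .align 11 gives
 * the required 2KB alignment, and each ventry slot is spaced 0x80 bytes
 * apart as the architecture defines: four entries per source (sync,
 * IRQ, FIQ, SError) for EL1t, EL1h, 64-bit EL0 and 32-bit EL0.
 */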

/*
 * Invalid mode handlers
 */
        .macro inv_entry, el, reason, regsize = 64
        kernel_entry el, \regsize
        mov x0, sp
        mov x1, #\reason
        mrs x2, esr_el1
        b bad_mode
        .endm

el0_sync_invalid:
        inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
        inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
        inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
        inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
        inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
        inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
        inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
        inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
        inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
        inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
        .align 6
el1_sync:
        kernel_entry 1
        mov x0, sp
        and x20, x0, #0xffffffffffffc000        // thread_info (16K-aligned stack)
        ldr w4, [x20, #TI_CPU_EXCP]             // bump per-task exception nesting count
        add w4, w4, #0x1
        str w4, [x20, #TI_CPU_EXCP]
        cmp w4, #0x1
        b.ne el1_sync_nest
        str x0, [x20, #TI_REGS_ON_EXCP]         // first-level exception: remember pt_regs
el1_sync_nest:
        cmp w4, #0x2
        b.lt el1_sync_nest_skip
        bl aee_stop_nested_panic                // nested exception: hand over to MTK AEE
el1_sync_nest_skip:
        mrs x1, esr_el1                         // read the syndrome register
        lsr x24, x1, #ESR_EL1_EC_SHIFT          // exception class
        cmp x24, #ESR_EL1_EC_DABT_EL1           // data abort in EL1
        b.eq el1_da
        cmp x24, #ESR_EL1_EC_SYS64              // configurable trap
        b.eq el1_undef
        cmp x24, #ESR_EL1_EC_SP_ALIGN           // stack alignment exception
        b.eq el1_sp_pc
        cmp x24, #ESR_EL1_EC_PC_ALIGN           // pc alignment exception
        b.eq el1_sp_pc
        cmp x24, #ESR_EL1_EC_UNKNOWN            // unknown exception in EL1
        b.eq el1_undef
        cmp x24, #ESR_EL1_EC_BREAKPT_EL1        // debug exception in EL1
        b.ge el1_dbg                            // all debug ECs are >= this value
        b el1_inv
el1_da:
        /*
         * Data abort handling
         */
        mrs x0, far_el1
        enable_dbg_if_not_stepping x2
        // re-enable interrupts if they were enabled in the aborted context
        tbnz x23, #7, 1f                        // PSR_I_BIT
        enable_irq
1:
        mov x2, sp                              // struct pt_regs
        bl do_mem_abort
        mov x5, sp
        and x20, x5, #0xffffffffffffc000        // thread_info
        ldr w4, [x20, #TI_CPU_EXCP]             // drop the exception nesting count
        sub w4, w4, #0x1
        str w4, [x20, #TI_CPU_EXCP]

        // disable interrupts before pulling preserved data off the stack
        disable_irq
        kernel_exit 1
el1_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs x0, far_el1
        mov x2, sp
        b do_sp_pc_abort
el1_undef:
        /*
         * Undefined instruction
         */
        mov x0, sp
        bl do_undefinstr
        kernel_exit 1
el1_dbg:
        /*
         * Debug exception handling
         */
        cmp x24, #ESR_EL1_EC_BRK64              // if BRK64
        cinc x24, x24, eq                       // set bit '0'
        tbz x24, #0, el1_inv                    // EL1 only
        mrs x0, far_el1
        mov x2, sp                              // struct pt_regs
        bl do_debug_exception
        mov x5, sp
        and x20, x5, #0xffffffffffffc000        // thread_info
        ldr w4, [x20, #TI_CPU_EXCP]             // drop the exception nesting count
        sub w4, w4, #0x1
        str w4, [x20, #TI_CPU_EXCP]

        kernel_exit 1
el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        mov x0, sp
        mov x1, #BAD_SYNC
        mrs x2, esr_el1
        b bad_mode
ENDPROC(el1_sync)

        .align 6
el1_irq:
        kernel_entry 1
        enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
        bl trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr x24, [tsk, #TI_PREEMPT]             // get preempt count
        add x0, x24, #1                         // increment it
        str x0, [tsk, #TI_PREEMPT]
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        str x24, [tsk, #TI_PREEMPT]             // restore preempt count
        cbnz x24, 1f                            // preempt count != 0
        ldr x0, [tsk, #TI_FLAGS]                // get flags
        tbz x0, #TIF_NEED_RESCHED, 1f           // needs rescheduling?
        bl el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl trace_hardirqs_on
#endif
        kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
        mov x24, lr
1:      enable_dbg
        bl preempt_schedule_irq                 // irq en/disable is done inside
        ldr x0, [tsk, #TI_FLAGS]                // get new task's TI_FLAGS
        tbnz x0, #TIF_NEED_RESCHED, 1b          // needs rescheduling?
        ret x24
#endif

/*
 * EL0 mode handlers.
 */
        .align 6
el0_sync:
        kernel_entry 0
        mrs x25, esr_el1                        // read the syndrome register
        lsr x24, x25, #ESR_EL1_EC_SHIFT         // exception class
        cmp x24, #ESR_EL1_EC_SVC64              // SVC in 64-bit state
        b.eq el0_svc
        adr lr, ret_from_exception
        cmp x24, #ESR_EL1_EC_DABT_EL0           // data abort in EL0
        b.eq el0_da
        cmp x24, #ESR_EL1_EC_IABT_EL0           // instruction abort in EL0
        b.eq el0_ia
        cmp x24, #ESR_EL1_EC_FP_ASIMD           // FP/ASIMD access
        b.eq el0_fpsimd_acc
        cmp x24, #ESR_EL1_EC_FP_EXC64           // FP/ASIMD exception
        b.eq el0_fpsimd_exc
        cmp x24, #ESR_EL1_EC_SYS64              // configurable trap
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_SP_ALIGN           // stack alignment exception
        b.eq el0_sp_pc
        cmp x24, #ESR_EL1_EC_PC_ALIGN           // pc alignment exception
        b.eq el0_sp_pc
        cmp x24, #ESR_EL1_EC_UNKNOWN            // unknown exception in EL0
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_BREAKPT_EL0        // debug exception in EL0
        b.ge el0_dbg
        b el0_inv

#ifdef CONFIG_COMPAT
        .align 6
el0_sync_compat:
#ifdef CONFIG_MTK_COMPAT
        kernel_entry_compat
#else
        kernel_entry 0, 32
#endif
        mrs x25, esr_el1                        // read the syndrome register
        lsr x24, x25, #ESR_EL1_EC_SHIFT         // exception class
        cmp x24, #ESR_EL1_EC_SVC32              // SVC in 32-bit state
        b.eq el0_svc_compat
        adr lr, ret_from_exception
        cmp x24, #ESR_EL1_EC_DABT_EL0           // data abort in EL0
        b.eq el0_da
        cmp x24, #ESR_EL1_EC_IABT_EL0           // instruction abort in EL0
        b.eq el0_ia
        cmp x24, #ESR_EL1_EC_FP_ASIMD           // FP/ASIMD access
        b.eq el0_fpsimd_acc
        cmp x24, #ESR_EL1_EC_FP_EXC32           // FP/ASIMD exception
        b.eq el0_fpsimd_exc
        cmp x24, #ESR_EL1_EC_UNKNOWN            // unknown exception in EL0
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_CP15_32            // CP15 MRC/MCR trap
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_CP15_64            // CP15 MRRC/MCRR trap
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_CP14_MR            // CP14 MRC/MCR trap
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_CP14_LS            // CP14 LDC/STC trap
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_CP14_64            // CP14 MRRC/MCRR trap
        b.eq el0_undef
        cmp x24, #ESR_EL1_EC_BREAKPT_EL0        // debug exception in EL0
        b.ge el0_dbg
        b el0_inv
el0_svc_compat:
        /*
         * AArch32 syscall handling
         */
        adr stbl, compat_sys_call_table         // load compat syscall table pointer
        uxtw scno, w7                           // syscall number in w7 (r7)
        mov sc_nr, #__NR_compat_syscalls
        b el0_svc_naked
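
/*
 * The compat path joins el0_svc_naked below: only the syscall table,
 * the register carrying the syscall number (w7, i.e. AArch32 r7,
 * instead of w8) and the upper syscall limit differ from the 64-bit
 * path.
 */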

        .align 6
el0_irq_compat:
#ifdef CONFIG_MTK_COMPAT
        kernel_entry_compat
#else
        kernel_entry 0, 32
#endif
        b el0_irq_naked
#endif

el0_da:
        /*
         * Data abort handling
         */
        mrs x0, far_el1
        bic x0, x0, #(0xff << 56)               // clear the address tag byte
        disable_step x1
        isb
        enable_dbg
        // enable interrupts before calling the main handler
        enable_irq
        mov x1, x25
        mov x2, sp
        b do_mem_abort
el0_ia:
        /*
         * Instruction abort handling
         */
        mrs x0, far_el1
        disable_step x1
        isb
        enable_dbg
        // enable interrupts before calling the main handler
        enable_irq
        orr x1, x25, #1 << 24                   // use reserved ISS bit for instruction aborts
        mov x2, sp
        b do_mem_abort
el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        mov x0, x25
        mov x1, sp
        b do_fpsimd_acc
el0_fpsimd_exc:
        /*
         * Floating Point or Advanced SIMD exception
         */
        mov x0, x25
        mov x1, sp
        b do_fpsimd_exc
el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs x0, far_el1
        disable_step x1
        isb
        enable_dbg
        // enable interrupts before calling the main handler
        enable_irq
        mov x1, x25
        mov x2, sp
        b do_sp_pc_abort
el0_undef:
        /*
         * Undefined instruction
         */
        mov x0, sp
        b do_undefinstr
el0_dbg:
        /*
         * Debug exception handling
         */
        tbnz x24, #0, el0_inv                   // EL0 only
        mrs x0, far_el1
        disable_step x1
        mov x1, x25
        mov x2, sp
        b do_debug_exception
el0_inv:
        mov x0, sp
        mov x1, #BAD_SYNC
        mrs x2, esr_el1
        b bad_mode
ENDPROC(el0_sync)

        .align 6
el0_irq:
        kernel_entry 0
el0_irq_naked:
        disable_step x1
        isb
        enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
        bl trace_hardirqs_off
#endif
        get_thread_info tsk
#ifdef CONFIG_PREEMPT
        ldr x24, [tsk, #TI_PREEMPT]             // get preempt count
        add x23, x24, #1                        // increment it
        str x23, [tsk, #TI_PREEMPT]
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        ldr x0, [tsk, #TI_PREEMPT]
        str x24, [tsk, #TI_PREEMPT]             // restore preempt count
        cmp x0, x23                             // preempt count unbalanced?
        b.eq 1f
        mov x1, #0
        str x1, [x1]                            // BUG: force a NULL dereference
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl trace_hardirqs_on
#endif
        b ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
        get_thread_info tsk
        b ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
        add x8, x0, #THREAD_CPU_CONTEXT
        mov x9, sp
        stp x19, x20, [x8], #16                 // store callee-saved registers
        stp x21, x22, [x8], #16
        stp x23, x24, [x8], #16
        stp x25, x26, [x8], #16
        stp x27, x28, [x8], #16
        stp x29, x9, [x8], #16
        str lr, [x8]
        add x8, x1, #THREAD_CPU_CONTEXT
        ldp x19, x20, [x8], #16                 // restore callee-saved registers
        ldp x21, x22, [x8], #16
        ldp x23, x24, [x8], #16
        ldp x25, x26, [x8], #16
        ldp x27, x28, [x8], #16
        ldp x29, x9, [x8], #16
        ldr lr, [x8]
        mov sp, x9
        ret
ENDPROC(cpu_switch_to)
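
/*
 * Per the AArch64 procedure call standard, x19-x28 plus the frame
 * pointer (x29), sp and lr are the registers a callee must preserve,
 * which is exactly the set saved into the outgoing task's cpu_context
 * (at THREAD_CPU_CONTEXT within task_struct) and reloaded from the
 * incoming one above.
 */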

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
        disable_irq                             // disable interrupts
        ldr x1, [tsk, #TI_FLAGS]
        and x2, x1, #_TIF_WORK_MASK
        cbnz x2, fast_work_pending
        tbz x1, #TIF_SINGLESTEP, fast_exit
        disable_dbg
        enable_step x2
fast_exit:
#ifdef CONFIG_MTK_COMPAT
        kernel_exit_compat ret = 1
#else
        kernel_exit 0, ret = 1
#endif

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str x0, [sp, #S_X0]                     // returned x0
work_pending:
        tbnz x1, #TIF_NEED_RESCHED, work_resched
        /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
        ldr x2, [sp, #S_PSTATE]
        mov x0, sp                              // 'regs'
        tst x2, #PSR_MODE_MASK                  // user mode regs?
        b.ne no_work_pending                    // returning to kernel
        enable_irq                              // enable interrupts for do_notify_resume()
        bl do_notify_resume
        b ret_to_user
work_resched:
        enable_dbg
        bl schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
        disable_irq                             // disable interrupts
        ldr x1, [tsk, #TI_FLAGS]
        and x2, x1, #_TIF_WORK_MASK
        cbnz x2, work_pending
        tbz x1, #TIF_SINGLESTEP, no_work_pending
        disable_dbg
        enable_step x2
no_work_pending:
#ifdef CONFIG_MTK_COMPAT
        kernel_exit_compat ret = 0
#else
        kernel_exit 0, ret = 0
#endif
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl schedule_tail
        cbz x19, 1f                             // not a kernel thread
        mov x0, x20
        blr x19
1:      get_thread_info tsk
        b ret_to_user
ENDPROC(ret_from_fork)
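
/*
 * x19 and x20 are set up when the child's register state is created at
 * fork time (copy_thread): for a kernel thread x19 holds the thread
 * function and x20 its argument, while for a user task x19 is zero and
 * the path falls straight through to ret_to_user.
 */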

/*
 * SVC handler.
 */
        .align 6
el0_svc:
        adrp stbl, sys_call_table               // load syscall table pointer
        uxtw scno, w8                           // syscall number in w8
        mov sc_nr, #__NR_syscalls
el0_svc_naked:                                  // compat entry point
        stp x0, scno, [sp, #S_ORIG_X0]          // save the original x0 and syscall number
        disable_step x16
        isb
        enable_dbg
        enable_irq

        get_thread_info tsk
        ldr x16, [tsk, #TI_FLAGS]               // check for syscall hooks
        tst x16, #_TIF_SYSCALL_WORK
        b.ne __sys_trace
        adr lr, ret_fast_syscall                // return address
        cmp scno, sc_nr                         // check upper syscall limit
        b.hs ni_sys
        ldr x16, [stbl, scno, lsl #3]           // address in the syscall table
        br x16                                  // call sys_* routine
ni_sys:
        mov x0, sp
        b do_ni_syscall
ENDPROC(el0_svc)
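
/*
 * The fast path above pre-loads lr with ret_fast_syscall, so each sys_*
 * routine returns directly into the exit code.  Syscall table entries
 * are 8-byte pointers, hence the "scno, lsl #3" index.
 */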

/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
        mov x0, sp
        bl syscall_trace_enter
        adr lr, __sys_trace_return              // return address
        cmp w0, #RET_SKIP_SYSCALL_TRACE         // skip syscall and tracing?
        b.eq ret_to_user
        cmp w0, #RET_SKIP_SYSCALL               // skip syscall?
        b.eq __sys_trace_return_skipped
        uxtw scno, w0                           // syscall number (possibly new)
        mov x1, sp                              // pointer to regs
        cmp scno, sc_nr                         // check upper syscall limit
        b.hs ni_sys
        ldp x0, x1, [sp]                        // restore the syscall args
        ldp x2, x3, [sp, #S_X2]
        ldp x4, x5, [sp, #S_X4]
        ldp x6, x7, [sp, #S_X6]
        ldr x16, [stbl, scno, lsl #3]           // address in the syscall table
        br x16                                  // call sys_* routine

__sys_trace_return:
        str x0, [sp]                            // save returned x0
__sys_trace_return_skipped:                     // x0 already in regs[0]
        mov x0, sp
        bl syscall_trace_exit
        b ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
        mov x0, sp
        b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
        .quad 0