#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
+#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
-#include <asm/unistd32.h>
/*
* Bad Abort numbers
*/
.endm
+#ifdef CONFIG_MTK_COMPAT
+ /*
+  * kernel_entry_compat: save an AArch32 (compat) EL0 exception context.
+  * Builds a pt_regs frame but stores only x0-x15 (the registers visible
+  * to AArch32), plus LR, the aborted SP (sp_el0), ELR and SPSR.  The
+  * x16-x29 slots of the frame are reserved by the initial sub but are
+  * never written by this macro.
+  */
+ .macro kernel_entry_compat
+ sub sp, sp, #S_FRAME_SIZE - S_X16 // room for LR, SP, SPSR, ELR
+ mov w0, w0 // zero upper 32 bits of x0
+
+ // Push x0-x15 in pairs; after these stores sp points at regs[0].
+ stp x14, x15, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+ stp x0, x1, [sp, #-16]!
+
+ mrs x21, sp_el0 // aborted SP
+ mrs x22, elr_el1 // aborted PC
+ mrs x23, spsr_el1 // aborted PSTATE
+ stp lr, x21, [sp, #S_LR] // regs->regs[30] = lr, regs->sp = sp_el0
+ stp x22, x23, [sp, #S_PC] // regs->pc = ELR, regs->pstate = SPSR
+
+ /*
+  * Set syscallno to -1 by default (overridden later if real syscall).
+  */
+ mvn x21, xzr // x21 = ~0 = -1
+ str x21, [sp, #S_SYSCALLNO]
+
+ /*
+  * Registers that may be useful after this macro is invoked:
+  *
+  * x21 - aborted SP
+  * x22 - aborted PC
+  * x23 - aborted PSTATE
+  */
+ .endm
+#endif
+
.macro kernel_exit, el, ret = 0
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
eret // return to kernel
.endm
+#ifdef CONFIG_MTK_COMPAT
+ /*
+  * kernel_exit_compat: restore a context saved by kernel_entry_compat
+  * and eret back to EL0.  With \ret == 1, x0 already holds the syscall
+  * return value and must not be reloaded from the frame.
+  */
+ .macro kernel_exit_compat, ret = 0
+ ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
+ ldr x23, [sp, #S_SP] // load return stack pointer
+ .if \ret
+ ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
+ add sp, sp, S_X2 // skip over the x0/x1 slots
+ .else
+ ldp x0, x1, [sp], #16
+ .endif
+ ldp x2, x3, [sp], #16 // load the rest of the registers
+ ldp x4, x5, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x8, x9, [sp], #16
+ msr elr_el1, x21 // set up the return data
+ msr spsr_el1, x22
+ msr sp_el0, x23
+ ldp x10, x11, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x14, x15, [sp], #16
+ // SPSR M[4] set => returning to AArch32 state; x16-x29 are not
+ // architecturally visible there, so skip restoring them.
+ tbnz x22, #4, 1f
+
+ /*
+  * Returning to AArch64 state: reload x16-x29 and LR from the frame.
+  * NOTE(review): kernel_entry_compat never writes the x16-x29 slots,
+  * so this path reads whatever the frame reservation left on the
+  * stack - confirm it can only be reached for frames where those
+  * slots were populated.
+  */
+ ldp x16, x17, [sp], #16
+ ldp x18, x19, [sp], #16
+ ldp x20, x21, [sp], #16
+ ldp x22, x23, [sp], #16
+ ldp x24, x25, [sp], #16
+ ldp x26, x27, [sp], #16
+ ldp x28, x29, [sp], #16
+ ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP
+ eret // return to EL0
+ // not reached
+
+1: add sp, sp, #S_X29-S_X15 // skip the unrestored x16-x29 slots
+ ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP
+ eret // return to EL0
+ .endm
+#endif
+
.macro get_thread_info, rd
mov \rd, sp
and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.align 6
el1_sync:
kernel_entry 1
- mrs x1, esr_el1 // read the syndrome register
+ mov x0, sp
+ and x20, x0, #0xffffffffffffc000
+ ldr w4, [x20, #TI_CPU_EXCP]
+ add w4, w4, #0x1
+ str w4, [x20, #TI_CPU_EXCP]
+ cmp w4, #0x1
+ b.ne el1_sync_nest
+ str x0, [x20, #TI_REGS_ON_EXCP]
+el1_sync_nest:
+ cmp w4, #0x2
+ b.lt el1_sync_nest_skip
+ bl aee_stop_nested_panic
+el1_sync_nest_skip:
+ mrs x1, esr_el1 // read the syndrome register
lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class
cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1
b.eq el1_da
1:
mov x2, sp // struct pt_regs
bl do_mem_abort
+ mov x5, sp
+ and x20, x5, #0xffffffffffffc000
+ ldr w4, [x20, #TI_CPU_EXCP]
+ sub w4, w4, #0x1
+ str w4, [x20, #TI_CPU_EXCP]
// disable interrupts before pulling preserved data off the stack
disable_irq
* Undefined instruction
*/
mov x0, sp
- b do_undefinstr
+ bl do_undefinstr
+ kernel_exit 1
el1_dbg:
/*
* Debug exception handling
*/
+ cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
+ mov x5, sp
+ and x20, x5, #0xffffffffffffc000
+ ldr w4, [x20, #TI_CPU_EXCP]
+ sub w4, w4, #0x1
+ str w4, [x20, #TI_CPU_EXCP]
kernel_exit 1
el1_inv:
#ifdef CONFIG_COMPAT
.align 6
el0_sync_compat:
+#ifdef CONFIG_MTK_COMPAT
+ kernel_entry_compat
+#else
kernel_entry 0, 32
+#endif
mrs x25, esr_el1 // read the syndrome register
lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
.align 6
el0_irq_compat:
+#ifdef CONFIG_MTK_COMPAT
+ kernel_entry_compat
+#else
kernel_entry 0, 32
+#endif
b el0_irq_naked
#endif
* Data abort handling
*/
mrs x0, far_el1
+ bic x0, x0, #(0xff << 56)
disable_step x1
isb
enable_dbg
disable_dbg
enable_step x2
fast_exit:
+#ifdef CONFIG_MTK_COMPAT
+ kernel_exit_compat ret = 1
+#else
kernel_exit 0, ret = 1
+#endif
/*
* Ok, we need to do extra processing, enter the slow path.
str x0, [sp, #S_X0] // returned x0
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
- /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
+ /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
ldr x2, [sp, #S_PSTATE]
mov x0, sp // 'regs'
tst x2, #PSR_MODE_MASK // user mode regs?
disable_dbg
enable_step x2
no_work_pending:
+#ifdef CONFIG_MTK_COMPAT
+ kernel_exit_compat ret = 0
+#else
kernel_exit 0, ret = 0
+#endif
ENDPROC(ret_to_user)
/*
enable_irq
get_thread_info tsk
- ldr x16, [tsk, #TI_FLAGS] // check for syscall tracing
- tbnz x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+ ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ tst x16, #_TIF_SYSCALL_WORK
+ b.ne __sys_trace
adr lr, ret_fast_syscall // return address
cmp scno, sc_nr // check upper syscall limit
b.hs ni_sys
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov x1, sp
- mov w0, #0 // trace entry
- bl syscall_trace
+ mov x0, sp
+ bl syscall_trace_enter
adr lr, __sys_trace_return // return address
+ cmp w0, #RET_SKIP_SYSCALL_TRACE // skip syscall and tracing?
+ b.eq ret_to_user
+ cmp w0, #RET_SKIP_SYSCALL // skip syscall?
+ b.eq __sys_trace_return_skipped
uxtw scno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
cmp scno, sc_nr // check upper syscall limit
__sys_trace_return:
str x0, [sp] // save returned x0
- mov x1, sp
- mov w0, #1 // trace exit
- bl syscall_trace
+__sys_trace_return_skipped: // x0 already in regs[0]
+ mov x0, sp
+ bl syscall_trace_exit
b ret_to_user
/*