@
@ arch/arm/kernel/entry-header.S
@
@ Common macros and definitions shared by the low-level exception and
@ syscall entry/exit code (entry-armv.S / entry-common.S).
@
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>

@ Bad Abort numbers
@ -----------------
@ Indices into the bad-handler tables used when an exception arrives
@ from a mode/state we cannot handle.
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif
@
	@ Clear the frame pointer on kernel entry so backtraces terminate
	@ cleanly; compiled out when frame pointers are disabled.
	@
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm
38
@
	@ Restore the CP15 control register (and with it the alignment-trap
	@ setting) from the cached value at .LCcralign.  \rtemp is a scratch
	@ register; the macro is empty unless CONFIG_ALIGNMENT_TRAP is set.
	@
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, .LCcralign
	ldr	\rtemp, [\rtemp]
	mcr	p15, 0, \rtemp, c1, c0
#endif
	.endm
46
@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
63
@
	@ Counterpart of store_user_sp_lr: reload the USER-mode SP and LR
	@ from [\rd, #\offset] by briefly switching to SYS mode (which
	@ shares the user banked sp/lr).  SVC mode only.
	@
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm
75
76#ifndef CONFIG_THUMB2_KERNEL
@
	@ Return from an exception taken in SVC mode: restore SPSR from
	@ \rpsr, then reload the full pt_regs frame from the stack.  The
	@ exclusive monitor is cleared on the way out; ARMv6 has no clrex,
	@ so a dummy strex is used there instead.
	@
	.macro	svc_exit, rpsr
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#elif defined (CONFIG_CPU_V6)
	ldr	r0, [sp]
	strex	r1, r2, [sp]			@ clear the exclusive monitor
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
#else
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#endif
	.endm
90
@
	@ Return to user space from SVC mode.  \fast skips reloading r0
	@ (already holds the syscall return value); \offset is the distance
	@ from sp to the saved pt_regs.  Ends with "movs pc, lr", which
	@ also copies spsr_svc into the user cpsr.
	@
	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_32v6K)
	clrex					@ clear the exclusive monitor
#elif defined (CONFIG_CPU_V6)
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
108
@
	@ Derive the current thread_info pointer by masking the low 13 bits
	@ of sp (thread_info lives at the base of the 8KB kernel stack).
	@
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm
113#else /* CONFIG_THUMB2_KERNEL */
@
	@ Thumb-2 SVC-mode exception return.  "ldm {..pc}^" is unavailable,
	@ so build an RFE frame {pc, cpsr} just below the original stack
	@ top and return with "rfeia sp!".  The eq/ne adds compensate for
	@ the original stack's 8-byte alignment padding.
	@
	.macro	svc_exit, rpsr
	clrex					@ clear the exclusive monitor
	ldr	r0, [sp, #S_SP]			@ top of the stack
	ldr	r1, [sp, #S_PC]			@ return address
	tst	r0, #4				@ orig stack 8-byte aligned?
	stmdb	r0, {r1, \rpsr}			@ rfe context
	ldmia	sp, {r0 - r12}
	ldr	lr, [sp, #S_LR]
	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
	rfeia	sp!
	.endm
126
@
	@ Thumb-2 return to user space.  User sp/lr are restored via
	@ load_user_sp_lr (no "ldm {sp, lr}^" in Thumb-2); \fast skips
	@ reloading r0 (syscall return value already in place).
	@
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc
	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
143
@
	@ Thumb-2 variant of get_thread_info: mask the low 13 bits of sp.
	@ Thumb-2 has no shifted-operand mov from sp, hence the extra step.
	@
	.macro	get_thread_info, rd
	mov	\rd, sp
	lsr	\rd, \rd, #13
	mov	\rd, \rd, lsl #13
	.endm
149#endif /* !CONFIG_THUMB2_KERNEL */
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info