/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#endif
#endif

	.endm

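/*
 * For orientation, a C sketch of the dispatch loop above (illustrative
 * only; get_irqnr_and_base is a machine-specific macro supplied by
 * mach/entry-macro.S, and "get_irqnr" below is a hypothetical C wrapper
 * for it):
 *
 *	while (get_irqnr(&irq))			// any IRQ pending?
 *		asm_do_IRQ(irq, regs);		// returns via the 1b label
 */
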
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

	@
	@ common_invalid - generic code for failed exception
	@ (re-entrant version of handlers)
	@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

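/*
 * A worked example of what SPFIX() achieves, assuming S_FRAME_SIZE is a
 * multiple of 8 (asserted below for EABI builds):
 *
 *	sp on entry = 0x...fff4 (only 4-byte aligned); after
 *	"sub sp, sp, #(S_FRAME_SIZE - 4)" sp ends in 0x...0, so
 *	"tst sp, #4" sets Z and "subeq sp, sp, #4" drops one more word.
 *	Combined with the final "str r1, [sp, #-4]!" the completed frame
 *	is 8-byte aligned, as the EABI requires.  The matching
 *	"SPFIX( addeq r0, r0, #4 )" recomputes the original sp value so
 *	the caller's misaligned stack pointer is restored on exit.
 */
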
	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

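/*
 * For reference, the frame built by svc_entry matches struct pt_regs.
 * A condensed sketch of its layout (see arch/arm/include/asm/ptrace.h
 * for the real definition):
 *
 *	struct pt_regs {
 *		unsigned long uregs[18];
 *	};
 *	#define ARM_r0		uregs[0]	// S_R0
 *	#define ARM_sp		uregs[13]	// S_SP
 *	#define ARM_lr		uregs[14]	// S_LR
 *	#define ARM_pc		uregs[15]	// S_PC
 *	#define ARM_cpsr	uregs[16]	// S_PSR
 *	#define ARM_ORIG_r0	uregs[17]	// orig_r0, set to -1 here
 */
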
	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r4, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

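/*
 * The CONFIG_PREEMPT bookkeeping in __irq_svc, as a rough C sketch (not
 * the actual implementation):
 *
 *	int old = current_thread_info()->preempt_count;
 *	current_thread_info()->preempt_count = old + 1;
 *	irq_handler();
 *	current_thread_info()->preempt_count = old;	// restore
 *	if (old == 0 && (current_thread_info()->flags & _TIF_NEED_RESCHED))
 *		svc_preempt();
 */
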
#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r2, #-4]
#else
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must be too
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
#endif

	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

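/*
 * The Thumb-2 width test above, sketched in C: a leading halfword whose
 * top five bits are 0b11101, 0b11110 or 0b11111 (i.e. >= 0xe800) is the
 * first half of a 32-bit encoding; anything below that is a complete
 * 16-bit instruction.
 *
 *	unsigned int insn = first_half;			// ldrht from [r4]
 *	if ((first_half & 0xf800) >= 0xe800)		// 32-bit encoding
 *		insn = (first_half << 16) | second_half;
 */
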
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
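
/*
 * The mask/opcode table walk above, as a C sketch (the tables are
 * .LCneon_arm_opcodes/.LCneon_thumb_opcodes below; a zero mask
 * terminates the list):
 *
 *	const struct { unsigned int mask, opcode; } *p;
 *	for (p = table; p->mask != 0; p++)
 *		if ((insn & p->mask) == p->opcode)
 *			goto do_vfp;	// NEON shares the VFP handler
 *	// otherwise fall through to the coprocessor dispatch below
 */
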
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

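/*
 * Note on the dispatch above: "add pc, pc, r8, lsr #6" is a classic ARM
 * jump table.  r8 holds the coprocessor number shifted left by 8, so
 * "lsr #6" yields CP# * 4.  A read of pc returns the address of the
 * current instruction plus 8, which is exactly the CP#0 slot of the
 * word-per-entry table above; the nop pads the word in between.
 * In effect:  goto *table[cp_num];
 */
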
#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry

	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	enable_irq				@ Enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
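
/*
 * The registers saved at TI_CPU_SAVE correspond to struct
 * cpu_context_save in asm/thread_info.h -- approximately:
 *
 *	struct cpu_context_save {
 *		__u32 r4, r5, r6, r7, r8, r9;
 *		__u32 sl, fp, sp, pc;
 *		__u32 extra[2];		// CPU-specific extras
 *	};
 *
 * Only callee-saved state needs storing: everything else is dead across
 * the call into __switch_to by the usual procedure call standard.
 */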

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purpose.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, =1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

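/*
 * kuser_cmpxchg_fixup in C terms, as a sketch: if the interrupted user
 * pc lies inside the 1b..2b critical window of the helper as mapped at
 * 0xffff0fc0, rewind it so the load/compare/store sequence reruns from
 * the top (a single unsigned compare covers both bounds):
 *
 *	unsigned long start = 0xffff0fc0 + (1b - __kuser_cmpxchg);
 *	if (regs->ARM_pc - start <= (2b - 1b))
 *		regs->ARM_pc = start;
 */
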
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
	b	__kuser_memory_barrier
#else
	usr_ret	lr
#endif

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
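
/*
 * A user space caller can guard against running on an older kernel by
 * checking the helper count first, e.g. (illustrative only; per the
 * helper numbering, __kernel_cmpxchg requires version >= 2):
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	if (__kernel_helper_version < 2)
 *		abort();	// no __kernel_cmpxchg on this kernel
 */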

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
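
/*
 * What the stub does, in outline: the low four bits of the saved spsr
 * encode the mode the exception came from, and the word-aligned branch
 * table emitted after each stub is indexed by exactly those bits.  As a
 * C sketch:
 *
 *	void (*handler)(void) = branch_table[spsr & 0x0f];
 *	// USR_32 -> index 0, FIQ_32 -> 1, IRQ_32 -> 2, SVC_32 -> 3
 *	handler();	// entered in SVC mode via "movs pc, lr"
 */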

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:
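
/*
 * Why "+ stubs_offset" works: the vector table is copied to 0xffff0000
 * and the stubs to 0xffff0200, so a branch assembled here must encode
 * the displacement it will need *after* both copies, i.e. it must aim at
 *
 *	vector_xxx + (__vectors_start + 0x200 - __stubs_start)
 *
 * so that the pc-relative offset from the copied vector (at 0xffff00xx)
 * lands on the copied stub (at 0xffff02xx).
 */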

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4