/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_local_timer
#endif
#endif

	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - lr}
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""   ""   ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
 SPFIX(	tst	sp, #4		)
 SPFIX(	bicne	sp, sp, #4	)
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""   ""   ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole)
 SPFIX(	addne	r0, r0, #4	)
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r0, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
#ifdef MULTI_PABORT
	mov	r0, r2				@ pass address of aborted instruction.
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	CPU_PABORT_HANDLER(r0, r2)
#endif
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""   ""   ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
	stmdb	r0, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_DABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif

	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, ret_from_exception
	adr	lr, __und_usr_unknown
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:	ldrht	r5, [r4], #2
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
4:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this threads thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
#endif
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.data
ENTRY(fp_enter)
	.word	no_fp
	.previous

no_fp:	mov	pc, lr

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry

#ifdef MULTI_PABORT
	mov	r0, r2				@ pass address of aborted instruction.
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
#else
	CPU_PABORT_HANDLER(r0, r2)
#endif
	enable_irq				@ Enable interrupts
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	r0, r5
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purpose.
 */

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *		: : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	mov	r7, #0xff00		@ 0xfff0 into r7 for EABI
	orr	r7, r7, #0xf0
	swi	#0x9ffff0
	ldmfd	sp!, {r7, pc}

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
	b	__kuser_memory_barrier
#else
	usr_ret	lr
#endif

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *		: "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
#else
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
#endif
	usr_ret	lr

	.rep	5
	.word	0				@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
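
/*
 * Illustrative sketch only (not part of the original source): user space
 * would typically gate its use of a given helper on this version word
 * before calling it, e.g.
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	if (__kernel_helper_version >= 2)	// assumed: cmpxchg helper present
 *		result = __kernel_cmpxchg(old, new, ptr);
 *	else
 *		result = fallback_cmpxchg(old, new, ptr);  // hypothetical fallback
 *
 * The helper numbering assumed above should be checked against the kernel
 * version actually in use.
 */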

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4