ARM: move CP15 definitions to separate header file

/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
        .macro  irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
        ldr     r1, =handle_arch_irq
        mov     r0, sp
        adr     lr, BSYM(9997f)
        ldr     pc, [r1]
#else
        arch_irq_handler_default
#endif
9997:
        .endm
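
/*
 * Illustrative sketch (not part of this file): with CONFIG_MULTI_IRQ_HANDLER
 * the platform installs the C-level dispatcher by writing the
 * handle_arch_irq function pointer (allocated in .data at the bottom of
 * this file) before interrupts are enabled, e.g. hypothetical board code:
 *
 *      extern void (*handle_arch_irq)(struct pt_regs *);
 *
 *      static void __init my_board_init_irq(void)
 *      {
 *              handle_arch_irq = gic_handle_irq;
 *      }
 *
 * The macro above then jumps through that pointer with r0 = pt_regs and
 * lr aimed at the local 9997 label.
 */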

        .macro  pabt_helper
        @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
        ldr     ip, .LCprocfns
        mov     lr, pc
        ldr     pc, [ip, #PROCESSOR_PABT_FUNC]
#else
        bl      CPU_PABORT_HANDLER
#endif
        .endm

        .macro  dabt_helper

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - pt_regs
        @  r4 - aborted context pc
        @  r5 - aborted context psr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_DABORT
        ldr     ip, .LCprocfns
        mov     lr, pc
        ldr     pc, [ip, #PROCESSOR_DABT_FUNC]
#else
        bl      CPU_DABORT_HANDLER
#endif
        .endm
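
/*
 * Note on the calling idiom above: "mov lr, pc" loads lr with the address
 * two instructions ahead (PC reads as the current address + 8 in ARM
 * state), i.e. the instruction right after the following "ldr pc, [...]",
 * so the pair behaves like a bl through a function pointer.  .LCprocfns
 * holds the address of the per-CPU-type "processor" function table, and
 * PROCESSOR_DABT_FUNC/PROCESSOR_PABT_FUNC are the asm-offsets of its
 * abort entries.
 */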

#ifdef CONFIG_KPROBES
        .section .kprobes.text,"ax",%progbits
#else
        .text
#endif

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, reason
        sub     sp, sp, #S_FRAME_SIZE
 ARM(   stmib   sp, {r1 - lr}           )
 THUMB( stmia   sp, {r0 - r12}          )
 THUMB( str     sp, [sp, #S_SP]         )
 THUMB( str     lr, [sp, #S_LR]         )
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry BAD_PREFETCH
        b       common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
        inv_entry BAD_DATA
        b       common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
        inv_entry BAD_IRQ
        b       common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
        inv_entry BAD_UNDEFINSTR

        @
        @ XXX fall through to common_invalid
        @

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
        zero_fp

        ldmia   r0, {r4 - r6}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"

        mov     r0, sp
        b       bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
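
/*
 * SPFIX() expands its argument only when the EABI 8-byte stack alignment
 * rule is in force.  For example, on an AEABI/ARMv5+ build
 * "SPFIX( tst sp, #4 )" emits "tst sp, #4", while on older ABIs it emits
 * nothing, so svc_entry below pays the alignment cost only when required.
 */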

        .macro  svc_entry, stack_hole=0
 UNWIND(.fnstart                )
 UNWIND(.save {r0 - pc}         )
        sub     sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX( str     r0, [sp]        )       @ temporarily saved
 SPFIX( mov     r0, sp          )
 SPFIX( tst     r0, #4          )       @ test original stack alignment
 SPFIX( ldr     r0, [sp]        )       @ restored
#else
 SPFIX( tst     sp, #4          )
#endif
 SPFIX( subeq   sp, sp, #4      )
        stmia   sp, {r1 - r12}

        ldmia   r0, {r3 - r5}
        add     r7, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""      ""       ""
        add     r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX( addeq   r2, r2, #4      )
        str     r3, [sp, #-4]!          @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r3, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - sp_svc
        @  r3 - lr_svc
        @  r4 - lr_<exception>, already fixed up for correct return/restart
        @  r5 - spsr_<exception>
        @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        .endm
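
/*
 * On return from svc_entry, sp points at a struct pt_regs built as
 * r0-r12, sp_svc, lr_svc, the exception pc, spsr and orig_r0; r4 (the
 * exception pc) and r5 (the exception psr) remain live for the abort
 * helpers and the svc_exit paths below.
 */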

        .align  5
__dabt_svc:
        svc_entry
        mov     r2, sp
        dabt_helper

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
        tst     r5, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     r5, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__dabt_svc)

        .align  5
__irq_svc:
        svc_entry
        irq_handler

#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        teq     r8, #0                          @ if preempt count != 0
        movne   r0, #0                          @ force flags to 0
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        @ The parent context IRQs must have been enabled to get here in
        @ the first place, so there's no point checking the PSR I bit.
        bl      trace_hardirqs_on
#endif
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__irq_svc)

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
        mov     r8, lr
1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        moveq   pc, r8                          @ go again
        b       1b
#endif

        .align  5
__und_svc:
#ifdef CONFIG_KPROBES
        @ If a kprobe is about to simulate a "stmdb sp..." instruction,
        @ it obviously needs free stack space which then will belong to
        @ the saved context.
        svc_entry 64
#else
        svc_entry
#endif
        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        @  r0 - instruction
        @
#ifndef CONFIG_THUMB2_KERNEL
        ldr     r0, [r4, #-4]
#else
        ldrh    r0, [r4, #-2]                   @ Thumb instruction at LR - 2
        cmp     r0, #0xe800                     @ 32-bit instruction if xx >= 0
        ldrhhs  r9, [r4]                        @ bottom 16 bits
        orrhs   r0, r9, r0, lsl #16
#endif
        adr     r9, BSYM(1f)
        mov     r2, r4
        bl      call_fpe

        mov     r0, sp                          @ struct pt_regs *regs
        bl      do_undefinstr

        @
        @ IRQs off again before pulling preserved data off the stack
        @
1:      disable_irq_notrace

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     r5, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     r5, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__und_svc)

        .align  5
__pabt_svc:
        svc_entry
        mov     r2, sp                          @ regs
        pabt_helper

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
        tst     r5, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     r5, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__pabt_svc)

        .align  5
.LCcralign:
        .word   cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
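
/*
 * Worked example of the check above: struct pt_regs on ARM is 18 words
 * (r0-r15, cpsr, orig_r0), so S_FRAME_SIZE is 72 bytes and 72 & 7 == 0;
 * the #error only fires if the struct is ever changed in a way that
 * breaks the EABI 64-bit stack alignment guarantee.
 */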

        .macro  usr_entry
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )       @ don't unwind the user space
        sub     sp, sp, #S_FRAME_SIZE
 ARM(   stmib   sp, {r1 - r12}  )
 THUMB( stmia   sp, {r0 - r12}  )

        ldmia   r0, {r3 - r5}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""     ""        ""

        str     r3, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r4 - lr_<exception>, already fixed up for correct return/restart
        @  r5 - spsr_<exception>
        @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r0, {r4 - r6}
 ARM(   stmdb   r0, {sp, lr}^                   )
 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC    )

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
        bl      trace_hardirqs_off
#endif
        .endm
377
b49c0f24 378 .macro kuser_cmpxchg_check
40fb79c8 379#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
b49c0f24
NP
380#ifndef CONFIG_MMU
381#warning "NPTL on non MMU needs fixing"
382#else
383 @ Make sure our user space atomic helper is restarted
384 @ if it was interrupted in a critical region. Here we
385 @ perform a quick test inline since it should be false
386 @ 99.9999% of the time. The rest is done out of line.
b059bdc3 387 cmp r4, #TASK_SIZE
40fb79c8 388 blhs kuser_cmpxchg64_fixup
b49c0f24
NP
389#endif
390#endif
391 .endm
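
/*
 * The cmp above works because the kuser helpers live in the vector page
 * at 0xffff0f60-0xffff0fff, above TASK_SIZE: if the aborted user pc (r4)
 * is no lower than TASK_SIZE it can only have been inside a helper, so
 * the out-of-line fixup checks whether it sits in a cmpxchg critical
 * section and rewinds it if so.
 */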

        .align  5
__dabt_usr:
        usr_entry
        kuser_cmpxchg_check
        mov     r2, sp
        dabt_helper
        b       ret_from_exception
 UNWIND(.fnend          )
ENDPROC(__dabt_usr)

        .align  5
__irq_usr:
        usr_entry
        kuser_cmpxchg_check
        irq_handler
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user_from_irq
 UNWIND(.fnend          )
ENDPROC(__irq_usr)

        .ltorg

        .align  5
__und_usr:
        usr_entry

        mov     r2, r4
        mov     r3, r5

        @
        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction
        @
        @  r0 - instruction
        @
        adr     r9, BSYM(ret_from_exception)
        adr     lr, BSYM(__und_usr_unknown)
        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        itet    eq                              @ explicit IT needed for the 1f label
        subeq   r4, r2, #4                      @ ARM instr at LR - 4
        subne   r4, r2, #2                      @ Thumb instr at LR - 2
1:      ldreqt  r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
        reveq   r0, r0                          @ little endian instruction
#endif
        beq     call_fpe
        @ Thumb instruction
#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
        ldr     r5, .LCcpu_architecture
        ldr     r5, [r5]
        cmp     r5, #CPU_ARCH_ARMv7
        blo     __und_usr_unknown
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
        .arch   armv6t2
#endif
2:
 ARM(   ldrht   r5, [r4], #2    )
 THUMB( ldrht   r5, [r4]        )
 THUMB( add     r4, r4, #2      )
        cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
        blo     __und_usr_unknown
3:      ldrht   r0, [r4]
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        orr     r0, r0, r5, lsl #16

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
        .arch   armv6k
#else
        .arch   armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
        b       __und_usr_unknown
#endif
 UNWIND(.fnend          )
ENDPROC(__und_usr)

        @
        @ fallthrough to call_fpe
        @

/*
 * The out-of-line fixup for the ldrt instructions above.
 */
        .pushsection .fixup, "ax"
4:      mov     pc, r9
        .popsection
        .pushsection __ex_table,"a"
        .long   1b, 4b
#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
        .long   2b, 4b
        .long   3b, 4b
#endif
        .popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
        @
        @ Fall-through from Thumb-2 __und_usr
        @
#ifdef CONFIG_NEON
        adr     r6, .LCneon_thumb_opcodes
        b       2f
#endif
call_fpe:
#ifdef CONFIG_NEON
        adr     r6, .LCneon_arm_opcodes
2:
        ldr     r7, [r6], #4                    @ mask value
        cmp     r7, #0                          @ end mask?
        beq     1f
        and     r8, r0, r7
        ldr     r7, [r6], #4                    @ opcode bits matching in mask
        cmp     r8, r7                          @ NEON instruction?
        bne     2b
        get_thread_info r10
        mov     r7, #1
        strb    r7, [r10, #TI_USED_CP + 10]     @ mark CP#10 as used
        strb    r7, [r10, #TI_USED_CP + 11]     @ mark CP#11 as used
        b       do_vfp                          @ let VFP handler handle this
1:
#endif
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000             @ mask out op-code bits
        teqne   r8, #0x0f000000                 @ SWI (ARM6/7 bug)?
#endif
        moveq   pc, lr
        get_thread_info r10                     @ get current thread
        and     r8, r0, #0x00000f00             @ mask out CP number
 THUMB( lsr     r8, r8, #8              )
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
 ARM(   strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
 THUMB( strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
 ARM(   add     pc, pc, r8, lsr #6      )
 THUMB( lsl     r8, r8, #2              )
 THUMB( add     pc, r8                  )
        nop

        movw_pc lr                              @ CP#0
        W(b)    do_fpe                          @ CP#1 (FPE)
        W(b)    do_fpe                          @ CP#2 (FPE)
        movw_pc lr                              @ CP#3
#ifdef CONFIG_CRUNCH
        b       crunch_task_enable              @ CP#4 (MaverickCrunch)
        b       crunch_task_enable              @ CP#5 (MaverickCrunch)
        b       crunch_task_enable              @ CP#6 (MaverickCrunch)
#else
        movw_pc lr                              @ CP#4
        movw_pc lr                              @ CP#5
        movw_pc lr                              @ CP#6
#endif
        movw_pc lr                              @ CP#7
        movw_pc lr                              @ CP#8
        movw_pc lr                              @ CP#9
#ifdef CONFIG_VFP
        W(b)    do_vfp                          @ CP#10 (VFP)
        W(b)    do_vfp                          @ CP#11 (VFP)
#else
        movw_pc lr                              @ CP#10 (VFP)
        movw_pc lr                              @ CP#11 (VFP)
#endif
        movw_pc lr                              @ CP#12
        movw_pc lr                              @ CP#13
        movw_pc lr                              @ CP#14 (Debug)
        movw_pc lr                              @ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
        .align  2
.LCcpu_architecture:
        .word   __cpu_architecture
#endif

#ifdef CONFIG_NEON
        .align  6

.LCneon_arm_opcodes:
        .word   0xfe000000                      @ mask
        .word   0xf2000000                      @ opcode

        .word   0xff100000                      @ mask
        .word   0xf4000000                      @ opcode

        .word   0x00000000                      @ mask
        .word   0x00000000                      @ opcode

.LCneon_thumb_opcodes:
        .word   0xef000000                      @ mask
        .word   0xef000000                      @ opcode

        .word   0xff100000                      @ mask
        .word   0xf9000000                      @ opcode

        .word   0x00000000                      @ mask
        .word   0x00000000                      @ opcode
#endif
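
/*
 * Worked example of the table scan in call_fpe: an ARM instruction whose
 * top bits are 0xf2... ANDed with the first mask 0xfe000000 yields
 * 0xf2000000, which equals the first opcode entry, so the instruction is
 * treated as NEON and handed to do_vfp.  The all-zero mask/opcode pair
 * terminates the table, falling through to the ordinary coprocessor
 * decoding above.
 */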

do_fpe:
        enable_irq
        ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE           @ r10 = workspace
        ldr     pc, [r4]                        @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

        .pushsection .data
ENTRY(fp_enter)
        .word   no_fp
        .popsection

ENTRY(no_fp)
        mov     pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
        enable_irq
        mov     r0, sp
        adr     lr, BSYM(ret_from_exception)
        b       do_undefinstr
ENDPROC(__und_usr_unknown)

        .align  5
__pabt_usr:
        usr_entry
        mov     r2, sp                          @ regs
        pabt_helper
 UNWIND(.fnend          )
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user
 UNWIND(.fnend          )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
 ARM(   stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
 THUMB( stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
 THUMB( str     sp, [ip], #4               )
 THUMB( str     lr, [ip], #4               )
#ifdef CONFIG_CPU_USE_DOMAINS
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#endif
        set_tls r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        ldr     r7, [r2, #TI_TASK]
        ldr     r8, =__stack_chk_guard
        ldr     r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
#endif
        mov     r5, r0
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        str     r7, [r8]
#endif
 THUMB( mov     ip, r4                     )
        mov     r0, r5
 ARM(   ldmia   r4, {r4 - sl, fp, sp, pc}  )    @ Load all regs saved previously
 THUMB( ldmia   ip!, {r4 - sl, fp}         )    @ Load all regs saved previously
 THUMB( ldr     sp, [ip], #4               )
 THUMB( ldr     pc, [ip]                   )
 UNWIND(.fnend          )
ENDPROC(__switch_to)

        __INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
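
/*
 * Illustrative user-space usage (per kernel_user_helpers.txt); the
 * addresses are ABI-stable entry points in the vector page:
 *
 *      typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *                                      volatile int *ptr);
 *      #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * The helper returns 0 (and sets the C flag) if *ptr was atomically
 * updated from oldval to newval, non-zero otherwise.
 */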
 THUMB( .arm    )

        .macro  usr_ret, reg
#ifdef CONFIG_ARM_THUMB
        bx      \reg
#else
        mov     pc, \reg
#endif
        .endm
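
/*
 * usr_ret returns to user space through \reg: "bx" is used when the
 * kernel supports Thumb user code, so that bit 0 of the return address
 * selects the instruction set, while a plain "mov pc" suffices for
 * ARM-only CPUs that lack bx.
 */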

        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:                              @ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

        /*
         * Poor you.  No fast solution possible...
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
         */
        stmfd   sp!, {r7, lr}
        ldr     r7, 1f                          @ it's 20 bits
        swi     __ARM_NR_cmpxchg64
        ldmfd   sp!, {r7, pc}
1:      .word   __ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

        stmfd   sp!, {r4, r5, r6, r7}
        ldrd    r4, r5, [r0]                    @ load old val
        ldrd    r6, r7, [r1]                    @ load new val
        smp_dmb arm
1:      ldrexd  r0, r1, [r2]                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
        eoreqs  r3, r1, r5                      @ compare with oldval (2)
        strexdeq r3, r6, r7, [r2]               @ store newval if eq
        teqeq   r3, #1                          @ success?
        beq     1b                              @ if not then retry
        smp_dmb arm
        rsbs    r0, r3, #0                      @ set returned val and C flag
        ldmfd   sp!, {r4, r5, r6, r7}
        usr_ret lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg64
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle of
         * the critical sequence.  The same strategy as for cmpxchg is used.
         */
        stmfd   sp!, {r4, r5, r6, lr}
        ldmia   r0, {r4, r5}                    @ load old val
        ldmia   r1, {r6, lr}                    @ load new val
1:      ldmia   r2, {r0, r1}                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
        eoreqs  r3, r1, r5                      @ compare with oldval (2)
2:      stmeqia r2, {r6, lr}                    @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        ldmfd   sp!, {r4, r5, r6, pc}

        .text
kuser_cmpxchg64_fixup:
        @ Called from the kuser_cmpxchg_check macro.
        @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs.  r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
        subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
        bcc     kuser_cmpxchg32_fixup
#endif
        mov     pc, lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else
#error "incoherent kernel configuration"
#endif

        /* pad to next slot */
        .rept   (16 - (. - __kuser_cmpxchg64)/4)
        .word   0
        .endr
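
/*
 * Padding arithmetic: each kuser slot is 32 bytes and __kuser_cmpxchg64
 * is allowed two of them, so the .rept above pads the code out to
 * 16 words (64 bytes), ending exactly at the 0xffff0fa0 slot that
 * follows.
 */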

        .align  5

__kuser_memory_barrier:                         @ 0xffff0fa0
        smp_dmb arm
        usr_ret lr

        .align  5

__kuser_cmpxchg:                                @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

        /*
         * Poor you.  No fast solution possible...
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
         */
        stmfd   sp!, {r7, lr}
        ldr     r7, 1f                          @ it's 20 bits
        swi     __ARM_NR_cmpxchg
        ldmfd   sp!, {r7, pc}
1:      .word   __ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle
         * of the critical sequence.  To prevent this, code is added to
         * the IRQ and data abort exception handlers to set the pc back
         * to the beginning of the critical section if it is found to be
         * within that critical section (see kuser_cmpxchg32_fixup).
         */
1:      ldr     r3, [r2]                        @ load current val
        subs    r3, r3, r0                      @ compare with oldval
2:      streq   r1, [r2]                        @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        usr_ret lr

        .text
kuser_cmpxchg32_fixup:
        @ Called from the kuser_cmpxchg_check macro.
        @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs.  r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
        subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        mov     pc, lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else

        smp_dmb arm
1:      ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        teqeq   r3, #1
        beq     1b
        rsbs    r0, r3, #0
        /* beware -- each __kuser slot must be 8 instructions max */
        ALT_SMP(b       __kuser_memory_barrier)
        ALT_UP(usr_ret  lr)

#endif

        .align  5

__kuser_get_tls:                                @ 0xffff0fe0
        ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
        usr_ret lr
        mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
        .rep    4
        .word   0                       @ 0xffff0ff0 software TLS value, then
        .endr                           @ pad up to __kuser_helper_version
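
/*
 * Illustrative user-space usage (per kernel_user_helpers.txt):
 *
 *      typedef void * (__kuser_get_tls_t)(void);
 *      #define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 * On CPUs with a hardware TLS register, boot code (kuser_get_tls_init)
 * patches this entry to use the mrc at 0xffff0fe8; otherwise the ldr
 * above picks up the software TLS value stored at 0xffff0ff0.
 */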

__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:

 THUMB( .thumb  )

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode, correction=0
        .align  5

vector_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @
        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
        @ (parent CPSR)
        @
        stmia   sp, {r0, lr}            @ save r0, lr
        mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr

        @
        @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
        mrs     r0, cpsr
        eor     r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
        msr     spsr_cxsf, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x0f
 THUMB( adr     r0, 1f                  )
 THUMB( ldr     lr, [r0, lr, lsl #2]    )
        mov     r0, sp
 ARM(   ldr     lr, [pc, lr, lsl #2]    )
        movs    pc, lr                  @ branch to handler in SVC mode
ENDPROC(vector_\name)

        .align  2
        @ handler addresses follow this label
1:
        .endm
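
/*
 * Worked example of the dispatch above: lr holds the spsr of the
 * interrupted context, whose low four bits encode the mode - 0b0000 for
 * USR, 0b0010 for IRQ, 0b0011 for SVC, and so on.  "and lr, lr, #0x0f"
 * turns that into an index into the 16-entry branch table that each
 * vector_stub invocation places immediately after the macro body, which
 * is why e.g. the irq table below routes entry 0 to __irq_usr and entry 3
 * to __irq_svc, with everything else invalid.
 */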

        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid                   @  4
        .long   __irq_invalid                   @  5
        .long   __irq_invalid                   @  6
        .long   __irq_invalid                   @  7
        .long   __irq_invalid                   @  8
        .long   __irq_invalid                   @  9
        .long   __irq_invalid                   @  a
        .long   __irq_invalid                   @  b
        .long   __irq_invalid                   @  c
        .long   __irq_invalid                   @  d
        .long   __irq_invalid                   @  e
        .long   __irq_invalid                   @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid                  @  4
        .long   __dabt_invalid                  @  5
        .long   __dabt_invalid                  @  6
        .long   __dabt_invalid                  @  7
        .long   __dabt_invalid                  @  8
        .long   __dabt_invalid                  @  9
        .long   __dabt_invalid                  @  a
        .long   __dabt_invalid                  @  b
        .long   __dabt_invalid                  @  c
        .long   __dabt_invalid                  @  d
        .long   __dabt_invalid                  @  e
        .long   __dabt_invalid                  @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid                  @  4
        .long   __pabt_invalid                  @  5
        .long   __pabt_invalid                  @  6
        .long   __pabt_invalid                  @  7
        .long   __pabt_invalid                  @  8
        .long   __pabt_invalid                  @  9
        .long   __pabt_invalid                  @  a
        .long   __pabt_invalid                  @  b
        .long   __pabt_invalid                  @  c
        .long   __pabt_invalid                  @  d
        .long   __pabt_invalid                  @  e
        .long   __pabt_invalid                  @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, UND_MODE

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc                       @  3  (SVC_26 / SVC_32)
        .long   __und_invalid                   @  4
        .long   __und_invalid                   @  5
        .long   __und_invalid                   @  6
        .long   __und_invalid                   @  7
        .long   __und_invalid                   @  8
        .long   __und_invalid                   @  9
        .long   __und_invalid                   @  a
        .long   __und_invalid                   @  b
        .long   __und_invalid                   @  c
        .long   __und_invalid                   @  d
        .long   __und_invalid                   @  e
        .long   __und_invalid                   @  f
1091 .align 5
1092
1093/*=============================================================================
1094 * Undefined FIQs
1095 *-----------------------------------------------------------------------------
1096 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
1097 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
1098 * Basically to switch modes, we *HAVE* to clobber one register... brain
1099 * damage alert! I don't think that we can execute any code in here in any
1100 * other mode than FIQ... Ok you can switch to another mode, but you can't
1101 * get out of that mode without clobbering one register.
1102 */
1103vector_fiq:
1104 disable_fiq
1105 subs pc, lr, #4
1106
1107/*=============================================================================
1108 * Address exception handler
1109 *-----------------------------------------------------------------------------
1110 * These aren't too critical.
1111 * (they're not supposed to happen, and won't happen in 32-bit data mode).
1112 */
1113
1114vector_addrexcptn:
1115 b vector_addrexcptn
1116
1117/*
1118 * We group all the following data together to optimise
1119 * for CPUs with separate I & D caches.
1120 */
1121 .align 5
1122
1123.LCvswi:
1124 .word vector_swi
1125
7933523d 1126 .globl __stubs_end
1da177e4
LT
1127__stubs_end:
        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start

        .globl  __vectors_start
__vectors_start:
 ARM(   swi     SYS_ERROR0      )
 THUMB( svc     #0              )
 THUMB( nop                     )
        W(b)    vector_und + stubs_offset
        W(ldr)  pc, .LCvswi + stubs_offset
        W(b)    vector_pabt + stubs_offset
        W(b)    vector_dabt + stubs_offset
        W(b)    vector_addrexcptn + stubs_offset
        W(b)    vector_irq + stubs_offset
        W(b)    vector_fiq + stubs_offset

        .globl  __vectors_end
__vectors_end:
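
/*
 * Note on stubs_offset: the vectors are copied to 0xffff0000 and the
 * stubs to 0xffff0200, i.e. they keep the same relative distance that
 * __vectors_start + 0x200 and __stubs_start have here at link time.
 * Branching to "vector_xxx + stubs_offset" therefore encodes exactly the
 * PC-relative offset that is correct once both blocks have been copied
 * into place.
 */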

        .data

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4

#ifdef CONFIG_MULTI_IRQ_HANDLER
        .globl  handle_arch_irq
handle_arch_irq:
        .space  4
#endif