/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 *  it to save wrong values...  Be aware!
 */
#include <linux/config.h>

#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/hardware.h>       /* should be moved into entry-macro.S */
#include <asm/arch/irqs.h>      /* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
        .macro  irq_handler
1:      get_irqnr_and_base r0, r6, r5, lr
        movne   r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adrne   lr, 1b
        bne     asm_do_IRQ

#ifdef CONFIG_SMP
        /*
         * XXX
         *
         * this macro assumes that irqstat (r6) and base (r5) are
         * preserved from get_irqnr_and_base above
         */
        test_for_ipi r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, 1b
        bne     do_IPI
#endif

        .endm
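
/*
 * For reference, a rough C-level sketch of the dispatch loop the macro above
 * implements (sketch only; asm_do_IRQ and do_IPI are the C handlers it
 * branches to, called with r0/r1 set up as noted in the comments above,
 * "regs" stands for the pt_regs frame at sp, and irq_pending/ipi_pending
 * stand in for the condition flags left by get_irqnr_and_base/test_for_ipi):
 *
 *	for (;;) {
 *		irq = get_irqnr_and_base();
 *		if (irq_pending) {
 *			asm_do_IRQ(irq, regs);
 *			continue;
 *		}
 *	#ifdef CONFIG_SMP
 *		if (ipi_pending) {
 *			do_IPI(regs);
 *			continue;
 *		}
 *	#endif
 *		break;
 *	}
 *
 * Because each handler is entered with lr pointing back at 1b, all pending
 * interrupt sources are drained before the macro falls through.
 */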

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, sym, reason
        sub     sp, sp, #S_FRAME_SIZE           @ Allocate frame size in one go
        stmia   sp, {r0 - lr}                   @ Save XXX r0 - lr
        ldr     r4, .LC\sym
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry abt, BAD_PREFETCH
        b       1f

__dabt_invalid:
        inv_entry abt, BAD_DATA
        b       1f

__irq_invalid:
        inv_entry irq, BAD_IRQ
        b       1f

__und_invalid:
        inv_entry und, BAD_UNDEFINSTR

1:      zero_fp
        ldmia   r4, {r5 - r7}                   @ Get XXX pc, cpsr, old_r0
        add     r4, sp, #S_PC
        stmia   r4, {r5 - r7}                   @ Save XXX pc, cpsr, old_r0
        mov     r0, sp
        and     r2, r6, #31                     @ int mode
        b       bad_mode

/*
 * SVC mode handlers
 */
        .macro  svc_entry, sym
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ save r0 - r12
        ldr     r2, .LC\sym
        add     r0, sp, #S_FRAME_SIZE
        ldmia   r2, {r2 - r4}                   @ get pc, cpsr
        add     r5, sp, #S_SP
        mov     r1, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r0 - sp_svc
        @  r1 - lr_svc
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r5, {r0 - r4}
        .endm
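
/*
 * The frame built by svc_entry (and by usr_entry below) is laid out to match
 * struct pt_regs from asm/ptrace.h, which on ARM is essentially 18 words:
 * r0-r12, sp, lr, pc, cpsr and orig_r0.  A sketch for reference only; the
 * S_* offsets and S_FRAME_SIZE used here are derived from that layout:
 *
 *	struct pt_regs {
 *		long uregs[18];
 *	};
 *	S_SP  = offset of the saved sp   (13 * 4)
 *	S_PC  = offset of the saved pc   (15 * 4)
 *	S_PSR = offset of the saved cpsr (16 * 4)
 *	S_FRAME_SIZE = sizeof(struct pt_regs) = 72
 */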

        .align  5
__dabt_svc:
        svc_entry abt

        @
        @ get ready to re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4]
#else
        bl      CPU_ABORT_HANDLER
#endif

        @
        @ set desired IRQ state, then call main handler
        @
        msr     cpsr_c, r9
        mov     r2, sp
        bl      do_DataAbort

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .align  5
__irq_svc:
        svc_entry irq
#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        add     r7, r8, #1                      @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
preempt_return:
        ldr     r0, [tsk, #TI_PREEMPT]          @ read preempt value
        str     r8, [tsk, #TI_PREEMPT]          @ restore preempt count
        teq     r0, r7
        strne   r0, [r0, -r0]                   @ bug()
#endif
        ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
        teq     r8, #0                          @ was preempt count = 0
        ldreq   r6, .LCirq_stat
        movne   pc, lr                          @ no
        ldr     r0, [r6, #4]                    @ local_irq_count
        ldr     r1, [r6, #8]                    @ local_bh_count
        adds    r0, r0, r1
        movne   pc, lr
        mov     r7, #0                          @ preempt_schedule_irq
        str     r7, [tsk, #TI_PREEMPT]          @ expects preempt_count == 0
1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return                  @ go again
        b       1b
#endif
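
/*
 * In rough C terms, svc_preempt above does something like the following
 * sketch.  The two words read from irq_stat are the local_irq_count and
 * local_bh_count noted in the comments, and the preempt count is forced to
 * zero first because preempt_schedule_irq expects to be called that way:
 *
 *	if (saved_preempt_count != 0)
 *		return;
 *	if (local_irq_count + local_bh_count != 0)
 *		return;
 *	current_thread_info()->preempt_count = 0;
 *	do
 *		preempt_schedule_irq();
 *	while (current_thread_info()->flags & _TIF_NEED_RESCHED);
 */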

        .align  5
__und_svc:
        svc_entry und

        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        @  r0 - instruction
        @
        ldr     r0, [r2, #-4]
        adr     r9, 1f
        bl      call_fpe

        mov     r0, sp                          @ struct pt_regs *regs
        bl      do_undefinstr

        @
        @ IRQs off again before pulling preserved data off the stack
        @
1:      disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     lr, [sp, #S_PSR]                @ Get SVC cpsr
        msr     spsr_cxsf, lr
        ldmia   sp, {r0 - pc}^                  @ Restore SVC registers

        .align  5
__pabt_svc:
        svc_entry abt

        @
        @ re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT
        msr     cpsr_c, r9

        @
        @ set args, then call main handler
        @
        @  r0 - address of faulting instruction
        @  r1 - pointer to registers on stack
        @
        mov     r0, r2                          @ address (pc)
        mov     r1, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r0, [sp, #S_PSR]
        msr     spsr_cxsf, r0
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .align  5
.LCirq:
        .word   __temp_irq
.LCund:
        .word   __temp_und
.LCabt:
        .word   __temp_abt
.LCcralign:
        .word   cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
        .word   irq_stat
#endif

/*
 * User mode handlers
 */
        .macro  usr_entry, sym
        sub     sp, sp, #S_FRAME_SIZE           @ Allocate frame size in one go
        stmia   sp, {r0 - r12}                  @ save r0 - r12
        ldr     r7, .LC\sym
        add     r5, sp, #S_PC
        ldmia   r7, {r2 - r4}                   @ Get USR pc, cpsr

#if __LINUX_ARM_ARCH__ < 6
        @ make sure our user space atomic helper is aborted
        cmp     r2, #VIRT_OFFSET
        bichs   r3, r3, #PSR_Z_BIT
#endif

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r5, {r2 - r4}
        stmdb   r5, {sp, lr}^

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp
        .endm
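
/*
 * alignment_trap is provided by entry-header.S.  Roughly (an assumption,
 * not a verbatim copy of the macro), when CONFIG_ALIGNMENT_TRAP is enabled
 * it expands to something like:
 *
 *	ldr	r0, .LCcralign		@ &cr_alignment (see end of this file)
 *	ldr	r0, [r0]
 *	mcr	p15, 0, r0, c1, c0	@ rewrite the CP15 control register
 *
 * so the kernel always runs with the alignment trap set up the way it was
 * configured at boot, whatever user space happened to be running with.
 */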

        .align  5
__dabt_usr:
        usr_entry abt

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
#ifdef MULTI_ABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4]
#else
        bl      CPU_ABORT_HANDLER
#endif

        @
        @ IRQs on, then call the main handler
        @
        enable_irq
        mov     r2, sp
        adr     lr, ret_from_exception
        b       do_DataAbort

        .align  5
__irq_usr:
        usr_entry irq

        get_thread_info tsk
#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        add     r7, r8, #1                      @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_PREEMPT]
        str     r8, [tsk, #TI_PREEMPT]
        teq     r0, r7
        strne   r0, [r0, -r0]
#endif
        mov     why, #0
        b       ret_to_user

        .ltorg

        .align  5
__und_usr:
        usr_entry und

        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        bne     fpundefinstr                    @ ignore FP
        sub     r4, r2, #4

        @
        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction
        @
        @  r0 - instruction
        @
1:      ldrt    r0, [r4]
        adr     r9, ret_from_exception
        adr     lr, fpundefinstr
        @
        @ fallthrough to call_fpe
        @

/*
 * The out of line fixup for the ldrt above.
 */
        .section .fixup, "ax"
2:      mov     pc, r9
        .previous
        .section __ex_table,"a"
        .long   1b, 2b
        .previous
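
/*
 * That is: should the user-space load at 1: fault and prove unrecoverable,
 * the exception-table entry above redirects execution to 2:, which abandons
 * the emulation attempt and returns through r9 (ret_from_exception) rather
 * than treating it as a kernel fault.
 */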

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000             @ mask out op-code bits
        teqne   r8, #0x0f000000                 @ SWI (ARM6/7 bug)?
#endif
        moveq   pc, lr
        get_thread_info r10                     @ get current thread
        and     r8, r0, #0x00000f00             @ mask out CP number
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
        strb    r7, [r6, r8, lsr #8]            @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
        enable_irq
        add     pc, pc, r8, lsr #6
        mov     r0, r0

        mov     pc, lr                          @ CP#0
        b       do_fpe                          @ CP#1 (FPE)
        b       do_fpe                          @ CP#2 (FPE)
        mov     pc, lr                          @ CP#3
        mov     pc, lr                          @ CP#4
        mov     pc, lr                          @ CP#5
        mov     pc, lr                          @ CP#6
        mov     pc, lr                          @ CP#7
        mov     pc, lr                          @ CP#8
        mov     pc, lr                          @ CP#9
#ifdef CONFIG_VFP
        b       do_vfp                          @ CP#10 (VFP)
        b       do_vfp                          @ CP#11 (VFP)
#else
        mov     pc, lr                          @ CP#10 (VFP)
        mov     pc, lr                          @ CP#11 (VFP)
#endif
        mov     pc, lr                          @ CP#12
        mov     pc, lr                          @ CP#13
        mov     pc, lr                          @ CP#14 (Debug)
        mov     pc, lr                          @ CP#15 (Control)

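/*
 * How the jump table above is indexed (worked example): r8 holds the CP
 * number shifted left by 8 bits, so "r8, lsr #6" is cp_num * 4.  When the
 * "add pc, pc, r8, lsr #6" executes, pc reads as that instruction's address
 * + 8, i.e. the address of the CP#0 entry (the "mov r0, r0" only pads out
 * the intervening slot).  A VFP instruction (CP#10) therefore lands on the
 * CP#0 entry + 40, which is the "b do_vfp" slot for CP#10.
 */
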
do_fpe:
        ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE           @ r10 = workspace
        ldr     pc, [r4]                        @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

        .data
ENTRY(fp_enter)
        .word   fpundefinstr
        .text

fpundefinstr:
        mov     r0, sp
        adr     lr, ret_from_exception
        b       do_undefinstr

        .align  5
__pabt_usr:
        usr_entry abt

        enable_irq                              @ Enable interrupts
        mov     r0, r2                          @ address (pc)
        mov     r1, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
        stmia   ip!, {r4 - sl, fp, sp, lr}      @ Store most regs on stack
        ldr     r6, [r2, #TI_CPU_DOMAIN]!
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
        mra     r4, r5, acc0
        stmia   ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
        mcr     p15, 0, r3, c13, c0, 3          @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
        mov     r4, #0xffff0fff
        str     r3, [r4, #-15]                  @ TLS val at 0xffff0ff0
#endif
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
#ifdef CONFIG_VFP
        @ Always disable VFP so we can lazily save/restore the old
        @ state. This occurs in the context of the previous thread.
        VFPFMRX r4, FPEXC
        bic     r4, r4, #FPEXC_ENABLE
        VFPFMXR FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
        bl      iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
        add     r4, r2, #40                     @ cpu_context_save->extra
        ldmib   r4, {r4, r5}
        mar     acc0, r4, r5
#endif
        ldmib   r2, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously
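
/*
 * __switch_to is invoked from the switch_to() macro in asm/system.h,
 * roughly as (sketch; the exact accessors may differ):
 *
 *	last = __switch_to(prev, prev's thread_info, next's thread_info);
 *
 * The final ldmib reloads r4-sl, fp, sp and pc from the new thread's saved
 * cpu context (the TI_CPU_SAVE area), so execution resumes wherever that
 * thread last called __switch_to, with r0 (the task we just switched away
 * from) becoming its return value.
 */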

        __INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries. In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems. In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here. In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */

        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:                                @ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP  /* sanity check */
#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
#endif

        /*
         * Theory of operation:
         *
         * We set the Z flag before loading oldval. If ever an exception
         * occurs we cannot be sure the loaded value will still be the same
         * when the exception returns, therefore the user exception handler
         * will clear the Z flag whenever the interrupted user code was
         * actually from the kernel address space (see the usr_entry macro).
         *
         * The post-increment on the str is used to prevent a race with an
         * exception happening just after the str instruction which would
         * clear the Z flag although the exchange was done.
         */
        teq     ip, ip                          @ set Z flag
        ldr     ip, [r2]                        @ load current val
        add     r3, r2, #1                      @ prepare store ptr
        teqeq   ip, r0                          @ compare with oldval if still allowed
        streq   r1, [r3, #-1]!                  @ store newval if still allowed
        subs    r0, r2, r3                      @ if r2 == r3 the str occurred
        mov     pc, lr

#else

        ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        rsbs    r0, r3, #0
        mov     pc, lr

#endif

        .align  5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:                                @ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

        ldr     r0, [pc, #(16 - 8)]             @ TLS stored at 0xffff0ff0
        mov     pc, lr

#else

        mrc     p15, 0, r0, c13, c0, 3          @ read TLS register
        mov     pc, lr

#endif

        .rep    5
        .word   0                               @ pad up to __kuser_helper_version
        .endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
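
/*
 * With the two 32-byte segments above this word currently evaluates to 2,
 * so user space can, for instance, guard its use of the helpers with a
 * check such as the following sketch, built from the definitions quoted
 * in the comments above:
 *
 *	if (__kernel_helper_version < 2)
 *		return use_fallback_implementation();
 *	return __kernel_cmpxchg(oldval, newval, ptr) == 0;
 */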

__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        .macro  vector_stub, name, sym, correction=0
        .align  5

vector_\name:
        ldr     r13, .LCs\sym
        .if \correction
        sub     lr, lr, #\correction
        .endif
        str     lr, [r13]                       @ save lr_IRQ
        mrs     lr, spsr
        str     lr, [r13, #4]                   @ save spsr_IRQ
        @
        @ now branch to the relevant MODE handling routine
        @
        mrs     r13, cpsr
        bic     r13, r13, #MODE_MASK
        orr     r13, r13, #SVC_MODE
        msr     spsr_cxsf, r13                  @ switch to SVC_32 mode

        and     lr, lr, #15
        ldr     lr, [pc, lr, lsl #2]
        movs    pc, lr                          @ Changes mode and branches
        .endm
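
/*
 * The "and lr, lr, #15" above indexes the 16-word table that follows each
 * stub with the low four bits of the saved spsr's mode field: USR_32 (0x10)
 * gives index 0, FIQ_32 (0x11) index 1, IRQ_32 (0x12) index 2 and SVC_32
 * (0x13) index 3, which is why only those entries in the tables below point
 * at real handlers.
 */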

        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     irq, irq, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid                   @  4
        .long   __irq_invalid                   @  5
        .long   __irq_invalid                   @  6
        .long   __irq_invalid                   @  7
        .long   __irq_invalid                   @  8
        .long   __irq_invalid                   @  9
        .long   __irq_invalid                   @  a
        .long   __irq_invalid                   @  b
        .long   __irq_invalid                   @  c
        .long   __irq_invalid                   @  d
        .long   __irq_invalid                   @  e
        .long   __irq_invalid                   @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     dabt, abt, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid                  @  4
        .long   __dabt_invalid                  @  5
        .long   __dabt_invalid                  @  6
        .long   __dabt_invalid                  @  7
        .long   __dabt_invalid                  @  8
        .long   __dabt_invalid                  @  9
        .long   __dabt_invalid                  @  a
        .long   __dabt_invalid                  @  b
        .long   __dabt_invalid                  @  c
        .long   __dabt_invalid                  @  d
        .long   __dabt_invalid                  @  e
        .long   __dabt_invalid                  @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     pabt, abt, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid                  @  4
        .long   __pabt_invalid                  @  5
        .long   __pabt_invalid                  @  6
        .long   __pabt_invalid                  @  7
        .long   __pabt_invalid                  @  8
        .long   __pabt_invalid                  @  9
        .long   __pabt_invalid                  @  a
        .long   __pabt_invalid                  @  b
        .long   __pabt_invalid                  @  c
        .long   __pabt_invalid                  @  d
        .long   __pabt_invalid                  @  e
        .long   __pabt_invalid                  @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, und

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc                       @  3  (SVC_26 / SVC_32)
        .long   __und_invalid                   @  4
        .long   __und_invalid                   @  5
        .long   __und_invalid                   @  6
        .long   __und_invalid                   @  7
        .long   __und_invalid                   @  8
        .long   __und_invalid                   @  9
        .long   __und_invalid                   @  a
        .long   __und_invalid                   @  b
        .long   __und_invalid                   @  c
        .long   __und_invalid                   @  d
        .long   __und_invalid                   @  e
        .long   __und_invalid                   @  f

        .align  5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
        disable_fiq
        subs    pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
        b       vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5

.LCvswi:
        .word   vector_swi

.LCsirq:
        .word   __temp_irq
.LCsund:
        .word   __temp_und
.LCsabt:
        .word   __temp_abt

        .globl  __stubs_end
__stubs_end:

        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
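
/*
 * Worked example of what stubs_offset buys us: the vector page below is
 * copied out to the vectors base by the trap initialisation code (0xffff0000
 * with high vectors, which gives the 0xffff0200 mentioned above for the
 * stubs).  A branch is PC-relative, so a "b vector_xxx + stubs_offset"
 * assembled at offset v within __vectors_start encodes the displacement
 *
 *	(vector_xxx + __vectors_start + 0x200 - __stubs_start) - (__vectors_start + v + 8)
 *
 * and, once it is running at vectors_base + v, it therefore lands on
 * vectors_base + 0x200 + (vector_xxx - __stubs_start), i.e. the copied stub,
 * even though neither copy runs at its link-time address.
 */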

        .globl  __vectors_start
__vectors_start:
        swi     SYS_ERROR0
        b       vector_und + stubs_offset
        ldr     pc, .LCvswi + stubs_offset
        b       vector_pabt + stubs_offset
        b       vector_dabt + stubs_offset
        b       vector_addrexcptn + stubs_offset
        b       vector_irq + stubs_offset
        b       vector_fiq + stubs_offset

        .globl  __vectors_end
__vectors_end:

        .data

/*
 * Do not reorder these, and do not insert extra data between...
 */

__temp_irq:
        .word   0                               @ saved lr_irq
        .word   0                               @ saved spsr_irq
        .word   -1                              @ old_r0
__temp_und:
        .word   0                               @ Saved lr_und
        .word   0                               @ Saved spsr_und
        .word   -1                              @ old_r0
__temp_abt:
        .word   0                               @ Saved lr_abt
        .word   0                               @ Saved spsr_abt
        .word   -1                              @ old_r0

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4