[PATCH] ARM SMP: consolidate main IRQ handler code
arch/arm/kernel/entry-armv.S

/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
#include <linux/config.h>

#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/hardware.h>		/* should be moved into entry-macro.S */
#include <asm/arch/irqs.h>		/* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b
	bne	asm_do_IRQ
	.endm
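/*
 * For reference, a sketch of the C side of this dispatch loop (the real
 * handler lives in arch/arm/kernel/irq.c; the prototype below is shown
 * here only as an illustration):
 *
 *	asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 *
 * get_irqnr_and_base comes from the machine's entry-macro.S and leaves the
 * Z flag set when no interrupt is pending; because lr is pointed back at
 * label 1b, asm_do_IRQ returns into the loop so all pending interrupts are
 * drained before irq_handler itself returns.
 */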

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, sym, reason
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - lr}			@ Save XXX r0 - lr
	ldr	r4, .LC\sym
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry abt, BAD_PREFETCH
	b	1f

__dabt_invalid:
	inv_entry abt, BAD_DATA
	b	1f

__irq_invalid:
	inv_entry irq, BAD_IRQ
	b	1f

__und_invalid:
	inv_entry und, BAD_UNDEFINSTR

1:	zero_fp
	ldmia	r4, {r5 - r7}			@ Get XXX pc, cpsr, old_r0
	add	r4, sp, #S_PC
	stmia	r4, {r5 - r7}			@ Save XXX pc, cpsr, old_r0
	mov	r0, sp
	and	r2, r6, #31			@ int mode
	b	bad_mode

/*
 * SVC mode handlers
 */
	.macro	svc_entry, sym
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r2, .LC\sym
	add	r0, sp, #S_FRAME_SIZE
	ldmia	r2, {r2 - r4}			@ get pc, cpsr
	add	r5, sp, #S_SP
	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm
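/*
 * The frame built above matches struct pt_regs from include/asm-arm/ptrace.h;
 * the offsets used here (S_FRAME_SIZE, S_SP, S_PC, S_PSR) are generated from
 * it by asm-offsets, so the following is only an illustrative sketch:
 *
 *	struct pt_regs {
 *		long uregs[18];	// r0-r12, sp, lr, pc, cpsr, orig_r0
 *	};
 *
 * i.e. 18 words, with the saved pc at S_PC, the saved cpsr at S_PSR, and the
 * original r0 (kept for syscall restarting) in the last slot.
 */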

	.align	5
__dabt_svc:
	svc_entry abt

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort
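	@
	@ do_DataAbort lives in arch/arm/mm/fault.c; as a rough C-side sketch
	@ (illustrative only) it is called as:
	@
	@	asmlinkage void do_DataAbort(unsigned long addr,
	@				     unsigned int fsr,
	@				     struct pt_regs *regs);
	@
	@ which is why r0/r1 carry the values returned by the abort handler
	@ above and r2 points at the saved register frame.
	@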

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
__irq_svc:
	svc_entry irq
#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [r8, #TI_PREEMPT]		@ read preempt value
	teq	r0, r7
	str	r9, [r8, #TI_PREEMPT]		@ restore preempt count
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r9, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [r8, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [r8, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	b	1b
#endif
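/*
 * Roughly, the CONFIG_PREEMPT path above corresponds to the following C
 * (a simplified, illustrative sketch; names mirror the assembly):
 *
 *	if (saved_count == 0 && !in_interrupt()) {
 *		ti->preempt_count = 0;		// undo the increment above
 *		do
 *			preempt_schedule_irq();	// handles IRQ enable/disable
 *		while (ti->flags & _TIF_NEED_RESCHED);
 *	}
 *
 * Back at preempt_return the saved count is written back, and the
 * strne r0, [r0, -r0] acts as a cheap BUG() if the count was corrupted.
 */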

	.align	5
__und_svc:
	svc_entry und

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers

	.align	5
__pabt_svc:
	svc_entry abt

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
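	@
	@ C-side sketch (illustrative only) of the handler in
	@ arch/arm/mm/fault.c:
	@
	@	asmlinkage void do_PrefetchAbort(unsigned long addr,
	@					 struct pt_regs *regs);
	@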

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
.LCirq:
	.word	__temp_irq
.LCund:
	.word	__temp_und
.LCabt:
	.word	__temp_abt
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 */
	.macro	usr_entry, sym
	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
	stmia	sp, {r0 - r12}			@ save r0 - r12
	ldr	r7, .LC\sym
	add	r5, sp, #S_PC
	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr

#if __LINUX_ARM_ARCH__ < 6
	@ make sure our user space atomic helper is aborted
	cmp	r2, #VIRT_OFFSET
	bichs	r3, r3, #PSR_Z_BIT
#endif

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r5, {r2 - r4}
	stmdb	r5, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r7, r0, __temp_\sym

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.align	5
__dabt_usr:
	usr_entry abt

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort

	.align	5
__irq_usr:
	usr_entry irq

#ifdef CONFIG_PREEMPT
	get_thread_info r8
	ldr	r9, [r8, #TI_PREEMPT]		@ get preempt count
	add	r7, r9, #1			@ increment it
	str	r7, [r8, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [r8, #TI_PREEMPT]
	teq	r0, r7
	str	r9, [r8, #TI_PREEMPT]
	strne	r0, [r0, -r0]
	mov	tsk, r8
#else
	get_thread_info tsk
#endif
	mov	why, #0
	b	ret_to_user

	.ltorg

	.align	5
__und_usr:
	usr_entry und

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP
	sub	r4, r2, #4

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table, "a"
	.long	1b, 2b
	.previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	enable_irq
	add	pc, pc, r8, lsr #6
	mov	r0, r0
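	@
	@ r8 still holds the CP number in bits 8-11, so "r8, lsr #6" is the
	@ CP number times 4, i.e. a word index into the table below.  When
	@ the add executes, pc reads as the address of that table (current
	@ instruction + 8), and the mov r0, r0 above is only padding so the
	@ table starts exactly at pc + 8.
	@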

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.data
ENTRY(fp_enter)
	.word	fpundefinstr
	.text

fpundefinstr:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry abt

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia	ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
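/*
 * Sketch of how the scheduler reaches this routine (illustrative only; the
 * real switch_to macro lives in include/asm-arm/system.h and may differ in
 * detail):
 *
 *	#define switch_to(prev, next, last)				\
 *		do {							\
 *			last = __switch_to(prev,			\
 *					   prev->thread_info,		\
 *					   next->thread_info);		\
 *		} while (0)
 *
 * The ldmib above loads the next thread's saved lr into pc, so that thread
 * resumes immediately after its own earlier call to __switch_to.
 */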

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP  /* sanity check */
#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
#endif

	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval.  If ever an exception
	 * occurs we can not be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The post-increment on the str is used to prevent a race with an
	 * exception happening just after the str instruction which would
	 * clear the Z flag although the exchange was done.
	 */
	teq	ip, ip			@ set Z flag
	ldr	ip, [r2]		@ load current val
	add	r3, r2, #1		@ prepare store ptr
	teqeq	ip, r0			@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!		@ store newval if still allowed
	subs	r0, r2, r3		@ if r2 == r3 the str occurred
	mov	pc, lr

#else

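	/*
	 * On ARMv6 the exclusive monitor does the work: strexeq writes 0
	 * to r3 on success and 1 if exclusivity was lost, and the rsbs
	 * converts that into the documented r0/C-flag result, so callers
	 * simply retry on failure (the bcc 1b in the example above).
	 */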
	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	rsbs	r0, r3, #0
	mov	pc, lr

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
	mov	pc, lr

#else

	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
	mov	pc, lr

#endif

	.rep	5
	.word	0				@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
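/*
 * For instance, before using __kernel_cmpxchg, user space could check the
 * helper count first (an illustrative sketch; each helper occupies one
 * 32-byte slot counted from the top of the vector page, and the cmpxchg
 * helper above corresponds to a count of at least 2):
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	if (__kernel_helper_version < 2)
 *		abort();
 */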

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	.macro	vector_stub, name, sym, correction=0
	.align	5

vector_\name:
	ldr	r13, .LCs\sym
	.if \correction
	sub	lr, lr, #\correction
	.endif
	str	lr, [r13]			@ save lr_IRQ
	mrs	lr, spsr
	str	lr, [r13, #4]			@ save spsr_IRQ
	@
	@ now branch to the relevant MODE handling routine
	@
	mrs	r13, cpsr
	bic	r13, r13, #MODE_MASK
	orr	r13, r13, #SVC_MODE
	msr	spsr_cxsf, r13			@ switch to SVC_32 mode

	and	lr, lr, #15
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr				@ Changes mode and branches
	.endm
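/*
 * The and/ldr/movs sequence above indexes the table that follows each
 * vector_stub invocation: the low four bits of the saved spsr give the mode
 * the exception came from (0 = USR, 3 = SVC, ...), and at the ldr the pc
 * reads as the address of the table itself, so "ldr lr, [pc, lr, lsl #2]"
 * fetches the per-mode handler address which "movs pc, lr" then jumps to,
 * switching into SVC mode via the spsr set up just before.
 */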

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, irq, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, abt, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, abt, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, und

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

.LCsirq:
	.word	__temp_irq
.LCsund:
	.word	__temp_und
.LCsabt:
	.word	__temp_abt

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
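/*
 * The branches in the vector page below are assembled here but executed
 * only after the vectors have been copied to 0xffff0000 and the stubs to
 * 0xffff0200 (done during early trap setup).  Since a "b" is pc-relative,
 * adding stubs_offset = __vectors_start + 0x200 - __stubs_start to each
 * target compensates for the two blocks being copied to different
 * addresses, so the relocated branch still lands on the relocated stub.
 */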

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

/*
 * Do not reorder these, and do not insert extra data between...
 */

__temp_irq:
	.word	0				@ saved lr_irq
	.word	0				@ saved spsr_irq
	.word	-1				@ old_r0
__temp_und:
	.word	0				@ Saved lr_und
	.word	0				@ Saved spsr_und
	.word	-1				@ old_r0
__temp_abt:
	.word	0				@ Saved lr_abt
	.word	0				@ Saved spsr_abt
	.word	-1				@ old_r0

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4