/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 * that causes it to save wrong values...  Be aware!
 */
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
#else
	arch_irq_handler_default
#endif
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]

	@
	@ Call the processor-specific abort handler:
	@
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
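	@ A minimal sketch of a handler honouring this contract (illustrative
	@ only, modelled on the ARMv7 early-abort path; not part of this file):
	@	mrc	p15, 0, r1, c5, c0, 0	@ r1 = DFSR (fault status)
	@	mrc	p15, 0, r0, c6, c0, 0	@ r0 = DFAR (fault address)
	@	b	do_DataAbort		@ hand off (addr, fsr, regs)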
	.section .kprobes.text,"ax",%progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	inv_entry BAD_PREFETCH
ENDPROC(__pabt_invalid)

ENDPROC(__dabt_invalid)

ENDPROC(__irq_invalid)

	inv_entry BAD_UNDEFINSTR

	@ XXX fall through to common_invalid
@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

ENDPROC(__und_invalid)
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
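/*
 * Illustrative expansion (assuming an EABI kernel on >= ARMv5, so the
 * first definition above is live):
 *	SPFIX(	tst	sp, #4	)	assembles to	tst	sp, #4
 * With the empty definition the line vanishes entirely, so the 8-byte
 * stack alignment fixups below cost nothing on OABI configurations.
 */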
	.macro	svc_entry, stack_hole=0
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
 SPFIX(	subeq	sp, sp, #4	)

	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED

	svc_exit r5, irq = 1			@ return from exception

#ifdef CONFIG_PREEMPT
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	@
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction and we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	@
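	@ Worked example (illustrative): an undefined ARM instruction at
	@ 0x00008000 enters here with lr_und = 0x00008004, so subtracting
	@ 4 points r4 back at 0x00008000; in Thumb state lr_und would be
	@ 0x00008002 and subtracting 2 does the same job.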
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	ldrh	r9, [r4]			@ bottom 16 bits
	orr	r0, r9, r0, lsl #16

	adr	r9, BSYM(__und_svc_finish)

	mov	r1, #4				@ PC correction to apply
	mov	r0, sp				@ struct pt_regs *regs

	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception

	svc_exit r5				@ return from exception
/*
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should
 * be a multiple of 8 as well.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""   ""    ""        ""
	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
	@ Enable the alignment trap while in kernel mode

	@ Clear FP to mark the first stack frame

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#warning "NPTL on non MMU needs fixing"
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	blhs	kuser_cmpxchg64_fixup
#endif
	.endm

	b	ret_to_user_from_irq
	@
	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	sub	r4, r2, #4			@ ARM instr at LR - 4
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	sub	r4, r2, #2			@ First half of Thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_fault_16		@ 16bit undefined instruction
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update regs->ARM_pc
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32bit undefined instruction function
#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"

	.pushsection __ex_table,"a"
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
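	@ Illustrative emulator stub honouring the convention above (a
	@ hypothetical handler, not part of this file; MY_OPCODE is a
	@ made-up constant):
	@	teq	r0, #MY_OPCODE		@ is this the insn we emulate?
	@	moveq	pc, r9			@ yes: resume at regs->ARM_pc
	@	mov	pc, lr			@ no: raise undefined instruction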
	@ Fall-through from Thumb-2 __und_usr
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes

	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	cmp	r8, r7				@ NEON instruction?
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]

	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable

 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
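	@ How the computed branch above works (worked example): r8 holds
	@ the coprocessor number shifted left by 8, so "r8, lsr #6" is
	@ CP# * 4, one word per entry in the table below.  In ARM state
	@ pc reads as the current instruction + 8, which (allowing for the
	@ padding nop in the full file) is the CP#0 slot; e.g. a VFP
	@ instruction (CP#10) gives r8 = 0xa00, 0xa00 >> 6 = 40, landing
	@ on the "W(b) do_vfp" entry.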
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
#ifdef NEED_CPU_ARCHITECTURE
	.word	__cpu_architecture
#endif

	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
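	@ Worked example of the table match loop above: an ARM NEON
	@ data-processing instruction such as vadd.i32 (0xf2xxxxxx) is
	@ checked as (insn & 0xfe000000) == 0xf2000000, which holds for
	@ the whole 0xf2000000-0xf3ffffff NEON region; the all-zero
	@ mask/opcode pair terminates the table.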
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r9  = normal "successful" return address
 *  lr  = unrecognised FP instruction return address
 */

	adr	lr, BSYM(ret_from_exception)
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
ENDPROC(ret_from_exception)
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
#ifdef CONFIG_VFP_OPT
	add	ip, r1, #TI_CPU_SAVE
	stmfa	ip!, {r0, r1, r2, r5, r6, r8, lr}

	@ 1. Save the VFP state of the previous thread_info
	add	r0, r0, #TI_VFPSTATE		@ r0 = workspace
	mov	r5, ip				@ preserve ip in r5; vfp_save_state may clobber ip
	mov	r6, r2				@ preserve r2 in r6; vfp_save_state may clobber r2

	@ 2. Restore the VFP state of the next thread_info
	add	r2, r2, #TI_VFPSTATE		@ r2 = workspace
	VFPFLDMIA r2, r0			@ reload the working registers while
						@ FPEXC is in a safe state
	ldmia	r2, {r1, r5, r6, r8}		@ load FPEXC, FPSCR, FPINST, FPINST2
	VFPFMXR	FPSCR, r5			@ restore status
	ldmfa	ip!, {r0, r1, r2, r5, r6, r8, lr}
#endif
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
/*
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
#ifdef CONFIG_ARM_THUMB
	.macro	kuser_pad, sym, size
	.rept	4 - (. - \sym) & 3
	.rept	(\size - (. - \sym)) / 4
#ifdef CONFIG_KUSER_HELPERS
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */
__kuser_cmpxchg64:				@ 0xffff0f60
#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
1:	.word	__ARM_NR_cmpxchg64
#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
#elif !defined(CONFIG_SMP)

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
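	@ How the range check above works (explanatory note): r7 is wound
	@ back to the user-space address of 1b, r8 holds r4 - 1b, and
	@ "rsbcss r8, r8, #(2b - 1b)" leaves carry set only when
	@ 0 <= r4 - 1b <= 2b - 1b, i.e. when r4 lies inside the critical
	@ section; in that case strcs rewinds the saved pc_usr to 1b.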
#warning "NPTL on non MMU needs fixing"

#error "incoherent kernel configuration"

	kuser_pad __kuser_cmpxchg64, 64
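/*
 * Userspace calling sketch for the helper above, following the formal
 * definition in Documentation/arm/kernel_user_helpers.txt (illustrative,
 * for C callers on kernels with CONFIG_KUSER_HELPERS):
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * Returns 0 if *ptr was atomically changed from *oldval to *newval and
 * nonzero otherwise; the C flag is also set if the exchange happened.
 */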
__kuser_memory_barrier:				@ 0xffff0fa0
	kuser_pad __kuser_memory_barrier, 32
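/*
 * Corresponding userspace sketch (per kernel_user_helpers.txt;
 * illustrative):
 *
 *	typedef void (__kuser_dmb_t)(void);
 *	#define __kuser_dmb (*(__kuser_dmb_t *)0xffff0fa0)
 *
 * Issues a memory barrier appropriate for the running CPU.
 */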
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	ldr	r7, 1f			@ it's 20 bits
1:	.word	__ARM_NR_cmpxchg
#elif __LINUX_ARM_ARCH__ < 6

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]

#warning "NPTL on non MMU needs fixing"
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)

	kuser_pad __kuser_cmpxchg, 32
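/*
 * Userspace calling sketch (per kernel_user_helpers.txt; illustrative):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * Returns 0 with C set if *ptr was atomically changed from oldval to
 * newval, and nonzero with C clear otherwise.
 */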
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
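/*
 * Userspace calling sketch (per kernel_user_helpers.txt; illustrative):
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 * Returns the TLS pointer previously set via the __ARM_NR_set_tls
 * private syscall.
 */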
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
/*
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page in size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	sub	lr, lr, #\correction

	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	stmia	sp, {r0, lr}		@ save r0, lr
	str	lr, [sp, #8]		@ save spsr

	@ Prepare for SVC32 mode.  IRQs remain disabled.
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)

	@ the branch table must immediately follow this code
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
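	@ How the table lookup works (worked example): the stub indexes
	@ the 16-entry branch table that follows it with the low four
	@ mode bits of the parent spsr.  An exception taken from user
	@ mode (spsr mode 0b10000) gives index 0, one taken from SVC mode
	@ (0b10011) gives index 3; all other modes land on the
	@ __*_invalid entries.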
	@ handler addresses follow this label

	.section .stubs, "ax", %progbits

	@ This must be the first word
 ARM(	swi	SYS_ERROR0	)
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
/*=============================================================================
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq
	.section .vectors, "ax", %progbits
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_addrexcptn

	.globl	cr_no_alignment

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq