ARM: entry: abort-macro: simplify do_ldrd_abort
arch/arm/kernel/entry-armv.S (GitHub/mt8127/android_kernel_alcatel_ttab.git)
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq
	mov	r0, sp
	ldr	r5, [r5]
	adr	lr, BSYM(9997f)
	teq	r5, #0
	movne	pc, r5
#endif
	arch_irq_handler_default
9997:
	.endm
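
/*
 * Note on CONFIG_MULTI_IRQ_HANDLER (illustrative, the exact hook may vary
 * by kernel version): handle_arch_irq is expected to be installed by
 * platform setup code before interrupts are enabled, roughly:
 *
 *	handle_arch_irq = mdesc->handle_irq;
 */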

	.macro	pabt_helper
	mov	r0, r2			@ pass address of aborted instruction.
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
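
/*
 * As an illustration of the contract above (a sketch, not the handler of
 * any particular processor): a minimal ARMv7-style CPU_DABORT_HANDLER
 * could be as simple as
 *
 *	mrc	p15, 0, r1, c5, c0, 0	@ DFSR -> r1 (fault status)
 *	mrc	p15, 0, r0, c6, c0, 0	@ DFAR -> r0 (faulting address)
 *	mov	pc, lr			@ r9 left untouched
 */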

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	dabt_helper

	@
	@ set desired IRQ state, then call main handler
	@
	debug_entry r1
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r4, #PSR_I_BIT
	bleq	trace_hardirqs_on
#endif
	svc_exit r4				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r2, #-4]
#else
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	and	r9, r0, #0xf800
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	pabt_helper
	debug_entry r1
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r2, [sp, #S_PSR]
	svc_exit r2				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r2, #TASK_SIZE
	blhs	kuser_cmpxchg_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	dabt_helper

	@
	@ IRQs on, then call the main handler
	@
	debug_entry r1
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_DataAbort
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
#endif

	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
#else
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
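
/*
 * How the coprocessor dispatch below works: "and r8, r0, #0x00000f00"
 * leaves r8 = CP number << 8.  In ARM state, "add pc, pc, r8, lsr #6"
 * reads pc as the address of the add plus 8, i.e. the CP#0 table entry,
 * and adds (CP << 8) >> 6 = CP * 4, so execution lands on the one-word
 * branch-table entry for that coprocessor.
 */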
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry
	pabt_helper
	debug_entry r1
	enable_irq				@ Enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
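/*
 * For reference (illustrative; the exact macro lives in the arch headers):
 * the scheduler reaches this routine through the switch_to() macro,
 * roughly
 *
 *	#define switch_to(prev, next, last)				\
 *	do {								\
 *		last = __switch_to(prev, task_thread_info(prev),	\
 *				   task_thread_info(next));		\
 *	} while (0)
 */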
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  They provide user space with some
 * operations which require kernel help because of unimplemented native
 * features and/or instructions in many ARM CPUs.  The idea is for this code
 * to be executed directly in user mode for best efficiency, but it is too
 * intimate with the kernel counterpart to be left to user libraries.  In
 * fact this code might even differ from one CPU to another depending on the
 * available instruction set and restrictions like on SMP systems.  In other
 * words, the kernel reserves the right to change this code as needed
 * without warning.  Only the entry points and their results are guaranteed
 * to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for any
 * other purpose.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r2, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	none
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
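
/*
 * Illustrative user space check (assuming the usual numbering, where a
 * version of at least 2 implies __kernel_cmpxchg is present):
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	if (__kernel_helper_version < 2)
 *		... fall back to a syscall or locking based cmpxchg ...
 */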

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
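
/*
 * Worked example of the offset above: the vectors are copied to 0xffff0000
 * and the stubs to 0xffff0200 at run time.  A vector assembled as
 * "W(b) vector_irq + stubs_offset" therefore encodes the PC-relative
 * displacement that is correct once both blocks sit at their final
 * addresses: the link-time distance between the two sections is replaced
 * by the fixed 0x200 run-time distance.
 */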

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif