/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 *  it to save wrong values... Be aware!
 */
#include <linux/config.h>

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>

#include "entry-header.S"

/*
 * Interrupt handling. Preserves r7, r8, r9
 */
	.macro	irq_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_local_timer
#endif
#endif

	.endm

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - lr}
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@ "" "" "" ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	and	r2, r6, #0x1f
	b	bad_mode

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry
	sub	sp, sp, #S_FRAME_SIZE
SPFIX(	tst	sp, #4		)
SPFIX(	bicne	sp, sp, #4	)
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP		@ here for interlock avoidance
	mov	r4, #-1			@ "" "" "" ""
	add	r0, sp, #S_FRAME_SIZE	@ "" "" "" ""
SPFIX(	addne	r0, r0, #4	)
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1. r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]	@ get preempt count
	add	r7, r8, #1		@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]	@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [tsk, #TI_PREEMPT]	@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]	@ restore preempt count
	teq	r0, r7
	strne	r0, [r0, -r0]		@ bug()
#endif
	ldr	r0, [sp, #S_PSR]	@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r8, #0			@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr			@ no
	ldr	r0, [r6, #4]		@ local_irq_count
	ldr	r1, [r6, #8]		@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0			@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]	@ expects preempt_count == 0
1:	bl	preempt_schedule_irq	@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]	@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return		@ go again
	b	1b
#endif

	.align	5
__und_svc:
	svc_entry

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp			@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]	@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^		@ Restore SVC registers

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2			@ address (pc)
	mov	r1, sp			@ regs
	bl	do_PrefetchAbort	@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@ "" "" "" ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ make sure our user space atomic helper is aborted
	cmp	r2, #TASK_SIZE
	bichs	r3, r3, #PSR_Z_BIT
#endif
#endif

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
	stmdb	r0, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.align	5
__dabt_usr:
	usr_entry

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort

	.align	5
__irq_usr:
	usr_entry

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]	@ get preempt count
	add	r7, r8, #1		@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif

	mov	why, #0
	b	ret_to_user

	.ltorg

	.align	5
__und_usr:
	usr_entry

	tst	r3, #PSR_T_BIT		@ Thumb mode?
	bne	fpundefinstr		@ ignore FP
	sub	r4, r2, #4

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 2b
	.previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined. The only instructions that should fault are the
 * co-processor instructions. However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0 = instruction opcode.
 *  r2 = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000		@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000	@ mask out op-code bits
	teqne	r8, #0x0f000000		@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10		@ get current thread
	and	r8, r0, #0x00000f00	@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)	@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr			@ CP#0
	b	do_fpe			@ CP#1 (FPE)
	b	do_fpe			@ CP#2 (FPE)
	mov	pc, lr			@ CP#3
	mov	pc, lr			@ CP#4
	mov	pc, lr			@ CP#5
	mov	pc, lr			@ CP#6
	mov	pc, lr			@ CP#7
	mov	pc, lr			@ CP#8
	mov	pc, lr			@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp			@ CP#10 (VFP)
	b	do_vfp			@ CP#11 (VFP)
#else
	mov	pc, lr			@ CP#10 (VFP)
	mov	pc, lr			@ CP#11 (VFP)
#endif
	mov	pc, lr			@ CP#12
	mov	pc, lr			@ CP#13
	mov	pc, lr			@ CP#14 (Debug)
	mov	pc, lr			@ CP#15 (Control)

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE	@ r10 = workspace
	ldr	pc, [r4]		@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.data
ENTRY(fp_enter)
	.word	fpundefinstr
	.text

fpundefinstr:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry

	enable_irq			@ Enable interrupts
	mov	r0, r2			@ address (pc)
	mov	r1, sp			@ regs
	bl	do_PrefetchAbort	@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifndef CONFIG_MMU
	add	r2, r2, #TI_CPU_DOMAIN
#else
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
	clrex
#else
	strex	r5, r4, [ip]		@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia	ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3	@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]		@ TLS val at 0xffff0ff0
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0	@ Set domain register
#endif
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40		@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory. They are used to provide user space
 * with operations which require kernel help because of unimplemented native
 * features and/or instructions in many ARM CPUs. The idea is for this code
 * to be executed directly in user mode for best efficiency, but it is too
 * intimate with its kernel counterpart to be left to user libraries. In
 * fact this code might even differ from one CPU to another depending on the
 * available instruction set, or on restrictions such as those found on SMP
 * systems. In other words, the kernel reserves the right to change this
 * code as needed without warning. Only the entry points and their results
 * are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page. New segments (if ever needed) must be added in front of
 * existing ones. This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement these operations inline when
 * optimizing for a processor that has the necessary native support, but
 * only if the resulting binaries are already going to be incompatible with
 * earlier ARM processors due to the use of unsupported instructions other
 * than what is provided here. In other words, don't make binaries unable
 * to run on earlier processors just for the sake of avoiding these kernel
 * helpers if your compiled code is not going to use the new instructions
 * for any other purpose.
 */
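
/*
 * Illustration only, derived from the layout described above (this is not
 * an additional ABI guarantee): since each helper occupies a 32-byte
 * segment packed down from the top of the high vector page at 0xffff1000,
 * the entry point of helper number n (counting from 1 at the top) works
 * out to
 *
 *	helper_address(n) = 0xffff1000 - 32 * n
 *
 * which gives 0xffff0fe0, 0xffff0fc0 and 0xffff0fa0 for the three helpers
 * defined below.
 */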

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 *    - A failure might be transient, i.e. it is possible, although unlikely,
 *      that "failure" be returned even if *ptr == oldval.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 * ({ register unsigned int *__ptr asm("r2") = (ptr); \
 *    register unsigned int __result asm("r1"); \
 *    asm volatile ( \
 *        "1: @ atomic_add\n\t" \
 *        "ldr	r0, [r2]\n\t" \
 *        "mov	r3, #0xffff0fff\n\t" \
 *        "add	lr, pc, #4\n\t" \
 *        "add	r1, r0, %2\n\t" \
 *        "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *        "bcc	1b" \
 *        : "=&r" (__result) \
 *        : "r" (__ptr), "rIL" (val) \
 *        : "r0","r3","ip","lr","cc","memory" ); \
 *    __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you. No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	mov	r7, #0xff00		@ 0xfff0 into r7 for EABI
	orr	r7, r7, #0xf0
	swi	#0x9ffff0
	ldmfd	sp!, {r7, pc}

#elif __LINUX_ARM_ARCH__ < 6

	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval. If ever an exception
	 * occurs we can not be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The post-increment on the str is used to prevent a race with an
	 * exception happening just after the str instruction which would
	 * clear the Z flag although the exchange was done.
	 */
#ifdef CONFIG_MMU
	teq	ip, ip			@ set Z flag
	ldr	ip, [r2]		@ load current val
	add	r3, r2, #1		@ prepare store ptr
	teqeq	ip, r0			@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!		@ store newval if still allowed
	subs	r0, r2, r3		@ if r2 == r3 the str occurred
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
#endif
	mov	pc, lr

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	rsbs	r0, r3, #0
#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

#endif

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
	mov	pc, lr

#else

	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
	mov	pc, lr

#endif

	.rep	5
	.word	0			@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
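
/*
 * A minimal user space sketch of such a check (illustration only; the
 * helper numbering simply counts the 32-byte segments down from the top of
 * the page, so in this file __kernel_get_tls is helper #1, __kernel_cmpxchg
 * helper #2 and __kernel_dmb helper #3):
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	static inline int have_kernel_cmpxchg(void)
 *	{
 *		return __kernel_helper_version >= 2;
 *	}
 *
 * Code that must also run on kernels predating these helpers would fall
 * back to a syscall or lock based implementation when this returns zero.
 */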

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's. Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode. IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
	.endm

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr		@ 0 (USR_26 / USR_32)
	.long	__irq_invalid		@ 1 (FIQ_26 / FIQ_32)
	.long	__irq_invalid		@ 2 (IRQ_26 / IRQ_32)
	.long	__irq_svc		@ 3 (SVC_26 / SVC_32)
	.long	__irq_invalid		@ 4
	.long	__irq_invalid		@ 5
	.long	__irq_invalid		@ 6
	.long	__irq_invalid		@ 7
	.long	__irq_invalid		@ 8
	.long	__irq_invalid		@ 9
	.long	__irq_invalid		@ a
	.long	__irq_invalid		@ b
	.long	__irq_invalid		@ c
	.long	__irq_invalid		@ d
	.long	__irq_invalid		@ e
	.long	__irq_invalid		@ f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr		@ 0 (USR_26 / USR_32)
	.long	__dabt_invalid		@ 1 (FIQ_26 / FIQ_32)
	.long	__dabt_invalid		@ 2 (IRQ_26 / IRQ_32)
	.long	__dabt_svc		@ 3 (SVC_26 / SVC_32)
	.long	__dabt_invalid		@ 4
	.long	__dabt_invalid		@ 5
	.long	__dabt_invalid		@ 6
	.long	__dabt_invalid		@ 7
	.long	__dabt_invalid		@ 8
	.long	__dabt_invalid		@ 9
	.long	__dabt_invalid		@ a
	.long	__dabt_invalid		@ b
	.long	__dabt_invalid		@ c
	.long	__dabt_invalid		@ d
	.long	__dabt_invalid		@ e
	.long	__dabt_invalid		@ f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr		@ 0 (USR_26 / USR_32)
	.long	__pabt_invalid		@ 1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid		@ 2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc		@ 3 (SVC_26 / SVC_32)
	.long	__pabt_invalid		@ 4
	.long	__pabt_invalid		@ 5
	.long	__pabt_invalid		@ 6
	.long	__pabt_invalid		@ 7
	.long	__pabt_invalid		@ 8
	.long	__pabt_invalid		@ 9
	.long	__pabt_invalid		@ a
	.long	__pabt_invalid		@ b
	.long	__pabt_invalid		@ c
	.long	__pabt_invalid		@ d
	.long	__pabt_invalid		@ e
	.long	__pabt_invalid		@ f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr		@ 0 (USR_26 / USR_32)
	.long	__und_invalid		@ 1 (FIQ_26 / FIQ_32)
	.long	__und_invalid		@ 2 (IRQ_26 / IRQ_32)
	.long	__und_svc		@ 3 (SVC_26 / SVC_32)
	.long	__und_invalid		@ 4
	.long	__und_invalid		@ 5
	.long	__und_invalid		@ 6
	.long	__und_invalid		@ 7
	.long	__und_invalid		@ 8
	.long	__und_invalid		@ 9
	.long	__und_invalid		@ a
	.long	__und_invalid		@ b
	.long	__und_invalid		@ c
	.long	__und_invalid		@ d
	.long	__und_invalid		@ e
	.long	__und_invalid		@ f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register... brain
 * damage alert! I don't think that we can execute any code in here in any
 * other mode than FIQ... Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4