ARM: update FIQ support for relocation of vectors
arch/arm/kernel/entry-armv.S
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
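
/*
 * On the C side, a platform's top-level interrupt dispatcher is the
 * function whose address ends up in handle_arch_irq (declared at the
 * bottom of this file).  A minimal sketch of the registration, assuming
 * the set_handle_irq() helper from arch/arm/kernel/irq.c; the handler
 * name is a placeholder:
 *
 *	static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
 *	{
 *		// read the interrupt controller and dispatch the IRQ
 *	}
 *
 *	// called once from the machine's init_irq hook
 *	set_handle_irq(my_handle_irq);
 */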

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ return if no reschedule needed
	b	1b				@ otherwise go again
#endif
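
/*
 * The preemption loop above corresponds to this C shape (a sketch of
 * the control flow, not code the kernel compiles here):
 *
 *	do {
 *		preempt_schedule_irq();	// IRQ enable/disable done inside
 *	} while (test_thread_flag(TIF_NEED_RESCHED));
 */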

__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM,
	@ the PC will be pointing at the next instruction, so we have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction, so we
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
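
/*
 * In C terms, the correction above is simply (r1 having been set to 4
 * for ARM or 2 for Thumb by the caller; a sketch):
 *
 *	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
 */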

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	mov	r1, #2
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if first halfword >= 0xe800
	blo	__und_svc_fault
	ldrh	r9, [r4]			@ bottom 16 bits
	add	r4, r4, #2
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(__und_svc_finish)
	mov	r2, r4
	bl	call_fpe

	mov	r1, #4				@ PC correction to apply
__und_svc_fault:
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must
 * be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
	@      faulting instruction depending on Thumb mode.
	@ r3 = regs->ARM_cpsr
	@
	@ The emulation code returns using r9 if it has emulated the
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	adr	r9, BSYM(ret_from_exception)

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
1:	ldrt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r0, r0				@ little endian instruction
#endif
	@ r0 = 32-bit ARM instruction which caused the exception
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
	adr	lr, BSYM(__und_usr_fault_32)
	b	call_fpe

__und_usr_thumb:
	@ Thumb instruction
	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_fault_16		@ 16-bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:	ldrht	r5, [r4]
	cmp	r5, #0xe800			@ 32-bit instruction if first halfword >= 0xe800
	blo	__und_usr_fault_16		@ 16-bit undefined instruction
3:	ldrht	r0, [r2]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
	adr	lr, BSYM(__und_usr_fault_32)
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
	@ lr = 32-bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)
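
/*
 * The 0xe800 comparisons above encode the architectural rule for
 * recognising 32-bit Thumb-2 instructions: a halfword whose top five
 * bits are 0b11101, 0b11110 or 0b11111 is the first half of a 32-bit
 * encoding.  As a C predicate:
 *
 *	static inline bool thumb_insn_is_32bit(u16 first_half)
 *	{
 *		return first_half >= 0xe800;
 *	}
 */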

/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
	.align	2
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
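
/*
 * Each __ex_table entry above pairs the address of a potentially
 * faulting load with the address of its fixup, matching the layout of
 * the kernel's exception table entries:
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	// address of faulting instruction
 *		unsigned long fixup;	// where to resume (label 4 above)
 *	};
 */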

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs disabled, FIQs enabled.
 */
@
@ Fall-through from Thumb-2 __und_usr
@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
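
/*
 * The decode feeding this branch table can be summarised in C (a
 * sketch of the same bit tests, not kernel code):
 *
 *	static bool is_coproc_insn(u32 insn)
 *	{
 *		// bits 27 and 26 must both be set (CDP/CPRT/LDC/STC)
 *		return (insn & 0x0c000000) == 0x0c000000;
 *	}
 *
 *	static unsigned int coproc_num(u32 insn)
 *	{
 *		return (insn >> 8) & 0xf;	// CP# field, bits 11..8
 *	}
 */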

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif

#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
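
/*
 * A C rendering of the table walk performed at label 2 above (a
 * sketch; the real tables are the .LCneon_* words):
 *
 *	struct neon_pattern { u32 mask, opcode; };
 *
 *	static const struct neon_pattern neon_arm[] = {
 *		{ 0xfe000000, 0xf2000000 },
 *		{ 0xff100000, 0xf4000000 },
 *		{ 0x00000000, 0x00000000 },	// terminator
 *	};
 *
 *	static bool is_neon_insn(u32 insn, const struct neon_pattern *p)
 *	{
 *		for (; p->mask; p++)
 *			if ((insn & p->mask) == p->opcode)
 *				return true;
 *		return false;
 *	}
 */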

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16:
	mov	r1, #2
1:	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
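
/*
 * Subsystems that must hook every context switch (VFP, iWMMXt, ...)
 * subscribe to the notifier chain invoked above.  A sketch using the
 * helper from asm/thread_notify.h; the names are placeholders:
 *
 *	static int my_switch_hook(struct notifier_block *nb,
 *				  unsigned long cmd, void *t)
 *	{
 *		struct thread_info *thread = t;
 *
 *		if (cmd == THREAD_NOTIFY_SWITCH)
 *			;	// save/restore per-thread coprocessor state
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_switch_nb = {
 *		.notifier_call	= my_switch_hook,
 *	};
 *
 *	thread_register_notifier(&my_switch_nb);
 */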

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ no, retry the store
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
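
/*
 * Userspace reaches this helper through its fixed address.  Per
 * Documentation/arm/kernel_user_helpers.txt the C binding is:
 *
 *	typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
 *					  const int64_t *newval,
 *					  volatile int64_t *ptr);
 *	#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
 *
 * It returns 0 if *ptr was atomically changed from *oldval to *newval,
 * and non-zero otherwise.
 */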

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f				@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous
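
/*
 * The fixup above amounts to the following range check (a sketch;
 * "start" and "end" stand for the user-space addresses of labels 1
 * and 2):
 *
 *	if (regs->ARM_pc >= start && regs->ARM_pc <= end)
 *		regs->ARM_pc = start;	// restart the critical section
 */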

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
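
/*
 * The documented userspace binding for this helper (see
 * Documentation/arm/kernel_user_helpers.txt):
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
 *					volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 * Zero is returned if *ptr was atomically changed from oldval to
 * newval, non-zero otherwise.
 */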

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
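
/*
 * Userspace is expected to check the version word before calling a
 * helper; the count grows by one per 32-byte slot.  A sketch of the
 * documented usage, with __kuser_get_tls as the example:
 *
 *	#define __kuser_helper_version (*(const int32_t *)0xffff0ffc)
 *
 *	typedef void * (__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = NULL;
 *	if (__kuser_helper_version >= 1)
 *		tls = __kuser_get_tls();
 */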

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
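
/*
 * The CPSR arithmetic in the stub, written out in C (a sketch; the
 * PSR_ISETSTATE definition follows asm/ptrace.h and is PSR_T_BIT on a
 * Thumb-2 kernel, 0 otherwise):
 *
 *	u32 svc_entry_cpsr(u32 cpsr, u32 exc_mode)
 *	{
 *		// flip the mode field from exc_mode to SVC_MODE and
 *		// select the kernel's instruction set state
 *		return cpsr ^ ((exc_mode ^ SVC_MODE) | PSR_ISETSTATE);
 *	}
 *
 * The low nibble of the saved spsr (0 = USR, 1 = FIQ, 2 = IRQ,
 * 3 = SVC, ...) then indexes the 16-entry handler table that follows
 * each stub.
 */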

	.section .stubs, "ax", %progbits
__stubs_start:
	@ This must be the first word
	.word	vector_swi

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4

	.globl	vector_fiq_offset
	.equ	vector_fiq_offset, vector_fiq

	.section .vectors, "ax", %progbits
__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
	W(ldr)	pc, __vectors_start + 0x1000
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif