/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
 *  it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>
#include <asm/thread_notify.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
	.macro	irq_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, 1b
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, 1b
	bne	do_local_timer
#endif
#endif

	.endm

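/*
 * Reading note on the macro above: get_irqnr_and_base is supplied by
 * the machine-specific <asm/arch/entry-macro.S> and returns with Z
 * clear while an interrupt is pending, so each handler call returns
 * to the 1: label and the macro keeps dispatching until no IRQ (and,
 * on SMP, no IPI or local timer interrupt) remains.
 */
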
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - lr}
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	and	r2, r6, #0x1f
	b	bad_mode

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

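/*
 * Illustrative note: with CONFIG_AEABI on ARMv5 or later,
 *	SPFIX(	tst	sp, #4		)
 *	SPFIX(	bicne	sp, sp, #4	)
 * expands to code that tests bit 2 of sp and clears it, pushing the
 * stack down to an 8-byte boundary as EABI requires; on all other
 * configurations the SPFIX() lines assemble to nothing.
 */
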
	.macro	svc_entry
	sub	sp, sp, #S_FRAME_SIZE
 SPFIX(	tst	sp, #4		)
 SPFIX(	bicne	sp, sp, #4	)
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r5, sp, #S_SP		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
 SPFIX(	addne	r0, r0, #4	)
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	mov	r1, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r0 - sp_svc
	@  r1 - lr_svc
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r5, {r0 - r4}
	.endm

	.align	5
__dabt_svc:
	svc_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ set desired IRQ state, then call main handler
	@
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
__irq_svc:
	svc_entry

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	teq	r0, r7
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r8, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	b	1b
#endif

	.align	5
__und_svc:
	svc_entry

	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
	ldr	r0, [r2, #-4]
	adr	r9, 1f
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers

	.align	5
__pabt_svc:
	svc_entry

	@
	@ re-enable interrupts if appropriate
	@
	mrs	r9, cpsr
	tst	r3, #PSR_I_BIT
	biceq	r9, r9, #PSR_I_BIT
	msr	cpsr_c, r9

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r0, [sp, #S_PSR]
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_ABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
	sub	sp, sp, #S_FRAME_SIZE
	stmib	sp, {r1 - r12}

	ldmia	r0, {r1 - r3}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""

	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ make sure our user space atomic helper is aborted
	cmp	r2, #TASK_SIZE
	bichs	r3, r3, #PSR_Z_BIT
#endif
#endif
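	/*
	 * Reading note: the cmp/bichs pair above cooperates with the
	 * pre-ARMv6 __kuser_cmpxchg implementation further down.  That
	 * helper sets the Z flag before loading the old value; clearing
	 * Z here whenever the interrupted pc lies at or above TASK_SIZE
	 * (i.e. inside the helper) forces the cmpxchg to report failure
	 * so user space retries it.
	 */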

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r2 - r4}
	stmdb	r0, {sp, lr}^

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm

	.align	5
__dabt_usr:
	usr_entry

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
#ifdef MULTI_ABORT
	ldr	r4, .LCprocfns
	mov	lr, pc
	ldr	pc, [r4]
#else
	bl	CPU_ABORT_HANDLER
#endif

	@
	@ IRQs on, then call the main handler
	@
	enable_irq
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort

	.align	5
__irq_usr:
	usr_entry

	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif

	mov	why, #0
	b	ret_to_user

	.ltorg

	.align	5
__und_usr:
	usr_entry

	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP
	sub	r4, r2, #4

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
1:	ldrt	r0, [r4]
	adr	r9, ret_from_exception
	adr	lr, fpundefinstr
	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
2:	mov	pc, r9
	.previous
	.section __ex_table,"a"
	.long	1b, 2b
	.previous

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r10 = this thread's thread_info structure.
 */
call_fpe:
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	add	pc, pc, r8, lsr #6
	mov	r0, r0

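	/*
	 * How the dispatch above works (a reading note, not new code):
	 * r8 holds the CP number shifted left by 8, so "r8, lsr #6"
	 * yields CP# * 4, i.e. one word per entry in the table below.
	 * Because pc reads as the address of the add plus 8, the
	 * "mov r0, r0" nop fills the skipped slot so that CP#0 lands
	 * exactly on the first table entry.
	 */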
	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
#endif
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.data
ENTRY(fp_enter)
	.word	fpundefinstr
	.text

fpundefinstr:
	mov	r0, sp
	adr	lr, ret_from_exception
	b	do_undefinstr

	.align	5
__pabt_usr:
	usr_entry

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifdef CONFIG_MMU
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_32v6K
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia	ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
#ifdef CONFIG_MMU
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	add	r4, r2, #TI_CPU_DOMAIN + 40	@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
	mov	r0, r5
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously

	__INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user
 * space at a fixed address in kernel memory.  They are used to provide
 * user space with operations which require kernel help because of
 * unimplemented native features and/or instructions in many ARM CPUs.
 * The idea is for this code to be executed directly in user mode for
 * best efficiency, but it is too intimate with the kernel counterpart
 * to be left to user libraries.  In fact this code might even differ
 * from one CPU to another depending on the available instruction set,
 * or on restrictions such as SMP.  In other words, the kernel reserves
 * the right to change this code as needed without warning.  Only the
 * entry points and their results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the
 * high vector page.  New segments (if ever needed) must be added in
 * front of existing ones.  This mechanism should be used only for
 * things that are really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when
 * optimizing for a processor that has the necessary native support,
 * but only if the resulting binaries are already going to be
 * incompatible with earlier ARM processors due to the use of
 * unsupported instructions other than those provided here.  In other
 * words, don't make binaries unable to run on earlier processors just
 * for the sake of avoiding these kernel helpers if your compiled code
 * is not going to use the new instructions for any other purpose.
 */

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 *    - A failure might be transient, i.e. it is possible, although unlikely,
 *      that "failure" be returned even if *ptr == oldval.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
 */

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	mov	r7, #0xff00		@ 0xfff0 into r7 for EABI
	orr	r7, r7, #0xf0
	swi	#0x9ffff0
	ldmfd	sp!, {r7, pc}

#elif __LINUX_ARM_ARCH__ < 6

	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval.  If ever an exception
	 * occurs we can not be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The post-increment on the str is used to prevent a race with an
	 * exception happening just after the str instruction which would
	 * clear the Z flag although the exchange was done.
	 */
#ifdef CONFIG_MMU
	teq	ip, ip			@ set Z flag
	ldr	ip, [r2]		@ load current val
	add	r3, r2, #1		@ prepare store ptr
	teqeq	ip, r0			@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!		@ store newval if still allowed
	subs	r0, r2, r3		@ if r2 == r3 the str occurred
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
#endif
	mov	pc, lr

#else

#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	rsbs	r0, r3, #0
#ifdef CONFIG_SMP
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr

#endif

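	/*
	 * Note on the ARMv6 path above: no in-kernel retry loop is
	 * needed because a strex failure leaves r3 non-zero, and
	 * "rsbs r0, r3, #0" then returns a non-zero r0 with C clear,
	 * which user space treats as a (possibly transient) failure and
	 * retries, exactly as in the atomic_add example further up.
	 * When the store succeeds, r3 is 0, so r0 becomes 0 with the C
	 * flag set, matching the documented output convention.
	 */
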
	.align	5

/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
	mov	pc, lr

#else

	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
	mov	pc, lr

#endif

	.rep	5
	.word	0			@ pad up to __kuser_helper_version
	.endr

/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
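/*
 * An illustrative (hypothetical) runtime check built on the definition
 * above: since helpers are counted from the top of the page, a program
 * wanting __kernel_cmpxchg (the second helper from the top) would
 * verify that at least two helpers exist, e.g.:
 *
 *	if (__kernel_helper_version < 2)
 *		fall_back_to_other_implementation();  // name is made up
 */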

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:


/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
	.endm
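	/*
	 * Reading note for the macro above: the low 4 bits of the saved
	 * spsr identify the mode the CPU was in when the exception was
	 * taken (USR_32 = 0, FIQ = 1, IRQ = 2, SVC = 3, ...), so
	 * "ldr lr, [pc, lr, lsl #2]" indexes the 16-entry table that
	 * follows each vector_stub invocation, and "movs pc, lr" enters
	 * the selected handler while switching to SVC mode from spsr.
	 */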

	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5

.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4