[PATCH] avr32 architecture
arch/avr32/kernel/entry-avr32b.S
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#ifdef CONFIG_PREEMPT
# define preempt_stop		mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel	fault_restore_all
#endif

#define __MASK(x)	((1 << (x)) - 1)
#define IRQ_MASK	((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
			 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))

	.section .ex.text,"ax",@progbits
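	/*
	 * The CPU enters this table at a fixed, cause-specific offset
	 * from EVBA, so each vector below must be a single, 4-byte
	 * aligned branch to the real handler.
	 */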
	.align	2
exception_vectors:
	bral	handle_critical
	.align	2
	bral	handle_critical
	.align	2
	bral	do_bus_error_write
	.align	2
	bral	do_bus_error_read
	.align	2
	bral	do_nmi_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_debug
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_fpe_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	do_dtlb_modified

	/*
	 * r0 : PGD/PT/PTE
	 * r1 : Offending address
	 * r2 : Scratch register
	 * r3 : Cause (5, 12 or 13)
	 */
#define tlbmiss_save	pushm	r0-r3
#define tlbmiss_restore	popm	r0-r3
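/*
 * The TLB-miss fast path only ever touches r0-r3, so a four-register
 * pushm/popm is the entire context save and restore.
 */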

	.section .tlbx.ex.text,"ax",@progbits
	.global	itlb_miss
itlb_miss:
	tlbmiss_save
	rjmp	tlb_miss_common

	.section .tlbr.ex.text,"ax",@progbits
dtlb_miss_read:
	tlbmiss_save
	rjmp	tlb_miss_common

	.section .tlbw.ex.text,"ax",@progbits
dtlb_miss_write:
	tlbmiss_save

	.global	tlb_miss_common
tlb_miss_common:
	mfsr	r0, SYSREG_PTBR
	mfsr	r1, SYSREG_TLBEAR

	/* Is it the vmalloc space? */
	bld	r1, 31
	brcs	handle_vmalloc_miss
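	/*
	 * (Bit 31 set means the address is in the kernel half of the
	 * address space, where vmalloc mappings live; those are
	 * resolved against init's swapper_pg_dir instead.)
	 */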

	/* First level lookup */
pgtbl_lookup:
	lsr	r2, r1, PGDIR_SHIFT
	ld.w	r0, r0[r2 << 2]
	bld	r0, _PAGE_BIT_PRESENT
	brcc	page_table_not_present

	/* TODO: Check access rights on page table if necessary */

	/* Translate to virtual address in P1. */
	andl	r0, 0xf000
	sbr	r0, 31

	/* Second level lookup */
	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
	add	r2, r0, r1 << 2
	ld.w	r1, r2[0]
	bld	r1, _PAGE_BIT_PRESENT
	brcc	page_not_present

	/* Mark the page as accessed */
	sbr	r1, _PAGE_BIT_ACCESSED
	st.w	r2[0], r1

	/* Drop software flags */
	andl	r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
	mtsr	SYSREG_TLBELO, r1

	/* Figure out which entry we want to replace */
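	/*
	 * TLBARLO appears to hold one "not yet accessed" bit per TLB
	 * entry; clz picks the first such entry as the victim. If the
	 * register has gone to zero (C set), every entry has been used,
	 * so rearm all the bits and restart the scan at entry 0. The
	 * victim index is then written into the DRP field of MMUCR
	 * (bits 14 and up) before the tlbw below.
	 */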
	mfsr	r0, SYSREG_TLBARLO
	clz	r2, r0
	brcc	1f
	mov	r1, -1			/* All entries have been accessed, */
	mtsr	SYSREG_TLBARLO, r1	/* so reset TLBAR */
	mov	r2, 0			/* and start at 0 */
1:	mfsr	r1, SYSREG_MMUCR
	lsl	r2, 14
	andl	r1, 0x3fff, COH
	or	r1, r2
	mtsr	SYSREG_MMUCR, r1

	tlbw

	tlbmiss_restore
	rete

handle_vmalloc_miss:
	/* Simply do the lookup in init's page table */
	mov	r0, lo(swapper_pg_dir)
	orh	r0, hi(swapper_pg_dir)
	rjmp	pgtbl_lookup


	/* --- System Call --- */

	.section .scall.text,"ax",@progbits
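	/*
	 * AVR32 Linux syscall convention as used below: the syscall
	 * number arrives in r8, the first four arguments in r12-r9 per
	 * the normal C calling convention, the fifth in r5 (moved into
	 * r8 before the call) and the sixth on the stack, pushed by
	 * the userspace stub.
	 */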
system_call:
	pushm	r12	/* r12_orig */
	stmts	--sp, r0-lr
	zero_fp
	mfsr	r0, SYSREG_RAR_SUP
	mfsr	r1, SYSREG_RSR_SUP
	stm	--sp, r0-r1

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_SYSCALL_TRACE
	brcs	syscall_trace_enter

syscall_trace_cont:
	cp.w	r8, NR_syscalls
	brhs	syscall_badsys

	lddpc	lr, syscall_table_addr
	ld.w	lr, lr[r8 << 2]
	mov	r8, r5		/* 5th argument (6th is pushed by stub) */
	icall	lr

	.global	syscall_return
syscall_return:
	get_thread_info r0
	mask_interrupts		/* make sure we don't miss an interrupt
				   setting need_resched or sigpending
				   between sampling and the rets */

	/* Store the return value so that the correct value is loaded below */
	stdsp	sp[REG_R12], r12

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work

syscall_exit_cont:
	popm	r8-r9
	mtsr	SYSREG_RAR_SUP, r8
	mtsr	SYSREG_RSR_SUP, r9
	ldmts	sp++, r0-lr
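	/* (sub with a negative immediate is AVR32's add-to-SP idiom) */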
	sub	sp, -4		/* r12_orig */
	rets

	.align	2
syscall_table_addr:
	.long	sys_call_table

syscall_badsys:
	mov	r12, -ENOSYS
	rjmp	syscall_return

	.global	ret_from_fork
ret_from_fork:
	rcall	schedule_tail

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work
	rjmp	syscall_exit_cont

syscall_trace_enter:
	pushm	r8-r12
	rcall	syscall_trace
	popm	r8-r12
	rjmp	syscall_trace_cont

syscall_exit_work:
	bld	r1, TIF_SYSCALL_TRACE
	brcc	1f
	unmask_interrupts
	rcall	syscall_trace
	mask_interrupts
	ld.w	r1, r0[TI_flags]

1:	bld	r1, TIF_NEED_RESCHED
	brcc	2f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	3f
	unmask_interrupts
	mov	r12, sp
	mov	r11, r0
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

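	/*
	 * TIF_BREAKPOINT: arm OCD breakpoint/watchpoint unit 2A on the
	 * userspace return address so the debugger regains control the
	 * moment we return. The control word appears to combine the
	 * current ASID (from TLBEHI) with the unit's enable bits.
	 */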
3:	bld	r1, TIF_BREAKPOINT
	brcc	syscall_exit_cont
	mfsr	r3, SYSREG_TLBEHI
	lddsp	r2, sp[REG_PC]
	andl	r3, 0xff, COH
	lsl	r3, 1
	sbr	r3, 30
	sbr	r3, 0
	mtdr	DBGREG_BWA2A, r2
	mtdr	DBGREG_BWC2A, r3
	rjmp	syscall_exit_cont


	/* The slow path of the TLB miss handler */
page_table_not_present:
page_not_present:
	tlbmiss_restore
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_page_fault
	rjmp	ret_from_exception

	/* This function expects to find offending PC in SYSREG_RAR_EX */
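	/*
	 * It pushes the offending PC and SR onto the frame. If the
	 * exception came from supervisor mode (mode bits nonzero), the
	 * saved SP slot is first rewritten to the value SP had when the
	 * exception hit, i.e. the address just past this frame.
	 */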
save_full_context_ex:
	mfsr	r8, SYSREG_RSR_EX
	mov	r12, r8
	andh	r8, (MODE_MASK >> 16), COH
	mfsr	r11, SYSREG_RAR_EX
	brne	2f

1:	pushm	r11, r12	/* PC and SR */
	unmask_exceptions
	ret	r12

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

	/* Low-level exception handlers */
handle_critical:
	pushm	r12
	pushm	r0-r12
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_critical_exception

	/* We should never get here... */
bad_return:
	sub	r12, pc, (. - 1f)
	bral	panic
	.align	2
1:	.asciz	"Return from critical exception!"

	.align	1
do_bus_error_write:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mov	r11, 1
	rjmp	1f

do_bus_error_read:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mov	r11, 0
1:	mfsr	r12, SYSREG_BEAR
	mov	r10, sp
	rcall	do_bus_error
	rjmp	ret_from_exception

	.align	1
do_nmi_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	/* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_nmi
	rjmp	bad_return

handle_address_fault:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_address_exception
	rjmp	ret_from_exception

handle_protection_fault:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_page_fault
	rjmp	ret_from_exception

	.align	1
do_illegal_opcode_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_illegal_opcode
	rjmp	ret_from_exception

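/*
 * DTLB-modified exception: first write to a page whose TLB entry has
 * the dirty bit clear. Repeat the page-table walk, set the dirty bit
 * in the PTE, then rewrite the TLB entry with the updated flags.
 */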
do_dtlb_modified:
	pushm	r0-r3
	mfsr	r1, SYSREG_TLBEAR
	mfsr	r0, SYSREG_PTBR
	lsr	r2, r1, PGDIR_SHIFT
	ld.w	r0, r0[r2 << 2]
	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT

	/* Translate to virtual address in P1 */
	andl	r0, 0xf000
	sbr	r0, 31
	add	r2, r0, r1 << 2
	ld.w	r3, r2[0]
	sbr	r3, _PAGE_BIT_DIRTY
	mov	r0, r3
	st.w	r2[0], r3

	/* The page table is up-to-date. Update the TLB entry as well */
	andl	r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
	mtsr	SYSREG_TLBELO, r0

	/* MMUCR[DRP] is updated automatically, so let's go... */
	tlbw

	popm	r0-r3
	rete

do_fpe_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	unmask_interrupts
	mov	r12, 26
	mov	r11, sp
	rcall	do_fpe
	rjmp	ret_from_exception

ret_from_exception:
	mask_interrupts
	lddsp	r4, sp[REG_SR]
	andh	r4, (MODE_MASK >> 16), COH
	brne	fault_resume_kernel

	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	fault_exit_work

fault_resume_user:
	popm	r8-r9
	mask_exceptions
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	ldmts	sp++, r0-lr
	sub	sp, -4
	rete

fault_resume_kernel:
#ifdef CONFIG_PREEMPT
	get_thread_info	r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1f
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
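	/* Don't preempt if the interrupted context had interrupts
	 * globally masked (GM set in the saved SR). */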
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1f
	rcall	preempt_schedule_irq
1:
#endif

	popm	r8-r9
	mask_exceptions
	mfsr	r1, SYSREG_SR
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	popm	lr
	sub	sp, -4		/* ignore SP */
	popm	r0-r12
	sub	sp, -4		/* ignore r12_orig */
	rete

irq_exit_work:
	/* Switch to exception mode so that we can share the same code. */
	mfsr	r8, SYSREG_SR
	cbr	r8, SYSREG_M0_OFFSET
	orh	r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
	mtsr	SYSREG_SR, r8
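	/* sub pc, -2 branches to the very next instruction, flushing
	 * the pipeline so the mode change written to SR takes effect
	 * before anything else executes. */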
	sub	pc, -2
	get_thread_info r0
	ld.w	r1, r0[TI_flags]

fault_exit_work:
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

1:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	2f
	unmask_interrupts
	mov	r12, sp
	mov	r11, r0
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

2:	bld	r1, TIF_BREAKPOINT
	brcc	fault_resume_user
	mfsr	r3, SYSREG_TLBEHI
	lddsp	r2, sp[REG_PC]
	andl	r3, 0xff, COH
	lsl	r3, 1
	sbr	r3, 30
	sbr	r3, 0
	mtdr	DBGREG_BWA2A, r2
	mtdr	DBGREG_BWC2A, r3
	rjmp	fault_resume_user

	/* If we get a debug trap from privileged context we end up here */
handle_debug_priv:
	/* Fix up LR and SP in regs. r11 contains the mode we came from */
	mfsr	r8, SYSREG_SR
	mov	r9, r8
	andh	r8, hi(~MODE_MASK)
	or	r8, r11
	mtsr	SYSREG_SR, r8
	sub	pc, -2
	stdsp	sp[REG_LR], lr
	mtsr	SYSREG_SR, r9
	sub	pc, -2
	sub	r10, sp, -FRAME_SIZE_FULL
	stdsp	sp[REG_SP], r10
	mov	r12, sp
	rcall	do_debug_priv

	/* Now, put everything back */
	ssrf	SR_EM_BIT
	popm	r10, r11
	mtsr	SYSREG_RAR_DBG, r10
	mtsr	SYSREG_RSR_DBG, r11
	mfsr	r8, SYSREG_SR
	mov	r9, r8
	andh	r8, hi(~MODE_MASK)
	andh	r11, hi(MODE_MASK)
	or	r8, r11
	mtsr	SYSREG_SR, r8
	sub	pc, -2
	popm	lr
	mtsr	SYSREG_SR, r9
	sub	pc, -2
	sub	sp, -4		/* skip SP */
	popm	r0-r12
	sub	sp, -4
	retd

/*
 * At this point, everything is masked, that is, interrupts,
 * exceptions and debugging traps. We might get called from
 * interrupt or exception context in some rare cases, but this
 * will be taken care of by do_debug(), so we're not going to
 * do a 100% correct context save here.
 */
handle_debug:
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	mfsr	r10, SYSREG_RAR_DBG
	mfsr	r11, SYSREG_RSR_DBG
	unmask_exceptions
	pushm	r10,r11
	andh	r11, (MODE_MASK >> 16), COH
	brne	handle_debug_priv

	mov	r12, sp
	rcall	do_debug

	lddsp	r10, sp[REG_SR]
	andh	r10, (MODE_MASK >> 16), COH
	breq	debug_resume_user

debug_restore_all:
	popm	r10,r11
	mask_exceptions
	mtsr	SYSREG_RSR_DBG, r11
	mtsr	SYSREG_RAR_DBG, r10
	ldmts	sp++, r0-lr
	sub	sp, -4
	retd

debug_resume_user:
	get_thread_info r0
	mask_interrupts

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_DBGWORK_MASK, COH
	breq	debug_restore_all

1:	bld	r1, TIF_NEED_RESCHED
	brcc	2f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	3f
	unmask_interrupts
	mov	r12, sp
	mov	r11, r0
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

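	/*
	 * TIF_SINGLE_STEP: set the single-step bit in the OCD debug
	 * control register so the CPU traps again after executing one
	 * more instruction.
	 */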
3:	bld	r1, TIF_SINGLE_STEP
	brcc	debug_restore_all
	mfdr	r2, DBGREG_DC
	sbr	r2, DC_SS_BIT
	mtdr	DBGREG_DC, r2
	rjmp	debug_restore_all

	.set	rsr_int0,	SYSREG_RSR_INT0
	.set	rsr_int1,	SYSREG_RSR_INT1
	.set	rsr_int2,	SYSREG_RSR_INT2
	.set	rsr_int3,	SYSREG_RSR_INT3
	.set	rar_int0,	SYSREG_RAR_INT0
	.set	rar_int1,	SYSREG_RAR_INT1
	.set	rar_int2,	SYSREG_RAR_INT2
	.set	rar_int3,	SYSREG_RAR_INT3

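/*
 * IRQ_LEVEL stamps out one entry point per interrupt priority level
 * (0-3). Each saves a full pt_regs frame, calls do_IRQ with the level
 * in r12, and on return either restores context directly or drops
 * into the work/preemption paths above.
 */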
	.macro	IRQ_LEVEL level
	.type	irq_level\level, @function
irq_level\level:
	sub	sp, 4		/* r12_orig */
	stmts	--sp,r0-lr
	mfsr	r8, rar_int\level
	mfsr	r9, rsr_int\level
	pushm	r8-r9

	mov	r11, sp
	mov	r12, \level

	rcall	do_IRQ

	lddsp	r4, sp[REG_SR]
	andh	r4, (MODE_MASK >> 16), COH
#ifdef CONFIG_PREEMPT
	brne	2f
#else
	brne	1f
#endif

	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	irq_exit_work

1:	popm	r8-r9
	mtsr	rar_int\level, r8
	mtsr	rsr_int\level, r9
	ldmts	sp++,r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete

#ifdef CONFIG_PREEMPT
2:
	get_thread_info	r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1b
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1b
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1b
	rcall	preempt_schedule_irq
	rjmp	1b
#endif
	.endm

	.section .irq.text,"ax",@progbits

	.global	irq_level0
	.global	irq_level1
	.global	irq_level2
	.global	irq_level3
	IRQ_LEVEL 0
	IRQ_LEVEL 1
	IRQ_LEVEL 2
	IRQ_LEVEL 3