/*
 * arch/avr32/kernel/entry-avr32b.S
 */
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * With CONFIG_PREEMPT, preempt_stop masks interrupts on the exception
 * return path.  Without preemption it expands to nothing, and the
 * kernel-resume path collapses into the plain restore path.
 */
#ifdef CONFIG_PREEMPT
# define preempt_stop		mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel	fault_restore_all
#endif

/* Mask covering the softirq and hardirq count fields of preempt_count */
#define __MASK(x)	((1 << (x)) - 1)
#define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
		  (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
37
	/*
	 * Exception vector table.  Each slot is a single "bral" to the
	 * real handler; ".align 2" (4-byte alignment) keeps every entry
	 * at the fixed offset from EVBA that the hardware dispatches to
	 * for the corresponding exception cause.
	 */
	.section .ex.text,"ax",@progbits
	.align	2
exception_vectors:
	bral	handle_critical
	.align	2
	bral	handle_critical
	.align	2
	bral	do_bus_error_write
	.align	2
	bral	do_bus_error_read
	.align	2
	bral	do_nmi_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_debug
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_fpe_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	do_dtlb_modified

	/*
	 * Register usage in the fast TLB-miss handlers below:
	 *
	 * r0 : PGD/PT/PTE
	 * r1 : Offending address
	 * r2 : Scratch register
	 * r3 : Cause (5, 12 or 13)
	 */
#define tlbmiss_save	pushm	r0-r3
#define tlbmiss_restore	popm	r0-r3
85
	/*
	 * Hardware TLB-miss entry points.  Each lives in its own section
	 * so the linker script can place it where the MMU vectors to
	 * (presumably fixed offsets -- confirm against the linker script).
	 * All three paths share tlb_miss_common; dtlb_miss_write reaches
	 * it by falling through.
	 */
	.section .tlbx.ex.text,"ax",@progbits
	.global	itlb_miss
itlb_miss:
	tlbmiss_save
	rjmp	tlb_miss_common

	.section .tlbr.ex.text,"ax",@progbits
dtlb_miss_read:
	tlbmiss_save
	rjmp	tlb_miss_common

	.section .tlbw.ex.text,"ax",@progbits
dtlb_miss_write:
	tlbmiss_save
	/* fall through into tlb_miss_common */

	.global	tlb_miss_common
tlb_miss_common:
	mfsr	r0, SYSREG_TLBEAR	/* r0 = faulting virtual address */
	mfsr	r1, SYSREG_PTBR		/* r1 = page table base */

	/* Is it the vmalloc space? (bit 31 set = kernel/vmalloc range) */
	bld	r0, 31
	brcs	handle_vmalloc_miss

	/* First level lookup */
pgtbl_lookup:
	lsr	r2, r0, PGDIR_SHIFT
	ld.w	r3, r1[r2 << 2]		/* r3 = PGD entry */
	bfextu	r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
	bld	r3, _PAGE_BIT_PRESENT
	brcc	page_table_not_present

	/* Translate to virtual address in P1. */
	andl	r3, 0xf000
	sbr	r3, 31

	/* Second level lookup */
	ld.w	r2, r3[r1 << 2]		/* r2 = PTE */
	mfsr	r0, SYSREG_TLBARLO
	bld	r2, _PAGE_BIT_PRESENT
	brcc	page_not_present

	/* Mark the page as accessed */
	sbr	r2, _PAGE_BIT_ACCESSED
	st.w	r3[r1 << 2], r2

	/* Drop software flags before loading the hardware TLB entry */
	andl	r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
	mtsr	SYSREG_TLBELO, r2

	/*
	 * Figure out which entry we want to replace, based on the
	 * accessed bits in TLBARLO (r0).  NOTE(review): relies on the
	 * clz carry-flag semantics -- confirm against the AVR32
	 * architecture manual.
	 */
	mfsr	r1, SYSREG_MMUCR
	clz	r2, r0
	brcc	1f
	mov	r3, -1			/* All entries have been accessed, */
	mov	r2, 0			/* so start at 0 */
	mtsr	SYSREG_TLBARLO, r3	/* and reset TLBAR */

1:	bfins	r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
	mtsr	SYSREG_MMUCR, r1
	tlbw

	tlbmiss_restore
	rete

handle_vmalloc_miss:
	/* Simply do the lookup in init's page table */
	mov	r1, lo(swapper_pg_dir)
	orh	r1, hi(swapper_pg_dir)
	rjmp	pgtbl_lookup
156
157
	/* --- System Call --- */

	.section .scall.text,"ax",@progbits
system_call:
#ifdef CONFIG_PREEMPT
	/*
	 * Keep interrupts masked until RAR_SUP/RSR_SUP have been read;
	 * an interrupt taken earlier is special-cased by the 4: path in
	 * the IRQ_LEVEL macro below.
	 */
	mask_interrupts
#endif
	pushm	r12		/* r12_orig */
	stmts	--sp, r0-lr
	mfsr	r0, SYSREG_RAR_SUP	/* return address */
	mfsr	r1, SYSREG_RSR_SUP	/* return status register */
#ifdef CONFIG_PREEMPT
	unmask_interrupts
#endif
	zero_fp
	stm	--sp, r0-r1

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_SYSCALL_TRACE
	brcs	syscall_trace_enter

syscall_trace_cont:
	/* Reject out-of-range syscall numbers (r8 holds the number) */
	cp.w	r8, NR_syscalls
	brhs	syscall_badsys

	lddpc	lr, syscall_table_addr
	ld.w	lr, lr[r8 << 2]
	mov	r8, r5		/* 5th argument (6th is pushed by stub) */
	icall	lr

	.global	syscall_return
syscall_return:
	get_thread_info r0
	mask_interrupts		/* make sure we don't miss an interrupt
				   setting need_resched or sigpending
				   between sampling and the rets */

	/* Store the return value so that the correct value is loaded below */
	stdsp	sp[REG_R12], r12

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work

syscall_exit_cont:
	popm	r8-r9		/* saved PC and SR */
	mtsr	SYSREG_RAR_SUP, r8
	mtsr	SYSREG_RSR_SUP, r9
	ldmts	sp++, r0-lr
	sub	sp, -4		/* r12_orig */
	rets

	.align	2
syscall_table_addr:
	.long	sys_call_table

syscall_badsys:
	mov	r12, -ENOSYS
	rjmp	syscall_return

	.global ret_from_fork
ret_from_fork:
	rcall	schedule_tail

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work
	rjmp	syscall_exit_cont

syscall_trace_enter:
	pushm	r8-r12
	rcall	syscall_trace
	popm	r8-r12
	rjmp	syscall_trace_cont

	/*
	 * Slow exit path: loop until every work flag (trace, resched,
	 * signals, breakpoint) has been handled.  r0 = thread_info,
	 * r1 = current TI_flags, re-read after each unmasked section.
	 */
syscall_exit_work:
	bld	r1, TIF_SYSCALL_TRACE
	brcc	1f
	unmask_interrupts
	rcall	syscall_trace
	mask_interrupts
	ld.w	r1, r0[TI_flags]

1:	bld	r1, TIF_NEED_RESCHED
	brcc	2f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	3f
	unmask_interrupts
	mov	r12, sp
	mov	r11, r0
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

3:	bld	r1, TIF_BREAKPOINT
	brcc	syscall_exit_cont
	rjmp	enter_monitor_mode
5f97f7f9
HS
268
	/* The slow path of the TLB miss handler */
page_table_not_present:
page_not_present:
	/* Undo the fast-path saves, build a full frame and call C code */
	tlbmiss_restore
	sub	sp, 4			/* r12_orig slot */
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR		/* exception cause */
	mov	r11, sp			/* pt_regs pointer */
	rcall	do_page_fault
	rjmp	ret_from_exception

	/* This function expects to find offending PC in SYSREG_RAR_EX */
	.type	save_full_context_ex, @function
	.align	2
save_full_context_ex:
	mfsr	r11, SYSREG_RAR_EX	/* r11 = interrupted PC */
	sub	r9, pc, . - debug_trampoline
	mfsr	r8, SYSREG_RSR_EX	/* r8 = interrupted SR */
	cp.w	r9, r11			/* were we about to enter debug_trampoline? */
	breq	3f
	mov	r12, r8
	andh	r8, (MODE_MASK >> 16), COH
	brne	2f			/* non-zero mode bits: fix up saved SP first */

1:	pushm	r11, r12	/* PC and SR */
	unmask_exceptions
	ret	r12

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

	/*
	 * The debug handler set up a trampoline to make us
	 * automatically enter monitor mode upon return, but since
	 * we're saving the full context, we must assume that the
	 * exception handler might want to alter the return address
	 * and/or status register. So we need to restore the original
	 * context and enter monitor mode manually after the exception
	 * has been handled.
	 */
3:	get_thread_info r8
	ld.w	r11, r8[TI_rar_saved]
	ld.w	r12, r8[TI_rsr_saved]
	rjmp	1b
	.size	save_full_context_ex, . - save_full_context_ex
316
5f97f7f9
HS
	/* Low-level exception handlers */
handle_critical:
	/*
	 * Unrecoverable exception: save the full context and hand over
	 * to do_critical_exception.  The "sub sp, 4" reserves the
	 * r12_orig slot that stmts does not push.
	 */
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_critical_exception

	/* We should never get here... */
bad_return:
	sub	r12, pc, (. - 1f)	/* r12 = address of the message below */
	bral	panic
	.align	2
1:	.asciz	"Return from critical exception!"
332
	/*
	 * Bus error handlers.  Both variants build a full frame and
	 * call do_bus_error(addr, write_access, regs); they differ only
	 * in the write flag passed in r11.
	 */
	.align	1
do_bus_error_write:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mov	r11, 1			/* write access */
	rjmp	1f

do_bus_error_read:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mov	r11, 0			/* read access */
1:	mfsr	r12, SYSREG_BEAR	/* faulting bus address */
	mov	r10, sp			/* pt_regs */
	rcall	do_bus_error
	rjmp	ret_from_exception
350
	/*
	 * NMI entry.  Saves a full frame, calls do_nmi(ecr, regs) and
	 * returns with rete.  r0 carries the interrupted context's mode
	 * across the C call: non-zero mode means the saved SP had to be
	 * fixed up (2:) and registers must be restored the long way (3:).
	 */
	.align	1
do_nmi_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	mfsr	r9, SYSREG_RSR_NMI
	mfsr	r8, SYSREG_RAR_NMI
	bfextu	r0, r9, MODE_SHIFT, 3	/* r0 = interrupted mode */
	brne	2f

1:	pushm	r8, r9	/* PC and SR */
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_nmi
	popm	r8-r9
	mtsr	SYSREG_RAR_NMI, r8
	tst	r0, r0
	mtsr	SYSREG_RSR_NMI, r9
	brne	3f

	ldmts	sp++, r0-lr
	sub	sp, -4		/* skip r12_orig */
	rete

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

3:	popm	lr
	sub	sp, -4		/* skip sp */
	popm	r0-r12
	sub	sp, -4		/* skip r12_orig */
	rete
5f97f7f9
HS
383
handle_address_fault:
	/* Full-frame save, then do_address_exception(ecr, regs) */
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_address_exception
	rjmp	ret_from_exception

handle_protection_fault:
	/* Protection faults are handled as page faults */
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_page_fault
	rjmp	ret_from_exception

	.align	1
do_illegal_opcode_ll:
	/* Full-frame save, then do_illegal_opcode(ecr, regs) */
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_illegal_opcode
	rjmp	ret_from_exception
411
do_dtlb_modified:
	/*
	 * Fast path for the DTLB-modified exception: set the dirty bit
	 * in the PTE and rewrite the matching TLB entry, without
	 * building a full exception frame.
	 */
	pushm	r0-r3
	mfsr	r1, SYSREG_TLBEAR	/* faulting address */
	mfsr	r0, SYSREG_PTBR
	lsr	r2, r1, PGDIR_SHIFT
	ld.w	r0, r0[r2 << 2]		/* r0 = PGD entry */
	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT	/* r1 = PTE index */

	/* Translate to virtual address in P1 */
	andl	r0, 0xf000
	sbr	r0, 31
	add	r2, r0, r1 << 2
	ld.w	r3, r2[0]		/* r3 = PTE */
	sbr	r3, _PAGE_BIT_DIRTY
	mov	r0, r3
	st.w	r2[0], r3

	/* The page table is up-to-date. Update the TLB entry as well */
	andl	r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
	mtsr	SYSREG_TLBELO, r0

	/* MMUCR[DRP] is updated automatically, so let's go... */
	tlbw

	popm	r0-r3
	rete
439
do_fpe_ll:
	/* Floating-point exception: full-frame save, then do_fpe(26, regs) */
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	unmask_interrupts
	mov	r12, 26		/* NOTE(review): magic exception-cause value -- should be a named constant */
	mov	r11, sp
	rcall	do_fpe
	rjmp	ret_from_exception
449
	/*
	 * Common exception return path.  Dispatches on the saved SR's
	 * mode bits: zero mode bits mean return-to-user (check TIF work
	 * first), otherwise return-to-kernel (optionally preempting).
	 */
ret_from_exception:
	mask_interrupts
	lddsp	r4, sp[REG_SR]

	andh	r4, (MODE_MASK >> 16), COH
	brne	fault_resume_kernel

	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	fault_exit_work

fault_resume_user:
	popm	r8-r9		/* saved PC and SR */
	mask_exceptions
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	ldmts	sp++, r0-lr
	sub	sp, -4		/* skip r12_orig */
	rete

fault_resume_kernel:
#ifdef CONFIG_PREEMPT
	get_thread_info	r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1f		/* preemption disabled */
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1f		/* no reschedule requested */
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1f		/* interrupts were globally masked: don't preempt */
	rcall	preempt_schedule_irq
1:
#endif

	popm	r8-r9		/* saved PC and SR */
	mask_exceptions
	mfsr	r1, SYSREG_SR	/* NOTE(review): r1 appears unused below -- confirm */
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	popm	lr
	sub	sp, -4		/* ignore SP */
	popm	r0-r12
	sub	sp, -4		/* ignore r12_orig */
	rete
497
irq_exit_work:
	/* Switch to exception mode so that we can share the same code. */
	mfsr	r8, SYSREG_SR
	cbr	r8, SYSREG_M0_OFFSET
	orh	r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
	mtsr	SYSREG_SR, r8
	sub	pc, -2		/* jump to next insn so the new SR takes effect */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]

	/*
	 * Work loop shared by the fault and IRQ exit paths.
	 * r0 = thread_info, r1 = TI_flags (re-read after each
	 * interrupts-unmasked section); loop until no work remains.
	 */
fault_exit_work:
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

1:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	2f
	unmask_interrupts
	mov	r12, sp		/* pt_regs */
	mov	r11, r0		/* thread_info */
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

2:	bld	r1, TIF_BREAKPOINT
	brcc	fault_resume_user
	rjmp	enter_monitor_mode
531
	.section .kprobes.text, "ax", @progbits
	.type	handle_debug, @function
handle_debug:
	/*
	 * Debug exception entry: save a full frame plus RAR/RSR_DBG,
	 * call do_debug(regs), then restore and return with retd.
	 */
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	mfsr	r8, SYSREG_RAR_DBG
	mfsr	r9, SYSREG_RSR_DBG
	unmask_exceptions
	pushm	r8-r9
	bfextu	r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	brne	debug_fixup_regs	/* trapped from a non-zero mode */

.Ldebug_fixup_cont:
#ifdef CONFIG_TRACE_IRQFLAGS
	rcall	trace_hardirqs_off
#endif
	mov	r12, sp
	rcall	do_debug
	mov	sp, r12

	lddsp	r2, sp[REG_SR]
	bfextu	r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	brne	debug_resume_kernel

	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	mov	r2, _TIF_DBGWORK_MASK
	tst	r1, r2
	brne	debug_exit_work

	bld	r1, TIF_SINGLE_STEP	/* single-stepping? re-arm DC[SS] */
	brcc	1f
	mfdr	r4, OCD_DC
	sbr	r4, OCD_DC_SS_BIT
	mtdr	OCD_DC, r4

1:	popm	r10,r11		/* saved PC and SR */
	mask_exceptions
	mtsr	SYSREG_RSR_DBG, r11
	mtsr	SYSREG_RAR_DBG, r10
#ifdef CONFIG_TRACE_IRQFLAGS
	rcall	trace_hardirqs_on
1:
#endif
	ldmts	sp++, r0-lr
	sub	sp, -4
	retd
	.size	handle_debug, . - handle_debug
580
	/* Mode of the trapped context is in r9 */
	.type	debug_fixup_regs, @function
debug_fixup_regs:
	/*
	 * Temporarily switch SR to the trapped context's mode so that
	 * context's lr can be stored into the frame (NOTE(review):
	 * relies on per-mode banked registers -- confirm against the
	 * AVR32 architecture manual), then patch the saved SP.
	 */
	mfsr	r8, SYSREG_SR
	mov	r10, r8			/* remember original SR */
	bfins	r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	mtsr	SYSREG_SR, r8
	sub	pc, -2			/* let the SR write take effect */
	stdsp	sp[REG_LR], lr
	mtsr	SYSREG_SR, r10		/* back to debug mode */
	sub	pc, -2
	sub	r8, sp, -FRAME_SIZE_FULL
	stdsp	sp[REG_SP], r8		/* saved SP = SP above this frame */
	rjmp	.Ldebug_fixup_cont
	.size	debug_fixup_regs, . - debug_fixup_regs
5f97f7f9 596
2507bc13
HS
	.type	debug_resume_kernel, @function
debug_resume_kernel:
	/*
	 * Return from the debug exception into a kernel context.
	 * r3 holds the trapped context's mode bits (set by handle_debug);
	 * SR is switched to that mode to pop its lr, then restored.
	 */
	mask_exceptions
	popm	r10, r11	/* saved PC and SR */
	mtsr	SYSREG_RAR_DBG, r10
	mtsr	SYSREG_RSR_DBG, r11
#ifdef CONFIG_TRACE_IRQFLAGS
	bld	r11, SYSREG_GM_OFFSET
	brcc	1f
	rcall	trace_hardirqs_on
1:
#endif
	mfsr	r2, SYSREG_SR
	mov	r1, r2			/* remember original SR */
	bfins	r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	mtsr	SYSREG_SR, r2
	sub	pc, -2			/* let the SR write take effect */
	popm	lr
	mtsr	SYSREG_SR, r1		/* back to debug mode */
	sub	pc, -2
	sub	sp, -4			/* skip SP */
	popm	r0-r12
	sub	sp, -4			/* skip r12_orig */
	retd
	.size	debug_resume_kernel, . - debug_resume_kernel
5f97f7f9 622
2507bc13
HS
	.type	debug_exit_work, @function
debug_exit_work:
	/*
	 * We must return from Monitor Mode using a retd, and we must
	 * not schedule since that involves the D bit in SR getting
	 * cleared by something other than the debug hardware. This
	 * may cause undefined behaviour according to the Architecture
	 * manual.
	 *
	 * So we fix up the return address and status and return to a
	 * stub below in Exception mode. From there, we can follow the
	 * normal exception return path.
	 *
	 * The real return address and status registers are stored on
	 * the stack in the way the exception return path understands,
	 * so no need to fix anything up there.
	 */
	sub	r8, pc, . - fault_exit_work
	mtsr	SYSREG_RAR_DBG, r8	/* "return" straight into fault_exit_work */
	mov	r9, 0
	orh	r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
	mtsr	SYSREG_RSR_DBG, r9
	sub	pc, -2
	retd
	.size	debug_exit_work, . - debug_exit_work
5f97f7f9
HS
648
	/*
	 * Map the generic rar_int/rsr_int names used by the IRQ_LEVEL
	 * macro onto the per-level system registers.
	 */
	.set	rsr_int0,	SYSREG_RSR_INT0
	.set	rsr_int1,	SYSREG_RSR_INT1
	.set	rsr_int2,	SYSREG_RSR_INT2
	.set	rsr_int3,	SYSREG_RSR_INT3
	.set	rar_int0,	SYSREG_RAR_INT0
	.set	rar_int1,	SYSREG_RAR_INT1
	.set	rar_int2,	SYSREG_RAR_INT2
	.set	rar_int3,	SYSREG_RAR_INT3

	/*
	 * Generates the irq_level0..irq_level3 entry points: save a
	 * full frame, call do_IRQ(level, regs), then resume -- handling
	 * work flags, cpu-idle wakeup and (optionally) kernel
	 * preemption on the way out.
	 */
	.macro	IRQ_LEVEL level
	.type	irq_level\level, @function
irq_level\level:
	sub	sp, 4		/* r12_orig */
	stmts	--sp,r0-lr
	mfsr	r8, rar_int\level
	mfsr	r9, rsr_int\level

#ifdef CONFIG_PREEMPT
	/*
	 * Did we interrupt system_call at its very first instruction,
	 * before RAR_SUP/RSR_SUP were saved?  Then return with the
	 * saved SR's interrupt mask set (4: below) so the save can
	 * complete undisturbed.
	 */
	sub	r11, pc, (. - system_call)
	cp.w	r11, r8
	breq	4f
#endif

	pushm	r8-r9

	mov	r11, sp		/* pt_regs */
	mov	r12, \level	/* interrupt level */

	rcall	do_IRQ

	lddsp	r4, sp[REG_SR]
	bfextu	r4, r4, SYSREG_M0_OFFSET, 3
	cp.w	r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
	breq	2f		/* interrupted supervisor mode (cpu_idle) */
	cp.w	r4, MODE_USER >> SYSREG_M0_OFFSET
#ifdef CONFIG_PREEMPT
	brne	3f		/* kernel context: try to preempt */
#else
	brne	1f		/* kernel context: plain resume */
#endif

	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	irq_exit_work

1:
#ifdef CONFIG_TRACE_IRQFLAGS
	rcall	trace_hardirqs_on
#endif
	popm	r8-r9		/* saved PC and SR */
	mtsr	rar_int\level, r8
	mtsr	rsr_int\level, r9
	ldmts	sp++,r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete

#ifdef CONFIG_PREEMPT
4:	mask_interrupts
	mfsr	r8, rsr_int\level
	sbr	r8, 16		/* mask interrupts in the saved SR
				   (bit 16 -- presumably SR[GM]; confirm
				   against sysreg.h) */
	mtsr	rsr_int\level, r8
	ldmts	sp++, r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete
#endif

	/* Interrupted supervisor mode: wake cpu_idle if it was sleeping */
2:	get_thread_info	r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_CPU_GOING_TO_SLEEP
#ifdef CONFIG_PREEMPT
	brcc	3f
#else
	brcc	1b
#endif
	sub	r1, pc, . - cpu_idle_skip_sleep
	stdsp	sp[REG_PC], r1	/* redirect return PC past the sleep insn */
#ifdef CONFIG_PREEMPT
3:	get_thread_info r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1b		/* preemption disabled */
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1b		/* no reschedule requested */
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1b		/* interrupts were masked: don't preempt */
	rcall	preempt_schedule_irq
#endif
	rjmp	1b
	.endm

	.section .irq.text,"ax",@progbits

	.global	irq_level0
	.global	irq_level1
	.global	irq_level2
	.global	irq_level3
	IRQ_LEVEL 0
	IRQ_LEVEL 1
	IRQ_LEVEL 2
	IRQ_LEVEL 3
2507bc13
HS
752
	.section .kprobes.text, "ax", @progbits
	.type	enter_monitor_mode, @function
enter_monitor_mode:
	/*
	 * We need to enter monitor mode to do a single step. The
	 * monitor code will alter the return address so that we
	 * return directly to the user instead of returning here.
	 */
	breakpoint
	rjmp	breakpoint_failed

	.size	enter_monitor_mode, . - enter_monitor_mode

	.type	debug_trampoline, @function
	.global	debug_trampoline
debug_trampoline:
	/*
	 * Save the registers on the stack so that the monitor code
	 * can find them easily.
	 */
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	get_thread_info	r0
	ld.w	r8, r0[TI_rar_saved]
	ld.w	r9, r0[TI_rsr_saved]
	pushm	r8-r9

	/*
	 * The monitor code will alter the return address so we don't
	 * return here.
	 */
	breakpoint
	rjmp	breakpoint_failed
	.size	debug_trampoline, . - debug_trampoline

	.type	breakpoint_failed, @function
breakpoint_failed:
	/*
	 * Something went wrong. Perhaps the debug hardware isn't
	 * enabled?
	 */
	lda.w	r12, msg_breakpoint_failed
	mov	r11, sp
	mov	r10, 9		/* SIGKILL */
	call	die
1:	rjmp	1b		/* die() should not return; spin if it does */

msg_breakpoint_failed:
	.asciz	"Failed to enter Debug Mode"