/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#ifdef CONFIG_PREEMPT
# define preempt_stop           mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel    fault_restore_all
#endif

#define __MASK(x)       ((1 << (x)) - 1)
#define IRQ_MASK        ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
                         (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))

        .section .ex.text,"ax",@progbits
        .align  2
exception_vectors:
        bral    handle_critical
        .align  2
        bral    handle_critical
        .align  2
        bral    do_bus_error_write
        .align  2
        bral    do_bus_error_read
        .align  2
        bral    do_nmi_ll
        .align  2
        bral    handle_address_fault
        .align  2
        bral    handle_protection_fault
        .align  2
        bral    handle_debug
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    do_fpe_ll
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    handle_address_fault
        .align  2
        bral    handle_address_fault
        .align  2
        bral    handle_protection_fault
        .align  2
        bral    handle_protection_fault
        .align  2
        bral    do_dtlb_modified

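/*
 * The TLB refill fast path below only needs four scratch registers;
 * these macros save and restore r0-r3 around it.
 */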
#define tlbmiss_save    pushm   r0-r3
#define tlbmiss_restore popm    r0-r3

        .org    0x50
        .global itlb_miss
itlb_miss:
        tlbmiss_save
        rjmp    tlb_miss_common

        .org    0x60
dtlb_miss_read:
        tlbmiss_save
        rjmp    tlb_miss_common

        .org    0x70
dtlb_miss_write:
        tlbmiss_save

        .global tlb_miss_common
        .align  2
tlb_miss_common:
        mfsr    r0, SYSREG_TLBEAR
        mfsr    r1, SYSREG_PTBR

        /*
         * First level lookup: The PGD contains virtual pointers to
         * the second-level page tables, but they may be NULL if not
         * present.
         */
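        /*
         * The faulting virtual address in TLBEAR is split in two:
         * the bits above PGDIR_SHIFT index the PGD, and the bits
         * between PAGE_SHIFT and PGDIR_SHIFT index the second-level
         * page table.
         */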
pgtbl_lookup:
        lsr     r2, r0, PGDIR_SHIFT
        ld.w    r3, r1[r2 << 2]
        bfextu  r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
        cp.w    r3, 0
        breq    page_table_not_present

        /* Second level lookup */
        ld.w    r2, r3[r1 << 2]
        mfsr    r0, SYSREG_TLBARLO
        bld     r2, _PAGE_BIT_PRESENT
        brcc    page_not_present

        /* Mark the page as accessed */
        sbr     r2, _PAGE_BIT_ACCESSED
        st.w    r3[r1 << 2], r2

        /* Drop software flags */
        andl    r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
        mtsr    SYSREG_TLBELO, r2

        /* Figure out which entry we want to replace */
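        /*
         * TLBARLO holds one "accessed" flag per TLB entry.  clz is
         * used to pick a victim entry whose flag says it has not been
         * accessed recently; once every entry has been accessed,
         * TLBARLO reads as zero (clz sets C), so the flags are reset
         * and replacement restarts at entry 0.  The chosen index is
         * written to the DRP field of MMUCR before the tlbw below.
         */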
        mfsr    r1, SYSREG_MMUCR
        clz     r2, r0
        brcc    1f
        mov     r3, -1                  /* All entries have been accessed, */
        mov     r2, 0                   /* so start at 0 */
        mtsr    SYSREG_TLBARLO, r3      /* and reset TLBAR */

1:      bfins   r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
        mtsr    SYSREG_MMUCR, r1
        tlbw

        tlbmiss_restore
        rete

        /* The slow path of the TLB miss handler */
        .align  2
page_table_not_present:
        /* Do we need to synchronize with swapper_pg_dir? */
        bld     r0, 31
        brcs    sync_with_swapper_pg_dir

page_not_present:
        tlbmiss_restore
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        call    do_page_fault
        rjmp    ret_from_exception

        .align  2
sync_with_swapper_pg_dir:
        /*
         * If swapper_pg_dir contains a non-NULL second-level page
         * table pointer, copy it into the current PGD. If not, we
         * must handle it as a full-blown page fault.
         *
         * Jumping back to pgtbl_lookup causes an unnecessary lookup,
         * but it is guaranteed to be a cache hit, it won't happen
         * very often, and we absolutely do not want to sacrifice any
         * performance in the fast path in order to improve this.
         */
        mov     r1, lo(swapper_pg_dir)
        orh     r1, hi(swapper_pg_dir)
        ld.w    r3, r1[r2 << 2]
        cp.w    r3, 0
        breq    page_not_present
        mfsr    r1, SYSREG_PTBR
        st.w    r1[r2 << 2], r3
        rjmp    pgtbl_lookup

        /*
         * We currently have two bytes left at this point until we
         * crash into the system call handler...
         *
         * Don't worry, the assembler will let us know.
         */


        /* --- System Call --- */
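/*
 * System call convention as used below: the system call number
 * arrives in r8, the first four arguments in the normal argument
 * registers r12-r9, the fifth in r5 (moved into r8 before the icall)
 * and the sixth is pushed on the stack by the user space stub.  The
 * return value is written back to the saved r12 in pt_regs before
 * returning.
 */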

        .org    0x100
system_call:
#ifdef CONFIG_PREEMPT
        mask_interrupts
#endif
        pushm   r12             /* r12_orig */
        stmts   --sp, r0-lr

        mfsr    r0, SYSREG_RAR_SUP
        mfsr    r1, SYSREG_RSR_SUP
#ifdef CONFIG_PREEMPT
        unmask_interrupts
#endif
        zero_fp
        stm     --sp, r0-r1

        /* check for syscall tracing */
        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_SYSCALL_TRACE
        brcs    syscall_trace_enter

syscall_trace_cont:
        cp.w    r8, NR_syscalls
        brhs    syscall_badsys

        lddpc   lr, syscall_table_addr
        ld.w    lr, lr[r8 << 2]
        mov     r8, r5          /* 5th argument (6th is pushed by stub) */
        icall   lr

        .global syscall_return
syscall_return:
        get_thread_info r0
        mask_interrupts         /* make sure we don't miss an interrupt
                                   setting need_resched or sigpending
                                   between sampling and the rets */

        /* Store the return value so that the correct value is loaded below */
        stdsp   sp[REG_R12], r12

        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_ALLWORK_MASK, COH
        brne    syscall_exit_work

syscall_exit_cont:
        popm    r8-r9
        mtsr    SYSREG_RAR_SUP, r8
        mtsr    SYSREG_RSR_SUP, r9
        ldmts   sp++, r0-lr
        sub     sp, -4          /* r12_orig */
        rets

        .align  2
syscall_table_addr:
        .long   sys_call_table

syscall_badsys:
        mov     r12, -ENOSYS
        rjmp    syscall_return

        .global ret_from_fork
ret_from_fork:
        call    schedule_tail

        /* check for syscall tracing */
        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_ALLWORK_MASK, COH
        brne    syscall_exit_work
        rjmp    syscall_exit_cont

syscall_trace_enter:
        pushm   r8-r12
        call    syscall_trace
        popm    r8-r12
        rjmp    syscall_trace_cont

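/*
 * Slow path for syscall return: handle syscall exit tracing, then
 * loop with interrupts masked, re-reading TI_flags after every
 * action, until no reschedule or signal/notify-resume work is left.
 * A pending TIF_BREAKPOINT finally diverts us into monitor mode.
 */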
syscall_exit_work:
        bld     r1, TIF_SYSCALL_TRACE
        brcc    1f
        unmask_interrupts
        call    syscall_trace
        mask_interrupts
        ld.w    r1, r0[TI_flags]

1:      bld     r1, TIF_NEED_RESCHED
        brcc    2f
        unmask_interrupts
        call    schedule
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    1b

2:      mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
        tst     r1, r2
        breq    3f
        unmask_interrupts
        mov     r12, sp
        mov     r11, r0
        call    do_notify_resume
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    1b

3:      bld     r1, TIF_BREAKPOINT
        brcc    syscall_exit_cont
        rjmp    enter_monitor_mode

/* This function expects to find offending PC in SYSREG_RAR_EX */
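/*
 * Complete the pt_regs frame started by the caller: save PC and SR
 * from RAR_EX/RSR_EX, fix up the saved SP if the exception came from
 * a non-user context, and handle the special case where the fault
 * was taken through debug_trampoline (see the comment at 3: below).
 */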
        .type   save_full_context_ex, @function
        .align  2
save_full_context_ex:
        mfsr    r11, SYSREG_RAR_EX
        sub     r9, pc, . - debug_trampoline
        mfsr    r8, SYSREG_RSR_EX
        cp.w    r9, r11
        breq    3f
        mov     r12, r8
        andh    r8, (MODE_MASK >> 16), COH
        brne    2f

1:      pushm   r11, r12        /* PC and SR */
        unmask_exceptions
        ret     r12

2:      sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
        stdsp   sp[4], r10      /* replace saved SP */
        rjmp    1b

        /*
         * The debug handler set up a trampoline to make us
         * automatically enter monitor mode upon return, but since
         * we're saving the full context, we must assume that the
         * exception handler might want to alter the return address
         * and/or status register. So we need to restore the original
         * context and enter monitor mode manually after the exception
         * has been handled.
         */
3:      get_thread_info r8
        ld.w    r11, r8[TI_rar_saved]
        ld.w    r12, r8[TI_rsr_saved]
        rjmp    1b
        .size   save_full_context_ex, . - save_full_context_ex

/* Low-level exception handlers */
handle_critical:
        /*
         * AT32AP700x errata:
         *
         * After a Java stack overflow or underflow trap, any CPU
         * memory access may cause erratic behavior. This will happen
         * when the four least significant bits of the JOSP system
         * register contain any value between 9 and 15 (inclusive).
         *
         * Possible workarounds:
         *   - Don't use the Java Extension Module
         *   - Ensure that the stack overflow and underflow trap
         *     handlers do not do any memory access or trigger any
         *     exceptions before the overflow/underflow condition is
         *     cleared (by incrementing or decrementing the JOSP)
         *   - Make sure that JOSP does not contain any problematic
         *     value before doing any exception or interrupt
         *     processing.
         *   - Set up a critical exception handler which writes a
         *     known-to-be-safe value, e.g. 4, to JOSP before doing
         *     any further processing.
         *
         * We'll use the last workaround for now since we cannot
         * guarantee that user space processes don't use Java mode.
         * Non-well-behaving userland will be terminated with extreme
         * prejudice.
         */
#ifdef CONFIG_CPU_AT32AP700X
        /*
         * There's a chance we can't touch memory, so temporarily
         * borrow PTBR to save the stack pointer while we fix things
         * up...
         */
        mtsr    SYSREG_PTBR, sp
        mov     sp, 4
        mtsr    SYSREG_JOSP, sp
        mfsr    sp, SYSREG_PTBR
        sub     pc, -2
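        /*
         * The "sub pc, -2" above jumps to the next instruction; it is
         * used here, and after the SR writes further down in this
         * file, to flush the pipeline so that the new system register
         * value takes effect before the following instructions run.
         */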

        /* Push most of pt_regs on stack. We'll do the rest later */
        sub     sp, 4
        pushm   r0-r12

        /* PTBR mirrors current_thread_info()->task->active_mm->pgd */
        get_thread_info r0
        ld.w    r1, r0[TI_task]
        ld.w    r2, r1[TSK_active_mm]
        ld.w    r3, r2[MM_pgd]
        mtsr    SYSREG_PTBR, r3
#else
        sub     sp, 4
        pushm   r0-r12
#endif
        sub     r0, sp, -(14 * 4)
        mov     r1, lr
        mfsr    r2, SYSREG_RAR_EX
        mfsr    r3, SYSREG_RSR_EX
        pushm   r0-r3

        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        call    do_critical_exception

        /* We should never get here... */
bad_return:
        sub     r12, pc, (. - 1f)
        bral    panic
        .align  2
1:      .asciz  "Return from critical exception!"

        .align  1
do_bus_error_write:
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        mov     r11, 1
        rjmp    1f

do_bus_error_read:
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        mov     r11, 0
1:      mfsr    r12, SYSREG_BEAR
        mov     r10, sp
        call    do_bus_error
        rjmp    ret_from_exception

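/*
 * NMI entry: build a pt_regs frame from RAR_NMI/RSR_NMI and call
 * do_nmi.  If the NMI interrupted a non-user context (the mode field
 * of the saved SR is non-zero), the saved SP is first replaced with
 * the pre-exception stack pointer and, on return, the frame is
 * unwound without reloading SP from the stack.
 */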
        .align  1
do_nmi_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
        mfsr    r9, SYSREG_RSR_NMI
        mfsr    r8, SYSREG_RAR_NMI
        bfextu  r0, r9, MODE_SHIFT, 3
        brne    2f

1:      pushm   r8, r9          /* PC and SR */
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        call    do_nmi
        popm    r8-r9
        mtsr    SYSREG_RAR_NMI, r8
        tst     r0, r0
        mtsr    SYSREG_RSR_NMI, r9
        brne    3f

        ldmts   sp++, r0-lr
        sub     sp, -4          /* skip r12_orig */
        rete

2:      sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
        stdsp   sp[4], r10      /* replace saved SP */
        rjmp    1b

3:      popm    lr
        sub     sp, -4          /* skip sp */
        popm    r0-r12
        sub     sp, -4          /* skip r12_orig */
        rete

handle_address_fault:
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        call    do_address_exception
        rjmp    ret_from_exception

handle_protection_fault:
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        call    do_page_fault
        rjmp    ret_from_exception

        .align  1
do_illegal_opcode_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        call    do_illegal_opcode
        rjmp    ret_from_exception

do_dtlb_modified:
        pushm   r0-r3
        mfsr    r1, SYSREG_TLBEAR
        mfsr    r0, SYSREG_PTBR
        lsr     r2, r1, PGDIR_SHIFT
        ld.w    r0, r0[r2 << 2]
        lsl     r1, (32 - PGDIR_SHIFT)
        lsr     r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT

        /* Translate to virtual address in P1 */
        andl    r0, 0xf000
        sbr     r0, 31
        add     r2, r0, r1 << 2
        ld.w    r3, r2[0]
        sbr     r3, _PAGE_BIT_DIRTY
        mov     r0, r3
        st.w    r2[0], r3

        /* The page table is up-to-date. Update the TLB entry as well */
        andl    r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
        mtsr    SYSREG_TLBELO, r0

        /* MMUCR[DRP] is updated automatically, so let's go... */
        tlbw

        popm    r0-r3
        rete

do_fpe_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
        call    save_full_context_ex
        unmask_interrupts
        mov     r12, 26
        mov     r11, sp
        call    do_fpe
        rjmp    ret_from_exception

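/*
 * Common return path for the exception handlers above: when returning
 * to user mode, check TI_flags and handle any pending work first;
 * returns to kernel mode go through fault_resume_kernel, which only
 * does a preemption check when CONFIG_PREEMPT is enabled.
 */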
ret_from_exception:
        mask_interrupts
        lddsp   r4, sp[REG_SR]

        andh    r4, (MODE_MASK >> 16), COH
        brne    fault_resume_kernel

        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_WORK_MASK, COH
        brne    fault_exit_work

fault_resume_user:
        popm    r8-r9
        mask_exceptions
        mtsr    SYSREG_RAR_EX, r8
        mtsr    SYSREG_RSR_EX, r9
        ldmts   sp++, r0-lr
        sub     sp, -4
        rete

fault_resume_kernel:
#ifdef CONFIG_PREEMPT
        get_thread_info r0
        ld.w    r2, r0[TI_preempt_count]
        cp.w    r2, 0
        brne    1f
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_NEED_RESCHED
        brcc    1f
        lddsp   r4, sp[REG_SR]
        bld     r4, SYSREG_GM_OFFSET
        brcs    1f
        call    preempt_schedule_irq
1:
#endif

        popm    r8-r9
        mask_exceptions
        mfsr    r1, SYSREG_SR
        mtsr    SYSREG_RAR_EX, r8
        mtsr    SYSREG_RSR_EX, r9
        popm    lr
        sub     sp, -4          /* ignore SP */
        popm    r0-r12
        sub     sp, -4          /* ignore r12_orig */
        rete

irq_exit_work:
        /* Switch to exception mode so that we can share the same code. */
        mfsr    r8, SYSREG_SR
        cbr     r8, SYSREG_M0_OFFSET
        orh     r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
        mtsr    SYSREG_SR, r8
        sub     pc, -2
        get_thread_info r0
        ld.w    r1, r0[TI_flags]

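/*
 * Work loop shared by the fault and IRQ exit paths: reschedule and
 * handle pending signal work via do_notify_resume with interrupts
 * unmasked, then re-read TI_flags with interrupts masked and repeat
 * until nothing is left.  A pending TIF_BREAKPOINT finally sends us
 * into monitor mode.
 */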
fault_exit_work:
        bld     r1, TIF_NEED_RESCHED
        brcc    1f
        unmask_interrupts
        call    schedule
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    fault_exit_work

1:      mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
        tst     r1, r2
        breq    2f
        unmask_interrupts
        mov     r12, sp
        mov     r11, r0
        call    do_notify_resume
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    fault_exit_work

2:      bld     r1, TIF_BREAKPOINT
        brcc    fault_resume_user
        rjmp    enter_monitor_mode

        .section .kprobes.text, "ax", @progbits
        .type   handle_debug, @function
handle_debug:
        sub     sp, 4           /* r12_orig */
        stmts   --sp, r0-lr
        mfsr    r8, SYSREG_RAR_DBG
        mfsr    r9, SYSREG_RSR_DBG
        unmask_exceptions
        pushm   r8-r9
        bfextu  r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        brne    debug_fixup_regs

.Ldebug_fixup_cont:
#ifdef CONFIG_TRACE_IRQFLAGS
        call    trace_hardirqs_off
#endif
        mov     r12, sp
        call    do_debug
        mov     sp, r12

        lddsp   r2, sp[REG_SR]
        bfextu  r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        brne    debug_resume_kernel

        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        mov     r2, _TIF_DBGWORK_MASK
        tst     r1, r2
        brne    debug_exit_work

        bld     r1, TIF_SINGLE_STEP
        brcc    1f
        mfdr    r4, OCD_DC
        sbr     r4, OCD_DC_SS_BIT
        mtdr    OCD_DC, r4

1:      popm    r10, r11
        mask_exceptions
        mtsr    SYSREG_RSR_DBG, r11
        mtsr    SYSREG_RAR_DBG, r10
#ifdef CONFIG_TRACE_IRQFLAGS
        call    trace_hardirqs_on
1:
#endif
        ldmts   sp++, r0-lr
        sub     sp, -4
        retd
        .size   handle_debug, . - handle_debug

/* Mode of the trapped context is in r9 */
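/*
 * The debug exception interrupted a non-user context.  Its banked LR
 * is not visible from the current mode, so temporarily switch SR to
 * the trapped mode to read it, then record that LR and the
 * pre-exception stack pointer in the pt_regs frame.
 */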
        .type   debug_fixup_regs, @function
debug_fixup_regs:
        mfsr    r8, SYSREG_SR
        mov     r10, r8
        bfins   r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        mtsr    SYSREG_SR, r8
        sub     pc, -2
        stdsp   sp[REG_LR], lr
        mtsr    SYSREG_SR, r10
        sub     pc, -2
        sub     r8, sp, -FRAME_SIZE_FULL
        stdsp   sp[REG_SP], r8
        rjmp    .Ldebug_fixup_cont
        .size   debug_fixup_regs, . - debug_fixup_regs

        .type   debug_resume_kernel, @function
debug_resume_kernel:
        mask_exceptions
        popm    r10, r11
        mtsr    SYSREG_RAR_DBG, r10
        mtsr    SYSREG_RSR_DBG, r11
#ifdef CONFIG_TRACE_IRQFLAGS
        bld     r11, SYSREG_GM_OFFSET
        brcc    1f
        call    trace_hardirqs_on
1:
#endif
        mfsr    r2, SYSREG_SR
        mov     r1, r2
        bfins   r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        mtsr    SYSREG_SR, r2
        sub     pc, -2
        popm    lr
        mtsr    SYSREG_SR, r1
        sub     pc, -2
        sub     sp, -4          /* skip SP */
        popm    r0-r12
        sub     sp, -4
        retd
        .size   debug_resume_kernel, . - debug_resume_kernel

        .type   debug_exit_work, @function
debug_exit_work:
        /*
         * We must return from Monitor Mode using a retd, and we must
         * not schedule since that involves the D bit in SR getting
         * cleared by something other than the debug hardware. This
         * may cause undefined behaviour according to the Architecture
         * manual.
         *
         * So we fix up the return address and status and return to a
         * stub below in Exception mode. From there, we can follow the
         * normal exception return path.
         *
         * The real return address and status registers are stored on
         * the stack in the way the exception return path understands,
         * so no need to fix anything up there.
         */
        sub     r8, pc, . - fault_exit_work
        mtsr    SYSREG_RAR_DBG, r8
        mov     r9, 0
        orh     r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
        mtsr    SYSREG_RSR_DBG, r9
        sub     pc, -2
        retd
        .size   debug_exit_work, . - debug_exit_work

        .set    rsr_int0,       SYSREG_RSR_INT0
        .set    rsr_int1,       SYSREG_RSR_INT1
        .set    rsr_int2,       SYSREG_RSR_INT2
        .set    rsr_int3,       SYSREG_RSR_INT3
        .set    rar_int0,       SYSREG_RAR_INT0
        .set    rar_int1,       SYSREG_RAR_INT1
        .set    rar_int2,       SYSREG_RAR_INT2
        .set    rar_int3,       SYSREG_RAR_INT3

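/*
 * The IRQ_LEVEL macro below expands to one low-level handler per
 * interrupt priority level.  The .set aliases above let it pick the
 * matching RAR_INTn/RSR_INTn pair via rar_int\level and rsr_int\level.
 */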
        .macro  IRQ_LEVEL level
        .type   irq_level\level, @function
irq_level\level:
        sub     sp, 4           /* r12_orig */
        stmts   --sp, r0-lr
        mfsr    r8, rar_int\level
        mfsr    r9, rsr_int\level

#ifdef CONFIG_PREEMPT
        sub     r11, pc, (. - system_call)
        cp.w    r11, r8
        breq    4f
#endif

        pushm   r8-r9

        mov     r11, sp
        mov     r12, \level

        call    do_IRQ

        lddsp   r4, sp[REG_SR]
        bfextu  r4, r4, SYSREG_M0_OFFSET, 3
        cp.w    r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
        breq    2f
        cp.w    r4, MODE_USER >> SYSREG_M0_OFFSET
#ifdef CONFIG_PREEMPT
        brne    3f
#else
        brne    1f
#endif

        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_WORK_MASK, COH
        brne    irq_exit_work

1:
#ifdef CONFIG_TRACE_IRQFLAGS
        call    trace_hardirqs_on
#endif
        popm    r8-r9
        mtsr    rar_int\level, r8
        mtsr    rsr_int\level, r9
        ldmts   sp++, r0-lr
        sub     sp, -4          /* ignore r12_orig */
        rete

#ifdef CONFIG_PREEMPT
4:      mask_interrupts
        mfsr    r8, rsr_int\level
        sbr     r8, 16
        mtsr    rsr_int\level, r8
        ldmts   sp++, r0-lr
        sub     sp, -4          /* ignore r12_orig */
        rete
#endif

2:      get_thread_info r0
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_CPU_GOING_TO_SLEEP
#ifdef CONFIG_PREEMPT
        brcc    3f
#else
        brcc    1b
#endif
        sub     r1, pc, . - cpu_idle_skip_sleep
        stdsp   sp[REG_PC], r1
#ifdef CONFIG_PREEMPT
3:      get_thread_info r0
        ld.w    r2, r0[TI_preempt_count]
        cp.w    r2, 0
        brne    1b
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_NEED_RESCHED
        brcc    1b
        lddsp   r4, sp[REG_SR]
        bld     r4, SYSREG_GM_OFFSET
        brcs    1b
        call    preempt_schedule_irq
#endif
        rjmp    1b
        .endm

        .section .irq.text,"ax",@progbits

        .global irq_level0
        .global irq_level1
        .global irq_level2
        .global irq_level3
        IRQ_LEVEL 0
        IRQ_LEVEL 1
        IRQ_LEVEL 2
        IRQ_LEVEL 3

        .section .kprobes.text, "ax", @progbits
        .type   enter_monitor_mode, @function
enter_monitor_mode:
        /*
         * We need to enter monitor mode to do a single step. The
         * monitor code will alter the return address so that we
         * return directly to the user instead of returning here.
         */
        breakpoint
        rjmp    breakpoint_failed

        .size   enter_monitor_mode, . - enter_monitor_mode

        .type   debug_trampoline, @function
        .global debug_trampoline
debug_trampoline:
        /*
         * Save the registers on the stack so that the monitor code
         * can find them easily.
         */
        sub     sp, 4           /* r12_orig */
        stmts   --sp, r0-lr
        get_thread_info r0
        ld.w    r8, r0[TI_rar_saved]
        ld.w    r9, r0[TI_rsr_saved]
        pushm   r8-r9

        /*
         * The monitor code will alter the return address so we don't
         * return here.
         */
        breakpoint
        rjmp    breakpoint_failed
        .size   debug_trampoline, . - debug_trampoline

        .type   breakpoint_failed, @function
breakpoint_failed:
        /*
         * Something went wrong. Perhaps the debug hardware isn't
         * enabled?
         */
        lda.w   r12, msg_breakpoint_failed
        mov     r11, sp
        mov     r10, 9          /* SIGKILL */
        call    die
1:      rjmp    1b

msg_breakpoint_failed:
        .asciz  "Failed to enter Debug Mode"