drivers: power: report battery voltage in AOSP compatible format
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / avr32 / kernel / entry-avr32b.S
1 /*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9 /*
10 * This file contains the low-level entry-points into the kernel, that is,
11 * exception handlers, debug trap handlers, interrupt handlers and the
12 * system call handler.
13 */
14 #include <linux/errno.h>
15
16 #include <asm/asm.h>
17 #include <asm/hardirq.h>
18 #include <asm/irq.h>
19 #include <asm/ocd.h>
20 #include <asm/page.h>
21 #include <asm/pgtable.h>
22 #include <asm/ptrace.h>
23 #include <asm/sysreg.h>
24 #include <asm/thread_info.h>
25 #include <asm/unistd.h>
26
/*
 * Preemption helpers: with CONFIG_PREEMPT, preempt_stop masks
 * interrupts; without it, preempt_stop expands to nothing and the
 * preprocessor consistently renames fault_resume_kernel to
 * fault_restore_all (both the label definition and every use below).
 */
27 #ifdef CONFIG_PREEMPT
28 # define preempt_stop mask_interrupts
29 #else
30 # define preempt_stop
31 # define fault_resume_kernel fault_restore_all
32 #endif
33
/*
 * IRQ_MASK covers the softirq and hardirq count fields of
 * preempt_count.  NOTE(review): not referenced in this chunk.
 */
34 #define __MASK(x) ((1 << (x)) - 1)
35 #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
36 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
37
/*
 * Hardware exception vector table.  The CPU enters at EVBA plus a
 * cause-specific offset; each slot is a 4-byte-aligned "bral" to the
 * real handler (.align 2 keeps every slot at the next word boundary).
 * NOTE(review): slot order must match the AVR32 architecture's
 * exception cause numbering — confirm against the AVR32 Architecture
 * Manual before reordering or inserting anything here.
 */
38 .section .ex.text,"ax",@progbits
39 .align 2
40 exception_vectors:
41 bral handle_critical
42 .align 2
43 bral handle_critical
44 .align 2
45 bral do_bus_error_write
46 .align 2
47 bral do_bus_error_read
48 .align 2
49 bral do_nmi_ll
50 .align 2
51 bral handle_address_fault
52 .align 2
53 bral handle_protection_fault
54 .align 2
55 bral handle_debug
56 .align 2
57 bral do_illegal_opcode_ll
58 .align 2
59 bral do_illegal_opcode_ll
60 .align 2
61 bral do_illegal_opcode_ll
62 .align 2
63 bral do_fpe_ll
64 .align 2
65 bral do_illegal_opcode_ll
66 .align 2
67 bral handle_address_fault
68 .align 2
69 bral handle_address_fault
70 .align 2
71 bral handle_protection_fault
72 .align 2
73 bral handle_protection_fault
74 .align 2
75 bral do_dtlb_modified
76
/* Save/restore exactly the scratch registers the TLB refill path uses */
77 #define tlbmiss_save pushm r0-r3
78 #define tlbmiss_restore popm r0-r3
79
/*
 * TLB-miss entry stubs at their fixed offsets from EVBA
 * (.org 0x50/0x60/0x70).  All three funnel into tlb_miss_common;
 * dtlb_miss_write simply falls through into it.
 */
80 .org 0x50
81 .global itlb_miss
82 itlb_miss:
83 tlbmiss_save
84 rjmp tlb_miss_common
85
86 .org 0x60
87 dtlb_miss_read:
88 tlbmiss_save
89 rjmp tlb_miss_common
90
91 .org 0x70
92 dtlb_miss_write:
93 tlbmiss_save
94
95 .global tlb_miss_common
96 .align 2
/*
 * TLB refill fast path: walk the two-level page table for the
 * faulting address and write a fresh TLB entry.  Register roles:
 *   r0 = faulting virtual address (TLBEAR), later TLBARLO
 *   r1 = page table base (PTBR), later PTE index / MMUCR
 *   r2 = PGD index, later the PTE itself / replacement slot
 *   r3 = second-level table pointer
 * Only r0-r3 may be clobbered here (tlbmiss_save saved exactly those).
 */
97 tlb_miss_common:
98 mfsr r0, SYSREG_TLBEAR
99 mfsr r1, SYSREG_PTBR
100
101 /*
102 * First level lookup: The PGD contains virtual pointers to
103 * the second-level page tables, but they may be NULL if not
104 * present.
105 */
106 pgtbl_lookup:
107 lsr r2, r0, PGDIR_SHIFT
108 ld.w r3, r1[r2 << 2]
109 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
110 cp.w r3, 0
111 breq page_table_not_present
112
113 /* Second level lookup */
114 ld.w r2, r3[r1 << 2]
115 mfsr r0, SYSREG_TLBARLO
116 bld r2, _PAGE_BIT_PRESENT
117 brcc page_not_present
118
119 /* Mark the page as accessed */
120 sbr r2, _PAGE_BIT_ACCESSED
121 st.w r3[r1 << 2], r2
122
123 /* Drop software flags */
124 andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
125 mtsr SYSREG_TLBELO, r2
126
/*
 * clz over the TLBAR accessed bits selects a victim slot; if every
 * entry has been accessed, restart the bookkeeping from entry 0.
 * The chosen slot is written into MMUCR[DRP] before tlbw.
 */
127 /* Figure out which entry we want to replace */
128 mfsr r1, SYSREG_MMUCR
129 clz r2, r0
130 brcc 1f
131 mov r3, -1 /* All entries have been accessed, */
132 mov r2, 0 /* so start at 0 */
133 mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
134
135 1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
136 mtsr SYSREG_MMUCR, r1
137 tlbw
138
139 tlbmiss_restore
140 rete
141
142 /* The slow path of the TLB miss handler */
143 .align 2
/*
 * First-level entry missing.  For addresses with bit 31 set (the
 * kernel half of the address space) the mapping may live only in
 * swapper_pg_dir, so try to copy it from there before declaring a
 * real fault.
 */
144 page_table_not_present:
145 /* Do we need to synchronize with swapper_pg_dir? */
146 bld r0, 31
147 brcs sync_with_swapper_pg_dir
148
/* Genuine fault: build a full pt_regs frame and hand off to C */
149 page_not_present:
150 tlbmiss_restore
151 sub sp, 4
152 stmts --sp, r0-lr
153 call save_full_context_ex
154 mfsr r12, SYSREG_ECR /* 1st arg: exception cause */
155 mov r11, sp /* 2nd arg: pt_regs */
156 call do_page_fault
157 rjmp ret_from_exception
158
159 .align 2
160 sync_with_swapper_pg_dir:
161 /*
162 * If swapper_pg_dir contains a non-NULL second-level page
163 * table pointer, copy it into the current PGD. If not, we
164 * must handle it as a full-blown page fault.
165 *
166 * Jumping back to pgtbl_lookup causes an unnecessary lookup,
167 * but it is guaranteed to be a cache hit, it won't happen
168 * very often, and we absolutely do not want to sacrifice any
169 * performance in the fast path in order to improve this.
170 */
171 mov r1, lo(swapper_pg_dir)
172 orh r1, hi(swapper_pg_dir)
173 ld.w r3, r1[r2 << 2] /* r2 still holds the PGD index */
174 cp.w r3, 0
175 breq page_not_present
176 mfsr r1, SYSREG_PTBR
177 st.w r1[r2 << 2], r3 /* copy entry into the current PGD */
178 rjmp pgtbl_lookup
179
180 /*
181 * We currently have two bytes left at this point until we
182 * crash into the system call handler...
183 *
184 * Don't worry, the assembler will let us know.
185 */
186
187
188 /* --- System Call --- */
189
/*
 * System call entry, at the fixed offset EVBA+0x100.  The syscall
 * number arrives in r8 (used below for the NR_syscalls bound check
 * and table index, then overwritten with the 5th argument).  Builds
 * a pt_regs frame, handles syscall tracing, and dispatches through
 * sys_call_table.  The return value lands in r12.
 */
190 .org 0x100
191 system_call:
192 #ifdef CONFIG_PREEMPT
193 mask_interrupts
194 #endif
195 pushm r12 /* r12_orig */
196 stmts --sp, r0-lr
197
198 mfsr r0, SYSREG_RAR_SUP /* caller's return PC */
199 mfsr r1, SYSREG_RSR_SUP /* caller's status register */
200 #ifdef CONFIG_PREEMPT
201 unmask_interrupts
202 #endif
203 zero_fp
204 stm --sp, r0-r1
205
206 /* check for syscall tracing */
207 get_thread_info r0
208 ld.w r1, r0[TI_flags]
209 bld r1, TIF_SYSCALL_TRACE
210 brcs syscall_trace_enter
211
212 syscall_trace_cont:
213 cp.w r8, NR_syscalls /* unsigned bound check on the syscall nr */
214 brhs syscall_badsys
215
216 lddpc lr, syscall_table_addr
217 ld.w lr, lr[r8 << 2]
218 mov r8, r5 /* 5th argument (6th is pushed by stub) */
219 icall lr
220
221 .global syscall_return
222 syscall_return:
223 get_thread_info r0
224 mask_interrupts /* make sure we don't miss an interrupt
225 setting need_resched or sigpending
226 between sampling and the rets */
227
228 /* Store the return value so that the correct value is loaded below */
229 stdsp sp[REG_R12], r12
230
231 ld.w r1, r0[TI_flags]
232 andl r1, _TIF_ALLWORK_MASK, COH /* COH clears the upper half-word */
233 brne syscall_exit_work
234
/* Fast exit: restore return PC/SR and the user register file */
235 syscall_exit_cont:
236 popm r8-r9
237 mtsr SYSREG_RAR_SUP, r8
238 mtsr SYSREG_RSR_SUP, r9
239 ldmts sp++, r0-lr
240 sub sp, -4 /* r12_orig */
241 rets
242
/* PC-relative literal so system_call can reach sys_call_table via lddpc */
243 .align 2
244 syscall_table_addr:
245 .long sys_call_table
246
/* Out-of-range syscall number: fail with -ENOSYS in r12 */
247 syscall_badsys:
248 mov r12, -ENOSYS
249 rjmp syscall_return
250
/* New child resumes here; returns 0 from the fork-style syscall */
251 .global ret_from_fork
252 ret_from_fork:
253 call schedule_tail
254 mov r12, 0
255 rjmp syscall_return
256
/*
 * New kernel thread: r1/r2 presumably hold the thread function and
 * the syscall_return continuation set up by copy_thread, with its
 * argument handed over in r0 — TODO confirm against copy_thread().
 */
257 .global ret_from_kernel_thread
258 ret_from_kernel_thread:
259 call schedule_tail
260 mov r12, r0
261 mov lr, r2 /* syscall_return */
262 mov pc, r1
263
/* Preserve the live argument registers around the tracer callout */
264 syscall_trace_enter:
265 pushm r8-r12
266 call syscall_trace
267 popm r8-r12
268 rjmp syscall_trace_cont
269
/*
 * Slow-path syscall exit: syscall-exit tracing, reschedule, signal /
 * notify-resume delivery, then pending-breakpoint check.  Each callout
 * runs with interrupts enabled; TI_flags is re-sampled with interrupts
 * masked before looping, so no wakeup can be lost before rets.
 * Expects r0 = thread_info, r1 = TI_flags (from syscall_return).
 */
270 syscall_exit_work:
271 bld r1, TIF_SYSCALL_TRACE
272 brcc 1f
273 unmask_interrupts
274 call syscall_trace
275 mask_interrupts
276 ld.w r1, r0[TI_flags]
277
278 1: bld r1, TIF_NEED_RESCHED
279 brcc 2f
280 unmask_interrupts
281 call schedule
282 mask_interrupts
283 ld.w r1, r0[TI_flags]
284 rjmp 1b
285
286 2: mov r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
287 tst r1, r2
288 breq 3f
289 unmask_interrupts
290 mov r12, sp /* 1st arg: pt_regs */
291 mov r11, r0 /* 2nd arg: thread_info */
292 call do_notify_resume
293 mask_interrupts
294 ld.w r1, r0[TI_flags]
295 rjmp 1b
296
297 3: bld r1, TIF_BREAKPOINT
298 brcc syscall_exit_cont
299 rjmp enter_monitor_mode
300
301 /* This function expects to find offending PC in SYSREG_RAR_EX */
302 .type save_full_context_ex, @function
303 .align 2
/*
 * Complete the pt_regs frame for an exception handler by pushing the
 * trapped PC (RAR_EX) and SR (RSR_EX), then re-enable exceptions.
 * r9 = address of debug_trampoline (PC-relative); if RAR_EX points at
 * the trampoline, the real PC/SR were stashed in thread_info by the
 * debug handler and are restored from there (path 3).  If the trapped
 * context was not user mode, the saved SP slot must be fixed up to
 * the pre-exception value (path 2).
 */
304 save_full_context_ex:
305 mfsr r11, SYSREG_RAR_EX
306 sub r9, pc, . - debug_trampoline /* r9 = &debug_trampoline */
307 mfsr r8, SYSREG_RSR_EX
308 cp.w r9, r11
309 breq 3f
310 mov r12, r8
311 andh r8, (MODE_MASK >> 16), COH /* nonzero => not user mode */
312 brne 2f
313
314 1: pushm r11, r12 /* PC and SR */
315 unmask_exceptions
316 ret r12
317
318 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
319 stdsp sp[4], r10 /* replace saved SP */
320 rjmp 1b
321
322 /*
323 * The debug handler set up a trampoline to make us
324 * automatically enter monitor mode upon return, but since
325 * we're saving the full context, we must assume that the
326 * exception handler might want to alter the return address
327 * and/or status register. So we need to restore the original
328 * context and enter monitor mode manually after the exception
329 * has been handled.
330 */
331 3: get_thread_info r8
332 ld.w r11, r8[TI_rar_saved]
333 ld.w r12, r8[TI_rsr_saved]
334 rjmp 1b
335 .size save_full_context_ex, . - save_full_context_ex
336
337 /* Low-level exception handlers */
/*
 * Unrecoverable / critical exception entry.  On AT32AP700x, first
 * neutralize the JOSP erratum (see comment below) before touching
 * memory, then build a pt_regs frame by hand and call
 * do_critical_exception(), which is expected not to return.
 */
338 handle_critical:
339 /*
340 * AT32AP700x errata:
341 *
342 * After a Java stack overflow or underflow trap, any CPU
343 * memory access may cause erratic behavior. This will happen
344 * when the four least significant bits of the JOSP system
345 * register contains any value between 9 and 15 (inclusive).
346 *
347 * Possible workarounds:
348 * - Don't use the Java Extension Module
349 * - Ensure that the stack overflow and underflow trap
350 * handlers do not do any memory access or trigger any
351 * exceptions before the overflow/underflow condition is
352 * cleared (by incrementing or decrementing the JOSP)
353 * - Make sure that JOSP does not contain any problematic
354 * value before doing any exception or interrupt
355 * processing.
356 * - Set up a critical exception handler which writes a
357 * known-to-be-safe value, e.g. 4, to JOSP before doing
358 * any further processing.
359 *
360 * We'll use the last workaround for now since we cannot
361 * guarantee that user space processes don't use Java mode.
362 * Non-well-behaving userland will be terminated with extreme
363 * prejudice.
364 */
365 #ifdef CONFIG_CPU_AT32AP700X
366 /*
367 * There's a chance we can't touch memory, so temporarily
368 * borrow PTBR to save the stack pointer while we fix things
369 * up...
370 */
371 mtsr SYSREG_PTBR, sp
372 mov sp, 4
373 mtsr SYSREG_JOSP, sp
374 mfsr sp, SYSREG_PTBR
375 sub pc, -2 /* jump to next insn; flushes pipeline after mtsr */
376
377 /* Push most of pt_regs on stack. We'll do the rest later */
378 sub sp, 4
379 pushm r0-r12
380
381 /* PTBR mirrors current_thread_info()->task->active_mm->pgd */
382 get_thread_info r0
383 ld.w r1, r0[TI_task]
384 ld.w r2, r1[TSK_active_mm]
385 ld.w r3, r2[MM_pgd]
386 mtsr SYSREG_PTBR, r3
387 #else
388 sub sp, 4
389 pushm r0-r12
390 #endif
/* Finish pt_regs: r0 = pre-exception SP, r1 = lr, r2/r3 = PC/SR */
391 sub r0, sp, -(14 * 4)
392 mov r1, lr
393 mfsr r2, SYSREG_RAR_EX
394 mfsr r3, SYSREG_RSR_EX
395 pushm r0-r3
396
397 mfsr r12, SYSREG_ECR /* 1st arg: exception cause */
398 mov r11, sp /* 2nd arg: pt_regs */
399 call do_critical_exception
400
401 /* We should never get here... */
/* Jump to panic() with the message string address in r12 (1st arg) */
402 bad_return:
403 sub r12, pc, (. - 1f)
404 lddpc pc, 2f
405 .align 2
406 1: .asciz "Return from critical exception!"
407 2: .long panic
408
/*
 * Bus error handlers.  Both variants build a full pt_regs frame and
 * share the tail at 1:, passing do_bus_error() the faulting bus
 * address (BEAR, r12 = 1st arg), a write flag (r11 = 2nd arg,
 * 1 = write, 0 = read) and pt_regs (r10 = 3rd arg).
 */
409 .align 1
410 do_bus_error_write:
411 sub sp, 4
412 stmts --sp, r0-lr
413 call save_full_context_ex
414 mov r11, 1
415 rjmp 1f
416
417 do_bus_error_read:
418 sub sp, 4
419 stmts --sp, r0-lr
420 call save_full_context_ex
421 mov r11, 0
422 1: mfsr r12, SYSREG_BEAR
423 mov r10, sp
424 call do_bus_error
425 rjmp ret_from_exception
426
/*
 * NMI entry.  r0 = mode bits of the interrupted context (0 = user).
 * For non-user contexts the saved SP slot is fixed up first (2:),
 * and the restore uses plain popm (3:) instead of ldmts, which is
 * the user-context restore form.  r0 survives the do_nmi() call to
 * select the restore path.
 */
427 .align 1
428 do_nmi_ll:
429 sub sp, 4
430 stmts --sp, r0-lr
431 mfsr r9, SYSREG_RSR_NMI
432 mfsr r8, SYSREG_RAR_NMI
433 bfextu r0, r9, MODE_SHIFT, 3
434 brne 2f
435
436 1: pushm r8, r9 /* PC and SR */
437 mfsr r12, SYSREG_ECR /* 1st arg: exception cause */
438 mov r11, sp /* 2nd arg: pt_regs */
439 call do_nmi
440 popm r8-r9
441 mtsr SYSREG_RAR_NMI, r8
442 tst r0, r0 /* set flags now; next mtsr leaves them alone */
443 mtsr SYSREG_RSR_NMI, r9
444 brne 3f
445
446 ldmts sp++, r0-lr
447 sub sp, -4 /* skip r12_orig */
448 rete
449
450 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
451 stdsp sp[4], r10 /* replace saved SP */
452 rjmp 1b
453
454 3: popm lr
455 sub sp, -4 /* skip sp */
456 popm r0-r12
457 sub sp, -4 /* skip r12_orig */
458 rete
459
/*
 * Simple exception stubs sharing one shape: build a pt_regs frame,
 * complete it via save_full_context_ex, then call the C handler with
 * r12 = exception cause (ECR, 1st arg) and r11 = pt_regs (2nd arg).
 */
460 handle_address_fault:
461 sub sp, 4
462 stmts --sp, r0-lr
463 call save_full_context_ex
464 mfsr r12, SYSREG_ECR
465 mov r11, sp
466 call do_address_exception
467 rjmp ret_from_exception
468
469 handle_protection_fault:
470 sub sp, 4
471 stmts --sp, r0-lr
472 call save_full_context_ex
473 mfsr r12, SYSREG_ECR
474 mov r11, sp
475 call do_page_fault
476 rjmp ret_from_exception
477
478 .align 1
479 do_illegal_opcode_ll:
480 sub sp, 4
481 stmts --sp, r0-lr
482 call save_full_context_ex
483 mfsr r12, SYSREG_ECR
484 mov r11, sp
485 call do_illegal_opcode
486 rjmp ret_from_exception
487
/*
 * DTLB-modified fast path: first write to a clean page.  Set the
 * DIRTY bit in the PTE and rewrite the TLB entry, all without
 * building a pt_regs frame.  r1 = PTE index (isolated by the shift
 * pair), r2 = PTE address, r3 = PTE value.
 */
488 do_dtlb_modified:
489 pushm r0-r3
490 mfsr r1, SYSREG_TLBEAR
491 mfsr r0, SYSREG_PTBR
492 lsr r2, r1, PGDIR_SHIFT
493 ld.w r0, r0[r2 << 2]
494 lsl r1, (32 - PGDIR_SHIFT)
495 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
496
497 /* Translate to virtual address in P1 */
/* NOTE(review): masks the low bits of the pmd entry and sets bit 31
   to form the P1 (untranslated) alias — confirm segment layout. */
498 andl r0, 0xf000
499 sbr r0, 31
500 add r2, r0, r1 << 2
501 ld.w r3, r2[0]
502 sbr r3, _PAGE_BIT_DIRTY
503 mov r0, r3
504 st.w r2[0], r3
505
506 /* The page table is up-to-date. Update the TLB entry as well */
507 andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
508 mtsr SYSREG_TLBELO, r0
509
510 /* MMUCR[DRP] is updated automatically, so let's go... */
511 tlbw
512
513 popm r0-r3
514 rete
515
/*
 * Floating-point exception: full frame, then do_fpe() with interrupts
 * enabled.  r12 = 26 is passed where the other stubs pass ECR —
 * presumably the FP exception cause number; TODO confirm against the
 * AVR32 exception cause list.
 */
516 do_fpe_ll:
517 sub sp, 4
518 stmts --sp, r0-lr
519 call save_full_context_ex
520 unmask_interrupts
521 mov r12, 26
522 mov r11, sp
523 call do_fpe
524 rjmp ret_from_exception
525
/*
 * Common exception return.  Nonzero mode bits in the saved SR mean
 * we are returning to kernel context (fault_resume_kernel); otherwise
 * check TI_flags for pending work before restoring the user context.
 */
526 ret_from_exception:
527 mask_interrupts
528 lddsp r4, sp[REG_SR]
529
530 andh r4, (MODE_MASK >> 16), COH /* COH clears the lower half-word */
531 brne fault_resume_kernel
532
533 get_thread_info r0
534 ld.w r1, r0[TI_flags]
535 andl r1, _TIF_WORK_MASK, COH
536 brne fault_exit_work
537
/* Restore the trapped user context: PC/SR via RAR/RSR_EX, then regs */
538 fault_resume_user:
539 popm r8-r9
540 mask_exceptions
541 mtsr SYSREG_RAR_EX, r8
542 mtsr SYSREG_RSR_EX, r9
543 ldmts sp++, r0-lr
544 sub sp, -4 /* skip r12_orig */
545 rete
546
/*
 * Return to kernel context.  With CONFIG_PREEMPT, preempt first if
 * preempt_count is zero, TIF_NEED_RESCHED is set, and the interrupted
 * context did not have interrupts globally masked (GM bit in saved
 * SR).  Restore uses plain popm, not ldmts (that is the user form).
 * Note: on !PREEMPT builds the preprocessor renames this label to
 * fault_restore_all (see the #define at the top of the file).
 */
547 fault_resume_kernel:
548 #ifdef CONFIG_PREEMPT
549 get_thread_info r0
550 ld.w r2, r0[TI_preempt_count]
551 cp.w r2, 0
552 brne 1f
553 ld.w r1, r0[TI_flags]
554 bld r1, TIF_NEED_RESCHED
555 brcc 1f
556 lddsp r4, sp[REG_SR]
557 bld r4, SYSREG_GM_OFFSET
558 brcs 1f
559 call preempt_schedule_irq
560 1:
561 #endif
562
563 popm r8-r9
564 mask_exceptions
565 mfsr r1, SYSREG_SR
566 mtsr SYSREG_RAR_EX, r8
567 mtsr SYSREG_RSR_EX, r9
568 popm lr
569 sub sp, -4 /* ignore SP */
570 popm r0-r12
571 sub sp, -4 /* ignore r12_orig */
572 rete
573
/*
 * irq_exit_work: entered from the IRQ path with pending TI_flags
 * work.  Flips SR's mode bits from interrupt to exception mode so the
 * exception-path work loop and fault_resume_user can be shared.
 * fault_exit_work expects r0 = thread_info, r1 = TI_flags and loops,
 * re-sampling the flags with interrupts masked after each callout.
 */
574 irq_exit_work:
575 /* Switch to exception mode so that we can share the same code. */
576 mfsr r8, SYSREG_SR
577 cbr r8, SYSREG_M0_OFFSET
578 orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
579 mtsr SYSREG_SR, r8
580 sub pc, -2 /* jump to next insn; flushes pipeline after SR write */
581 get_thread_info r0
582 ld.w r1, r0[TI_flags]
583
584 fault_exit_work:
585 bld r1, TIF_NEED_RESCHED
586 brcc 1f
587 unmask_interrupts
588 call schedule
589 mask_interrupts
590 ld.w r1, r0[TI_flags]
591 rjmp fault_exit_work
592
593 1: mov r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
594 tst r1, r2
595 breq 2f
596 unmask_interrupts
597 mov r12, sp /* 1st arg: pt_regs */
598 mov r11, r0 /* 2nd arg: thread_info */
599 call do_notify_resume
600 mask_interrupts
601 ld.w r1, r0[TI_flags]
602 rjmp fault_exit_work
603
604 2: bld r1, TIF_BREAKPOINT
605 brcc fault_resume_user
606 rjmp enter_monitor_mode
607
608 .section .kprobes.text, "ax", @progbits
609 .type handle_debug, @function
/*
 * Debug (monitor mode) entry.  Builds a pt_regs frame from RAR/RSR_DBG
 * and calls do_debug().  If the trapped context was not user mode
 * (mode bits != 0) the frame's LR/SP slots must be patched first
 * (debug_fixup_regs) and the return goes via debug_resume_kernel.
 * On return to user, TIF_SINGLE_STEP re-arms the OCD single-step bit.
 */
610 handle_debug:
611 sub sp, 4 /* r12_orig */
612 stmts --sp, r0-lr
613 mfsr r8, SYSREG_RAR_DBG
614 mfsr r9, SYSREG_RSR_DBG
615 unmask_exceptions
616 pushm r8-r9
617 bfextu r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
618 brne debug_fixup_regs
619
620 .Ldebug_fixup_cont:
621 #ifdef CONFIG_TRACE_IRQFLAGS
622 call trace_hardirqs_off
623 #endif
624 mov r12, sp /* 1st arg: pt_regs; do_debug may move the frame */
625 call do_debug
626 mov sp, r12
627
628 lddsp r2, sp[REG_SR]
629 bfextu r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
630 brne debug_resume_kernel
631
632 get_thread_info r0
633 ld.w r1, r0[TI_flags]
634 mov r2, _TIF_DBGWORK_MASK
635 tst r1, r2
636 brne debug_exit_work
637
638 bld r1, TIF_SINGLE_STEP
639 brcc 1f
640 mfdr r4, OCD_DC /* re-arm hardware single step */
641 sbr r4, OCD_DC_SS_BIT
642 mtdr OCD_DC, r4
643
644 1: popm r10,r11
645 mask_exceptions
646 mtsr SYSREG_RSR_DBG, r11
647 mtsr SYSREG_RAR_DBG, r10
/*
 * NOTE(review): upstream guards this trace_hardirqs_on call with a
 * GM-bit test branching to the "1:" below; here the call is made
 * unconditionally and the local label is left unreferenced inside
 * this #ifdef.  Harmless to assembly, but verify against upstream
 * if irq-flags tracing misbehaves on this path.
 */
648 #ifdef CONFIG_TRACE_IRQFLAGS
649 call trace_hardirqs_on
650 1:
651 #endif
652 ldmts sp++, r0-lr
653 sub sp, -4
654 retd
655 .size handle_debug, . - handle_debug
656
657 /* Mode of the trapped context is in r9 */
658 .type debug_fixup_regs, @function
/*
 * The debug exception came from a non-user context, so the LR and SP
 * slots saved by stmts belong to the wrong register bank.  Temporarily
 * switch SR's mode bits to the trapped mode to read the real LR, then
 * reconstruct the pre-trap SP and patch both into the frame.
 */
659 debug_fixup_regs:
660 mfsr r8, SYSREG_SR
661 mov r10, r8 /* keep the original SR to switch back */
662 bfins r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
663 mtsr SYSREG_SR, r8
664 sub pc, -2 /* flush pipeline after SR write */
665 stdsp sp[REG_LR], lr
666 mtsr SYSREG_SR, r10
667 sub pc, -2 /* flush pipeline after SR write */
668 sub r8, sp, -FRAME_SIZE_FULL /* SP before the frame was pushed */
669 stdsp sp[REG_SP], r8
670 rjmp .Ldebug_fixup_cont
671 .size debug_fixup_regs, . - debug_fixup_regs
672
673 .type debug_resume_kernel, @function
/*
 * Return from the debug handler to a non-user context (mode in r3).
 * Mirror image of debug_fixup_regs: temporarily switch SR's mode bits
 * to the trapped mode so popm restores LR into the right bank, then
 * switch back and restore the remaining registers with plain popm.
 */
674 debug_resume_kernel:
675 mask_exceptions
676 popm r10, r11
677 mtsr SYSREG_RAR_DBG, r10
678 mtsr SYSREG_RSR_DBG, r11
679 #ifdef CONFIG_TRACE_IRQFLAGS
680 bld r11, SYSREG_GM_OFFSET
681 brcc 1f
682 call trace_hardirqs_on
683 1:
684 #endif
685 mfsr r2, SYSREG_SR
686 mov r1, r2 /* keep the original SR to switch back */
687 bfins r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
688 mtsr SYSREG_SR, r2
689 sub pc, -2 /* flush pipeline after SR write */
690 popm lr
691 mtsr SYSREG_SR, r1
692 sub pc, -2 /* flush pipeline after SR write */
693 sub sp, -4 /* skip SP */
694 popm r0-r12
695 sub sp, -4 /* skip r12_orig */
696 retd
697 .size debug_resume_kernel, . - debug_resume_kernel
698
699 .type debug_exit_work, @function
700 debug_exit_work:
701 /*
702 * We must return from Monitor Mode using a retd, and we must
703 * not schedule since that involves the D bit in SR getting
704 * cleared by something other than the debug hardware. This
705 * may cause undefined behaviour according to the Architecture
706 * manual.
707 *
708 * So we fix up the return address and status and return to a
709 * stub below in Exception mode. From there, we can follow the
710 * normal exception return path.
711 *
712 * The real return address and status registers are stored on
713 * the stack in the way the exception return path understands,
714 * so no need to fix anything up there.
715 */
716 sub r8, pc, . - fault_exit_work /* r8 = &fault_exit_work */
717 mtsr SYSREG_RAR_DBG, r8
718 mov r9, 0
719 orh r9, hi(SR_EM | SR_GM | MODE_EXCEPTION) /* craft an exception-mode SR */
720 mtsr SYSREG_RSR_DBG, r9
721 sub pc, -2 /* flush pipeline after sysreg writes */
722 retd /* "return" into fault_exit_work in exception mode */
723 .size debug_exit_work, . - debug_exit_work
724
/*
 * Aliases so the IRQ_LEVEL macro can paste the level number into the
 * RSR/RAR system-register names.
 */
725 .set rsr_int0, SYSREG_RSR_INT0
726 .set rsr_int1, SYSREG_RSR_INT1
727 .set rsr_int2, SYSREG_RSR_INT2
728 .set rsr_int3, SYSREG_RSR_INT3
729 .set rar_int0, SYSREG_RAR_INT0
730 .set rar_int1, SYSREG_RAR_INT1
731 .set rar_int2, SYSREG_RAR_INT2
732 .set rar_int3, SYSREG_RAR_INT3
733
/*
 * Interrupt entry for one priority level: build pt_regs, call
 * do_IRQ(level, regs), then pick a return path from the interrupted
 * context's mode bits: user (check TI_flags work), supervisor
 * (idle-sleep fixup at 2:), or other kernel mode.  With
 * CONFIG_PREEMPT there is a special case (4:) for an interrupt taken
 * exactly at the system_call entry point, and a preemption check (3:)
 * before returning to kernel mode.
 */
734 .macro IRQ_LEVEL level
735 .type irq_level\level, @function
736 irq_level\level:
737 sub sp, 4 /* r12_orig */
738 stmts --sp,r0-lr
739 mfsr r8, rar_int\level
740 mfsr r9, rsr_int\level
741
742 #ifdef CONFIG_PREEMPT
743 sub r11, pc, (. - system_call) /* r11 = &system_call */
744 cp.w r11, r8
745 breq 4f
746 #endif
747
748 pushm r8-r9
749
750 mov r11, sp /* 2nd arg: pt_regs */
751 mov r12, \level /* 1st arg: irq level */
752
753 call do_IRQ
754
755 lddsp r4, sp[REG_SR]
756 bfextu r4, r4, SYSREG_M0_OFFSET, 3
757 cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
758 breq 2f
759 cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
760 #ifdef CONFIG_PREEMPT
761 brne 3f
762 #else
763 brne 1f
764 #endif
765
766 get_thread_info r0
767 ld.w r1, r0[TI_flags]
768 andl r1, _TIF_WORK_MASK, COH
769 brne irq_exit_work
770
771 1:
772 #ifdef CONFIG_TRACE_IRQFLAGS
773 call trace_hardirqs_on
774 #endif
775 popm r8-r9
776 mtsr rar_int\level, r8
777 mtsr rsr_int\level, r9
778 ldmts sp++,r0-lr
779 sub sp, -4 /* ignore r12_orig */
780 rete
781
/*
 * Interrupt hit exactly at system_call, before its mask_interrupts
 * ran.  Setting bit 16 in the return SR — presumably the GM bit;
 * TODO confirm — makes rete resume the syscall with interrupts
 * already masked, as the preemptible syscall entry expects.
 */
782 #ifdef CONFIG_PREEMPT
783 4: mask_interrupts
784 mfsr r8, rsr_int\level
785 sbr r8, 16
786 mtsr rsr_int\level, r8
787 ldmts sp++, r0-lr
788 sub sp, -4 /* ignore r12_orig */
789 rete
790 #endif
791
/*
 * Interrupted supervisor mode: if the CPU was about to sleep in the
 * idle loop, redirect its saved PC to cpu_idle_skip_sleep so the
 * sleep is skipped now that an interrupt has arrived.
 */
792 2: get_thread_info r0
793 ld.w r1, r0[TI_flags]
794 bld r1, TIF_CPU_GOING_TO_SLEEP
795 #ifdef CONFIG_PREEMPT
796 brcc 3f
797 #else
798 brcc 1b
799 #endif
800 sub r1, pc, . - cpu_idle_skip_sleep /* r1 = &cpu_idle_skip_sleep */
801 stdsp sp[REG_PC], r1
/* Preempt the interrupted kernel context if it is safe to do so */
802 #ifdef CONFIG_PREEMPT
803 3: get_thread_info r0
804 ld.w r2, r0[TI_preempt_count]
805 cp.w r2, 0
806 brne 1b
807 ld.w r1, r0[TI_flags]
808 bld r1, TIF_NEED_RESCHED
809 brcc 1b
810 lddsp r4, sp[REG_SR]
811 bld r4, SYSREG_GM_OFFSET
812 brcs 1b
813 call preempt_schedule_irq
814 #endif
815 rjmp 1b
816 .endm
817
818 .section .irq.text,"ax",@progbits
819
/* Instantiate one handler per interrupt priority level */
820 .global irq_level0
821 .global irq_level1
822 .global irq_level2
823 .global irq_level3
824 IRQ_LEVEL 0
825 IRQ_LEVEL 1
826 IRQ_LEVEL 2
827 IRQ_LEVEL 3
828
829 .section .kprobes.text, "ax", @progbits
830 .type enter_monitor_mode, @function
831 enter_monitor_mode:
832 /*
833 * We need to enter monitor mode to do a single step. The
834 * monitor code will alter the return address so that we
835 * return directly to the user instead of returning here.
836 */
837 breakpoint
838 rjmp breakpoint_failed /* only reached if debug hw is unavailable */
839
840 .size enter_monitor_mode, . - enter_monitor_mode
841
842 .type debug_trampoline, @function
843 .global debug_trampoline
/*
 * Trampoline the debug handler points RAR_EX at (recognized by
 * save_full_context_ex): rebuild a pt_regs frame from the rar/rsr
 * values stashed in thread_info, then trap into monitor mode.
 */
844 debug_trampoline:
845 /*
846 * Save the registers on the stack so that the monitor code
847 * can find them easily.
848 */
849 sub sp, 4 /* r12_orig */
850 stmts --sp, r0-lr
851 get_thread_info r0
852 ld.w r8, r0[TI_rar_saved]
853 ld.w r9, r0[TI_rsr_saved]
854 pushm r8-r9
855
856 /*
857 * The monitor code will alter the return address so we don't
858 * return here.
859 */
860 breakpoint
861 rjmp breakpoint_failed
862 .size debug_trampoline, . - debug_trampoline
863
/* Fatal: die(msg, pt_regs, SIGKILL), then spin forever */
864 .type breakpoint_failed, @function
865 breakpoint_failed:
866 /*
867 * Something went wrong. Perhaps the debug hardware isn't
868 * enabled?
869 */
870 lda.w r12, msg_breakpoint_failed /* 1st arg: message */
871 mov r11, sp /* 2nd arg: pt_regs */
872 mov r10, 9 /* SIGKILL */
873 call die
874 1: rjmp 1b /* die() should not return; spin if it does */
875
876 msg_breakpoint_failed:
877 .asciz "Failed to enter Debug Mode"