/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
#include "head_booke.h"
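/*
 * Book-E critical/debug/machine-check exceptions run on their own
 * exception-level stacks.  This helper copies the r10/r11 values that the
 * level-specific prologue saved on that stack into the normal exception
 * frame at r11, using the level's SPRG to preserve r8 across the copy.
 */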
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug. Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
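	/*
	 * Bounds-check the syscall number in r0 against NR_syscalls and
	 * dispatch through sys_call_table (4 bytes per entry); out-of-range
	 * numbers branch to 66: and return -ENOSYS.
	 */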
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
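	/*
	 * Syscall return convention: values in the range -_LAST_ERRNO..-1
	 * are errors, reported to user space by negating r3 and setting the
	 * SO bit in CR0.
	 */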
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)		/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000		/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up. The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)		/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000		/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
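	/* clear the low bit of _TRAP to record that the full register set
	   (including r13-r31) has now been saved */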
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

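/*
 * Fast return path for exceptions that were handled entirely in the
 * low-level prologue code (no C handler was called).  On entry r11
 * points to the exception frame and r9/r12 hold the saved MSR/NIP;
 * restore the remaining registers and return with RFI.
 */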
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value. The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info()->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
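	/* If icache_44x_need_flush is set, invalidate the instruction cache
	   (iccci) and clear the flag before returning to the interrupted
	   context. */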
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1. Therefore we clear the
	 * MSR:RI bit to indicate this. If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below. -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0. Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

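/*
 * global_dbcr0 holds two words per CPU: the saved global DBCR0 value and
 * a use count (incremented in load_dbcr0, decremented on exception entry
 * in transfer_to_handler).
 */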
	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
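	/* If the exception hit inside the exc_exit_restart window, count it
	   and arrange to resume at exc_exit_restart; otherwise fall through
	   to 3: and kill the current process. */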
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows. Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

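/*
 * Function-tracer entry points.  With -pg, gcc emits a call to _mcount at
 * the start of every function.  The stubs below save the volatile
 * registers, compute the address of the mcount call site (LR minus
 * MCOUNT_INSN_SIZE) and pass it to the tracer before restoring state and
 * returning to the instrumented function.
 */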
#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	stwu	r1,-48(r1)
	stw	r3, 12(r1)
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3
	stw	r7, 28(r1)
	mfcr	r5
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)
	stw	r5, 8(r1)
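	/* LR holds the return address into the traced function; back up by
	   MCOUNT_INSN_SIZE so r3 carries the address of the mcount call site
	   itself. */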
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	nop
	lwz	r6, 8(r1)
	lwz	r0, 44(r1)
	lwz	r3, 12(r1)
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48
	bctr

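/*
 * ftrace_caller is the routine that patched mcount call sites branch to
 * when dynamic ftrace is active; the 'bl ftrace_stub' at ftrace_call
 * below is itself rewritten at runtime to invoke the current tracer.
 */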
_GLOBAL(ftrace_caller)
	/* Based on objdump output from glibc */
	stwu	r1,-48(r1)
	stw	r3, 12(r1)
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3
	lwz	r4, 52(r1)
	mfcr	r5
	stw	r7, 28(r1)
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)
	stw	r5, 8(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	lwz	r6, 8(r1)
	lwz	r0, 44(r1)
	lwz	r3, 12(r1)
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)
	stwu	r1,-48(r1)
	stw	r3, 12(r1)
	stw	r4, 16(r1)
	stw	r5, 20(r1)
	stw	r6, 24(r1)
	mflr	r3
	lwz	r4, 52(r1)
	mfcr	r5
	stw	r7, 28(r1)
	stw	r8, 32(r1)
	stw	r9, 36(r1)
	stw	r10,40(r1)
	stw	r3, 44(r1)
	stw	r5, 8(r1)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl

	nop

	lwz	r6, 8(r1)
	lwz	r0, 44(r1)
	lwz	r3, 12(r1)
	mtctr	r0
	lwz	r4, 16(r1)
	mtcr	r6
	lwz	r5, 20(r1)
	lwz	r6, 24(r1)
	lwz	r0, 52(r1)
	lwz	r7, 28(r1)
	lwz	r8, 32(r1)
	mtlr	r0
	lwz	r9, 36(r1)
	lwz	r10,40(r1)
	addi	r1, r1, 48
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_FTRACE */