/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
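	/* 0x7265677368657265 is the hex for the ASCII string
	 * "regshere" (the TC symbol name spells the same bytes), so
	 * stack dump code can recognise exception frame boundaries. */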

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

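/*
 * Entry state, as set up by the system call vector (head_64.S in
 * this kernel vintage): r0 holds the syscall number, r9 holds the
 * caller's r13, r11 holds SRR0 (the NIP) and r12 holds SRR1 (the
 * MSR), while r13 points at the PACA.  The stores below depend on
 * exactly this layout.
 */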
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
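	/* 0xc00 is the system call trap vector; the low bit set in
	 * the _TRAP value records that the non-volatile GPRs have not
	 * been saved (save_nvgprs below tests and clears that bit). */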
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */
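/*
 * In C terms the dispatch above is roughly the following sketch
 * (not literal kernel code).  Each sys_call_table slot is 16 bytes:
 * a native 64-bit handler pointer followed by a 32-bit (compat)
 * handler pointer, hence the slwi by 4 and the addi of 8 for the
 * compat column:
 *
 *	typedef long (*syscall_fn)(long, long, long, long, long, long);
 *	struct slot { syscall_fn native; syscall_fn compat; };
 *	syscall_fn f = test_thread_flag(TIF_32BIT)
 *		? table[num].compat : table[num].native;
 *	ret = f(r3, r4, r5, r6, r7, r8);
 */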

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
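	/* The rldicl/rotldi pair rotates MSR_EE up to the top bit,
	 * masks it off, then rotates the register back into place:
	 * the net effect is to clear MSR_EE and nothing else. */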
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

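/*
 * The unsigned compare of r3 against -_LAST_ERRNO catches return
 * values in the [-_LAST_ERRNO, -1] range: those are errno codes, so
 * set the CR0.SO bit (which the usermode syscall stubs test) and
 * hand back the positive errno value.
 */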
syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 holds -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
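	/* The ldarx/stdcx. loop above is the usual PPC atomic
	 * read-modify-write: stdcx. fails if the reservation was
	 * lost, so the TIF bits are cleared without a lock. */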

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit
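	/* The li r3,0 above is what makes fork()/clone() return 0 in
	 * the child; the parent's return value comes from sys_fork
	 * itself on the other side of the switch. */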

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
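	/* Clearing MSR_FP (and MSR_VEC) here means the next FP or
	 * Altivec instruction in the new task will trap, so the
	 * FP/VMX state is switched lazily rather than on every
	 * context switch. */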
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l

	/* Update the last bolted SLB */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
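	/* The shadow ESID is cleared before the VSID is rewritten so
	 * that anything replaying the shadow buffer (e.g. the
	 * hypervisor after a partition switch) never sees a valid
	 * but half-updated entry. */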

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10	/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACASOFTIRQEN(r13)

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* extract EE bit and use it to restore paca->hard_enabled */
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

9994a338
PM
496 andi. r0,r3,MSR_PR
497
498 /*
499 * r13 is our per cpu area, only restore it if we are returning to
500 * userspace
501 */
502 beq 1f
c6622f63 503 ACCOUNT_CPU_USER_EXIT(r3, r4)
9994a338
PM
504 REST_GPR(13, r1)
5051:
506 ld r3,_CTR(r1)
507 ld r0,_LINK(r1)
508 mtctr r3
509 mtlr r0
510 ld r3,_XER(r1)
511 mtspr SPRN_XER,r3
512
513 REST_8GPRS(5, r1)
514
515 stdcx. r0,0,r1 /* to clear the reservation */
516
517 mfmsr r0
518 li r2, MSR_RI
519 andc r0,r0,r2
520 mtmsrd r0,1
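	/* With MSR_RI clear, any exception from here on is treated as
	 * unrecoverable: SRR0/SRR1 are about to be loaded with the
	 * return context and must not be clobbered before the rfid. */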

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz	"enter_rtas"
.previous
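	/* The tdnei above traps if r0 is non-zero; the __bug_table
	 * entry ties the trap site to file/line, and the 0x1000000
	 * added to __LINE__ marks it as a warning rather than a
	 * fatal BUG(). */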

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
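	/* The kernel linear mapping starts at 0xc000000000000000, so
	 * clearing the top two bits of a kernel virtual address
	 * yields the corresponding real address. */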
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
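	/* r6 now holds the MSR RTAS will run with: 32-bit (SF clear),
	 * MMU off (IR/DR clear), FP and FP exceptions off, but with
	 * RI set so the state is still marked recoverable. */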
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync
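	/* Both MSR_SF (the current addressing mode) and MSR_ISF (the
	 * mode exceptions are taken in) are cleared, so the CPU runs
	 * and takes any exceptions in 32-bit mode while inside Open
	 * Firmware. */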

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that the top 32 bits of r1 didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr