head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
-ifeq ($(CONFIG_PPC32),y)
head-$(CONFIG_6xx) += arch/powerpc/kernel/idle_6xx.o
+head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
-endif
core-y += arch/powerpc/kernel/ \
arch/$(OLDARCH)/kernel/ \
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
+extra-$(CONFIG_PPC64) += entry_64.o
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds
-obj-y := traps.o prom.o semaphore.o
-obj-$(CONFIG_PPC32) += setup_32.o process.o
+obj-y += traps.o prom.o semaphore.o
+obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o
obj-$(CONFIG_PPC64) += idle_power4.o
+obj-$(CONFIG_PPC64) += misc_64.o
ifeq ($(CONFIG_PPC32),y)
obj-$(CONFIG_PPC_OF) += prom_init.o of_device.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
--- /dev/null
+/*
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ * This file contains the system call entry code, context switch
+ * code, and exception/interrupt return code for PowerPC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sys.h>
+#include <linux/threads.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+#undef SHOW_SYSCALLS
+#undef SHOW_SYSCALLS_TASK
+
+/*
+ * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
+ */
+#if MSR_KERNEL >= 0x10000
+#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
+#else
+#define LOAD_MSR_KERNEL(r, x) li r,(x)
+#endif
+
+#ifdef CONFIG_BOOKE
+#include "head_booke.h"
+#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
+ mtspr exc_level##_SPRG,r8; \
+ BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
+ lwz r0,GPR10-INT_FRAME_SIZE(r8); \
+ stw r0,GPR10(r11); \
+ lwz r0,GPR11-INT_FRAME_SIZE(r8); \
+ stw r0,GPR11(r11); \
+ mfspr r8,exc_level##_SPRG
+
+ .globl mcheck_transfer_to_handler
+mcheck_transfer_to_handler:
+ TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
+ b transfer_to_handler_full
+
+ .globl debug_transfer_to_handler
+debug_transfer_to_handler:
+ TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
+ b transfer_to_handler_full
+
+ .globl crit_transfer_to_handler
+crit_transfer_to_handler:
+ TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
+ /* fall through */
+#endif
+
+#ifdef CONFIG_40x
+ .globl crit_transfer_to_handler
+crit_transfer_to_handler:
+ lwz r0,crit_r10@l(0)
+ stw r0,GPR10(r11)
+ lwz r0,crit_r11@l(0)
+ stw r0,GPR11(r11)
+ /* fall through */
+#endif
+
+/*
+ * This code finishes saving the registers to the exception frame
+ * and jumps to the appropriate handler for the exception, turning
+ * on address translation.
+ * Note that we rely on the caller having set cr0.eq iff the exception
+ * occurred in kernel mode (i.e. MSR:PR = 0).
+ */
+ .globl transfer_to_handler_full
+transfer_to_handler_full:
+ SAVE_NVGPRS(r11)
+ /* fall through */
+
+ .globl transfer_to_handler
+transfer_to_handler:
+ stw r2,GPR2(r11)
+ stw r12,_NIP(r11)
+ stw r9,_MSR(r11)
+ andi. r2,r9,MSR_PR
+ mfctr r12
+ mfspr r2,SPRN_XER
+ stw r12,_CTR(r11)
+ stw r2,_XER(r11)
+ mfspr r12,SPRN_SPRG3
+ addi r2,r12,-THREAD
+ tovirt(r2,r2) /* set r2 to current */
+ beq 2f /* if from user, fix up THREAD.regs */
+ addi r11,r1,STACK_FRAME_OVERHEAD
+ stw r11,PT_REGS(r12)
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ /* Check to see if the dbcr0 register is set up to debug. Use the
+ single-step bit to do this. */
+ lwz r12,THREAD_DBCR0(r12)
+ andis. r12,r12,DBCR0_IC@h
+ beq+ 3f
+ /* From user and task is ptraced - load up global dbcr0 */
+ li r12,-1 /* clear all pending debug events */
+ mtspr SPRN_DBSR,r12
+ lis r11,global_dbcr0@ha
+ tophys(r11,r11)
+ addi r11,r11,global_dbcr0@l
+ lwz r12,0(r11)
+ mtspr SPRN_DBCR0,r12
+ lwz r12,4(r11)
+ addi r12,r12,-1
+ stw r12,4(r11)
+#endif
+ b 3f
+2: /* if from kernel, check interrupted DOZE/NAP mode and
+ * check for stack overflow
+ */
+#ifdef CONFIG_6xx
+ mfspr r11,SPRN_HID0
+ mtcr r11
+BEGIN_FTR_SECTION
+ bt- 8,power_save_6xx_restore /* Check DOZE */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+BEGIN_FTR_SECTION
+ bt- 9,power_save_6xx_restore /* Check NAP */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+#endif /* CONFIG_6xx */
+ .globl transfer_to_handler_cont
+transfer_to_handler_cont:
+ lwz r11,THREAD_INFO-THREAD(r12)
+ cmplw r1,r11 /* if r1 <= current->thread_info */
+ ble- stack_ovf /* then the kernel stack overflowed */
+3:
+ mflr r9
+ lwz r11,0(r9) /* virtual address of handler */
+ lwz r9,4(r9) /* where to go when done */
+ FIX_SRR1(r10,r12)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r10
+ mtlr r9
+ SYNC
+ RFI /* jump to handler, enable MMU */
+
+/*
+ * On kernel stack overflow, load up an initial stack pointer
+ * and call StackOverflow(regs), which should not return.
+ */
+stack_ovf:
+ /* sometimes we use a statically-allocated stack, which is OK. */
+ lis r11,_end@h
+ ori r11,r11,_end@l
+ cmplw r1,r11
+ ble 3b /* r1 <= &_end is OK */
+ SAVE_NVGPRS(r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lis r1,init_thread_union@ha
+ addi r1,r1,init_thread_union@l
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ lis r9,StackOverflow@ha
+ addi r9,r9,StackOverflow@l
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+ FIX_SRR1(r10,r12)
+ mtspr SPRN_SRR0,r9
+ mtspr SPRN_SRR1,r10
+ SYNC
+ RFI
+
+/*
+ * Handle a system call.
+ */
+ .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
+ .stabs "entry_32.S",N_SO,0,0,0f
+0:
+
+_GLOBAL(DoSyscall)
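+	/*
+	 * On entry r0 holds the system call number, r3-r8 the arguments,
+	 * r1 points at the exception frame and r2 at the current task.
+	 */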
+ stw r0,THREAD+LAST_SYSCALL(r2)
+ stw r3,ORIG_GPR3(r1)
+ li r12,0
+ stw r12,RESULT(r1)
+ lwz r11,_CCR(r1) /* Clear SO bit in CR */
+ rlwinm r11,r11,0,4,2
+ stw r11,_CCR(r1)
+#ifdef SHOW_SYSCALLS
+ bl do_show_syscall
+#endif /* SHOW_SYSCALLS */
+ rlwinm r10,r1,0,0,18 /* current_thread_info() */
+ lwz r11,TI_LOCAL_FLAGS(r10)
+ rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR
+ stw r11,TI_LOCAL_FLAGS(r10)
+ lwz r11,TI_FLAGS(r10)
+ andi. r11,r11,_TIF_SYSCALL_T_OR_A
+ bne- syscall_dotrace
+syscall_dotrace_cont:
+ cmplwi 0,r0,NR_syscalls
+ lis r10,sys_call_table@h
+ ori r10,r10,sys_call_table@l
+ slwi r0,r0,2
+ bge- 66f
+ lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
+ mtlr r10
+ addi r9,r1,STACK_FRAME_OVERHEAD
+ PPC440EP_ERR42
+ blrl /* Call handler */
+ .globl ret_from_syscall
+ret_from_syscall:
+#ifdef SHOW_SYSCALLS
+ bl do_show_syscall_exit
+#endif
+ mr r6,r3
+ li r11,-_LAST_ERRNO
+ cmplw 0,r3,r11
+ rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ blt+ 30f
+ lwz r11,TI_LOCAL_FLAGS(r12)
+ andi. r11,r11,_TIFL_FORCE_NOERROR
+ bne 30f
+ neg r3,r3
+ lwz r10,_CCR(r1) /* Set SO bit in CR */
+ oris r10,r10,0x1000
+ stw r10,_CCR(r1)
+
+ /* disable interrupts so current_thread_info()->flags can't change */
+30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ SYNC
+ MTMSRD(r10)
+ lwz r9,TI_FLAGS(r12)
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ bne- syscall_exit_work
+syscall_exit_cont:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+ /* If the process has its own DBCR0 value, load it up. The single
+ step bit tells us that dbcr0 should be loaded. */
+ lwz r0,THREAD+THREAD_DBCR0(r2)
+ andis. r10,r0,DBCR0_IC@h
+ bnel- load_dbcr0
+#endif
+ stwcx. r0,0,r1 /* to clear the reservation */
+ lwz r4,_LINK(r1)
+ lwz r5,_CCR(r1)
+ mtlr r4
+ mtcr r5
+ lwz r7,_NIP(r1)
+ lwz r8,_MSR(r1)
+ FIX_SRR1(r8, r0)
+ lwz r2,GPR2(r1)
+ lwz r1,GPR1(r1)
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r8
+ SYNC
+ RFI
+
+66: li r3,-ENOSYS
+ b ret_from_syscall
+
+ .globl ret_from_fork
+ret_from_fork:
+ REST_NVGPRS(r1)
+ bl schedule_tail
+ li r3,0
+ b ret_from_syscall
+
+/* Traced system call support */
+syscall_dotrace:
+ SAVE_NVGPRS(r1)
+ li r0,0xc00
+ stw r0,TRAP(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_syscall_trace_enter
+ lwz r0,GPR0(r1) /* Restore original registers */
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ REST_NVGPRS(r1)
+ b syscall_dotrace_cont
+
+syscall_exit_work:
+ stw r6,RESULT(r1) /* Save result */
+ stw r3,GPR3(r1) /* Update return value */
+ andi. r0,r9,_TIF_SYSCALL_T_OR_A
+ beq 5f
+ ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10) /* re-enable interrupts */
+ lwz r4,TRAP(r1)
+ andi. r4,r4,1
+ beq 4f
+ SAVE_NVGPRS(r1)
+ li r4,0xc00
+ stw r4,TRAP(r1)
+4:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_syscall_trace_leave
+ REST_NVGPRS(r1)
+2:
+ lwz r3,GPR3(r1)
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ SYNC
+ MTMSRD(r10) /* disable interrupts again */
+ rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ lwz r9,TI_FLAGS(r12)
+5:
+ andi. r0,r9,_TIF_NEED_RESCHED
+ bne 1f
+ lwz r5,_MSR(r1)
+ andi. r5,r5,MSR_PR
+ beq syscall_exit_cont
+ andi. r0,r9,_TIF_SIGPENDING
+ beq syscall_exit_cont
+ b do_user_signal
+1:
+ ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10) /* re-enable interrupts */
+ bl schedule
+ b 2b
+
+#ifdef SHOW_SYSCALLS
+do_show_syscall:
+#ifdef SHOW_SYSCALLS_TASK
+ lis r11,show_syscalls_task@ha
+ lwz r11,show_syscalls_task@l(r11)
+ cmp 0,r2,r11
+ bnelr
+#endif
+ stw r31,GPR31(r1)
+ mflr r31
+ lis r3,7f@ha
+ addi r3,r3,7f@l
+ lwz r4,GPR0(r1)
+ lwz r5,GPR3(r1)
+ lwz r6,GPR4(r1)
+ lwz r7,GPR5(r1)
+ lwz r8,GPR6(r1)
+ lwz r9,GPR7(r1)
+ bl printk
+ lis r3,77f@ha
+ addi r3,r3,77f@l
+ lwz r4,GPR8(r1)
+ mr r5,r2
+ bl printk
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ mtlr r31
+ lwz r31,GPR31(r1)
+ blr
+
+do_show_syscall_exit:
+#ifdef SHOW_SYSCALLS_TASK
+ lis r11,show_syscalls_task@ha
+ lwz r11,show_syscalls_task@l(r11)
+ cmp 0,r2,r11
+ bnelr
+#endif
+ stw r31,GPR31(r1)
+ mflr r31
+ stw r3,RESULT(r1) /* Save result */
+ mr r4,r3
+ lis r3,79f@ha
+ addi r3,r3,79f@l
+ bl printk
+ lwz r3,RESULT(r1)
+ mtlr r31
+ lwz r31,GPR31(r1)
+ blr
+
+7: .string "syscall %d(%x, %x, %x, %x, %x, "
+77: .string "%x), current=%p\n"
+79: .string " -> %x\n"
+ .align 2,0
+
+#ifdef SHOW_SYSCALLS_TASK
+ .data
+ .globl show_syscalls_task
+show_syscalls_task:
+ .long -1
+ .text
+#endif
+#endif /* SHOW_SYSCALLS */
+
+/*
+ * The sigsuspend and rt_sigsuspend system calls can call do_signal
+ * and thus put the process into the stopped state where we might
+ * want to examine its user state with ptrace. Therefore we need
+ * to save all the nonvolatile registers (r13 - r31) before calling
+ * the C code.
+ */
+ .globl ppc_sigsuspend
+ppc_sigsuspend:
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,TRAP(r1) /* register set saved */
+ b sys_sigsuspend
+
+ .globl ppc_rt_sigsuspend
+ppc_rt_sigsuspend:
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ rlwinm r0,r0,0,0,30
+ stw r0,TRAP(r1)
+ b sys_rt_sigsuspend
+
+ .globl ppc_fork
+ppc_fork:
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,TRAP(r1) /* register set saved */
+ b sys_fork
+
+ .globl ppc_vfork
+ppc_vfork:
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,TRAP(r1) /* register set saved */
+ b sys_vfork
+
+ .globl ppc_clone
+ppc_clone:
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,TRAP(r1) /* register set saved */
+ b sys_clone
+
+ .globl ppc_swapcontext
+ppc_swapcontext:
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,TRAP(r1) /* register set saved */
+ b sys_swapcontext
+
+/*
+ * Top-level page fault handling.
+ * This is in assembler because if do_page_fault tells us that
+ * it is a bad kernel page fault, we want to save the non-volatile
+ * registers before calling bad_page_fault.
+ */
+ .globl handle_page_fault
+handle_page_fault:
+ stw r4,_DAR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_page_fault
+ cmpwi r3,0
+ beq+ ret_from_except
+ SAVE_NVGPRS(r1)
+ lwz r0,TRAP(r1)
+ clrrwi r0,r0,1
+ stw r0,TRAP(r1)
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+ bl bad_page_fault
+ b ret_from_except_full
+
+/*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+ * of the other is restored from its kernel stack. The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process.
+ * On entry, r3 points to the THREAD for the current task, r4
+ * points to the THREAD for the new task.
+ *
+ * This routine is always called with interrupts disabled.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path. If you change this, you'll have to
+ * change the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/ppc/kernel/process.c
+ */
+_GLOBAL(_switch)
+ stwu r1,-INT_FRAME_SIZE(r1)
+ mflr r0
+ stw r0,INT_FRAME_SIZE+4(r1)
+ /* r3-r12 are caller saved -- Cort */
+ SAVE_NVGPRS(r1)
+ stw r0,_NIP(r1) /* Return to switch caller */
+ mfmsr r11
+ li r0,MSR_FP /* Disable floating-point */
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r0,r0,MSR_VEC@h /* Disable altivec */
+ mfspr r12,SPRN_VRSAVE /* save vrsave register value */
+ stw r12,THREAD+THREAD_VRSAVE(r2)
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+ oris r0,r0,MSR_SPE@h /* Disable SPE */
+ mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
+ stw r12,THREAD+THREAD_SPEFSCR(r2)
+#endif /* CONFIG_SPE */
+ and. r0,r0,r11 /* FP or altivec or SPE enabled? */
+ beq+ 1f
+ andc r11,r11,r0
+ MTMSRD(r11)
+ isync
+1: stw r11,_MSR(r1)
+ mfcr r10
+ stw r10,_CCR(r1)
+ stw r1,KSP(r3) /* Set old stack pointer */
+
+#ifdef CONFIG_SMP
+ /* We need a sync somewhere here to make sure that if the
+ * previous task gets rescheduled on another CPU, it sees all
+ * stores it has performed on this one.
+ */
+ sync
+#endif /* CONFIG_SMP */
+
+ tophys(r0,r4)
+ CLR_TOP32(r0)
+ mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
+ lwz r1,KSP(r4) /* Load new stack pointer */
+
+ /* save the old current 'last' for return value */
+ mr r3,r2
+ addi r2,r4,-THREAD /* Update current */
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ lwz r0,THREAD+THREAD_VRSAVE(r2)
+ mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+ lwz r0,THREAD+THREAD_SPEFSCR(r2)
+ mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
+#endif /* CONFIG_SPE */
+
+ lwz r0,_CCR(r1)
+ mtcrf 0xFF,r0
+ /* r3-r12 are destroyed -- Cort */
+ REST_NVGPRS(r1)
+
+ lwz r4,_NIP(r1) /* Return to _switch caller in new task */
+ mtlr r4
+ addi r1,r1,INT_FRAME_SIZE
+ blr
+
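+/*
+ * Fast exception return path: r11 points to the exception frame and
+ * r9/r12 hold the saved MSR/NIP from the exception prolog.  Restore
+ * the registers saved by the prolog and return with RFI, without
+ * going through the full exception-return path.
+ */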
+ .globl fast_exception_return
+fast_exception_return:
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+ andi. r10,r9,MSR_RI /* check for recoverable interrupt */
+ beq 1f /* if not, we've got problems */
+#endif
+
+2: REST_4GPRS(3, r11)
+ lwz r10,_CCR(r11)
+ REST_GPR(1, r11)
+ mtcr r10
+ lwz r10,_LINK(r11)
+ mtlr r10
+ REST_GPR(10, r11)
+ mtspr SPRN_SRR1,r9
+ mtspr SPRN_SRR0,r12
+ REST_GPR(9, r11)
+ REST_GPR(12, r11)
+ lwz r11,GPR11(r11)
+ SYNC
+ RFI
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+/* check if the exception happened in a restartable section */
+1: lis r3,exc_exit_restart_end@ha
+ addi r3,r3,exc_exit_restart_end@l
+ cmplw r12,r3
+ bge 3f
+ lis r4,exc_exit_restart@ha
+ addi r4,r4,exc_exit_restart@l
+ cmplw r12,r4
+ blt 3f
+ lis r3,fee_restarts@ha
+ tophys(r3,r3)
+ lwz r5,fee_restarts@l(r3)
+ addi r5,r5,1
+ stw r5,fee_restarts@l(r3)
+ mr r12,r4 /* restart at exc_exit_restart */
+ b 2b
+
+ .comm fee_restarts,4
+
+/* aargh, a nonrecoverable interrupt, panic */
+/* aargh, we don't know which trap this is */
+/* but the 601 doesn't implement the RI bit, so assume it's OK */
+3:
+BEGIN_FTR_SECTION
+ b 2b
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+ li r10,-1
+ stw r10,TRAP(r11)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lis r10,MSR_KERNEL@h
+ ori r10,r10,MSR_KERNEL@l
+ bl transfer_to_handler_full
+ .long nonrecoverable_exception
+ .long ret_from_except
+#endif
+
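+/*
+ * sigreturn_exit: r3 points to the pt_regs for this exception frame.
+ * Recover the stack pointer from it, do any syscall exit tracing,
+ * then fall through to the normal exception return path.
+ */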
+ .globl sigreturn_exit
+sigreturn_exit:
+ subi r1,r3,STACK_FRAME_OVERHEAD
+ rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ lwz r9,TI_FLAGS(r12)
+ andi. r0,r9,_TIF_SYSCALL_T_OR_A
+ bnel- do_syscall_trace_leave
+ /* fall through */
+
+ .globl ret_from_except_full
+ret_from_except_full:
+ REST_NVGPRS(r1)
+ /* fall through */
+
+ .globl ret_from_except
+ret_from_except:
+ /* Hard-disable interrupts so that current_thread_info()->flags
+ * can't change between when we test it and when we return
+ * from the interrupt. */
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+ SYNC /* Some chip revs have problems here... */
+ MTMSRD(r10) /* disable interrupts */
+
+ lwz r3,_MSR(r1) /* Returning to user mode? */
+ andi. r0,r3,MSR_PR
+ beq resume_kernel
+
+user_exc_return: /* r10 contains MSR_KERNEL here */
+ /* Check current_thread_info()->flags */
+ rlwinm r9,r1,0,0,18
+ lwz r9,TI_FLAGS(r9)
+ andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ bne do_work
+
+restore_user:
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+ /* Check whether this process has its own DBCR0 value. The single
+ step bit tells us that dbcr0 should be loaded. */
+ lwz r0,THREAD+THREAD_DBCR0(r2)
+ andis. r10,r0,DBCR0_IC@h
+ bnel- load_dbcr0
+#endif
+
+#ifdef CONFIG_PREEMPT
+ b restore
+
+/* N.B. the only way to get here is from the beq following ret_from_except. */
+resume_kernel:
+ /* check current_thread_info->preempt_count */
+ rlwinm r9,r1,0,0,18
+ lwz r0,TI_PREEMPT(r9)
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore
+ lwz r0,TI_FLAGS(r9)
+ andi. r0,r0,_TIF_NEED_RESCHED
+ beq+ restore
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore /* don't schedule if so */
+1: bl preempt_schedule_irq
+ rlwinm r9,r1,0,0,18
+ lwz r3,TI_FLAGS(r9)
+ andi. r0,r3,_TIF_NEED_RESCHED
+ bne- 1b
+#else
+resume_kernel:
+#endif /* CONFIG_PREEMPT */
+
+ /* interrupts are hard-disabled at this point */
+restore:
+ lwz r0,GPR0(r1)
+ lwz r2,GPR2(r1)
+ REST_4GPRS(3, r1)
+ REST_2GPRS(7, r1)
+
+ lwz r10,_XER(r1)
+ lwz r11,_CTR(r1)
+ mtspr SPRN_XER,r10
+ mtctr r11
+
+ PPC405_ERR77(0,r1)
+ stwcx. r0,0,r1 /* to clear the reservation */
+
+#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+ lwz r9,_MSR(r1)
+ andi. r10,r9,MSR_RI /* check if this exception occurred */
+ beql nonrecoverable /* at a bad place (MSR:RI = 0) */
+
+ lwz r10,_CCR(r1)
+ lwz r11,_LINK(r1)
+ mtcrf 0xFF,r10
+ mtlr r11
+
+ /*
+ * Once we put values in SRR0 and SRR1, we are in a state
+ * where exceptions are not recoverable, since taking an
+ * exception will trash SRR0 and SRR1. Therefore we clear the
+ * MSR:RI bit to indicate this. If we do take an exception,
+ * we can't return to the point of the exception but we
+ * can restart the exception exit path at the label
+ * exc_exit_restart below. -- paulus
+ */
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
+ SYNC
+ MTMSRD(r10) /* clear the RI bit */
+ .globl exc_exit_restart
+exc_exit_restart:
+ lwz r9,_MSR(r1)
+ lwz r12,_NIP(r1)
+ FIX_SRR1(r9,r10)
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r9
+ REST_4GPRS(9, r1)
+ lwz r1,GPR1(r1)
+ .globl exc_exit_restart_end
+exc_exit_restart_end:
+ SYNC
+ RFI
+
+#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
+ /*
+ * This is a bit different on 4xx/Book-E because it doesn't have
+ * the RI bit in the MSR.
+ * The TLB miss handler checks if we have interrupted
+ * the exception exit path and restarts it if so
+ * (well maybe one day it will... :).
+ */
+ lwz r11,_LINK(r1)
+ mtlr r11
+ lwz r10,_CCR(r1)
+ mtcrf 0xff,r10
+ REST_2GPRS(9, r1)
+ .globl exc_exit_restart
+exc_exit_restart:
+ lwz r11,_NIP(r1)
+ lwz r12,_MSR(r1)
+exc_exit_start:
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+ REST_2GPRS(11, r1)
+ lwz r1,GPR1(r1)
+ .globl exc_exit_restart_end
+exc_exit_restart_end:
+ PPC405_ERR77_SYNC
+ rfi
+ b . /* prevent prefetch past rfi */
+
+/*
+ * Returning from a critical interrupt in user mode doesn't need
+ * to be any different from a normal exception. For a critical
+ * interrupt in the kernel, we just return (without checking for
+ * preemption) since the interrupt may have happened at some crucial
+ * place (e.g. inside the TLB miss handler), and because we will be
+ * running with r1 pointing into critical_stack, not the current
+ * process's kernel stack (and therefore current_thread_info() will
+ * give the wrong answer).
+ * We have to restore various SPRs that may have been in use at the
+ * time of the critical interrupt.
+ *
+ */
+#ifdef CONFIG_40x
+#define PPC_40x_TURN_OFF_MSR_DR \
+ /* avoid any possible TLB misses here by turning off MSR.DR, we \
+ * assume the instructions here are mapped by a pinned TLB entry */ \
+ li r10,MSR_IR; \
+ mtmsr r10; \
+ isync; \
+ tophys(r1, r1);
+#else
+#define PPC_40x_TURN_OFF_MSR_DR
+#endif
+
+#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
+ REST_NVGPRS(r1); \
+ lwz r3,_MSR(r1); \
+ andi. r3,r3,MSR_PR; \
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
+ bne user_exc_return; \
+ lwz r0,GPR0(r1); \
+ lwz r2,GPR2(r1); \
+ REST_4GPRS(3, r1); \
+ REST_2GPRS(7, r1); \
+ lwz r10,_XER(r1); \
+ lwz r11,_CTR(r1); \
+ mtspr SPRN_XER,r10; \
+ mtctr r11; \
+ PPC405_ERR77(0,r1); \
+ stwcx. r0,0,r1; /* to clear the reservation */ \
+ lwz r11,_LINK(r1); \
+ mtlr r11; \
+ lwz r10,_CCR(r1); \
+ mtcrf 0xff,r10; \
+ PPC_40x_TURN_OFF_MSR_DR; \
+ lwz r9,_DEAR(r1); \
+ lwz r10,_ESR(r1); \
+ mtspr SPRN_DEAR,r9; \
+ mtspr SPRN_ESR,r10; \
+ lwz r11,_NIP(r1); \
+ lwz r12,_MSR(r1); \
+ mtspr exc_lvl_srr0,r11; \
+ mtspr exc_lvl_srr1,r12; \
+ lwz r9,GPR9(r1); \
+ lwz r12,GPR12(r1); \
+ lwz r10,GPR10(r1); \
+ lwz r11,GPR11(r1); \
+ lwz r1,GPR1(r1); \
+ PPC405_ERR77_SYNC; \
+ exc_lvl_rfi; \
+ b .; /* prevent prefetch past exc_lvl_rfi */
+
+ .globl ret_from_crit_exc
+ret_from_crit_exc:
+ RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
+
+#ifdef CONFIG_BOOKE
+ .globl ret_from_debug_exc
+ret_from_debug_exc:
+ RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
+
+ .globl ret_from_mcheck_exc
+ret_from_mcheck_exc:
+ RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
+#endif /* CONFIG_BOOKE */
+
+/*
+ * Load the DBCR0 value for a task that is being ptraced,
+ * having first saved away the global DBCR0. Note that r0
+ * has the dbcr0 value to set upon entry to this.
+ */
+load_dbcr0:
+ mfmsr r10 /* first disable debug exceptions */
+ rlwinm r10,r10,0,~MSR_DE
+ mtmsr r10
+ isync
+ mfspr r10,SPRN_DBCR0
+ lis r11,global_dbcr0@ha
+ addi r11,r11,global_dbcr0@l
+ stw r10,0(r11)
+ mtspr SPRN_DBCR0,r0
+ lwz r10,4(r11)
+ addi r10,r10,1
+ stw r10,4(r11)
+ li r11,-1
+ mtspr SPRN_DBSR,r11 /* clear all pending debug events */
+ blr
+
+ .comm global_dbcr0,8
+#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+do_work: /* r10 contains MSR_KERNEL here */
+ andi. r0,r9,_TIF_NEED_RESCHED
+ beq do_user_signal
+
+do_resched: /* r10 contains MSR_KERNEL here */
+ ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10) /* hard-enable interrupts */
+ bl schedule
+recheck:
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL)
+ SYNC
+ MTMSRD(r10) /* disable interrupts */
+ rlwinm r9,r1,0,0,18
+ lwz r9,TI_FLAGS(r9)
+ andi. r0,r9,_TIF_NEED_RESCHED
+ bne- do_resched
+ andi. r0,r9,_TIF_SIGPENDING
+ beq restore_user
+do_user_signal: /* r10 contains MSR_KERNEL here */
+ ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10) /* hard-enable interrupts */
+ /* save r13-r31 in the exception frame, if not already done */
+ lwz r3,TRAP(r1)
+ andi. r0,r3,1
+ beq 2f
+ SAVE_NVGPRS(r1)
+ rlwinm r3,r3,0,0,30
+ stw r3,TRAP(r1)
+2: li r3,0
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ bl do_signal
+ REST_NVGPRS(r1)
+ b recheck
+
+/*
+ * We come here when we are at the end of handling an exception
+ * that occurred at a place where taking an exception will lose
+ * state information, such as the contents of SRR0 and SRR1.
+ */
+nonrecoverable:
+ lis r10,exc_exit_restart_end@ha
+ addi r10,r10,exc_exit_restart_end@l
+ cmplw r12,r10
+ bge 3f
+ lis r11,exc_exit_restart@ha
+ addi r11,r11,exc_exit_restart@l
+ cmplw r12,r11
+ blt 3f
+ lis r10,ee_restarts@ha
+ lwz r12,ee_restarts@l(r10)
+ addi r12,r12,1
+ stw r12,ee_restarts@l(r10)
+ mr r12,r11 /* restart at exc_exit_restart */
+ blr
+3: /* OK, we can't recover, kill this process */
+ /* but the 601 doesn't implement the RI bit, so assume it's OK */
+BEGIN_FTR_SECTION
+ blr
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+ lwz r3,TRAP(r1)
+ andi. r0,r3,1
+ beq 4f
+ SAVE_NVGPRS(r1)
+ rlwinm r3,r3,0,0,30
+ stw r3,TRAP(r1)
+4: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl nonrecoverable_exception
+ /* shouldn't return */
+ b 4b
+
+ .comm ee_restarts,4
+
+/*
+ * PROM code for specific machines follows. Put it
+ * here so it's easy to add arch-specific sections later.
+ * -- Cort
+ */
+#ifdef CONFIG_PPC_OF
+/*
+ * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
+ * called with the MMU off.
+ */
+_GLOBAL(enter_rtas)
+ stwu r1,-INT_FRAME_SIZE(r1)
+ mflr r0
+ stw r0,INT_FRAME_SIZE+4(r1)
+ lis r4,rtas_data@ha
+ lwz r4,rtas_data@l(r4)
+ lis r6,1f@ha /* physical return address for rtas */
+ addi r6,r6,1f@l
+ tophys(r6,r6)
+ tophys(r7,r1)
+ lis r8,rtas_entry@ha
+ lwz r8,rtas_entry@l(r8)
+ mfmsr r9
+ stw r9,8(r1)
+ LOAD_MSR_KERNEL(r0,MSR_KERNEL)
+ SYNC /* disable interrupts so SRR0/1 */
+ MTMSRD(r0) /* don't get trashed */
+ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ mtlr r6
+ CLR_TOP32(r7)
+ mtspr SPRN_SPRG2,r7
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ RFI
+1: tophys(r9,r1)
+ lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
+ lwz r9,8(r9) /* original msr value */
+ FIX_SRR1(r9,r0)
+ addi r1,r1,INT_FRAME_SIZE
+ li r0,0
+ mtspr SPRN_SPRG2,r0
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ RFI /* return to caller */
+
+ .globl machine_check_in_rtas
+machine_check_in_rtas:
+ twi 31,0,0
+ /* XXX load up BATs and panic */
+
+#endif /* CONFIG_PPC_OF */
--- /dev/null
+/*
+ * arch/ppc64/kernel/entry.S
+ *
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
+ *
+ * This file contains the system call entry code, context switch
+ * code, and exception/interrupt return code for PowerPC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <asm/unistd.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#define DO_SOFT_DISABLE
+#endif
+
+/*
+ * System calls.
+ */
+ .section ".toc","aw"
+.SYS_CALL_TABLE:
+ .tc .sys_call_table[TC],.sys_call_table
+
+/* This value is used to mark exception frames on the stack; it is the
+ * ASCII string "regshere". */
+exception_marker:
+ .tc ID_72656773_68657265[TC],0x7265677368657265
+
+ .section ".text"
+ .align 7
+
+#undef SHOW_SYSCALLS
+
+ .globl system_call_common
+system_call_common:
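+	/*
+	 * On entry from the exception prolog (as the stores below assume):
+	 * r0 = syscall number, r3-r8 = arguments, r9 = the caller's r13,
+	 * r11 = saved NIP, r12 = saved MSR, r13 = PACA pointer.
+	 */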
+ andi. r10,r12,MSR_PR
+ mr r10,r1
+ addi r1,r1,-INT_FRAME_SIZE
+ beq- 1f
+ ld r1,PACAKSAVE(r13)
+1: std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ li r11,0
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ crclr so
+ mfcr r9
+ mflr r10
+ li r11,0xc01
+ std r9,_CCR(r1)
+ std r10,_LINK(r1)
+ std r11,_TRAP(r1)
+ mfxer r9
+ mfctr r10
+ std r9,_XER(r1)
+ std r10,_CTR(r1)
+ std r3,ORIG_GPR3(r1)
+ ld r2,PACATOC(r13)
+ addi r9,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r9) /* "regshere" marker */
+#ifdef CONFIG_PPC_ISERIES
+ /* Hack for handling interrupts when soft-enabling on iSeries */
+ cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
+ andi. r10,r12,MSR_PR /* from kernel */
+ crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
+ beq hardware_interrupt_entry
+ lbz r10,PACAPROCENABLED(r13)
+ std r10,SOFTE(r1)
+#endif
+ mfmsr r11
+ ori r11,r11,MSR_EE
+ mtmsrd r11,1
+
+#ifdef SHOW_SYSCALLS
+ bl .do_show_syscall
+ REST_GPR(0,r1)
+ REST_4GPRS(3,r1)
+ REST_2GPRS(7,r1)
+ addi r9,r1,STACK_FRAME_OVERHEAD
+#endif
+	clrrdi	r11,r1,THREAD_SHIFT	/* current_thread_info() */
+ li r12,0
+ ld r10,TI_FLAGS(r11)
+ stb r12,TI_SC_NOERR(r11)
+ andi. r11,r10,_TIF_SYSCALL_T_OR_A
+ bne- syscall_dotrace
+syscall_dotrace_cont:
+ cmpldi 0,r0,NR_syscalls
+ bge- syscall_enosys
+
+system_call: /* label this so stack traces look sane */
+/*
+ * Need to vector to the 32-bit or default sys_call_table here,
+ * based on caller's run-mode / personality.
+ */
+ ld r11,.SYS_CALL_TABLE@toc(2)
+ andi. r10,r10,_TIF_32BIT
+ beq 15f
+ addi r11,r11,8 /* use 32-bit syscall entries */
+ clrldi r3,r3,32
+ clrldi r4,r4,32
+ clrldi r5,r5,32
+ clrldi r6,r6,32
+ clrldi r7,r7,32
+ clrldi r8,r8,32
+15:
+	slwi	r0,r0,4		/* each syscall table entry is 16 bytes */
+ ldx r10,r11,r0 /* Fetch system call handler [ptr] */
+ mtctr r10
+ bctrl /* Call handler */
+
+syscall_exit:
+#ifdef SHOW_SYSCALLS
+ std r3,GPR3(r1)
+ bl .do_show_syscall_exit
+ ld r3,GPR3(r1)
+#endif
+ std r3,RESULT(r1)
+ ld r5,_CCR(r1)
+	li	r10,-_LAST_ERRNO
+	cmpld	r3,r10			/* return values in the errno range are errors */
+	clrrdi	r12,r1,THREAD_SHIFT	/* current_thread_info() */
+ bge- syscall_error
+syscall_error_cont:
+
+ /* check for syscall tracing or audit */
+ ld r9,TI_FLAGS(r12)
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ bne- syscall_exit_trace
+syscall_exit_trace_cont:
+
+ /* disable interrupts so current_thread_info()->flags can't change,
+ and so that we don't get interrupted after loading SRR0/1. */
+ ld r8,_MSR(r1)
+ andi. r10,r8,MSR_RI
+ beq- unrecov_restore
+ mfmsr r10
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+ ld r9,TI_FLAGS(r12)
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ bne- syscall_exit_work
+ ld r7,_NIP(r1)
+ stdcx. r0,0,r1 /* to clear the reservation */
+ andi. r6,r8,MSR_PR
+ ld r4,_LINK(r1)
+ beq- 1f /* only restore r13 if */
+ ld r13,GPR13(r1) /* returning to usermode */
+1: ld r2,GPR2(r1)
+ li r12,MSR_RI
+ andc r10,r10,r12
+ mtmsrd r10,1 /* clear MSR.RI */
+ ld r1,GPR1(r1)
+ mtlr r4
+ mtcr r5
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r8
+ rfid
+ b . /* prevent speculative execution */
+
+syscall_enosys:
+ li r3,-ENOSYS
+ std r3,RESULT(r1)
+ clrrdi r12,r1,THREAD_SHIFT
+ ld r5,_CCR(r1)
+
+syscall_error:
+ lbz r11,TI_SC_NOERR(r12)
+ cmpwi 0,r11,0
+ bne- syscall_error_cont
+ neg r3,r3
+ oris r5,r5,0x1000 /* Set SO bit in CR */
+ std r5,_CCR(r1)
+ b syscall_error_cont
+
+/* Traced system call support */
+syscall_dotrace:
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_syscall_trace_enter
+ ld r0,GPR0(r1) /* Restore original registers */
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ addi r9,r1,STACK_FRAME_OVERHEAD
+ clrrdi r10,r1,THREAD_SHIFT
+ ld r10,TI_FLAGS(r10)
+ b syscall_dotrace_cont
+
+syscall_exit_trace:
+ std r3,GPR3(r1)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_syscall_trace_leave
+ REST_NVGPRS(r1)
+ ld r3,GPR3(r1)
+ ld r5,_CCR(r1)
+ clrrdi r12,r1,THREAD_SHIFT
+ b syscall_exit_trace_cont
+
+/* Stuff to do on exit from a system call. */
+syscall_exit_work:
+ std r3,GPR3(r1)
+ std r5,_CCR(r1)
+ b .ret_from_except_lite
+
+/* Save non-volatile GPRs, if not already saved. */
+_GLOBAL(save_nvgprs)
+ ld r11,_TRAP(r1)
+ andi. r0,r11,1
+ beqlr-
+ SAVE_NVGPRS(r1)
+	clrrdi	r0,r11,1		/* clear the LSB of TRAP to show that */
+	std	r0,_TRAP(r1)		/* the full register set is now saved */
+ blr
+
+/*
+ * The sigsuspend and rt_sigsuspend system calls can call do_signal
+ * and thus put the process into the stopped state where we might
+ * want to examine its user state with ptrace. Therefore we need
+ * to save all the nonvolatile registers (r14 - r31) before calling
+ * the C code. Similarly, fork, vfork and clone need the full
+ * register state on the stack so that it can be copied to the child.
+ */
+_GLOBAL(ppc32_sigsuspend)
+ bl .save_nvgprs
+ bl .sys32_sigsuspend
+ b 70f
+
+_GLOBAL(ppc64_rt_sigsuspend)
+ bl .save_nvgprs
+ bl .sys_rt_sigsuspend
+ b 70f
+
+_GLOBAL(ppc32_rt_sigsuspend)
+ bl .save_nvgprs
+ bl .sys32_rt_sigsuspend
+70: cmpdi 0,r3,0
+ /* If it returned an error, we need to return via syscall_exit to set
+ the SO bit in cr0 and potentially stop for ptrace. */
+ bne syscall_exit
+ /* If sigsuspend() returns zero, we are going into a signal handler. We
+ may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
+#ifdef CONFIG_AUDIT
+ ld r3,PACACURRENT(r13)
+ ld r4,AUDITCONTEXT(r3)
+ cmpdi 0,r4,0
+ beq .ret_from_except /* No audit_context: Leave immediately. */
+ li r4, 2 /* AUDITSC_FAILURE */
+ li r5,-4 /* It's always -EINTR */
+ bl .audit_syscall_exit
+#endif
+ b .ret_from_except
+
+_GLOBAL(ppc_fork)
+ bl .save_nvgprs
+ bl .sys_fork
+ b syscall_exit
+
+_GLOBAL(ppc_vfork)
+ bl .save_nvgprs
+ bl .sys_vfork
+ b syscall_exit
+
+_GLOBAL(ppc_clone)
+ bl .save_nvgprs
+ bl .sys_clone
+ b syscall_exit
+
+_GLOBAL(ppc32_swapcontext)
+ bl .save_nvgprs
+ bl .sys32_swapcontext
+ b 80f
+
+_GLOBAL(ppc64_swapcontext)
+ bl .save_nvgprs
+ bl .sys_swapcontext
+ b 80f
+
+_GLOBAL(ppc32_sigreturn)
+ bl .sys32_sigreturn
+ b 80f
+
+_GLOBAL(ppc32_rt_sigreturn)
+ bl .sys32_rt_sigreturn
+ b 80f
+
+_GLOBAL(ppc64_rt_sigreturn)
+ bl .sys_rt_sigreturn
+
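+	/*
+	 * A negative return value from the sigreturn/swapcontext call means
+	 * it failed: go out through syscall_exit so the error is reported.
+	 * Otherwise do any syscall exit tracing and take the full exception
+	 * return path.
+	 */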
+80: cmpdi 0,r3,0
+ blt syscall_exit
+ clrrdi r4,r1,THREAD_SHIFT
+ ld r4,TI_FLAGS(r4)
+ andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ beq+ 81f
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_syscall_trace_leave
+81: b .ret_from_except
+
+_GLOBAL(ret_from_fork)
+ bl .schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0
+ b syscall_exit
+
+/*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+ * of the other is restored from its kernel stack. The memory
+ * management hardware is updated to the second process's state.
+ * Finally, we can return to the second process, via ret_from_except.
+ * On entry, r3 points to the THREAD for the current task, r4
+ * points to the THREAD for the new task.
+ *
+ * Note: there are two ways to get to the "going out" portion
+ * of this code; either by coming in via the entry (_switch)
+ * or via "fork" which must set up an environment equivalent
+ * to the "_switch" path. If you change this you'll have to change
+ * the fork code also.
+ *
+ * The code which creates the new task context is in 'copy_thread'
+ * in arch/ppc64/kernel/process.c
+ */
+ .align 7
+_GLOBAL(_switch)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-SWITCH_FRAME_SIZE(r1)
+ /* r3-r13 are caller saved -- Cort */
+ SAVE_8GPRS(14, r1)
+ SAVE_10GPRS(22, r1)
+ mflr r20 /* Return to switch caller */
+ mfmsr r22
+ li r0, MSR_FP
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ oris r0,r0,MSR_VEC@h /* Disable altivec */
+ mfspr r24,SPRN_VRSAVE /* save vrsave register value */
+ std r24,THREAD_VRSAVE(r3)
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+ and. r0,r0,r22
+ beq+ 1f
+ andc r22,r22,r0
+ mtmsrd r22
+ isync
+1: std r20,_NIP(r1)
+ mfcr r23
+ std r23,_CCR(r1)
+ std r1,KSP(r3) /* Set old stack pointer */
+
+#ifdef CONFIG_SMP
+ /* We need a sync somewhere here to make sure that if the
+ * previous task gets rescheduled on another CPU, it sees all
+ * stores it has performed on this one.
+ */
+ sync
+#endif /* CONFIG_SMP */
+
+ addi r6,r4,-THREAD /* Convert THREAD to 'current' */
+ std r6,PACACURRENT(r13) /* Set new 'current' */
+
+ ld r8,KSP(r4) /* new stack pointer */
+BEGIN_FTR_SECTION
+ clrrdi r6,r8,28 /* get its ESID */
+ clrrdi r9,r1,28 /* get current sp ESID */
+ clrldi. r0,r6,2 /* is new ESID c00000000? */
+ cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
+ cror eq,4*cr1+eq,eq
+ beq 2f /* if yes, don't slbie it */
+
+ /* Bolt in the new stack SLB entry */
+ ld r7,KSP_VSID(r4) /* Get new stack's VSID */
+ oris r0,r6,(SLB_ESID_V)@h
+ ori r0,r0,(SLB_NUM_BOLTED-1)@l
+ slbie r6
+ slbie r6 /* Workaround POWER5 < DD2.1 issue */
+ slbmte r7,r0
+ isync
+
+2:
+END_FTR_SECTION_IFSET(CPU_FTR_SLB)
+ clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
+ /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+ because we don't need to leave the 288-byte ABI gap at the
+ top of the kernel stack. */
+ addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
+
+ mr r1,r8 /* start using new stack pointer */
+ std r7,PACAKSAVE(r13)
+
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ ld r0,THREAD_VRSAVE(r4)
+ mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif /* CONFIG_ALTIVEC */
+
+ /* r3-r13 are destroyed -- Cort */
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+
+ /* convert old thread to its task_struct for return value */
+ addi r3,r3,-THREAD
+ ld r7,_NIP(r1) /* Return to _switch caller in new task */
+ mtlr r7
+ addi r1,r1,SWITCH_FRAME_SIZE
+ blr
+
+ .align 7
+_GLOBAL(ret_from_except)
+	ld	r11,_TRAP(r1)		/* if the non-volatile GPRs were never */
+	andi.	r0,r11,1		/* saved (LSB of TRAP still set), */
+	bne	.ret_from_except_lite	/* skip restoring them */
+	REST_NVGPRS(r1)
+
+_GLOBAL(ret_from_except_lite)
+ /*
+ * Disable interrupts so that current_thread_info()->flags
+ * can't change between when we test it and when we return
+ * from the interrupt.
+ */
+ mfmsr r10 /* Get current interrupt state */
+ rldicl r9,r10,48,1 /* clear MSR_EE */
+ rotldi r9,r9,16
+ mtmsrd r9,1 /* Update machine state */
+
+#ifdef CONFIG_PREEMPT
+ clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
+ li r0,_TIF_NEED_RESCHED /* bits to check */
+ ld r3,_MSR(r1)
+ ld r4,TI_FLAGS(r9)
+ /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
+ rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
+ and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
+ bne do_work
+
+#else /* !CONFIG_PREEMPT */
+ ld r3,_MSR(r1) /* Returning to user mode? */
+ andi. r3,r3,MSR_PR
+ beq restore /* if not, just restore regs and return */
+
+ /* Check current_thread_info()->flags */
+ clrrdi r9,r1,THREAD_SHIFT
+ ld r4,TI_FLAGS(r9)
+ andi. r0,r4,_TIF_USER_WORK_MASK
+ bne do_work
+#endif
+
+restore:
+#ifdef CONFIG_PPC_ISERIES
+ ld r5,SOFTE(r1)
+ cmpdi 0,r5,0
+ beq 4f
+ /* Check for pending interrupts (iSeries) */
+ ld r3,PACALPPACA+LPPACAANYINT(r13)
+ cmpdi r3,0
+ beq+ 4f /* skip do_IRQ if no interrupts */
+
+ li r3,0
+ stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
+ ori r10,r10,MSR_EE
+ mtmsrd r10 /* hard-enable again */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_IRQ
+ b .ret_from_except_lite /* loop back and handle more */
+
+4: stb r5,PACAPROCENABLED(r13)
+#endif
+
+ ld r3,_MSR(r1)
+ andi. r0,r3,MSR_RI
+ beq- unrecov_restore
+
+ andi. r0,r3,MSR_PR
+
+ /*
+ * r13 is our per cpu area, only restore it if we are returning to
+ * userspace
+ */
+ beq 1f
+ REST_GPR(13, r1)
+1:
+ ld r3,_CTR(r1)
+ ld r0,_LINK(r1)
+ mtctr r3
+ mtlr r0
+ ld r3,_XER(r1)
+ mtspr SPRN_XER,r3
+
+ REST_8GPRS(5, r1)
+
+ stdcx. r0,0,r1 /* to clear the reservation */
+
+ mfmsr r0
+ li r2, MSR_RI
+ andc r0,r0,r2
+ mtmsrd r0,1
+
+ ld r0,_MSR(r1)
+ mtspr SPRN_SRR1,r0
+
+ ld r2,_CCR(r1)
+ mtcrf 0xFF,r2
+ ld r2,_NIP(r1)
+ mtspr SPRN_SRR0,r2
+
+ ld r0,GPR0(r1)
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r1,GPR1(r1)
+
+ rfid
+ b . /* prevent speculative execution */
+
+/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
+do_work:
+#ifdef CONFIG_PREEMPT
+ andi. r0,r3,MSR_PR /* Returning to user mode? */
+ bne user_work
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi cr1,r8,0
+#ifdef CONFIG_PPC_ISERIES
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+#else
+ andi. r0,r3,MSR_EE
+#endif
+ crandc eq,cr1*4+eq,eq
+ bne restore
+ /* here we are preempting the current task */
+1:
+#ifdef CONFIG_PPC_ISERIES
+ li r0,1
+ stb r0,PACAPROCENABLED(r13)
+#endif
+ ori r10,r10,MSR_EE
+ mtmsrd r10,1 /* reenable interrupts */
+ bl .preempt_schedule
+ mfmsr r10
+ clrrdi r9,r1,THREAD_SHIFT
+ rldicl r10,r10,48,1 /* disable interrupts again */
+ rotldi r10,r10,16
+ mtmsrd r10,1
+ ld r4,TI_FLAGS(r9)
+ andi. r0,r4,_TIF_NEED_RESCHED
+ bne 1b
+ b restore
+
+user_work:
+#endif
+ /* Enable interrupts */
+ ori r10,r10,MSR_EE
+ mtmsrd r10,1
+
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq 1f
+ bl .schedule
+ b .ret_from_except_lite
+
+1: bl .save_nvgprs
+ li r3,0
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ bl .do_signal
+ b .ret_from_except
+
+unrecov_restore:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b unrecov_restore
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
+ * called with the MMU off.
+ *
+ * In addition, we need to be in 32b mode, at least for now.
+ *
+ * Note: r3 is an input parameter to rtas, so don't trash it...
+ */
+_GLOBAL(enter_rtas)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
+
+ /* Because RTAS is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
+ */
+ SAVE_GPR(2, r1) /* Save the TOC */
+ SAVE_GPR(13, r1) /* Save paca */
+ SAVE_8GPRS(14, r1) /* Save the non-volatiles */
+ SAVE_10GPRS(22, r1) /* ditto */
+
+ mfcr r4
+ std r4,_CCR(r1)
+ mfctr r5
+ std r5,_CTR(r1)
+ mfspr r6,SPRN_XER
+ std r6,_XER(r1)
+ mfdar r7
+ std r7,_DAR(r1)
+ mfdsisr r8
+ std r8,_DSISR(r1)
+ mfsrr0 r9
+ std r9,_SRR0(r1)
+ mfsrr1 r10
+ std r10,_SRR1(r1)
+
+	/* There is no way it is acceptable to get here with interrupts enabled;
+ * check it with the asm equivalent of WARN_ON
+ */
+ mfmsr r6
+ andi. r0,r6,MSR_EE
+1: tdnei r0,0
+.section __bug_table,"a"
+ .llong 1b,__LINE__ + 0x1000000, 1f, 2f
+.previous
+.section .rodata,"a"
+1: .asciz __FILE__
+2: .asciz "enter_rtas"
+.previous
+
+ /* Unfortunately, the stack pointer and the MSR are also clobbered,
+ * so they are saved in the PACA which allows us to restore
+ * our original state after RTAS returns.
+ */
+ std r1,PACAR1(r13)
+ std r6,PACASAVEDMSR(r13)
+
+ /* Setup our real return addr */
+ SET_REG_TO_LABEL(r4,.rtas_return_loc)
+ SET_REG_TO_CONST(r9,KERNELBASE)
+ sub r4,r4,r9
+ mtlr r4
+
+ li r0,0
+ ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
+ andc r0,r6,r0
+
+ li r9,1
+ rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
+ ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
+ andc r6,r0,r9
+ ori r6,r6,MSR_RI
+ sync /* disable interrupts so SRR0/1 */
+ mtmsrd r0 /* don't get trashed */
+
+ SET_REG_TO_LABEL(r4,rtas)
+ ld r5,RTASENTRY(r4) /* get the rtas->entry value */
+ ld r4,RTASBASE(r4) /* get the rtas->base value */
+
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r6
+ rfid
+ b . /* prevent speculative execution */
+
+_STATIC(rtas_return_loc)
+ /* relocation is off at this point */
+ mfspr r4,SPRN_SPRG3 /* Get PACA */
+ SET_REG_TO_CONST(r5, KERNELBASE)
+ sub r4,r4,r5 /* RELOC the PACA base pointer */
+
+ mfmsr r6
+ li r0,MSR_RI
+ andc r6,r6,r0
+ sync
+ mtmsrd r6
+
+ ld r1,PACAR1(r4) /* Restore our SP */
+ LOADADDR(r3,.rtas_restore_regs)
+ ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
+
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+ rfid
+ b . /* prevent speculative execution */
+
+_STATIC(rtas_restore_regs)
+ /* relocation is on at this point */
+ REST_GPR(2, r1) /* Restore the TOC */
+ REST_GPR(13, r1) /* Restore paca */
+ REST_8GPRS(14, r1) /* Restore the non-volatiles */
+ REST_10GPRS(22, r1) /* ditto */
+
+ mfspr r13,SPRN_SPRG3
+
+ ld r4,_CCR(r1)
+ mtcr r4
+ ld r5,_CTR(r1)
+ mtctr r5
+ ld r6,_XER(r1)
+ mtspr SPRN_XER,r6
+ ld r7,_DAR(r1)
+ mtdar r7
+ ld r8,_DSISR(r1)
+ mtdsisr r8
+ ld r9,_SRR0(r1)
+ mtsrr0 r9
+ ld r10,_SRR1(r1)
+ mtsrr1 r10
+
+ addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
+ ld r0,16(r1) /* get return address */
+
+ mtlr r0
+ blr /* return to caller */
+
+#endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+
+_GLOBAL(enter_prom)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
+
+ /* Because PROM is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * PROM might touch to the stack. (r0, r3-r13 are caller saved)
+ */
+ SAVE_8GPRS(2, r1)
+ SAVE_GPR(13, r1)
+ SAVE_8GPRS(14, r1)
+ SAVE_10GPRS(22, r1)
+ mfcr r4
+ std r4,_CCR(r1)
+ mfctr r5
+ std r5,_CTR(r1)
+ mfspr r6,SPRN_XER
+ std r6,_XER(r1)
+ mfdar r7
+ std r7,_DAR(r1)
+ mfdsisr r8
+ std r8,_DSISR(r1)
+ mfsrr0 r9
+ std r9,_SRR0(r1)
+ mfsrr1 r10
+ std r10,_SRR1(r1)
+ mfmsr r11
+ std r11,_MSR(r1)
+
+ /* Get the PROM entrypoint */
+ ld r0,GPR4(r1)
+ mtlr r0
+
+ /* Switch MSR to 32 bits mode
+ */
+ mfmsr r11
+ li r12,1
+ rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
+ andc r11,r11,r12
+ li r12,1
+ rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
+ andc r11,r11,r12
+ mtmsrd r11
+ isync
+
+ /* Restore arguments & enter PROM here... */
+ ld r3,GPR3(r1)
+ blrl
+
+	/* Just make sure that the top 32 bits of r1 didn't get
+	 * corrupted by OF
+ */
+ rldicl r1,r1,0,32
+
+ /* Restore the MSR (back to 64 bits) */
+ ld r0,_MSR(r1)
+ mtmsrd r0
+ isync
+
+ /* Restore other registers */
+ REST_GPR(2, r1)
+ REST_GPR(13, r1)
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+ ld r4,_CCR(r1)
+ mtcr r4
+ ld r5,_CTR(r1)
+ mtctr r5
+ ld r6,_XER(r1)
+ mtspr SPRN_XER,r6
+ ld r7,_DAR(r1)
+ mtdar r7
+ ld r8,_DSISR(r1)
+ mtdsisr r8
+ ld r9,_SRR0(r1)
+ mtsrr0 r9
+ ld r10,_SRR1(r1)
+ mtsrr1 r10
+
+ addi r1,r1,PROM_FRAME_SIZE
+ ld r0,16(r1)
+ mtlr r0
+ blr
+
+#endif /* CONFIG_PPC_MULTIPLATFORM */
--- /dev/null
+/*
+ * This file contains miscellaneous low-level functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+ .text
+
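+/*
+ * __delay(count): busy-wait for 'count' iterations using the count
+ * register; returns immediately if count is zero.
+ */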
+ .align 5
+_GLOBAL(__delay)
+ cmpwi 0,r3,0
+ mtctr r3
+ beqlr
+1: bdnz 1b
+ blr
+
+/*
+ * Returns (address we're running at) - (address we were linked at)
+ * for use before the text and data are mapped to KERNELBASE.
+ */
+_GLOBAL(reloc_offset)
+ mflr r0
+	bl	1f
+1:	mflr	r3		/* r3 = run-time address of label 1 */
+	lis	r4,1b@ha
+	addi	r4,r4,1b@l	/* r4 = link-time address of label 1 */
+ subf r3,r4,r3
+ mtlr r0
+ blr
+
+/*
+ * add_reloc_offset(x) returns x + reloc_offset().
+ */
+_GLOBAL(add_reloc_offset)
+ mflr r0
+ bl 1f
+1: mflr r5
+ lis r4,1b@ha
+ addi r4,r4,1b@l
+ subf r5,r4,r5
+ add r3,r3,r5
+ mtlr r0
+ blr
+
+/*
+ * sub_reloc_offset(x) returns x - reloc_offset().
+ */
+_GLOBAL(sub_reloc_offset)
+ mflr r0
+ bl 1f
+1: mflr r5
+ lis r4,1b@ha
+ addi r4,r4,1b@l
+ subf r5,r4,r5
+ subf r3,r5,r3
+ mtlr r0
+ blr
+
+/*
+ * reloc_got2 runs through the .got2 section adding an offset
+ * to each entry.
+ */
+_GLOBAL(reloc_got2)
+ mflr r11
+ lis r7,__got2_start@ha
+ addi r7,r7,__got2_start@l
+ lis r8,__got2_end@ha
+ addi r8,r8,__got2_end@l
+ subf r8,r7,r8
+ srwi. r8,r8,2
+ beqlr
+ mtctr r8
+ bl 1f
+1: mflr r0
+ lis r4,1b@ha
+ addi r4,r4,1b@l
+ subf r0,r4,r0
+ add r7,r0,r7
+2: lwz r0,0(r7)
+ add r0,r0,r3
+ stw r0,0(r7)
+ addi r7,r7,4
+ bdnz 2b
+ mtlr r11
+ blr
+
+/*
+ * identify_cpu is called with r3 = data offset and r4 = CPU number;
+ * it doesn't change r3.
+ */
+_GLOBAL(identify_cpu)
+ addis r8,r3,cpu_specs@ha
+ addi r8,r8,cpu_specs@l
+ mfpvr r7
+1:
+ lwz r5,CPU_SPEC_PVR_MASK(r8)
+ and r5,r5,r7
+ lwz r6,CPU_SPEC_PVR_VALUE(r8)
+ cmplw 0,r6,r5
+ beq 1f
+ addi r8,r8,CPU_SPEC_ENTRY_SIZE
+ b 1b
+1:
+ addis r6,r3,cur_cpu_spec@ha
+ addi r6,r6,cur_cpu_spec@l
+ sub r8,r8,r3
+ stw r8,0(r6)
+ blr
+
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nops over sections of code that don't apply to this CPU.
+ * r3 = data offset (not changed)
+ */
+_GLOBAL(do_cpu_ftr_fixups)
+ /* Get CPU 0 features */
+ addis r6,r3,cur_cpu_spec@ha
+ addi r6,r6,cur_cpu_spec@l
+ lwz r4,0(r6)
+ add r4,r4,r3
+ lwz r4,CPU_SPEC_FEATURES(r4)
+
+ /* Get the fixup table */
+ addis r6,r3,__start___ftr_fixup@ha
+ addi r6,r6,__start___ftr_fixup@l
+ addis r7,r3,__stop___ftr_fixup@ha
+ addi r7,r7,__stop___ftr_fixup@l
+
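+	/* Each fixup table entry is four words: feature mask, expected
+	 * value, and the start and end addresses of the section to patch. */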
+ /* Do the fixup */
+1: cmplw 0,r6,r7
+ bgelr
+ addi r6,r6,16
+ lwz r8,-16(r6) /* mask */
+ and r8,r8,r4
+ lwz r9,-12(r6) /* value */
+ cmplw 0,r8,r9
+ beq 1b
+ lwz r8,-8(r6) /* section begin */
+ lwz r9,-4(r6) /* section end */
+ subf. r9,r8,r9
+ beq 1b
+ /* write nops over the section of code */
+ /* todo: if large section, add a branch at the start of it */
+ srwi r9,r9,2
+ mtctr r9
+ add r8,r8,r3
+ lis r0,0x60000000@h /* nop */
+3: stw r0,0(r8)
+ andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+ beq 2f
+ dcbst 0,r8 /* suboptimal, but simpler */
+ sync
+ icbi 0,r8
+2: addi r8,r8,4
+ bdnz 3b
+ sync /* additional sync needed on g4 */
+ isync
+ b 1b
+
+/*
+ * call_setup_cpu - call the setup_cpu function for this cpu
+ * r3 = data offset, r24 = cpu number
+ *
+ * Setup function is called with:
+ * r3 = data offset
+ * r4 = ptr to CPU spec (relocated)
+ */
+_GLOBAL(call_setup_cpu)
+ addis r4,r3,cur_cpu_spec@ha
+ addi r4,r4,cur_cpu_spec@l
+ lwz r4,0(r4)
+ add r4,r4,r3
+ lwz r5,CPU_SPEC_SETUP(r4)
+ cmpi 0,r5,0
+ add r5,r5,r3
+ beqlr
+ mtctr r5
+ bctr
+
+#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
+
+/* This gets called by via-pmu.c to switch the PLL selection
+ * on 750fx CPUs. This function should really be moved to some
+ * other place (as should most of the cpufreq code in via-pmu).
+ */
+_GLOBAL(low_choose_750fx_pll)
+ /* Clear MSR:EE */
+ mfmsr r7
+ rlwinm r0,r7,0,17,15
+ mtmsr r0
+
+ /* If switching to PLL1, disable HID0:BTIC */
+ cmplwi cr0,r3,0
+ beq 1f
+ mfspr r5,SPRN_HID0
+ rlwinm r5,r5,0,27,25
+ sync
+ mtspr SPRN_HID0,r5
+ isync
+ sync
+
+1:
+ /* Calc new HID1 value */
+	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
+	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from the parameter */
+	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS (could I have used rlwimi here?) */
+ or r4,r4,r5
+ mtspr SPRN_HID1,r4
+
+ /* Store new HID1 image */
+ rlwinm r6,r1,0,0,18
+ lwz r6,TI_CPU(r6)
+ slwi r6,r6,2
+ addis r6,r6,nap_save_hid1@ha
+ stw r4,nap_save_hid1@l(r6)
+
+ /* If switching to PLL0, enable HID0:BTIC */
+ cmplwi cr0,r3,0
+ bne 1f
+ mfspr r5,SPRN_HID0
+ ori r5,r5,HID0_BTIC
+ sync
+ mtspr SPRN_HID0,r5
+ isync
+ sync
+
+1:
+ /* Return */
+ mtmsr r7
+ blr
+
+_GLOBAL(low_choose_7447a_dfs)
+ /* Clear MSR:EE */
+ mfmsr r7
+ rlwinm r0,r7,0,17,15
+ mtmsr r0
+
+ /* Calc new HID1 value */
+ mfspr r4,SPRN_HID1
+ insrwi r4,r3,1,9 /* insert parameter into bit 9 */
+ sync
+ mtspr SPRN_HID1,r4
+ sync
+ isync
+
+ /* Return */
+ mtmsr r7
+ blr
+
+#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
+
+/*
+ * complement mask on the msr then "or" some values on.
+ * _nmask_and_or_msr(nmask, value_to_or)
+ */
+_GLOBAL(_nmask_and_or_msr)
+ mfmsr r0 /* Get current msr */
+ andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
+ or r0,r0,r4 /* Or on the bits in r4 (second parm) */
+ SYNC /* Some chip revs have problems here... */
+ mtmsr r0 /* Update machine state */
+ isync
+ blr /* Done */
+
+
+/*
+ * Flush MMU TLB
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_40x)
+ sync /* Flush to memory before changing mapping */
+ tlbia
+ isync /* Flush shadow TLB */
+#elif defined(CONFIG_44x)
+ li r3,0
+ sync
+
+ /* Load high watermark */
+ lis r4,tlb_44x_hwater@ha
+ lwz r5,tlb_44x_hwater@l(r4)
+
+1: tlbwe r3,r3,PPC44x_TLB_PAGEID
+ addi r3,r3,1
+ cmpw 0,r3,r5
+ ble 1b
+
+ isync
+#elif defined(CONFIG_FSL_BOOKE)
+ /* Invalidate all entries in TLB0 */
+ li r3, 0x04
+ tlbivax 0,3
+ /* Invalidate all entries in TLB1 */
+ li r3, 0x0c
+ tlbivax 0,3
+ /* Invalidate all entries in TLB2 */
+ li r3, 0x14
+ tlbivax 0,3
+ /* Invalidate all entries in TLB3 */
+ li r3, 0x1c
+ tlbivax 0,3
+ msync
+#ifdef CONFIG_SMP
+ tlbsync
+#endif /* CONFIG_SMP */
+#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
+#if defined(CONFIG_SMP)
+ rlwinm r8,r1,0,0,18
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,10
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ sync
+ tlbia
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ sync
+ tlbia
+ sync
+#endif /* CONFIG_SMP */
+#endif /* ! defined(CONFIG_40x) */
+ blr
+
+/*
+ * Flush MMU TLB for a particular address
+ */
+_GLOBAL(_tlbie)
+#if defined(CONFIG_40x)
+ tlbsx. r3, 0, r3
+ bne 10f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
+ * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
+ * the TLB entry. */
+ tlbwe r3, r3, TLB_TAG
+ isync
+10:
+#elif defined(CONFIG_44x)
+ mfspr r4,SPRN_MMUCR
+ mfspr r5,SPRN_PID /* Get PID */
+ rlwimi r4,r5,0,24,31 /* Set TID */
+ mtspr SPRN_MMUCR,r4
+
+ tlbsx. r3, 0, r3
+ bne 10f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64,
+	 * which means bit 22 is clear. Since 22 is
+ * the V bit in the TLB_PAGEID, loading this
+ * value will invalidate the TLB entry.
+ */
+ tlbwe r3, r3, PPC44x_TLB_PAGEID
+ isync
+10:
+#elif defined(CONFIG_FSL_BOOKE)
+ rlwinm r4, r3, 0, 0, 19
+ ori r5, r4, 0x08 /* TLBSEL = 1 */
+ ori r6, r4, 0x10 /* TLBSEL = 2 */
+ ori r7, r4, 0x18 /* TLBSEL = 3 */
+ tlbivax 0, r4
+ tlbivax 0, r5
+ tlbivax 0, r6
+ tlbivax 0, r7
+ msync
+#if defined(CONFIG_SMP)
+ tlbsync
+#endif /* CONFIG_SMP */
+#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
+#if defined(CONFIG_SMP)
+ rlwinm r8,r1,0,0,18
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,11
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+ tlbie r3
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ tlbie r3
+ sync
+#endif /* CONFIG_SMP */
+#endif /* ! CONFIG_40x */
+ blr
+
+/*
+ * Flush instruction cache.
+ * This is a no-op on the 601.
+ */
+_GLOBAL(flush_instruction_cache)
+#if defined(CONFIG_8xx)
+ isync
+ lis r5, IDC_INVALL@h
+ mtspr SPRN_IC_CST, r5
+#elif defined(CONFIG_4xx)
+#ifdef CONFIG_403GCX
+ li r3, 512
+ mtctr r3
+ lis r4, KERNELBASE@h
+1: iccci 0, r4
+ addi r4, r4, 16
+ bdnz 1b
+#else
+ lis r3, KERNELBASE@h
+ iccci 0,r3
+#endif
+#elif defined(CONFIG_FSL_BOOKE)
+BEGIN_FTR_SECTION
+ mfspr r3,SPRN_L1CSR0
+ ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
+ /* msync; isync recommended here */
+ mtspr SPRN_L1CSR0,r3
+ isync
+ blr
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ mfspr r3,SPRN_L1CSR1
+ ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
+ mtspr SPRN_L1CSR1,r3
+#else
+ mfspr r3,SPRN_PVR
+ rlwinm r3,r3,16,16,31
+ cmpwi 0,r3,1
+ beqlr /* for 601, do nothing */
+ /* 603/604 processor - use invalidate-all bit in HID0 */
+ mfspr r3,SPRN_HID0
+ ori r3,r3,HID0_ICFI
+ mtspr SPRN_HID0,r3
+#endif /* CONFIG_8xx/4xx */
+ isync
+ blr
+
+/*
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ * This is a no-op on the 601.
+ *
+ * flush_icache_range(unsigned long start, unsigned long stop)
+ */
+_GLOBAL(flush_icache_range)
+BEGIN_FTR_SECTION
+ blr /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ li r5,L1_CACHE_LINE_SIZE-1
+ andc r3,r3,r5
+ subf r4,r3,r4
+ add r4,r4,r5
+ srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ beqlr
+ mtctr r4
+ mr r6,r3
+1: dcbst 0,r3
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ sync /* wait for dcbst's to get to ram */
+ mtctr r4
+2: icbi 0,r6
+ addi r6,r6,L1_CACHE_LINE_SIZE
+ bdnz 2b
+ sync /* additional sync needed on g4 */
+ isync
+ blr
+/*
+ * Write any modified data cache blocks out to memory.
+ * Does not invalidate the corresponding cache lines (especially for
+ * any corresponding instruction cache).
+ *
+ * clean_dcache_range(unsigned long start, unsigned long stop)
+ */
+_GLOBAL(clean_dcache_range)
+ li r5,L1_CACHE_LINE_SIZE-1
+ andc r3,r3,r5
+ subf r4,r3,r4
+ add r4,r4,r5
+ srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ beqlr
+ mtctr r4
+
+1: dcbst 0,r3
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ sync /* wait for dcbst's to get to ram */
+ blr
+
+/*
+ * Write any modified data cache blocks out to memory and invalidate them.
+ * Does not invalidate the corresponding instruction cache blocks.
+ *
+ * flush_dcache_range(unsigned long start, unsigned long stop)
+ */
+_GLOBAL(flush_dcache_range)
+ li r5,L1_CACHE_LINE_SIZE-1
+ andc r3,r3,r5
+ subf r4,r3,r4
+ add r4,r4,r5
+ srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ beqlr
+ mtctr r4
+
+1: dcbf 0,r3
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ sync /* wait for dcbf's to get to ram */
+ blr
+
+/*
+ * Like above, but invalidate the D-cache. This is used by the 8xx
+ * to invalidate the cache so the PPC core doesn't get stale data
+ * from the CPM (no cache snooping here :-).
+ *
+ * invalidate_dcache_range(unsigned long start, unsigned long stop)
+ */
+_GLOBAL(invalidate_dcache_range)
+ li r5,L1_CACHE_LINE_SIZE-1
+ andc r3,r3,r5
+ subf r4,r3,r4
+ add r4,r4,r5
+ srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
+ beqlr
+ mtctr r4
+
+1: dcbi 0,r3
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ sync /* wait for dcbi's to get to ram */
+ blr
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+/*
+ * 40x cores have 8K or 16K dcache and 32 byte line size.
+ * 44x has a 32K dcache and 32 byte line size.
+ * 8xx has 1, 2, 4, 8K variants.
+ * For now, cover the worst case of the 44x.
+ * Must be called with external interrupts disabled.
+ */
+#define CACHE_NWAYS 64
+#define CACHE_NLINES 16
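+/*
+ * With the values above and a 32-byte line, CACHE_NWAYS * CACHE_NLINES
+ * lines cover 64 * 16 * 32 = 32KB, i.e. one full 44x dcache.  Loading
+ * from twice that many lines therefore touches 64KB, twice the cache
+ * size, which is intended to displace (and hence write back) every
+ * dirty line.
+ */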
+
+_GLOBAL(flush_dcache_all)
+ li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
+ mtctr r4
+ lis r5, KERNELBASE@h
+1: lwz r3, 0(r5) /* Load one word from every line */
+ addi r5, r5, L1_CACHE_LINE_SIZE
+ bdnz 1b
+ blr
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+
+/*
+ * Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ * This is a no-op on the 601 which has a unified cache.
+ *
+ * void __flush_dcache_icache(void *page)
+ */
+_GLOBAL(__flush_dcache_icache)
+BEGIN_FTR_SECTION
+ blr /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ rlwinm r3,r3,0,0,19 /* Get page base address */
+ li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
+ mtctr r4
+ mr r6,r3
+0: dcbst 0,r3 /* Write line to ram */
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 0b
+ sync
+ mtctr r4
+1: icbi 0,r6
+ addi r6,r6,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ sync
+ isync
+ blr
+
+/*
+ * Flush a particular page from the data cache to RAM, identified
+ * by its physical address. We turn off the MMU so we can just use
+ * the physical address (this may be a highmem page without a kernel
+ * mapping).
+ *
+ * void __flush_dcache_icache_phys(unsigned long physaddr)
+ */
+_GLOBAL(__flush_dcache_icache_phys)
+BEGIN_FTR_SECTION
+ blr /* for 601, do nothing */
+END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
+ mfmsr r10
+ rlwinm r0,r10,0,28,26 /* clear DR */
+ mtmsr r0
+ isync
+ rlwinm r3,r3,0,0,19 /* Get page base address */
+ li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
+ mtctr r4
+ mr r6,r3
+0: dcbst 0,r3 /* Write line to ram */
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 0b
+ sync
+ mtctr r4
+1: icbi 0,r6
+ addi r6,r6,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ sync
+ mtmsr r10 /* restore DR */
+ isync
+ blr
+
+/*
+ * Clear pages using the dcbz instruction, which doesn't cause any
+ * memory traffic (except to write out any cache lines which get
+ * displaced). This only works on cacheable memory.
+ *
+ * void clear_pages(void *page, int order) ;
+ */
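+/*
+ * The loop count is (4096 / L1_CACHE_LINE_SIZE) << order, one iteration
+ * per cache line in the 2^order pages being cleared.  dcbz establishes
+ * a zeroed line in the cache without reading the underlying memory;
+ * the 8xx variant stores zero words instead, and its four stw's cover
+ * one line per iteration only because the loop increment assumes a
+ * 16-byte 8xx cache line.
+ */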
+_GLOBAL(clear_pages)
+ li r0,4096/L1_CACHE_LINE_SIZE
+ slw r0,r0,r4
+ mtctr r0
+#ifdef CONFIG_8xx
+ li r4, 0
+1: stw r4, 0(r3)
+ stw r4, 4(r3)
+ stw r4, 8(r3)
+ stw r4, 12(r3)
+#else
+1: dcbz 0,r3
+#endif
+ addi r3,r3,L1_CACHE_LINE_SIZE
+ bdnz 1b
+ blr
+
+/*
+ * Copy a whole page. We use the dcbz instruction on the destination
+ * to reduce memory traffic (it eliminates the unnecessary reads of
+ * the destination into cache). This requires that the destination
+ * is cacheable.
+ */
+#define COPY_16_BYTES \
+ lwz r6,4(r4); \
+ lwz r7,8(r4); \
+ lwz r8,12(r4); \
+ lwzu r9,16(r4); \
+ stw r6,4(r3); \
+ stw r7,8(r3); \
+ stw r8,12(r3); \
+ stwu r9,16(r3)
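+/*
+ * Rough shape of the non-8xx copy_page loop below: dcbt prefetches the
+ * source MAX_COPY_PREFETCH lines ahead of the copy, dcbz establishes
+ * each destination line in the cache without reading it from memory,
+ * and COPY_16_BYTES is repeated enough times to move one full
+ * L1_CACHE_LINE_SIZE line per iteration.  The loop body is run twice:
+ * first (with cr0.eq clear) for all but the last MAX_COPY_PREFETCH
+ * lines, then once more for the remainder, after which beqlr returns.
+ */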
+
+_GLOBAL(copy_page)
+ addi r3,r3,-4
+ addi r4,r4,-4
+
+#ifdef CONFIG_8xx
+ /* don't use prefetch on 8xx */
+ li r0,4096/L1_CACHE_LINE_SIZE
+ mtctr r0
+1: COPY_16_BYTES
+ bdnz 1b
+ blr
+
+#else /* not 8xx, we can prefetch */
+ li r5,4
+
+#if MAX_COPY_PREFETCH > 1
+ li r0,MAX_COPY_PREFETCH
+ li r11,4
+ mtctr r0
+11: dcbt r11,r4
+ addi r11,r11,L1_CACHE_LINE_SIZE
+ bdnz 11b
+#else /* MAX_COPY_PREFETCH == 1 */
+ dcbt r5,r4
+ li r11,L1_CACHE_LINE_SIZE+4
+#endif /* MAX_COPY_PREFETCH */
+ li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
+ crclr 4*cr0+eq
+2:
+ mtctr r0
+1:
+ dcbt r11,r4
+ dcbz r5,r3
+ COPY_16_BYTES
+#if L1_CACHE_LINE_SIZE >= 32
+ COPY_16_BYTES
+#if L1_CACHE_LINE_SIZE >= 64
+ COPY_16_BYTES
+ COPY_16_BYTES
+#if L1_CACHE_LINE_SIZE >= 128
+ COPY_16_BYTES
+ COPY_16_BYTES
+ COPY_16_BYTES
+ COPY_16_BYTES
+#endif
+#endif
+#endif
+ bdnz 1b
+ beqlr
+ crnot 4*cr0+eq,4*cr0+eq
+ li r0,MAX_COPY_PREFETCH
+ li r11,4
+ b 2b
+#endif /* CONFIG_8xx */
+
+/*
+ * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
+ * void atomic_set_mask(atomic_t mask, atomic_t *addr);
+ */
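+/*
+ * Both routines below use the usual lwarx/stwcx. sequence: lwarx loads
+ * the word and sets a reservation, stwcx. stores only if the
+ * reservation is still held, and cr0.eq is clear on failure so we
+ * simply retry.  PPC405_ERR77 expands to a workaround for a 405 core
+ * erratum before the stwcx. on affected configurations and to nothing
+ * otherwise.
+ */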
+_GLOBAL(atomic_clear_mask)
+10: lwarx r5,0,r4
+ andc r5,r5,r3
+ PPC405_ERR77(0,r4)
+ stwcx. r5,0,r4
+ bne- 10b
+ blr
+_GLOBAL(atomic_set_mask)
+10: lwarx r5,0,r4
+ or r5,r5,r3
+ PPC405_ERR77(0,r4)
+ stwcx. r5,0,r4
+ bne- 10b
+ blr
+
+/*
+ * I/O string operations
+ *
+ * insb(port, buf, len)
+ * outsb(port, buf, len)
+ * insw(port, buf, len)
+ * outsw(port, buf, len)
+ * insl(port, buf, len)
+ * outsl(port, buf, len)
+ * insw_ns(port, buf, len)
+ * outsw_ns(port, buf, len)
+ * insl_ns(port, buf, len)
+ * outsl_ns(port, buf, len)
+ *
+ * The *_ns versions don't do byte-swapping.
+ */
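+/*
+ * The swapping variants below use the byte-reversed load/store forms
+ * (lhbrx/sthbrx, lwbrx/stwbrx) so little-endian device data ends up in
+ * native byte order; eieio keeps the individual I/O accesses ordered
+ * with respect to one another.
+ */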
+_GLOBAL(_insb)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,1
+ blelr-
+00: lbz r5,0(r3)
+ eieio
+ stbu r5,1(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsb)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,1
+ blelr-
+00: lbzu r5,1(r4)
+ stb r5,0(r3)
+ eieio
+ bdnz 00b
+ blr
+
+_GLOBAL(_insw)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhbrx r5,0,r3
+ eieio
+ sthu r5,2(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsw)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhzu r5,2(r4)
+ eieio
+ sthbrx r5,0,r3
+ bdnz 00b
+ blr
+
+_GLOBAL(_insl)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwbrx r5,0,r3
+ eieio
+ stwu r5,4(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(_outsl)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwzu r5,4(r4)
+ stwbrx r5,0,r3
+ eieio
+ bdnz 00b
+ blr
+
+_GLOBAL(__ide_mm_insw)
+_GLOBAL(_insw_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhz r5,0(r3)
+ eieio
+ sthu r5,2(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(__ide_mm_outsw)
+_GLOBAL(_outsw_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhzu r5,2(r4)
+ sth r5,0(r3)
+ eieio
+ bdnz 00b
+ blr
+
+_GLOBAL(__ide_mm_insl)
+_GLOBAL(_insl_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwz r5,0(r3)
+ eieio
+ stwu r5,4(r4)
+ bdnz 00b
+ blr
+
+_GLOBAL(__ide_mm_outsl)
+_GLOBAL(_outsl_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwzu r5,4(r4)
+ stw r5,0(r3)
+ eieio
+ bdnz 00b
+ blr
+
+/*
+ * Extended precision shifts.
+ *
+ * Updated to be valid for shift counts from 0 to 63 inclusive.
+ * -- Gabriel
+ *
+ * R3/R4 hold the 64-bit value (R3 = MSW, R4 = LSW)
+ * R5 holds the shift count
+ * result is returned in R3/R4
+ *
+ * ashrdi3: arithmetic right shift (sign propagation)
+ * lshrdi3: logical right shift
+ * ashldi3: left shift
+ */
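+/*
+ * Worked example for __lshrdi3 with a count of 40 (r3 = MSW, r4 = LSW):
+ * srw/slw with an effective count >= 32 yield 0, so both "LSW >> count"
+ * and "MSW << (32-count)" vanish and only t2 = MSW >> (count-32), i.e.
+ * MSW >> 8, survives.  The result is {0, MSW >> 8}, as expected for a
+ * 64-bit logical right shift by 40.
+ */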
+_GLOBAL(__ashrdi3)
+ subfic r6,r5,32
+ srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
+ addi r7,r5,32 # could be xori, or addi with -32
+ slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
+ rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
+ sraw r7,r3,r7 # t2 = MSW >> (count-32)
+ or r4,r4,r6 # LSW |= t1
+ slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
+ sraw r3,r3,r5 # MSW = MSW >> count
+ or r4,r4,r7 # LSW |= t2
+ blr
+
+_GLOBAL(__ashldi3)
+ subfic r6,r5,32
+ slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
+ addi r7,r5,32 # could be xori, or addi with -32
+ srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
+ slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
+ or r3,r3,r6 # MSW |= t1
+ slw r4,r4,r5 # LSW = LSW << count
+ or r3,r3,r7 # MSW |= t2
+ blr
+
+_GLOBAL(__lshrdi3)
+ subfic r6,r5,32
+ srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
+ addi r7,r5,32 # could be xori, or addi with -32
+ slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
+ srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
+ or r4,r4,r6 # LSW |= t1
+ srw r3,r3,r5 # MSW = MSW >> count
+ or r4,r4,r7 # LSW |= t2
+ blr
+
+_GLOBAL(abs)
+ srawi r4,r3,31
+ xor r3,r3,r4
+ sub r3,r3,r4
+ blr
+
+_GLOBAL(_get_SP)
+ mr r3,r1 /* Close enough */
+ blr
+
+/*
+ * These are used in the alignment trap handler when emulating
+ * single-precision loads and stores.
+ * We restore and save the fpscr so the task gets the same result
+ * and exceptions as if the cpu had performed the load or store.
+ */
+
+#ifdef CONFIG_PPC_FPU
+_GLOBAL(cvt_fd)
+ lfd 0,-4(r5) /* load up fpscr value */
+ mtfsf 0xff,0
+ lfs 0,0(r3)
+ stfd 0,0(r4)
+ mffs 0 /* save new fpscr value */
+ stfd 0,-4(r5)
+ blr
+
+_GLOBAL(cvt_df)
+ lfd 0,-4(r5) /* load up fpscr value */
+ mtfsf 0xff,0
+ lfd 0,0(r3)
+ stfs 0,0(r4)
+ mffs 0 /* save new fpscr value */
+ stfd 0,-4(r5)
+ blr
+#endif
+
+/*
+ * Create a kernel thread
+ * kernel_thread(fn, arg, flags)
+ */
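+/*
+ * This simply issues the clone system call with CLONE_VM|CLONE_UNTRACED
+ * or'ed into the caller's flags.  In the parent, r3 comes back non-zero
+ * (the child's pid, or an error) and is returned; in the child, r3 is 0,
+ * so we build a fresh top-level stack frame, call fn(arg), and invoke
+ * sys_exit if fn ever returns.  PPC440EP_ERR42 is an erratum workaround
+ * macro which expands to nothing on unaffected configurations.
+ */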
+_GLOBAL(kernel_thread)
+ stwu r1,-16(r1)
+ stw r30,8(r1)
+ stw r31,12(r1)
+ mr r30,r3 /* function */
+ mr r31,r4 /* argument */
+ ori r3,r5,CLONE_VM /* flags */
+ oris r3,r3,CLONE_UNTRACED>>16
+ li r4,0 /* new sp (unused) */
+ li r0,__NR_clone
+ sc
+ cmpwi 0,r3,0 /* parent or child? */
+ bne 1f /* return if parent */
+ li r0,0 /* make top-level stack frame */
+ stwu r0,-16(r1)
+ mtlr r30 /* fn addr in lr */
+ mr r3,r31 /* load arg and call fn */
+ PPC440EP_ERR42
+ blrl
+ li r0,__NR_exit /* exit if function returns */
+ li r3,0
+ sc
+1: lwz r30,8(r1)
+ lwz r31,12(r1)
+ addi r1,r1,16
+ blr
+
+_GLOBAL(execve)
+ li r0,__NR_execve
+ sc
+ bnslr
+ neg r3,r3
+ blr
+
+/*
+ * This routine is just here to keep GCC happy - sigh...
+ */
+_GLOBAL(__main)
+ blr
--- /dev/null
+/*
+ * arch/powerpc/kernel/misc_64.S
+ *
+ * This file contains miscellaneous low-level functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+
+ .text
+
+/*
+ * Returns (address we are running at) - (address we were linked at)
+ * for use before the text and data are mapped to KERNELBASE.
+ */
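+/*
+ * The bl/mflr pair below puts the runtime address of label 1 in r3,
+ * while LOADADDR supplies its link-time address; the difference is the
+ * current relocation offset.
+ */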
+
+_GLOBAL(reloc_offset)
+ mflr r0
+ bl 1f
+1: mflr r3
+ LOADADDR(r4,1b)
+ subf r3,r4,r3
+ mtlr r0
+ blr
+
+/*
+ * add_reloc_offset(x) returns x + reloc_offset().
+ */
+_GLOBAL(add_reloc_offset)
+ mflr r0
+ bl 1f
+1: mflr r5
+ LOADADDR(r4,1b)
+ subf r5,r4,r5
+ add r3,r3,r5
+ mtlr r0
+ blr
+
+_GLOBAL(get_msr)
+ mfmsr r3
+ blr
+
+_GLOBAL(get_dar)
+ mfdar r3
+ blr
+
+_GLOBAL(get_srr0)
+ mfsrr0 r3
+ blr
+
+_GLOBAL(get_srr1)
+ mfsrr1 r3
+ blr
+
+_GLOBAL(get_sp)
+ mr r3,r1
+ blr
+
+#ifdef CONFIG_IRQSTACKS
+_GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,THREAD_SIZE-112(r3)
+ mr r1,r3
+ bl .__do_softirq
+ ld r1,0(r1)
+ ld r0,16(r1)
+ mtlr r0
+ blr
+
+_GLOBAL(call_handle_IRQ_event)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,THREAD_SIZE-112(r6)
+ mr r1,r6
+ bl .handle_IRQ_event
+ ld r1,0(r1)
+ ld r0,16(r1)
+ mtlr r0
+ blr
+#endif /* CONFIG_IRQSTACKS */
+
+ /*
+ * To be called by C code which needs to do some operations with MMU
+ * disabled. Note that interrupts have to be disabled by the caller
+ * prior to calling us. The code called _MUST_ be in the RMO of course
+ * and part of the linear mapping as we don't attempt to translate the
+ * stack pointer at all. The function is called with the stack switched
+ * to this CPU's emergency stack.
+ *
+ * prototype is void *call_with_mmu_off(void *func, void *data);
+ *
+ * the called function is expected to be of the form
+ *
+ * void *called(void *data);
+ */
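+/*
+ * A hypothetical caller might look like this (sketch only, the names
+ * are invented for illustration):
+ *
+ *	static void *dart_op(void *data)
+ *	{
+ *		... touch hardware through the linear mapping ...
+ *		return ret;
+ *	}
+ *
+ *	local_irq_disable();
+ *	ret = call_with_mmu_off(dart_op, data);
+ *
+ * Note that r3 on entry is the function descriptor address; the code
+ * entry point is loaded from its first doubleword below, and the callee
+ * is assumed to share our TOC.
+ */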
+_GLOBAL(call_with_mmu_off)
+ mflr r0 /* get link, save it on stackframe */
+ std r0,16(r1)
+ mr r5,r1 /* save old stack ptr */
+ ld r1,PACAEMERGSP(r13) /* get emerg. stack */
+ subi r1,r1,STACK_FRAME_OVERHEAD
+ std r0,16(r1) /* save link on emerg. stack */
+ std r5,0(r1) /* save old stack ptr in backchain */
+ ld r3,0(r3) /* get to real function ptr (assume same TOC) */
+ bl 2f /* we need LR to return, continue at label 2 */
+
+ ld r0,16(r1) /* we return here from the call, get LR and */
+ ld r1,0(r1) /* .. old stack ptr */
+ mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
+ mfmsr r4
+ ori r4,r4,MSR_IR|MSR_DR
+ mtspr SPRN_SRR1,r4
+ rfid
+
+2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
+ mr r3,r4 /* get parameter */
+ mfmsr r0
+ ori r0,r0,MSR_IR|MSR_DR
+ xori r0,r0,MSR_IR|MSR_DR
+ mtspr SPRN_SRR1,r0
+ rfid
+
+
+ .section ".toc","aw"
+PPC64_CACHES:
+ .tc ppc64_caches[TC],ppc64_caches
+ .section ".text"
+
+/*
+ * Write any modified data cache blocks out to memory
+ * and invalidate the corresponding instruction cache blocks.
+ *
+ * flush_icache_range(unsigned long start, unsigned long stop)
+ *
+ * flush all bytes from start through stop-1 inclusive
+ */
+
+_KPROBE(__flush_icache_range)
+
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ * and in some cases i-cache and d-cache line sizes differ from
+ * each other.
+ */
+ ld r10,PPC64_CACHES@toc(r2)
+ lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+1: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 1b
+ sync
+
+/* Now invalidate the instruction cache */
+
+ lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5
+ lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+2: icbi 0,r6
+ add r6,r6,r7
+ bdnz 2b
+ isync
+ blr
+ .previous .text
+/*
+ * Like above, but only do the D-cache.
+ *
+ * flush_dcache_range(unsigned long start, unsigned long stop)
+ *
+ * flush all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(flush_dcache_range)
+
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ */
+ ld r10,PPC64_CACHES@toc(r2)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+0: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ blr
+
+/*
+ * Like above, but works on non-mapped physical addresses.
+ * Use only for non-LPAR setups! It also assumes real mode
+ * is cacheable. Used for flushing out the DART before using
+ * it as uncacheable memory.
+ *
+ * flush_dcache_phys_range(unsigned long start, unsigned long stop)
+ *
+ * flush all bytes from start to stop-1 inclusive
+ */
+_GLOBAL(flush_dcache_phys_range)
+ ld r10,PPC64_CACHES@toc(r2)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mfmsr r5 /* Disable MMU Data Relocation */
+ ori r0,r5,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsr r0
+ sync
+ isync
+ mtctr r8
+0: dcbst 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ isync
+ mtmsr r5 /* Re-enable MMU Data Relocation */
+ sync
+ isync
+ blr
+
+_GLOBAL(flush_inval_dcache_range)
+ ld r10,PPC64_CACHES@toc(r2)
+ lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
+ addi r5,r7,-1
+ andc r6,r3,r5 /* round low to line bdy */
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
+ srw. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ sync
+ isync
+ mtctr r8
+0: dcbf 0,r6
+ add r6,r6,r7
+ bdnz 0b
+ sync
+ isync
+ blr
+
+
+/*
+ * Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ * void __flush_dcache_icache(void *page)
+ */
+_GLOBAL(__flush_dcache_icache)
+/*
+ * Flush the data cache to memory
+ *
+ * Different systems have different cache line sizes
+ */
+
+/* Flush the dcache */
+ ld r7,PPC64_CACHES@toc(r2)
+ clrrdi r3,r3,PAGE_SHIFT /* Page align */
+ lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
+ lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
+ mr r6,r3
+ mtctr r4
+0: dcbst 0,r6
+ add r6,r6,r5
+ bdnz 0b
+ sync
+
+/* Now invalidate the icache */
+
+ lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
+ lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
+ mtctr r4
+1: icbi 0,r3
+ add r3,r3,r5
+ bdnz 1b
+ isync
+ blr
+
+/*
+ * I/O string operations
+ *
+ * insb(port, buf, len)
+ * outsb(port, buf, len)
+ * insw(port, buf, len)
+ * outsw(port, buf, len)
+ * insl(port, buf, len)
+ * outsl(port, buf, len)
+ * insw_ns(port, buf, len)
+ * outsw_ns(port, buf, len)
+ * insl_ns(port, buf, len)
+ * outsl_ns(port, buf, len)
+ *
+ * The *_ns versions don't do byte-swapping.
+ */
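+/*
+ * The "twi 0,r5,0; isync" after each load loop below is the usual
+ * PowerPC I/O completion idiom: the twi (with a TO field of 0) never
+ * traps but creates a dependency on the value just loaded, and the
+ * isync then prevents later instructions from running until that last
+ * load has actually completed.
+ */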
+_GLOBAL(_insb)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,1
+ blelr-
+00: lbz r5,0(r3)
+ eieio
+ stbu r5,1(r4)
+ bdnz 00b
+ twi 0,r5,0
+ isync
+ blr
+
+_GLOBAL(_outsb)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,1
+ blelr-
+00: lbzu r5,1(r4)
+ stb r5,0(r3)
+ bdnz 00b
+ sync
+ blr
+
+_GLOBAL(_insw)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhbrx r5,0,r3
+ eieio
+ sthu r5,2(r4)
+ bdnz 00b
+ twi 0,r5,0
+ isync
+ blr
+
+_GLOBAL(_outsw)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhzu r5,2(r4)
+ sthbrx r5,0,r3
+ bdnz 00b
+ sync
+ blr
+
+_GLOBAL(_insl)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwbrx r5,0,r3
+ eieio
+ stwu r5,4(r4)
+ bdnz 00b
+ twi 0,r5,0
+ isync
+ blr
+
+_GLOBAL(_outsl)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwzu r5,4(r4)
+ stwbrx r5,0,r3
+ bdnz 00b
+ sync
+ blr
+
+/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
+_GLOBAL(_insw_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhz r5,0(r3)
+ eieio
+ sthu r5,2(r4)
+ bdnz 00b
+ twi 0,r5,0
+ isync
+ blr
+
+/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
+_GLOBAL(_outsw_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,2
+ blelr-
+00: lhzu r5,2(r4)
+ sth r5,0(r3)
+ bdnz 00b
+ sync
+ blr
+
+_GLOBAL(_insl_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwz r5,0(r3)
+ eieio
+ stwu r5,4(r4)
+ bdnz 00b
+ twi 0,r5,0
+ isync
+ blr
+
+_GLOBAL(_outsl_ns)
+ cmpwi 0,r5,0
+ mtctr r5
+ subi r4,r4,4
+ blelr-
+00: lwzu r5,4(r4)
+ stw r5,0(r3)
+ bdnz 00b
+ sync
+ blr
+
+
+_GLOBAL(cvt_fd)
+ lfd 0,0(r5) /* load up fpscr value */
+ mtfsf 0xff,0
+ lfs 0,0(r3)
+ stfd 0,0(r4)
+ mffs 0 /* save new fpscr value */
+ stfd 0,0(r5)
+ blr
+
+_GLOBAL(cvt_df)
+ lfd 0,0(r5) /* load up fpscr value */
+ mtfsf 0xff,0
+ lfd 0,0(r3)
+ stfs 0,0(r4)
+ mffs 0 /* save new fpscr value */
+ stfd 0,0(r5)
+ blr
+
+/*
+ * identify_cpu: identify the current CPU and call its setup_cpu function
+ * In: r3 = base of the cpu_specs array
+ * r4 = address of cur_cpu_spec
+ * r5 = relocation offset
+ */
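+/*
+ * The loop below walks the cpu_specs array, comparing (PVR & pvr_mask)
+ * with pvr_value for each entry; the array is expected to end with a
+ * catch-all entry so a match is always found.  The matched entry's
+ * link-time address is stored in cur_cpu_spec, and its cpu_setup
+ * routine is tail-called through its function descriptor, with both
+ * addresses adjusted by the relocation offset in r5.
+ */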
+_GLOBAL(identify_cpu)
+ mfpvr r7
+1:
+ lwz r8,CPU_SPEC_PVR_MASK(r3)
+ and r8,r8,r7
+ lwz r9,CPU_SPEC_PVR_VALUE(r3)
+ cmplw 0,r9,r8
+ beq 1f
+ addi r3,r3,CPU_SPEC_ENTRY_SIZE
+ b 1b
+1:
+ sub r0,r3,r5
+ std r0,0(r4)
+ ld r4,CPU_SPEC_SETUP(r3)
+ add r4,r4,r5
+ ld r4,0(r4)
+ add r4,r4,r5
+ mtctr r4
+ /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
+ mr r4,r3
+ mr r3,r5
+ bctr
+
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nop's over sections of code that don't apply for this cpu.
+ * r3 = data offset (not changed)
+ */
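+/*
+ * Each __ftr_fixup entry is 32 bytes: a feature mask, the value the
+ * masked CPU features must equal, and the start and end addresses of
+ * the code section the entry covers.  If the masked features do not
+ * match, that section is overwritten with nops (0x60000000), and each
+ * patched word is flushed to the icache when the CPU has split
+ * instruction and data caches.
+ */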
+_GLOBAL(do_cpu_ftr_fixups)
+ /* Get CPU 0 features */
+ LOADADDR(r6,cur_cpu_spec)
+ sub r6,r6,r3
+ ld r4,0(r6)
+ sub r4,r4,r3
+ ld r4,CPU_SPEC_FEATURES(r4)
+ /* Get the fixup table */
+ LOADADDR(r6,__start___ftr_fixup)
+ sub r6,r6,r3
+ LOADADDR(r7,__stop___ftr_fixup)
+ sub r7,r7,r3
+ /* Do the fixup */
+1: cmpld r6,r7
+ bgelr
+ addi r6,r6,32
+ ld r8,-32(r6) /* mask */
+ and r8,r8,r4
+ ld r9,-24(r6) /* value */
+ cmpld r8,r9
+ beq 1b
+ ld r8,-16(r6) /* section begin */
+ ld r9,-8(r6) /* section end */
+ subf. r9,r8,r9
+ beq 1b
+ /* write nops over the section of code */
+ /* todo: if large section, add a branch at the start of it */
+ srwi r9,r9,2
+ mtctr r9
+ sub r8,r8,r3
+ lis r0,0x60000000@h /* nop */
+3: stw r0,0(r8)
+ andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+ beq 2f
+ dcbst 0,r8 /* suboptimal, but simpler */
+ sync
+ icbi 0,r8
+2: addi r8,r8,4
+ bdnz 3b
+ sync /* additional sync needed on g4 */
+ isync
+ b 1b
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+/*
+ * Do an IO access in real mode
+ */
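+/*
+ * Rough sequence (the HID4 bit set here is assumed to be the
+ * 970-family control that makes real-mode data accesses
+ * cache-inhibited): turn off data relocation, set that HID4 bit (the
+ * rldicl by 32 lets ori reach the upper half of the register), flush
+ * the SLB, perform the single byte access, then undo everything in
+ * reverse order.  real_writeb below uses the same sequence for a store.
+ */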
+_GLOBAL(real_readb)
+ mfmsr r7
+ ori r0,r7,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsrd r0
+ sync
+ isync
+ mfspr r6,SPRN_HID4
+ rldicl r5,r6,32,0
+ ori r5,r5,0x100
+ rldicl r5,r5,32,0
+ sync
+ mtspr SPRN_HID4,r5
+ isync
+ slbia
+ isync
+ lbz r3,0(r3)
+ sync
+ mtspr SPRN_HID4,r6
+ isync
+ slbia
+ isync
+ mtmsrd r7
+ sync
+ isync
+ blr
+
+ /*
+ * Do an IO access in real mode
+ */
+_GLOBAL(real_writeb)
+ mfmsr r7
+ ori r0,r7,MSR_DR
+ xori r0,r0,MSR_DR
+ sync
+ mtmsrd r0
+ sync
+ isync
+ mfspr r6,SPRN_HID4
+ rldicl r5,r6,32,0
+ ori r5,r5,0x100
+ rldicl r5,r5,32,0
+ sync
+ mtspr SPRN_HID4,r5
+ isync
+ slbia
+ isync
+ stb r3,0(r4)
+ sync
+ mtspr SPRN_HID4,r6
+ isync
+ slbia
+ isync
+ mtmsrd r7
+ sync
+ isync
+ blr
+#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
+
+/*
+ * Create a kernel thread
+ * kernel_thread(fn, arg, flags)
+ */
+_GLOBAL(kernel_thread)
+ std r29,-24(r1)
+ std r30,-16(r1)
+ stdu r1,-STACK_FRAME_OVERHEAD(r1)
+ mr r29,r3
+ mr r30,r4
+ ori r3,r5,CLONE_VM /* flags */
+ oris r3,r3,(CLONE_UNTRACED>>16)
+ li r4,0 /* new sp (unused) */
+ li r0,__NR_clone
+ sc
+ cmpdi 0,r3,0 /* parent or child? */
+ bne 1f /* return if parent */
+ li r0,0
+ stdu r0,-STACK_FRAME_OVERHEAD(r1)
+ ld r2,8(r29) /* fn is a function descriptor: load its TOC */
+ ld r29,0(r29) /* ... and its entry point */
+ mtlr r29 /* fn addr in lr */
+ mr r3,r30 /* load arg and call fn */
+ blrl
+ li r0,__NR_exit /* exit after child exits */
+ li r3,0
+ sc
+1: addi r1,r1,STACK_FRAME_OVERHEAD
+ ld r29,-24(r1)
+ ld r30,-16(r1)
+ blr
+
+/*
+ * disable_kernel_fp()
+ * Disable the FPU.
+ */
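+/*
+ * The two rldicl's below rotate the MSR so that MSR_FP lands in bit 0,
+ * clear it via the rotate-and-mask, and rotate back; the net effect is
+ * the original MSR with only the FP bit cleared, which is then written
+ * back with mtmsrd.
+ */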
+_GLOBAL(disable_kernel_fp)
+ mfmsr r3
+ rldicl r0,r3,(63-MSR_FP_LG),1
+ rldicl r3,r0,(MSR_FP_LG+1),0
+ mtmsrd r3 /* disable use of fpu now */
+ isync
+ blr
+
+#ifdef CONFIG_ALTIVEC
+
+#if 0 /* this has no callers for now */
+/*
+ * disable_kernel_altivec()
+ * Disable the VMX.
+ */
+_GLOBAL(disable_kernel_altivec)
+ mfmsr r3
+ rldicl r0,r3,(63-MSR_VEC_LG),1
+ rldicl r3,r0,(MSR_VEC_LG+1),0
+ mtmsrd r3 /* disable use of VMX now */
+ isync
+ blr
+#endif /* 0 */
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+ mfmsr r5
+ oris r5,r5,MSR_VEC@h
+ mtmsrd r5 /* enable use of VMX now */
+ isync
+ cmpdi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ ld r5,PT_REGS(r3)
+ cmpdi 0,r5,0
+ SAVE_32VRS(0,r4,r3)
+ mfvscr vr0
+ li r4,THREAD_VSCR
+ stvx vr0,r4,r3
+ beq 1f
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r3,MSR_VEC@h
+ andc r4,r4,r3 /* disable altivec for previous task */
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ ld r4,last_task_used_altivec@got(r2)
+ std r5,0(r4)
+#endif /* CONFIG_SMP */
+ blr
+
+#endif /* CONFIG_ALTIVEC */
+
+_GLOBAL(__setup_cpu_power3)
+ blr
+
+_GLOBAL(execve)
+ li r0,__NR_execve
+ sc
+ bnslr
+ neg r3,r3
+ blr
+
+/* kexec_wait(phys_cpu)
+ *
+ * wait for the flag to change, indicating this kernel is going away and
+ * that the slave code for the next one has been copied to addresses 0 to 0x100.
+ *
+ * This is used by all slaves.
+ *
+ * Physical (hardware) cpu id should be in r3.
+ */
+_GLOBAL(kexec_wait)
+ bl 1f
+1: mflr r5
+ addi r5,r5,kexec_flag-1b
+
+99: HMT_LOW
+#ifdef CONFIG_KEXEC /* use no memory without kexec */
+ lwz r4,0(r5)
+ cmpwi 0,r4,0
+ bnea 0x60
+#endif
+ b 99b
+
+/* this can be in text because we won't change it until we are
+ * running in real mode anyway
+ */
+kexec_flag:
+ .long 0
+
+
+#ifdef CONFIG_KEXEC
+
+/* kexec_smp_wait(void)
+ *
+ * call with interrupts off
+ * note: this is a terminal routine, it does not save lr
+ *
+ * get phys id from paca
+ * set paca id to -1 to say we got here
+ * switch to real mode
+ * join other cpus in kexec_wait(phys_id)
+ */
+_GLOBAL(kexec_smp_wait)
+ lhz r3,PACAHWCPUID(r13)
+ li r4,-1
+ sth r4,PACAHWCPUID(r13) /* let others know we left */
+ bl real_mode
+ b .kexec_wait
+
+/*
+ * switch to real mode (turn mmu off)
+ * we use the early kernel trick that the hardware ignores bits
+ * 0 and 1 (big endian) of the effective address in real mode
+ *
+ * don't overwrite r3 here, it is live for kexec_wait above.
+ */
+real_mode: /* assume normal blr return */
+1: li r9,MSR_RI
+ li r10,MSR_DR|MSR_IR
+ mflr r11 /* return address to SRR0 */
+ mfmsr r12
+ andc r9,r12,r9
+ andc r10,r12,r10
+
+ mtmsrd r9,1
+ mtspr SPRN_SRR1,r10
+ mtspr SPRN_SRR0,r11
+ rfid
+
+
+/*
+ * kexec_sequence(newstack, start, image, control, clear_all())
+ *
+ * does the grungy work with stack switching and real mode switches
+ * also does simple calls to other code
+ */
+
+_GLOBAL(kexec_sequence)
+ mflr r0
+ std r0,16(r1)
+
+ /* switch stacks to newstack -- &kexec_stack.stack */
+ stdu r1,THREAD_SIZE-112(r3)
+ mr r1,r3
+
+ li r0,0
+ std r0,16(r1)
+
+ /* save regs for local vars on new stack.
+ * yes, we won't go back, but ...
+ */
+ std r31,-8(r1)
+ std r30,-16(r1)
+ std r29,-24(r1)
+ std r28,-32(r1)
+ std r27,-40(r1)
+ std r26,-48(r1)
+ std r25,-56(r1)
+
+ stdu r1,-112-64(r1)
+
+ /* save args into preserved regs */
+ mr r31,r3 /* newstack (both) */
+ mr r30,r4 /* start (real) */
+ mr r29,r5 /* image (virt) */
+ mr r28,r6 /* control, unused */
+ mr r27,r7 /* clear_all() fn desc */
+ mr r26,r8 /* spare */
+ lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
+
+ /* disable interrupts, we are overwriting kernel data next */
+ mfmsr r3
+ rlwinm r3,r3,0,17,15
+ mtmsrd r3,1
+
+ /* copy dest pages, flush whole dest image */
+ mr r3,r29
+ bl .kexec_copy_flush /* (image) */
+
+ /* turn off mmu */
+ bl real_mode
+
+ /* clear out hardware hash page table and tlb */
+ ld r5,0(r27) /* deref function descriptor */
+ mtctr r5
+ bctrl /* ppc_md.hash_clear_all(void); */
+
+/*
+ * kexec image calling is:
+ * the first 0x100 bytes of the entry point are copied to 0
+ *
+ * all slaves branch to slave = 0x60 (absolute)
+ * slave(phys_cpu_id);
+ *
+ * master goes to start = entry point
+ * start(phys_cpu_id, start, 0);
+ *
+ *
+ * a wrapper is needed to call existing kernels; here is an approximate
+ * description of one method:
+ *
+ * v2: (2.6.10)
+ * start will be near the boot_block (maybe 0x100 bytes before it?)
+ * it will have a 0x60, which will b to boot_block, where it will wait
+ * and 0 will store phys into struct boot-block and load r3 from there,
+ * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
+ *
+ * v1: (2.6.9)
+ * boot block will have all cpus scanning device tree to see if they
+ * are the boot cpu ?????
+ * other device tree differences (prop sizes, va vs pa, etc)...
+ */
+
+ /* copy 0x100 bytes starting at start to 0 */
+ li r3,0
+ mr r4,r30
+ li r5,0x100
+ li r6,0
+ bl .copy_and_flush /* (dest, src, copy limit, start offset) */
+1: /* assume normal blr return */
+
+ /* release other cpus to the new kernel secondary start at 0x60 */
+ mflr r5
+ li r6,1
+ stw r6,kexec_flag-1b(5)
+ mr r3,r25 # my phys cpu
+ mr r4,r30 # start, aka phys mem offset
+ mtlr 4
+ li r5,0
+ blr /* image->start(physid, image->start, 0); */
+#endif /* CONFIG_KEXEC */
--- /dev/null
+/*
+ * This file contains the table of syscall-handling functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/ppc_asm.h>
+
+#ifdef CONFIG_PPC64
+#define SYSCALL(func) .llong .sys_##func,.sys_##func
+#define SYSCALL32(func) .llong .sys_##func,.sys32_##func
+#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
+#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
+#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
+#define SYS32ONLY(func) .llong .sys_ni_syscall,.sys32_##func
+#define SYSX(f, f3264, f32) .llong .f,.f3264
+#else
+#define SYSCALL(func) .long sys_##func
+#define SYSCALL32(func) .long sys_##func
+#define COMPAT_SYS(func) .long sys_##func
+#define PPC_SYS(func) .long ppc_##func
+#define OLDSYS(func) .long sys_##func
+#define SYS32ONLY(func) .long sys_##func
+#define SYSX(f, f3264, f32) .long f32
+#endif
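+/*
+ * On a 64-bit kernel each slot in the table is thus a pair of 64-bit
+ * entries: the native handler followed by the handler used for 32-bit
+ * (compat) tasks.  The leading dots (.sys_foo) name the functions' text
+ * entry points rather than their descriptors, and the syscall entry
+ * code is expected to index the table with a 16-byte stride, picking
+ * the second entry for 32-bit tasks.  A 32-bit kernel emits a single
+ * 32-bit pointer per slot.
+ */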
+
+#ifdef CONFIG_PPC64
+#define sys_sigpending sys_ni_syscall
+#define sys_old_getrlimit sys_ni_syscall
+#else
+#define ppc_rtas sys_ni_syscall
+#endif
+
+_GLOBAL(sys_call_table)
+SYSCALL(restart_syscall)
+SYSCALL(exit)
+PPC_SYS(fork)
+SYSCALL(read)
+SYSCALL(write)
+COMPAT_SYS(open)
+SYSCALL(close)
+SYSCALL32(waitpid)
+SYSCALL32(creat)
+SYSCALL(link)
+SYSCALL(unlink)
+SYSCALL32(execve)
+SYSCALL(chdir)
+SYSX(sys64_time,compat_sys_time,sys_time)
+SYSCALL(mknod)
+SYSCALL(chmod)
+SYSCALL(lchown)
+SYSCALL(ni_syscall)
+OLDSYS(stat)
+SYSX(sys_lseek,ppc32_lseek,sys_lseek)
+SYSCALL(getpid)
+COMPAT_SYS(mount)
+SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
+SYSCALL(setuid)
+SYSCALL(getuid)
+COMPAT_SYS(stime)
+SYSCALL32(ptrace)
+SYSCALL(alarm)
+OLDSYS(fstat)
+SYSCALL32(pause)
+COMPAT_SYS(utime)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL32(access)
+SYSCALL32(nice)
+SYSCALL(ni_syscall)
+SYSCALL(sync)
+SYSCALL32(kill)
+SYSCALL(rename)
+SYSCALL32(mkdir)
+SYSCALL(rmdir)
+SYSCALL(dup)
+SYSCALL(pipe)
+COMPAT_SYS(times)
+SYSCALL(ni_syscall)
+SYSCALL(brk)
+SYSCALL(setgid)
+SYSCALL(getgid)
+SYSCALL(signal)
+SYSCALL(geteuid)
+SYSCALL(getegid)
+SYSCALL(acct)
+SYSCALL(umount)
+SYSCALL(ni_syscall)
+COMPAT_SYS(ioctl)
+COMPAT_SYS(fcntl)
+SYSCALL(ni_syscall)
+SYSCALL32(setpgid)
+SYSCALL(ni_syscall)
+SYS32ONLY(olduname)
+SYSCALL32(umask)
+SYSCALL(chroot)
+SYSCALL(ustat)
+SYSCALL(dup2)
+SYSCALL(getppid)
+SYSCALL(getpgrp)
+SYSCALL(setsid)
+SYS32ONLY(sigaction)
+SYSCALL(sgetmask)
+SYSCALL32(ssetmask)
+SYSCALL(setreuid)
+SYSCALL(setregid)
+SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
+COMPAT_SYS(sigpending)
+SYSCALL32(sethostname)
+COMPAT_SYS(setrlimit)
+COMPAT_SYS(old_getrlimit)
+COMPAT_SYS(getrusage)
+SYSCALL32(gettimeofday)
+SYSCALL32(settimeofday)
+SYSCALL32(getgroups)
+SYSCALL32(setgroups)
+SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
+SYSCALL(symlink)
+OLDSYS(lstat)
+SYSCALL32(readlink)
+SYSCALL(uselib)
+SYSCALL(swapon)
+SYSCALL(reboot)
+SYSX(sys_ni_syscall,old32_readdir,old_readdir)
+SYSCALL(mmap)
+SYSCALL(munmap)
+SYSCALL(truncate)
+SYSCALL(ftruncate)
+SYSCALL(fchmod)
+SYSCALL(fchown)
+SYSCALL32(getpriority)
+SYSCALL32(setpriority)
+SYSCALL(ni_syscall)
+COMPAT_SYS(statfs)
+COMPAT_SYS(fstatfs)
+SYSCALL(ni_syscall)
+COMPAT_SYS(socketcall)
+SYSCALL32(syslog)
+COMPAT_SYS(setitimer)
+COMPAT_SYS(getitimer)
+COMPAT_SYS(newstat)
+COMPAT_SYS(newlstat)
+COMPAT_SYS(newfstat)
+SYSX(sys_ni_syscall,sys32_uname,sys_uname)
+SYSCALL(ni_syscall)
+SYSCALL(vhangup)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS(wait4)
+SYSCALL(swapoff)
+SYSCALL32(sysinfo)
+SYSCALL32(ipc)
+SYSCALL(fsync)
+SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
+PPC_SYS(clone)
+SYSCALL32(setdomainname)
+SYSX(ppc64_newuname,ppc64_newuname,sys_newuname)
+SYSCALL(ni_syscall)
+SYSCALL32(adjtimex)
+SYSCALL(mprotect)
+SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
+SYSCALL(ni_syscall)
+SYSCALL(init_module)
+SYSCALL(delete_module)
+SYSCALL(ni_syscall)
+SYSCALL(quotactl)
+SYSCALL32(getpgid)
+SYSCALL(fchdir)
+SYSCALL(bdflush)
+SYSCALL32(sysfs)
+SYSX(ppc64_personality,ppc64_personality,sys_personality)
+SYSCALL(ni_syscall)
+SYSCALL(setfsuid)
+SYSCALL(setfsgid)
+SYSCALL(llseek)
+SYSCALL32(getdents)
+SYSX(sys_select,ppc32_select,ppc_select)
+SYSCALL(flock)
+SYSCALL(msync)
+COMPAT_SYS(readv)
+COMPAT_SYS(writev)
+SYSCALL32(getsid)
+SYSCALL(fdatasync)
+SYSCALL32(sysctl)
+SYSCALL(mlock)
+SYSCALL(munlock)
+SYSCALL(mlockall)
+SYSCALL(munlockall)
+SYSCALL32(sched_setparam)
+SYSCALL32(sched_getparam)
+SYSCALL32(sched_setscheduler)
+SYSCALL32(sched_getscheduler)
+SYSCALL(sched_yield)
+SYSCALL32(sched_get_priority_max)
+SYSCALL32(sched_get_priority_min)
+SYSCALL32(sched_rr_get_interval)
+COMPAT_SYS(nanosleep)
+SYSCALL(mremap)
+SYSCALL(setresuid)
+SYSCALL(getresuid)
+SYSCALL(ni_syscall)
+SYSCALL(poll)
+COMPAT_SYS(nfsservctl)
+SYSCALL(setresgid)
+SYSCALL(getresgid)
+SYSCALL32(prctl)
+SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
+SYSCALL32(rt_sigaction)
+SYSCALL32(rt_sigprocmask)
+SYSCALL32(rt_sigpending)
+COMPAT_SYS(rt_sigtimedwait)
+SYSCALL32(rt_sigqueueinfo)
+SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
+SYSCALL32(pread64)
+SYSCALL32(pwrite64)
+SYSCALL(chown)
+SYSCALL(getcwd)
+SYSCALL(capget)
+SYSCALL(capset)
+SYSCALL32(sigaltstack)
+SYSX(sys_sendfile64,sys32_sendfile,sys_sendfile)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+PPC_SYS(vfork)
+COMPAT_SYS(getrlimit)
+SYSCALL32(readahead)
+SYS32ONLY(mmap2)
+SYS32ONLY(truncate64)
+SYS32ONLY(ftruncate64)
+SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
+SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
+SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
+SYSCALL32(pciconfig_read)
+SYSCALL32(pciconfig_write)
+SYSCALL32(pciconfig_iobase)
+SYSCALL(ni_syscall)
+SYSCALL(getdents64)
+SYSCALL(pivot_root)
+SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
+SYSCALL(madvise)
+SYSCALL(mincore)
+SYSCALL(gettid)
+SYSCALL(tkill)
+SYSCALL(setxattr)
+SYSCALL(lsetxattr)
+SYSCALL(fsetxattr)
+SYSCALL(getxattr)
+SYSCALL(lgetxattr)
+SYSCALL(fgetxattr)
+SYSCALL(listxattr)
+SYSCALL(llistxattr)
+SYSCALL(flistxattr)
+SYSCALL(removexattr)
+SYSCALL(lremovexattr)
+SYSCALL(fremovexattr)
+COMPAT_SYS(futex)
+COMPAT_SYS(sched_setaffinity)
+COMPAT_SYS(sched_getaffinity)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYS32ONLY(sendfile64)
+COMPAT_SYS(io_setup)
+SYSCALL(io_destroy)
+COMPAT_SYS(io_getevents)
+COMPAT_SYS(io_submit)
+SYSCALL(io_cancel)
+SYSCALL(set_tid_address)
+SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
+SYSCALL(exit_group)
+SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
+SYSCALL(epoll_create)
+SYSCALL(epoll_ctl)
+SYSCALL(epoll_wait)
+SYSCALL(remap_file_pages)
+SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
+COMPAT_SYS(timer_settime)
+COMPAT_SYS(timer_gettime)
+SYSCALL(timer_getoverrun)
+SYSCALL(timer_delete)
+COMPAT_SYS(clock_settime)
+COMPAT_SYS(clock_gettime)
+COMPAT_SYS(clock_getres)
+COMPAT_SYS(clock_nanosleep)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
+SYSCALL32(tgkill)
+SYSCALL32(utimes)
+COMPAT_SYS(statfs64)
+COMPAT_SYS(fstatfs64)
+SYSX(sys_ni_syscall, ppc32_fadvise64_64, sys_fadvise64_64)
+PPC_SYS(rtas)
+OLDSYS(debug_setcontext)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+COMPAT_SYS(mbind)
+COMPAT_SYS(get_mempolicy)
+COMPAT_SYS(set_mempolicy)
+COMPAT_SYS(mq_open)
+SYSCALL(mq_unlink)
+COMPAT_SYS(mq_timedsend)
+COMPAT_SYS(mq_timedreceive)
+COMPAT_SYS(mq_notify)
+COMPAT_SYS(mq_getsetattr)
+COMPAT_SYS(kexec_load)
+SYSCALL32(add_key)
+SYSCALL32(request_key)
+COMPAT_SYS(keyctl)
+COMPAT_SYS(waitid)
+SYSCALL32(ioprio_set)
+SYSCALL32(ioprio_get)
+SYSCALL(inotify_init)
+SYSCALL(inotify_add_watch)
+SYSCALL(inotify_rm_watch)