 * low halfword of the address, but for Kdump we need the whole low
 * word; the offset-from-_stext form below covers both cases with a
 * single addi.
*/
-#ifdef CONFIG_CRASH_DUMP
#define LOAD_HANDLER(reg, label) \
- oris reg,reg,(label)@h; /* virt addr of handler ... */ \
- ori reg,reg,(label)@l; /* .. and the rest */
-#else
-#define LOAD_HANDLER(reg, label) \
- ori reg,reg,(label)@l; /* virt addr of handler ... */
-#endif
+ addi reg,reg,(label)-_stext; /* virt addr of handler ... */
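Taken together with the ld r12,PACAKBASE(r13) that precedes it in the
prologs below, this addi computes kernelbase + (label - _stext).  A rough
C sketch of the arithmetic, with purely illustrative names:

	/* Sketch only: "kernelbase" stands in for the paca->kernelbase
	 * value loaded via PACAKBASE. */
	static unsigned long handler_address(unsigned long kernelbase,
					     unsigned long label,
					     unsigned long stext)
	{
		/* addi carries a signed 16-bit immediate, so this offset
		 * must stay under 32k; see the placement comment on the
		 * handlers further down. */
		long offset = label - stext;

		return kernelbase + offset;
	}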
#define EXCEPTION_PROLOG_1(area) \
mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
std r9,area+EX_R13(r13); \
mfcr r9
-/*
- * Equal to EXCEPTION_PROLOG_PSERIES, except that it forces 64bit mode.
- * The firmware calls the registered system_reset_fwnmi and
- * machine_check_fwnmi handlers in 32bit mode if the cpu happens to run
- * a 32bit application at the time of the event.
- * This firmware bug is present on POWER4 and JS20.
- */
-#define EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(area, label) \
- EXCEPTION_PROLOG_1(area); \
- clrrdi r12,r13,32; /* get high part of &label */ \
- mfmsr r10; \
- /* force 64bit mode */ \
- li r11,5; /* MSR_SF_LG|MSR_ISF_LG */ \
- rldimi r10,r11,61,0; /* insert into top 3 bits */ \
- /* done 64bit mode */ \
- mfspr r11,SPRN_SRR0; /* save SRR0 */ \
- LOAD_HANDLER(r12,label) \
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
- mtspr SPRN_SRR0,r12; \
- mfspr r12,SPRN_SRR1; /* and SRR1 */ \
- mtspr SPRN_SRR1,r10; \
- rfid; \
- b . /* prevent speculative execution */
-
#define EXCEPTION_PROLOG_PSERIES(area, label) \
EXCEPTION_PROLOG_1(area); \
- clrrdi r12,r13,32; /* get high part of &label */ \
- mfmsr r10; \
+ ld r12,PACAKBASE(r13); /* get kernel base address */ \
+ ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label) \
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
mtspr SPRN_SRR0,r12; \
mfspr r12,SPRN_SRR1; /* and SRR1 */ \
mtspr SPRN_SRR1,r10; \
std r10,PACA_EXGEN+EX_R13(r13); \
std r11,PACA_EXGEN+EX_R11(r13); \
std r12,PACA_EXGEN+EX_R12(r13); \
- clrrdi r12,r13,32; /* get high part of &label */ \
- mfmsr r10; \
+ ld r12,PACAKBASE(r13); /* get kernel base address */ \
+ ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \
mfspr r11,SPRN_SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label##_common) \
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
mtspr SPRN_SRR0,r12; \
mfspr r12,SPRN_SRR1; /* and SRR1 */ \
mtspr SPRN_SRR1,r10; \
u16 paca_index; /* Logical processor number */
u64 kernel_toc; /* Kernel TOC address */
+ u64 kernelbase; /* Base address of kernel */
+ u64 kernel_msr; /* MSR while running in kernel */
u64 stab_real; /* Absolute address of segment table */
u64 stab_addr; /* Virtual address of segment table */
void *emergency_sp; /* pointer to emergency stack */
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
+ DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
+ DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
/* Catch branch to 0 in real mode */
trap
- /* Secondary processors spin on this value until it goes to 1. */
+ /* Secondary processors spin on this value until it becomes nonzero.
+ * When it does, it contains the real address of the descriptor of
+ * the function that the cpu should jump to in order to continue
+ * initialization.
+ */
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
.llong 0x0
* before the bulk of the kernel has been relocated. This code
* is relocated to physical address 0x60 before prom_init is run.
* All of it must fit below the first exception vector at 0x100.
+ * Use .globl here, not _GLOBAL, because we want __secondary_hold
+ * to be the actual text address, not a descriptor.
*/
-_GLOBAL(__secondary_hold)
+ .globl __secondary_hold
+__secondary_hold:
mfmsr r24
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
/* All secondary cpus wait here until told to start. */
100: ld r4,__secondary_hold_spinloop@l(0)
- cmpdi 0,r4,1
- bne 100b
+ cmpdi 0,r4,0
+ beq 100b
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
- LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
+ ld r4,0(r4) /* deref function descriptor */
mtctr r4
mr r3,r24
bctr
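The ld r4,0(r4) relies on the 64-bit ABI's function descriptors: the
address stored in the spinloop is that of a descriptor, whose first
doubleword is the code address.  An illustrative layout (this struct is
a sketch, not something defined in the patch):

	struct func_desc {
		unsigned long entry;	/* text address; what ld r4,0(r4) fetches */
		unsigned long toc;	/* TOC base for the function */
		unsigned long env;	/* environment pointer, unused here */
	};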
/*
* This is the start of the interrupt handlers for pSeries
* This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
*/
. = 0x100
.globl __start_interrupts
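For a relocatable kernel, the copy-down described above amounts,
conceptually, to something like the following (a sketch only; the actual
copy is done elsewhere in the boot path, and the linker symbols are
treated here as byte arrays):

	extern char _stext[], __end_interrupts[];

	/* Sketch: load_addr is wherever the kernel image was loaded. */
	static void copy_vectors_down(unsigned long load_addr)
	{
		memcpy((void *)0, (void *)load_addr,
		       __end_interrupts - _stext);
		/* The vector linked at _stext + 0x100 now sits at real
		 * 0x100, which is why branches in this section must not
		 * leave it. */
	}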
mfspr r10,SPRN_SPRG1
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
- b .slb_miss_realmode /* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ /*
+ * We can't just use a direct branch to .slb_miss_realmode
+ * because the distance from here to there depends on where
+ * the kernel ends up being put.
+ */
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
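The mfctr r11 is not decoration: reaching the handler via mtctr/bctr
clobbers CTR, so the old value rides along in r11 and is restored by the
matching mtctr r11 that this patch adds under CONFIG_RELOCATABLE in
slb_miss_realmode below.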
STD_EXCEPTION_PSERIES(0x400, instruction_access)
mfspr r10,SPRN_SPRG1
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
- b .slb_miss_realmode /* Rel. branch works in real mode */
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mr r9,r13
- mfmsr r10
mfspr r13,SPRN_SPRG3
mfspr r11,SPRN_SRR0
- clrrdi r12,r13,32
- oris r12,r12,system_call_common@h
- ori r12,r12,system_call_common@l
+ ld r12,PACAKBASE(r13)
+ ld r10,PACAKMSR(r13)
+ LOAD_HANDLER(r12, system_call_entry)
mtspr SPRN_SRR0,r12
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI
mfspr r12,SPRN_SRR1
mtspr SPRN_SRR1,r10
rfid
/*
* Code from here down to __end_handlers is invoked from the
- * exception prologs above.
+ * exception prologs above. Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
*/
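The 32k limit comes straight from the encoding: addi's immediate is a
signed 16-bit field, and since the offset from _stext is non-negative,
only 0 through 2^15 - 1 = 32767 bytes are reachable.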
/*** Common interrupt handlers ***/
STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
+ .align 7
+system_call_entry:
+ b system_call_common
+
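system_call_common itself lives in entry_64.S and may sit beyond that
32k window, so the system call prolog above loads the address of this
nearby stub instead; the stub then reaches system_call_common with a
plain relative branch, whose much larger range (roughly ±32MB) is ample.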
/*
* Here we have detected that the kernel stack pointer is bad.
* R9 contains the saved CR, r13 points to the paca,
*/
_GLOBAL(slb_miss_realmode)
mflr r10
+#ifdef CONFIG_RELOCATABLE
+ mtctr r11
+#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
mfspr r11,SPRN_SRR0
- clrrdi r10,r13,32
+ ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
- mfmsr r10
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI
+ ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
new_paca->lock_token = 0x8000;
new_paca->paca_index = cpu;
new_paca->kernel_toc = kernel_toc;
+ new_paca->kernelbase = KERNELBASE;
+ new_paca->kernel_msr = MSR_KERNEL;
new_paca->hw_cpu_id = 0xffff;
new_paca->slb_shadow_ptr = &slb_shadow[cpu];
new_paca->__current = &init_task;
*
* -- Cort
*/
-extern void __secondary_hold(void);
+extern char __secondary_hold;
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
= (void *) LOW_ADDR(__secondary_hold_spinloop);
unsigned long *acknowledge
= (void *) LOW_ADDR(__secondary_hold_acknowledge);
-#ifdef CONFIG_PPC64
- /* __secondary_hold is actually a descriptor, not the text address */
- unsigned long secondary_hold
- = __pa(*PTRRELOC((unsigned long *)__secondary_hold));
-#else
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
-#endif
prom_debug("prom_hold_cpus: start...\n");
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+extern unsigned long __secondary_hold_spinloop;
+extern void generic_secondary_smp_init(void);
+
void smp_release_cpus(void)
{
- extern unsigned long __secondary_hold_spinloop;
unsigned long *ptr;
DBG(" -> smp_release_cpus()\n");
* all now so they can start to spin on their individual paca
* spinloops. For non SMP kernels, the secondary cpus never get out
* of the common spinloop.
- * This is useless but harmless on iSeries, secondaries are already
- * waiting on their paca spinloops. */
+ */
ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
- PHYSICAL_START);
- *ptr = 1;
+ *ptr = __pa(generic_secondary_smp_init);
mb();
DBG(" <- smp_release_cpus()\n");