Commit | Line | Data |
---|---|---|
14cf11af | 1 | /* |
14cf11af PM |
2 | * PowerPC version |
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
4 | * | |
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | |
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | |
7 | * Adapted for Power Macintosh by Paul Mackerras. | |
8 | * Low-level exception handlers and MMU support | |
9 | * rewritten by Paul Mackerras. | |
10 | * Copyright (C) 1996 Paul Mackerras. | |
11 | * | |
12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | |
13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | |
14 | * | |
0ebc4cda BH |
15 | * This file contains the entry point for the 64-bit kernel along |
16 | * with some early initialization code common to all 64-bit powerpc | |
17 | * variants. | |
14cf11af PM |
18 | * |
19 | * This program is free software; you can redistribute it and/or | |
20 | * modify it under the terms of the GNU General Public License | |
21 | * as published by the Free Software Foundation; either version | |
22 | * 2 of the License, or (at your option) any later version. | |
23 | */ | |
24 | ||
14cf11af | 25 | #include <linux/threads.h> |
b5bbeb23 | 26 | #include <asm/reg.h> |
14cf11af PM |
27 | #include <asm/page.h> |
28 | #include <asm/mmu.h> | |
14cf11af PM |
29 | #include <asm/ppc_asm.h> |
30 | #include <asm/asm-offsets.h> | |
31 | #include <asm/bug.h> | |
32 | #include <asm/cputable.h> | |
33 | #include <asm/setup.h> | |
34 | #include <asm/hvcall.h> | |
c43a55ff | 35 | #include <asm/iseries/lpar_map.h> |
6cb7bfeb | 36 | #include <asm/thread_info.h> |
3f639ee8 | 37 | #include <asm/firmware.h> |
16a15a30 | 38 | #include <asm/page_64.h> |
945feb17 | 39 | #include <asm/irqflags.h> |
14cf11af | 40 | |
0ebc4cda BH |
41 | /* The physical memory is laid out such that the secondary processor
42 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow | |
43 | * using the layout described in exceptions-64s.S | |
14cf11af PM |
44 | */ |
45 | ||
46 | /* | |
47 | * Entering into this code we make the following assumptions: | |
0ebc4cda BH |
48 | * |
49 | * For pSeries or server processors: | |
14cf11af PM |
50 | * 1. The MMU is off & open firmware is running in real mode. |
51 | * 2. The kernel is entered at __start | |
52 | * | |
53 | * For iSeries: | |
54 | * 1. The MMU is on (as it always is for iSeries) | |
55 | * 2. The kernel is entered at system_reset_iSeries | |
0ebc4cda BH |
56 | * |
57 | * For Book3E processors: | |
58 | * 1. The MMU is on running in AS0 in a state defined in ePAPR | |
59 | * 2. The kernel is entered at __start | |
14cf11af PM |
60 | */ |
61 | ||
62 | .text | |
63 | .globl _stext | |
64 | _stext: | |
14cf11af PM |
65 | _GLOBAL(__start) |
66 | /* NOP this out unconditionally */ | |
67 | BEGIN_FTR_SECTION | |
b85a046a | 68 | b .__start_initialization_multiplatform |
14cf11af | 69 | END_FTR_SECTION(0, 1) |
14cf11af PM |
70 | |
71 | /* Catch branch to 0 in real mode */ | |
72 | trap | |
73 | ||
1f6a93e4 PM |
74 | /* Secondary processors spin on this value until it becomes nonzero. |
75 | * When it does it contains the real address of the descriptor | |
76 | * of the function that the cpu should jump to to continue | |
77 | * initialization. | |
78 | */ | |
14cf11af PM |
79 | .globl __secondary_hold_spinloop |
80 | __secondary_hold_spinloop: | |
81 | .llong 0x0 | |
82 | ||
83 | /* Secondary processors write this value with their cpu # */ | |
84 | /* after they enter the spin loop immediately below. */ | |
85 | .globl __secondary_hold_acknowledge | |
86 | __secondary_hold_acknowledge: | |
87 | .llong 0x0 | |
88 | ||
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_CRASH_DUMP
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */
	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
#endif
	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 *
 * On entry, r3 holds this cpu's physical id (copied to r24 below).
 */
	.globl	__secondary_hold
__secondary_hold:
	/* Turn on MSR_RI (recoverable interrupt) early */
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our physical cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge-_stext(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop-_stext(0)
	cmpdi	0,r4,0
	beq	100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
	/* The spinloop value is a function-descriptor real address;
	 * jump to its text address with r3 = our physical cpu id.
	 */
	ld	r4,0(r4)		/* deref function descriptor */
	mtctr	r4
	mr	r3,r24
	bctr
#else
	BUG_OPCODE
#endif
	/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	/* 0x7265677368657265 is ASCII "regshere" (same bytes encoded
	 * in the symbol name ID_72656773_68657265). */
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#endif
/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(generic_secondary_smp_init)
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	/* get the TOC pointer (real address) */
	bl	.relative_toc

	/* Set up a paca value for this processor.  Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
	LOAD_REG_ADDR(r13, paca)	/* Get base vaddr of paca array	 */
	li	r5,0			/* logical cpu id		 */
1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca	 */
	cmpw	r6,r24			/* Compare to our id		 */
	beq	2f
	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss	 */
	addi	r5,r5,1
	cmpwi	r5,NR_CPUS
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3	 */
	b	.kexec_wait		/* next kernel might do better	 */

2:	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	 */
	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5
3:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */

#ifndef CONFIG_SMP
	b	3b			/* Never go on non-SMP		 */
#else
	cmpwi	0,r23,0
	beq	3b			/* Loop until told to go	 */

	sync				/* order paca.run and cur_cpu_spec */

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r23,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r23,0			/* no restore handler -> skip	 */
	beq	4f
	ld	r23,0(r23)		/* deref function descriptor	 */
	mtctr	r23
	bctrl

4:	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start
#endif
/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 *
 * Returns straight away (beqlr) if IR/DR are already clear;
 * otherwise returns to the caller via rfid with IR/DR cleared.
 * Clobbers r0, r3, r4.
 */
_STATIC(__mmu_off)
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR	/* r0 = translation bits currently set */
	beqlr				/* MMU already off: nothing to do */
	mflr	r4
	andc	r3,r3,r0		/* clear IR and DR in the MSR image */
	mtspr	SPRN_SRR0,r4		/* rfid target = our return address */
	mtspr	SPRN_SRR1,r3		/* rfid MSR = translation off */
	sync
	rfid
	b	.	/* prevent speculative execution */

/*
 * Here is our main kernel entry point. We support currently 2 kinds of
 * entries depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
	/* Make sure we are running in 64 bits mode */
	bl	.enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	.relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	.__boot_from_prom	/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16		/* keep the PVR version field */
	cmpwi	r0,0x39			/* 970 */
	beq	1f
	cmpwi	r0,0x3c			/* 970FX */
	beq	1f
	cmpwi	r0,0x44			/* 970MP */
	beq	1f
	cmpwi	r0,0x45			/* 970GX */
	bne	2f
1:	bl	.__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	.__mmu_off
	b	.__after_prom_start
/*
 * Open Firmware client-interface entry: hand control to prom_init
 * with the "legacy" parameters in r3...r7 and r26 = our current
 * runtime base address (computed by the caller).  Never returns.
 */
_INIT_STATIC(__boot_from_prom)
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr	r1,r1,0,59		/* clear the low 4 bits of r1 */

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	.relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	.prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap
/*
 * Copy the kernel to its linked (or load-time, for kdump) address.
 * On entry r26 = runtime base address of _stext as computed earlier.
 */
_STATIC(__after_prom_start)
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
#ifdef CONFIG_CRASH_DUMP
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1	/* kdump kernel ? - stay where we are */
	bne	1f
	add	r25,r25,r26
#endif
1:	mr	r3,r25
	bl	.relocate
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
	mr.	r4,r26			/* In some cases the loader may  */
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_CRASH_DUMP
/*
 * Check if the kernel has to be running as relocatable kernel based on the
 * variable __run_at_load, if it is set the kernel is treated as relocatable
 * kernel, otherwise it will be moved to PHYSICAL_START
 */
	lwz	r7,__run_at_load-_stext(r26)
	cmplwi	cr0,r7,1
	bne	3f

	li	r5,__end_interrupts - _stext	/* just copy interrupts */
	b	5f
3:
#endif
	lis	r5,(copy_to_here - _stext)@ha
	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */

	bl	.copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
	addi	r8,r8,(4f - _stext)@l	/* that we just made */
	mtctr	r8
	bctr

p_end:	.llong	_end - _stext

4:	/* Now copy the rest of the kernel up to _end */
	addis	r5,r26,(p_end - _stext)@ha
	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
5:	bl	.copy_and_flush		/* copy the rest */

9:	b	.start_here_multiplatform
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	blr

	.align 8
copy_to_here:
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors starts from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

/* Common PowerMac secondary entry; r24 = this cpu's number (set above). */
_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	.enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* get TOC pointer (real address) */
	bl	.relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl	.__restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca)		/* Get base vaddr of paca array	*/
	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r4		/* for this processor.		*/
	mtspr	SPRN_SPRG3,r13		/* Save vaddr of paca in SPRG3	*/

	/* Create a temp kernel stack for use before relocation is on.	*/
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/* Do early setup for that CPU (stab, slb, hash table pointer) */
	bl	.early_setup_secondary

	/* Initialize the kernel stack.  Just a repeat for iSeries. */
	LOAD_REG_ADDR(r3, current_set)
	sldi	r28,r24,3		/* get current_set[cpu#] */
	ldx	r1,r3,r28
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	std	r1,PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, .start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ori	r4,r4,MSR_EE
	li	r8,1
	stb	r8,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
	stb	r7,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	stb	r7,PACASOFTIRQEN(r13)

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
_GLOBAL(start_secondary_prolog)
	ld	r2,PACATOC(r13)		/* TOC (virtual address) */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	.start_secondary
	b	.
#endif

/*
 * Set MSR_SF|MSR_ISF in the MSR, switching the CPU to 64-bit mode.
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
	mfmsr	r11			/* grab the current MSR */
	li	r12,(MSR_SF | MSR_ISF)@highest
	sldi	r12,r12,48		/* shift mask into the top 16 bits */
	or	r11,r11,r12
	mtmsrd	r11
	isync
	blr
/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 */
_GLOBAL(relative_toc)
	mflr	r0			/* preserve caller's LR */
	bcl	20,31,$+4		/* branch-and-link to next insn */
0:	mflr	r9			/* r9 = runtime address of 0: */
	ld	r2,(p_toc - 0b)(r9)
	add	r2,r2,r9		/* r2 = runtime TOC + 0x8000 */
	mtlr	r0
	blr

p_toc:	.llong	__toc_start + 0x8000 - 0b
/*
 * This is where the main kernel code starts.
 * On entry: r31 = flat device-tree pointer saved earlier,
 * r25 = kernel runtime base (CONFIG_RELOCATABLE only).
 */
_INIT_STATIC(start_here_multiplatform)
	/* set up the TOC (real address) */
	bl	.relative_toc

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f			/* empty bss: skip the clear loop */
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:

	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* The following gets the stack set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOAD_REG_ADDR(r3,init_thread_union)

	/* set up a stack pointer */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.	*/

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup		/* also sets r13 and SPRG3 */

	/* Turn relocation on and continue at start_here_common */
	LOAD_REG_ADDR(r3, .start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

/* This is where all platforms converge execution */
_INIT_GLOBAL(start_here_common)
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
	li	r5,0
	stb	r5,PACASOFTIRQEN(r13)	/* Soft Disabled */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled on iSeries*/
	mtmsrd	r5
	li	r5,1
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACAHARDIRQEN(r13)	/* Hard Disabled on others */

	bl	.start_kernel

	/* Not reached */
	BUG_OPCODE
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE