Commit | Line | Data |
---|---|---|
14cf11af | 1 | /* |
14cf11af PM |
2 | * PowerPC version |
3 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
4 | * | |
5 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | |
6 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | |
7 | * Adapted for Power Macintosh by Paul Mackerras. | |
8 | * Low-level exception handlers and MMU support | |
9 | * rewritten by Paul Mackerras. | |
10 | * Copyright (C) 1996 Paul Mackerras. | |
11 | * | |
12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | |
13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | |
14 | * | |
15 | * This file contains the low-level support and setup for the | |
16 | * PowerPC-64 platform, including trap and interrupt dispatch. | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | */ | |
23 | ||
14cf11af | 24 | #include <linux/threads.h> |
b5bbeb23 | 25 | #include <asm/reg.h> |
14cf11af PM |
26 | #include <asm/page.h> |
27 | #include <asm/mmu.h> | |
14cf11af PM |
28 | #include <asm/ppc_asm.h> |
29 | #include <asm/asm-offsets.h> | |
30 | #include <asm/bug.h> | |
31 | #include <asm/cputable.h> | |
32 | #include <asm/setup.h> | |
33 | #include <asm/hvcall.h> | |
c43a55ff | 34 | #include <asm/iseries/lpar_map.h> |
6cb7bfeb | 35 | #include <asm/thread_info.h> |
3f639ee8 | 36 | #include <asm/firmware.h> |
16a15a30 | 37 | #include <asm/page_64.h> |
f9ff0f30 | 38 | #include <asm/exception.h> |
945feb17 | 39 | #include <asm/irqflags.h> |
14cf11af PM |
40 | |
41 | /* | |
42 | * We lay out physical memory as follows:
43 | * 0x0000 - 0x00ff : Secondary processor spin code | |
44 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | |
45 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | |
46 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | |
47 | * 0x7000 - 0x7fff : FWNMI data area | |
48 | * 0x8000 - : Early init and support code | |
49 | */ | |
50 | ||
51 | /* | |
52 | * SPRG Usage | |
53 | * | |
54 | * Register Definition | |
55 | * | |
56 | * SPRG0 reserved for hypervisor | |
57 | * SPRG1 temp - used to save gpr | |
58 | * SPRG2 temp - used to save gpr | |
59 | * SPRG3 virt addr of paca | |
60 | */ | |
61 | ||
62 | /* | |
63 | * Entering into this code we make the following assumptions: | |
64 | * For pSeries: | |
65 | * 1. The MMU is off & open firmware is running in real mode. | |
66 | * 2. The kernel is entered at __start | |
67 | * | |
68 | * For iSeries: | |
69 | * 1. The MMU is on (as it always is for iSeries) | |
70 | * 2. The kernel is entered at system_reset_iSeries | |
71 | */ | |
72 | ||
73 | .text | |
74 | .globl _stext | |
75 | _stext: | |
14cf11af PM |
76 | _GLOBAL(__start) |
77 | /* NOP this out unconditionally */ | |
78 | BEGIN_FTR_SECTION | |
b85a046a | 79 | b .__start_initialization_multiplatform |
14cf11af | 80 | END_FTR_SECTION(0, 1) |
14cf11af PM |
81 | |
82 | /* Catch branch to 0 in real mode */ | |
83 | trap | |
84 | ||
14cf11af PM |
85 | /* Secondary processors spin on this value until it goes to 1. */ |
86 | .globl __secondary_hold_spinloop | |
87 | __secondary_hold_spinloop: | |
88 | .llong 0x0 | |
89 | ||
90 | /* Secondary processors write this value with their cpu # */ | |
91 | /* after they enter the spin loop immediately below. */ | |
92 | .globl __secondary_hold_acknowledge | |
93 | __secondary_hold_acknowledge: | |
94 | .llong 0x0 | |
95 | ||
1dce0e30 ME |
96 | #ifdef CONFIG_PPC_ISERIES |
97 | /* | |
98 | * At offset 0x20, there is a pointer to iSeries LPAR data. | |
99 | * This is required by the hypervisor | |
100 | */ | |
101 | . = 0x20 | |
102 | .llong hvReleaseData-KERNELBASE | |
103 | #endif /* CONFIG_PPC_ISERIES */ | |
104 | ||
14cf11af PM |
105 | . = 0x60 |
106 | /* | |
75423b7b GL |
107 | * The following code is used to hold secondary processors |
108 | * in a spin loop after they have entered the kernel, but | |
14cf11af PM |
109 | * before the bulk of the kernel has been relocated. This code |
110 | * is relocated to physical address 0x60 before prom_init is run. | |
111 | * All of it must fit below the first exception vector at 0x100. | |
112 | */ | |
113 | _GLOBAL(__secondary_hold) | |
114 | mfmsr r24 | |
115 | ori r24,r24,MSR_RI | |
116 | mtmsrd r24 /* RI on */ | |
117 | ||
f1870f77 | 118 | /* Grab our physical cpu number */ |
14cf11af PM |
119 | mr r24,r3 |
120 | ||
121 | /* Tell the master cpu we're here */ | |
122 | /* Relocation is off & we are located at an address less */ | |
123 | /* than 0x100, so we only need to grab the low-order offset. */
124 | std r24,__secondary_hold_acknowledge@l(0) | |
125 | sync | |
126 | ||
127 | /* All secondary cpus wait here until told to start. */ | |
128 | 100: ld r4,__secondary_hold_spinloop@l(0) | |
129 | cmpdi 0,r4,1 | |
130 | bne 100b | |
131 | ||
f1870f77 | 132 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) |
f39b7a55 | 133 | LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init) |
758438a7 | 134 | mtctr r4 |
14cf11af | 135 | mr r3,r24 |
758438a7 | 136 | bctr |
14cf11af PM |
137 | #else |
138 | BUG_OPCODE | |
139 | #endif | |
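/*
 * Illustrative C-level sketch of the hold/release protocol above (not part
 * of the build; the release side lives in the platform SMP bringup code,
 * and the function name below is just a stand-in for this assembly):
 *
 *	extern volatile u64 __secondary_hold_spinloop;
 *	extern volatile u64 __secondary_hold_acknowledge;
 *
 *	void secondary_hold(u64 phys_cpu_id)
 *	{
 *		__secondary_hold_acknowledge = phys_cpu_id; // tell the master we're here
 *		while (__secondary_hold_spinloop != 1)
 *			;                                   // spin until released
 *		generic_secondary_smp_init(phys_cpu_id);    // then continue bringup
 *	}
 */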
14cf11af PM |
140 | |
141 | /* This value is used to mark exception frames on the stack. */ | |
142 | .section ".toc","aw" | |
143 | exception_marker: | |
144 | .tc ID_72656773_68657265[TC],0x7265677368657265 | |
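/* The magic value 0x7265677368657265 is the ASCII string "regshere";
 * the TOC entry name above encodes the same bytes in hex. */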
145 | .text | |
146 | ||
14cf11af PM |
147 | /* |
148 | * This is the start of the interrupt handlers for pSeries | |
149 | * This code runs with relocation off. | |
14cf11af PM |
150 | */ |
151 | . = 0x100 | |
152 | .globl __start_interrupts | |
153 | __start_interrupts: | |
154 | ||
155 | STD_EXCEPTION_PSERIES(0x100, system_reset) | |
156 | ||
157 | . = 0x200 | |
158 | _machine_check_pSeries: | |
159 | HMT_MEDIUM | |
b5bbeb23 | 160 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
14cf11af PM |
161 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) |
162 | ||
163 | . = 0x300 | |
164 | .globl data_access_pSeries | |
165 | data_access_pSeries: | |
166 | HMT_MEDIUM | |
b5bbeb23 | 167 | mtspr SPRN_SPRG1,r13 |
14cf11af | 168 | BEGIN_FTR_SECTION |
b5bbeb23 PM |
169 | mtspr SPRN_SPRG2,r12 |
170 | mfspr r13,SPRN_DAR | |
171 | mfspr r12,SPRN_DSISR | |
14cf11af PM |
172 | srdi r13,r13,60 |
173 | rlwimi r13,r12,16,0x20 | |
174 | mfcr r12 | |
175 | cmpwi r13,0x2c | |
3ccfc65c | 176 | beq do_stab_bolted_pSeries |
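/* The three instructions above combine (DAR >> 60) with the segment-table
 * miss bit from the DSISR and compare against 0x2c: a fault on a
 * kernel-region (0xC...) address with no STE, which is sent straight to
 * the bolted-STAB handler. */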
14cf11af | 177 | mtcrf 0x80,r12 |
b5bbeb23 | 178 | mfspr r12,SPRN_SPRG2 |
14cf11af PM |
179 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
180 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | |
181 | ||
182 | . = 0x380 | |
183 | .globl data_access_slb_pSeries | |
184 | data_access_slb_pSeries: | |
185 | HMT_MEDIUM | |
b5bbeb23 | 186 | mtspr SPRN_SPRG1,r13 |
b5bbeb23 | 187 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ |
3c726f8d BH |
188 | std r3,PACA_EXSLB+EX_R3(r13) |
189 | mfspr r3,SPRN_DAR | |
14cf11af | 190 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
3c726f8d BH |
191 | mfcr r9 |
192 | #ifdef __DISABLED__ | |
193 | /* Keep that around for when we re-implement dynamic VSIDs */ | |
194 | cmpdi r3,0 | |
195 | bge slb_miss_user_pseries | |
196 | #endif /* __DISABLED__ */ | |
14cf11af PM |
197 | std r10,PACA_EXSLB+EX_R10(r13) |
198 | std r11,PACA_EXSLB+EX_R11(r13) | |
199 | std r12,PACA_EXSLB+EX_R12(r13) | |
3c726f8d BH |
200 | mfspr r10,SPRN_SPRG1 |
201 | std r10,PACA_EXSLB+EX_R13(r13) | |
b5bbeb23 | 202 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
3c726f8d | 203 | b .slb_miss_realmode /* Rel. branch works in real mode */ |
14cf11af PM |
204 | |
205 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | |
206 | ||
207 | . = 0x480 | |
208 | .globl instruction_access_slb_pSeries | |
209 | instruction_access_slb_pSeries: | |
210 | HMT_MEDIUM | |
b5bbeb23 | 211 | mtspr SPRN_SPRG1,r13 |
b5bbeb23 | 212 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ |
3c726f8d BH |
213 | std r3,PACA_EXSLB+EX_R3(r13) |
214 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | |
14cf11af | 215 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
3c726f8d BH |
216 | mfcr r9 |
217 | #ifdef __DISABLED__ | |
218 | /* Keep that around for when we re-implement dynamic VSIDs */ | |
219 | cmpdi r3,0 | |
220 | bge slb_miss_user_pseries | |
221 | #endif /* __DISABLED__ */ | |
14cf11af PM |
222 | std r10,PACA_EXSLB+EX_R10(r13) |
223 | std r11,PACA_EXSLB+EX_R11(r13) | |
224 | std r12,PACA_EXSLB+EX_R12(r13) | |
3c726f8d BH |
225 | mfspr r10,SPRN_SPRG1 |
226 | std r10,PACA_EXSLB+EX_R13(r13) | |
b5bbeb23 | 227 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
3c726f8d | 228 | b .slb_miss_realmode /* Rel. branch works in real mode */ |
14cf11af | 229 | |
d04c56f7 | 230 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) |
14cf11af PM |
231 | STD_EXCEPTION_PSERIES(0x600, alignment) |
232 | STD_EXCEPTION_PSERIES(0x700, program_check) | |
233 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | |
d04c56f7 | 234 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) |
14cf11af PM |
235 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) |
236 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | |
237 | ||
238 | . = 0xc00 | |
239 | .globl system_call_pSeries | |
240 | system_call_pSeries: | |
241 | HMT_MEDIUM | |
745a14cc PM |
242 | BEGIN_FTR_SECTION |
243 | cmpdi r0,0x1ebe | |
244 | beq- 1f | |
245 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |
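/* r0 == 0x1ebe is a magic "system call" number: on CPUs with real
 * little-endian support it selects the fast endianness-switch path at
 * label 1 below instead of the normal system call entry. */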
14cf11af PM |
246 | mr r9,r13 |
247 | mfmsr r10 | |
b5bbeb23 PM |
248 | mfspr r13,SPRN_SPRG3 |
249 | mfspr r11,SPRN_SRR0 | |
14cf11af PM |
250 | clrrdi r12,r13,32 |
251 | oris r12,r12,system_call_common@h | |
252 | ori r12,r12,system_call_common@l | |
b5bbeb23 | 253 | mtspr SPRN_SRR0,r12 |
14cf11af | 254 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI |
b5bbeb23 PM |
255 | mfspr r12,SPRN_SRR1 |
256 | mtspr SPRN_SRR1,r10 | |
14cf11af PM |
257 | rfid |
258 | b . /* prevent speculative execution */ | |
259 | ||
745a14cc PM |
260 | /* Fast LE/BE switch system call */ |
261 | 1: mfspr r12,SPRN_SRR1 | |
262 | xori r12,r12,MSR_LE | |
263 | mtspr SPRN_SRR1,r12 | |
264 | rfid /* return to userspace */ | |
265 | b . | |
266 | ||
14cf11af PM |
267 | STD_EXCEPTION_PSERIES(0xd00, single_step) |
268 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | |
269 | ||
270 | /* We need to deal with the Altivec unavailable exception
271 | * here, which is at 0xf20 and thus lands in the middle of the
272 | * prolog code of the performance monitor handler. A little
273 | * trickery is therefore necessary.
274 | */ | |
275 | . = 0xf00 | |
276 | b performance_monitor_pSeries | |
277 | ||
10e34392 MN |
278 | . = 0xf20 |
279 | b altivec_unavailable_pSeries | |
14cf11af | 280 | |
ce48b210 MN |
281 | . = 0xf40 |
282 | b vsx_unavailable_pSeries | |
283 | ||
acf7d768 BH |
284 | #ifdef CONFIG_CBE_RAS |
285 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | |
286 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af | 287 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) |
acf7d768 BH |
288 | #ifdef CONFIG_CBE_RAS |
289 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | |
290 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af | 291 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) |
acf7d768 BH |
292 | #ifdef CONFIG_CBE_RAS |
293 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | |
294 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af PM |
295 | |
296 | . = 0x3000 | |
297 | ||
298 | /*** pSeries interrupt support ***/ | |
299 | ||
300 | /* moved from 0xf00 */ | |
449d846d | 301 | STD_EXCEPTION_PSERIES(., performance_monitor) |
10e34392 | 302 | STD_EXCEPTION_PSERIES(., altivec_unavailable) |
ce48b210 | 303 | STD_EXCEPTION_PSERIES(., vsx_unavailable) |
d04c56f7 PM |
304 | |
305 | /* | |
306 | * An interrupt came in while soft-disabled; clear EE in SRR1, | |
307 | * clear paca->hard_enabled and return. | |
308 | */ | |
309 | masked_interrupt: | |
310 | stb r10,PACAHARDIRQEN(r13) | |
311 | mtcrf 0x80,r9 | |
312 | ld r9,PACA_EXGEN+EX_R9(r13) | |
313 | mfspr r10,SPRN_SRR1 | |
314 | rldicl r10,r10,48,1 /* clear MSR_EE */ | |
315 | rotldi r10,r10,16 | |
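/* The rldicl/rotldi pair above clears only MSR_EE: rotating left by 48
 * bits brings EE to bit 0, where the mask drops it; rotating left by a
 * further 16 bits completes a full 64-bit rotation, restoring the
 * original layout with EE now zero. */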
316 | mtspr SPRN_SRR1,r10 | |
317 | ld r10,PACA_EXGEN+EX_R10(r13) | |
318 | mfspr r13,SPRN_SPRG1 | |
319 | rfid | |
320 | b . | |
14cf11af PM |
321 | |
322 | .align 7 | |
3ccfc65c | 323 | do_stab_bolted_pSeries: |
14cf11af | 324 | mtcrf 0x80,r12 |
b5bbeb23 | 325 | mfspr r12,SPRN_SPRG2 |
14cf11af PM |
326 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) |
327 | ||
9a955167 PM |
328 | #ifdef CONFIG_PPC_PSERIES |
329 | /* | |
330 | * Vectors for the FWNMI option. Share common code. | |
331 | */ | |
332 | .globl system_reset_fwnmi | |
333 | .align 7 | |
334 | system_reset_fwnmi: | |
335 | HMT_MEDIUM | |
336 | mtspr SPRN_SPRG1,r13 /* save r13 */ | |
337 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | |
338 | ||
339 | .globl machine_check_fwnmi | |
340 | .align 7 | |
341 | machine_check_fwnmi: | |
342 | HMT_MEDIUM | |
343 | mtspr SPRN_SPRG1,r13 /* save r13 */ | |
344 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | |
345 | ||
346 | #endif /* CONFIG_PPC_PSERIES */ | |
347 | ||
348 | #ifdef __DISABLED__ | |
3c726f8d | 349 | /* |
3c726f8d BH |
350 | * This is used when the SLB miss handler has to go virtual,
351 | * which doesn't happen at the moment but will again once we
352 | * re-implement dynamic VSIDs for shared page tables.
353 | */ | |
3c726f8d BH |
354 | slb_miss_user_pseries: |
355 | std r10,PACA_EXGEN+EX_R10(r13) | |
356 | std r11,PACA_EXGEN+EX_R11(r13) | |
357 | std r12,PACA_EXGEN+EX_R12(r13) | |
358 | mfspr r10,SPRG1 | |
359 | ld r11,PACA_EXSLB+EX_R9(r13) | |
360 | ld r12,PACA_EXSLB+EX_R3(r13) | |
361 | std r10,PACA_EXGEN+EX_R13(r13) | |
362 | std r11,PACA_EXGEN+EX_R9(r13) | |
363 | std r12,PACA_EXGEN+EX_R3(r13) | |
364 | clrrdi r12,r13,32 | |
365 | mfmsr r10 | |
366 | mfspr r11,SRR0 /* save SRR0 */ | |
367 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ | |
368 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | |
369 | mtspr SRR0,r12 | |
370 | mfspr r12,SRR1 /* and SRR1 */ | |
371 | mtspr SRR1,r10 | |
372 | rfid | |
373 | b . /* prevent spec. execution */ | |
374 | #endif /* __DISABLED__ */ | |
375 | ||
9a955167 PM |
376 | .align 7 |
377 | .globl __end_interrupts | |
378 | __end_interrupts: | |
379 | ||
14cf11af | 380 | /* |
9a955167 PM |
381 | * Code from here down to __end_handlers is invoked from the |
382 | * exception prologs above. | |
14cf11af | 383 | */ |
9e4859ef | 384 | |
14cf11af PM |
385 | /*** Common interrupt handlers ***/ |
386 | ||
387 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | |
388 | ||
389 | /* | |
390 | * Machine check is different because we use a different | |
391 | * save area: PACA_EXMC instead of PACA_EXGEN. | |
392 | */ | |
393 | .align 7 | |
394 | .globl machine_check_common | |
395 | machine_check_common: | |
396 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | |
f39224a8 | 397 | FINISH_NAP |
14cf11af PM |
398 | DISABLE_INTS |
399 | bl .save_nvgprs | |
400 | addi r3,r1,STACK_FRAME_OVERHEAD | |
401 | bl .machine_check_exception | |
402 | b .ret_from_except | |
403 | ||
404 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | |
405 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | |
406 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | |
407 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | |
408 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | |
f39224a8 | 409 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) |
14cf11af PM |
410 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) |
411 | #ifdef CONFIG_ALTIVEC | |
412 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | |
413 | #else | |
414 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | |
415 | #endif | |
acf7d768 BH |
416 | #ifdef CONFIG_CBE_RAS |
417 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | |
418 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | |
419 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | |
420 | #endif /* CONFIG_CBE_RAS */ | |
14cf11af PM |
421 | |
422 | /* | |
423 | * Here we have detected that the kernel stack pointer is bad. | |
424 | * R9 contains the saved CR, r13 points to the paca, | |
425 | * r10 contains the (bad) kernel stack pointer, | |
426 | * r11 and r12 contain the saved SRR0 and SRR1. | |
427 | * We switch to using an emergency stack, save the registers there, | |
428 | * and call kernel_bad_stack(), which panics. | |
429 | */ | |
430 | bad_stack: | |
431 | ld r1,PACAEMERGSP(r13) | |
432 | subi r1,r1,64+INT_FRAME_SIZE | |
433 | std r9,_CCR(r1) | |
434 | std r10,GPR1(r1) | |
435 | std r11,_NIP(r1) | |
436 | std r12,_MSR(r1) | |
b5bbeb23 PM |
437 | mfspr r11,SPRN_DAR |
438 | mfspr r12,SPRN_DSISR | |
14cf11af PM |
439 | std r11,_DAR(r1) |
440 | std r12,_DSISR(r1) | |
441 | mflr r10 | |
442 | mfctr r11 | |
443 | mfxer r12 | |
444 | std r10,_LINK(r1) | |
445 | std r11,_CTR(r1) | |
446 | std r12,_XER(r1) | |
447 | SAVE_GPR(0,r1) | |
448 | SAVE_GPR(2,r1) | |
449 | SAVE_4GPRS(3,r1) | |
450 | SAVE_2GPRS(7,r1) | |
451 | SAVE_10GPRS(12,r1) | |
452 | SAVE_10GPRS(22,r1) | |
68730401 OJ |
453 | lhz r12,PACA_TRAP_SAVE(r13) |
454 | std r12,_TRAP(r1) | |
14cf11af PM |
455 | addi r11,r1,INT_FRAME_SIZE |
456 | std r11,0(r1) | |
457 | li r12,0 | |
458 | std r12,0(r11) | |
459 | ld r2,PACATOC(r13) | |
460 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
461 | bl .kernel_bad_stack | |
462 | b 1b | |
463 | ||
14cf11af PM |
464 | /* |
465 | * Here r13 points to the paca, r9 contains the saved CR, | |
466 | * SRR0 and SRR1 are saved in r11 and r12, | |
467 | * r9 - r13 are saved in paca->exgen. | |
468 | */ | |
469 | .align 7 | |
470 | .globl data_access_common | |
471 | data_access_common: | |
b5bbeb23 | 472 | mfspr r10,SPRN_DAR |
14cf11af | 473 | std r10,PACA_EXGEN+EX_DAR(r13) |
b5bbeb23 | 474 | mfspr r10,SPRN_DSISR |
14cf11af PM |
475 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
476 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | |
477 | ld r3,PACA_EXGEN+EX_DAR(r13) | |
478 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | |
479 | li r5,0x300 | |
480 | b .do_hash_page /* Try to handle as hpte fault */ | |
481 | ||
482 | .align 7 | |
483 | .globl instruction_access_common | |
484 | instruction_access_common: | |
485 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | |
486 | ld r3,_NIP(r1) | |
487 | andis. r4,r12,0x5820 | |
488 | li r5,0x400 | |
489 | b .do_hash_page /* Try to handle as hpte fault */ | |
490 | ||
3c726f8d BH |
491 | /* |
492 | * This is the common SLB miss user handler, used when going to virtual
493 | * mode for SLB misses; it is currently unused.
494 | */ | |
495 | #ifdef __DISABLED__ | |
496 | .align 7 | |
497 | .globl slb_miss_user_common | |
498 | slb_miss_user_common: | |
499 | mflr r10 | |
500 | std r3,PACA_EXGEN+EX_DAR(r13) | |
501 | stw r9,PACA_EXGEN+EX_CCR(r13) | |
502 | std r10,PACA_EXGEN+EX_LR(r13) | |
503 | std r11,PACA_EXGEN+EX_SRR0(r13) | |
504 | bl .slb_allocate_user | |
505 | ||
506 | ld r10,PACA_EXGEN+EX_LR(r13) | |
507 | ld r3,PACA_EXGEN+EX_R3(r13) | |
508 | lwz r9,PACA_EXGEN+EX_CCR(r13) | |
509 | ld r11,PACA_EXGEN+EX_SRR0(r13) | |
510 | mtlr r10 | |
511 | beq- slb_miss_fault | |
512 | ||
513 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | |
514 | beq- unrecov_user_slb | |
515 | mfmsr r10 | |
516 | ||
517 | .machine push | |
518 | .machine "power4" | |
519 | mtcrf 0x80,r9 | |
520 | .machine pop | |
521 | ||
522 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | |
523 | mtmsrd r10,1 | |
524 | ||
525 | mtspr SRR0,r11 | |
526 | mtspr SRR1,r12 | |
527 | ||
528 | ld r9,PACA_EXGEN+EX_R9(r13) | |
529 | ld r10,PACA_EXGEN+EX_R10(r13) | |
530 | ld r11,PACA_EXGEN+EX_R11(r13) | |
531 | ld r12,PACA_EXGEN+EX_R12(r13) | |
532 | ld r13,PACA_EXGEN+EX_R13(r13) | |
533 | rfid | |
534 | b . | |
535 | ||
536 | slb_miss_fault: | |
537 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | |
538 | ld r4,PACA_EXGEN+EX_DAR(r13) | |
539 | li r5,0 | |
540 | std r4,_DAR(r1) | |
541 | std r5,_DSISR(r1) | |
3ccfc65c | 542 | b handle_page_fault |
3c726f8d BH |
543 | |
544 | unrecov_user_slb: | |
545 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | |
546 | DISABLE_INTS | |
547 | bl .save_nvgprs | |
548 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
549 | bl .unrecoverable_exception | |
550 | b 1b | |
551 | ||
552 | #endif /* __DISABLED__ */ | |
553 | ||
554 | ||
555 | /* | |
556 | * r13 points to the PACA, r9 contains the saved CR, | |
557 | * r12 contains the saved SRR1; SRR0 is still ready for return
558 | * r3 has the faulting address | |
559 | * r9 - r13 are saved in paca->exslb. | |
560 | * r3 is saved in paca->slb_r3 | |
561 | * We assume we aren't going to take any exceptions during this procedure. | |
562 | */ | |
563 | _GLOBAL(slb_miss_realmode) | |
564 | mflr r10 | |
565 | ||
566 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | |
567 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | |
568 | ||
569 | bl .slb_allocate_realmode | |
570 | ||
571 | /* All done -- return from exception. */ | |
572 | ||
573 | ld r10,PACA_EXSLB+EX_LR(r13) | |
574 | ld r3,PACA_EXSLB+EX_R3(r13) | |
575 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | |
576 | #ifdef CONFIG_PPC_ISERIES | |
3f639ee8 | 577 | BEGIN_FW_FTR_SECTION |
3356bb9f DG |
578 | ld r11,PACALPPACAPTR(r13) |
579 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ | |
3f639ee8 | 580 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
3c726f8d BH |
581 | #endif /* CONFIG_PPC_ISERIES */ |
582 | ||
583 | mtlr r10 | |
584 | ||
585 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | |
320787c7 | 586 | beq- 2f |
3c726f8d BH |
587 | |
588 | .machine push | |
589 | .machine "power4" | |
590 | mtcrf 0x80,r9 | |
591 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | |
592 | .machine pop | |
593 | ||
594 | #ifdef CONFIG_PPC_ISERIES | |
3f639ee8 | 595 | BEGIN_FW_FTR_SECTION |
3c726f8d BH |
596 | mtspr SPRN_SRR0,r11 |
597 | mtspr SPRN_SRR1,r12 | |
3f639ee8 | 598 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
3c726f8d BH |
599 | #endif /* CONFIG_PPC_ISERIES */ |
600 | ld r9,PACA_EXSLB+EX_R9(r13) | |
601 | ld r10,PACA_EXSLB+EX_R10(r13) | |
602 | ld r11,PACA_EXSLB+EX_R11(r13) | |
603 | ld r12,PACA_EXSLB+EX_R12(r13) | |
604 | ld r13,PACA_EXSLB+EX_R13(r13) | |
605 | rfid | |
606 | b . /* prevent speculative execution */ | |
607 | ||
320787c7 PM |
608 | 2: |
609 | #ifdef CONFIG_PPC_ISERIES | |
610 | BEGIN_FW_FTR_SECTION | |
611 | b unrecov_slb | |
612 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | |
613 | #endif /* CONFIG_PPC_ISERIES */ | |
614 | mfspr r11,SPRN_SRR0 | |
615 | clrrdi r10,r13,32 | |
616 | LOAD_HANDLER(r10,unrecov_slb) | |
617 | mtspr SPRN_SRR0,r10 | |
618 | mfmsr r10 | |
619 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | |
620 | mtspr SPRN_SRR1,r10 | |
621 | rfid | |
622 | b . | |
623 | ||
3c726f8d BH |
624 | unrecov_slb: |
625 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | |
626 | DISABLE_INTS | |
627 | bl .save_nvgprs | |
628 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
629 | bl .unrecoverable_exception | |
630 | b 1b | |
631 | ||
14cf11af PM |
632 | .align 7 |
633 | .globl hardware_interrupt_common | |
634 | .globl hardware_interrupt_entry | |
635 | hardware_interrupt_common: | |
636 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | |
f39224a8 | 637 | FINISH_NAP |
14cf11af PM |
638 | hardware_interrupt_entry: |
639 | DISABLE_INTS | |
a416561b | 640 | BEGIN_FTR_SECTION |
cb2c9b27 | 641 | bl .ppc64_runlatch_on |
a416561b | 642 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) |
14cf11af PM |
643 | addi r3,r1,STACK_FRAME_OVERHEAD |
644 | bl .do_IRQ | |
645 | b .ret_from_except_lite | |
646 | ||
f39224a8 PM |
647 | #ifdef CONFIG_PPC_970_NAP |
648 | power4_fixup_nap: | |
649 | andc r9,r9,r10 | |
650 | std r9,TI_LOCAL_FLAGS(r11) | |
651 | ld r10,_LINK(r1) /* make idle task do the */ | |
652 | std r10,_NIP(r1) /* equivalent of a blr */ | |
653 | blr | |
654 | #endif | |
655 | ||
14cf11af PM |
656 | .align 7 |
657 | .globl alignment_common | |
658 | alignment_common: | |
b5bbeb23 | 659 | mfspr r10,SPRN_DAR |
14cf11af | 660 | std r10,PACA_EXGEN+EX_DAR(r13) |
b5bbeb23 | 661 | mfspr r10,SPRN_DSISR |
14cf11af PM |
662 | stw r10,PACA_EXGEN+EX_DSISR(r13) |
663 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | |
664 | ld r3,PACA_EXGEN+EX_DAR(r13) | |
665 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | |
666 | std r3,_DAR(r1) | |
667 | std r4,_DSISR(r1) | |
668 | bl .save_nvgprs | |
669 | addi r3,r1,STACK_FRAME_OVERHEAD | |
670 | ENABLE_INTS | |
671 | bl .alignment_exception | |
672 | b .ret_from_except | |
673 | ||
674 | .align 7 | |
675 | .globl program_check_common | |
676 | program_check_common: | |
677 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | |
678 | bl .save_nvgprs | |
679 | addi r3,r1,STACK_FRAME_OVERHEAD | |
680 | ENABLE_INTS | |
681 | bl .program_check_exception | |
682 | b .ret_from_except | |
683 | ||
684 | .align 7 | |
685 | .globl fp_unavailable_common | |
686 | fp_unavailable_common: | |
687 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | |
3ccfc65c | 688 | bne 1f /* if from user, just load it up */ |
14cf11af PM |
689 | bl .save_nvgprs |
690 | addi r3,r1,STACK_FRAME_OVERHEAD | |
691 | ENABLE_INTS | |
692 | bl .kernel_fp_unavailable_exception | |
693 | BUG_OPCODE | |
6f3d8e69 MN |
694 | 1: bl .load_up_fpu |
695 | b fast_exception_return | |
14cf11af | 696 | |
14cf11af PM |
697 | .align 7 |
698 | .globl altivec_unavailable_common | |
699 | altivec_unavailable_common: | |
700 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | |
701 | #ifdef CONFIG_ALTIVEC | |
702 | BEGIN_FTR_SECTION | |
6f3d8e69 MN |
703 | beq 1f |
704 | bl .load_up_altivec | |
705 | b fast_exception_return | |
706 | 1: | |
14cf11af PM |
707 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
708 | #endif | |
709 | bl .save_nvgprs | |
710 | addi r3,r1,STACK_FRAME_OVERHEAD | |
711 | ENABLE_INTS | |
712 | bl .altivec_unavailable_exception | |
713 | b .ret_from_except | |
714 | ||
9a955167 PM |
715 | .align 7 |
716 | .globl vsx_unavailable_common | |
717 | vsx_unavailable_common: | |
718 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) | |
719 | #ifdef CONFIG_VSX | |
720 | BEGIN_FTR_SECTION | |
721 | bne .load_up_vsx | |
722 | 1: | |
723 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
724 | #endif | |
725 | bl .save_nvgprs | |
726 | addi r3,r1,STACK_FRAME_OVERHEAD | |
727 | ENABLE_INTS | |
728 | bl .vsx_unavailable_exception | |
729 | b .ret_from_except | |
730 | ||
731 | .align 7 | |
732 | .globl __end_handlers | |
733 | __end_handlers: | |
734 | ||
735 | /* | |
736 | * Return from an exception with minimal checks. | |
737 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | |
738 | * If interrupts have been enabled, or anything has been | |
739 | * done that might have changed the scheduling status of | |
740 | * any task or sent any task a signal, you should use | |
741 | * ret_from_except or ret_from_except_lite instead of this. | |
742 | */ | |
743 | fast_exc_return_irq: /* restores irq state too */ | |
744 | ld r3,SOFTE(r1) | |
745 | TRACE_AND_RESTORE_IRQ(r3); | |
746 | ld r12,_MSR(r1) | |
747 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ | |
748 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ | |
749 | b 1f | |
750 | ||
751 | .globl fast_exception_return | |
752 | fast_exception_return: | |
753 | ld r12,_MSR(r1) | |
754 | 1: ld r11,_NIP(r1) | |
755 | andi. r3,r12,MSR_RI /* check if RI is set */ | |
756 | beq- unrecov_fer | |
757 | ||
758 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | |
759 | andi. r3,r12,MSR_PR | |
760 | beq 2f | |
761 | ACCOUNT_CPU_USER_EXIT(r3, r4) | |
762 | 2: | |
763 | #endif | |
764 | ||
765 | ld r3,_CCR(r1) | |
766 | ld r4,_LINK(r1) | |
767 | ld r5,_CTR(r1) | |
768 | ld r6,_XER(r1) | |
769 | mtcr r3 | |
770 | mtlr r4 | |
771 | mtctr r5 | |
772 | mtxer r6 | |
773 | REST_GPR(0, r1) | |
774 | REST_8GPRS(2, r1) | |
775 | ||
776 | mfmsr r10 | |
777 | rldicl r10,r10,48,1 /* clear EE */ | |
778 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ | |
779 | mtmsrd r10,1 | |
780 | ||
781 | mtspr SPRN_SRR1,r12 | |
782 | mtspr SPRN_SRR0,r11 | |
783 | REST_4GPRS(10, r1) | |
784 | ld r1,GPR1(r1) | |
785 | rfid | |
786 | b . /* prevent speculative execution */ | |
787 | ||
788 | unrecov_fer: | |
789 | bl .save_nvgprs | |
790 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
791 | bl .unrecoverable_exception | |
792 | b 1b | |
793 | ||
14cf11af PM |
794 | #ifdef CONFIG_ALTIVEC |
795 | /* | |
796 | * load_up_altivec(unused, unused, tsk) | |
797 | * Disable VMX for the task which had it previously, | |
798 | * and save its vector registers in its thread_struct. | |
799 | * Enables the VMX for use in the kernel on return. | |
800 | * On SMP we know the VMX is free, since we give it up every | |
801 | * switch (ie, no lazy save of the vector registers). | |
802 | * On entry: r13 == 'current' && last_task_used_altivec != 'current' | |
803 | */ | |
804 | _STATIC(load_up_altivec) | |
805 | mfmsr r5 /* grab the current MSR */ | |
806 | oris r5,r5,MSR_VEC@h | |
807 | mtmsrd r5 /* enable use of VMX now */ | |
808 | isync | |
809 | ||
810 | /* | |
811 | * For SMP, we don't do lazy VMX switching because it just gets too | |
812 | * horrendously complex, especially when a task switches from one CPU | |
813 | * to another. Instead we call giveup_altivec in switch_to.
814 | * VRSAVE isn't dealt with here, that is done in the normal context | |
815 | * switch code. Note that we could rely on vrsave value to eventually | |
816 | * avoid saving all of the VREGs here... | |
817 | */ | |
818 | #ifndef CONFIG_SMP | |
819 | ld r3,last_task_used_altivec@got(r2) | |
820 | ld r4,0(r3) | |
821 | cmpdi 0,r4,0 | |
822 | beq 1f | |
823 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | |
824 | addi r4,r4,THREAD | |
825 | SAVE_32VRS(0,r5,r4) | |
826 | mfvscr vr0 | |
827 | li r10,THREAD_VSCR | |
828 | stvx vr0,r10,r4 | |
829 | /* Disable VMX for last_task_used_altivec */ | |
830 | ld r5,PT_REGS(r4) | |
831 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
832 | lis r6,MSR_VEC@h | |
833 | andc r4,r4,r6 | |
834 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
835 | 1: | |
836 | #endif /* CONFIG_SMP */ | |
837 | /* Hack: if we get an altivec unavailable trap with VRSAVE | |
838 | * set to all zeros, we assume this is a broken application | |
839 | * that fails to set it properly, and thus we switch it to | |
840 | * all 1's | |
841 | */ | |
842 | mfspr r4,SPRN_VRSAVE | |
843 | cmpdi 0,r4,0 | |
844 | bne+ 1f | |
845 | li r4,-1 | |
846 | mtspr SPRN_VRSAVE,r4 | |
847 | 1: | |
848 | /* enable use of VMX after return */ | |
849 | ld r4,PACACURRENT(r13) | |
850 | addi r5,r4,THREAD /* Get THREAD */ | |
851 | oris r12,r12,MSR_VEC@h | |
852 | std r12,_MSR(r1) | |
853 | li r4,1 | |
854 | li r10,THREAD_VSCR | |
855 | stw r4,THREAD_USED_VR(r5) | |
856 | lvx vr0,r10,r5 | |
857 | mtvscr vr0 | |
858 | REST_32VRS(0,r4,r5) | |
859 | #ifndef CONFIG_SMP | |
860 | /* Update last_task_used_altivec to 'current' */
861 | subi r4,r5,THREAD /* Back to 'current' */ | |
862 | std r4,0(r3) | |
863 | #endif /* CONFIG_SMP */ | |
864 | /* restore registers and return */ | |
6f3d8e69 | 865 | blr |
14cf11af PM |
866 | #endif /* CONFIG_ALTIVEC */ |
867 | ||
ce48b210 MN |
868 | #ifdef CONFIG_VSX |
869 | /* | |
870 | * load_up_vsx(unused, unused, tsk) | |
871 | * Disable VSX for the task which had it previously, | |
872 | * and save its vector registers in its thread_struct. | |
873 | * Reuse the fp and vsx saves, but first check to see if they have | |
874 | * been saved already. | |
875 | * On entry: r13 == 'current' && last_task_used_vsx != 'current' | |
876 | */ | |
877 | _STATIC(load_up_vsx) | |
878 | /* Load FP and VSX registers if they haven't been done yet */ | |
879 | andi. r5,r12,MSR_FP | |
880 | beql+ load_up_fpu /* skip if already loaded */ | |
881 | andis. r5,r12,MSR_VEC@h | |
882 | beql+ load_up_altivec /* skip if already loaded */ | |
883 | ||
884 | #ifndef CONFIG_SMP | |
885 | ld r3,last_task_used_vsx@got(r2) | |
886 | ld r4,0(r3) | |
887 | cmpdi 0,r4,0 | |
888 | beq 1f | |
889 | /* Disable VSX for last_task_used_vsx */ | |
890 | addi r4,r4,THREAD | |
891 | ld r5,PT_REGS(r4) | |
892 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
893 | lis r6,MSR_VSX@h | |
894 | andc r6,r4,r6 | |
895 | std r6,_MSR-STACK_FRAME_OVERHEAD(r5) | |
896 | 1: | |
897 | #endif /* CONFIG_SMP */ | |
898 | ld r4,PACACURRENT(r13) | |
899 | addi r4,r4,THREAD /* Get THREAD */ | |
900 | li r6,1 | |
901 | stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */ | |
902 | /* enable use of VSX after return */ | |
903 | oris r12,r12,MSR_VSX@h | |
904 | std r12,_MSR(r1) | |
905 | #ifndef CONFIG_SMP | |
906 | /* Update last_task_used_vsx to 'current' */
907 | ld r4,PACACURRENT(r13) | |
908 | std r4,0(r3) | |
909 | #endif /* CONFIG_SMP */ | |
910 | b fast_exception_return | |
911 | #endif /* CONFIG_VSX */ | |
912 | ||
14cf11af PM |
913 | /* |
914 | * Hash table stuff | |
915 | */ | |
916 | .align 7 | |
945feb17 | 917 | _STATIC(do_hash_page) |
14cf11af PM |
918 | std r3,_DAR(r1) |
919 | std r4,_DSISR(r1) | |
920 | ||
921 | andis. r0,r4,0xa450 /* weird error? */ | |
3ccfc65c | 922 | bne- handle_page_fault /* if not, try to insert a HPTE */ |
14cf11af PM |
923 | BEGIN_FTR_SECTION |
924 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | |
3ccfc65c | 925 | bne- do_ste_alloc /* If so handle it */ |
14cf11af PM |
926 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) |
927 | ||
945feb17 BH |
928 | /* |
929 | * On iSeries, we soft-disable interrupts here, then | |
930 | * hard-enable interrupts so that the hash_page code can spin on | |
931 | * the hash_table_lock without problems on a shared processor. | |
932 | */ | |
933 | DISABLE_INTS | |
934 | ||
935 | /* | |
936 | * Currently, trace_hardirqs_off() will be called by DISABLE_INTS | |
937 | * and will clobber volatile registers when irq tracing is enabled,
938 | * so we need to reload them. It may be possible to be smarter here
939 | * and move the irq tracing elsewhere, but let's keep it simple for
940 | * now.
941 | */ | |
942 | #ifdef CONFIG_TRACE_IRQFLAGS | |
943 | ld r3,_DAR(r1) | |
944 | ld r4,_DSISR(r1) | |
945 | ld r5,_TRAP(r1) | |
946 | ld r12,_MSR(r1) | |
947 | clrrdi r5,r5,4 | |
948 | #endif /* CONFIG_TRACE_IRQFLAGS */ | |
14cf11af PM |
949 | /* |
950 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | |
951 | * accessing a userspace segment (even from the kernel). We assume | |
952 | * kernel addresses always have the high bit set. | |
953 | */ | |
954 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | |
955 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | |
956 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | |
957 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | |
958 | ori r4,r4,1 /* add _PAGE_PRESENT */ | |
959 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | |
960 | ||
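/*
 * Rough C equivalent of the access-bit computation above (sketch only;
 * DSISR_STORE here stands for the store bit named in the comments, not
 * necessarily a real macro):
 *
 *	access  = (dsisr & DSISR_STORE) ? _PAGE_RW : 0;
 *	if ((msr & MSR_PR) || !(ea >> 63))	// user mode, or a user address
 *		access |= _PAGE_USER;
 *	access |= _PAGE_PRESENT;
 *	if (trap == 0x400)			// instruction access fault
 *		access |= _PAGE_EXEC;
 */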
14cf11af PM |
961 | /* |
962 | * r3 contains the faulting address | |
963 | * r4 contains the required access permissions | |
964 | * r5 contains the trap number | |
965 | * | |
966 | * at return r3 = 0 for success | |
967 | */ | |
968 | bl .hash_page /* build HPTE if possible */ | |
969 | cmpdi r3,0 /* see if hash_page succeeded */ | |
970 | ||
3f639ee8 | 971 | BEGIN_FW_FTR_SECTION |
14cf11af PM |
972 | /* |
973 | * If we had interrupts soft-enabled at the point where the | |
974 | * DSI/ISI occurred, and an interrupt came in during hash_page, | |
975 | * handle it now. | |
976 | * We jump to ret_from_except_lite rather than fast_exception_return | |
977 | * because ret_from_except_lite will check for and handle pending | |
978 | * interrupts if necessary. | |
979 | */ | |
3ccfc65c | 980 | beq 13f |
b0a779de | 981 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
945feb17 | 982 | |
b0a779de PM |
983 | BEGIN_FW_FTR_SECTION |
984 | /* | |
985 | * Here we have interrupts hard-disabled, so it is sufficient | |
986 | * to restore paca->{soft,hard}_enable and get out. | |
987 | */ | |
988 | beq fast_exc_return_irq /* Return from exception on success */ | |
989 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |
990 | ||
14cf11af PM |
991 | /* For a hash failure, we don't bother re-enabling interrupts */ |
992 | ble- 12f | |
993 | ||
994 | /* | |
995 | * hash_page couldn't handle it, set soft interrupt enable back | |
945feb17 | 996 | * to what it was before the trap. Note that .raw_local_irq_restore |
14cf11af PM |
997 | * handles any interrupts pending at this point. |
998 | */ | |
999 | ld r3,SOFTE(r1) | |
945feb17 BH |
1000 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) |
1001 | bl .raw_local_irq_restore | |
14cf11af | 1002 | b 11f |
14cf11af PM |
1003 | |
1004 | /* Here we have a page fault that hash_page can't handle. */ | |
3ccfc65c | 1005 | handle_page_fault: |
14cf11af PM |
1006 | ENABLE_INTS |
1007 | 11: ld r4,_DAR(r1) | |
1008 | ld r5,_DSISR(r1) | |
1009 | addi r3,r1,STACK_FRAME_OVERHEAD | |
1010 | bl .do_page_fault | |
1011 | cmpdi r3,0 | |
3ccfc65c | 1012 | beq+ 13f |
14cf11af PM |
1013 | bl .save_nvgprs |
1014 | mr r5,r3 | |
1015 | addi r3,r1,STACK_FRAME_OVERHEAD | |
1016 | lwz r4,_DAR(r1) | |
1017 | bl .bad_page_fault | |
1018 | b .ret_from_except | |
1019 | ||
79acbb3f PM |
1020 | 13: b .ret_from_except_lite |
1021 | ||
14cf11af PM |
1022 | /* We have a page fault that hash_page could handle but HV refused |
1023 | * the PTE insertion | |
1024 | */ | |
1025 | 12: bl .save_nvgprs | |
fa28237c | 1026 | mr r5,r3 |
14cf11af | 1027 | addi r3,r1,STACK_FRAME_OVERHEAD |
a792e75d | 1028 | ld r4,_DAR(r1) |
14cf11af PM |
1029 | bl .low_hash_fault |
1030 | b .ret_from_except | |
1031 | ||
1032 | /* here we have a segment miss */ | |
3ccfc65c | 1033 | do_ste_alloc: |
14cf11af PM |
1034 | bl .ste_allocate /* try to insert stab entry */ |
1035 | cmpdi r3,0 | |
3ccfc65c PM |
1036 | bne- handle_page_fault |
1037 | b fast_exception_return | |
14cf11af PM |
1038 | |
1039 | /* | |
1040 | * r13 points to the PACA, r9 contains the saved CR, | |
1041 | * r11 and r12 contain the saved SRR0 and SRR1. | |
1042 | * r9 - r13 are saved in paca->exslb. | |
1043 | * We assume we aren't going to take any exceptions during this procedure. | |
1044 | * We assume (DAR >> 60) == 0xc. | |
1045 | */ | |
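/*
 * Rough outline of the insertion algorithm below (illustrative only;
 * the flag values come from the instructions, not from named macros):
 *
 *	group = stab_base + ((esid & 0x1f) << 7);   // 8 STEs of 16 bytes each
 *	use the first STE in the group whose valid bit (0x80) is clear;
 *	if none is free, pick a timebase-based victim (never entry 0),
 *	clear its valid bit and slbie its old ESID;
 *	then store the VSID dword, followed by the ESID dword with the
 *	valid (0x80) and Kp (0x10) bits set.
 */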
1046 | .align 7 | |
1047 | _GLOBAL(do_stab_bolted) | |
1048 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | |
1049 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | |
1050 | ||
1051 | /* Hash to the primary group */ | |
1052 | ld r10,PACASTABVIRT(r13) | |
b5bbeb23 | 1053 | mfspr r11,SPRN_DAR |
14cf11af PM |
1054 | srdi r11,r11,28 |
1055 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | |
1056 | ||
1057 | /* Calculate VSID */ | |
1058 | /* This is a kernel address, so protovsid = ESID */ | |
1189be65 | 1059 | ASM_VSID_SCRAMBLE(r11, r9, 256M) |
14cf11af PM |
1060 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ |
1061 | ||
1062 | /* Search the primary group for a free entry */ | |
1063 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | |
1064 | andi. r11,r11,0x80 | |
1065 | beq 2f | |
1066 | addi r10,r10,16 | |
1067 | andi. r11,r10,0x70 | |
1068 | bne 1b | |
1069 | ||
1070 | /* Stick to searching only the primary group for now. */
1071 | /* At least for now, we use a very simple random castout scheme */
1072 | /* Use the TB as a random number; OR in 1 to avoid entry 0 */
1073 | mftb r11 | |
1074 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | |
1075 | ori r11,r11,0x10 | |
1076 | ||
1077 | /* r10 currently points to an ste one past the group of interest */ | |
1078 | /* make it point to the randomly selected entry */ | |
1079 | subi r10,r10,128 | |
1080 | or r10,r10,r11 /* r10 is the entry to invalidate */ | |
1081 | ||
1082 | isync /* mark the entry invalid */ | |
1083 | ld r11,0(r10) | |
1084 | rldicl r11,r11,56,1 /* clear the valid bit */ | |
1085 | rotldi r11,r11,8 | |
1086 | std r11,0(r10) | |
1087 | sync | |
1088 | ||
1089 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | |
1090 | slbie r11 | |
1091 | ||
1092 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | |
1093 | eieio | |
1094 | ||
b5bbeb23 | 1095 | mfspr r11,SPRN_DAR /* Get the new esid */ |
14cf11af PM |
1096 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ |
1097 | ori r11,r11,0x90 /* Turn on valid and kp */ | |
1098 | std r11,0(r10) /* Put new entry back into the stab */ | |
1099 | ||
1100 | sync | |
1101 | ||
1102 | /* All done -- return from exception. */ | |
1103 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | |
1104 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | |
1105 | ||
1106 | andi. r10,r12,MSR_RI | |
1107 | beq- unrecov_slb | |
1108 | ||
1109 | mtcrf 0x80,r9 /* restore CR */ | |
1110 | ||
1111 | mfmsr r10 | |
1112 | clrrdi r10,r10,2 | |
1113 | mtmsrd r10,1 | |
1114 | ||
b5bbeb23 PM |
1115 | mtspr SPRN_SRR0,r11 |
1116 | mtspr SPRN_SRR1,r12 | |
14cf11af PM |
1117 | ld r9,PACA_EXSLB+EX_R9(r13) |
1118 | ld r10,PACA_EXSLB+EX_R10(r13) | |
1119 | ld r11,PACA_EXSLB+EX_R11(r13) | |
1120 | ld r12,PACA_EXSLB+EX_R12(r13) | |
1121 | ld r13,PACA_EXSLB+EX_R13(r13) | |
1122 | rfid | |
1123 | b . /* prevent speculative execution */ | |
1124 | ||
14cf11af PM |
1125 | /* |
1126 | * Space for CPU0's segment table. | |
1127 | * | |
1128 | * On iSeries, the hypervisor must fill in at least one entry before | |
16a15a30 SR |
1129 | * we get control (with relocation on). The address is given to the hv
1130 | * as a page number (see xLparMap below), so this must be at a | |
14cf11af PM |
1131 | * fixed address (the linker can't compute (u64)&initial_stab >> |
1132 | * PAGE_SHIFT). | |
1133 | */ | |
758438a7 | 1134 | . = STAB0_OFFSET /* 0x6000 */ |
14cf11af PM |
1135 | .globl initial_stab |
1136 | initial_stab: | |
1137 | .space 4096 | |
1138 | ||
9e4859ef | 1139 | #ifdef CONFIG_PPC_PSERIES |
14cf11af PM |
1140 | /* |
1141 | * Data area reserved for FWNMI option. | |
1142 | * This address (0x7000) is fixed by the RPA. | |
1143 | */ | |
1144 | . = 0x7000
1145 | .globl fwnmi_data_area | |
1146 | fwnmi_data_area: | |
9e4859ef | 1147 | #endif /* CONFIG_PPC_PSERIES */ |
14cf11af PM |
1148 | |
1149 | /* iSeries does not use the FWNMI stuff, so it is safe to put | |
1150 | * this here, even if we later allow kernels that will boot on | |
1151 | * both pSeries and iSeries */ | |
1152 | #ifdef CONFIG_PPC_ISERIES | |
1153 | . = LPARMAP_PHYS | |
16a15a30 SR |
1154 | .globl xLparMap |
1155 | xLparMap: | |
1156 | .quad HvEsidsToMap /* xNumberEsids */ | |
1157 | .quad HvRangesToMap /* xNumberRanges */ | |
1158 | .quad STAB0_PAGE /* xSegmentTableOffs */ | |
1159 | .zero 40 /* xRsvd */ | |
1160 | /* xEsids (HvEsidsToMap entries of 2 quads) */ | |
1161 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ | |
1162 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ | |
1163 | .quad VMALLOC_START_ESID /* xKernelEsid */ | |
1164 | .quad VMALLOC_START_VSID /* xKernelVsid */ | |
1165 | /* xRanges (HvRangesToMap entries of 3 quads) */ | |
1166 | .quad HvPagesToMap /* xPages */ | |
1167 | .quad 0 /* xOffset */ | |
1168 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ | |
1169 | ||
14cf11af PM |
1170 | #endif /* CONFIG_PPC_ISERIES */ |
1171 | ||
9e4859ef | 1172 | #ifdef CONFIG_PPC_PSERIES |
14cf11af | 1173 | . = 0x8000 |
9e4859ef | 1174 | #endif /* CONFIG_PPC_PSERIES */ |
14cf11af PM |
1175 | |
1176 | /* | |
f39b7a55 OJ |
1177 | * On pSeries and most other platforms, secondary processors spin |
1178 | * in the following code. | |
14cf11af PM |
1179 | * At entry, r3 = this processor's number (physical cpu id) |
1180 | */ | |
f39b7a55 | 1181 | _GLOBAL(generic_secondary_smp_init) |
14cf11af PM |
1182 | mr r24,r3 |
1183 | ||
1184 | /* turn on 64-bit mode */ | |
1185 | bl .enable_64b_mode | |
14cf11af | 1186 | |
14cf11af PM |
1187 | /* Set up a paca value for this processor. Since we have the |
1188 | * physical cpu id in r24, we need to search the pacas to find | |
1189 | * which logical id maps to our physical one. | |
1190 | */ | |
e58c3495 | 1191 | LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ |
14cf11af PM |
1192 | li r5,0 /* logical cpu id */ |
1193 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | |
1194 | cmpw r6,r24 /* Compare to our id */ | |
1195 | beq 2f | |
1196 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | |
1197 | addi r5,r5,1 | |
1198 | cmpwi r5,NR_CPUS | |
1199 | blt 1b | |
1200 | ||
1201 | mr r3,r24 /* not found, copy phys to r3 */ | |
1202 | b .kexec_wait /* next kernel might do better */ | |
1203 | ||
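/*
 * The search above is, in effect (illustrative sketch only):
 *
 *	for (i = 0; i < NR_CPUS; i++)
 *		if (paca[i].hw_cpu_id == phys_cpu_id)
 *			break;			// r13 = &paca[i], r24 = i
 *	if (i == NR_CPUS)
 *		kexec_wait(phys_cpu_id);	// nobody claims this cpu; park it
 */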
b5bbeb23 | 1204 | 2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ |
14cf11af PM |
1205 | /* From now on, r24 is expected to be logical cpuid */ |
1206 | mr r24,r5 | |
1207 | 3: HMT_LOW | |
1208 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | |
1209 | /* start. */ | |
14cf11af | 1210 | |
f39b7a55 OJ |
1211 | #ifndef CONFIG_SMP |
1212 | b 3b /* Never go on non-SMP */ | |
1213 | #else | |
1214 | cmpwi 0,r23,0 | |
1215 | beq 3b /* Loop until told to go */ | |
1216 | ||
b6f6b98a SR |
1217 | sync /* order paca.run and cur_cpu_spec */ |
1218 | ||
f39b7a55 OJ |
1219 | /* See if we need to call a cpu state restore handler */ |
1220 | LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) | |
1221 | ld r23,0(r23) | |
1222 | ld r23,CPU_SPEC_RESTORE(r23) | |
1223 | cmpdi 0,r23,0 | |
1224 | beq 4f | |
1225 | ld r23,0(r23) | |
1226 | mtctr r23 | |
1227 | bctrl | |
1228 | ||
1229 | 4: /* Create a temp kernel stack for use before relocation is on. */ | |
14cf11af PM |
1230 | ld r1,PACAEMERGSP(r13) |
1231 | subi r1,r1,STACK_FRAME_OVERHEAD | |
1232 | ||
c705677e | 1233 | b __secondary_start |
14cf11af | 1234 | #endif |
14cf11af | 1235 | |
14cf11af PM |
1236 | _STATIC(__mmu_off) |
1237 | mfmsr r3 | |
1238 | andi. r0,r3,MSR_IR|MSR_DR | |
1239 | beqlr | |
1240 | andc r3,r3,r0 | |
1241 | mtspr SPRN_SRR0,r4 | |
1242 | mtspr SPRN_SRR1,r3 | |
1243 | sync | |
1244 | rfid | |
1245 | b . /* prevent speculative execution */ | |
1246 | ||
1247 | ||
1248 | /* | |
1249 | * Here is our main kernel entry point. We currently support two kinds of
1250 | * entries, depending on the value of r5.
1251 | * | |
1252 | * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content | |
1253 | * in r3...r7 | |
1254 | * | |
1255 | * r5 == NULL -> kexec style entry. r3 is a physical pointer to the | |
1256 | * DT block, r4 is a physical pointer to the kernel itself | |
1257 | * | |
1258 | */ | |
1259 | _GLOBAL(__start_initialization_multiplatform) | |
1260 | /* | |
1261 | * Are we booted from a PROM OF-type client interface?
1262 | */ | |
1263 | cmpldi cr0,r5,0 | |
939e60f6 SR |
1264 | beq 1f |
1265 | b .__boot_from_prom /* yes -> prom */ | |
1266 | 1: | |
14cf11af PM |
1267 | /* Save parameters */ |
1268 | mr r31,r3 | |
1269 | mr r30,r4 | |
1270 | ||
1271 | /* Make sure we are running in 64-bit mode */
1272 | bl .enable_64b_mode | |
1273 | ||
1274 | /* Setup some critical 970 SPRs before switching MMU off */ | |
f39b7a55 OJ |
1275 | mfspr r0,SPRN_PVR |
1276 | srwi r0,r0,16 | |
1277 | cmpwi r0,0x39 /* 970 */ | |
1278 | beq 1f | |
1279 | cmpwi r0,0x3c /* 970FX */ | |
1280 | beq 1f | |
1281 | cmpwi r0,0x44 /* 970MP */ | |
190a24f5 OJ |
1282 | beq 1f |
1283 | cmpwi r0,0x45 /* 970GX */ | |
f39b7a55 OJ |
1284 | bne 2f |
1285 | 1: bl .__cpu_preinit_ppc970 | |
1286 | 2: | |
14cf11af | 1287 | |
14cf11af | 1288 | /* Switch off MMU if not already */ |
e58c3495 | 1289 | LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) |
14cf11af PM |
1290 | add r4,r4,r30 |
1291 | bl .__mmu_off | |
1292 | b .__after_prom_start | |
1293 | ||
939e60f6 | 1294 | _INIT_STATIC(__boot_from_prom) |
14cf11af PM |
1295 | /* Save parameters */ |
1296 | mr r31,r3 | |
1297 | mr r30,r4 | |
1298 | mr r29,r5 | |
1299 | mr r28,r6 | |
1300 | mr r27,r7 | |
1301 | ||
6088857b OH |
1302 | /* |
1303 | * Align the stack to a 16-byte boundary.
1304 | * Depending on the size and layout of the ELF sections in the initial
1305 | * boot binary, the stack pointer will be unaligned on PowerMac.
1306 | */ | |
c05b4770 LT |
1307 | rldicr r1,r1,0,59 |
1308 | ||
14cf11af PM |
1309 | /* Make sure we are running in 64-bit mode */
1310 | bl .enable_64b_mode | |
1311 | ||
1312 | /* put a relocation offset into r3 */ | |
1313 | bl .reloc_offset | |
1314 | ||
e58c3495 | 1315 | LOAD_REG_IMMEDIATE(r2,__toc_start) |
14cf11af PM |
1316 | addi r2,r2,0x4000 |
1317 | addi r2,r2,0x4000 | |
1318 | ||
1319 | /* Relocate the TOC from a virt addr to a real addr */ | |
5a408329 | 1320 | add r2,r2,r3 |
14cf11af PM |
1321 | |
1322 | /* Restore parameters */ | |
1323 | mr r3,r31 | |
1324 | mr r4,r30 | |
1325 | mr r5,r29 | |
1326 | mr r6,r28 | |
1327 | mr r7,r27 | |
1328 | ||
1329 | /* Do all of the interaction with OF client interface */ | |
1330 | bl .prom_init | |
1331 | /* We never return */ | |
1332 | trap | |
1333 | ||
14cf11af PM |
1334 | _STATIC(__after_prom_start) |
1335 | ||
1336 | /* | |
758438a7 | 1337 | * We need to run with __start at physical address PHYSICAL_START. |
14cf11af PM |
1338 | * This will leave some code in the first 256B of |
1339 | * real memory, which is reserved for software use.
1340 | * The remainder of the first page is loaded with the fixed | |
1341 | * interrupt vectors. The next two pages are filled with | |
1342 | * unknown exception placeholders. | |
1343 | * | |
1344 | * Note: This process overwrites the OF exception vectors. | |
1345 | * r26 == relocation offset | |
1346 | * r27 == KERNELBASE | |
1347 | */ | |
1348 | bl .reloc_offset | |
1349 | mr r26,r3 | |
e58c3495 | 1350 | LOAD_REG_IMMEDIATE(r27, KERNELBASE) |
14cf11af | 1351 | |
e58c3495 | 1352 | LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ |
14cf11af PM |
1353 | |
1354 | // XXX FIXME: Use phys returned by OF (r30) | |
5a408329 | 1355 | add r4,r27,r26 /* source addr */ |
14cf11af PM |
1356 | /* current address of _start */ |
1357 | /* i.e. where we are running */ | |
1358 | /* the source addr */ | |
1359 | ||
d0b79c54 | 1360 | cmpdi r4,0 /* In some cases the loader may */ |
939e60f6 SR |
1361 | bne 1f |
1362 | b .start_here_multiplatform /* have already put us at zero */ | |
d0b79c54 | 1363 | /* so we can skip the copy. */ |
939e60f6 | 1364 | 1: LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */ |
14cf11af PM |
1365 | sub r5,r5,r27 |
1366 | ||
1367 | li r6,0x100 /* Start offset, the first 0x100 */ | |
1368 | /* bytes were copied earlier. */ | |
1369 | ||
1370 | bl .copy_and_flush /* copy the first n bytes */ | |
1371 | /* this includes the code being */ | |
1372 | /* executed here. */ | |
1373 | ||
e58c3495 | 1374 | LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ |
14cf11af PM |
1375 | mtctr r0 /* that we just made/relocated */ |
1376 | bctr | |
1377 | ||
e58c3495 | 1378 | 4: LOAD_REG_IMMEDIATE(r5,klimit) |
5a408329 | 1379 | add r5,r5,r26 |
14cf11af PM |
1380 | ld r5,0(r5) /* get the value of klimit */ |
1381 | sub r5,r5,r27 | |
1382 | bl .copy_and_flush /* copy the rest */ | |
1383 | b .start_here_multiplatform | |
1384 | ||
14cf11af PM |
1385 | /* |
1386 | * Copy routine used to copy the kernel to start at physical address 0 | |
1387 | * and flush and invalidate the caches as needed. | |
1388 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | |
1389 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | |
1390 | * | |
1391 | * Note: this routine *only* clobbers r0, r6 and lr | |
1392 | */ | |
1393 | _GLOBAL(copy_and_flush) | |
1394 | addi r5,r5,-8 | |
1395 | addi r6,r6,-8 | |
5a2fe38d | 1396 | 4: li r0,8 /* Use the smallest common */ |
14cf11af PM |
1397 | /* denominator cache line */ |
1398 | /* size. This results in */ | |
1399 | /* extra cache line flushes */ | |
1400 | /* but operation is correct. */ | |
1401 | /* Can't get cache line size */ | |
1402 | /* from NACA as it is being */ | |
1403 | /* moved too. */ | |
1404 | ||
1405 | mtctr r0 /* put # words/line in ctr */ | |
1406 | 3: addi r6,r6,8 /* copy a cache line */ | |
1407 | ldx r0,r6,r4 | |
1408 | stdx r0,r6,r3 | |
1409 | bdnz 3b | |
1410 | dcbst r6,r3 /* write it to memory */ | |
1411 | sync | |
1412 | icbi r6,r3 /* flush the icache line */ | |
1413 | cmpld 0,r6,r5 | |
1414 | blt 4b | |
1415 | sync | |
1416 | addi r5,r5,8 | |
1417 | addi r6,r6,8 | |
1418 | blr | |
1419 | ||
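/*
 * In outline, copy_and_flush does the following per 64-byte block
 * (sketch only; the real loop copies 8 doublewords per iteration):
 *
 *	for (off = start; off < limit; off += 64) {
 *		copy 64 bytes from src + off to dest + off;
 *		dcbst(dest + off);	// push the new code out to memory
 *		sync();			// ...and wait for it to get there
 *		icbi(dest + off);	// toss any stale icache copy
 *	}
 */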
1420 | .align 8 | |
1421 | copy_to_here: | |
1422 | ||
1423 | #ifdef CONFIG_SMP | |
1424 | #ifdef CONFIG_PPC_PMAC | |
1425 | /* | |
1426 | * On PowerMac, secondary processors start from the reset vector, which
1427 | * is temporarily turned into a call to one of the functions below. | |
1428 | */ | |
1429 | .section ".text"; | |
1430 | .align 2 ; | |
1431 | ||
35499c01 PM |
1432 | .globl __secondary_start_pmac_0 |
1433 | __secondary_start_pmac_0: | |
1434 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ | |
1435 | li r24,0 | |
1436 | b 1f | |
1437 | li r24,1 | |
1438 | b 1f | |
1439 | li r24,2 | |
1440 | b 1f | |
1441 | li r24,3 | |
1442 | 1: | |
14cf11af PM |
1443 | |
1444 | _GLOBAL(pmac_secondary_start) | |
1445 | /* turn on 64-bit mode */ | |
1446 | bl .enable_64b_mode | |
14cf11af PM |
1447 | |
1448 | /* Copy some CPU settings from CPU 0 */ | |
f39b7a55 | 1449 | bl .__restore_cpu_ppc970 |
14cf11af PM |
1450 | |
1451 | /* pSeries does that early, though I don't think we really need it */
1452 | mfmsr r3 | |
1453 | ori r3,r3,MSR_RI | |
1454 | mtmsrd r3 /* RI on */ | |
1455 | ||
1456 | /* Set up a paca value for this processor. */ | |
e58c3495 | 1457 | LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ |
14cf11af PM |
1458 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ |
1459 | add r13,r13,r4 /* for this processor. */ | |
b5bbeb23 | 1460 | mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ |
14cf11af PM |
1461 | |
1462 | /* Create a temp kernel stack for use before relocation is on. */ | |
1463 | ld r1,PACAEMERGSP(r13) | |
1464 | subi r1,r1,STACK_FRAME_OVERHEAD | |
1465 | ||
c705677e | 1466 | b __secondary_start |
14cf11af PM |
1467 | |
1468 | #endif /* CONFIG_PPC_PMAC */ | |
1469 | ||
1470 | /* | |
1471 | * This function is called after the master CPU has released the | |
1472 | * secondary processors. The execution environment is relocation off. | |
1473 | * The paca for this processor has the following fields initialized at | |
1474 | * this point: | |
1475 | * 1. Processor number | |
1476 | * 2. Segment table pointer (virtual address) | |
1477 | * On entry the following are set: | |
1478 | * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries | |
1479 | * r24 = cpu# (in Linux terms) | |
1480 | * r13 = paca virtual address | |
1481 | * SPRG3 = paca virtual address | |
1482 | */ | |
fc68e869 | 1483 | .globl __secondary_start |
c705677e | 1484 | __secondary_start: |
799d6046 PM |
1485 | /* Set thread priority to MEDIUM */ |
1486 | HMT_MEDIUM | |
14cf11af | 1487 | |
799d6046 | 1488 | /* Load TOC */ |
14cf11af | 1489 | ld r2,PACATOC(r13) |
799d6046 PM |
1490 | |
1491 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ | |
1492 | bl .early_setup_secondary | |
14cf11af PM |
1493 | |
1494 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | |
e58c3495 | 1495 | LOAD_REG_ADDR(r3, current_set) |
14cf11af PM |
1496 | sldi r28,r24,3 /* get current_set[cpu#] */ |
1497 | ldx r1,r3,r28 | |
1498 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | |
1499 | std r1,PACAKSAVE(r13) | |
1500 | ||
799d6046 | 1501 | /* Clear backchain so we get nice backtraces */ |
14cf11af PM |
1502 | li r7,0 |
1503 | mtlr r7 | |
1504 | ||
1505 | /* enable MMU and jump to start_secondary */ | |
e58c3495 DG |
1506 | LOAD_REG_ADDR(r3, .start_secondary_prolog) |
1507 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) | |
d04c56f7 | 1508 | #ifdef CONFIG_PPC_ISERIES |
3f639ee8 | 1509 | BEGIN_FW_FTR_SECTION |
14cf11af | 1510 | ori r4,r4,MSR_EE |
ff3da2e0 BH |
1511 | li r8,1 |
1512 | stb r8,PACAHARDIRQEN(r13) | |
3f639ee8 | 1513 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
14cf11af | 1514 | #endif |
d04c56f7 | 1515 | BEGIN_FW_FTR_SECTION |
d04c56f7 PM |
1516 | stb r7,PACAHARDIRQEN(r13) |
1517 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |
ff3da2e0 | 1518 | stb r7,PACASOFTIRQEN(r13) |
d04c56f7 | 1519 | |
b5bbeb23 PM |
1520 | mtspr SPRN_SRR0,r3 |
1521 | mtspr SPRN_SRR1,r4 | |
14cf11af PM |
1522 | rfid |
1523 | b . /* prevent speculative execution */ | |
1524 | ||
1525 | /* | |
1526 | * Running with relocation on at this point. All we want to do is | |
1527 | * zero the stack back-chain pointer before going into C code. | |
1528 | */ | |
1529 | _GLOBAL(start_secondary_prolog) | |
1530 | li r3,0 | |
1531 | std r3,0(r1) /* Zero the stack frame pointer */ | |
1532 | bl .start_secondary | |
799d6046 | 1533 | b . |
14cf11af PM |
1534 | #endif |
1535 | ||
1536 | /* | |
1537 | * This subroutine clobbers r11 and r12 | |
1538 | */ | |
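/* What it does, roughly: set MSR_SF (64-bit mode) and MSR_ISF (take
 * interrupts in 64-bit mode) in the MSR and write it back with mtmsrd;
 * the two rldicr sequences below just build those single-bit masks. */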
1539 | _GLOBAL(enable_64b_mode) | |
1540 | mfmsr r11 /* grab the current MSR */ | |
1541 | li r12,1 | |
1542 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | |
1543 | or r11,r11,r12 | |
1544 | li r12,1 | |
1545 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | |
1546 | or r11,r11,r12 | |
1547 | mtmsrd r11 | |
1548 | isync | |
1549 | blr | |
1550 | ||
14cf11af PM |
1551 | /* |
1552 | * This is where the main kernel code starts. | |
1553 | */ | |
939e60f6 | 1554 | _INIT_STATIC(start_here_multiplatform) |
14cf11af PM |
1555 | /* get a new offset, now that the kernel has moved. */ |
1556 | bl .reloc_offset | |
1557 | mr r26,r3 | |
1558 | ||
1559 | /* Clear out the BSS. It may have been done in prom_init
1560 | * already, but that's irrelevant since prom_init will soon
1561 | * be detached from the kernel completely. Besides, we need | |
1562 | * to clear it now for kexec-style entry. | |
1563 | */ | |
e58c3495 DG |
1564 | LOAD_REG_IMMEDIATE(r11,__bss_stop) |
1565 | LOAD_REG_IMMEDIATE(r8,__bss_start) | |
14cf11af PM |
1566 | sub r11,r11,r8 /* bss size */ |
1567 | addi r11,r11,7 /* round up to an even double word */ | |
1568 | rldicl. r11,r11,61,3 /* shift right by 3 */ | |
1569 | beq 4f | |
1570 | addi r8,r8,-8 | |
1571 | li r0,0 | |
1572 | mtctr r11 /* zero this many doublewords */ | |
1573 | 3: stdu r0,8(r8) | |
1574 | bdnz 3b | |
1575 | 4: | |
1576 | ||
1577 | mfmsr r6 | |
1578 | ori r6,r6,MSR_RI | |
1579 | mtmsrd r6 /* RI on */ | |
1580 | ||
14cf11af PM |
1581 | /* The following gets the stack and TOC set up with the regs */ |
1582 | /* pointing to the real addr of the kernel stack. This is */ | |
1583 | /* all done to support the C function call below which sets */ | |
1584 | /* up the htab. This is done because we have relocated the */ | |
1585 | /* kernel but are still running in real mode. */ | |
1586 | ||
e58c3495 | 1587 | LOAD_REG_IMMEDIATE(r3,init_thread_union) |
5a408329 | 1588 | add r3,r3,r26 |
14cf11af PM |
1589 | |
1590 | /* set up a stack pointer (physical address) */ | |
1591 | addi r1,r3,THREAD_SIZE | |
1592 | li r0,0 | |
1593 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1594 | ||
1595 | /* set up the TOC (physical address) */ | |
e58c3495 | 1596 | LOAD_REG_IMMEDIATE(r2,__toc_start) |
14cf11af PM |
1597 | addi r2,r2,0x4000 |
1598 | addi r2,r2,0x4000 | |
5a408329 | 1599 | add r2,r2,r26 |
14cf11af | 1600 | |
14cf11af PM |
1601 | /* Do very early kernel initializations, including initial hash table, |
1602 | * stab and slb setup before we turn on relocation. */ | |
1603 | ||
1604 | /* Restore parameters passed from prom_init/kexec */ | |
1605 | mr r3,r31 | |
1606 | bl .early_setup | |
1607 | ||
e58c3495 DG |
1608 | LOAD_REG_IMMEDIATE(r3, .start_here_common) |
1609 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) | |
b5bbeb23 PM |
1610 | mtspr SPRN_SRR0,r3 |
1611 | mtspr SPRN_SRR1,r4 | |
14cf11af PM |
1612 | rfid |
1613 | b . /* prevent speculative execution */ | |
14cf11af PM |
1614 | |
1615 | /* This is where all platforms converge execution */ | |
fc68e869 | 1616 | _INIT_GLOBAL(start_here_common) |
14cf11af PM |
1617 | /* relocation is on at this point */ |
1618 | ||
1619 | /* The following code sets up the SP and TOC now that we are */ | |
1620 | /* running with translation enabled. */ | |
1621 | ||
e58c3495 | 1622 | LOAD_REG_IMMEDIATE(r3,init_thread_union) |
14cf11af PM |
1623 | |
1624 | /* set up the stack */ | |
1625 | addi r1,r3,THREAD_SIZE | |
1626 | li r0,0 | |
1627 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1628 | ||
14cf11af PM |
1629 | /* Load the TOC */ |
1630 | ld r2,PACATOC(r13) | |
1631 | std r1,PACAKSAVE(r13) | |
1632 | ||
1633 | bl .setup_system | |
1634 | ||
1635 | /* Load up the kernel context */ | |
1636 | 5: | |
14cf11af | 1637 | li r5,0 |
d04c56f7 PM |
1638 | stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ |
1639 | #ifdef CONFIG_PPC_ISERIES | |
1640 | BEGIN_FW_FTR_SECTION | |
14cf11af | 1641 | mfmsr r5 |
ff3da2e0 | 1642 | ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/ |
14cf11af | 1643 | mtmsrd r5 |
ff3da2e0 | 1644 | li r5,1 |
3f639ee8 | 1645 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
14cf11af | 1646 | #endif |
ff3da2e0 | 1647 | stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */ |
14cf11af | 1648 | |
ff3da2e0 | 1649 | bl .start_kernel |
14cf11af | 1650 | |
f1870f77 AB |
1651 | /* Not reached */ |
1652 | BUG_OPCODE | |
14cf11af | 1653 | |
14cf11af PM |
1654 | /* |
1655 | * We put a few things here that have to be page-aligned. | |
1656 | * This stuff goes at the beginning of the bss, which is page-aligned. | |
1657 | */ | |
1658 | .section ".bss" | |
1659 | ||
1660 | .align PAGE_SHIFT | |
1661 | ||
1662 | .globl empty_zero_page | |
1663 | empty_zero_page: | |
1664 | .space PAGE_SIZE | |
1665 | ||
1666 | .globl swapper_pg_dir | |
1667 | swapper_pg_dir: | |
ee7a76da | 1668 | .space PGD_TABLE_SIZE |