powerpc: Hook in new transactional memory code
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / powerpc / kernel / exceptions-64s.S
1 /*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15 #include <asm/hw_irq.h>
16 #include <asm/exception-64s.h>
17 #include <asm/ptrace.h>
18
19 /*
20 * We layout physical memory as follows:
21 * 0x0000 - 0x00ff : Secondary processor spin code
22 * 0x0100 - 0x17ff : pSeries Interrupt prologs
23 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26 * 0x7000 - 0x7fff : FWNMI data area
27 * 0x8000 - 0x8fff : Initial (CPU0) segment table
28 * 0x9000 - : Early init and support code
29 */
30 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
31 #define SYSCALL_PSERIES_1 \
32 BEGIN_FTR_SECTION \
33 cmpdi r0,0x1ebe ; \
34 beq- 1f ; \
35 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
36 mr r9,r13 ; \
37 GET_PACA(r13) ; \
38 mfspr r11,SPRN_SRR0 ; \
39 0:
40
41 #define SYSCALL_PSERIES_2_RFID \
42 mfspr r12,SPRN_SRR1 ; \
43 ld r10,PACAKBASE(r13) ; \
44 LOAD_HANDLER(r10, system_call_entry) ; \
45 mtspr SPRN_SRR0,r10 ; \
46 ld r10,PACAKMSR(r13) ; \
47 mtspr SPRN_SRR1,r10 ; \
48 rfid ; \
49 b . ; /* prevent speculative execution */
50
51 #define SYSCALL_PSERIES_3 \
52 /* Fast LE/BE switch system call */ \
53 1: mfspr r12,SPRN_SRR1 ; \
54 xori r12,r12,MSR_LE ; \
55 mtspr SPRN_SRR1,r12 ; \
56 rfid ; /* return to userspace */ \
57 b . ; \
58 2: mfspr r12,SPRN_SRR1 ; \
59 andi. r12,r12,MSR_PR ; \
60 bne 0b ; \
61 mtspr SPRN_SRR0,r3 ; \
62 mtspr SPRN_SRR1,r4 ; \
63 mtspr SPRN_SDR1,r5 ; \
64 rfid ; \
65 b . ; /* prevent speculative execution */
66
67 #if defined(CONFIG_RELOCATABLE)
68 /*
69 * We can't branch directly; in the direct case we use LR
70 * and system_call_entry restores LR. (We thus need to move
71 * LR to r10 in the RFID case too.)
72 */
73 #define SYSCALL_PSERIES_2_DIRECT \
74 mflr r10 ; \
75 ld r12,PACAKBASE(r13) ; \
76 LOAD_HANDLER(r12, system_call_entry_direct) ; \
77 mtlr r12 ; \
78 mfspr r12,SPRN_SRR1 ; \
79 /* Re-use of r13... No spare regs to do this */ \
80 li r13,MSR_RI ; \
81 mtmsrd r13,1 ; \
82 GET_PACA(r13) ; /* get r13 back */ \
83 blr ;
84 #else
85 /* We can branch directly */
86 #define SYSCALL_PSERIES_2_DIRECT \
87 mfspr r12,SPRN_SRR1 ; \
88 li r10,MSR_RI ; \
89 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
90 b system_call_entry_direct ;
91 #endif
92
93 /*
94 * This is the start of the interrupt handlers for pSeries
95 * This code runs with relocation off.
96 * Code from here to __end_interrupts gets copied down to real
97 * address 0x100 when we are running a relocatable kernel.
98 * Therefore any relative branches in this section must only
99 * branch to labels in this section.
100 */
101 . = 0x100
102 .globl __start_interrupts
103 __start_interrupts:
104
105 .globl system_reset_pSeries;
106 system_reset_pSeries:
107 HMT_MEDIUM_PPR_DISCARD
108 SET_SCRATCH0(r13)
109 #ifdef CONFIG_PPC_P7_NAP
110 BEGIN_FTR_SECTION
111 /* Running native on arch 2.06 or later, check if we are
112 * waking up from nap. We only handle no state loss and
113 * supervisor state loss. We do -not- handle hypervisor
114 * state loss at this time.
115 */
116 mfspr r13,SPRN_SRR1
117 rlwinm. r13,r13,47-31,30,31
118 beq 9f
119
120 /* waking up from powersave (nap) state */
121 cmpwi cr1,r13,2
122 /* Total loss of HV state is fatal, we could try to use the
123 * PIR to locate a PACA, then use an emergency stack etc...
124 * but for now, let's just stay stuck here
125 */
126 bgt cr1,.
127 GET_PACA(r13)
128
129 #ifdef CONFIG_KVM_BOOK3S_64_HV
130 li r0,KVM_HWTHREAD_IN_KERNEL
131 stb r0,HSTATE_HWTHREAD_STATE(r13)
132 /* Order setting hwthread_state vs. testing hwthread_req */
133 sync
134 lbz r0,HSTATE_HWTHREAD_REQ(r13)
135 cmpwi r0,0
136 beq 1f
137 b kvm_start_guest
138 1:
139 #endif
140
141 beq cr1,2f
142 b .power7_wakeup_noloss
143 2: b .power7_wakeup_loss
144 9:
145 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146 #endif /* CONFIG_PPC_P7_NAP */
147 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148 NOTEST, 0x100)
149
150 . = 0x200
151 machine_check_pSeries_1:
152 /* This is moved out of line as it can be patched by FW, but
153 * some code path might still want to branch into the original
154 * vector
155 */
156 HMT_MEDIUM_PPR_DISCARD
157 SET_SCRATCH0(r13) /* save r13 */
158 EXCEPTION_PROLOG_0(PACA_EXMC)
159 b machine_check_pSeries_0
160
161 . = 0x300
162 .globl data_access_pSeries
163 data_access_pSeries:
164 HMT_MEDIUM_PPR_DISCARD
165 SET_SCRATCH0(r13)
166 BEGIN_FTR_SECTION
167 b data_access_check_stab
168 data_access_not_stab:
169 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
170 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
171 KVMTEST, 0x300)
172
173 . = 0x380
174 .globl data_access_slb_pSeries
175 data_access_slb_pSeries:
176 HMT_MEDIUM_PPR_DISCARD
177 SET_SCRATCH0(r13)
178 EXCEPTION_PROLOG_0(PACA_EXSLB)
179 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
180 std r3,PACA_EXSLB+EX_R3(r13)
181 mfspr r3,SPRN_DAR
182 #ifdef __DISABLED__
183 /* Keep that around for when we re-implement dynamic VSIDs */
184 cmpdi r3,0
185 bge slb_miss_user_pseries
186 #endif /* __DISABLED__ */
187 mfspr r12,SPRN_SRR1
188 #ifndef CONFIG_RELOCATABLE
189 b .slb_miss_realmode
190 #else
191 /*
192 * We can't just use a direct branch to .slb_miss_realmode
193 * because the distance from here to there depends on where
194 * the kernel ends up being put.
195 */
196 mfctr r11
197 ld r10,PACAKBASE(r13)
198 LOAD_HANDLER(r10, .slb_miss_realmode)
199 mtctr r10
200 bctr
201 #endif
202
203 STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
204
205 . = 0x480
206 .globl instruction_access_slb_pSeries
207 instruction_access_slb_pSeries:
208 HMT_MEDIUM_PPR_DISCARD
209 SET_SCRATCH0(r13)
210 EXCEPTION_PROLOG_0(PACA_EXSLB)
211 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
212 std r3,PACA_EXSLB+EX_R3(r13)
213 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
214 #ifdef __DISABLED__
215 /* Keep that around for when we re-implement dynamic VSIDs */
216 cmpdi r3,0
217 bge slb_miss_user_pseries
218 #endif /* __DISABLED__ */
219 mfspr r12,SPRN_SRR1
220 #ifndef CONFIG_RELOCATABLE
221 b .slb_miss_realmode
222 #else
223 mfctr r11
224 ld r10,PACAKBASE(r13)
225 LOAD_HANDLER(r10, .slb_miss_realmode)
226 mtctr r10
227 bctr
228 #endif
229
230 /* We open code these as we can't have a ". = x" (even with
231 * x = "." within a feature section
232 */
233 . = 0x500;
234 .globl hardware_interrupt_pSeries;
235 .globl hardware_interrupt_hv;
236 hardware_interrupt_pSeries:
237 hardware_interrupt_hv:
238 BEGIN_FTR_SECTION
239 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
240 EXC_HV, SOFTEN_TEST_HV)
241 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
242 FTR_SECTION_ELSE
243 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
244 EXC_STD, SOFTEN_TEST_HV_201)
245 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
246 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
247
248 STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
249 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
250
251 STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
252 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
253
254 STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
255 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
256
257 MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
258 STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
259
260 MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
261 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
262
263 STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
264 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
265
266 . = 0xc00
267 .globl system_call_pSeries
268 system_call_pSeries:
269 HMT_MEDIUM
270 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
271 SET_SCRATCH0(r13)
272 GET_PACA(r13)
273 std r9,PACA_EXGEN+EX_R9(r13)
274 std r10,PACA_EXGEN+EX_R10(r13)
275 mfcr r9
276 KVMTEST(0xc00)
277 GET_SCRATCH0(r13)
278 #endif
279 SYSCALL_PSERIES_1
280 SYSCALL_PSERIES_2_RFID
281 SYSCALL_PSERIES_3
282 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
283
284 STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
285 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
286
287 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
288 * out of line to handle them
289 */
290 . = 0xe00
291 hv_exception_trampoline:
292 SET_SCRATCH0(r13)
293 EXCEPTION_PROLOG_0(PACA_EXGEN)
294 b h_data_storage_hv
295
296 . = 0xe20
297 SET_SCRATCH0(r13)
298 EXCEPTION_PROLOG_0(PACA_EXGEN)
299 b h_instr_storage_hv
300
301 . = 0xe40
302 SET_SCRATCH0(r13)
303 EXCEPTION_PROLOG_0(PACA_EXGEN)
304 b emulation_assist_hv
305
306 . = 0xe60
307 SET_SCRATCH0(r13)
308 EXCEPTION_PROLOG_0(PACA_EXGEN)
309 b hmi_exception_hv
310
311 . = 0xe80
312 SET_SCRATCH0(r13)
313 EXCEPTION_PROLOG_0(PACA_EXGEN)
314 b h_doorbell_hv
315
316 /* We need to deal with the Altivec unavailable exception
317 * here which is at 0xf20, thus in the middle of the
318 * prolog code of the PerformanceMonitor one. A little
319 * trickery is thus necessary
320 */
321 performance_monitor_pSeries_1:
322 . = 0xf00
323 SET_SCRATCH0(r13)
324 EXCEPTION_PROLOG_0(PACA_EXGEN)
325 b performance_monitor_pSeries
326
327 altivec_unavailable_pSeries_1:
328 . = 0xf20
329 SET_SCRATCH0(r13)
330 EXCEPTION_PROLOG_0(PACA_EXGEN)
331 b altivec_unavailable_pSeries
332
333 vsx_unavailable_pSeries_1:
334 . = 0xf40
335 SET_SCRATCH0(r13)
336 EXCEPTION_PROLOG_0(PACA_EXGEN)
337 b vsx_unavailable_pSeries
338
339 . = 0xf60
340 SET_SCRATCH0(r13)
341 EXCEPTION_PROLOG_0(PACA_EXGEN)
342 b tm_unavailable_pSeries
343
344 #ifdef CONFIG_CBE_RAS
345 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
346 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
347 #endif /* CONFIG_CBE_RAS */
348
349 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
350 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
351
352 . = 0x1500
353 .global denorm_exception_hv
354 denorm_exception_hv:
355 HMT_MEDIUM_PPR_DISCARD
356 mtspr SPRN_SPRG_HSCRATCH0,r13
357 EXCEPTION_PROLOG_0(PACA_EXGEN)
358 std r11,PACA_EXGEN+EX_R11(r13)
359 std r12,PACA_EXGEN+EX_R12(r13)
360 mfspr r9,SPRN_SPRG_HSCRATCH0
361 std r9,PACA_EXGEN+EX_R13(r13)
362 mfcr r9
363
364 #ifdef CONFIG_PPC_DENORMALISATION
365 mfspr r10,SPRN_HSRR1
366 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
367 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
368 addi r11,r11,-4 /* HSRR0 is next instruction */
369 bne+ denorm_assist
370 #endif
371
372 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
373 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
374
375 #ifdef CONFIG_CBE_RAS
376 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
377 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
378 #endif /* CONFIG_CBE_RAS */
379
380 STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
381 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
382
383 #ifdef CONFIG_CBE_RAS
384 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
385 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
386 #else
387 . = 0x1800
388 #endif /* CONFIG_CBE_RAS */
389
390
391 /*** Out of line interrupts support ***/
392
393 .align 7
394 /* moved from 0x200 */
395 machine_check_pSeries:
396 .globl machine_check_fwnmi
397 machine_check_fwnmi:
398 HMT_MEDIUM_PPR_DISCARD
399 SET_SCRATCH0(r13) /* save r13 */
400 EXCEPTION_PROLOG_0(PACA_EXMC)
401 machine_check_pSeries_0:
402 EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
403 EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
404 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
405
406 /* moved from 0x300 */
407 data_access_check_stab:
408 GET_PACA(r13)
409 std r9,PACA_EXSLB+EX_R9(r13)
410 std r10,PACA_EXSLB+EX_R10(r13)
411 mfspr r10,SPRN_DAR
412 mfspr r9,SPRN_DSISR
413 srdi r10,r10,60
414 rlwimi r10,r9,16,0x20
415 #ifdef CONFIG_KVM_BOOK3S_PR
416 lbz r9,HSTATE_IN_GUEST(r13)
417 rlwimi r10,r9,8,0x300
418 #endif
419 mfcr r9
420 cmpwi r10,0x2c
421 beq do_stab_bolted_pSeries
422 mtcrf 0x80,r9
423 ld r9,PACA_EXSLB+EX_R9(r13)
424 ld r10,PACA_EXSLB+EX_R10(r13)
425 b data_access_not_stab
426 do_stab_bolted_pSeries:
427 std r11,PACA_EXSLB+EX_R11(r13)
428 std r12,PACA_EXSLB+EX_R12(r13)
429 GET_SCRATCH0(r10)
430 std r10,PACA_EXSLB+EX_R13(r13)
431 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
432
433 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
434 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
435 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
436 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
437 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
438 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
439
440 #ifdef CONFIG_PPC_DENORMALISATION
441 denorm_assist:
442 BEGIN_FTR_SECTION
443 /*
444 * To denormalise we need to move a copy of the register to itself.
445 * For POWER6 do that here for all FP regs.
446 */
447 mfmsr r10
448 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
449 xori r10,r10,(MSR_FE0|MSR_FE1)
450 mtmsrd r10
451 sync
452 fmr 0,0
453 fmr 1,1
454 fmr 2,2
455 fmr 3,3
456 fmr 4,4
457 fmr 5,5
458 fmr 6,6
459 fmr 7,7
460 fmr 8,8
461 fmr 9,9
462 fmr 10,10
463 fmr 11,11
464 fmr 12,12
465 fmr 13,13
466 fmr 14,14
467 fmr 15,15
468 fmr 16,16
469 fmr 17,17
470 fmr 18,18
471 fmr 19,19
472 fmr 20,20
473 fmr 21,21
474 fmr 22,22
475 fmr 23,23
476 fmr 24,24
477 fmr 25,25
478 fmr 26,26
479 fmr 27,27
480 fmr 28,28
481 fmr 29,29
482 fmr 30,30
483 fmr 31,31
484 FTR_SECTION_ELSE
485 /*
486 * To denormalise we need to move a copy of the register to itself.
487 * For POWER7 do that here for the first 32 VSX registers only.
488 */
489 mfmsr r10
490 oris r10,r10,MSR_VSX@h
491 mtmsrd r10
492 sync
493 XVCPSGNDP(0,0,0)
494 XVCPSGNDP(1,1,1)
495 XVCPSGNDP(2,2,2)
496 XVCPSGNDP(3,3,3)
497 XVCPSGNDP(4,4,4)
498 XVCPSGNDP(5,5,5)
499 XVCPSGNDP(6,6,6)
500 XVCPSGNDP(7,7,7)
501 XVCPSGNDP(8,8,8)
502 XVCPSGNDP(9,9,9)
503 XVCPSGNDP(10,10,10)
504 XVCPSGNDP(11,11,11)
505 XVCPSGNDP(12,12,12)
506 XVCPSGNDP(13,13,13)
507 XVCPSGNDP(14,14,14)
508 XVCPSGNDP(15,15,15)
509 XVCPSGNDP(16,16,16)
510 XVCPSGNDP(17,17,17)
511 XVCPSGNDP(18,18,18)
512 XVCPSGNDP(19,19,19)
513 XVCPSGNDP(20,20,20)
514 XVCPSGNDP(21,21,21)
515 XVCPSGNDP(22,22,22)
516 XVCPSGNDP(23,23,23)
517 XVCPSGNDP(24,24,24)
518 XVCPSGNDP(25,25,25)
519 XVCPSGNDP(26,26,26)
520 XVCPSGNDP(27,27,27)
521 XVCPSGNDP(28,28,28)
522 XVCPSGNDP(29,29,29)
523 XVCPSGNDP(30,30,30)
524 XVCPSGNDP(31,31,31)
525 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
526 mtspr SPRN_HSRR0,r11
527 mtcrf 0x80,r9
528 ld r9,PACA_EXGEN+EX_R9(r13)
529 RESTORE_PPR_PACA(PACA_EXGEN, r10)
530 ld r10,PACA_EXGEN+EX_R10(r13)
531 ld r11,PACA_EXGEN+EX_R11(r13)
532 ld r12,PACA_EXGEN+EX_R12(r13)
533 ld r13,PACA_EXGEN+EX_R13(r13)
534 HRFID
535 b .
536 #endif
537
538 .align 7
539 /* moved from 0xe00 */
540 STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
541 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
542 STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
543 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
544 STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
545 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
546 STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
547 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
548 MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
549 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
550
551 /* moved from 0xf00 */
552 STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
553 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
554 STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
555 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
556 STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
557 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
558 STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
559 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
560
561 /*
562 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
563 * - If it was a decrementer interrupt, we bump the dec to max and and return.
564 * - If it was a doorbell we return immediately since doorbells are edge
565 * triggered and won't automatically refire.
566 * - else we hard disable and return.
567 * This is called with r10 containing the value to OR to the paca field.
568 */
569 #define MASKED_INTERRUPT(_H) \
570 masked_##_H##interrupt: \
571 std r11,PACA_EXGEN+EX_R11(r13); \
572 lbz r11,PACAIRQHAPPENED(r13); \
573 or r11,r11,r10; \
574 stb r11,PACAIRQHAPPENED(r13); \
575 cmpwi r10,PACA_IRQ_DEC; \
576 bne 1f; \
577 lis r10,0x7fff; \
578 ori r10,r10,0xffff; \
579 mtspr SPRN_DEC,r10; \
580 b 2f; \
581 1: cmpwi r10,PACA_IRQ_DBELL; \
582 beq 2f; \
583 mfspr r10,SPRN_##_H##SRR1; \
584 rldicl r10,r10,48,1; /* clear MSR_EE */ \
585 rotldi r10,r10,16; \
586 mtspr SPRN_##_H##SRR1,r10; \
587 2: mtcrf 0x80,r9; \
588 ld r9,PACA_EXGEN+EX_R9(r13); \
589 ld r10,PACA_EXGEN+EX_R10(r13); \
590 ld r11,PACA_EXGEN+EX_R11(r13); \
591 GET_SCRATCH0(r13); \
592 ##_H##rfid; \
593 b .
594
595 MASKED_INTERRUPT()
596 MASKED_INTERRUPT(H)
597
598 /*
599 * Called from arch_local_irq_enable when an interrupt needs
600 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
601 * which kind of interrupt. MSR:EE is already off. We generate a
602 * stackframe like if a real interrupt had happened.
603 *
604 * Note: While MSR:EE is off, we need to make sure that _MSR
605 * in the generated frame has EE set to 1 or the exception
606 * handler will not properly re-enable them.
607 */
608 _GLOBAL(__replay_interrupt)
609 /* We are going to jump to the exception common code which
610 * will retrieve various register values from the PACA which
611 * we don't give a damn about, so we don't bother storing them.
612 */
613 mfmsr r12
614 mflr r11
615 mfcr r9
616 ori r12,r12,MSR_EE
617 cmpwi r3,0x900
618 beq decrementer_common
619 cmpwi r3,0x500
620 beq hardware_interrupt_common
621 BEGIN_FTR_SECTION
622 cmpwi r3,0xe80
623 beq h_doorbell_common
624 FTR_SECTION_ELSE
625 cmpwi r3,0xa00
626 beq doorbell_super_common
627 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
628 blr
629
630 #ifdef CONFIG_PPC_PSERIES
631 /*
632 * Vectors for the FWNMI option. Share common code.
633 */
634 .globl system_reset_fwnmi
635 .align 7
636 system_reset_fwnmi:
637 HMT_MEDIUM_PPR_DISCARD
638 SET_SCRATCH0(r13) /* save r13 */
639 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
640 NOTEST, 0x100)
641
642 #endif /* CONFIG_PPC_PSERIES */
643
644 #ifdef __DISABLED__
645 /*
646 * This is used for when the SLB miss handler has to go virtual,
647 * which doesn't happen for now anymore but will once we re-implement
648 * dynamic VSIDs for shared page tables
649 */
650 slb_miss_user_pseries:
651 std r10,PACA_EXGEN+EX_R10(r13)
652 std r11,PACA_EXGEN+EX_R11(r13)
653 std r12,PACA_EXGEN+EX_R12(r13)
654 GET_SCRATCH0(r10)
655 ld r11,PACA_EXSLB+EX_R9(r13)
656 ld r12,PACA_EXSLB+EX_R3(r13)
657 std r10,PACA_EXGEN+EX_R13(r13)
658 std r11,PACA_EXGEN+EX_R9(r13)
659 std r12,PACA_EXGEN+EX_R3(r13)
660 clrrdi r12,r13,32
661 mfmsr r10
662 mfspr r11,SRR0 /* save SRR0 */
663 ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
664 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
665 mtspr SRR0,r12
666 mfspr r12,SRR1 /* and SRR1 */
667 mtspr SRR1,r10
668 rfid
669 b . /* prevent spec. execution */
670 #endif /* __DISABLED__ */
671
672 /*
673 * Code from here down to __end_handlers is invoked from the
674 * exception prologs above. Because the prologs assemble the
675 * addresses of these handlers using the LOAD_HANDLER macro,
676 * which uses an ori instruction, these handlers must be in
677 * the first 64k of the kernel image.
678 */
679
680 /*** Common interrupt handlers ***/
681
682 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
683
684 /*
685 * Machine check is different because we use a different
686 * save area: PACA_EXMC instead of PACA_EXGEN.
687 */
688 .align 7
689 .globl machine_check_common
690 machine_check_common:
691 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
692 FINISH_NAP
693 DISABLE_INTS
694 bl .save_nvgprs
695 addi r3,r1,STACK_FRAME_OVERHEAD
696 bl .machine_check_exception
697 b .ret_from_except
698
699 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
700 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
701 STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
702 #ifdef CONFIG_PPC_DOORBELL
703 STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
704 #else
705 STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
706 #endif
707 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
708 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
709 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
710 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
711 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
712 #ifdef CONFIG_PPC_DOORBELL
713 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
714 #else
715 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
716 #endif
717 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
718 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
719 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
720 #ifdef CONFIG_ALTIVEC
721 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
722 #else
723 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
724 #endif
725 #ifdef CONFIG_CBE_RAS
726 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
727 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
728 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
729 #endif /* CONFIG_CBE_RAS */
730
731 /*
732 * Relocation-on interrupts: A subset of the interrupts can be delivered
733 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
734 * it. Addresses are the same as the original interrupt addresses, but
735 * offset by 0xc000000000004000.
736 * It's impossible to receive interrupts below 0x300 via this mechanism.
737 * KVM: None of these traps are from the guest ; anything that escalated
738 * to HV=1 from HV=0 is delivered via real mode handlers.
739 */
740
741 /*
742 * This uses the standard macro, since the original 0x300 vector
743 * only has extra guff for STAB-based processors -- which never
744 * come here.
745 */
746 STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
747 . = 0x4380
748 .globl data_access_slb_relon_pSeries
749 data_access_slb_relon_pSeries:
750 SET_SCRATCH0(r13)
751 EXCEPTION_PROLOG_0(PACA_EXSLB)
752 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
753 std r3,PACA_EXSLB+EX_R3(r13)
754 mfspr r3,SPRN_DAR
755 mfspr r12,SPRN_SRR1
756 #ifndef CONFIG_RELOCATABLE
757 b .slb_miss_realmode
758 #else
759 /*
760 * We can't just use a direct branch to .slb_miss_realmode
761 * because the distance from here to there depends on where
762 * the kernel ends up being put.
763 */
764 mfctr r11
765 ld r10,PACAKBASE(r13)
766 LOAD_HANDLER(r10, .slb_miss_realmode)
767 mtctr r10
768 bctr
769 #endif
770
771 STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
772 . = 0x4480
773 .globl instruction_access_slb_relon_pSeries
774 instruction_access_slb_relon_pSeries:
775 SET_SCRATCH0(r13)
776 EXCEPTION_PROLOG_0(PACA_EXSLB)
777 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
778 std r3,PACA_EXSLB+EX_R3(r13)
779 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
780 mfspr r12,SPRN_SRR1
781 #ifndef CONFIG_RELOCATABLE
782 b .slb_miss_realmode
783 #else
784 mfctr r11
785 ld r10,PACAKBASE(r13)
786 LOAD_HANDLER(r10, .slb_miss_realmode)
787 mtctr r10
788 bctr
789 #endif
790
791 . = 0x4500
792 .globl hardware_interrupt_relon_pSeries;
793 .globl hardware_interrupt_relon_hv;
794 hardware_interrupt_relon_pSeries:
795 hardware_interrupt_relon_hv:
796 BEGIN_FTR_SECTION
797 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
798 FTR_SECTION_ELSE
799 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
800 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
801 STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
802 STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
803 STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
804 MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
805 STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
806 MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
807 STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
808
809 . = 0x4c00
810 .globl system_call_relon_pSeries
811 system_call_relon_pSeries:
812 HMT_MEDIUM
813 SYSCALL_PSERIES_1
814 SYSCALL_PSERIES_2_DIRECT
815 SYSCALL_PSERIES_3
816
817 STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
818
819 . = 0x4e00
820 SET_SCRATCH0(r13)
821 EXCEPTION_PROLOG_0(PACA_EXGEN)
822 b h_data_storage_relon_hv
823
824 . = 0x4e20
825 SET_SCRATCH0(r13)
826 EXCEPTION_PROLOG_0(PACA_EXGEN)
827 b h_instr_storage_relon_hv
828
829 . = 0x4e40
830 SET_SCRATCH0(r13)
831 EXCEPTION_PROLOG_0(PACA_EXGEN)
832 b emulation_assist_relon_hv
833
834 . = 0x4e60
835 SET_SCRATCH0(r13)
836 EXCEPTION_PROLOG_0(PACA_EXGEN)
837 b hmi_exception_relon_hv
838
839 . = 0x4e80
840 SET_SCRATCH0(r13)
841 EXCEPTION_PROLOG_0(PACA_EXGEN)
842 b h_doorbell_relon_hv
843
844 performance_monitor_relon_pSeries_1:
845 . = 0x4f00
846 SET_SCRATCH0(r13)
847 EXCEPTION_PROLOG_0(PACA_EXGEN)
848 b performance_monitor_relon_pSeries
849
850 altivec_unavailable_relon_pSeries_1:
851 . = 0x4f20
852 SET_SCRATCH0(r13)
853 EXCEPTION_PROLOG_0(PACA_EXGEN)
854 b altivec_unavailable_relon_pSeries
855
856 vsx_unavailable_relon_pSeries_1:
857 . = 0x4f40
858 SET_SCRATCH0(r13)
859 EXCEPTION_PROLOG_0(PACA_EXGEN)
860 b vsx_unavailable_relon_pSeries
861
862 tm_unavailable_relon_pSeries_1:
863 . = 0x4f60
864 SET_SCRATCH0(r13)
865 EXCEPTION_PROLOG_0(PACA_EXGEN)
866 b tm_unavailable_relon_pSeries
867
868 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
869 #ifdef CONFIG_PPC_DENORMALISATION
870 . = 0x5500
871 b denorm_exception_hv
872 #endif
873 #ifdef CONFIG_HVC_SCOM
874 STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
875 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
876 #endif /* CONFIG_HVC_SCOM */
877 STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
878
879 /* Other future vectors */
880 .align 7
881 .globl __end_interrupts
882 __end_interrupts:
883
884 .align 7
885 system_call_entry_direct:
886 #if defined(CONFIG_RELOCATABLE)
887 /* The first level prologue may have used LR to get here, saving
888 * orig in r10. To save hacking/ifdeffing common code, restore here.
889 */
890 mtlr r10
891 #endif
892 system_call_entry:
893 b system_call_common
894
895 ppc64_runlatch_on_trampoline:
896 b .__ppc64_runlatch_on
897
898 /*
899 * Here we have detected that the kernel stack pointer is bad.
900 * R9 contains the saved CR, r13 points to the paca,
901 * r10 contains the (bad) kernel stack pointer,
902 * r11 and r12 contain the saved SRR0 and SRR1.
903 * We switch to using an emergency stack, save the registers there,
904 * and call kernel_bad_stack(), which panics.
905 */
906 bad_stack:
907 ld r1,PACAEMERGSP(r13)
908 subi r1,r1,64+INT_FRAME_SIZE
909 std r9,_CCR(r1)
910 std r10,GPR1(r1)
911 std r11,_NIP(r1)
912 std r12,_MSR(r1)
913 mfspr r11,SPRN_DAR
914 mfspr r12,SPRN_DSISR
915 std r11,_DAR(r1)
916 std r12,_DSISR(r1)
917 mflr r10
918 mfctr r11
919 mfxer r12
920 std r10,_LINK(r1)
921 std r11,_CTR(r1)
922 std r12,_XER(r1)
923 SAVE_GPR(0,r1)
924 SAVE_GPR(2,r1)
925 ld r10,EX_R3(r3)
926 std r10,GPR3(r1)
927 SAVE_GPR(4,r1)
928 SAVE_4GPRS(5,r1)
929 ld r9,EX_R9(r3)
930 ld r10,EX_R10(r3)
931 SAVE_2GPRS(9,r1)
932 ld r9,EX_R11(r3)
933 ld r10,EX_R12(r3)
934 ld r11,EX_R13(r3)
935 std r9,GPR11(r1)
936 std r10,GPR12(r1)
937 std r11,GPR13(r1)
938 BEGIN_FTR_SECTION
939 ld r10,EX_CFAR(r3)
940 std r10,ORIG_GPR3(r1)
941 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
942 SAVE_8GPRS(14,r1)
943 SAVE_10GPRS(22,r1)
944 lhz r12,PACA_TRAP_SAVE(r13)
945 std r12,_TRAP(r1)
946 addi r11,r1,INT_FRAME_SIZE
947 std r11,0(r1)
948 li r12,0
949 std r12,0(r11)
950 ld r2,PACATOC(r13)
951 ld r11,exception_marker@toc(r2)
952 std r12,RESULT(r1)
953 std r11,STACK_FRAME_OVERHEAD-16(r1)
954 1: addi r3,r1,STACK_FRAME_OVERHEAD
955 bl .kernel_bad_stack
956 b 1b
957
958 /*
959 * Here r13 points to the paca, r9 contains the saved CR,
960 * SRR0 and SRR1 are saved in r11 and r12,
961 * r9 - r13 are saved in paca->exgen.
962 */
963 .align 7
964 .globl data_access_common
965 data_access_common:
966 mfspr r10,SPRN_DAR
967 std r10,PACA_EXGEN+EX_DAR(r13)
968 mfspr r10,SPRN_DSISR
969 stw r10,PACA_EXGEN+EX_DSISR(r13)
970 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
971 DISABLE_INTS
972 ld r12,_MSR(r1)
973 ld r3,PACA_EXGEN+EX_DAR(r13)
974 lwz r4,PACA_EXGEN+EX_DSISR(r13)
975 li r5,0x300
976 b .do_hash_page /* Try to handle as hpte fault */
977
978 .align 7
979 .globl h_data_storage_common
980 h_data_storage_common:
981 mfspr r10,SPRN_HDAR
982 std r10,PACA_EXGEN+EX_DAR(r13)
983 mfspr r10,SPRN_HDSISR
984 stw r10,PACA_EXGEN+EX_DSISR(r13)
985 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
986 bl .save_nvgprs
987 DISABLE_INTS
988 addi r3,r1,STACK_FRAME_OVERHEAD
989 bl .unknown_exception
990 b .ret_from_except
991
992 .align 7
993 .globl instruction_access_common
994 instruction_access_common:
995 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
996 DISABLE_INTS
997 ld r12,_MSR(r1)
998 ld r3,_NIP(r1)
999 andis. r4,r12,0x5820
1000 li r5,0x400
1001 b .do_hash_page /* Try to handle as hpte fault */
1002
1003 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
1004
1005 /*
1006 * Here is the common SLB miss user that is used when going to virtual
1007 * mode for SLB misses, that is currently not used
1008 */
1009 #ifdef __DISABLED__
1010 .align 7
1011 .globl slb_miss_user_common
1012 slb_miss_user_common:
1013 mflr r10
1014 std r3,PACA_EXGEN+EX_DAR(r13)
1015 stw r9,PACA_EXGEN+EX_CCR(r13)
1016 std r10,PACA_EXGEN+EX_LR(r13)
1017 std r11,PACA_EXGEN+EX_SRR0(r13)
1018 bl .slb_allocate_user
1019
1020 ld r10,PACA_EXGEN+EX_LR(r13)
1021 ld r3,PACA_EXGEN+EX_R3(r13)
1022 lwz r9,PACA_EXGEN+EX_CCR(r13)
1023 ld r11,PACA_EXGEN+EX_SRR0(r13)
1024 mtlr r10
1025 beq- slb_miss_fault
1026
1027 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1028 beq- unrecov_user_slb
1029 mfmsr r10
1030
1031 .machine push
1032 .machine "power4"
1033 mtcrf 0x80,r9
1034 .machine pop
1035
1036 clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
1037 mtmsrd r10,1
1038
1039 mtspr SRR0,r11
1040 mtspr SRR1,r12
1041
1042 ld r9,PACA_EXGEN+EX_R9(r13)
1043 ld r10,PACA_EXGEN+EX_R10(r13)
1044 ld r11,PACA_EXGEN+EX_R11(r13)
1045 ld r12,PACA_EXGEN+EX_R12(r13)
1046 ld r13,PACA_EXGEN+EX_R13(r13)
1047 rfid
1048 b .
1049
1050 slb_miss_fault:
1051 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1052 ld r4,PACA_EXGEN+EX_DAR(r13)
1053 li r5,0
1054 std r4,_DAR(r1)
1055 std r5,_DSISR(r1)
1056 b handle_page_fault
1057
1058 unrecov_user_slb:
1059 EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1060 DISABLE_INTS
1061 bl .save_nvgprs
1062 1: addi r3,r1,STACK_FRAME_OVERHEAD
1063 bl .unrecoverable_exception
1064 b 1b
1065
1066 #endif /* __DISABLED__ */
1067
1068
1069 /*
 * Real-mode SLB miss handler: try to install an SLB entry for the
 * faulting address without leaving real mode, then rfid straight back
 * to the interrupted context.  On entry:
 *
1070 * r13 points to the PACA, r9 contains the saved CR,
1071 * r12 contains the saved SRR1, SRR0 is still ready for return
1072 * r3 has the faulting address
1073 * r9 - r13 are saved in paca->exslb.
1074 * r3 is saved in paca->slb_r3
1075 * We assume we aren't going to take any exceptions during this procedure.
1076 */
1077 _GLOBAL(slb_miss_realmode)
1078 mflr r10
1079 #ifdef CONFIG_RELOCATABLE
1080 mtctr r11 /* NOTE(review): r11 appears to carry a branch target set up by the relocatable-vector prolog — confirm against the 0x380 stub */
1081 #endif
1082
1083 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1084 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1085
1086 bl .slb_allocate_realmode
1087
1088 /* All done -- return from exception. */
1089
1090 ld r10,PACA_EXSLB+EX_LR(r13)
1091 ld r3,PACA_EXSLB+EX_R3(r13)
1092 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1093
1094 mtlr r10
1095
1096 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1097 beq- 2f /* RI clear: SRR0/1 no longer trustworthy */
1098
1099 .machine push
1100 .machine "power4"
1101 mtcrf 0x80,r9
1102 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1103 .machine pop
1104
1105 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1106 ld r9,PACA_EXSLB+EX_R9(r13)
1107 ld r10,PACA_EXSLB+EX_R10(r13)
1108 ld r11,PACA_EXSLB+EX_R11(r13)
1109 ld r12,PACA_EXSLB+EX_R12(r13)
1110 ld r13,PACA_EXSLB+EX_R13(r13) /* r13 restored last: PACA no longer reachable after this */
1111 rfid
1112 b . /* prevent speculative execution */
1113
/* Unrecoverable: redirect the return through unrecov_slb using the
 * kernel's base MSR from the PACA. */
1114 2: mfspr r11,SPRN_SRR0
1115 ld r10,PACAKBASE(r13)
1116 LOAD_HANDLER(r10,unrecov_slb)
1117 mtspr SPRN_SRR0,r10
1118 ld r10,PACAKMSR(r13)
1119 mtspr SPRN_SRR1,r10
1120 rfid
1121 b .
1122
/*
 * Unrecoverable SLB/STAB miss (MSR_RI was clear in the interrupted
 * context): build a frame from the EXSLB save area and loop reporting
 * the failure.
 */
1123 unrecov_slb:
1124 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1125 DISABLE_INTS
1126 bl .save_nvgprs
1127 1: addi r3,r1,STACK_FRAME_OVERHEAD
1128 bl .unrecoverable_exception
1129 b 1b /* .unrecoverable_exception is not expected to return */
1130
1131
1132 #ifdef CONFIG_PPC_970_NAP
/*
 * Fix up an exception taken while a PPC970 was napping: clear the
 * nap flag in thread_info and rewrite the saved NIP to the saved LR
 * so the exception return acts like a blr out of the nap loop.
 * NOTE(review): caller-provided registers — r9 = local flags, r10 =
 * flag mask to clear (presumably _TLF_NAPPING), r11 = thread_info
 * pointer; confirm against the power4 nap caller.
 */
1133 power4_fixup_nap:
1134 andc r9,r9,r10 /* clear the flag bits in r10 */
1135 std r9,TI_LOCAL_FLAGS(r11)
1136 ld r10,_LINK(r1) /* make idle task do the */
1137 std r10,_NIP(r1) /* equivalent of a blr */
1138 blr
1139 #endif
1140
1141 .align 7
1142 .globl alignment_common
/*
 * 0x600 alignment interrupt.  DAR/DSISR are stashed in the PACA
 * scratch area before the common prolog (which clobbers r10), then
 * copied into the pt_regs frame for alignment_exception().
 */
1143 alignment_common:
1144 mfspr r10,SPRN_DAR
1145 std r10,PACA_EXGEN+EX_DAR(r13)
1146 mfspr r10,SPRN_DSISR
1147 stw r10,PACA_EXGEN+EX_DSISR(r13)
1148 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1149 ld r3,PACA_EXGEN+EX_DAR(r13)
1150 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1151 std r3,_DAR(r1)
1152 std r4,_DSISR(r1)
1153 bl .save_nvgprs
1154 DISABLE_INTS
1155 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1156 bl .alignment_exception
1157 b .ret_from_except
1158
1159 .align 7
1160 .globl program_check_common
/*
 * 0x700 program check: full frame, then dispatch to the C handler.
 */
1161 program_check_common:
1162 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1163 bl .save_nvgprs
1164 DISABLE_INTS
1165 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1166 bl .program_check_exception
1167 b .ret_from_except
1168
1169 .align 7
1170 .globl fp_unavailable_common
/*
 * 0x800 FP unavailable.  Kernel-mode FP use is a bug (BUG_OPCODE);
 * for user mode the FP state is simply loaded up, unless the task was
 * inside a transaction (TM state bits in saved SRR1 non-zero), in
 * which case fp_unavailable_tm() must reclaim/recheckpoint first.
 */
1171 fp_unavailable_common:
1172 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1173 bne 1f /* if from user, just load it up */
1174 bl .save_nvgprs
1175 DISABLE_INTS
1176 addi r3,r1,STACK_FRAME_OVERHEAD
1177 bl .kernel_fp_unavailable_exception
1178 BUG_OPCODE
1179 1:
1180 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1181 BEGIN_FTR_SECTION
1182 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
1183 * transaction), go do TM stuff
1184 */
1185 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1186 bne- 2f
1187 END_FTR_SECTION_IFSET(CPU_FTR_TM)
1188 #endif
1189 bl .load_up_fpu
1190 b fast_exception_return
1191 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1192 2: /* User process was in a transaction */
1193 bl .save_nvgprs
1194 DISABLE_INTS
1195 addi r3,r1,STACK_FRAME_OVERHEAD
1196 bl .fp_unavailable_tm
1197 b .ret_from_except
1198 #endif
1199 .align 7
1200 .globl altivec_unavailable_common
/*
 * 0xf20 AltiVec unavailable.  On CPUs with AltiVec, user-mode faults
 * load up the vector state (or divert to altivec_unavailable_tm() if
 * the task was in a transaction); kernel-mode faults, and all faults
 * on CPUs without the feature, fall through to the C handler.
 */
1201 altivec_unavailable_common:
1202 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1203 #ifdef CONFIG_ALTIVEC
1204 BEGIN_FTR_SECTION
1205 beq 1f /* kernel mode: treat as an error below */
1206 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1207 BEGIN_FTR_SECTION_NESTED(69)
1208 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
1209 * transaction), go do TM stuff
1210 */
1211 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1212 bne- 2f
1213 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1214 #endif
1215 bl .load_up_altivec
1216 b fast_exception_return
1217 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1218 2: /* User process was in a transaction */
1219 bl .save_nvgprs
1220 DISABLE_INTS
1221 addi r3,r1,STACK_FRAME_OVERHEAD
1222 bl .altivec_unavailable_tm
1223 b .ret_from_except
1224 #endif
1225 1:
1226 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1227 #endif
1228 bl .save_nvgprs
1229 DISABLE_INTS
1230 addi r3,r1,STACK_FRAME_OVERHEAD
1231 bl .altivec_unavailable_exception
1232 b .ret_from_except
1233
1234 .align 7
1235 .globl vsx_unavailable_common
/*
 * 0xf40 VSX unavailable.  Mirrors the AltiVec handler above; note the
 * plain "b" (not "bl") into load_up_vsx, which handles the exception
 * return itself rather than coming back here.
 */
1236 vsx_unavailable_common:
1237 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1238 #ifdef CONFIG_VSX
1239 BEGIN_FTR_SECTION
1240 beq 1f /* kernel mode: treat as an error below */
1241 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1242 BEGIN_FTR_SECTION_NESTED(69)
1243 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
1244 * transaction), go do TM stuff
1245 */
1246 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1247 bne- 2f
1248 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1249 #endif
1250 b .load_up_vsx
1251 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1252 2: /* User process was in a transaction */
1253 bl .save_nvgprs
1254 DISABLE_INTS
1255 addi r3,r1,STACK_FRAME_OVERHEAD
1256 bl .vsx_unavailable_tm
1257 b .ret_from_except
1258 #endif
1259 1:
1260 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1261 #endif
1262 bl .save_nvgprs
1263 DISABLE_INTS
1264 addi r3,r1,STACK_FRAME_OVERHEAD
1265 bl .vsx_unavailable_exception
1266 b .ret_from_except
1267
1268 .align 7
1269 .globl tm_unavailable_common
/*
 * 0xf60 TM (transactional memory facility) unavailable: build a full
 * frame and dispatch to the C handler.
 */
1270 tm_unavailable_common:
1271 EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
1272 bl .save_nvgprs
1273 DISABLE_INTS
1274 addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
1275 bl .tm_unavailable_exception
1276 b .ret_from_except
1277
1278 .align 7
1279 .globl __end_handlers
1280 __end_handlers:
1281
1282 /* Equivalents to the above handlers for relocation-on interrupt vectors */
/* Out-of-line bodies for the IR=1,DR=1 vectors; each HV vector also
 * gets a KVM guest-exit handler stub. */
1283 STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
1284 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
1285 STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
1286 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
1287 STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1288 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
1289 STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
1290 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
1291 MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1292 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
1293
1294 STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1295 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1296 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1297 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
1298
1299 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1300 /*
1301 * Data area reserved for FWNMI option.
1302 * This address (0x7000) is fixed by the RPA.
1303 */
1304 . = 0x7000
1305 .globl fwnmi_data_area
1306 fwnmi_data_area:
1307
1308 /* pseries and powernv need to keep the whole page from
1309 * 0x7000 to 0x8000 free for use by the firmware
1310 */
1311 . = 0x8000
1312 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1313
1314 /* Space for CPU0's segment table */
1315 .balign 4096
1316 .globl initial_stab
1317 initial_stab:
1318 .space 4096
1319
1320 #ifdef CONFIG_PPC_POWERNV
/*
 * OPAL machine-check secondary handler.  r3 arrives holding the OPAL
 * MC event pointer; it is converted to a kernel virtual address and
 * stashed in the PACA, the event's saved SRR0/SRR1 are restored to
 * the SPRs, and control re-enters the normal machine check path.
 */
1321 _GLOBAL(opal_mc_secondary_handler)
1322 HMT_MEDIUM_PPR_DISCARD
1323 SET_SCRATCH0(r13)
1324 GET_PACA(r13)
1325 clrldi r3,r3,2 /* clear top 2 bits of the event pointer */
1326 tovirt(r3,r3)
1327 std r3,PACA_OPAL_MC_EVT(r13) /* remember event for the C handler */
1328 ld r13,OPAL_MC_SRR0(r3)
1329 mtspr SPRN_SRR0,r13
1330 ld r13,OPAL_MC_SRR1(r3)
1331 mtspr SPRN_SRR1,r13
1332 ld r3,OPAL_MC_GPR3(r3) /* restore interrupted r3 */
1333 GET_SCRATCH0(r13) /* restore interrupted r13 */
1334 b machine_check_pSeries
1335 #endif /* CONFIG_PPC_POWERNV */
1336
1337
1338 /*
1339 * Hash table stuff
 *
 * do_hash_page: try to insert a hash PTE for the faulting access.
 * On entry r3 = faulting address, r4 = DSISR-style error bits,
 * r5 = trap number (per the register summary before the hash_page
 * call below).  Odd errors, DABR matches and segment-table faults
 * are filtered off first; "NMI"-context faults (an irq taken while
 * soft-disabled) skip hash_page entirely (77f).
1340 */
1341 .align 7
1342 _STATIC(do_hash_page)
1343 std r3,_DAR(r1)
1344 std r4,_DSISR(r1)
1345
1346 andis. r0,r4,0xa410 /* weird error? */
1347 bne- handle_page_fault /* if not, try to insert a HPTE */
1348 andis. r0,r4,DSISR_DABRMATCH@h
1349 bne- handle_dabr_fault
1350
1351 BEGIN_FTR_SECTION
1352 andis. r0,r4,0x0020 /* Is it a segment table fault? */
1353 bne- do_ste_alloc /* If so handle it */
1354 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1355
1356 CURRENT_THREAD_INFO(r11, r1)
1357 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1358 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1359 bne 77f /* then don't call hash_page now */
1360 /*
1361 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1362 * accessing a userspace segment (even from the kernel). We assume
1363 * kernel addresses always have the high bit set.
1364 */
1365 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1366 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
1367 orc r0,r12,r0 /* MSR_PR | ~high_bit */
1368 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
1369 ori r4,r4,1 /* add _PAGE_PRESENT */
1370 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
1371
1372 /*
1373 * r3 contains the faulting address
1374 * r4 contains the required access permissions
1375 * r5 contains the trap number
1376 *
1377 * at return r3 = 0 for success, 1 for page fault, negative for error
1378 */
1379 bl .hash_page /* build HPTE if possible */
1380 cmpdi r3,0 /* see if hash_page succeeded */
1381
1382 /* Success */
1383 beq fast_exc_return_irq /* Return from exception on success */
1384
1385 /* Error */
1386 blt- 13f /* negative: HV refused the insert (low_hash_fault) */
1387
1388 /* Here we have a page fault that hash_page can't handle. */
/* Call do_page_fault(); non-zero return means the fault could not be
 * resolved, in which case bad_page_fault() reports it (r5 = sig/ret
 * code from do_page_fault). */
1389 handle_page_fault:
1390 11: ld r4,_DAR(r1)
1391 ld r5,_DSISR(r1)
1392 addi r3,r1,STACK_FRAME_OVERHEAD
1393 bl .do_page_fault
1394 cmpdi r3,0
1395 beq+ 12f /* resolved: fast path out */
1396 bl .save_nvgprs
1397 mr r5,r3
1398 addi r3,r1,STACK_FRAME_OVERHEAD
1399 lwz r4,_DAR(r1)
1400 bl .bad_page_fault
1401 b .ret_from_except
1402
1403 /* We have a data breakpoint exception - handle it */
1404 handle_dabr_fault:
1405 bl .save_nvgprs
1406 ld r4,_DAR(r1) /* r4 = breakpoint address */
1407 ld r5,_DSISR(r1)
1408 addi r3,r1,STACK_FRAME_OVERHEAD
1409 bl .do_break
1410 12: b .ret_from_except_lite /* shared exit with handle_page_fault */
1411
1412
1413 /* We have a page fault that hash_page could handle but HV refused
1414 * the PTE insertion
1415 */
/* r3 holds hash_page's negative error code; pass it as arg3. */
1416 13: bl .save_nvgprs
1417 mr r5,r3
1418 addi r3,r1,STACK_FRAME_OVERHEAD
1419 ld r4,_DAR(r1)
1420 bl .low_hash_fault
1421 b .ret_from_except
1422
1423 /*
1424 * We come here as a result of a DSI at a point where we don't want
1425 * to call hash_page, such as when we are accessing memory (possibly
1426 * user memory) inside a PMU interrupt that occurred while interrupts
1427 * were soft-disabled. We want to invoke the exception handler for
1428 * the access, or panic if there isn't a handler.
1429 */
1430 77: bl .save_nvgprs
1431 mr r4,r3 /* r4 = faulting address */
1432 addi r3,r1,STACK_FRAME_OVERHEAD
1433 li r5,SIGSEGV
1434 bl .bad_page_fault
1435 b .ret_from_except
1436
1437 /* here we have a segment miss */
/* Try to insert a segment-table entry; on failure fall back to the
 * generic page fault path. */
1438 do_ste_alloc:
1439 bl .ste_allocate /* try to insert stab entry */
1440 cmpdi r3,0
1441 bne- handle_page_fault
1442 b fast_exception_return
1443
1444 /*
 * Insert a bolted kernel entry into CPU0's segment table (STAB) in
 * response to a segment miss, then return with rfid.  On entry:
 *
1445 * r13 points to the PACA, r9 contains the saved CR,
1446 * r11 and r12 contain the saved SRR0 and SRR1.
1447 * r9 - r13 are saved in paca->exslb.
1448 * We assume we aren't going to take any exceptions during this procedure.
1449 * We assume (DAR >> 60) == 0xc.
1450 */
1451 .align 7
1452 _GLOBAL(do_stab_bolted)
1453 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1454 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1455
1456 /* Hash to the primary group */
1457 ld r10,PACASTABVIRT(r13)
1458 mfspr r11,SPRN_DAR
1459 srdi r11,r11,28 /* r11 = ESID of the faulting address */
1460 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1461
1462 /* Calculate VSID */
1463 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1464 li r9,0x1
1465 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
1466 ASM_VSID_SCRAMBLE(r11, r9, 256M)
1467 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1468
1469 /* Search the primary group for a free entry */
1470 1: ld r11,0(r10) /* Test valid bit of the current ste */
1471 andi. r11,r11,0x80
1472 beq 2f /* found an invalid (free) slot */
1473 addi r10,r10,16 /* next 16-byte ste */
1474 andi. r11,r10,0x70 /* wrapped past the 8-entry group? */
1475 bne 1b
1476
1477 /* Stick for only searching the primary group for now. */
1478 /* At least for now, we use a very simple random castout scheme */
1479 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1480 mftb r11
1481 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1482 ori r11,r11,0x10
1483
1484 /* r10 currently points to an ste one past the group of interest */
1485 /* make it point to the randomly selected entry */
1486 subi r10,r10,128
1487 or r10,r10,r11 /* r10 is the entry to invalidate */
1488
1489 isync /* mark the entry invalid */
1490 ld r11,0(r10)
1491 rldicl r11,r11,56,1 /* clear the valid bit */
1492 rotldi r11,r11,8
1493 std r11,0(r10)
1494 sync /* order the invalidation before reuse */
1495
1496 clrrdi r11,r11,28 /* Get the esid part of the ste */
1497 slbie r11 /* flush any cached translation */
1498
1499 2: std r9,8(r10) /* Store the vsid part of the ste */
1500 eieio /* vsid word visible before valid bit is set */
1501
1502 mfspr r11,SPRN_DAR /* Get the new esid */
1503 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1504 ori r11,r11,0x90 /* Turn on valid and kp */
1505 std r11,0(r10) /* Put new entry back into the stab */
1506
1507 sync
1508
1509 /* All done -- return from exception. */
1510 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1511 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1512
1513 andi. r10,r12,MSR_RI /* interrupted context recoverable? */
1514 beq- unrecov_slb
1515
1516 mtcrf 0x80,r9 /* restore CR */
1517
1518 mfmsr r10
1519 clrrdi r10,r10,2 /* clear RI (low MSR bits) before writing SRR0/1 */
1520 mtmsrd r10,1
1521
1522 mtspr SPRN_SRR0,r11
1523 mtspr SPRN_SRR1,r12
1524 ld r9,PACA_EXSLB+EX_R9(r13)
1525 ld r10,PACA_EXSLB+EX_R10(r13)
1526 ld r11,PACA_EXSLB+EX_R11(r13)
1527 ld r12,PACA_EXSLB+EX_R12(r13)
1528 ld r13,PACA_EXSLB+EX_R13(r13) /* r13 last: PACA unreachable after this */
1529 rfid
1530 b . /* prevent speculative execution */