[POWERPC] Make secondary CPUs call into kdump on reset exception
arch/powerpc/kernel/traps.c
1 /*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@samba.org)
11 */
12
13 /*
14 * This file handles the architecture-dependent parts of hardware exceptions
15 */
16
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/mm.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/a.out.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/prctl.h>
31 #include <linux/delay.h>
32 #include <linux/kprobes.h>
33 #include <linux/kexec.h>
34 #include <linux/backlight.h>
35
36 #include <asm/kdebug.h>
37 #include <asm/pgtable.h>
38 #include <asm/uaccess.h>
39 #include <asm/system.h>
40 #include <asm/io.h>
41 #include <asm/machdep.h>
42 #include <asm/rtas.h>
43 #include <asm/pmc.h>
44 #ifdef CONFIG_PPC32
45 #include <asm/reg.h>
46 #endif
47 #ifdef CONFIG_PMAC_BACKLIGHT
48 #include <asm/backlight.h>
49 #endif
50 #ifdef CONFIG_PPC64
51 #include <asm/firmware.h>
52 #include <asm/processor.h>
53 #endif
54 #include <asm/kexec.h>
55
56 #ifdef CONFIG_PPC64 /* XXX */
57 #define _IO_BASE pci_io_base
58 #ifdef CONFIG_KEXEC
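/*
 * CPUs that have taken a system-reset (soft-reset) exception are added
 * to this mask; kexec_sr_activated() and crash_kexec_secondary() use it
 * to pull such CPUs into an already-running kdump.
 */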
59 cpumask_t cpus_in_sr = CPU_MASK_NONE;
60 #endif
61 #endif
62
63 #ifdef CONFIG_DEBUGGER
64 int (*__debugger)(struct pt_regs *regs);
65 int (*__debugger_ipi)(struct pt_regs *regs);
66 int (*__debugger_bpt)(struct pt_regs *regs);
67 int (*__debugger_sstep)(struct pt_regs *regs);
68 int (*__debugger_iabr_match)(struct pt_regs *regs);
69 int (*__debugger_dabr_match)(struct pt_regs *regs);
70 int (*__debugger_fault_handler)(struct pt_regs *regs);
71
72 EXPORT_SYMBOL(__debugger);
73 EXPORT_SYMBOL(__debugger_ipi);
74 EXPORT_SYMBOL(__debugger_bpt);
75 EXPORT_SYMBOL(__debugger_sstep);
76 EXPORT_SYMBOL(__debugger_iabr_match);
77 EXPORT_SYMBOL(__debugger_dabr_match);
78 EXPORT_SYMBOL(__debugger_fault_handler);
79 #endif
80
81 ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
82
83 int register_die_notifier(struct notifier_block *nb)
84 {
85 return atomic_notifier_chain_register(&powerpc_die_chain, nb);
86 }
87 EXPORT_SYMBOL(register_die_notifier);
88
89 int unregister_die_notifier(struct notifier_block *nb)
90 {
91 return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
92 }
93 EXPORT_SYMBOL(unregister_die_notifier);
94
95 /*
96 * Trap & Exception support
97 */
98
99 static DEFINE_SPINLOCK(die_lock);
100
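/*
 * Common fatal-exception path: print the oops banner and register state,
 * then give kdump a chance to run.  crash_kexec() handles the CPU that
 * actually crashed; crash_kexec_secondary() lets a CPU that got here via
 * a system-reset exception join a kdump that is already in progress.
 */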
101 int die(const char *str, struct pt_regs *regs, long err)
102 {
103 static int die_counter;
104
105 if (debugger(regs))
106 return 1;
107
108 console_verbose();
109 spin_lock_irq(&die_lock);
110 bust_spinlocks(1);
111 #ifdef CONFIG_PMAC_BACKLIGHT
112 mutex_lock(&pmac_backlight_mutex);
113 if (machine_is(powermac) && pmac_backlight) {
114 struct backlight_properties *props;
115
116 down(&pmac_backlight->sem);
117 props = pmac_backlight->props;
118 props->brightness = props->max_brightness;
119 props->power = FB_BLANK_UNBLANK;
120 props->update_status(pmac_backlight);
121 up(&pmac_backlight->sem);
122 }
123 mutex_unlock(&pmac_backlight_mutex);
124 #endif
125 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
126 #ifdef CONFIG_PREEMPT
127 printk("PREEMPT ");
128 #endif
129 #ifdef CONFIG_SMP
130 printk("SMP NR_CPUS=%d ", NR_CPUS);
131 #endif
132 #ifdef CONFIG_DEBUG_PAGEALLOC
133 printk("DEBUG_PAGEALLOC ");
134 #endif
135 #ifdef CONFIG_NUMA
136 printk("NUMA ");
137 #endif
138 printk("%s\n", ppc_md.name ? "" : ppc_md.name);
139
140 print_modules();
141 show_regs(regs);
142 bust_spinlocks(0);
143 spin_unlock_irq(&die_lock);
144
145 if (kexec_should_crash(current) ||
146 kexec_sr_activated(smp_processor_id()))
147 crash_kexec(regs);
148 crash_kexec_secondary(regs);
149
150 if (in_interrupt())
151 panic("Fatal exception in interrupt");
152
153 if (panic_on_oops)
154 panic("Fatal exception: panic_on_oops");
155
156 do_exit(err);
157
158 return 0;
159 }
160
161 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
162 {
163 siginfo_t info;
164
165 if (!user_mode(regs)) {
166 if (die("Exception in kernel mode", regs, signr))
167 return;
168 }
169
170 memset(&info, 0, sizeof(info));
171 info.si_signo = signr;
172 info.si_code = code;
173 info.si_addr = (void __user *) addr;
174 force_sig_info(signr, &info, current);
175
176 /*
177 * Init gets no signals that it doesn't have a handler for.
178 * That's all very well, but if it has caused a synchronous
179 * exception and we ignore the resulting signal, it will just
180 * generate the same exception over and over again and we get
181 * nowhere. Better to kill it and let the kernel panic.
182 */
183 if (current->pid == 1) {
184 __sighandler_t handler;
185
186 spin_lock_irq(&current->sighand->siglock);
187 handler = current->sighand->action[signr-1].sa.sa_handler;
188 spin_unlock_irq(&current->sighand->siglock);
189 if (handler == SIG_DFL) {
190 /* init has generated a synchronous exception
191 and it doesn't have a handler for the signal */
192 printk(KERN_CRIT "init has generated signal %d "
193 "but has no handler for it\n", signr);
194 do_exit(signr);
195 }
196 }
197 }
198
199 #ifdef CONFIG_PPC64
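/*
 * System reset on 64-bit: give the platform code first refusal, mark
 * this CPU in cpus_in_sr so kdump knows it arrived via soft-reset, then
 * fall through to die() and crash_kexec_secondary() below.
 */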
200 void system_reset_exception(struct pt_regs *regs)
201 {
202 /* See if any machine dependent calls */
203 if (ppc_md.system_reset_exception) {
204 if (ppc_md.system_reset_exception(regs))
205 return;
206 }
207
208 #ifdef CONFIG_KEXEC
209 cpu_set(smp_processor_id(), cpus_in_sr);
210 #endif
211
212 die("System Reset", regs, SIGABRT);
213
214 /*
215 * Some CPUs when released from the debugger will execute this path.
216 * These CPUs entered the debugger via a soft-reset. If the CPU was
217 * hung before entering the debugger it will return to the hung
218 * state when exiting this function. This causes a problem in
219 * kdump since the hung CPU(s) will not respond to the IPI sent
220 * from kdump. To prevent the problem we call crash_kexec_secondary()
221 * here. If a kdump had not been initiated or we exit the debugger
222 * with the "exit and recover" command (x) crash_kexec_secondary()
223 * will return after 5ms and the CPU returns to its previous state.
224 */
225 crash_kexec_secondary(regs);
226
227 /* Must die if the interrupt is not recoverable */
228 if (!(regs->msr & MSR_RI))
229 panic("Unrecoverable System Reset");
230
231 /* What should we do here? We could issue a shutdown or hard reset. */
232 }
233 #endif
234
235 /*
236 * I/O accesses can cause machine checks on powermacs.
237 * Check if the NIP corresponds to the address of a sync
238 * instruction for which there is an entry in the exception
239 * table.
240 * Note that the 601 only takes a machine check on TEA
241 * (transfer error ack) signal assertion, and does not
242 * set any of the top 16 bits of SRR1.
243 * -- paulus.
244 */
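/*
 * The walker below assumes the layout described in the comment above,
 * roughly (the exact sequence depends on the io.h accessor used):
 *
 *	load/store  rN,0,rB		<- rB holds the port address
 *	sync  or  twi 0,rN,0
 *	isync
 *	nop
 *
 * NIP may point at the nop, the isync or the sync/twi; stepping back
 * from there reaches the load/store used for the debug message.
 */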
245 static inline int check_io_access(struct pt_regs *regs)
246 {
247 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
248 unsigned long msr = regs->msr;
249 const struct exception_table_entry *entry;
250 unsigned int *nip = (unsigned int *)regs->nip;
251
252 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
253 && (entry = search_exception_tables(regs->nip)) != NULL) {
254 /*
255 * Check that it's a sync instruction, or somewhere
256 * in the twi; isync; nop sequence that inb/inw/inl uses.
257 * As the address is in the exception table
258 * we should be able to read the instr there.
259 * For the debug message, we look at the preceding
260 * load or store.
261 */
262 if (*nip == 0x60000000) /* nop */
263 nip -= 2;
264 else if (*nip == 0x4c00012c) /* isync */
265 --nip;
266 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
267 /* sync or twi */
268 unsigned int rb;
269
270 --nip;
271 rb = (*nip >> 11) & 0x1f;
272 printk(KERN_DEBUG "%s bad port %lx at %p\n",
273 (*nip & 0x100)? "OUT to": "IN from",
274 regs->gpr[rb] - _IO_BASE, nip);
275 regs->msr |= MSR_RI;
276 regs->nip = entry->fixup;
277 return 1;
278 }
279 }
280 #endif /* CONFIG_PPC_PMAC && CONFIG_PPC32 */
281 return 0;
282 }
283
284 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
285 /* On 4xx, the reason for the machine check or program exception
286 is in the ESR. */
287 #define get_reason(regs) ((regs)->dsisr)
288 #ifndef CONFIG_FSL_BOOKE
289 #define get_mc_reason(regs) ((regs)->dsisr)
290 #else
291 #define get_mc_reason(regs) (mfspr(SPRN_MCSR))
292 #endif
293 #define REASON_FP ESR_FP
294 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
295 #define REASON_PRIVILEGED ESR_PPR
296 #define REASON_TRAP ESR_PTR
297
298 /* single-step stuff */
299 #define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
300 #define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
301
302 #else
303 /* On non-4xx, the reason for the machine check or program
304 exception is in the MSR. */
305 #define get_reason(regs) ((regs)->msr)
306 #define get_mc_reason(regs) ((regs)->msr)
307 #define REASON_FP 0x100000
308 #define REASON_ILLEGAL 0x80000
309 #define REASON_PRIVILEGED 0x40000
310 #define REASON_TRAP 0x20000
311
312 #define single_stepping(regs) ((regs)->msr & MSR_SE)
313 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
314 #endif
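
/*
 * program_check_exception() uses get_reason()/REASON_* to work out why
 * the program interrupt fired; machine_check_exception() uses
 * get_mc_reason() to fetch either SRR1 or the MCSR, depending on the
 * core family selected above.
 */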
315
316 /*
317 * This is "fall-back" implementation for configurations
318 * which don't provide platform-specific machine check info
319 */
320 void __attribute__ ((weak))
321 platform_machine_check(struct pt_regs *regs)
322 {
323 }
324
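/*
 * Machine check: let the platform try to recover first, then decode the
 * core-specific reason bits, print what we can and die.  A machine check
 * taken with MSR[RI] clear is unrecoverable and ends in panic().
 */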
325 void machine_check_exception(struct pt_regs *regs)
326 {
327 int recover = 0;
328 unsigned long reason = get_mc_reason(regs);
329
330 /* See if any machine dependent calls */
331 if (ppc_md.machine_check_exception)
332 recover = ppc_md.machine_check_exception(regs);
333
334 if (recover)
335 return;
336
337 if (user_mode(regs)) {
338 regs->msr |= MSR_RI;
339 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
340 return;
341 }
342
343 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
344 /* the qspan pci read routines can cause machine checks -- Cort */
345 bad_page_fault(regs, regs->dar, SIGBUS);
346 return;
347 #endif
348
349 if (debugger_fault_handler(regs)) {
350 regs->msr |= MSR_RI;
351 return;
352 }
353
354 if (check_io_access(regs))
355 return;
356
357 #if defined(CONFIG_4xx) && !defined(CONFIG_440A)
358 if (reason & ESR_IMCP) {
359 printk("Instruction");
360 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
361 } else
362 printk("Data");
363 printk(" machine check in kernel mode.\n");
364 #elif defined(CONFIG_440A)
365 printk("Machine check in kernel mode.\n");
366 if (reason & ESR_IMCP){
367 printk("Instruction Synchronous Machine Check exception\n");
368 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
369 }
370 else {
371 u32 mcsr = mfspr(SPRN_MCSR);
372 if (mcsr & MCSR_IB)
373 printk("Instruction Read PLB Error\n");
374 if (mcsr & MCSR_DRB)
375 printk("Data Read PLB Error\n");
376 if (mcsr & MCSR_DWB)
377 printk("Data Write PLB Error\n");
378 if (mcsr & MCSR_TLBP)
379 printk("TLB Parity Error\n");
380 if (mcsr & MCSR_ICP){
381 flush_instruction_cache();
382 printk("I-Cache Parity Error\n");
383 }
384 if (mcsr & MCSR_DCSP)
385 printk("D-Cache Search Parity Error\n");
386 if (mcsr & MCSR_DCFP)
387 printk("D-Cache Flush Parity Error\n");
388 if (mcsr & MCSR_IMPE)
389 printk("Machine Check exception is imprecise\n");
390
391 /* Clear MCSR */
392 mtspr(SPRN_MCSR, mcsr);
393 }
394 #elif defined (CONFIG_E500)
395 printk("Machine check in kernel mode.\n");
396 printk("Caused by (from MCSR=%lx): ", reason);
397
398 if (reason & MCSR_MCP)
399 printk("Machine Check Signal\n");
400 if (reason & MCSR_ICPERR)
401 printk("Instruction Cache Parity Error\n");
402 if (reason & MCSR_DCP_PERR)
403 printk("Data Cache Push Parity Error\n");
404 if (reason & MCSR_DCPERR)
405 printk("Data Cache Parity Error\n");
406 if (reason & MCSR_GL_CI)
407 printk("Guarded Load or Cache-Inhibited stwcx.\n");
408 if (reason & MCSR_BUS_IAERR)
409 printk("Bus - Instruction Address Error\n");
410 if (reason & MCSR_BUS_RAERR)
411 printk("Bus - Read Address Error\n");
412 if (reason & MCSR_BUS_WAERR)
413 printk("Bus - Write Address Error\n");
414 if (reason & MCSR_BUS_IBERR)
415 printk("Bus - Instruction Data Error\n");
416 if (reason & MCSR_BUS_RBERR)
417 printk("Bus - Read Data Bus Error\n");
418 if (reason & MCSR_BUS_WBERR)
419 printk("Bus - Read Data Bus Error\n");
420 if (reason & MCSR_BUS_IPERR)
421 printk("Bus - Instruction Parity Error\n");
422 if (reason & MCSR_BUS_RPERR)
423 printk("Bus - Read Parity Error\n");
424 #elif defined (CONFIG_E200)
425 printk("Machine check in kernel mode.\n");
426 printk("Caused by (from MCSR=%lx): ", reason);
427
428 if (reason & MCSR_MCP)
429 printk("Machine Check Signal\n");
430 if (reason & MCSR_CP_PERR)
431 printk("Cache Push Parity Error\n");
432 if (reason & MCSR_CPERR)
433 printk("Cache Parity Error\n");
434 if (reason & MCSR_EXCP_ERR)
435 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
436 if (reason & MCSR_BUS_IRERR)
437 printk("Bus - Read Bus Error on instruction fetch\n");
438 if (reason & MCSR_BUS_DRERR)
439 printk("Bus - Read Bus Error on data load\n");
440 if (reason & MCSR_BUS_WRERR)
441 printk("Bus - Write Bus Error on buffered store or cache line push\n");
442 #else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
443 printk("Machine check in kernel mode.\n");
444 printk("Caused by (from SRR1=%lx): ", reason);
445 switch (reason & 0x601F0000) {
446 case 0x80000:
447 printk("Machine check signal\n");
448 break;
449 case 0: /* for 601 */
450 case 0x40000:
451 case 0x140000: /* 7450 MSS error and TEA */
452 printk("Transfer error ack signal\n");
453 break;
454 case 0x20000:
455 printk("Data parity error signal\n");
456 break;
457 case 0x10000:
458 printk("Address parity error signal\n");
459 break;
460 case 0x20000000:
461 printk("L1 Data Cache error\n");
462 break;
463 case 0x40000000:
464 printk("L1 Instruction Cache error\n");
465 break;
466 case 0x00100000:
467 printk("L2 data cache parity error\n");
468 break;
469 default:
470 printk("Unknown values in msr\n");
471 }
472 #endif /* CONFIG_4xx */
473
474 /*
475 * Optional platform-provided routine to print out
476 * additional info, e.g. bus error registers.
477 */
478 platform_machine_check(regs);
479
480 if (debugger_fault_handler(regs))
481 return;
482 die("Machine check", regs, SIGBUS);
483
484 /* Must die if the interrupt is not recoverable */
485 if (!(regs->msr & MSR_RI))
486 panic("Unrecoverable Machine check");
487 }
488
489 void SMIException(struct pt_regs *regs)
490 {
491 die("System Management Interrupt", regs, SIGABRT);
492 }
493
494 void unknown_exception(struct pt_regs *regs)
495 {
496 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
497 regs->nip, regs->msr, regs->trap);
498
499 _exception(SIGTRAP, regs, 0, 0);
500 }
501
502 void instruction_breakpoint_exception(struct pt_regs *regs)
503 {
504 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
505 5, SIGTRAP) == NOTIFY_STOP)
506 return;
507 if (debugger_iabr_match(regs))
508 return;
509 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
510 }
511
512 void RunModeException(struct pt_regs *regs)
513 {
514 _exception(SIGTRAP, regs, 0, 0);
515 }
516
517 void __kprobes single_step_exception(struct pt_regs *regs)
518 {
519 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
520
521 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
522 5, SIGTRAP) == NOTIFY_STOP)
523 return;
524 if (debugger_sstep(regs))
525 return;
526
527 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
528 }
529
530 /*
531 * After we have successfully emulated an instruction, we have to
532 * check if the instruction was being single-stepped, and if so,
533 * pretend we got a single-step exception. This was pointed out
534 * by Kumar Gala. -- paulus
535 */
536 static void emulate_single_step(struct pt_regs *regs)
537 {
538 if (single_stepping(regs)) {
539 clear_single_step(regs);
540 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
541 }
542 }
543
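/*
 * Turn an IEEE FP exception into a SIGFPE si_code.  A condition is
 * reported only when both its FPSCR status bit (e.g. FPSCR_OX) and the
 * matching enable bit (FPSCR_OE) are set.
 */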
544 static void parse_fpe(struct pt_regs *regs)
545 {
546 int code = 0;
547 unsigned long fpscr;
548
549 flush_fp_to_thread(current);
550
551 fpscr = current->thread.fpscr.val;
552
553 /* Invalid operation */
554 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
555 code = FPE_FLTINV;
556
557 /* Overflow */
558 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
559 code = FPE_FLTOVF;
560
561 /* Underflow */
562 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
563 code = FPE_FLTUND;
564
565 /* Divide by zero */
566 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
567 code = FPE_FLTDIV;
568
569 /* Inexact result */
570 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
571 code = FPE_FLTRES;
572
573 _exception(SIGFPE, regs, code, regs->nip);
574 }
575
576 /*
577 * Illegal instruction emulation support. Originally written to
578 * provide the PVR to user applications using the mfspr rd, PVR.
579 * Return non-zero if we can't emulate, or -EFAULT if the associated
580 * memory access caused an access fault. Return zero on success.
581 *
582 * There are a couple of ways to do this, either "decode" the instruction
583 * or directly match lots of bits. In this case, matching lots of
584 * bits is faster and easier.
585 *
586 */
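
/*
 * Example of how the match/mask pairs below work: "mfspr rD,PVR"
 * assembles to 0x7c1f42a6 | (rD << 21), so masking the fetched word
 * with 0xfc1fffff clears the rD field and any destination register
 * compares equal to INST_MFSPR_PVR.
 */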
587 #define INST_MFSPR_PVR 0x7c1f42a6
588 #define INST_MFSPR_PVR_MASK 0xfc1fffff
589
590 #define INST_DCBA 0x7c0005ec
591 #define INST_DCBA_MASK 0x7c0007fe
592
593 #define INST_MCRXR 0x7c000400
594 #define INST_MCRXR_MASK 0x7c0007fe
595
596 #define INST_STRING 0x7c00042a
597 #define INST_STRING_MASK 0x7c0007fe
598 #define INST_STRING_GEN_MASK 0x7c00067e
599 #define INST_LSWI 0x7c0004aa
600 #define INST_LSWX 0x7c00042a
601 #define INST_STSWI 0x7c0005aa
602 #define INST_STSWX 0x7c00052a
603
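/*
 * Emulate the string instructions (lswi/lswx/stswi/stswx): copy
 * num_bytes bytes between memory at EA and successive GPRs starting at
 * rT, four bytes per register, most-significant byte first, wrapping
 * from r31 back to r0.  For example, "lswi r5,r4,6" loads 6 bytes from
 * the address in r4 into r5 and the top half of r6.
 */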
604 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
605 {
606 u8 rT = (instword >> 21) & 0x1f;
607 u8 rA = (instword >> 16) & 0x1f;
608 u8 NB_RB = (instword >> 11) & 0x1f;
609 u32 num_bytes;
610 unsigned long EA;
611 int pos = 0;
612
613 /* Early out if we are an invalid form of lswx */
614 if ((instword & INST_STRING_MASK) == INST_LSWX)
615 if ((rT == rA) || (rT == NB_RB))
616 return -EINVAL;
617
618 EA = (rA == 0) ? 0 : regs->gpr[rA];
619
620 switch (instword & INST_STRING_MASK) {
621 case INST_LSWX:
622 case INST_STSWX:
623 EA += NB_RB;
624 num_bytes = regs->xer & 0x7f;
625 break;
626 case INST_LSWI:
627 case INST_STSWI:
628 num_bytes = (NB_RB == 0) ? 32 : NB_RB;
629 break;
630 default:
631 return -EINVAL;
632 }
633
634 while (num_bytes != 0)
635 {
636 u8 val;
637 u32 shift = 8 * (3 - (pos & 0x3));
638
639 switch ((instword & INST_STRING_MASK)) {
640 case INST_LSWX:
641 case INST_LSWI:
642 if (get_user(val, (u8 __user *)EA))
643 return -EFAULT;
644 /* first time updating this reg,
645 * zero it out */
646 if (pos == 0)
647 regs->gpr[rT] = 0;
648 regs->gpr[rT] |= val << shift;
649 break;
650 case INST_STSWI:
651 case INST_STSWX:
652 val = regs->gpr[rT] >> shift;
653 if (put_user(val, (u8 __user *)EA))
654 return -EFAULT;
655 break;
656 }
657 /* move EA to next address */
658 EA += 1;
659 num_bytes--;
660
661 /* manage our position within the register */
662 if (++pos == 4) {
663 pos = 0;
664 if (++rT == 32)
665 rT = 0;
666 }
667 }
668
669 return 0;
670 }
671
672 static int emulate_instruction(struct pt_regs *regs)
673 {
674 u32 instword;
675 u32 rd;
676
677 if (!user_mode(regs) || (regs->msr & MSR_LE))
678 return -EINVAL;
679 CHECK_FULL_REGS(regs);
680
681 if (get_user(instword, (u32 __user *)(regs->nip)))
682 return -EFAULT;
683
684 /* Emulate the mfspr rD, PVR. */
685 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
686 rd = (instword >> 21) & 0x1f;
687 regs->gpr[rd] = mfspr(SPRN_PVR);
688 return 0;
689 }
690
691 /* Emulating the dcba insn is just a no-op. */
692 if ((instword & INST_DCBA_MASK) == INST_DCBA)
693 return 0;
694
695 /* Emulate the mcrxr insn. */
696 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
697 int shift = (instword >> 21) & 0x1c;
698 unsigned long msk = 0xf0000000UL >> shift;
699
700 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
701 regs->xer &= ~0xf0000000UL;
702 return 0;
703 }
704
705 /* Emulate load/store string insn. */
706 if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
707 return emulate_string_inst(regs, instword);
708
709 return -EINVAL;
710 }
711
712 /*
713 * Look through the list of trap instructions that are used for BUG(),
714 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
715 * that the exception was caused by a trap instruction of some kind.
716 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
717 * otherwise.
718 */
719 extern struct bug_entry __start___bug_table[], __stop___bug_table[];
720
721 #ifndef CONFIG_MODULES
722 #define module_find_bug(x) NULL
723 #endif
724
725 struct bug_entry *find_bug(unsigned long bugaddr)
726 {
727 struct bug_entry *bug;
728
729 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
730 if (bugaddr == bug->bug_addr)
731 return bug;
732 return module_find_bug(bugaddr);
733 }
734
735 static int check_bug_trap(struct pt_regs *regs)
736 {
737 struct bug_entry *bug;
738 unsigned long addr;
739
740 if (regs->msr & MSR_PR)
741 return 0; /* not in kernel */
742 addr = regs->nip; /* address of trap instruction */
743 if (addr < PAGE_OFFSET)
744 return 0;
745 bug = find_bug(regs->nip);
746 if (bug == NULL)
747 return 0;
748 if (bug->line & BUG_WARNING_TRAP) {
749 /* this is a WARN_ON rather than BUG/BUG_ON */
750 printk(KERN_ERR "Badness in %s at %s:%ld\n",
751 bug->function, bug->file,
752 bug->line & ~BUG_WARNING_TRAP);
753 dump_stack();
754 return 1;
755 }
756 printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
757 bug->function, bug->file, bug->line);
758
759 return 0;
760 }
761
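/*
 * Program check: dispatch on the REASON_* bits -- IEEE FP exceptions go
 * to parse_fpe(), trap instructions to the debugger/BUG handling, and
 * illegal or privileged instructions get a shot at emulation before we
 * fall back to SIGILL.
 */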
762 void __kprobes program_check_exception(struct pt_regs *regs)
763 {
764 unsigned int reason = get_reason(regs);
765 extern int do_mathemu(struct pt_regs *regs);
766
767 #ifdef CONFIG_MATH_EMULATION
768 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
769 * but there seems to be a hardware bug on the 405GP (RevD)
770 * that means ESR is sometimes set incorrectly - either to
771 * ESR_DST (!?) or 0. In the process of chasing this with the
772 * hardware people - not sure if it can happen on any illegal
773 * instruction or only on FP instructions, whether there is a
774 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
775 if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
776 emulate_single_step(regs);
777 return;
778 }
779 #endif /* CONFIG_MATH_EMULATION */
780
781 if (reason & REASON_FP) {
782 /* IEEE FP exception */
783 parse_fpe(regs);
784 return;
785 }
786 if (reason & REASON_TRAP) {
787 /* trap exception */
788 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
789 == NOTIFY_STOP)
790 return;
791 if (debugger_bpt(regs))
792 return;
793 if (check_bug_trap(regs)) {
794 regs->nip += 4;
795 return;
796 }
797 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
798 return;
799 }
800
801 local_irq_enable();
802
803 /* Try to emulate it if we should. */
804 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
805 switch (emulate_instruction(regs)) {
806 case 0:
807 regs->nip += 4;
808 emulate_single_step(regs);
809 return;
810 case -EFAULT:
811 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
812 return;
813 }
814 }
815
816 if (reason & REASON_PRIVILEGED)
817 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
818 else
819 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
820 }
821
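/*
 * Alignment interrupt: try to emulate the unaligned access with
 * fix_alignment(); on success skip the instruction, otherwise deliver
 * SIGBUS (or SIGSEGV if the operand address itself was bad).
 */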
822 void alignment_exception(struct pt_regs *regs)
823 {
824 int fixed = 0;
825
826 /* we don't implement logging of alignment exceptions */
827 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
828 fixed = fix_alignment(regs);
829
830 if (fixed == 1) {
831 regs->nip += 4; /* skip over emulated instruction */
832 emulate_single_step(regs);
833 return;
834 }
835
836 /* Operand address was bad */
837 if (fixed == -EFAULT) {
838 if (user_mode(regs))
839 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
840 else
841 /* Search exception table */
842 bad_page_fault(regs, regs->dar, SIGSEGV);
843 return;
844 }
845 _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
846 }
847
848 void StackOverflow(struct pt_regs *regs)
849 {
850 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
851 current, regs->gpr[1]);
852 debugger(regs);
853 show_regs(regs);
854 panic("kernel stack overflow");
855 }
856
857 void nonrecoverable_exception(struct pt_regs *regs)
858 {
859 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
860 regs->nip, regs->msr);
861 debugger(regs);
862 die("nonrecoverable exception", regs, SIGKILL);
863 }
864
865 void trace_syscall(struct pt_regs *regs)
866 {
867 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
868 current, current->pid, regs->nip, regs->link, regs->gpr[0],
869 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
870 }
871
872 void kernel_fp_unavailable_exception(struct pt_regs *regs)
873 {
874 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
875 "%lx at %lx\n", regs->trap, regs->nip);
876 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
877 }
878
879 void altivec_unavailable_exception(struct pt_regs *regs)
880 {
881 #if !defined(CONFIG_ALTIVEC)
882 if (user_mode(regs)) {
883 /* A user program has executed an altivec instruction,
884 but this kernel doesn't support altivec. */
885 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
886 return;
887 }
888 #endif
889 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
890 "%lx at %lx\n", regs->trap, regs->nip);
891 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
892 }
893
894 void performance_monitor_exception(struct pt_regs *regs)
895 {
896 perf_irq(regs);
897 }
898
899 #ifdef CONFIG_8xx
900 void SoftwareEmulation(struct pt_regs *regs)
901 {
902 extern int do_mathemu(struct pt_regs *);
903 extern int Soft_emulate_8xx(struct pt_regs *);
904 int errcode;
905
906 CHECK_FULL_REGS(regs);
907
908 if (!user_mode(regs)) {
909 debugger(regs);
910 die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
911 }
912
913 #ifdef CONFIG_MATH_EMULATION
914 errcode = do_mathemu(regs);
915 #else
916 errcode = Soft_emulate_8xx(regs);
917 #endif
918 if (errcode) {
919 if (errcode > 0)
920 _exception(SIGFPE, regs, 0, 0);
921 else if (errcode == -EFAULT)
922 _exception(SIGSEGV, regs, 0, 0);
923 else
924 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
925 } else
926 emulate_single_step(regs);
927 }
928 #endif /* CONFIG_8xx */
929
930 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
931
932 void DebugException(struct pt_regs *regs, unsigned long debug_status)
933 {
934 if (debug_status & DBSR_IC) { /* instruction completion */
935 regs->msr &= ~MSR_DE;
936 if (user_mode(regs)) {
937 current->thread.dbcr0 &= ~DBCR0_IC;
938 } else {
939 /* Disable instruction completion */
940 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
941 /* Clear the instruction completion event */
942 mtspr(SPRN_DBSR, DBSR_IC);
943 if (debugger_sstep(regs))
944 return;
945 }
946 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
947 }
948 }
949 #endif /* CONFIG_40x || CONFIG_BOOKE */
950
951 #if !defined(CONFIG_TAU_INT)
952 void TAUException(struct pt_regs *regs)
953 {
954 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
955 regs->nip, regs->msr, regs->trap, print_tainted());
956 }
957 #endif /* CONFIG_TAU_INT */
958
959 #ifdef CONFIG_ALTIVEC
960 void altivec_assist_exception(struct pt_regs *regs)
961 {
962 int err;
963
964 if (!user_mode(regs)) {
965 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
966 " at %lx\n", regs->nip);
967 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
968 }
969
970 flush_altivec_to_thread(current);
971
972 err = emulate_altivec(regs);
973 if (err == 0) {
974 regs->nip += 4; /* skip emulated instruction */
975 emulate_single_step(regs);
976 return;
977 }
978
979 if (err == -EFAULT) {
980 /* got an error reading the instruction */
981 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
982 } else {
983 /* didn't recognize the instruction */
984 /* XXX quick hack for now: set the non-Java bit in the VSCR */
985 if (printk_ratelimit())
986 printk(KERN_ERR "Unrecognized altivec instruction "
987 "in %s at %lx\n", current->comm, regs->nip);
988 current->thread.vscr.u[3] |= 0x10000;
989 }
990 }
991 #endif /* CONFIG_ALTIVEC */
992
993 #ifdef CONFIG_FSL_BOOKE
994 void CacheLockingException(struct pt_regs *regs, unsigned long address,
995 unsigned long error_code)
996 {
997 /* We treat cache locking instructions from the user
998 * as priv ops, in the future we could try to do
999 * something smarter
1000 */
1001 if (error_code & (ESR_DLK|ESR_ILK))
1002 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1003 return;
1004 }
1005 #endif /* CONFIG_FSL_BOOKE */
1006
1007 #ifdef CONFIG_SPE
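/*
 * Map the SPE floating-point status bits in SPEFSCR to a SIGFPE si_code,
 * honouring the exception-enable mask the task chose via
 * prctl(PR_SET_FPEXC); the sticky FOVFS/FUNFS/FINVS bits are set here
 * because hardware does not always do it.
 */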
1008 void SPEFloatingPointException(struct pt_regs *regs)
1009 {
1010 unsigned long spefscr;
1011 int fpexc_mode;
1012 int code = 0;
1013
1014 spefscr = current->thread.spefscr;
1015 fpexc_mode = current->thread.fpexc_mode;
1016
1017 /* Hardware does not necessarily set sticky
1018 * underflow/overflow/invalid flags */
1019 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1020 code = FPE_FLTOVF;
1021 spefscr |= SPEFSCR_FOVFS;
1022 }
1023 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1024 code = FPE_FLTUND;
1025 spefscr |= SPEFSCR_FUNFS;
1026 }
1027 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1028 code = FPE_FLTDIV;
1029 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1030 code = FPE_FLTINV;
1031 spefscr |= SPEFSCR_FINVS;
1032 }
1033 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1034 code = FPE_FLTRES;
1035
1036 current->thread.spefscr = spefscr;
1037
1038 _exception(SIGFPE, regs, code, regs->nip);
1039 return;
1040 }
1041 #endif
1042
1043 /*
1044 * We enter here if we get an unrecoverable exception, that is, one
1045 * that happened at a point where the RI (recoverable interrupt) bit
1046 * in the MSR is 0. This indicates that SRR0/1 are live, and that
1047 * we therefore lost state by taking this exception.
1048 */
1049 void unrecoverable_exception(struct pt_regs *regs)
1050 {
1051 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1052 regs->trap, regs->nip);
1053 die("Unrecoverable exception", regs, SIGABRT);
1054 }
1055
1056 #ifdef CONFIG_BOOKE_WDT
1057 /*
1058 * Default handler for a Watchdog exception: masks further watchdog
1059 * interrupts by clearing TCR[WIE], then returns.
1060 */
1061 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1062 {
1063 /* Generic WatchdogHandler, implement your own */
1064 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1065 return;
1066 }
1067
1068 void WatchdogException(struct pt_regs *regs)
1069 {
1070 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1071 WatchdogHandler(regs);
1072 }
1073 #endif
1074
1075 /*
1076 * We enter here if we discover during exception entry that we are
1077 * running in supervisor mode with a userspace value in the stack pointer.
1078 */
1079 void kernel_bad_stack(struct pt_regs *regs)
1080 {
1081 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1082 regs->gpr[1], regs->nip);
1083 die("Bad kernel stack pointer", regs, SIGABRT);
1084 }
1085
1086 void __init trap_init(void)
1087 {
1088 }