/*
 * Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 *
 * This file handles the architecture-dependent parts of hardware exceptions
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
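/*
 * Hooks filled in by an external debugger (typically xmon or kgdb).
 * Each one returns non-zero when it has handled the event, in which
 * case the trap handlers below skip their normal processing.
 */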
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif
/* Transactional Memory trap debug */
#if 0
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

/*
 * Trap & Exception support
 */
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_owner = cpu;
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	/* Nest count reaches zero, release the lock. */
	arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (0x100) is a request to dump, so we always send
	 * it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		/*
		 * We aren't the primary crash CPU. We need to send it
		 * to a holding pattern to avoid it ending up in the panic
		 * code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}
static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
			"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	/* We restore the interrupt state now */
	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif
#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}
int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}
int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}
void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}
void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}
void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}
void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}
void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}
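/*
 * __parse_fpscr() maps the FPSCR state to an FPE_* si_code.  A class of
 * exception is reported only when both its enable bit (e.g. FPSCR_VE)
 * and the matching sticky status bit (e.g. FPSCR_VX) are set, i.e. the
 * task asked to trap on that class and it actually occurred.
 */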
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */
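/*
 * emulate_string_inst() handles lswi/lswx and stswi/stswx: they move an
 * arbitrary number of bytes between memory and successive GPRs starting
 * at rT, four bytes per register with the most significant byte first.
 * The byte count comes from XER[25:31] for the indexed (X) forms and
 * from the NB field (0 meaning 32) for the immediate forms.
 */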
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
		case PPC_INST_LSWX:
		case PPC_INST_STSWX:
			EA += NB_RB;
			num_bytes = regs->xer & 0x7f;
			break;
		case PPC_INST_LSWI:
		case PPC_INST_STSWI:
			num_bytes = (NB_RB == 0) ? 32 : NB_RB;
			break;
		default:
			return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & PPC_INST_STRING_MASK)) {
			case PPC_INST_LSWX:
			case PPC_INST_LSWI:
				if (get_user(val, (u8 __user *)EA))
					return -EFAULT;
				/* first time updating this reg,
				 * zero it out */
				if (pos == 0)
					regs->gpr[rT] = 0;
				regs->gpr[rT] |= val << shift;
				break;
			case PPC_INST_STSWI:
			case PPC_INST_STSWX:
				val = regs->gpr[rT] >> shift;
				if (put_user(val, (u8 __user *)EA))
					return -EFAULT;
				break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
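/*
 * popcntb writes into each byte of rA the number of 1-bits in the
 * corresponding byte of rS.  The emulation below computes all bytes in
 * parallel with the usual SWAR reduction: 2-bit partial sums, then
 * 4-bit sums, then a final mask that leaves one count per byte.
 */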
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
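/*
 * isel selects between two registers on a CR bit: rT gets rA (or the
 * literal value 0 when the RA field is 0) if CR bit BC is set,
 * otherwise rB.
 */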
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction.  This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
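/*
 * Program checks arrive with a REASON_* bit set: a trap instruction,
 * a privileged or illegal instruction, an enabled FP exception, or
 * (with transactional memory) a "TM Bad Thing".  The handler below
 * dispatches on that reason and falls back to instruction emulation.
 */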
void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/* If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist.  We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		goto bail;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			goto bail;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		goto bail;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	panic("kernel stack overflow");
}
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	die("nonrecoverable exception", regs, SIGKILL);
}
void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}
void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}
void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
void tm_unavailable_exception(struct pt_regs *regs)
{
	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* Currently we never expect a TMU exception.  Catch
	 * this and kill the process!
	 */
	printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
	       "(msr %lx)\n",
	       regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected TM unavailable exception", regs, SIGABRT);
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

extern void do_load_up_fpu(struct pt_regs *regs);

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 */
	tm_recheckpoint(&current->thread, regs->msr);
}

#ifdef CONFIG_ALTIVEC
extern void do_load_up_altivec(struct pt_regs *regs);

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vr = 1;
}
#endif

#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;
	/* This loads & recheckpoints FP and VRs. */
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vsr = 1;
}
#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif /* CONFIG_8xx */
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
	} else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
	} else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
	} else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
	} else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	mtspr(SPRN_DBCR0, current->thread.dbcr0);
}
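/*
 * DebugException() is the Book-E debug interrupt entry.  Branch-taken
 * (DBSR_BT) and instruction-complete (DBSR_IC) events are handled
 * inline so single stepping behaves like it does on server CPUs; all
 * other events (DAC/IAC hardware breakpoints) are passed to
 * handle_debug() above, which forwards them as TRAP_HWBKPT traps.
 */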
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_INT_TAU */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}
}
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	if (regs->msr & MSR_SPE)
		giveup_spe(current);

	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
	}
}
/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}
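/*
 * When CONFIG_PPC_EMULATED_STATS is enabled, every PPC_WARN_EMULATED()
 * site bumps a per-instruction counter.  The counters are exposed under
 * the emulated_instructions/ directory of the powerpc debugfs root, and
 * the do_warn switch (ppc_warn_emulated) additionally enables the
 * rate-limited message printed by ppc_warn_emulated_print().
 */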
#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */