[SPARC64]: Don't printk() any messaages in sun4v_build_irq().
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / sparc64 / kernel / traps.c
CommitLineData
1da177e4
LT
1/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/sched.h> /* for jiffies */
15#include <linux/kernel.h>
16#include <linux/kallsyms.h>
17#include <linux/signal.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/mm.h>
21#include <linux/init.h>
22
23#include <asm/delay.h>
24#include <asm/system.h>
25#include <asm/ptrace.h>
26#include <asm/oplib.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/fpumacro.h>
32#include <asm/lsu.h>
33#include <asm/dcu.h>
34#include <asm/estate.h>
35#include <asm/chafsr.h>
6c52a96e 36#include <asm/sfafsr.h>
1da177e4
LT
37#include <asm/psrcompat.h>
38#include <asm/processor.h>
39#include <asm/timer.h>
40#include <asm/kdebug.h>
92704a1c 41#include <asm/head.h>
1da177e4
LT
42#ifdef CONFIG_KMOD
43#include <linux/kmod.h>
44#endif
45
46struct notifier_block *sparc64die_chain;
47static DEFINE_SPINLOCK(die_notifier_lock);
48
49int register_die_notifier(struct notifier_block *nb)
50{
51 int err = 0;
52 unsigned long flags;
53 spin_lock_irqsave(&die_notifier_lock, flags);
54 err = notifier_chain_register(&sparc64die_chain, nb);
55 spin_unlock_irqrestore(&die_notifier_lock, flags);
56 return err;
57}
58
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows (do not change it: the assembly trap entry code
 * writes this structure directly):
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* saved TSTATE at this level */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type */
	} trapstack[4];
	unsigned long tl;		/* trap level the error occurred at */
};
73
74static void dump_tl1_traplog(struct tl1_traplog *p)
75{
76 int i;
77
78 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
79 p->tl);
80 for (i = 0; i < 4; i++) {
81 printk(KERN_CRIT
82 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
83 "TNPC[%016lx] TT[%lx]\n",
84 i + 1,
85 p->trapstack[i].tstate, p->trapstack[i].tpc,
86 p->trapstack[i].tnpc, p->trapstack[i].tt);
87 }
88}
89
90void do_call_debug(struct pt_regs *regs)
91{
92 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
93}
94
95void bad_trap(struct pt_regs *regs, long lvl)
96{
97 char buffer[32];
98 siginfo_t info;
99
100 if (notify_die(DIE_TRAP, "bad trap", regs,
101 0, lvl, SIGTRAP) == NOTIFY_STOP)
102 return;
103
104 if (lvl < 0x100) {
105 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
106 die_if_kernel(buffer, regs);
107 }
108
109 lvl -= 0x100;
110 if (regs->tstate & TSTATE_PRIV) {
111 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
112 die_if_kernel(buffer, regs);
113 }
114 if (test_thread_flag(TIF_32BIT)) {
115 regs->tpc &= 0xffffffff;
116 regs->tnpc &= 0xffffffff;
117 }
118 info.si_signo = SIGILL;
119 info.si_errno = 0;
120 info.si_code = ILL_ILLTRP;
121 info.si_addr = (void __user *)regs->tpc;
122 info.si_trapno = lvl;
123 force_sig_info(SIGILL, &info, current);
124}
125
126void bad_trap_tl1(struct pt_regs *regs, long lvl)
127{
128 char buffer[32];
129
130 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
131 0, lvl, SIGTRAP) == NOTIFY_STOP)
132 return;
133
134 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
135
136 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
137 die_if_kernel (buffer, regs);
138}
139
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() reporting hook: take over the console locks and
 * identify the source location of the BUG.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
147
6c52a96e 148void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
149{
150 siginfo_t info;
151
152 if (notify_die(DIE_TRAP, "instruction access exception", regs,
153 0, 0x8, SIGTRAP) == NOTIFY_STOP)
154 return;
155
156 if (regs->tstate & TSTATE_PRIV) {
6c52a96e
DM
157 printk("spitfire_insn_access_exception: SFSR[%016lx] "
158 "SFAR[%016lx], going.\n", sfsr, sfar);
1da177e4
LT
159 die_if_kernel("Iax", regs);
160 }
161 if (test_thread_flag(TIF_32BIT)) {
162 regs->tpc &= 0xffffffff;
163 regs->tnpc &= 0xffffffff;
164 }
165 info.si_signo = SIGSEGV;
166 info.si_errno = 0;
167 info.si_code = SEGV_MAPERR;
168 info.si_addr = (void __user *)regs->tpc;
169 info.si_trapno = 0;
170 force_sig_info(SIGSEGV, &info, current);
171}
172
6c52a96e 173void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
174{
175 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
176 0, 0x8, SIGTRAP) == NOTIFY_STOP)
177 return;
178
179 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6c52a96e 180 spitfire_insn_access_exception(regs, sfsr, sfar);
1da177e4
LT
181}
182
ed6b0b45
DM
183void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
184{
185 unsigned short type = (type_ctx >> 16);
186 unsigned short ctx = (type_ctx & 0xffff);
187 siginfo_t info;
188
189 if (notify_die(DIE_TRAP, "instruction access exception", regs,
190 0, 0x8, SIGTRAP) == NOTIFY_STOP)
191 return;
192
193 if (regs->tstate & TSTATE_PRIV) {
194 printk("sun4v_insn_access_exception: ADDR[%016lx] "
195 "CTX[%04x] TYPE[%04x], going.\n",
196 addr, ctx, type);
197 die_if_kernel("Iax", regs);
198 }
199
200 if (test_thread_flag(TIF_32BIT)) {
201 regs->tpc &= 0xffffffff;
202 regs->tnpc &= 0xffffffff;
203 }
204 info.si_signo = SIGSEGV;
205 info.si_errno = 0;
206 info.si_code = SEGV_MAPERR;
207 info.si_addr = (void __user *) addr;
208 info.si_trapno = 0;
209 force_sig_info(SIGSEGV, &info, current);
210}
211
212void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
213{
214 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
215 0, 0x8, SIGTRAP) == NOTIFY_STOP)
216 return;
217
218 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
219 sun4v_insn_access_exception(regs, addr, type_ctx);
220}
221
6c52a96e 222void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
1da177e4
LT
223{
224 siginfo_t info;
225
226 if (notify_die(DIE_TRAP, "data access exception", regs,
227 0, 0x30, SIGTRAP) == NOTIFY_STOP)
228 return;
229
230 if (regs->tstate & TSTATE_PRIV) {
231 /* Test if this comes from uaccess places. */
8cf14af0 232 const struct exception_table_entry *entry;
1da177e4 233
8cf14af0
DM
234 entry = search_exception_tables(regs->tpc);
235 if (entry) {
236 /* Ouch, somebody is trying VM hole tricks on us... */
1da177e4
LT
237#ifdef DEBUG_EXCEPTIONS
238 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
8cf14af0
DM
239 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
240 regs->tpc, entry->fixup);
1da177e4 241#endif
8cf14af0 242 regs->tpc = entry->fixup;
1da177e4 243 regs->tnpc = regs->tpc + 4;
1da177e4
LT
244 return;
245 }
246 /* Shit... */
6c52a96e
DM
247 printk("spitfire_data_access_exception: SFSR[%016lx] "
248 "SFAR[%016lx], going.\n", sfsr, sfar);
1da177e4
LT
249 die_if_kernel("Dax", regs);
250 }
251
252 info.si_signo = SIGSEGV;
253 info.si_errno = 0;
254 info.si_code = SEGV_MAPERR;
255 info.si_addr = (void __user *)sfar;
256 info.si_trapno = 0;
257 force_sig_info(SIGSEGV, &info, current);
258}
259
6c52a96e 260void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
bde4e4ee
DM
261{
262 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
263 0, 0x30, SIGTRAP) == NOTIFY_STOP)
264 return;
265
266 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6c52a96e 267 spitfire_data_access_exception(regs, sfsr, sfar);
bde4e4ee
DM
268}
269
ed6b0b45
DM
270void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
271{
272 unsigned short type = (type_ctx >> 16);
273 unsigned short ctx = (type_ctx & 0xffff);
274 siginfo_t info;
275
276 if (notify_die(DIE_TRAP, "data access exception", regs,
277 0, 0x8, SIGTRAP) == NOTIFY_STOP)
278 return;
279
280 if (regs->tstate & TSTATE_PRIV) {
281 printk("sun4v_data_access_exception: ADDR[%016lx] "
282 "CTX[%04x] TYPE[%04x], going.\n",
283 addr, ctx, type);
284 die_if_kernel("Iax", regs);
285 }
286
287 if (test_thread_flag(TIF_32BIT)) {
288 regs->tpc &= 0xffffffff;
289 regs->tnpc &= 0xffffffff;
290 }
291 info.si_signo = SIGSEGV;
292 info.si_errno = 0;
293 info.si_code = SEGV_MAPERR;
294 info.si_addr = (void __user *) addr;
295 info.si_trapno = 0;
296 force_sig_info(SIGSEGV, &info, current);
297}
298
299void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
300{
301 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
302 0, 0x8, SIGTRAP) == NOTIFY_STOP)
303 return;
304
305 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
306 sun4v_data_access_exception(regs, addr, type_ctx);
307}
308
1da177e4
LT
309#ifdef CONFIG_PCI
310/* This is really pathetic... */
311extern volatile int pci_poke_in_progress;
312extern volatile int pci_poke_cpu;
313extern volatile int pci_poke_faulted;
314#endif
315
316/* When access exceptions happen, we must do this. */
317static void spitfire_clean_and_reenable_l1_caches(void)
318{
319 unsigned long va;
320
321 if (tlb_type != spitfire)
322 BUG();
323
324 /* Clean 'em. */
325 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
326 spitfire_put_icache_tag(va, 0x0);
327 spitfire_put_dcache_tag(va, 0x0);
328 }
329
330 /* Re-enable in LSU. */
331 __asm__ __volatile__("flush %%g6\n\t"
332 "membar #Sync\n\t"
333 "stxa %0, [%%g0] %1\n\t"
334 "membar #Sync"
335 : /* no outputs */
336 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
337 LSU_CONTROL_IM | LSU_CONTROL_DM),
338 "i" (ASI_LSU_CONTROL)
339 : "memory");
340}
341
6c52a96e 342static void spitfire_enable_estate_errors(void)
1da177e4 343{
6c52a96e
DM
344 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
345 "membar #Sync"
346 : /* no outputs */
347 : "r" (ESTATE_ERR_ALL),
348 "i" (ASI_ESTATE_ERROR_EN));
1da177e4
LT
349}
350
/* Maps an 8-bit UDB ECC syndrome to a code passed to
 * prom_getunumber() to identify the failing memory module
 * (see spitfire_log_udb_syndrome below).  Values are data —
 * do not edit.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
385
1da177e4
LT
386static char *syndrome_unknown = "<Unknown>";
387
6c52a96e 388static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
1da177e4 389{
6c52a96e
DM
390 unsigned short scode;
391 char memmod_str[64], *p;
1da177e4 392
6c52a96e
DM
393 if (udbl & bit) {
394 scode = ecc_syndrome_table[udbl & 0xff];
1da177e4
LT
395 if (prom_getunumber(scode, afar,
396 memmod_str, sizeof(memmod_str)) == -1)
397 p = syndrome_unknown;
398 else
399 p = memmod_str;
400 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
401 "Memory Module \"%s\"\n",
402 smp_processor_id(), scode, p);
403 }
404
6c52a96e
DM
405 if (udbh & bit) {
406 scode = ecc_syndrome_table[udbh & 0xff];
1da177e4
LT
407 if (prom_getunumber(scode, afar,
408 memmod_str, sizeof(memmod_str)) == -1)
409 p = syndrome_unknown;
410 else
411 p = memmod_str;
412 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
413 "Memory Module \"%s\"\n",
414 smp_processor_id(), scode, p);
415 }
6c52a96e
DM
416
417}
418
419static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
420{
421
422 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
423 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
424 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
425
426 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
427
428 /* We always log it, even if someone is listening for this
429 * trap.
430 */
431 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
432 0, TRAP_TYPE_CEE, SIGTRAP);
433
434 /* The Correctable ECC Error trap does not disable I/D caches. So
435 * we only have to restore the ESTATE Error Enable register.
436 */
437 spitfire_enable_estate_errors();
438}
439
440static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
441{
442 siginfo_t info;
443
444 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
445 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
446 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
447
448 /* XXX add more human friendly logging of the error status
449 * XXX as is implemented for cheetah
450 */
451
452 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
453
454 /* We always log it, even if someone is listening for this
455 * trap.
456 */
457 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
458 0, tt, SIGTRAP);
459
460 if (regs->tstate & TSTATE_PRIV) {
461 if (tl1)
462 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
463 die_if_kernel("UE", regs);
464 }
465
466 /* XXX need more intelligent processing here, such as is implemented
467 * XXX for cheetah errors, in fact if the E-cache still holds the
468 * XXX line with bad parity this will loop
469 */
470
471 spitfire_clean_and_reenable_l1_caches();
472 spitfire_enable_estate_errors();
473
474 if (test_thread_flag(TIF_32BIT)) {
475 regs->tpc &= 0xffffffff;
476 regs->tnpc &= 0xffffffff;
477 }
478 info.si_signo = SIGBUS;
479 info.si_errno = 0;
480 info.si_code = BUS_OBJERR;
481 info.si_addr = (void *)0;
482 info.si_trapno = 0;
483 force_sig_info(SIGBUS, &info, current);
484}
485
486void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
487{
488 unsigned long afsr, tt, udbh, udbl;
489 int tl1;
490
491 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
492 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
493 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
494 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
495 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
496
497#ifdef CONFIG_PCI
498 if (tt == TRAP_TYPE_DAE &&
499 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
500 spitfire_clean_and_reenable_l1_caches();
501 spitfire_enable_estate_errors();
502
503 pci_poke_faulted = 1;
504 regs->tnpc = regs->tpc + 4;
505 return;
506 }
507#endif
508
509 if (afsr & SFAFSR_UE)
510 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
511
512 if (tt == TRAP_TYPE_CEE) {
513 /* Handle the case where we took a CEE trap, but ACK'd
514 * only the UE state in the UDB error registers.
515 */
516 if (afsr & SFAFSR_UE) {
517 if (udbh & UDBE_CE) {
518 __asm__ __volatile__(
519 "stxa %0, [%1] %2\n\t"
520 "membar #Sync"
521 : /* no outputs */
522 : "r" (udbh & UDBE_CE),
523 "r" (0x0), "i" (ASI_UDB_ERROR_W));
524 }
525 if (udbl & UDBE_CE) {
526 __asm__ __volatile__(
527 "stxa %0, [%1] %2\n\t"
528 "membar #Sync"
529 : /* no outputs */
530 : "r" (udbl & UDBE_CE),
531 "r" (0x18), "i" (ASI_UDB_ERROR_W));
532 }
533 }
534
535 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
536 }
1da177e4
LT
537}
538
816242da
DM
539int cheetah_pcache_forced_on;
540
541void cheetah_enable_pcache(void)
542{
543 unsigned long dcr;
544
545 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
546 smp_processor_id());
547
548 __asm__ __volatile__("ldxa [%%g0] %1, %0"
549 : "=r" (dcr)
550 : "i" (ASI_DCU_CONTROL_REG));
551 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
552 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
553 "membar #Sync"
554 : /* no outputs */
555 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
556}
557
1da177e4
LT
558/* Cheetah error trap handling. */
559static unsigned long ecache_flush_physbase;
560static unsigned long ecache_flush_linesize;
561static unsigned long ecache_flush_size;
562
563/* WARNING: The error trap handlers in assembly know the precise
564 * layout of the following structure.
565 *
566 * C-level handlers below use this information to log the error
567 * and then determine how to recover (if possible).
568 */
569struct cheetah_err_info {
570/*0x00*/u64 afsr;
571/*0x08*/u64 afar;
572
573 /* D-cache state */
574/*0x10*/u64 dcache_data[4]; /* The actual data */
575/*0x30*/u64 dcache_index; /* D-cache index */
576/*0x38*/u64 dcache_tag; /* D-cache tag/valid */
577/*0x40*/u64 dcache_utag; /* D-cache microtag */
578/*0x48*/u64 dcache_stag; /* D-cache snooptag */
579
580 /* I-cache state */
581/*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
582/*0x90*/u64 icache_index; /* I-cache index */
583/*0x98*/u64 icache_tag; /* I-cache phys tag */
584/*0xa0*/u64 icache_utag; /* I-cache microtag */
585/*0xa8*/u64 icache_stag; /* I-cache snooptag */
586/*0xb0*/u64 icache_upper; /* I-cache upper-tag */
587/*0xb8*/u64 icache_lower; /* I-cache lower-tag */
588
589 /* E-cache state */
590/*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
591/*0xe0*/u64 ecache_index; /* E-cache index */
592/*0xe8*/u64 ecache_tag; /* E-cache tag/state */
593
594/*0xf0*/u64 __pad[32 - 30];
595};
596#define CHAFSR_INVALID ((u64)-1L)
597
598/* This table is ordered in priority of errors and matches the
599 * AFAR overwrite policy as well.
600 */
601
602struct afsr_error_table {
603 unsigned long mask;
604 const char *name;
605};
606
607static const char CHAFSR_PERR_msg[] =
608 "System interface protocol error";
609static const char CHAFSR_IERR_msg[] =
610 "Internal processor error";
611static const char CHAFSR_ISAP_msg[] =
612 "System request parity error on incoming addresss";
613static const char CHAFSR_UCU_msg[] =
614 "Uncorrectable E-cache ECC error for ifetch/data";
615static const char CHAFSR_UCC_msg[] =
616 "SW Correctable E-cache ECC error for ifetch/data";
617static const char CHAFSR_UE_msg[] =
618 "Uncorrectable system bus data ECC error for read";
619static const char CHAFSR_EDU_msg[] =
620 "Uncorrectable E-cache ECC error for stmerge/blkld";
621static const char CHAFSR_EMU_msg[] =
622 "Uncorrectable system bus MTAG error";
623static const char CHAFSR_WDU_msg[] =
624 "Uncorrectable E-cache ECC error for writeback";
625static const char CHAFSR_CPU_msg[] =
626 "Uncorrectable ECC error for copyout";
627static const char CHAFSR_CE_msg[] =
628 "HW corrected system bus data ECC error for read";
629static const char CHAFSR_EDC_msg[] =
630 "HW corrected E-cache ECC error for stmerge/blkld";
631static const char CHAFSR_EMC_msg[] =
632 "HW corrected system bus MTAG ECC error";
633static const char CHAFSR_WDC_msg[] =
634 "HW corrected E-cache ECC error for writeback";
635static const char CHAFSR_CPC_msg[] =
636 "HW corrected ECC error for copyout";
637static const char CHAFSR_TO_msg[] =
638 "Unmapped error from system bus";
639static const char CHAFSR_BERR_msg[] =
640 "Bus error response from system bus";
641static const char CHAFSR_IVC_msg[] =
642 "HW corrected system bus data ECC error for ivec read";
643static const char CHAFSR_IVU_msg[] =
644 "Uncorrectable system bus data ECC error for ivec read";
645static struct afsr_error_table __cheetah_error_table[] = {
646 { CHAFSR_PERR, CHAFSR_PERR_msg },
647 { CHAFSR_IERR, CHAFSR_IERR_msg },
648 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
649 { CHAFSR_UCU, CHAFSR_UCU_msg },
650 { CHAFSR_UCC, CHAFSR_UCC_msg },
651 { CHAFSR_UE, CHAFSR_UE_msg },
652 { CHAFSR_EDU, CHAFSR_EDU_msg },
653 { CHAFSR_EMU, CHAFSR_EMU_msg },
654 { CHAFSR_WDU, CHAFSR_WDU_msg },
655 { CHAFSR_CPU, CHAFSR_CPU_msg },
656 { CHAFSR_CE, CHAFSR_CE_msg },
657 { CHAFSR_EDC, CHAFSR_EDC_msg },
658 { CHAFSR_EMC, CHAFSR_EMC_msg },
659 { CHAFSR_WDC, CHAFSR_WDC_msg },
660 { CHAFSR_CPC, CHAFSR_CPC_msg },
661 { CHAFSR_TO, CHAFSR_TO_msg },
662 { CHAFSR_BERR, CHAFSR_BERR_msg },
663 /* These two do not update the AFAR. */
664 { CHAFSR_IVC, CHAFSR_IVC_msg },
665 { CHAFSR_IVU, CHAFSR_IVU_msg },
666 { 0, NULL },
667};
668static const char CHPAFSR_DTO_msg[] =
669 "System bus unmapped error for prefetch/storequeue-read";
670static const char CHPAFSR_DBERR_msg[] =
671 "System bus error for prefetch/storequeue-read";
672static const char CHPAFSR_THCE_msg[] =
673 "Hardware corrected E-cache Tag ECC error";
674static const char CHPAFSR_TSCE_msg[] =
675 "SW handled correctable E-cache Tag ECC error";
676static const char CHPAFSR_TUE_msg[] =
677 "Uncorrectable E-cache Tag ECC error";
678static const char CHPAFSR_DUE_msg[] =
679 "System bus uncorrectable data ECC error due to prefetch/store-fill";
680static struct afsr_error_table __cheetah_plus_error_table[] = {
681 { CHAFSR_PERR, CHAFSR_PERR_msg },
682 { CHAFSR_IERR, CHAFSR_IERR_msg },
683 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
684 { CHAFSR_UCU, CHAFSR_UCU_msg },
685 { CHAFSR_UCC, CHAFSR_UCC_msg },
686 { CHAFSR_UE, CHAFSR_UE_msg },
687 { CHAFSR_EDU, CHAFSR_EDU_msg },
688 { CHAFSR_EMU, CHAFSR_EMU_msg },
689 { CHAFSR_WDU, CHAFSR_WDU_msg },
690 { CHAFSR_CPU, CHAFSR_CPU_msg },
691 { CHAFSR_CE, CHAFSR_CE_msg },
692 { CHAFSR_EDC, CHAFSR_EDC_msg },
693 { CHAFSR_EMC, CHAFSR_EMC_msg },
694 { CHAFSR_WDC, CHAFSR_WDC_msg },
695 { CHAFSR_CPC, CHAFSR_CPC_msg },
696 { CHAFSR_TO, CHAFSR_TO_msg },
697 { CHAFSR_BERR, CHAFSR_BERR_msg },
698 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
699 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
700 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
701 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
702 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
703 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
704 /* These two do not update the AFAR. */
705 { CHAFSR_IVC, CHAFSR_IVC_msg },
706 { CHAFSR_IVU, CHAFSR_IVU_msg },
707 { 0, NULL },
708};
709static const char JPAFSR_JETO_msg[] =
710 "System interface protocol error, hw timeout caused";
711static const char JPAFSR_SCE_msg[] =
712 "Parity error on system snoop results";
713static const char JPAFSR_JEIC_msg[] =
714 "System interface protocol error, illegal command detected";
715static const char JPAFSR_JEIT_msg[] =
716 "System interface protocol error, illegal ADTYPE detected";
717static const char JPAFSR_OM_msg[] =
718 "Out of range memory error has occurred";
719static const char JPAFSR_ETP_msg[] =
720 "Parity error on L2 cache tag SRAM";
721static const char JPAFSR_UMS_msg[] =
722 "Error due to unsupported store";
723static const char JPAFSR_RUE_msg[] =
724 "Uncorrectable ECC error from remote cache/memory";
725static const char JPAFSR_RCE_msg[] =
726 "Correctable ECC error from remote cache/memory";
727static const char JPAFSR_BP_msg[] =
728 "JBUS parity error on returned read data";
729static const char JPAFSR_WBP_msg[] =
730 "JBUS parity error on data for writeback or block store";
731static const char JPAFSR_FRC_msg[] =
732 "Foreign read to DRAM incurring correctable ECC error";
733static const char JPAFSR_FRU_msg[] =
734 "Foreign read to DRAM incurring uncorrectable ECC error";
735static struct afsr_error_table __jalapeno_error_table[] = {
736 { JPAFSR_JETO, JPAFSR_JETO_msg },
737 { JPAFSR_SCE, JPAFSR_SCE_msg },
738 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
739 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
740 { CHAFSR_PERR, CHAFSR_PERR_msg },
741 { CHAFSR_IERR, CHAFSR_IERR_msg },
742 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
743 { CHAFSR_UCU, CHAFSR_UCU_msg },
744 { CHAFSR_UCC, CHAFSR_UCC_msg },
745 { CHAFSR_UE, CHAFSR_UE_msg },
746 { CHAFSR_EDU, CHAFSR_EDU_msg },
747 { JPAFSR_OM, JPAFSR_OM_msg },
748 { CHAFSR_WDU, CHAFSR_WDU_msg },
749 { CHAFSR_CPU, CHAFSR_CPU_msg },
750 { CHAFSR_CE, CHAFSR_CE_msg },
751 { CHAFSR_EDC, CHAFSR_EDC_msg },
752 { JPAFSR_ETP, JPAFSR_ETP_msg },
753 { CHAFSR_WDC, CHAFSR_WDC_msg },
754 { CHAFSR_CPC, CHAFSR_CPC_msg },
755 { CHAFSR_TO, CHAFSR_TO_msg },
756 { CHAFSR_BERR, CHAFSR_BERR_msg },
757 { JPAFSR_UMS, JPAFSR_UMS_msg },
758 { JPAFSR_RUE, JPAFSR_RUE_msg },
759 { JPAFSR_RCE, JPAFSR_RCE_msg },
760 { JPAFSR_BP, JPAFSR_BP_msg },
761 { JPAFSR_WBP, JPAFSR_WBP_msg },
762 { JPAFSR_FRC, JPAFSR_FRC_msg },
763 { JPAFSR_FRU, JPAFSR_FRU_msg },
764 /* These two do not update the AFAR. */
765 { CHAFSR_IVU, CHAFSR_IVU_msg },
766 { 0, NULL },
767};
768static struct afsr_error_table *cheetah_error_table;
769static unsigned long cheetah_afsr_errors;
770
771/* This is allocated at boot time based upon the largest hardware
772 * cpu ID in the system. We allocate two entries per cpu, one for
773 * TL==0 logging and one for TL >= 1 logging.
774 */
775struct cheetah_err_info *cheetah_error_log;
776
777static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
778{
779 struct cheetah_err_info *p;
780 int cpu = smp_processor_id();
781
782 if (!cheetah_error_log)
783 return NULL;
784
785 p = cheetah_error_log + (cpu * 2);
786 if ((afsr & CHAFSR_TL1) != 0UL)
787 p++;
788
789 return p;
790}
791
792extern unsigned int tl0_icpe[], tl1_icpe[];
793extern unsigned int tl0_dcpe[], tl1_dcpe[];
794extern unsigned int tl0_fecc[], tl1_fecc[];
795extern unsigned int tl0_cee[], tl1_cee[];
796extern unsigned int tl0_iae[], tl1_iae[];
797extern unsigned int tl0_dae[], tl1_dae[];
798extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
799extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
800extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
801extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
802extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
803
804void __init cheetah_ecache_flush_init(void)
805{
806 unsigned long largest_size, smallest_linesize, order, ver;
807 int node, i, instance;
808
809 /* Scan all cpu device tree nodes, note two values:
810 * 1) largest E-cache size
811 * 2) smallest E-cache line size
812 */
813 largest_size = 0UL;
814 smallest_linesize = ~0UL;
815
816 instance = 0;
817 while (!cpu_find_by_instance(instance, &node, NULL)) {
818 unsigned long val;
819
820 val = prom_getintdefault(node, "ecache-size",
821 (2 * 1024 * 1024));
822 if (val > largest_size)
823 largest_size = val;
824 val = prom_getintdefault(node, "ecache-line-size", 64);
825 if (val < smallest_linesize)
826 smallest_linesize = val;
827 instance++;
828 }
829
830 if (largest_size == 0UL || smallest_linesize == ~0UL) {
831 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
832 "parameters.\n");
833 prom_halt();
834 }
835
836 ecache_flush_size = (2 * largest_size);
837 ecache_flush_linesize = smallest_linesize;
838
10147570 839 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
1da177e4 840
10147570 841 if (ecache_flush_physbase == ~0UL) {
1da177e4 842 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
10147570
DM
843 "contiguous physical memory.\n",
844 ecache_flush_size);
1da177e4
LT
845 prom_halt();
846 }
847
848 /* Now allocate error trap reporting scoreboard. */
849 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
850 for (order = 0; order < MAX_ORDER; order++) {
851 if ((PAGE_SIZE << order) >= node)
852 break;
853 }
854 cheetah_error_log = (struct cheetah_err_info *)
855 __get_free_pages(GFP_KERNEL, order);
856 if (!cheetah_error_log) {
857 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
858 "error logging scoreboard (%d bytes).\n", node);
859 prom_halt();
860 }
861 memset(cheetah_error_log, 0, PAGE_SIZE << order);
862
863 /* Mark all AFSRs as invalid so that the trap handler will
864 * log new new information there.
865 */
866 for (i = 0; i < 2 * NR_CPUS; i++)
867 cheetah_error_log[i].afsr = CHAFSR_INVALID;
868
869 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
92704a1c
DM
870 if ((ver >> 32) == __JALAPENO_ID ||
871 (ver >> 32) == __SERRANO_ID) {
1da177e4
LT
872 cheetah_error_table = &__jalapeno_error_table[0];
873 cheetah_afsr_errors = JPAFSR_ERRORS;
874 } else if ((ver >> 32) == 0x003e0015) {
875 cheetah_error_table = &__cheetah_plus_error_table[0];
876 cheetah_afsr_errors = CHPAFSR_ERRORS;
877 } else {
878 cheetah_error_table = &__cheetah_error_table[0];
879 cheetah_afsr_errors = CHAFSR_ERRORS;
880 }
881
882 /* Now patch trap tables. */
883 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
884 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
885 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
886 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
887 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
888 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
889 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
890 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
891 if (tlb_type == cheetah_plus) {
892 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
893 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
894 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
895 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
896 }
897 flushi(PAGE_OFFSET);
898}
899
900static void cheetah_flush_ecache(void)
901{
902 unsigned long flush_base = ecache_flush_physbase;
903 unsigned long flush_linesize = ecache_flush_linesize;
904 unsigned long flush_size = ecache_flush_size;
905
906 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
907 " bne,pt %%xcc, 1b\n\t"
908 " ldxa [%2 + %0] %3, %%g0\n\t"
909 : "=&r" (flush_size)
910 : "0" (flush_size), "r" (flush_base),
911 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
912}
913
914static void cheetah_flush_ecache_line(unsigned long physaddr)
915{
916 unsigned long alias;
917
918 physaddr &= ~(8UL - 1UL);
919 physaddr = (ecache_flush_physbase +
920 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
921 alias = physaddr + (ecache_flush_size >> 1UL);
922 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
923 "ldxa [%1] %2, %%g0\n\t"
924 "membar #Sync"
925 : /* no outputs */
926 : "r" (physaddr), "r" (alias),
927 "i" (ASI_PHYS_USE_EC));
928}
929
930/* Unfortunately, the diagnostic access to the I-cache tags we need to
931 * use to clear the thing interferes with I-cache coherency transactions.
932 *
933 * So we must only flush the I-cache when it is disabled.
934 */
935static void __cheetah_flush_icache(void)
936{
80dc0d6b
DM
937 unsigned int icache_size, icache_line_size;
938 unsigned long addr;
939
940 icache_size = local_cpu_data().icache_size;
941 icache_line_size = local_cpu_data().icache_line_size;
1da177e4
LT
942
943 /* Clear the valid bits in all the tags. */
80dc0d6b 944 for (addr = 0; addr < icache_size; addr += icache_line_size) {
1da177e4
LT
945 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
946 "membar #Sync"
947 : /* no outputs */
80dc0d6b
DM
948 : "r" (addr | (2 << 3)),
949 "i" (ASI_IC_TAG));
1da177e4
LT
950 }
951}
952
953static void cheetah_flush_icache(void)
954{
955 unsigned long dcu_save;
956
957 /* Save current DCU, disable I-cache. */
958 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
959 "or %0, %2, %%g1\n\t"
960 "stxa %%g1, [%%g0] %1\n\t"
961 "membar #Sync"
962 : "=r" (dcu_save)
963 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
964 : "g1");
965
966 __cheetah_flush_icache();
967
968 /* Restore DCU register */
969 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
970 "membar #Sync"
971 : /* no outputs */
972 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
973}
974
975static void cheetah_flush_dcache(void)
976{
80dc0d6b
DM
977 unsigned int dcache_size, dcache_line_size;
978 unsigned long addr;
979
980 dcache_size = local_cpu_data().dcache_size;
981 dcache_line_size = local_cpu_data().dcache_line_size;
1da177e4 982
80dc0d6b 983 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1da177e4
LT
984 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
985 "membar #Sync"
986 : /* no outputs */
80dc0d6b 987 : "r" (addr), "i" (ASI_DCACHE_TAG));
1da177e4
LT
988 }
989}
990
991/* In order to make the even parity correct we must do two things.
992 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
993 * Next, we clear out all 32-bytes of data for that line. Data of
994 * all-zero + tag parity value of zero == correct parity.
995 */
996static void cheetah_plus_zap_dcache_parity(void)
997{
80dc0d6b
DM
998 unsigned int dcache_size, dcache_line_size;
999 unsigned long addr;
1000
1001 dcache_size = local_cpu_data().dcache_size;
1002 dcache_line_size = local_cpu_data().dcache_line_size;
1da177e4 1003
80dc0d6b
DM
1004 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1005 unsigned long tag = (addr >> 14);
1006 unsigned long line;
1da177e4
LT
1007
1008 __asm__ __volatile__("membar #Sync\n\t"
1009 "stxa %0, [%1] %2\n\t"
1010 "membar #Sync"
1011 : /* no outputs */
80dc0d6b 1012 : "r" (tag), "r" (addr),
1da177e4 1013 "i" (ASI_DCACHE_UTAG));
80dc0d6b 1014 for (line = addr; line < addr + dcache_line_size; line += 8)
1da177e4
LT
1015 __asm__ __volatile__("membar #Sync\n\t"
1016 "stxa %%g0, [%0] %1\n\t"
1017 "membar #Sync"
1018 : /* no outputs */
80dc0d6b
DM
1019 : "r" (line),
1020 "i" (ASI_DCACHE_DATA));
1da177e4
LT
1021 }
1022}
1023
1024/* Conversion tables used to frob Cheetah AFSR syndrome values into
1025 * something palatable to the memory controller driver get_unumber
1026 * routine.
1027 */
/* Symbolic codes used in the syndrome tables below; the numeric
 * values are the encodings expected by the memory controller
 * driver's get_unumber routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147

/* Map a 9-bit E-cache ECC syndrome to a data-bit number, check-bit
 * code (C0-C8), or multi-bit marker (M/M2/M3/M4).  Indexed by the
 * raw AFSR E_SYND field.
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};

/* Map a 4-bit M-tag syndrome to an mtag bit or check-bit code. */
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
1093
1094/* Return the highest priority error conditon mentioned. */
1095static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1096{
1097 unsigned long tmp = 0;
1098 int i;
1099
1100 for (i = 0; cheetah_error_table[i].mask; i++) {
1101 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1102 return tmp;
1103 }
1104 return tmp;
1105}
1106
1107static const char *cheetah_get_string(unsigned long bit)
1108{
1109 int i;
1110
1111 for (i = 0; cheetah_error_table[i].mask; i++) {
1112 if ((bit & cheetah_error_table[i].mask) != 0UL)
1113 return cheetah_error_table[i].name;
1114 }
1115 return "???";
1116}
1117
1118extern int chmc_getunumber(int, unsigned long, char *, int);
1119
1120static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1121 unsigned long afsr, unsigned long afar, int recoverable)
1122{
1123 unsigned long hipri;
1124 char unum[256];
1125
1126 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1127 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1128 afsr, afar,
1129 (afsr & CHAFSR_TL1) ? 1 : 0);
1130 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1131 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1132 regs->tpc, regs->tnpc, regs->tstate);
1133 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1134 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1135 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1136 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1137 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1138 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1139 hipri = cheetah_get_hipri(afsr);
1140 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1141 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1142 hipri, cheetah_get_string(hipri));
1143
1144 /* Try to get unumber if relevant. */
1145#define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1146 CHAFSR_CPC | CHAFSR_CPU | \
1147 CHAFSR_UE | CHAFSR_CE | \
1148 CHAFSR_EDC | CHAFSR_EDU | \
1149 CHAFSR_UCC | CHAFSR_UCU | \
1150 CHAFSR_WDU | CHAFSR_WDC)
1151#define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1152 if (afsr & ESYND_ERRORS) {
1153 int syndrome;
1154 int ret;
1155
1156 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1157 syndrome = cheetah_ecc_syntab[syndrome];
1158 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1159 if (ret != -1)
1160 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1161 (recoverable ? KERN_WARNING : KERN_CRIT),
1162 smp_processor_id(), unum);
1163 } else if (afsr & MSYND_ERRORS) {
1164 int syndrome;
1165 int ret;
1166
1167 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1168 syndrome = cheetah_mtag_syntab[syndrome];
1169 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1170 if (ret != -1)
1171 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1172 (recoverable ? KERN_WARNING : KERN_CRIT),
1173 smp_processor_id(), unum);
1174 }
1175
1176 /* Now dump the cache snapshots. */
1177 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1178 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1179 (int) info->dcache_index,
1180 info->dcache_tag,
1181 info->dcache_utag,
1182 info->dcache_stag);
1183 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1184 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1185 info->dcache_data[0],
1186 info->dcache_data[1],
1187 info->dcache_data[2],
1188 info->dcache_data[3]);
1189 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1190 "u[%016lx] l[%016lx]\n",
1191 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1192 (int) info->icache_index,
1193 info->icache_tag,
1194 info->icache_utag,
1195 info->icache_stag,
1196 info->icache_upper,
1197 info->icache_lower);
1198 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1199 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1200 info->icache_data[0],
1201 info->icache_data[1],
1202 info->icache_data[2],
1203 info->icache_data[3]);
1204 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1205 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1206 info->icache_data[4],
1207 info->icache_data[5],
1208 info->icache_data[6],
1209 info->icache_data[7]);
1210 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1211 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1212 (int) info->ecache_index, info->ecache_tag);
1213 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1214 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1215 info->ecache_data[0],
1216 info->ecache_data[1],
1217 info->ecache_data[2],
1218 info->ecache_data[3]);
1219
1220 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1221 while (afsr != 0UL) {
1222 unsigned long bit = cheetah_get_hipri(afsr);
1223
1224 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1225 (recoverable ? KERN_WARNING : KERN_CRIT),
1226 bit, cheetah_get_string(bit));
1227
1228 afsr &= ~bit;
1229 }
1230
1231 if (!recoverable)
1232 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1233}
1234
1235static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1236{
1237 unsigned long afsr, afar;
1238 int ret = 0;
1239
1240 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1241 : "=r" (afsr)
1242 : "i" (ASI_AFSR));
1243 if ((afsr & cheetah_afsr_errors) != 0) {
1244 if (logp != NULL) {
1245 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1246 : "=r" (afar)
1247 : "i" (ASI_AFAR));
1248 logp->afsr = afsr;
1249 logp->afar = afar;
1250 }
1251 ret = 1;
1252 }
1253 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1254 "membar #Sync\n\t"
1255 : : "r" (afsr), "i" (ASI_AFSR));
1256
1257 return ret;
1258}
1259
1260void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1261{
1262 struct cheetah_err_info local_snapshot, *p;
1263 int recoverable;
1264
1265 /* Flush E-cache */
1266 cheetah_flush_ecache();
1267
1268 p = cheetah_get_error_log(afsr);
1269 if (!p) {
1270 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1271 afsr, afar);
1272 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1273 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1274 prom_halt();
1275 }
1276
1277 /* Grab snapshot of logged error. */
1278 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1279
1280 /* If the current trap snapshot does not match what the
1281 * trap handler passed along into our args, big trouble.
1282 * In such a case, mark the local copy as invalid.
1283 *
1284 * Else, it matches and we mark the afsr in the non-local
1285 * copy as invalid so we may log new error traps there.
1286 */
1287 if (p->afsr != afsr || p->afar != afar)
1288 local_snapshot.afsr = CHAFSR_INVALID;
1289 else
1290 p->afsr = CHAFSR_INVALID;
1291
1292 cheetah_flush_icache();
1293 cheetah_flush_dcache();
1294
1295 /* Re-enable I-cache/D-cache */
1296 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1297 "or %%g1, %1, %%g1\n\t"
1298 "stxa %%g1, [%%g0] %0\n\t"
1299 "membar #Sync"
1300 : /* no outputs */
1301 : "i" (ASI_DCU_CONTROL_REG),
1302 "i" (DCU_DC | DCU_IC)
1303 : "g1");
1304
1305 /* Re-enable error reporting */
1306 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1307 "or %%g1, %1, %%g1\n\t"
1308 "stxa %%g1, [%%g0] %0\n\t"
1309 "membar #Sync"
1310 : /* no outputs */
1311 : "i" (ASI_ESTATE_ERROR_EN),
1312 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1313 : "g1");
1314
1315 /* Decide if we can continue after handling this trap and
1316 * logging the error.
1317 */
1318 recoverable = 1;
1319 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1320 recoverable = 0;
1321
1322 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1323 * error was logged while we had error reporting traps disabled.
1324 */
1325 if (cheetah_recheck_errors(&local_snapshot)) {
1326 unsigned long new_afsr = local_snapshot.afsr;
1327
1328 /* If we got a new asynchronous error, die... */
1329 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1330 CHAFSR_WDU | CHAFSR_CPU |
1331 CHAFSR_IVU | CHAFSR_UE |
1332 CHAFSR_BERR | CHAFSR_TO))
1333 recoverable = 0;
1334 }
1335
1336 /* Log errors. */
1337 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1338
1339 if (!recoverable)
1340 panic("Irrecoverable Fast-ECC error trap.\n");
1341
1342 /* Flush E-cache to kick the error trap handlers out. */
1343 cheetah_flush_ecache();
1344}
1345
1346/* Try to fix a correctable error by pushing the line out from
1347 * the E-cache. Recheck error reporting registers to see if the
1348 * problem is intermittent.
1349 */
1350static int cheetah_fix_ce(unsigned long physaddr)
1351{
1352 unsigned long orig_estate;
1353 unsigned long alias1, alias2;
1354 int ret;
1355
1356 /* Make sure correctable error traps are disabled. */
1357 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1358 "andn %0, %1, %%g1\n\t"
1359 "stxa %%g1, [%%g0] %2\n\t"
1360 "membar #Sync"
1361 : "=&r" (orig_estate)
1362 : "i" (ESTATE_ERROR_CEEN),
1363 "i" (ASI_ESTATE_ERROR_EN)
1364 : "g1");
1365
1366 /* We calculate alias addresses that will force the
1367 * cache line in question out of the E-cache. Then
1368 * we bring it back in with an atomic instruction so
1369 * that we get it in some modified/exclusive state,
1370 * then we displace it again to try and get proper ECC
1371 * pushed back into the system.
1372 */
1373 physaddr &= ~(8UL - 1UL);
1374 alias1 = (ecache_flush_physbase +
1375 (physaddr & ((ecache_flush_size >> 1) - 1)));
1376 alias2 = alias1 + (ecache_flush_size >> 1);
1377 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1378 "ldxa [%1] %3, %%g0\n\t"
1379 "casxa [%2] %3, %%g0, %%g0\n\t"
1380 "membar #StoreLoad | #StoreStore\n\t"
1381 "ldxa [%0] %3, %%g0\n\t"
1382 "ldxa [%1] %3, %%g0\n\t"
1383 "membar #Sync"
1384 : /* no outputs */
1385 : "r" (alias1), "r" (alias2),
1386 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1387
1388 /* Did that trigger another error? */
1389 if (cheetah_recheck_errors(NULL)) {
1390 /* Try one more time. */
1391 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1392 "membar #Sync"
1393 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1394 if (cheetah_recheck_errors(NULL))
1395 ret = 2;
1396 else
1397 ret = 1;
1398 } else {
1399 /* No new error, intermittent problem. */
1400 ret = 0;
1401 }
1402
1403 /* Restore error enables. */
1404 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1405 "membar #Sync"
1406 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1407
1408 return ret;
1409}
1410
1411/* Return non-zero if PADDR is a valid physical memory address. */
1412static int cheetah_check_main_memory(unsigned long paddr)
1413{
10147570 1414 unsigned long vaddr = PAGE_OFFSET + paddr;
1da177e4 1415
13edad7a 1416 if (vaddr > (unsigned long) high_memory)
ed3ffaf7
DM
1417 return 0;
1418
10147570 1419 return kern_addr_valid(vaddr);
1da177e4
LT
1420}
1421
1422void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1423{
1424 struct cheetah_err_info local_snapshot, *p;
1425 int recoverable, is_memory;
1426
1427 p = cheetah_get_error_log(afsr);
1428 if (!p) {
1429 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1430 afsr, afar);
1431 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1432 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1433 prom_halt();
1434 }
1435
1436 /* Grab snapshot of logged error. */
1437 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1438
1439 /* If the current trap snapshot does not match what the
1440 * trap handler passed along into our args, big trouble.
1441 * In such a case, mark the local copy as invalid.
1442 *
1443 * Else, it matches and we mark the afsr in the non-local
1444 * copy as invalid so we may log new error traps there.
1445 */
1446 if (p->afsr != afsr || p->afar != afar)
1447 local_snapshot.afsr = CHAFSR_INVALID;
1448 else
1449 p->afsr = CHAFSR_INVALID;
1450
1451 is_memory = cheetah_check_main_memory(afar);
1452
1453 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1454 /* XXX Might want to log the results of this operation
1455 * XXX somewhere... -DaveM
1456 */
1457 cheetah_fix_ce(afar);
1458 }
1459
1460 {
1461 int flush_all, flush_line;
1462
1463 flush_all = flush_line = 0;
1464 if ((afsr & CHAFSR_EDC) != 0UL) {
1465 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1466 flush_line = 1;
1467 else
1468 flush_all = 1;
1469 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1470 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1471 flush_line = 1;
1472 else
1473 flush_all = 1;
1474 }
1475
1476 /* Trap handler only disabled I-cache, flush it. */
1477 cheetah_flush_icache();
1478
1479 /* Re-enable I-cache */
1480 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1481 "or %%g1, %1, %%g1\n\t"
1482 "stxa %%g1, [%%g0] %0\n\t"
1483 "membar #Sync"
1484 : /* no outputs */
1485 : "i" (ASI_DCU_CONTROL_REG),
1486 "i" (DCU_IC)
1487 : "g1");
1488
1489 if (flush_all)
1490 cheetah_flush_ecache();
1491 else if (flush_line)
1492 cheetah_flush_ecache_line(afar);
1493 }
1494
1495 /* Re-enable error reporting */
1496 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1497 "or %%g1, %1, %%g1\n\t"
1498 "stxa %%g1, [%%g0] %0\n\t"
1499 "membar #Sync"
1500 : /* no outputs */
1501 : "i" (ASI_ESTATE_ERROR_EN),
1502 "i" (ESTATE_ERROR_CEEN)
1503 : "g1");
1504
1505 /* Decide if we can continue after handling this trap and
1506 * logging the error.
1507 */
1508 recoverable = 1;
1509 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1510 recoverable = 0;
1511
1512 /* Re-check AFSR/AFAR */
1513 (void) cheetah_recheck_errors(&local_snapshot);
1514
1515 /* Log errors. */
1516 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1517
1518 if (!recoverable)
1519 panic("Irrecoverable Correctable-ECC error trap.\n");
1520}
1521
1522void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1523{
1524 struct cheetah_err_info local_snapshot, *p;
1525 int recoverable, is_memory;
1526
1527#ifdef CONFIG_PCI
1528 /* Check for the special PCI poke sequence. */
1529 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1530 cheetah_flush_icache();
1531 cheetah_flush_dcache();
1532
1533 /* Re-enable I-cache/D-cache */
1534 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1535 "or %%g1, %1, %%g1\n\t"
1536 "stxa %%g1, [%%g0] %0\n\t"
1537 "membar #Sync"
1538 : /* no outputs */
1539 : "i" (ASI_DCU_CONTROL_REG),
1540 "i" (DCU_DC | DCU_IC)
1541 : "g1");
1542
1543 /* Re-enable error reporting */
1544 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1545 "or %%g1, %1, %%g1\n\t"
1546 "stxa %%g1, [%%g0] %0\n\t"
1547 "membar #Sync"
1548 : /* no outputs */
1549 : "i" (ASI_ESTATE_ERROR_EN),
1550 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1551 : "g1");
1552
1553 (void) cheetah_recheck_errors(NULL);
1554
1555 pci_poke_faulted = 1;
1556 regs->tpc += 4;
1557 regs->tnpc = regs->tpc + 4;
1558 return;
1559 }
1560#endif
1561
1562 p = cheetah_get_error_log(afsr);
1563 if (!p) {
1564 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1565 afsr, afar);
1566 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1567 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1568 prom_halt();
1569 }
1570
1571 /* Grab snapshot of logged error. */
1572 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1573
1574 /* If the current trap snapshot does not match what the
1575 * trap handler passed along into our args, big trouble.
1576 * In such a case, mark the local copy as invalid.
1577 *
1578 * Else, it matches and we mark the afsr in the non-local
1579 * copy as invalid so we may log new error traps there.
1580 */
1581 if (p->afsr != afsr || p->afar != afar)
1582 local_snapshot.afsr = CHAFSR_INVALID;
1583 else
1584 p->afsr = CHAFSR_INVALID;
1585
1586 is_memory = cheetah_check_main_memory(afar);
1587
1588 {
1589 int flush_all, flush_line;
1590
1591 flush_all = flush_line = 0;
1592 if ((afsr & CHAFSR_EDU) != 0UL) {
1593 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1594 flush_line = 1;
1595 else
1596 flush_all = 1;
1597 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1598 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1599 flush_line = 1;
1600 else
1601 flush_all = 1;
1602 }
1603
1604 cheetah_flush_icache();
1605 cheetah_flush_dcache();
1606
1607 /* Re-enable I/D caches */
1608 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1609 "or %%g1, %1, %%g1\n\t"
1610 "stxa %%g1, [%%g0] %0\n\t"
1611 "membar #Sync"
1612 : /* no outputs */
1613 : "i" (ASI_DCU_CONTROL_REG),
1614 "i" (DCU_IC | DCU_DC)
1615 : "g1");
1616
1617 if (flush_all)
1618 cheetah_flush_ecache();
1619 else if (flush_line)
1620 cheetah_flush_ecache_line(afar);
1621 }
1622
1623 /* Re-enable error reporting */
1624 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1625 "or %%g1, %1, %%g1\n\t"
1626 "stxa %%g1, [%%g0] %0\n\t"
1627 "membar #Sync"
1628 : /* no outputs */
1629 : "i" (ASI_ESTATE_ERROR_EN),
1630 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1631 : "g1");
1632
1633 /* Decide if we can continue after handling this trap and
1634 * logging the error.
1635 */
1636 recoverable = 1;
1637 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1638 recoverable = 0;
1639
1640 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1641 * error was logged while we had error reporting traps disabled.
1642 */
1643 if (cheetah_recheck_errors(&local_snapshot)) {
1644 unsigned long new_afsr = local_snapshot.afsr;
1645
1646 /* If we got a new asynchronous error, die... */
1647 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1648 CHAFSR_WDU | CHAFSR_CPU |
1649 CHAFSR_IVU | CHAFSR_UE |
1650 CHAFSR_BERR | CHAFSR_TO))
1651 recoverable = 0;
1652 }
1653
1654 /* Log errors. */
1655 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1656
1657 /* "Recoverable" here means we try to yank the page from ever
1658 * being newly used again. This depends upon a few things:
1659 * 1) Must be main memory, and AFAR must be valid.
1660 * 2) If we trapped from user, OK.
1661 * 3) Else, if we trapped from kernel we must find exception
1662 * table entry (ie. we have to have been accessing user
1663 * space).
1664 *
1665 * If AFAR is not in main memory, or we trapped from kernel
1666 * and cannot find an exception table entry, it is unacceptable
1667 * to try and continue.
1668 */
1669 if (recoverable && is_memory) {
1670 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1671 /* OK, usermode access. */
1672 recoverable = 1;
1673 } else {
8cf14af0 1674 const struct exception_table_entry *entry;
1da177e4 1675
8cf14af0
DM
1676 entry = search_exception_tables(regs->tpc);
1677 if (entry) {
1da177e4
LT
1678 /* OK, kernel access to userspace. */
1679 recoverable = 1;
1680
1681 } else {
1682 /* BAD, privileged state is corrupted. */
1683 recoverable = 0;
1684 }
1685
1686 if (recoverable) {
1687 if (pfn_valid(afar >> PAGE_SHIFT))
1688 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1689 else
1690 recoverable = 0;
1691
1692 /* Only perform fixup if we still have a
1693 * recoverable condition.
1694 */
1695 if (recoverable) {
8cf14af0 1696 regs->tpc = entry->fixup;
1da177e4 1697 regs->tnpc = regs->tpc + 4;
1da177e4
LT
1698 }
1699 }
1700 }
1701 } else {
1702 recoverable = 0;
1703 }
1704
1705 if (!recoverable)
1706 panic("Irrecoverable deferred error trap.\n");
1707}
1708
1709/* Handle a D/I cache parity error trap. TYPE is encoded as:
1710 *
1711 * Bit0: 0=dcache,1=icache
1712 * Bit1: 0=recoverable,1=unrecoverable
1713 *
1714 * The hardware has disabled both the I-cache and D-cache in
1715 * the %dcr register.
1716 */
1717void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1718{
1719 if (type & 0x1)
1720 __cheetah_flush_icache();
1721 else
1722 cheetah_plus_zap_dcache_parity();
1723 cheetah_flush_dcache();
1724
1725 /* Re-enable I-cache/D-cache */
1726 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1727 "or %%g1, %1, %%g1\n\t"
1728 "stxa %%g1, [%%g0] %0\n\t"
1729 "membar #Sync"
1730 : /* no outputs */
1731 : "i" (ASI_DCU_CONTROL_REG),
1732 "i" (DCU_DC | DCU_IC)
1733 : "g1");
1734
1735 if (type & 0x2) {
1736 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1737 smp_processor_id(),
1738 (type & 0x1) ? 'I' : 'D',
1739 regs->tpc);
1740 panic("Irrecoverable Cheetah+ parity error.");
1741 }
1742
1743 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1744 smp_processor_id(),
1745 (type & 0x1) ? 'I' : 'D',
1746 regs->tpc);
1747}
1748
5b0c0572
DM
1749struct sun4v_error_entry {
1750 u64 err_handle;
1751 u64 err_stick;
1752
1753 u32 err_type;
1754#define SUN4V_ERR_TYPE_UNDEFINED 0
1755#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1756#define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1757#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1758#define SUN4V_ERR_TYPE_WARNING_RES 4
1759
1760 u32 err_attrs;
1761#define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1762#define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1763#define SUN4V_ERR_ATTRS_PIO 0x00000004
1764#define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1765#define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1766#define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1767#define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1768#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1769
1770 u64 err_raddr;
1771 u32 err_size;
1772 u16 err_cpu;
1773 u16 err_pad;
1774};
1775
1776static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1777static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1778
1779static const char *sun4v_err_type_to_str(u32 type)
1780{
1781 switch (type) {
1782 case SUN4V_ERR_TYPE_UNDEFINED:
1783 return "undefined";
1784 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1785 return "uncorrected resumable";
1786 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1787 return "precise nonresumable";
1788 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1789 return "deferred nonresumable";
1790 case SUN4V_ERR_TYPE_WARNING_RES:
1791 return "warning resumable";
1792 default:
1793 return "unknown";
1794 };
1795}
1796
1797static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1798{
1799 int cnt;
1800
1801 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1802 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1803 pfx,
1804 ent->err_handle, ent->err_stick,
1805 ent->err_type,
1806 sun4v_err_type_to_str(ent->err_type));
1807 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1808 pfx,
1809 ent->err_attrs,
1810 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1811 "processor" : ""),
1812 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1813 "memory" : ""),
1814 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1815 "pio" : ""),
1816 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1817 "integer-regs" : ""),
1818 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1819 "fpu-regs" : ""),
1820 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1821 "user" : ""),
1822 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1823 "privileged" : ""),
1824 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1825 "queue-full" : ""));
1826 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1827 pfx,
1828 ent->err_raddr, ent->err_size, ent->err_cpu);
1829
1830 if ((cnt = atomic_read(ocnt)) != 0) {
1831 atomic_set(ocnt, 0);
1832 wmb();
1833 printk("%s: Queue overflowed %d times.\n",
1834 pfx, cnt);
1835 }
1836}
1837
1838/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1839 * Log the event and clear the first word of the entry.
1840 */
1841void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1842{
1843 struct sun4v_error_entry *ent, local_copy;
1844 struct trap_per_cpu *tb;
1845 unsigned long paddr;
1846 int cpu;
1847
1848 cpu = get_cpu();
1849
1850 tb = &trap_block[cpu];
1851 paddr = tb->resum_kernel_buf_pa + offset;
1852 ent = __va(paddr);
1853
1854 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1855
1856 /* We have a local copy now, so release the entry. */
1857 ent->err_handle = 0;
1858 wmb();
1859
1860 put_cpu();
1861
1862 sun4v_log_error(&local_copy, cpu,
1863 KERN_ERR "RESUMABLE ERROR",
1864 &sun4v_resum_oflow_cnt);
1865}
1866
1867/* If we try to printk() we'll probably make matters worse, by trying
1868 * to retake locks this cpu already holds or causing more errors. So
1869 * just bump a counter, and we'll report these counter bumps above.
1870 */
1871void sun4v_resum_overflow(struct pt_regs *regs)
1872{
1873 atomic_inc(&sun4v_resum_oflow_cnt);
1874}
1875
1876/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1877 * Log the event, clear the first word of the entry, and die.
1878 */
1879void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1880{
1881 struct sun4v_error_entry *ent, local_copy;
1882 struct trap_per_cpu *tb;
1883 unsigned long paddr;
1884 int cpu;
1885
1886 cpu = get_cpu();
1887
1888 tb = &trap_block[cpu];
1889 paddr = tb->nonresum_kernel_buf_pa + offset;
1890 ent = __va(paddr);
1891
1892 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1893
1894 /* We have a local copy now, so release the entry. */
1895 ent->err_handle = 0;
1896 wmb();
1897
1898 put_cpu();
1899
1900#ifdef CONFIG_PCI
1901 /* Check for the special PCI poke sequence. */
1902 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1903 pci_poke_faulted = 1;
1904 regs->tpc += 4;
1905 regs->tnpc = regs->tpc + 4;
1906 return;
1907 }
1908#endif
1909
1910 sun4v_log_error(&local_copy, cpu,
1911 KERN_EMERG "NON-RESUMABLE ERROR",
1912 &sun4v_nonresum_oflow_cnt);
1913
1914 panic("Non-resumable error.");
1915}
1916
1917/* If we try to printk() we'll probably make matters worse, by trying
1918 * to retake locks this cpu already holds or causing more errors. So
1919 * just bump a counter, and we'll report these counter bumps above.
1920 */
1921void sun4v_nonresum_overflow(struct pt_regs *regs)
1922{
1923 /* XXX Actually even this can make not that much sense. Perhaps
1924 * XXX we should just pull the plug and panic directly from here?
1925 */
1926 atomic_inc(&sun4v_nonresum_oflow_cnt);
1927}
1928
1da177e4
LT
1929void do_fpe_common(struct pt_regs *regs)
1930{
1931 if (regs->tstate & TSTATE_PRIV) {
1932 regs->tpc = regs->tnpc;
1933 regs->tnpc += 4;
1934 } else {
1935 unsigned long fsr = current_thread_info()->xfsr[0];
1936 siginfo_t info;
1937
1938 if (test_thread_flag(TIF_32BIT)) {
1939 regs->tpc &= 0xffffffff;
1940 regs->tnpc &= 0xffffffff;
1941 }
1942 info.si_signo = SIGFPE;
1943 info.si_errno = 0;
1944 info.si_addr = (void __user *)regs->tpc;
1945 info.si_trapno = 0;
1946 info.si_code = __SI_FAULT;
1947 if ((fsr & 0x1c000) == (1 << 14)) {
1948 if (fsr & 0x10)
1949 info.si_code = FPE_FLTINV;
1950 else if (fsr & 0x08)
1951 info.si_code = FPE_FLTOVF;
1952 else if (fsr & 0x04)
1953 info.si_code = FPE_FLTUND;
1954 else if (fsr & 0x02)
1955 info.si_code = FPE_FLTDIV;
1956 else if (fsr & 0x01)
1957 info.si_code = FPE_FLTRES;
1958 }
1959 force_sig_info(SIGFPE, &info, current);
1960 }
1961}
1962
1963void do_fpieee(struct pt_regs *regs)
1964{
1965 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1966 0, 0x24, SIGFPE) == NOTIFY_STOP)
1967 return;
1968
1969 do_fpe_common(regs);
1970}
1971
1972extern int do_mathemu(struct pt_regs *, struct fpustate *);
1973
1974void do_fpother(struct pt_regs *regs)
1975{
1976 struct fpustate *f = FPUSTATE;
1977 int ret = 0;
1978
1979 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1980 0, 0x25, SIGFPE) == NOTIFY_STOP)
1981 return;
1982
1983 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1984 case (2 << 14): /* unfinished_FPop */
1985 case (3 << 14): /* unimplemented_FPop */
1986 ret = do_mathemu(regs, f);
1987 break;
1988 }
1989 if (ret)
1990 return;
1991 do_fpe_common(regs);
1992}
1993
1994void do_tof(struct pt_regs *regs)
1995{
1996 siginfo_t info;
1997
1998 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1999 0, 0x26, SIGEMT) == NOTIFY_STOP)
2000 return;
2001
2002 if (regs->tstate & TSTATE_PRIV)
2003 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2004 if (test_thread_flag(TIF_32BIT)) {
2005 regs->tpc &= 0xffffffff;
2006 regs->tnpc &= 0xffffffff;
2007 }
2008 info.si_signo = SIGEMT;
2009 info.si_errno = 0;
2010 info.si_code = EMT_TAGOVF;
2011 info.si_addr = (void __user *)regs->tpc;
2012 info.si_trapno = 0;
2013 force_sig_info(SIGEMT, &info, current);
2014}
2015
2016void do_div0(struct pt_regs *regs)
2017{
2018 siginfo_t info;
2019
2020 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2021 0, 0x28, SIGFPE) == NOTIFY_STOP)
2022 return;
2023
2024 if (regs->tstate & TSTATE_PRIV)
2025 die_if_kernel("TL0: Kernel divide by zero.", regs);
2026 if (test_thread_flag(TIF_32BIT)) {
2027 regs->tpc &= 0xffffffff;
2028 regs->tnpc &= 0xffffffff;
2029 }
2030 info.si_signo = SIGFPE;
2031 info.si_errno = 0;
2032 info.si_code = FPE_INTDIV;
2033 info.si_addr = (void __user *)regs->tpc;
2034 info.si_trapno = 0;
2035 force_sig_info(SIGFPE, &info, current);
2036}
2037
/* Dump the instruction words around a kernel pc: three before, the
 * faulting one bracketed with '<' '>', and five after.
 */
void instruction_dump(unsigned int *pc)
{
	int off;

	/* Refuse to dereference a misaligned instruction pointer. */
	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (off = -3; off < 6; off++) {
		char lmark = off ? ' ' : '<';
		char rmark = off ? ' ' : '>';

		printk("%c%08x%c", lmark, pc[off], rmark);
	}
	printk("\n");
}
2050
2051static void user_instruction_dump (unsigned int __user *pc)
2052{
2053 int i;
2054 unsigned int buf[9];
2055
2056 if ((((unsigned long) pc) & 3))
2057 return;
2058
2059 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2060 return;
2061
2062 printk("Instruction DUMP:");
2063 for (i = 0; i < 9; i++)
2064 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2065 printk("\n");
2066}
2067
2068void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2069{
2070 unsigned long pc, fp, thread_base, ksp;
ee3eea16 2071 void *tp = task_stack_page(tsk);
1da177e4
LT
2072 struct reg_window *rw;
2073 int count = 0;
2074
2075 ksp = (unsigned long) _ksp;
2076
2077 if (tp == current_thread_info())
2078 flushw_all();
2079
2080 fp = ksp + STACK_BIAS;
2081 thread_base = (unsigned long) tp;
2082
2083 printk("Call Trace:");
2084#ifdef CONFIG_KALLSYMS
2085 printk("\n");
2086#endif
2087 do {
2088 /* Bogus frame pointer? */
2089 if (fp < (thread_base + sizeof(struct thread_info)) ||
2090 fp >= (thread_base + THREAD_SIZE))
2091 break;
2092 rw = (struct reg_window *)fp;
2093 pc = rw->ins[7];
2094 printk(" [%016lx] ", pc);
2095 print_symbol("%s\n", pc);
2096 fp = rw->ins[6] + STACK_BIAS;
2097 } while (++count < 16);
2098#ifndef CONFIG_KALLSYMS
2099 printk("\n");
2100#endif
2101}
2102
2103void dump_stack(void)
2104{
2105 unsigned long *ksp;
2106
2107 __asm__ __volatile__("mov %%fp, %0"
2108 : "=r" (ksp));
2109 show_stack(current, ksp);
2110}
2111
2112EXPORT_SYMBOL(dump_stack);
2113
2114static inline int is_kernel_stack(struct task_struct *task,
2115 struct reg_window *rw)
2116{
2117 unsigned long rw_addr = (unsigned long) rw;
2118 unsigned long thread_base, thread_end;
2119
2120 if (rw_addr < PAGE_OFFSET) {
2121 if (task != &init_task)
2122 return 0;
2123 }
2124
ee3eea16 2125 thread_base = (unsigned long) task_stack_page(task);
1da177e4
LT
2126 thread_end = thread_base + sizeof(union thread_union);
2127 if (rw_addr >= thread_base &&
2128 rw_addr < thread_end &&
2129 !(rw_addr & 0x7UL))
2130 return 1;
2131
2132 return 0;
2133}
2134
2135static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2136{
2137 unsigned long fp = rw->ins[6];
2138
2139 if (!fp)
2140 return NULL;
2141
2142 return (struct reg_window *) (fp + STACK_BIAS);
2143}
2144
/* Oops path: print a banner, register state, a bounded backtrace (or a
 * user instruction dump), then kill the current task.  Despite the
 * name this also runs for user-mode traps; the difference is only
 * which dump is printed and which signal do_exit() gets.  Never
 * returns.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;	/* distinguishes successive oopses in the log */
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	/* Spill register windows so the stack walk below sees them. */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.  Bounded to
		 * 30 frames to avoid looping on corrupt stacks.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	/* Kernel-mode deaths get SIGKILL; user-mode ones SIGSEGV. */
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
2195
2196extern int handle_popc(u32 insn, struct pt_regs *regs);
2197extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2198
2199void do_illegal_instruction(struct pt_regs *regs)
2200{
2201 unsigned long pc = regs->tpc;
2202 unsigned long tstate = regs->tstate;
2203 u32 insn;
2204 siginfo_t info;
2205
2206 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2207 0, 0x10, SIGILL) == NOTIFY_STOP)
2208 return;
2209
2210 if (tstate & TSTATE_PRIV)
2211 die_if_kernel("Kernel illegal instruction", regs);
2212 if (test_thread_flag(TIF_32BIT))
2213 pc = (u32)pc;
2214 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2215 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2216 if (handle_popc(insn, regs))
2217 return;
2218 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2219 if (handle_ldf_stq(insn, regs))
2220 return;
2221 }
2222 }
2223 info.si_signo = SIGILL;
2224 info.si_errno = 0;
2225 info.si_code = ILL_ILLOPC;
2226 info.si_addr = (void __user *)pc;
2227 info.si_trapno = 0;
2228 force_sig_info(SIGILL, &info, current);
2229}
2230
ed6b0b45
DM
2231extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2232
1da177e4
LT
2233void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2234{
2235 siginfo_t info;
2236
2237 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2238 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2239 return;
2240
2241 if (regs->tstate & TSTATE_PRIV) {
ed6b0b45 2242 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
1da177e4
LT
2243 return;
2244 }
2245 info.si_signo = SIGBUS;
2246 info.si_errno = 0;
2247 info.si_code = BUS_ADRALN;
2248 info.si_addr = (void __user *)sfar;
2249 info.si_trapno = 0;
2250 force_sig_info(SIGBUS, &info, current);
2251}
2252
ed6b0b45
DM
2253void sun4v_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2254{
2255 siginfo_t info;
2256
2257 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2258 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2259 return;
2260
2261 if (regs->tstate & TSTATE_PRIV) {
2262 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2263 return;
2264 }
2265 info.si_signo = SIGBUS;
2266 info.si_errno = 0;
2267 info.si_code = BUS_ADRALN;
2268 info.si_addr = (void __user *) addr;
2269 info.si_trapno = 0;
2270 force_sig_info(SIGBUS, &info, current);
2271}
2272
1da177e4
LT
2273void do_privop(struct pt_regs *regs)
2274{
2275 siginfo_t info;
2276
2277 if (notify_die(DIE_TRAP, "privileged operation", regs,
2278 0, 0x11, SIGILL) == NOTIFY_STOP)
2279 return;
2280
2281 if (test_thread_flag(TIF_32BIT)) {
2282 regs->tpc &= 0xffffffff;
2283 regs->tnpc &= 0xffffffff;
2284 }
2285 info.si_signo = SIGILL;
2286 info.si_errno = 0;
2287 info.si_code = ILL_PRVOPC;
2288 info.si_addr = (void __user *)regs->tpc;
2289 info.si_trapno = 0;
2290 force_sig_info(SIGILL, &info, current);
2291}
2292
2293void do_privact(struct pt_regs *regs)
2294{
2295 do_privop(regs);
2296}
2297
2298/* Trap level 1 stuff or other traps we should never see... */
2299void do_cee(struct pt_regs *regs)
2300{
2301 die_if_kernel("TL0: Cache Error Exception", regs);
2302}
2303
2304void do_cee_tl1(struct pt_regs *regs)
2305{
2306 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2307 die_if_kernel("TL1: Cache Error Exception", regs);
2308}
2309
2310void do_dae_tl1(struct pt_regs *regs)
2311{
2312 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2313 die_if_kernel("TL1: Data Access Exception", regs);
2314}
2315
2316void do_iae_tl1(struct pt_regs *regs)
2317{
2318 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2319 die_if_kernel("TL1: Instruction Access Exception", regs);
2320}
2321
2322void do_div0_tl1(struct pt_regs *regs)
2323{
2324 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2325 die_if_kernel("TL1: DIV0 Exception", regs);
2326}
2327
2328void do_fpdis_tl1(struct pt_regs *regs)
2329{
2330 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2331 die_if_kernel("TL1: FPU Disabled", regs);
2332}
2333
2334void do_fpieee_tl1(struct pt_regs *regs)
2335{
2336 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2337 die_if_kernel("TL1: FPU IEEE Exception", regs);
2338}
2339
2340void do_fpother_tl1(struct pt_regs *regs)
2341{
2342 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2343 die_if_kernel("TL1: FPU Other Exception", regs);
2344}
2345
2346void do_ill_tl1(struct pt_regs *regs)
2347{
2348 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2349 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2350}
2351
2352void do_irq_tl1(struct pt_regs *regs)
2353{
2354 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2355 die_if_kernel("TL1: IRQ Exception", regs);
2356}
2357
2358void do_lddfmna_tl1(struct pt_regs *regs)
2359{
2360 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2361 die_if_kernel("TL1: LDDF Exception", regs);
2362}
2363
2364void do_stdfmna_tl1(struct pt_regs *regs)
2365{
2366 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2367 die_if_kernel("TL1: STDF Exception", regs);
2368}
2369
2370void do_paw(struct pt_regs *regs)
2371{
2372 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2373}
2374
2375void do_paw_tl1(struct pt_regs *regs)
2376{
2377 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2378 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2379}
2380
2381void do_vaw(struct pt_regs *regs)
2382{
2383 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2384}
2385
2386void do_vaw_tl1(struct pt_regs *regs)
2387{
2388 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2389 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2390}
2391
2392void do_tof_tl1(struct pt_regs *regs)
2393{
2394 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2395 die_if_kernel("TL1: Tag Overflow Exception", regs);
2396}
2397
2398void do_getpsr(struct pt_regs *regs)
2399{
2400 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2401 regs->tpc = regs->tnpc;
2402 regs->tnpc += 4;
2403 if (test_thread_flag(TIF_32BIT)) {
2404 regs->tpc &= 0xffffffff;
2405 regs->tnpc &= 0xffffffff;
2406 }
2407}
2408
56fb4df6
DM
/* Per-cpu trap state; indexed by (hard) cpu id. */
struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void init_cur_cpu_trap(void)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	/* Record this cpu's thread_info; pgd_paddr starts cleared. */
	p->thread = current_thread_info();
	p->pgd_paddr = 0;
}
2422
/* These two functions are deliberately never defined anywhere.  The
 * offset checks below are compile-time constant, so when every offset
 * matches the calls are eliminated as dead code; a mismatch leaves a
 * live reference and the build fails at link time.
 */
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check: the TI_* assembler offsets must
	 * match the C layout of struct thread_info, and TI_FPREGS must
	 * be 64-byte aligned.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	/* Same check for the TRAP_PER_CPU_* assembler offsets against
	 * struct trap_per_cpu.
	 */
	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)))
		trap_per_cpu_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}