/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>	/* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/kdebug.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	int err = 0;
	unsigned long flags;
	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
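
/* A minimal usage sketch (illustrative only, not part of this file):
 * a client such as a debugger module hooks the chain with a callback.
 * The names "example_die_handler" and "example_nb" are hypothetical.
 */
#if 0
static int example_die_handler(struct notifier_block *self,
			       unsigned long val, void *data)
{
	/* Inspect the event here, then let other notifiers run. */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_die_handler,
};

	/* ... from some init path ... */
	register_die_notifier(&example_nb);
#endif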

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};
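
/* The TL1 handlers below recover this log directly past the exception
 * frame, e.g.:
 *
 *	struct tl1_traplog *p = (struct tl1_traplog *)(regs + 1);
 */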

static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i;

	printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
	       p->tl);
	for (i = 0; i < 4; i++) {
		printk(KERN_CRIT
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
	}
}

void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}

void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup;
		unsigned long g2 = regs->u_regs[UREG_G2];

		if ((fixup = search_extables_range(regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
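
/* Note on the loop bounds above: with the 8KB base page size used on
 * sparc64, (PAGE_SIZE << 1) covers 16KB, matching the size of
 * Spitfire's L1 I-cache and D-cache, and the 32-byte stride matches
 * their line size.
 */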

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data */
/*0x30*/u64 dcache_index;	/* D-cache index */
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
/*0x40*/u64 dcache_utag;	/* D-cache microtag */
/*0x48*/u64 dcache_stag;	/* D-cache snooptag */

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
/*0x90*/u64 icache_index;	/* I-cache index */
/*0x98*/u64 icache_tag;		/* I-cache phys tag */
/*0xa0*/u64 icache_utag;	/* I-cache microtag */
/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index */
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */

/*0xf0*/u64 __pad[32 - 30];
};
#define CHAFSR_INVALID		((u64)-1L)

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{ CHAFSR_PERR,	CHAFSR_PERR_msg },
	{ CHAFSR_IERR,	CHAFSR_IERR_msg },
	{ CHAFSR_ISAP,	CHAFSR_ISAP_msg },
	{ CHAFSR_UCU,	CHAFSR_UCU_msg },
	{ CHAFSR_UCC,	CHAFSR_UCC_msg },
	{ CHAFSR_UE,	CHAFSR_UE_msg },
	{ CHAFSR_EDU,	CHAFSR_EDU_msg },
	{ CHAFSR_EMU,	CHAFSR_EMU_msg },
	{ CHAFSR_WDU,	CHAFSR_WDU_msg },
	{ CHAFSR_CPU,	CHAFSR_CPU_msg },
	{ CHAFSR_CE,	CHAFSR_CE_msg },
	{ CHAFSR_EDC,	CHAFSR_EDC_msg },
	{ CHAFSR_EMC,	CHAFSR_EMC_msg },
	{ CHAFSR_WDC,	CHAFSR_WDC_msg },
	{ CHAFSR_CPC,	CHAFSR_CPC_msg },
	{ CHAFSR_TO,	CHAFSR_TO_msg },
	{ CHAFSR_BERR,	CHAFSR_BERR_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC,	CHAFSR_IVC_msg },
	{ CHAFSR_IVU,	CHAFSR_IVU_msg },
	{ 0,		NULL },
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR,	CHAFSR_PERR_msg },
	{ CHAFSR_IERR,	CHAFSR_IERR_msg },
	{ CHAFSR_ISAP,	CHAFSR_ISAP_msg },
	{ CHAFSR_UCU,	CHAFSR_UCU_msg },
	{ CHAFSR_UCC,	CHAFSR_UCC_msg },
	{ CHAFSR_UE,	CHAFSR_UE_msg },
	{ CHAFSR_EDU,	CHAFSR_EDU_msg },
	{ CHAFSR_EMU,	CHAFSR_EMU_msg },
	{ CHAFSR_WDU,	CHAFSR_WDU_msg },
	{ CHAFSR_CPU,	CHAFSR_CPU_msg },
	{ CHAFSR_CE,	CHAFSR_CE_msg },
	{ CHAFSR_EDC,	CHAFSR_EDC_msg },
	{ CHAFSR_EMC,	CHAFSR_EMC_msg },
	{ CHAFSR_WDC,	CHAFSR_WDC_msg },
	{ CHAFSR_CPC,	CHAFSR_CPC_msg },
	{ CHAFSR_TO,	CHAFSR_TO_msg },
	{ CHAFSR_BERR,	CHAFSR_BERR_msg },
	{ CHPAFSR_DTO,	CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE,	CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE,	CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE,	CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE,	CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC,	CHAFSR_IVC_msg },
	{ CHAFSR_IVU,	CHAFSR_IVU_msg },
	{ 0,		NULL },
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO,	JPAFSR_JETO_msg },
	{ JPAFSR_SCE,	JPAFSR_SCE_msg },
	{ JPAFSR_JEIC,	JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT,	JPAFSR_JEIT_msg },
	{ CHAFSR_PERR,	CHAFSR_PERR_msg },
	{ CHAFSR_IERR,	CHAFSR_IERR_msg },
	{ CHAFSR_ISAP,	CHAFSR_ISAP_msg },
	{ CHAFSR_UCU,	CHAFSR_UCU_msg },
	{ CHAFSR_UCC,	CHAFSR_UCC_msg },
	{ CHAFSR_UE,	CHAFSR_UE_msg },
	{ CHAFSR_EDU,	CHAFSR_EDU_msg },
	{ JPAFSR_OM,	JPAFSR_OM_msg },
	{ CHAFSR_WDU,	CHAFSR_WDU_msg },
	{ CHAFSR_CPU,	CHAFSR_CPU_msg },
	{ CHAFSR_CE,	CHAFSR_CE_msg },
	{ CHAFSR_EDC,	CHAFSR_EDC_msg },
	{ JPAFSR_ETP,	JPAFSR_ETP_msg },
	{ CHAFSR_WDC,	CHAFSR_WDC_msg },
	{ CHAFSR_CPC,	CHAFSR_CPC_msg },
	{ CHAFSR_TO,	CHAFSR_TO_msg },
	{ CHAFSR_BERR,	CHAFSR_BERR_msg },
	{ JPAFSR_UMS,	JPAFSR_UMS_msg },
	{ JPAFSR_RUE,	JPAFSR_RUE_msg },
	{ JPAFSR_RCE,	JPAFSR_RCE_msg },
	{ JPAFSR_BP,	JPAFSR_BP_msg },
	{ JPAFSR_WBP,	JPAFSR_WBP_msg },
	{ JPAFSR_FRC,	JPAFSR_FRC_msg },
	{ JPAFSR_FRU,	JPAFSR_FRU_msg },
	/* This one does not update the AFAR. */
	{ CHAFSR_IVU,	CHAFSR_IVU_msg },
	{ 0,		NULL },
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;

static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}
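
/* Indexing sketch for the scoreboard accessed above: for a cpu N,
 *
 *	cheetah_error_log[2 * N + 0]	logs traps taken at TL == 0
 *	cheetah_error_log[2 * N + 1]	logs traps taken at TL >= 1
 *					(CHAFSR_TL1 set in the AFSR)
 */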

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int node, i, instance;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	instance = 0;
	while (!cpu_find_by_instance(instance, &node, NULL)) {
		unsigned long val;

		val = prom_getintdefault(node, "ecache-size",
					 (2 * 1024 * 1024));
		if (val > largest_size)
			largest_size = val;
		val = prom_getintdefault(node, "ecache-line-size", 64);
		if (val < smallest_linesize)
			smallest_linesize = val;
		instance++;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	/* Discover a physically contiguous chunk of physical
	 * memory in 'sp_banks' of size ecache_flush_size calculated
	 * above.  Store the physical base of this area at
	 * ecache_flush_physbase.
	 */
	for (node = 0; ; node++) {
		if (sp_banks[node].num_bytes == 0)
			break;
		if (sp_banks[node].num_bytes >= ecache_flush_size) {
			ecache_flush_physbase = sp_banks[node].base_addr;
			break;
		}
	}

	/* Note: Zero would be a valid value of ecache_flush_physbase so
	 * don't use that as the success test. :-)
	 */
	if (sp_banks[node].num_bytes == 0) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n", ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", node);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == 0x003e0016) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size >> 1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
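
/* Worked example of the displacement math above, using an illustrative
 * (hypothetical) 4MB E-cache, so ecache_flush_size == 8MB:
 *
 *	physaddr &= ~7;				    align to 8 bytes
 *	physaddr  = base + (physaddr & (4MB - 1));  fold into lower half
 *	alias     = physaddr + 4MB;		    same index, upper half
 *
 * Loading both addresses displaces whatever the E-cache holds at that
 * index out to memory.
 */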

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned long i;

	/* Clear the valid bits in all the tags. */
	for (i = 0; i < (1 << 15); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		unsigned long tag = (i >> 14);
		unsigned long j;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (i),
				       "i" (ASI_DCACHE_UTAG));
		for (j = i; j < i + (1 << 5); j += (1 << 3))
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (j), "i" (ASI_DCACHE_DATA));
	}
}
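
/* Geometry implied by the loops above: the outer loop steps a 64KB
 * (1 << 16) D-cache in 32-byte (1 << 5) lines, writing each line's
 * utag; the inner loop then zeroes the line's 32 bytes of data with
 * 8-byte (1 << 3) stores, producing the all-zero, even-parity state
 * described in the comment before the function.
 */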

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};
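
/* Rough decoding of the cell values above (inferred from the #defines;
 * the authoritative consumer is the memory controller driver's
 * get_unumber routine):
 *
 *	0 .. 127	number of the single flipped data bit
 *	C0 .. C8	single flipped check bit
 *	MT0 .. MT2	MTAG data bit
 *	MTC0 .. MTC3	MTAG check bit
 *	M2, M3, M4, M	multi-bit or otherwise unclassifiable syndrome
 *	NONE		no error
 */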

/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
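
/* Return value summary for cheetah_fix_ce(), derived from the code
 * above (the one caller below currently ignores it):
 *
 *	0 - error did not recur; it was intermittent
 *	1 - error recurred once, then the extra displacement cleared it
 *	2 - error persisted across the retry
 */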

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	int i;

	for (i = 0; ; i++) {
		if (sp_banks[i].num_bytes == 0)
			break;
		if (paddr >= sp_banks[i].base_addr &&
		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
			return 1;
	}
	return 0;
}

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_extables_range(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
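
/* Decoding the TYPE argument, per the comment above:
 *
 *	0x0 - D-cache parity error, recoverable
 *	0x1 - I-cache parity error, recoverable
 *	0x2 - D-cache parity error, unrecoverable (panics)
 *	0x3 - I-cache parity error, unrecoverable (panics)
 */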

void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}
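
/* Sketch of the %fsr decoding above, per the SPARC V9 FSR layout:
 * bits 16:14 hold the ftt field, so (fsr & 0x1c000) == (1 << 14)
 * selects ftt == 1 (IEEE_754_exception), and the low five cexc bits
 * then name the cause.  For example, a hypothetical fsr with ftt == 1
 * and the overflow bit set:
 *
 *	fsr = (1UL << 14) | 0x08;	--> si_code = FPE_FLTOVF
 */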
1708
1709 void do_fpieee(struct pt_regs *regs)
1710 {
1711 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1712 0, 0x24, SIGFPE) == NOTIFY_STOP)
1713 return;
1714
1715 do_fpe_common(regs);
1716 }
1717
1718 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1719
1720 void do_fpother(struct pt_regs *regs)
1721 {
1722 struct fpustate *f = FPUSTATE;
1723 int ret = 0;
1724
1725 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1726 0, 0x25, SIGFPE) == NOTIFY_STOP)
1727 return;
1728
1729 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1730 case (2 << 14): /* unfinished_FPop */
1731 case (3 << 14): /* unimplemented_FPop */
1732 ret = do_mathemu(regs, f);
1733 break;
1734 }
1735 if (ret)
1736 return;
1737 do_fpe_common(regs);
1738 }
1739
1740 void do_tof(struct pt_regs *regs)
1741 {
1742 siginfo_t info;
1743
1744 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1745 0, 0x26, SIGEMT) == NOTIFY_STOP)
1746 return;
1747
1748 if (regs->tstate & TSTATE_PRIV)
1749 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1750 if (test_thread_flag(TIF_32BIT)) {
1751 regs->tpc &= 0xffffffff;
1752 regs->tnpc &= 0xffffffff;
1753 }
1754 info.si_signo = SIGEMT;
1755 info.si_errno = 0;
1756 info.si_code = EMT_TAGOVF;
1757 info.si_addr = (void __user *)regs->tpc;
1758 info.si_trapno = 0;
1759 force_sig_info(SIGEMT, &info, current);
1760 }
1761
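/* Integer divide-by-zero trap.  Fatal in kernel mode; user tasks
 * get SIGFPE with FPE_INTDIV.
 */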
1762 void do_div0(struct pt_regs *regs)
1763 {
1764 siginfo_t info;
1765
1766 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1767 0, 0x28, SIGFPE) == NOTIFY_STOP)
1768 return;
1769
1770 if (regs->tstate & TSTATE_PRIV)
1771 die_if_kernel("TL0: Kernel divide by zero.", regs);
1772 if (test_thread_flag(TIF_32BIT)) {
1773 regs->tpc &= 0xffffffff;
1774 regs->tnpc &= 0xffffffff;
1775 }
1776 info.si_signo = SIGFPE;
1777 info.si_errno = 0;
1778 info.si_code = FPE_INTDIV;
1779 info.si_addr = (void __user *)regs->tpc;
1780 info.si_trapno = 0;
1781 force_sig_info(SIGFPE, &info, current);
1782 }
1783
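/* Dump the nine instruction words surrounding a kernel %tpc,
 * bracketing the faulting one with '<' '>'.  Unaligned PCs are
 * silently ignored.
 */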
1784 void instruction_dump (unsigned int *pc)
1785 {
1786 int i;
1787
1788 if ((((unsigned long) pc) & 3))
1789 return;
1790
1791 printk("Instruction DUMP:");
1792 for (i = -3; i < 6; i++)
1793 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
1794 printk("\n");
1795 }
1796
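/* Same as above for a user %tpc: the words must be fetched with
 * copy_from_user(), and buf[3] is the faulting instruction.
 */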
1797 static void user_instruction_dump (unsigned int __user *pc)
1798 {
1799 int i;
1800 unsigned int buf[9];
1801
1802 if ((((unsigned long) pc) & 3))
1803 return;
1804
1805 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1806 return;
1807
1808 printk("Instruction DUMP:");
1809 for (i = 0; i < 9; i++)
1810 printk("%c%08x%c",i==3?'<':' ',buf[i],i==3?'>':' ');

1811 printk("\n");
1812 }
1813
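/* Walk the saved register windows on TSK's kernel stack, printing
 * each return address (%i7).  The walk stops after 16 frames or as
 * soon as the frame pointer leaves the task's stack.  If we are
 * tracing the current task, flush the register windows to memory
 * first so the stack contents are up to date.
 */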
1814 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1815 {
1816 unsigned long pc, fp, thread_base, ksp;
1817 struct thread_info *tp = tsk->thread_info;
1818 struct reg_window *rw;
1819 int count = 0;
1820
1821 ksp = (unsigned long) _ksp;
1822
1823 if (tp == current_thread_info())
1824 flushw_all();
1825
1826 fp = ksp + STACK_BIAS;
1827 thread_base = (unsigned long) tp;
1828
1829 printk("Call Trace:");
1830 #ifdef CONFIG_KALLSYMS
1831 printk("\n");
1832 #endif
1833 do {
1834 /* Bogus frame pointer? */
1835 if (fp < (thread_base + sizeof(struct thread_info)) ||
1836 fp >= (thread_base + THREAD_SIZE))
1837 break;
1838 rw = (struct reg_window *)fp;
1839 pc = rw->ins[7];
1840 printk(" [%016lx] ", pc);
1841 print_symbol("%s\n", pc);
1842 fp = rw->ins[6] + STACK_BIAS;
1843 } while (++count < 16);
1844 #ifndef CONFIG_KALLSYMS
1845 printk("\n");
1846 #endif
1847 }
1848
1849 void dump_stack(void)
1850 {
1851 unsigned long *ksp;
1852
1853 __asm__ __volatile__("mov %%fp, %0"
1854 : "=r" (ksp));
1855 show_stack(current, ksp);
1856 }
1857
1858 EXPORT_SYMBOL(dump_stack);
1859
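/* A frame pointer is trusted only if it is 8-byte aligned and
 * points inside TASK's own thread_union.  Addresses below
 * PAGE_OFFSET are accepted only for init_task, whose stack lives
 * in the kernel image.
 */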
1860 static inline int is_kernel_stack(struct task_struct *task,
1861 struct reg_window *rw)
1862 {
1863 unsigned long rw_addr = (unsigned long) rw;
1864 unsigned long thread_base, thread_end;
1865
1866 if (rw_addr < PAGE_OFFSET) {
1867 if (task != &init_task)
1868 return 0;
1869 }
1870
1871 thread_base = (unsigned long) task->thread_info;
1872 thread_end = thread_base + sizeof(union thread_union);
1873 if (rw_addr >= thread_base &&
1874 rw_addr < thread_end &&
1875 !(rw_addr & 0x7UL))
1876 return 1;
1877
1878 return 0;
1879 }
1880
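/* Step up one frame by following the saved frame pointer (%i6),
 * re-applying the stack bias.  A zero frame pointer ends the walk.
 */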
1881 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1882 {
1883 unsigned long fp = rw->ins[6];
1884
1885 if (!fp)
1886 return NULL;
1887
1888 return (struct reg_window *) (fp + STACK_BIAS);
1889 }
1890
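/* Despite the name, this never returns: it prints the oops banner,
 * the registers, a backtrace of up to 30 kernel frames (or a user
 * instruction dump), then exits with SIGKILL for privileged traps
 * and SIGSEGV otherwise.
 */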
1891 void die_if_kernel(char *str, struct pt_regs *regs)
1892 {
1893 static int die_counter;
1894 extern void __show_regs(struct pt_regs * regs);
1895 extern void smp_report_regs(void);
1896 int count = 0;
1897
1898 /* Amuse the user. */
1899 printk(
1900 " \\|/ ____ \\|/\n"
1901 " \"@'/ .. \\`@\"\n"
1902 " /_| \\__/ |_\\\n"
1903 " \\__U_/\n");
1904
1905 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
1906 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
1907 __asm__ __volatile__("flushw");
1908 __show_regs(regs);
1909 if (regs->tstate & TSTATE_PRIV) {
1910 struct reg_window *rw = (struct reg_window *)
1911 (regs->u_regs[UREG_FP] + STACK_BIAS);
1912
1913 /* Stop the back trace when we hit userland or we
1914 * find some badly aligned kernel stack.
1915 */
1916 while (rw &&
1917 count++ < 30 &&
1918 is_kernel_stack(current, rw)) {
1919 printk("Caller[%016lx]", rw->ins[7]);
1920 print_symbol(": %s", rw->ins[7]);
1921 printk("\n");
1922
1923 rw = kernel_stack_up(rw);
1924 }
1925 instruction_dump ((unsigned int *) regs->tpc);
1926 } else {
1927 if (test_thread_flag(TIF_32BIT)) {
1928 regs->tpc &= 0xffffffff;
1929 regs->tnpc &= 0xffffffff;
1930 }
1931 user_instruction_dump ((unsigned int __user *) regs->tpc);
1932 }
1933 #ifdef CONFIG_SMP
1934 smp_report_regs();
1935 #endif
1936
1937 if (regs->tstate & TSTATE_PRIV)
1938 do_exit(SIGKILL);
1939 do_exit(SIGSEGV);
1940 }
1941
1942 extern int handle_popc(u32 insn, struct pt_regs *regs);
1943 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1944
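/* Illegal instruction trap.  Before signalling SIGILL, try to
 * emulate the POPC and quad FP load/store (LDQ/STQ) instructions,
 * which are not implemented in hardware on all cpus.
 */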
1945 void do_illegal_instruction(struct pt_regs *regs)
1946 {
1947 unsigned long pc = regs->tpc;
1948 unsigned long tstate = regs->tstate;
1949 u32 insn;
1950 siginfo_t info;
1951
1952 if (notify_die(DIE_TRAP, "illegal instruction", regs,
1953 0, 0x10, SIGILL) == NOTIFY_STOP)
1954 return;
1955
1956 if (tstate & TSTATE_PRIV)
1957 die_if_kernel("Kernel illegal instruction", regs);
1958 if (test_thread_flag(TIF_32BIT))
1959 pc = (u32)pc;
1960 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1961 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1962 if (handle_popc(insn, regs))
1963 return;
1964 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1965 if (handle_ldf_stq(insn, regs))
1966 return;
1967 }
1968 }
1969 info.si_signo = SIGILL;
1970 info.si_errno = 0;
1971 info.si_code = ILL_ILLOPC;
1972 info.si_addr = (void __user *)pc;
1973 info.si_trapno = 0;
1974 force_sig_info(SIGILL, &info, current);
1975 }
1976
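/* Unaligned memory access trap.  Kernel-mode faults are handed to
 * the unaligned fixup code; user tasks get SIGBUS with the faulting
 * address taken from %sfar.
 */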
1977 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1978 {
1979 siginfo_t info;
1980
1981 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
1982 0, 0x34, SIGSEGV) == NOTIFY_STOP)
1983 return;
1984
1985 if (regs->tstate & TSTATE_PRIV) {
1986 extern void kernel_unaligned_trap(struct pt_regs *regs,
1987 unsigned int insn,
1988 unsigned long sfar,
1989 unsigned long sfsr);
1990
1991 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
1992 sfar, sfsr);
1993 return;
1994 }
1995 info.si_signo = SIGBUS;
1996 info.si_errno = 0;
1997 info.si_code = BUS_ADRALN;
1998 info.si_addr = (void __user *)sfar;
1999 info.si_trapno = 0;
2000 force_sig_info(SIGBUS, &info, current);
2001 }
2002
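/* A user task executed a privileged instruction: send SIGILL with
 * ILL_PRVOPC.
 */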
2003 void do_privop(struct pt_regs *regs)
2004 {
2005 siginfo_t info;
2006
2007 if (notify_die(DIE_TRAP, "privileged operation", regs,
2008 0, 0x11, SIGILL) == NOTIFY_STOP)
2009 return;
2010
2011 if (test_thread_flag(TIF_32BIT)) {
2012 regs->tpc &= 0xffffffff;
2013 regs->tnpc &= 0xffffffff;
2014 }
2015 info.si_signo = SIGILL;
2016 info.si_errno = 0;
2017 info.si_code = ILL_PRVOPC;
2018 info.si_addr = (void __user *)regs->tpc;
2019 info.si_trapno = 0;
2020 force_sig_info(SIGILL, &info, current);
2021 }
2022
2023 void do_privact(struct pt_regs *regs)
2024 {
2025 do_privop(regs);
2026 }
2027
2028 /* Trap level 1 stuff or other traps we should never see... */
2029 void do_cee(struct pt_regs *regs)
2030 {
2031 die_if_kernel("TL0: Cache Error Exception", regs);
2032 }
2033
2034 void do_cee_tl1(struct pt_regs *regs)
2035 {
2036 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2037 die_if_kernel("TL1: Cache Error Exception", regs);
2038 }
2039
2040 void do_dae_tl1(struct pt_regs *regs)
2041 {
2042 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2043 die_if_kernel("TL1: Data Access Exception", regs);
2044 }
2045
2046 void do_iae_tl1(struct pt_regs *regs)
2047 {
2048 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2049 die_if_kernel("TL1: Instruction Access Exception", regs);
2050 }
2051
2052 void do_div0_tl1(struct pt_regs *regs)
2053 {
2054 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2055 die_if_kernel("TL1: DIV0 Exception", regs);
2056 }
2057
2058 void do_fpdis_tl1(struct pt_regs *regs)
2059 {
2060 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2061 die_if_kernel("TL1: FPU Disabled", regs);
2062 }
2063
2064 void do_fpieee_tl1(struct pt_regs *regs)
2065 {
2066 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2067 die_if_kernel("TL1: FPU IEEE Exception", regs);
2068 }
2069
2070 void do_fpother_tl1(struct pt_regs *regs)
2071 {
2072 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2073 die_if_kernel("TL1: FPU Other Exception", regs);
2074 }
2075
2076 void do_ill_tl1(struct pt_regs *regs)
2077 {
2078 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2079 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2080 }
2081
2082 void do_irq_tl1(struct pt_regs *regs)
2083 {
2084 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2085 die_if_kernel("TL1: IRQ Exception", regs);
2086 }
2087
2088 void do_lddfmna_tl1(struct pt_regs *regs)
2089 {
2090 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2091 die_if_kernel("TL1: LDDF Exception", regs);
2092 }
2093
2094 void do_stdfmna_tl1(struct pt_regs *regs)
2095 {
2096 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2097 die_if_kernel("TL1: STDF Exception", regs);
2098 }
2099
2100 void do_paw(struct pt_regs *regs)
2101 {
2102 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2103 }
2104
2105 void do_paw_tl1(struct pt_regs *regs)
2106 {
2107 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2108 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2109 }
2110
2111 void do_vaw(struct pt_regs *regs)
2112 {
2113 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2114 }
2115
2116 void do_vaw_tl1(struct pt_regs *regs)
2117 {
2118 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2119 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2120 }
2121
2122 void do_tof_tl1(struct pt_regs *regs)
2123 {
2124 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2125 die_if_kernel("TL1: Tag Overflow Exception", regs);
2126 }
2127
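/* Read-%psr emulation for compat tasks: synthesize a V8-style PSR
 * from %tstate and step past the trap instruction.
 */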
2128 void do_getpsr(struct pt_regs *regs)
2129 {
2130 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2131 regs->tpc = regs->tnpc;
2132 regs->tnpc += 4;
2133 if (test_thread_flag(TIF_32BIT)) {
2134 regs->tpc &= 0xffffffff;
2135 regs->tnpc &= 0xffffffff;
2136 }
2137 }
2138
2139 extern void thread_info_offsets_are_bolixed_dave(void);
2140
2141 /* Only invoked on boot processor. */
2142 void __init trap_init(void)
2143 {
2144 /* Compile time sanity check. */
2145 if (TI_TASK != offsetof(struct thread_info, task) ||
2146 TI_FLAGS != offsetof(struct thread_info, flags) ||
2147 TI_CPU != offsetof(struct thread_info, cpu) ||
2148 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2149 TI_KSP != offsetof(struct thread_info, ksp) ||
2150 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2151 TI_KREGS != offsetof(struct thread_info, kregs) ||
2152 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2153 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2154 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2155 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2156 TI_GSR != offsetof(struct thread_info, gsr) ||
2157 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2158 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2159 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2160 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2161 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2162 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2163 TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
2164 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2165 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2166 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2167 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2168 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2169 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2170 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2171 (TI_FPREGS & (64 - 1)))
2172 thread_info_offsets_are_bolixed_dave();
2173
2174 /* Attach to the address space of init_task. On SMP we
2175 * do this in smp.c:smp_callin for other cpus.
2176 */
2177 atomic_inc(&init_mm.mm_count);
2178 current->active_mm = &init_mm;
2179 }